blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3785d41263e4b9234ce9fa12094e5c58c4066148
|
14567e2f77d2bf697bb18c3c1e3d6744c11f41c8
|
/kfpt/ftp/old/yewubiangeng.py
|
b6326593df20c37274bbb06c1dd6d1d4c6865c11
|
[] |
no_license
|
yanislong/junnan
|
268e64c288e18456da621d5485e04bf8eb8f5322
|
fc35f32a29a7b6da2a8ea334d0e53a21a81d97f3
|
refs/heads/master
| 2021-01-01T20:08:05.825407
| 2017-09-08T02:24:40
| 2017-09-08T02:24:40
| 98,772,303
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,854
|
py
|
#!/usr/bin/python
# -*- coding=utf-8 -*-
import threading
import requests
import chardet
import suds
import sys
# Make the project package importable when run from cron/CLI outside the repo.
sys.path.append('/root/git_20170730/kfpt')
import jiami
import config
import jiemi
# Py2-only: re-expose setdefaultencoding and force UTF-8 so byte strings with
# Chinese text can be mixed with unicode without UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('utf-8')
def DI():
    """Send one "business change" (YWBG) confirmation sync request.

    Builds a plaintext request packet, has the `config.encode` HTTP service
    encrypt it, posts the ciphertext to the filesMutual SOAP endpoint, prints
    the decrypted reply, and returns the raw (still encrypted) reply.
    """
    # Flow ids referenced by the HANDLE_TYPE 2/3 and 4 packets below.
    num = "1005891"
    num2 = "1000213"
    # Pre-issued access tokens (opaque; appear to be base64-encoded
    # company/service/date strings — TODO confirm against the token issuer).
    token4 = "MTAwNDAwMFQwM19PVF8wMDY5OTkyMDE3LTA4"
    token3 = "MTAwM2FiY1QwM19PVF8wMDZ4eXoyMDE3LTA4"
    # One request packet per HANDLE_TYPE (1..4); only the one assigned to XML
    # below is actually sent.
    XML1 = "<PACKET><HEAD><SYS_COMPANY>1004</SYS_COMPANY><SERVICE_CODE>T03_OT_006</SERVICE_CODE><FILE_TYPE>YWBG</FILE_TYPE><REQUEST_TIME>2017-07-05 09:00:00</REQUEST_TIME><ACCESS_TOKEN>"+ token4 +"</ACCESS_TOKEN><HANDLE_TYPE>1</HANDLE_TYPE><CUST_COMPANY>1002</CUST_COMPANY><ACCOUNT_PERIOD>201704</ACCOUNT_PERIOD><PROVINCE_ID>370000</PROVINCE_ID><CITY_ID>370100</CITY_ID><FLOW_ID></FLOW_ID><STATUS>1</STATUS></HEAD></PACKET>"
    XML2 = "<PACKET><HEAD><SYS_COMPANY>1004</SYS_COMPANY><SERVICE_CODE>T03_OT_006</SERVICE_CODE><FILE_TYPE>YWBG</FILE_TYPE><REQUEST_TIME>2017-07-05 09:00:00</REQUEST_TIME><ACCESS_TOKEN>"+ token3 +"</ACCESS_TOKEN><HANDLE_TYPE>2</HANDLE_TYPE><CUST_COMPANY>1003</CUST_COMPANY><ACCOUNT_PERIOD>201704</ACCOUNT_PERIOD><PROVINCE_ID>370000</PROVINCE_ID><CITY_ID>370100</CITY_ID><FLOW_ID>"+ num +"</FLOW_ID><STATUS>1</STATUS></HEAD></PACKET>"
    XML3 = "<PACKET><HEAD><SYS_COMPANY>1004</SYS_COMPANY><SERVICE_CODE>T03_OT_006</SERVICE_CODE><FILE_TYPE>YWBG</FILE_TYPE><REQUEST_TIME>2017-07-05 09:00:00</REQUEST_TIME><ACCESS_TOKEN>"+ token4 +"</ACCESS_TOKEN><HANDLE_TYPE>3</HANDLE_TYPE><CUST_COMPANY>1003</CUST_COMPANY><ACCOUNT_PERIOD>201704</ACCOUNT_PERIOD><PROVINCE_ID>370000</PROVINCE_ID><CITY_ID>370100</CITY_ID><FLOW_ID>" + num + "</FLOW_ID><STATUS>1</STATUS></HEAD></PACKET>"
    XML4 = "<PACKET><HEAD><SYS_COMPANY>1004</SYS_COMPANY><SERVICE_CODE>T03_OT_006</SERVICE_CODE><FILE_TYPE>YWBG</FILE_TYPE><REQUEST_TIME>2017-07-05 09:00:00</REQUEST_TIME><ACCESS_TOKEN>"+ token3 +"</ACCESS_TOKEN><HANDLE_TYPE>4</HANDLE_TYPE><CUST_COMPANY>1003</CUST_COMPANY><ACCOUNT_PERIOD>201704</ACCOUNT_PERIOD><PROVINCE_ID>370000</PROVINCE_ID><CITY_ID>370100</CITY_ID><FLOW_ID>"+ num2 +"</FLOW_ID><STATUS>1</STATUS></HEAD></PACKET>"
    XML = XML2  # currently exercising handle type 2; switch to test the others
    print "请求报文明文:\n", XML  # (prints the request plaintext)
    # Ask the encryption service to encode the packet; strip literal "\n"
    # two-character sequences from the response body.
    r0 = requests.post(config.encode, data={'requestXml':XML})
    endata = r0.content.replace(r"\n","")
    # print "请求报文密文:\n", endata
    print u">> 业务变更确认信息同步接口"  # (banner: business-change confirmation sync interface)
    print "*************"
    # [1:-1] drops the first and last character of the encoded body —
    # presumably surrounding quotes; TODO confirm against the encode service.
    en = endata[1:-1]
    u = config.url + "/services/filesMutual?wsdl"
    cc = suds.client.Client(u).service.ftpFilesMutual(encReqXml=en)
    # print "请求返回的加密报文:\n", cc
    print jiemi.jiemi(cc.replace(r"\n",""))  # decrypt and print the reply
    return cc
if __name__ == "__main__":
    # Both range(1) loops run exactly once, so a single DI() thread is
    # started and immediately joined; bump the ranges to fan out more
    # concurrent requests.
    for i in range(1):
        for i in range(1):
            t = threading.Thread(target=DI, args=())
            t.start()
            t.join()
    print ">> program run end"
|
[
"335916781@qq.com"
] |
335916781@qq.com
|
efb56f8d52f1fa5f6f1068625bd3f62c292a2263
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/refactoring/move/class/before/src/a.py
|
3672729542fb5dc6e6b22bfe999a85b4377e35a4
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
from lib1 import URLOpener
import lib1
class C(object):
    """Exercises both spellings of URLOpener: the bare imported name and the
    lib1-qualified one.

    NOTE(review): this file sits under testData/refactoring/move, so the mix
    of `URLOpener(...)` and `lib1.URLOpener(...)` call forms is presumably
    deliberate fixture data for the move-refactoring test — keep both.
    """
    def __init__(self):
        self.opener = lib1.URLOpener(None)

    def f(self, x):
        # Bare-name usage.
        o = URLOpener(x)
        return o.urlopen()

    def g(self, x):
        # Both usages in one expression.
        return 'f({0!r}) = {1!r}'.format(URLOpener(x), lib1.URLOpener(x))
def main():
    # Python 2 print statement: constructs a C and prints its default repr.
    c = C()
    print c
|
[
"andrey.vlasovskikh@jetbrains.com"
] |
andrey.vlasovskikh@jetbrains.com
|
776c5ce3437a6a7358d8726241ec13c72d739770
|
f7346cf6969fb68d157147e91b90584a881ab814
|
/tutorials/EI networks/STEP3_ExcInhNet_Brunel2000_brian2.py
|
e04b4f6bf78e4d5d25d75b7bb94d075b5d40eaae
|
[
"MIT"
] |
permissive
|
h-mayorquin/camp_india_2016
|
d423f330523fafd4320dbce0429ac4eaafc32e3d
|
a8bf8db7778c39c7ca959a7f876c1aa85f2cae8b
|
refs/heads/master
| 2021-01-20T20:32:50.036961
| 2016-07-15T17:50:34
| 2016-07-15T17:50:34
| 62,492,660
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,316
|
py
|
from brian2 import *
from data_utils import *

# Sparsely connected E/I network (Brunel 2000) in Brian2 standalone C++ mode;
# with build_on_run=False nothing is compiled until device.build() below.
set_device('cpp_standalone', build_on_run=False)

# neuronal parameters
N = 12500    # total number of neurons
NE = 10000   # number of excitatory neurons (inhibitory: N - NE = 2500)
vth = 20*mV  # threshold potential
vr = 10*mV   # reset potential
tau = 20*ms  # membrane time constant

# Leaky integrate-and-fire membrane with a per-neuron constant drive `inp`.
eqs_neurons='''
inp : volt
dv/dt = (-v + inp)/tau : volt
'''

P=NeuronGroup(N=N,model=eqs_neurons,\
    threshold='v>=vth',reset='v=vr',\
    refractory=2*ms,method='euler')
# Random initial membrane potentials uniformly in [0, vth).
P.v = uniform(size=12500)*vth

# synaptic parameters
g = 5           # ratio of inh to exc
J = 0.1*mV      # synaptic weight
p = 0.1         # connection probability
delay = 1.5*ms  # synaptic delay

# delta-function synapses: a presynaptic spike instantly bumps v_post by w
con = Synapses(P,P,'w:volt (constant)',on_pre='v_post+=w',method='euler')
#con.connect(condition='i!=j',p=p)
print 'computing connection matrix'
# Fixed in-degree wiring built by hand (instead of the probabilistic
# connect() commented out above): every postsynaptic neuron receives exactly
# CE excitatory and CI inhibitory inputs.
CE = int(p*NE)      # excitatory inputs per neuron
CI = int(p*(N-NE))  # inhibitory inputs per neuron
C = CE+CI
conn_i = np.zeros(C*N,dtype=int)
preneuronsE = arange(0,NE,dtype=int)
preneuronsI = arange(NE,N,dtype=int)
for j in range(N): # loop over post-synaptic neurons
    # draw CE number of neuron indices out of NE neurons, no autapses
    if j<NE: preneurons = np.delete(preneuronsE,j)
    else: preneurons = preneuronsE
    conn_i[j*C:j*C+CE] = np.random.permutation(preneurons)[:CE]
    # draw CI number of neuron indices out of inhibitory neurons, no autapses
    # NOTE(review): this guard looks off by one — for j == NE the condition
    # `j>NE` is False, so neuron NE is not removed from its own candidate
    # pool and an autapse is possible; `j>=NE` was probably intended. Confirm.
    if j>NE: preneurons = np.delete(preneuronsI,j-NE)
    else: preneurons = preneuronsI
    conn_i[j*C+CE:(j+1)*C] = np.random.permutation(preneurons)[:CI]
conn_j = np.repeat(range(N),C)
print 'connecting network'
con.connect(i=conn_i,j=conn_j)
con.delay = delay
# Excitatory weights +J, inhibitory weights -g*J.
con.w['i<NE'] = J
con.w['i>=NE'] = -g*J

# input parameters
inpfactor = 2  # external drive relative to the threshold rate nu_theta
nu_theta = vth/(p*NE*J*tau)
# External Poisson drive wired with the same weight J and delay.
Pinp = PoissonGroup(N=N,rates=inpfactor*nu_theta)
con_ext = Synapses(Pinp, P, on_pre='v += J')
con_ext.connect(True, p=p*NE/float(N))
con_ext.delay = delay

# Record spikes, population rate, and membrane traces of the first 5 neurons.
sm = SpikeMonitor(P)
sr = PopulationRateMonitor(P)
sm_vm = StateMonitor(P,'v',record=range(5))

print 'compiling/running'
run(0.25*second, report='text')
# Actual compile + execution of the standalone code happens here.
device.build(directory='output', compile=True, run=True, debug=False);
print "mean activity (Hz) =",mean(sr.rate/Hz)

# Raster plot of all spikes, then the population rate over time.
figure()
plot(sm.t/ms,sm.i,'.')
#ylim([1350,1400])
figure()
plot(sr.t/ms,sr.rate/Hz,',-')
#figure()
#hist(CV_spiketrains(array(sm.t),array(sm.i),0.,range(N)),bins=100)
show()
|
[
"h.mayorquin@gmail.com"
] |
h.mayorquin@gmail.com
|
4240dcc89fea3086e9ad8bb2404e025a5801990a
|
b0b566dc3d3df8b60b8ce26d151991700341667f
|
/cms/custom_settings/models.py
|
fd4b05b7a5f1c8fae3e729c316734ea79e048e5e
|
[] |
no_license
|
lorne-luo/hawkeye
|
cf2f7cbe2494aea92e0fc333217f86d00cf1ecba
|
82b633dfc1278ab8f2d25ec699d6034b26c791e2
|
refs/heads/master
| 2023-01-06T17:28:28.489494
| 2020-01-22T23:53:10
| 2020-01-22T23:53:24
| 185,289,740
| 0
| 0
| null | 2023-01-04T14:03:49
| 2019-05-07T00:13:40
|
Python
|
UTF-8
|
Python
| false
| false
| 2,026
|
py
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from wagtail.contrib.settings.models import BaseSetting
from wagtail.contrib.settings.registry import register_setting
from core.constants import AU_CITY_CHOICES, AU_STATE_CHOICES
from wagtail.snippets.models import register_snippet
from wagtail.admin.edit_handlers import FieldPanel
from wagtail.images.edit_handlers import ImageChooserPanel
@register_setting(icon='form')
class ContactUs(BaseSetting):
    """Per-site "contact us" details, edited through Wagtail's settings UI
    (registered via @register_setting). All fields are optional free text;
    city/state are constrained to the AU choice lists from core.constants.
    """
    name = models.CharField(_('name'), max_length=255, blank=True, help_text='contactor name')
    address1 = models.CharField(_('address1'), max_length=255, blank=True, help_text='address1')
    address2 = models.CharField(_('address2'), max_length=255, blank=True, help_text='address2')
    city = models.CharField(_('city'), choices=AU_CITY_CHOICES, max_length=255, blank=True, help_text='city')
    state = models.CharField(_('state'), choices=AU_STATE_CHOICES, max_length=255, blank=True, help_text='state')
    postcode = models.CharField(_('postcode'), max_length=32, blank=True, help_text='postcode')
    phone = models.CharField(_('phone'), max_length=32, blank=True, help_text='phone')
    email = models.EmailField(_('email'), max_length=255, blank=True, help_text='email')

    class Meta:
        verbose_name = 'contact us'
@register_snippet
class Link(models.Model):
    """A partner link snippet: required name, optional URL, optional logo
    image and a CSS class name used for styling. Edited via the Wagtail
    snippets UI using the `panels` layout below.
    """
    name = models.CharField(_('name'), max_length=255, blank=False, help_text='Partner name')
    link = models.URLField(_('link'), blank=True, help_text='Partner link')
    # SET_NULL so deleting the image does not delete the Link row.
    logo = models.ForeignKey(
        'wagtailimages.Image',
        null=True,
        blank=True,
        on_delete=models.SET_NULL,
        related_name='+',
    )
    class_name = models.CharField(_('styling class name'), max_length=64, blank=True, help_text='styling class name')

    panels = [
        FieldPanel('name'),
        ImageChooserPanel('logo'),
        FieldPanel('link'),
        FieldPanel('class_name'),
    ]

    def __str__(self):
        return self.name
|
[
"dev@luotao.net"
] |
dev@luotao.net
|
720b8b5ad105cefcdea842e54d46b83fb5563320
|
a46d135ba8fd7bd40f0b7d7a96c72be446025719
|
/packages/python/plotly/plotly/validators/parcoords/_labelangle.py
|
13d8fff892a81eaefbdfe73a406fc69782ba9295
|
[
"MIT"
] |
permissive
|
hugovk/plotly.py
|
5e763fe96f225d964c4fcd1dea79dbefa50b4692
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
refs/heads/master
| 2022-05-10T12:17:38.797994
| 2021-12-21T03:49:19
| 2021-12-21T03:49:19
| 234,146,634
| 0
| 0
|
MIT
| 2020-01-15T18:33:43
| 2020-01-15T18:33:41
| null |
UTF-8
|
Python
| false
| false
| 406
|
py
|
import _plotly_utils.basevalidators


class LabelangleValidator(_plotly_utils.basevalidators.AngleValidator):
    """Angle validator for the `parcoords.labelangle` property.

    Auto-generated plotly validator; the base AngleValidator supplies the
    actual validation logic.
    """

    def __init__(self, plotly_name="labelangle", parent_name="parcoords", **kwargs):
        super(LabelangleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Default edit_type is "plot"; a caller-supplied kwarg overrides it.
            edit_type=kwargs.pop("edit_type", "plot"),
            **kwargs
        )
|
[
"noreply@github.com"
] |
hugovk.noreply@github.com
|
8c90dde967a168d19294d7de308bc868f12d2843
|
1719920a92f7194766624474b98d59ef8d6eddaf
|
/models/workbook_chart.py
|
e1ff1c8523996a870b6b7be3ab6088101b52548a
|
[
"MIT"
] |
permissive
|
MIchaelMainer/msgraph-v10-models-python
|
cfa5e3a65ba675383975a99779763211ed9fa0a9
|
adad66363ebe151be2332f3ef74a664584385748
|
refs/heads/master
| 2020-03-19T12:51:06.370673
| 2018-06-08T00:16:12
| 2018-06-08T00:16:12
| 136,544,573
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,259
|
py
|
# -*- coding: utf-8 -*-
'''
# Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information.
#
# This file was generated and any changes will be overwritten.
'''
from __future__ import unicode_literals
from ..model.workbook_chart_axes import WorkbookChartAxes
from ..model.workbook_chart_data_labels import WorkbookChartDataLabels
from ..model.workbook_chart_area_format import WorkbookChartAreaFormat
from ..model.workbook_chart_legend import WorkbookChartLegend
from ..model.workbook_chart_series import WorkbookChartSeries
from ..model.workbook_chart_title import WorkbookChartTitle
from ..model.workbook_worksheet import WorkbookWorksheet
from ..one_drive_object_base import OneDriveObjectBase
class WorkbookChart(OneDriveObjectBase):
    """A workbook chart resource backed by a plain property dictionary.

    The dict holds the deserialized JSON. Scalar properties (height, left,
    name, top, width) are read straight out of it; nested resources (axes,
    legend, ...) are converted to their model classes lazily on first access
    and cached back into the dict.

    This file was generated; this class deviates from the generator output to
    fix the shared-mutable-default bug in __init__ and to factor the repeated
    lazy-conversion pattern into one helper.
    """

    def __init__(self, prop_dict=None):
        """Initialize the chart from an optional property dictionary.

        :param dict prop_dict: deserialized JSON properties; a fresh dict is
            created when omitted. (The generated code used `prop_dict={}`,
            a mutable default shared by every no-argument instance, so one
            instance's property writes leaked into all the others.)
        """
        self._prop_dict = {} if prop_dict is None else prop_dict

    def _get_model(self, key, cls):
        # Shared accessor for nested-resource properties: return
        # self._prop_dict[key] as a `cls` instance (converting a raw dict in
        # place on first access), or None when the key is absent.
        if key in self._prop_dict:
            if not isinstance(self._prop_dict[key], OneDriveObjectBase):
                self._prop_dict[key] = cls(self._prop_dict[key])
            return self._prop_dict[key]
        return None

    @property
    def height(self):
        """Gets and sets the height (float), or None when unset."""
        return self._prop_dict.get("height")

    @height.setter
    def height(self, val):
        self._prop_dict["height"] = val

    @property
    def left(self):
        """Gets and sets the left position (float), or None when unset."""
        return self._prop_dict.get("left")

    @left.setter
    def left(self, val):
        self._prop_dict["left"] = val

    @property
    def name(self):
        """Gets and sets the name (str), or None when unset."""
        return self._prop_dict.get("name")

    @name.setter
    def name(self, val):
        self._prop_dict["name"] = val

    @property
    def top(self):
        """Gets and sets the top position (float), or None when unset."""
        return self._prop_dict.get("top")

    @top.setter
    def top(self, val):
        self._prop_dict["top"] = val

    @property
    def width(self):
        """Gets and sets the width (float), or None when unset."""
        return self._prop_dict.get("width")

    @width.setter
    def width(self, val):
        self._prop_dict["width"] = val

    @property
    def axes(self):
        """Gets and sets the axes as WorkbookChartAxes, or None when unset."""
        return self._get_model("axes", WorkbookChartAxes)

    @axes.setter
    def axes(self, val):
        self._prop_dict["axes"] = val

    @property
    def data_labels(self):
        """Gets and sets the dataLabels as WorkbookChartDataLabels, or None when unset."""
        return self._get_model("dataLabels", WorkbookChartDataLabels)

    @data_labels.setter
    def data_labels(self, val):
        self._prop_dict["dataLabels"] = val

    @property
    def format(self):
        """Gets and sets the format as WorkbookChartAreaFormat, or None when unset."""
        return self._get_model("format", WorkbookChartAreaFormat)

    @format.setter
    def format(self, val):
        self._prop_dict["format"] = val

    @property
    def legend(self):
        """Gets and sets the legend as WorkbookChartLegend, or None when unset."""
        return self._get_model("legend", WorkbookChartLegend)

    @legend.setter
    def legend(self, val):
        self._prop_dict["legend"] = val

    @property
    def series(self):
        """Gets and sets the series collection page.

        NOTE(review): SeriesCollectionPage is not imported anywhere in this
        module (the WorkbookChartSeries import at the top is unused), so
        accessing this property raises NameError whenever "series" is present
        in the backing dict — the generator appears to have dropped an import.
        Behavior is preserved as-is rather than guessing the import path.
        """
        if "series" in self._prop_dict:
            return SeriesCollectionPage(self._prop_dict["series"])
        return None

    @property
    def title(self):
        """Gets and sets the title as WorkbookChartTitle, or None when unset."""
        return self._get_model("title", WorkbookChartTitle)

    @title.setter
    def title(self, val):
        self._prop_dict["title"] = val

    @property
    def worksheet(self):
        """Gets and sets the worksheet as WorkbookWorksheet, or None when unset."""
        return self._get_model("worksheet", WorkbookWorksheet)

    @worksheet.setter
    def worksheet(self, val):
        self._prop_dict["worksheet"] = val
|
[
"mmainer@microsoft.com"
] |
mmainer@microsoft.com
|
57da4fcb912fee8e7c21d8f1cbf7a1539e0aaf81
|
df4ecb12fe9d20cb9fb92014736045425bf57c0d
|
/setup.py
|
19fe4c62ffb2598aeb7ade8be207c6859e5fe45b
|
[
"MIT"
] |
permissive
|
gecko-robotics/pygecko
|
a277c717d516de6d836ccfd47ac5b1a6e7dd09ef
|
a809593a894d8e591e992455a01aa73d8f7b7981
|
refs/heads/master
| 2022-07-09T09:28:44.500735
| 2019-10-26T23:07:51
| 2019-10-26T23:07:51
| 70,022,547
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,018
|
py
|
from __future__ import print_function
from setuptools import setup
from build_utils import BuildCommand
from build_utils import PublishCommand
from build_utils import BinaryDistribution
from build_utils import SetGitTag
from build_utils import get_pkg_version

# Old approach kept for reference: exec'ing version.py into a dict.
# ver = {}
# with open("pygecko/version.py") as fp:
#     exec(fp.read(), ver)
# VERSION = ver['__version__']

# Version string is parsed out of the package's __init__.py by build_utils.
VERSION = get_pkg_version('pygecko/__init__.py')
PACKAGE_NAME = 'pygecko'

# Configure the custom build/publish/tag commands before handing them to
# setup() via cmdclass below.
BuildCommand.pkg = PACKAGE_NAME
BuildCommand.py2 = False  # not supporting python2 anymore
# BuildCommand.test = False # don't do tests
PublishCommand.pkg = PACKAGE_NAME
PublishCommand.version = VERSION
SetGitTag.version = VERSION

# NOTE(review): path is relative to the CWD, so building from another
# directory fails here; file handle is left for the GC to close.
README = open('readme.md').read()

setup(
    name=PACKAGE_NAME,
    version=VERSION,
    author="Kevin Walchko",
    keywords=['framework', 'robotic', 'robot', 'vision', 'ros', 'distributed'],
    author_email="walchko@noreply.github.com",
    description="A python robotic framework and tools",
    license="MIT",
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: OSI Approved :: MIT License',
        # 'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Operating System :: Unix',
        'Operating System :: POSIX :: Linux',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Scientific/Engineering :: Image Recognition',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    install_requires=[
        'pyyaml',        # config files
        'psutil',
        'simplejson',    # config files
        'msgpack',       # efficient message serialization through zmq
        'pyzmq',         # connecting to different processes and computers
        # 'bjoern',      # http server, multiple connections
        # 'the_collector', # saving data
        'colorama',      # log messages
        'numpy',         # basic image stuff ... remove/optional?
        'build_utils'    # installing and building the library
    ],
    url="https://github.com/MomsFriendlyRobotCompany/{}".format(PACKAGE_NAME),
    long_description=README,
    long_description_content_type='text/markdown',
    packages=[PACKAGE_NAME],
    # Wire in the custom commands: `python setup.py make|publish|tag`.
    cmdclass={
        'publish': PublishCommand,
        'make': BuildCommand,
        'tag': SetGitTag
    },
    scripts=[
        'bin/geckocore.py',
        'bin/pycore.py',
        'bin/geckolaunch.py',
        # 'bin/mjpeg_server.py', # why? use opencvutils instead
        # 'bin/bag_play.py',
        # 'bin/bag_record.py',
        # 'bin/camera_calibrate.py',
        # 'bin/image_view.py',
        # 'bin/service.py', # fix
        'bin/gecko_log_display.py',
        'bin/geckotopic.py',
        'bin/twist_keyboard.py'
        # 'bin/video.py',
        # 'bin/webserver.py'
    ]
)
|
[
"walchko@users.noreply.github.com"
] |
walchko@users.noreply.github.com
|
5d00565d21a9ad4f8942b1f3c6dd71b679e404a9
|
722386e8cb2be70e3a59e4e4667ad2733d84cb93
|
/fishc/列表/test1.py
|
0634933ace66d7b1339a02e7d2431af7fe045326
|
[] |
no_license
|
yuansuixin/Python_Learning
|
15720a33c5d3d4e2e3b2f5214fdbfb4c3d1ed92e
|
40aa8d0d034599f448f9125b34897648e87c8f37
|
refs/heads/master
| 2021-03-31T01:04:10.830905
| 2018-03-11T09:55:49
| 2018-03-11T09:55:49
| 124,743,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 304
|
py
|
# The list alternates name/score entries; print each name with the score
# that immediately follows it, one pair per line.
member=['米兔',88,'夜',90,'小甲鱼',87,'意境',56,'求无斜阳',99]
for idx in range(0, len(member), 2):
    print(member[idx], member[idx + 1])
|
[
"cyss428@163.com"
] |
cyss428@163.com
|
33de0048d31e64df7114554ace988932e59ef82a
|
a61263850fe63de61ec3004519f0d9aa69f104ac
|
/python_Algorithm/battle16/ArraySideSpin.py
|
1608d9afead08d58b4772c40d12f1b9339d25e39
|
[] |
no_license
|
Kimhyeonsuk/Programmers_Python
|
dd0e13ef6690cfab0c46a7c8b07a5f3b40175071
|
cc5687c8db2cfa098602829dec3acbf17c5c2177
|
refs/heads/master
| 2023-07-16T22:30:29.457419
| 2021-09-02T10:40:56
| 2021-09-02T10:40:56
| 355,876,212
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,177
|
py
|
def solution(rows, columns, queries):
    """Rotate the border of each queried sub-rectangle clockwise by one cell.

    The grid is `rows` x `columns`, filled row-major with 1..rows*columns.
    Each query is a 1-based rectangle [r1, c1, r2, c2]; its border cells are
    shifted one step clockwise, and the smallest value on the border (after
    the shift — same set of values either way) is recorded.

    Returns the list of per-query minima.
    """
    grid = [[r * columns + c + 1 for c in range(columns)] for r in range(rows)]
    minima = []
    for r1, c1, r2, c2 in queries:
        # Convert to 0-based corners.
        r1 -= 1
        c1 -= 1
        r2 -= 1
        c2 -= 1
        # Walk the border counter-clockwise, collecting coordinates:
        # left edge down, bottom edge right, right edge up, top edge left.
        path = [(r, c1) for r in range(r1, r2)]
        path += [(r2, c) for c in range(c1, c2)]
        path += [(r, c2) for r in range(r2, r1, -1)]
        path += [(r1, c) for c in range(c2, c1, -1)]
        values = [grid[r][c] for r, c in path]
        # Shifting values one step *against* the counter-clockwise walk makes
        # every border value move one cell clockwise.
        rotated = values[1:] + values[:1]
        minima.append(min(rotated))
        for (r, c), v in zip(path, rotated):
            grid[r][c] = v
    return minima
|
[
"philippe10@naver.com"
] |
philippe10@naver.com
|
aeea0cff7aca2caf25f9f9dd3296fa30bac15a92
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2158/60837/267537.py
|
f9eb16e1f46295dd50a8fa352db22017d4a5bdca
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
def atoi(string):
    """Parse the leading integer of *string* and return it.

    Accepts an optional '-' in the first position followed by digits;
    collection stops at the first character that does not extend the number.
    Returns 0 when no digits are collected (empty input, non-numeric prefix).

    Fix over the original: input whose numeric prefix is a lone '-' (e.g.
    "-", "-x") used to reach int("-") and raise ValueError; it now returns 0
    like the other no-digit cases.
    """
    result = []
    for i, ch in enumerate(string):
        # '-' is only meaningful as the very first character.
        if ch.isdigit() or (i == 0 and ch == '-'):
            result.append(ch)
        else:
            break
    if not result or result == ['-']:
        return 0
    return int(''.join(result))
# Script entry: read one line from stdin and print its parsed integer prefix.
a=input()
print(atoi(a))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
5999d541cfc1bb6b84ba5ce6029f9f926694038a
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_116/ch120_2020_03_25_19_26_17_731011.py
|
72962cf219a8f0a53e1c3745ee3728e79282ae89
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,087
|
py
|
from random import randint

# Toy roulette game: start with 100 money units and keep betting until broke.
# Entering a bet of 0 (handled by the final else) zeroes the balance, which
# ends the while loop.
dinheiro=100
print(dinheiro)
while dinheiro>0:
    aposta=int(input("qual o valor da aposta? "))
    if aposta !=0:
        # "n" = bet on a single number, "p" = bet on even, "i" = bet on odd.
        # Any other answer silently re-asks for a new bet.
        opcao=input("a aposta é em um número ou paridade? ")
        if opcao == "n":
            numero=int(input("numero de 1 a 36: "))
            # NOTE(review): the prompt offers 1..36 but the wheel draws only
            # 2..35 here (the parity branches below draw 0..36), so bets on
            # 1 or 36 can never win — this range mismatch looks like a bug.
            roleta=randint(2,35)
            if numero == roleta:
                # Single-number win pays 35x the stake.
                dinheiro+=(aposta*35)
                print(dinheiro)
            else:
                dinheiro-=aposta
                print(dinheiro)
        elif opcao == "p":
            roleta=randint(0,36)
            # NOTE(review): 0 counts as a win for the "even" bet (the
            # `or roleta==0` is redundant, 0 % 2 == 0 already), yet the
            # "odd" branch explicitly excludes 0 — in casino roulette 0
            # loses both parity bets; confirm the intended rules.
            if roleta % 2 == 0 or roleta==0:
                dinheiro+=aposta
                print(dinheiro)
            else:
                dinheiro-=aposta
                print(dinheiro)
        elif opcao == "i":
            roleta=randint(0,36)
            if roleta % 2 != 0 and roleta !=0:
                dinheiro+=aposta
                print(dinheiro)
            else:
                dinheiro-=aposta
                print(dinheiro)
    else:
        # aposta == 0: cash out by zeroing the balance, ending the game.
        dinheiro-=dinheiro
|
[
"you@example.com"
] |
you@example.com
|
c517d6fa8f95cfd54f1582a7435d4da648b9952e
|
d77cd334b0d05dc12c620d792bf4a1b8382c9cbe
|
/examples/keras-iris-pipeline/run_pipeline.py
|
40c6bb7a3df4745e2e5a2753b981380cba2cec26
|
[
"Apache-2.0"
] |
permissive
|
pcrete/skil-python
|
befc4cdbad78213e6e0221c78e960db5eea16a73
|
672a1aa9e8af020c960ab9ee280cbb6b194afc3f
|
refs/heads/master
| 2020-05-18T17:23:30.325751
| 2019-05-16T07:34:47
| 2019-05-16T07:34:47
| 180,715,194
| 0
| 0
|
Apache-2.0
| 2019-04-11T04:39:12
| 2019-04-11T04:39:11
| null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
import skil
import numpy as np

# Connect to a running SKIL server (default host/credentials) and create a
# fresh workspace + experiment to register artifacts under.
skil_server = skil.Skil()
work_space = skil.WorkSpace(skil_server)
experiment = skil.Experiment(work_space)

# Register the transform config and the trained Keras model files.
transform = skil.Transform(transform='iris_tp.json', experiment=experiment)
model = skil.Model(model='iris_model.h5', experiment=experiment)

# Deploy transform + model together as one serving pipeline.
deployment = skil.Deployment(skil_server)
pipeline = skil.Pipeline(deployment, model, transform)

# Feed the raw CSV lines of the iris dataset to the deployed pipeline and
# print its predictions.
with open('iris.data', 'r') as f:
    data = np.array(f.readlines())
print(pipeline.predict(data))
|
[
"max.pumperla@googlemail.com"
] |
max.pumperla@googlemail.com
|
2425a8967772fd09cd8e552026ade42063b6fbd5
|
921f5c21500eb3526d153c6b50fb73bbfe4ecef9
|
/1.4 Ad Hoc/Game (Chess)/p278.py
|
8a937641efa41281b9c4caa505235964a159317b
|
[] |
no_license
|
alex-stephens/competitive-programming
|
c3c2919b1e3978e2f498f2d53837774b490c2a3c
|
833363f56ef9ada91952c501829a8f430db0caf5
|
refs/heads/master
| 2021-09-13T18:25:22.432073
| 2018-05-03T00:46:39
| 2018-05-03T00:46:39
| 119,809,363
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
# Competitive Programming 3
# Problem 278
#
# For each of T test cases: a piece code and a board size m x n; print the
# maximum number of mutually non-attacking pieces of that kind, using the
# problem's closed-form answers.
T = int(input())
for _ in range(T):
    tokens = input().split()
    piece = tokens[0]
    m, n = int(tokens[1]), int(tokens[2])
    if piece == 'k':
        # half the squares, rounded up
        print((m * n + 1) // 2)
    elif piece in ('r', 'Q'):
        # one per row or column, whichever is fewer
        print(min(m, n))
    elif piece == 'K':
        # one per 2x2 block of squares
        print(((n + 1) // 2) * ((m + 1) // 2))
|
[
"alexstephens9@gmail.com"
] |
alexstephens9@gmail.com
|
9d027ba393e19bf31b550809b6ed0fc83cc038b4
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/verbal-arithmetic-puzzle.py
|
0ce93235f4960cf53349882ed8ec47a385473257
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 1,711
|
py
|
# Time: O(10! * n * l)
# Space: O(n * l)
import collections
class Solution(object):
    def isSolvable(self, words, result):
        """
        :type words: List[str]
        :type result: str
        :rtype: bool

        Cryptarithmetic solver (SEND+MORE=MONEY style): decide whether
        distinct digits can be assigned to letters so that sum(words) ==
        result, with no number having a leading zero. Columns are processed
        right-to-left — the strings are reversed before the recursion starts.
        NOTE: uses xrange, so this file targets Python 2.
        """
        def backtracking(words, result, i, j, carry, lookup, used):
            # j: current digit column (0 == least significant after reversal)
            # i: index of the addend whose column-j letter is being assigned;
            #    i == len(words) means all addend letters in this column are
            #    fixed, so the column sum is checked against result[j].
            # lookup: letter -> digit; used: set of digits already taken.
            if j == len(result):
                # All columns consumed; valid iff nothing carries out.
                return carry == 0
            if i != len(words):
                if j >= len(words[i]) or words[i][j] in lookup:
                    # Word too short for this column, or letter already fixed.
                    return backtracking(words, result, i+1, j, carry, lookup, used)
                for val in xrange(10):
                    # Skip taken digits and a leading zero (last char of the
                    # reversed word is the original first character).
                    if val in used or (val == 0 and j == len(words[i])-1):
                        continue
                    lookup[words[i][j]] = val
                    used.add(val)
                    if backtracking(words, result, i+1, j, carry, lookup, used):
                        return True
                    # Undo the tentative assignment before trying the next digit.
                    used.remove(val)
                    del lookup[words[i][j]]
                return False
            # All addend letters in column j are assigned: compute the column
            # sum, split into this column's digit and the carry out.
            carry, val = divmod(carry + sum(lookup[w[j]] for w in words if j < len(w)), 10)
            if result[j] in lookup:
                return val == lookup[result[j]] and \
                    backtracking(words, result, 0, j+1, carry, lookup, used)
            # Result letter unassigned: it must take exactly `val` (rejecting
            # a taken digit or a leading zero on the result).
            if val in used or (val == 0 and j == len(result)-1):
                return False
            lookup[result[j]] = val
            used.add(val)
            if backtracking(words, result, 0, j+1, carry, lookup, used):
                return True
            used.remove(val)
            del lookup[result[j]]
            return False
        # Reverse everything so index j always means "digit column j".
        return backtracking([w[::-1] for w in words], result[::-1], 0, 0, 0, {}, set())
|
[
"noreply@github.com"
] |
kamyu104.noreply@github.com
|
63148046f11c2f2384d376fa158a19b4f33f4a5b
|
ea44a1681e276b3cc85226b53de217f6096a05d4
|
/fhir/resources/STU3/documentmanifest.py
|
76787d0d214eee4100a2f3e37c06bdfd35bce5fe
|
[
"BSD-3-Clause"
] |
permissive
|
stephanie-howson/fhir.resources
|
69d2a5a6b0fe4387b82e984255b24027b37985c4
|
126e9dc6e14541f74e69ef7c1a0b8a74aa981905
|
refs/heads/master
| 2020-05-04T22:24:49.826585
| 2019-06-27T15:51:26
| 2019-06-27T15:51:26
| 179,511,579
| 0
| 0
| null | 2019-04-04T14:14:53
| 2019-04-04T14:14:52
| null |
UTF-8
|
Python
| false
| false
| 7,250
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 3.0.1.11917 (http://hl7.org/fhir/StructureDefinition/DocumentManifest) on 2019-01-17.
# 2019, SMART Health IT.
from . import domainresource
class DocumentManifest(domainresource.DomainResource):
    """ A list that defines a set of documents.

    A collection of documents compiled for a purpose together with metadata
    that applies to the collection.

    Generated from FHIR STU3 (see the file header); attribute shapes follow
    the generator's conventions.
    """

    resource_type = "DocumentManifest"

    def __init__(self, jsondict=None, strict=True):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.author = None
        """ Who and/or what authored the manifest.
        List of `FHIRReference` items referencing `Practitioner, Organization, Device, Patient, RelatedPerson` (represented as `dict` in JSON). """

        self.content = None
        """ The items included.
        List of `DocumentManifestContent` items (represented as `dict` in JSON). """

        self.created = None
        """ When this document manifest created.
        Type `FHIRDate` (represented as `str` in JSON). """

        self.description = None
        """ Human-readable description (title).
        Type `str`. """

        self.identifier = None
        """ Other identifiers for the manifest.
        List of `Identifier` items (represented as `dict` in JSON). """

        self.masterIdentifier = None
        """ Unique Identifier for the set of documents.
        Type `Identifier` (represented as `dict` in JSON). """

        self.recipient = None
        """ Intended to get notified about this set of documents.
        List of `FHIRReference` items referencing `Patient, Practitioner, RelatedPerson, Organization` (represented as `dict` in JSON). """

        self.related = None
        """ Related things.
        List of `DocumentManifestRelated` items (represented as `dict` in JSON). """

        self.source = None
        """ The source system/application/software.
        Type `str`. """

        self.status = None
        """ current | superseded | entered-in-error.
        Type `str`. """

        self.subject = None
        """ The subject of the set of documents.
        Type `FHIRReference` referencing `Patient, Practitioner, Group, Device` (represented as `dict` in JSON). """

        self.type = None
        """ Kind of document set.
        Type `CodeableConcept` (represented as `dict` in JSON). """

        super(DocumentManifest, self).__init__(jsondict=jsondict, strict=strict)

    def elementProperties(self):
        # Each tuple appears to be (python name, json name, type, is_list,
        # "one of many" group, required) — matches the pattern of other
        # generated fhirclient models; confirm against the base class.
        # NOTE(review): fhirreference/fhirdate/identifier/codeableconcept are
        # not imported in the visible top of this module; this generator puts
        # those imports near the end of the file, so this method must only be
        # called after the module is fully imported — confirm in full file.
        js = super(DocumentManifest, self).elementProperties()
        js.extend([
            ("author", "author", fhirreference.FHIRReference, True, None, False),
            ("content", "content", DocumentManifestContent, True, None, True),
            ("created", "created", fhirdate.FHIRDate, False, None, False),
            ("description", "description", str, False, None, False),
            ("identifier", "identifier", identifier.Identifier, True, None, False),
            ("masterIdentifier", "masterIdentifier", identifier.Identifier, False, None, False),
            ("recipient", "recipient", fhirreference.FHIRReference, True, None, False),
            ("related", "related", DocumentManifestRelated, True, None, False),
            ("source", "source", str, False, None, False),
            ("status", "status", str, False, None, True),
            ("subject", "subject", fhirreference.FHIRReference, False, None, False),
            ("type", "type", codeableconcept.CodeableConcept, False, None, False),
        ])
        return js
from . import backboneelement
class DocumentManifestContent(backboneelement.BackboneElement):
""" The items included.
The list of Documents included in the manifest.
"""
resource_type = "DocumentManifestContent"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.pAttachment = None
""" Contents of this set of documents.
Type `Attachment` (represented as `dict` in JSON). """
self.pReference = None
""" Contents of this set of documents.
Type `FHIRReference` referencing `Resource` (represented as `dict` in JSON). """
super(DocumentManifestContent, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(DocumentManifestContent, self).elementProperties()
js.extend([
("pAttachment", "pAttachment", attachment.Attachment, False, "p", True),
("pReference", "pReference", fhirreference.FHIRReference, False, "p", True),
])
return js
class DocumentManifestRelated(backboneelement.BackboneElement):
""" Related things.
Related identifiers or resources associated with the DocumentManifest.
"""
resource_type = "DocumentManifestRelated"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.identifier = None
""" Identifiers of things that are related.
Type `Identifier` (represented as `dict` in JSON). """
self.ref = None
""" Related Resource.
Type `FHIRReference` referencing `Resource` (represented as `dict` in JSON). """
super(DocumentManifestRelated, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(DocumentManifestRelated, self).elementProperties()
js.extend([
("identifier", "identifier", identifier.Identifier, False, None, False),
("ref", "ref", fhirreference.FHIRReference, False, None, False),
])
return js
import sys
try:
from . import attachment
except ImportError:
attachment = sys.modules[__package__ + '.attachment']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
|
[
"connect2nazrul@gmail.com"
] |
connect2nazrul@gmail.com
|
7e0bca58ce7844776a9d4810c22e4a72cfef623b
|
a3926c09872e1f74b57431fbb3e711918a11dc0a
|
/python/array/0766_toeplitz_matrix.py
|
3462854c72324c5f85f5dea1c0cc69e7fd5f6b58
|
[
"MIT"
] |
permissive
|
linshaoyong/leetcode
|
e64297dc6afcebcee0614a153a566323bf223779
|
57080da5fbe5d62cbc0b8a34e362a8b0978d5b59
|
refs/heads/main
| 2022-09-15T00:05:36.476268
| 2022-08-16T14:09:11
| 2022-08-16T14:09:11
| 196,914,051
| 6
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 653
|
py
|
class Solution(object):
def isToeplitzMatrix(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: bool
"""
m, n = len(matrix), len(matrix[0])
for i in range(0, m):
for j in range(0, n):
if i < m - 1 and j < n - 1 and \
matrix[i][j] != matrix[i + 1][j + 1]:
return False
return True
def test_is_toeplitz_matrix():
assert Solution().isToeplitzMatrix([
[1, 2, 3, 4],
[5, 1, 2, 3],
[9, 5, 1, 2]
])
assert Solution().isToeplitzMatrix([
[1, 2],
[2, 2]
]) is False
|
[
"linshaoyong@gmail.com"
] |
linshaoyong@gmail.com
|
ebb24c87e2166bd9394bf1a84d7f4ae129ca184b
|
c970d6543bc17b5a546ae80dc02cbae3f8b3830a
|
/server/dhcpd.py
|
d6dba2dcdd1e75ca8d5caebafbfdb0e5f631e0c9
|
[] |
no_license
|
Studentergaarden/APsetup
|
fe0be854f9e74f5ccf4d469d9a448bf8ef5b21cc
|
b5c2015a87b3ffb904ce13c7e08f656aa839228d
|
refs/heads/master
| 2021-01-10T14:08:54.980350
| 2017-03-06T10:31:44
| 2017-03-06T10:31:44
| 54,288,088
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,123
|
py
|
#!/usr/bin/env python2
from __future__ import print_function
navne = ["adm",
"cosmos","kanni","abort","dyt","ug","mg","iv","bzrk","barbar","pharis","psyko"]
vlan_id = range(10,22)
andre = ["priv wifi", "free wifi", "wire"]
andre_id = [30, 32, 40]
ap_id = range(10, 21)
file = "dhcpd.txt"
f = open(file, 'w')
for idx, val in enumerate(vlan_id):
f.write("# %s\n"%(navne[idx]))
f.write("subnet 10.42.%d.0 netmask 255.255.255.0 {\n"%(val))
f.write("\trange dynamic-bootp 10.42.%d.50 10.42.%d.250;\n"%(val, val))
f.write("\toption routers 10.42.%d.1;\n"%(val))
f.write("\toption domain-name-servers 10.42.%d.1;\n"%(val))
f.write("\tnext-server 10.42.%d.1;\n"%(val))
f.write("}\n\n")
for idx, val in enumerate(andre_id):
f.write("# %s\n"%(andre[idx]))
f.write("subnet 10.42.%d.0 netmask 255.255.254.0 {\n"%(val))
f.write("\trange dynamic-bootp 10.42.%d.50 10.42.%d.250;\n"%(val, val+1))
f.write("\toption routers 10.42.%d.1;\n"%(val))
f.write("\toption domain-name-servers 10.42.%d.1;\n"%(val))
f.write("\tnext-server 10.42.%d.1;\n"%(val))
f.write("}\n\n")
|
[
"pawsen@gmail.com"
] |
pawsen@gmail.com
|
ef77160c3cd3e81ca2ba7e4c01584f18b0e7ef73
|
f139a99d51cfa01a7892f0ac5bbb022c0cee0664
|
/Pythonlogy/ShareYourSystem/Standards/Controllers/Drawer/01_ExampleDoc.py
|
ffe2e7bb003b595d10f996f43eee1168929bdb9e
|
[
"MIT"
] |
permissive
|
Ledoux/ShareYourSystem
|
90bb2e6be3088b458348afa37ace68c93c4b6a7a
|
3a2ffabf46f1f68b2c4fd80fa6edb07ae85fa3b2
|
refs/heads/master
| 2021-01-25T12:14:34.118295
| 2017-01-12T14:44:31
| 2017-01-12T14:44:31
| 29,198,670
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,825
|
py
|
#ImportModules
import ShareYourSystem as SYS
#Define
MyDrawer=SYS.DrawerClass(
).draw(
{
'|fig1':{
'-Panels':{
'|A':{
'-Axes':{
'|a':{
'-Plots':{
'|0':{
'FiguringDrawVariable':
[
(
'#plot',
{
'#liarg':[
[1,2,3],
[2,6,3]
],
'#kwarg':{
'linestyle':"",
'marker':'o'
}
}
)
]
},
'|1':{
'FiguringDrawVariable':
[
(
'#plot',
{
'#liarg':[
[0,1,2],
[2,3,4]
],
'#kwarg':{
'linestyle':"--",
'color':'r'
}
}
)
],
}
}
},
'|b':{
'FiguringDrawVariable':
[
(
'#plot',
{
'#liarg':[
[1,2,3],
[2,6,3]
],
'#kwarg':{
'linestyle':"",
'marker':'o'
}
}
)
]
}
}
},
'|B':{
'FiguringDrawVariable':
[
(
'#plot',
{
'#liarg':[
[1,2,3],
[2,6,3]
],
'#kwarg':{
'linestyle':"",
'marker':'o'
}
}
)
]
},
}
},
'|fig2':{
'FiguringDrawVariable':
[
(
'#plot',
{
'#liarg':[
[1,2,3],
[2,6,3]
],
'#kwarg':{
'linestyle':"",
'marker':'o'
}
}
)
]
}
}
)
#print
print('MyDrawer is ')
SYS._print(MyDrawer)
|
[
"erwan.ledoux@ens.fr"
] |
erwan.ledoux@ens.fr
|
216d04773776fc81e9f52786b3e8e6d88651197d
|
f9f019da9bb01be7e35887082747c4c468a73809
|
/enarksh/logger/message/LogFileMessage.py
|
5bb11200fb079bca1d1f5fb33a80288aceab3cb2
|
[
"MIT"
] |
permissive
|
SetBased/py-enarksh
|
63df38ce8622b6b22c0115bbe28630b26de042b7
|
ec0c33cdae4a0afeea37928743abd744ef291a9f
|
refs/heads/master
| 2020-04-11T00:14:33.628135
| 2020-01-24T06:52:17
| 2020-01-24T06:52:17
| 60,259,451
| 3
| 2
| null | 2016-08-02T14:42:21
| 2016-06-02T11:43:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,299
|
py
|
"""
Enarksh
Copyright 2013-2016 Set Based IT Consultancy
Licence MIT
"""
from enarksh.message.Message import Message
class LogFileMessage(Message):
"""
Message type for notifying the logger that a log file is available for storing into the database.
"""
MESSAGE_TYPE = 'logger:LogFileMessage'
"""
The message type.
:type: str
"""
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, rnd_id, name, total_size, filename1, filename2):
"""
Object constructor.
:param int rnd_id: The ID of the run node.
:param str name: The name of he output:
- 'out' for stdout
- 'err' for stderr
:param int total_size: The total size in bytes of the log.
:param str|None filename1: The name of the file where the first chunk of the log is stored.
:param str|None filename2: The name of the file where the last chunk of the log is stored.
"""
Message.__init__(self, LogFileMessage.MESSAGE_TYPE)
self.rnd_id = rnd_id
"""
The ID of the run node.
:type: int
"""
self.name = name
"""
The name of he output:
- 'out' for stdout
- 'err' for stderr
:type: str
"""
self.total_size = total_size
"""
The total size in bytes of the log.
:type: int
"""
self.filename1 = filename1
"""
The name of the file where the first chunk of the log is stored.
:type: str
"""
self.filename2 = filename2
"""
The name of the file where the last chunk of the log is stored.
:type: str
"""
# ------------------------------------------------------------------------------------------------------------------
def send_message(self, end_point):
"""
Sends the message to an end point.
:param str end_point: The end point.
:rtype: None
"""
self.message_controller.send_message(end_point, self)
# ----------------------------------------------------------------------------------------------------------------------
|
[
"p.r.water@setbased.nl"
] |
p.r.water@setbased.nl
|
11decc9e417e1fab8528f8ed17648dc30b41d0c2
|
98be41d34ca238e040408017e1a2af8cfd71a419
|
/command_line/aimless_absorption_map.py
|
5bb462cc2b80aba1f78ccb44a5aee9b3ce5329e1
|
[] |
no_license
|
hainm/xia2
|
338a834fd40aa0a684e4833d34244d6d8f6e0417
|
a5ae68c731577f14b8400404e883029d2147c548
|
refs/heads/master
| 2021-01-24T15:24:44.917551
| 2016-03-10T09:47:27
| 2016-03-10T09:47:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
# LIBTBX_SET_DISPATCHER_NAME dev.xia2.aimless_absorption_map
from __future__ import division
def main(log, png):
from xia2.Toolkit.AimlessSurface import evaluate_1degree, scrape_coefficients
evaluate_1degree(scrape_coefficients(log), png)
return
if __name__ == '__main__':
import sys
main(sys.argv[1], sys.argv[2])
|
[
"graeme.winter@gmail.com"
] |
graeme.winter@gmail.com
|
0eb6a2d29b81069f9b412a45ae5e72d5688176c9
|
154ec3de1efcf3c97d154ac2ed0c7cd1c9a25040
|
/tests/h/services/delete_user_test.py
|
3c3d768d91bc479bf86e39a9e1d7dd833ea1695b
|
[
"BSD-3-Clause",
"BSD-2-Clause-Views",
"BSD-2-Clause"
] |
permissive
|
Manuelinux/kubeh
|
98a9c5c0a98be67c3583dd222bd74046cd5ee484
|
a549f0d1c09619843290f9b78bce7668ed90853a
|
refs/heads/master
| 2023-03-16T00:51:43.318292
| 2021-09-17T03:33:14
| 2021-09-17T03:33:14
| 211,371,455
| 0
| 0
|
BSD-2-Clause
| 2023-03-03T07:20:50
| 2019-09-27T17:37:10
|
Python
|
UTF-8
|
Python
| false
| false
| 3,514
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from unittest import mock
import pytest
import sqlalchemy
from h.models import Annotation, Document
from h.services.delete_user import delete_user_service_factory
from h.services.annotation_delete import AnnotationDeleteService
@pytest.mark.usefixtures("annotation_delete_service")
class TestDeleteUserService:
def test_delete_disassociate_group_memberships(self, factories, svc):
user = factories.User()
svc.delete(user)
assert user.groups == []
def test_delete_deletes_annotations(
self, factories, pyramid_request, svc, annotation_delete_service
):
user = factories.User(username="bob")
anns = [
factories.Annotation(userid=user.userid),
factories.Annotation(userid=user.userid),
]
svc.delete(user)
annotation_delete_service.delete.assert_has_calls(
[mock.call(anns[0]), mock.call(anns[1])], any_order=True
)
def test_delete_deletes_user(self, db_session, factories, pyramid_request, svc):
user = factories.User()
svc.delete(user)
assert user in db_session.deleted
def test_delete_user_removes_groups_if_no_collaborators(
self, db_session, group_with_two_users, pyramid_request, svc
):
pyramid_request.db = db_session
(group, creator, member, creator_ann, member_ann) = group_with_two_users
db_session.delete(member_ann)
svc.delete(creator)
assert sqlalchemy.inspect(group).was_deleted
def test_creator_is_none_if_groups_have_collaborators(
self, db_session, group_with_two_users, pyramid_request, svc
):
pyramid_request.db = db_session
(group, creator, member, creator_ann, member_ann) = group_with_two_users
svc.delete(creator)
assert group.creator is None
def test_delete_user_removes_only_groups_created_by_user(
self, db_session, group_with_two_users, pyramid_request, svc
):
pyramid_request.db = db_session
(group, creator, member, creator_ann, member_ann) = group_with_two_users
svc.delete(member)
assert group not in db_session.deleted
@pytest.fixture
def svc(self, db_session, pyramid_request):
pyramid_request.db = db_session
return delete_user_service_factory({}, pyramid_request)
@pytest.fixture
def pyramid_request(pyramid_request):
pyramid_request.notify_after_commit = mock.Mock()
return pyramid_request
@pytest.fixture
def group_with_two_users(db_session, factories):
"""
Create a group with two members and an annotation created by each.
"""
creator = factories.User()
member = factories.User()
group = factories.Group(
authority=creator.authority, creator=creator, members=[creator, member]
)
doc = Document(web_uri="https://example.org")
creator_ann = Annotation(userid=creator.userid, groupid=group.pubid, document=doc)
member_ann = Annotation(userid=member.userid, groupid=group.pubid, document=doc)
db_session.add(creator_ann)
db_session.add(member_ann)
db_session.flush()
return (group, creator, member, creator_ann, member_ann)
@pytest.fixture
def annotation_delete_service(pyramid_config):
service = mock.create_autospec(
AnnotationDeleteService, spec_set=True, instance=True
)
pyramid_config.register_service(service, name="annotation_delete")
return service
|
[
"manuelaguirre@Admins-MacBook-Pro.local"
] |
manuelaguirre@Admins-MacBook-Pro.local
|
4cb25fcb4d4a805d548a5b8cc8fd783cbdf29274
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03378/s823001968.py
|
18624fdbcc9f357534760d943dbd2474726acb00
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 205
|
py
|
N, M, X = map(int, input().split())
A = list(map(int, input().split()))
l_cost = 0
g_cost = 0
for i in A:
if i > X:
l_cost += 1
else:
g_cost += 1
print(min(l_cost, g_cost))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
eb0a7405d6d4ee0c550e9d35ccfe74c20a028799
|
648796da46791794ee5de7a8004da437c840323e
|
/311_calls/p2.py
|
4bbbe7466034c4642042a3360d44ed80983f59db
|
[] |
no_license
|
YulianaGomez/ml_pp
|
86530a2ee26bb2f39117ec6a458368a5c1c74104
|
3891350e1ef6fbf2fd29a792387182601f94c250
|
refs/heads/master
| 2020-03-07T19:09:25.958025
| 2018-05-25T22:34:28
| 2018-05-25T22:34:28
| 127,663,580
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,906
|
py
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import glob
import json
import requests
import sys
"""
Homework 1: Diagnostic
Looking at 311 requests from the Chicago Open Data Portal and
census API's for analysis of Chicago communities
author: Yuliana Zamora
Date: April 3, 2018
"""
class dataCounter():
def __init__(self):
self.child15 = {"Far North":0,"Northwest":0,"North":0,"West":0,"Central":0,"South":0,"Southwest":0,"Far Southwest":0,"Far Southeast":0}
self.child16 = {"Far North":0,"Northwest":0,"North":0,"West":0,"Central":0,"South":0,"Southwest":0,"Far Southwest":0,"Far Southeast":0}
self.bach15 = {"Far North":0,"Northwest":0,"North":0,"West":0,"Central":0,"South":0,"Southwest":0,"Far Southwest":0,"Far Southeast":0}
self.bach16 = {"Far North":0,"Northwest":0,"North":0,"West":0,"Central":0,"South":0,"Southwest":0,"Far Southwest":0,"Far Southeast":0}
self.mom15 = {"Far North":0,"Northwest":0,"North":0,"West":0,"Central":0,"South":0,"Southwest":0,"Far Southwest":0,"Far Southeast":0}
self.mom16 = {"Far North":0,"Northwest":0,"North":0,"West":0,"Central":0,"South":0,"Southwest":0,"Far Southwest":0,"Far Southeast":0}
def main():
####--Populating demo data--####
#Number of children on govt assistant, bachelors degrees, children in single mom homes
processed15 = glob.glob("2015.json")
processed16 = glob.glob("2016.json")
if len(processed15) > 0 and len(processed16) > 0:
json_data= open("2015.json", "r")
demo_15 = json.load(json_data)
json_data.close()
json_data= open("2016.json", "r")
demo_16 = json.load(json_data)
json_data.close()
else:
for year in range(2015,2017):
url = "https://api.census.gov/data/"+str(year)+"/acs/acs5/subject?get=NAME,S0901_C01_031E,S1501_C01_012E,S0901_C04_001E&for=zip%20code%20tabulation%20area:*"
demo_data = requests.get(url,allow_redirects=True)
file_name = str(year) +".json"
open(file_name, 'wb').write(demo_data.content)
if year == 2015:
json_data= open("2015.json", "r")
demo_15 = json.load(json_data)
json_data.close()
else:
json_data= open("2016.json", "r")
demo_16 = json.load(json_data)
json_data.close()
###--setting specific regions with their corresponding zipcodes--###
#http://chicago-zone.blogspot.com/2014/03/chicago-zip-code-map-locate-chicago.html
zip_dict = {"Far North" : [60626,60645, 60659, 60660,60640,60625,60630,60631,60656], \
"Northwest" : [60618,60634, 60641,60607,60639], \
"North" : [60618, 60613,60657, 60613,60614, 60610,60647], \
"West" :[60651, 60622,60612, 60623, 60642,60639, 60644,60624,60612,60607,60608,60616], \
"Central" : [60610,60601, 60602, 60603, 60604, 60605,60606, 60607, 60661,60616], \
"South" : [60609,60616,60653,60615,60637,60649,60608,60620,60619], \
"Southwest" :[60632,60608, 60609,60629,60638,60621,60636], \
"Far Southwest" : [60652,60620,60643,60655], \
"Far Southeast" : [60619,60617,60628,60643,60633,60827,60633,60638] }
# Create object to store the counters
datacnt = dataCounter()
#Populate data for 2015
for key, val in zip_dict.items():
for i in range(1, len(demo_15)):
zipCode = int(demo_15[i][4])
if zipCode in val:
addval=[0, 0, 0]
for j in range(1,4):
if demo_15[i][j] != None:
if j==1: addval[j-1] = float(demo_15[i][j])
else: addval[j-1] = int(demo_15[i][j])
datacnt.child15[key] += addval[0]
datacnt.bach15[key] += addval[1]
datacnt.mom15[key] += addval[2]
#Populate data for 2016
for key, val in zip_dict.items():
for i in range(1, len(demo_16)):
zipCode = int(demo_16[i][4])
if zipCode in val:
addval=[0, 0, 0]
for j in range(1,4):
if demo_16[i][j] != None:
if j==1: addval[j-1] = float(demo_16[i][j])
else: addval[j-1] = int(demo_16[i][j])
datacnt.child16[key] += addval[0]
datacnt.bach16[key] += addval[1]
datacnt.mom16[key] += addval[2]
fig, ax = plt.subplots()
N = len(datacnt.child16.keys())
ind = np.arange(N)
width = 0.35
setting='mom'
if setting == 'child':
rects1 = ax.bar(ind, datacnt.child15.values(), width)
rects2 = ax.bar(ind + width, datacnt.child16.values(), width)
elif setting == 'bach':
rects1 = ax.bar(ind, datacnt.bach15.values(), width)
rects2 = ax.bar(ind + width, datacnt.bach16.values(), width)
elif setting == 'mom':
rects1 = ax.bar(ind, datacnt.mom15.values(), width)
rects2 = ax.bar(ind + width, datacnt.mom16.values(), width)
ax.set_ylabel('Frequency')
ax.set_xlabel('Chicago Communities')
ax.set_title('Number of Children in Single mom Households in City of Chicago Community (2015-2016)')
ax.set_xticks(ind + width / 2)
ax.set_xticklabels(datacnt.mom16.keys())
ax.legend((rects1[0], rects2[0]), ('2015', '2016'))
def autolabel(rects):
"""
Attach a text label above each bar displaying its height
"""
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%d' % int(height),
ha='center', va='bottom')
#autolabel(rects1)
#autolabel(rects2)
plt.show()
if __name__ == '__main__':
main()
|
[
"ygomez297@gmail.com"
] |
ygomez297@gmail.com
|
7a509387928543470805a0bff90e312d0618d154
|
9f1039075cc611198a988034429afed6ec6d7408
|
/tensorflow-stubs/contrib/seq2seq/__init__.pyi
|
257610d37bf4e559a080c9544edbf012f74b2e01
|
[] |
no_license
|
matangover/tensorflow-stubs
|
9422fbb1cb3a3638958d621461291c315f9c6ec2
|
664bd995ef24f05ba2b3867d979d23ee845cb652
|
refs/heads/master
| 2020-05-23T12:03:40.996675
| 2019-05-15T06:21:43
| 2019-05-15T06:21:43
| 186,748,093
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
pyi
|
# Stubs for tensorflow.contrib.seq2seq (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from tensorflow.contrib.seq2seq.python.ops.attention_wrapper import *
from tensorflow.contrib.seq2seq.python.ops.basic_decoder import *
from tensorflow.contrib.seq2seq.python.ops.beam_search_decoder import *
from tensorflow.contrib.seq2seq.python.ops.beam_search_ops import *
from tensorflow.contrib.seq2seq.python.ops.decoder import *
from tensorflow.contrib.seq2seq.python.ops.helper import *
from tensorflow.contrib.seq2seq.python.ops.loss import *
from tensorflow.python.util.all_util import remove_undocumented as remove_undocumented
|
[
"matangover@gmail.com"
] |
matangover@gmail.com
|
0c00c9201ad849dadeaf44789e0b9752180054d1
|
b62563d791382e75f65ec9cc281882c58baa1444
|
/machine/urls.py
|
189042b2c8fa9fb5f6bfb2e68d4fac0d7a3963f7
|
[] |
no_license
|
moses-mugoya/Farmers-Machinery-Management-System
|
4fda328f1813041c9a6ae64bf618a7eb0b23d78b
|
0221c33e5b5936edee3c32baa98f486c9a4726a4
|
refs/heads/master
| 2020-05-19T03:59:05.148558
| 2019-05-03T20:06:04
| 2019-05-03T20:06:04
| 184,814,052
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
py
|
from django.urls import re_path
from . import views
app_name = 'machine'
urlpatterns = [
re_path(r'^$', views.product_list, name='product_list'),
re_path(r'^(?P<category_slug>[-\w]+)/$', views.product_list, name='product_list_by_category'),
re_path(r'^(?P<id>\d+)/(?P<slug>[-\w]+)/$', views.product_detail, name='product_detail'),
]
|
[
"mosesmugoya31@gmail.com"
] |
mosesmugoya31@gmail.com
|
24b53bcc79668b3c8a1ee6b0b5ad11b327473c3d
|
842ccd98867d5549884505d18ed1bc7f53a1803e
|
/Random-Alan-n-Akaash-Puzzles/mystery_2.py
|
6f99b554e185fe4a1750db9c0a4030df16940b37
|
[] |
no_license
|
AwesomeZaidi/Problem-Solving
|
dd43593c2a9f5d7ce30c7aaa2575fdd9eaa2ba1d
|
28d40a46f415a41b6754378a46ab26e90a094273
|
refs/heads/master
| 2023-01-09T14:40:10.354981
| 2019-07-10T02:18:26
| 2019-07-10T02:18:26
| 158,969,031
| 3
| 0
| null | 2023-01-03T23:53:00
| 2018-11-24T19:56:50
|
Python
|
UTF-8
|
Python
| false
| false
| 932
|
py
|
import string
import sys
def puzzle(binary_input, b):
"""
Create a dictionary values of 1-9-a-z :
Args: ds ? type?
b ? type?
Returns? ?
"""
string_values = {char: index for index, char in enumerate(string.printable[:36])}
# for _, base_36_val in enumerate(string.printable[:36]):
# print('base_36_val:', base_36_val)
# sum( values[d] * b**e for e, d in enumerate(ds[::-1]) )
sum = 0
for idx, num in enumerate(binary_input[::-1]):
print('idx:', idx, 'num:', num)
current = string_values[num] * b**idx
sum += current
print('current:', current)
return sum # wrong
# return sum(string_values[d] * b**e for e, d in enumerate(ds[::-1]))
if __name__ == '__main__':
# print('string.printable[:36]:', string.printable[:36])
print(puzzle(sys.argv[1], int(sys.argv[2])))
|
[
"asimzaidih@gmail.com"
] |
asimzaidih@gmail.com
|
ebed4b81b88149665e77ef9354a5f98dd58f2dea
|
2884e44c7c8b5f1dd7405fba24549e8135605ad8
|
/plastiqpublicapi/http/http_response.py
|
44ce71985958c313988911f98ecf3550dcaaeec5
|
[
"MIT"
] |
permissive
|
jeffkynaston/sdk-spike-python-apimatic
|
d44d2464ba43c12dabe3ae3ba01ef268f73c16f3
|
e1ca52116aabfcdb2f36c24ebd866cf00bb5c6c9
|
refs/heads/main
| 2023-07-01T15:17:50.623155
| 2021-08-05T22:45:12
| 2021-08-05T22:45:12
| 393,186,601
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,508
|
py
|
# -*- coding: utf-8 -*-
"""
plastiqpublicapi
This file was automatically generated by APIMATIC v3.0 (
https://www.apimatic.io ).
"""
class HttpResponse(object):
"""Information about an HTTP Response including its status code, returned
headers, and raw body
Attributes:
status_code (int): The status code response from the server that
corresponds to this response.
reason_phrase (string): The reason phrase returned by the server.
headers (dict): A dictionary of headers (key : value) that were
returned with the response
text (string): The Raw body of the HTTP Response as a string
request (HttpRequest): The request that resulted in this response.
"""
def __init__(self,
status_code,
reason_phrase,
headers,
text,
request):
"""Constructor for the HttpResponse class
Args:
status_code (int): The response status code.
reason_phrase (string): The response reason phrase.
headers (dict): The response headers.
text (string): The raw body from the server.
request (HttpRequest): The request that resulted in this response.
"""
self.status_code = status_code
self.reason_phrase = reason_phrase
self.headers = headers
self.text = text
self.request = request
|
[
"jeff.kynaston@plastiq.com"
] |
jeff.kynaston@plastiq.com
|
a60935cfa8dae08bce2ff3cda00428329bb70552
|
331ed33890f103ce95318abe0d4bd255929e8f4d
|
/source/addcode3.py
|
de77590e3a533f3efb249fe212a7c9c7f61e1647
|
[] |
no_license
|
manon2012/e
|
fa15ce55a72fa6ee20f10d06e9f670ade207209a
|
c20a345e96ccd702b56a802e2efbd924f1cd808d
|
refs/heads/master
| 2021-01-22T19:25:41.688876
| 2018-09-20T09:59:29
| 2018-09-20T09:59:29
| 102,418,775
| 0
| 0
| null | 2017-09-22T08:21:04
| 2017-09-05T01:30:02
|
Python
|
UTF-8
|
Python
| false
| false
| 524
|
py
|
#! /usr/bin/env python
#coding=utf-8
import string, random
#激活码中的字符和数字
field = string.letters + string.digits
#获得四个字母和数字的随机组合
def getRandom():
return "".join(random.sample(field,4))
#生成的每个激活码中有几组
def concatenate(group):
return "-".join([getRandom() for i in range(group)])
#生成n组激活码
def generate(n):
return [concatenate(4) for i in range(n)]
if __name__ == '__main__':
print generate(2)
|
[
"manon2012@126.com"
] |
manon2012@126.com
|
0eef2b6a52956de948410e4e9c6e033167b702c7
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/x12/5040/264005040.py
|
513841a97cbf65b75f2325a806e4038bc2fd7268
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 2,104
|
py
|
from bots.botsconfig import *
from records005040 import recorddefs
syntax = {
'version': '00504',
'functionalgroup': 'MG',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGN', MIN: 1, MAX: 1},
{ID: 'MIS', MIN: 0, MAX: 1},
{ID: 'N1', MIN: 1, MAX: 2, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 1},
{ID: 'N3', MIN: 0, MAX: 1},
{ID: 'N4', MIN: 0, MAX: 2},
{ID: 'PER', MIN: 0, MAX: 2},
]},
{ID: 'LX', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'DTM', MIN: 0, MAX: 2},
{ID: 'N1', MIN: 0, MAX: 1},
{ID: 'N2', MIN: 0, MAX: 1},
{ID: 'N3', MIN: 0, MAX: 1},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'REF', MIN: 0, MAX: 2},
{ID: 'PER', MIN: 0, MAX: 2},
{ID: 'QTY', MIN: 0, MAX: 2},
{ID: 'AMT', MIN: 0, MAX: 2},
{ID: 'DTP', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'REF', MIN: 1, MAX: 10},
{ID: 'N1', MIN: 0, MAX: 99999, LEVEL: [
{ID: 'N2', MIN: 0, MAX: 1},
{ID: 'N3', MIN: 0, MAX: 1},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'PER', MIN: 0, MAX: 2},
{ID: 'REF', MIN: 0, MAX: 4},
]},
{ID: 'LS', MIN: 0, MAX: 1, LEVEL: [
{ID: 'REC', MIN: 1, MAX: 1, LEVEL: [
{ID: 'N3', MIN: 0, MAX: 1},
{ID: 'N4', MIN: 0, MAX: 1},
{ID: 'DFI', MIN: 0, MAX: 99999},
{ID: 'QTY', MIN: 0, MAX: 1},
{ID: 'AMT', MIN: 0, MAX: 10},
{ID: 'INT', MIN: 0, MAX: 1},
{ID: 'SOM', MIN: 0, MAX: 10},
{ID: 'DTP', MIN: 0, MAX: 14},
{ID: 'MRC', MIN: 0, MAX: 2},
{ID: 'MSG', MIN: 0, MAX: 11},
{ID: 'YNQ', MIN: 0, MAX: 2},
{ID: 'PER', MIN: 0, MAX: 99999},
]},
{ID: 'LE', MIN: 1, MAX: 1},
]},
]},
]},
{ID: 'QTY', MIN: 0, MAX: 2},
{ID: 'AMT', MIN: 0, MAX: 2},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
|
[
"doug.vanhorn@tagglogistics.com"
] |
doug.vanhorn@tagglogistics.com
|
be482746b575856b1c3066b076cb0366368f336f
|
3abcde3ca444d7612e24a0faf9b89af61d9bad6d
|
/backend/helpers/basics.py
|
d2b90f0117484a68d11355657f30f6ecfa701b56
|
[] |
no_license
|
cbib/COBRA
|
fa43d5600beaf36d3dcab98bc7b8faa940a02aea
|
54f43d3d2867b4f228dccc6630416808e258be77
|
refs/heads/master
| 2022-09-25T00:20:57.688447
| 2021-02-16T21:53:28
| 2021-02-16T21:53:28
| 29,740,646
| 2
| 3
| null | 2022-09-01T22:15:54
| 2015-01-23T16:10:43
|
HTML
|
UTF-8
|
Python
| false
| false
| 660
|
py
|
# -*- coding: utf-8 -*-
import os
import socket
from configobj import ConfigObj
from helpers.path import config_dir
def get_hostname():
"""
Get host name
:return: host name in lower case
"""
return socket.gethostname().lower()
def load_config(filepath=None):
"""
Load config object from a file
:param filepath: path of config file
:return: a config object
"""
if not filepath:
filename = '%s.ini' % get_hostname()
filepath = os.path.join(config_dir, filename)
if not os.path.exists(filepath):
filepath = os.path.join(config_dir, 'default.ini')
return ConfigObj(filepath)
|
[
"massyah@gmail.com"
] |
massyah@gmail.com
|
985bcdf1b5a6191154114626de32dee9d23f0777
|
18c99e7d06cb18570a7a2177066e5da2372f895c
|
/resources/Rougier Tutorial/scripts/contour_ex.py
|
7b23fce2fe125487b170f498a7ef97a154bbb519
|
[
"MIT",
"CC-BY-SA-4.0"
] |
permissive
|
nstarman/2019-10-22-dotAstronomy-Plotting-Workshop
|
00a7d9cc17a32effa0bb6cf758c02efa25d33bd8
|
31e1a10b3d0f051a2cd197ce390bcf96753f153c
|
refs/heads/master
| 2021-07-13T12:14:28.031574
| 2019-10-22T16:00:54
| 2019-10-22T16:00:54
| 214,871,831
| 7
| 0
|
MIT
| 2020-10-10T07:10:38
| 2019-10-13T18:27:13
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 772
|
py
|
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Nicolas P. Rougier. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
def f(x,y):
return (1-x/2+x**5+y**3)*np.exp(-x**2-y**2)
n = 256
x = np.linspace(-3,3,n)
y = np.linspace(-3,3,n)
X,Y = np.meshgrid(x,y)
plt.axes([0.025,0.025,0.95,0.95])
plt.contourf(X, Y, f(X,Y), 8, alpha=.75, cmap=plt.cm.hot)
C = plt.contour(X, Y, f(X,Y), 8, colors='black', linewidth=.5)
plt.clabel(C, inline=1, fontsize=10)
plt.xticks([]), plt.yticks([])
# savefig('../figures/contour_ex.png',dpi=48)
plt.show()
|
[
"nstarkman@protonmail.com"
] |
nstarkman@protonmail.com
|
07550d5e6fdcbc52b17afb6e73ec3d7d63f2f34f
|
bef2f86cfbf8dd7915c5ec72a6b38e26b5238641
|
/application/apps.py
|
b3e76b61bebbdcea9af4d962559306f250536674
|
[] |
no_license
|
rayhancse08/IMS
|
19f66ba20b3101ea4ced7d88cd7bd5c8c22b14f6
|
553050a1b6f621a43c1b141459fc1c89c39cfd4a
|
refs/heads/master
| 2023-04-29T23:37:41.202213
| 2019-09-22T10:21:38
| 2019-09-22T10:21:38
| 210,084,538
| 0
| 0
| null | 2023-04-21T20:37:48
| 2019-09-22T03:23:48
|
Python
|
UTF-8
|
Python
| false
| false
| 192
|
py
|
from django.apps import AppConfig
class ApiConfig(AppConfig):
name = 'application'
def ready(self):
# noinspection PyUnresolvedReferences
import application.signals
|
[
"rayhancse08@gmail.com"
] |
rayhancse08@gmail.com
|
c0145b7cab1a1625f2d70ee33750828557e05f2d
|
4a6e49b33d07c83da9ec56621c27c37a6a28a8ce
|
/configs/hyper_c32/upernet_swin_small_patch4_window7_4k_hyper_c32.py
|
b4b092bc4bac1b426b1f12be9915ec6ece9aba54
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
DKJJ/Swin-Transformer-Semantic-Segmentation
|
213e7516d2abc34b9ecca9dc6037b0ab5499397f
|
c8707951ddabdc0189451bcbd25c145f1f6cc041
|
refs/heads/main
| 2023-04-21T12:13:00.624902
| 2021-05-06T12:15:13
| 2021-05-06T12:15:13
| 365,226,907
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,497
|
py
|
_base_ = [
'../_base_/models/upernet_swin.py', '../_base_/datasets/hyper_c32.py',
'../_base_/default_runtime.py', '../_base_/schedules/schedule_4k.py'
]
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
backbone=dict(
embed_dim=96,
depths=[2, 2, 18, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
ape=False,
drop_path_rate=0.3,
patch_norm=True,
use_checkpoint=False,
in_chans=32
),
decode_head=dict(
in_channels=[96, 192, 384, 768],
num_classes=2,
norm_cfg=norm_cfg
),
auxiliary_head=dict(
in_channels=384,
num_classes=2,
norm_cfg=norm_cfg
))
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
'relative_position_bias_table': dict(decay_mult=0.),
'norm': dict(decay_mult=0.)}))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,)
|
[
"1395133179@qq.com"
] |
1395133179@qq.com
|
a6a678b8ba3327da88bff0b8846c393e3e21ab86
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2606/60622/314959.py
|
b4aeade74d5cd1b7d707179de1be3c9a40e2440f
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 156
|
py
|
arr=eval(input())
targrt=int(input())
get=False
for i in range(len(arr)):
if arr[i]==target:
get=True
print(i)
if not get:
print(-1)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
5ef3e003fbc3b211a53280eedf68c8e5c6f2743d
|
9b64f0f04707a3a18968fd8f8a3ace718cd597bc
|
/huaweicloud-sdk-dcs/huaweicloudsdkdcs/v2/model/cluster_redis_node_monitored_object.py
|
e9d88d7507107ddb26464bc9702d96002ef08dc5
|
[
"Apache-2.0"
] |
permissive
|
jaminGH/huaweicloud-sdk-python-v3
|
eeecb3fb0f3396a475995df36d17095038615fba
|
83ee0e4543c6b74eb0898079c3d8dd1c52c3e16b
|
refs/heads/master
| 2023-06-18T11:49:13.958677
| 2021-07-16T07:57:47
| 2021-07-16T07:57:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,524
|
py
|
# coding: utf-8
import re
import six
class ClusterRedisNodeMonitoredObject:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'dcs_instance_id': 'str',
'name': 'str',
'dcs_cluster_redis_node': 'str',
'status': 'str'
}
attribute_map = {
'dcs_instance_id': 'dcs_instance_id',
'name': 'name',
'dcs_cluster_redis_node': 'dcs_cluster_redis_node',
'status': 'status'
}
def __init__(self, dcs_instance_id=None, name=None, dcs_cluster_redis_node=None, status=None):
"""ClusterRedisNodeMonitoredObject - a model defined in huaweicloud sdk"""
self._dcs_instance_id = None
self._name = None
self._dcs_cluster_redis_node = None
self._status = None
self.discriminator = None
if dcs_instance_id is not None:
self.dcs_instance_id = dcs_instance_id
if name is not None:
self.name = name
if dcs_cluster_redis_node is not None:
self.dcs_cluster_redis_node = dcs_cluster_redis_node
if status is not None:
self.status = status
@property
def dcs_instance_id(self):
"""Gets the dcs_instance_id of this ClusterRedisNodeMonitoredObject.
测量对象ID,即节点的ID。
:return: The dcs_instance_id of this ClusterRedisNodeMonitoredObject.
:rtype: str
"""
return self._dcs_instance_id
@dcs_instance_id.setter
def dcs_instance_id(self, dcs_instance_id):
"""Sets the dcs_instance_id of this ClusterRedisNodeMonitoredObject.
测量对象ID,即节点的ID。
:param dcs_instance_id: The dcs_instance_id of this ClusterRedisNodeMonitoredObject.
:type: str
"""
self._dcs_instance_id = dcs_instance_id
@property
def name(self):
"""Gets the name of this ClusterRedisNodeMonitoredObject.
测量对象名称,即节点IP。
:return: The name of this ClusterRedisNodeMonitoredObject.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ClusterRedisNodeMonitoredObject.
测量对象名称,即节点IP。
:param name: The name of this ClusterRedisNodeMonitoredObject.
:type: str
"""
self._name = name
@property
def dcs_cluster_redis_node(self):
"""Gets the dcs_cluster_redis_node of this ClusterRedisNodeMonitoredObject.
维度dcs_cluster_redis_node的测量对象的ID。
:return: The dcs_cluster_redis_node of this ClusterRedisNodeMonitoredObject.
:rtype: str
"""
return self._dcs_cluster_redis_node
@dcs_cluster_redis_node.setter
def dcs_cluster_redis_node(self, dcs_cluster_redis_node):
"""Sets the dcs_cluster_redis_node of this ClusterRedisNodeMonitoredObject.
维度dcs_cluster_redis_node的测量对象的ID。
:param dcs_cluster_redis_node: The dcs_cluster_redis_node of this ClusterRedisNodeMonitoredObject.
:type: str
"""
self._dcs_cluster_redis_node = dcs_cluster_redis_node
@property
def status(self):
"""Gets the status of this ClusterRedisNodeMonitoredObject.
测量对象状态,即节点状态。
:return: The status of this ClusterRedisNodeMonitoredObject.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ClusterRedisNodeMonitoredObject.
测量对象状态,即节点状态。
:param status: The status of this ClusterRedisNodeMonitoredObject.
:type: str
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
return json.dumps(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ClusterRedisNodeMonitoredObject):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
019af73cde0ddb511c0a9d5cb31e3865fc933fd2
|
61f9c7094be028e040d1234f05ee6d7370c2206d
|
/pytext/models/output_layers/intent_slot_output_layer.py
|
1a831b9e0f9fcb261594fa81513bece669755bdb
|
[
"BSD-3-Clause"
] |
permissive
|
timgates42/pytext
|
3ce5473fecca5174108a4eb63209a3eecfb6d8dd
|
5f2c3ca6c3ba56e1001e95825abd7ee295de1dff
|
refs/heads/main
| 2023-03-15T07:33:21.217159
| 2022-07-11T16:06:16
| 2022-07-11T16:06:16
| 231,028,915
| 0
| 0
|
NOASSERTION
| 2019-12-31T05:04:01
| 2019-12-31T05:04:00
| null |
UTF-8
|
Python
| false
| false
| 7,272
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from caffe2.python import core
from pytext.common.constants import DatasetFieldName
from pytext.data.utils import Vocabulary
from pytext.models.module import create_module
from pytext.utils.usage import log_class_usage
from torch import jit
from .doc_classification_output_layer import ClassificationOutputLayer
from .output_layer_base import OutputLayerBase
from .utils import query_word_reprs
from .word_tagging_output_layer import CRFOutputLayer, WordTaggingOutputLayer
class IntentSlotScores(nn.Module):
def __init__(self, doc_scores: jit.ScriptModule, word_scores: jit.ScriptModule):
super().__init__()
self.doc_scores = doc_scores
self.word_scores = word_scores
log_class_usage(__class__)
def forward(
self,
logits: Tuple[torch.Tensor, torch.Tensor],
context: Dict[str, torch.Tensor],
) -> Tuple[List[Dict[str, float]], List[List[Dict[str, float]]]]:
d_logits, w_logits = logits
if "token_indices" in context:
w_logits = query_word_reprs(w_logits, context["token_indices"])
d_results = self.doc_scores(d_logits)
w_results = self.word_scores(w_logits, context)
return d_results, w_results
class IntentSlotOutputLayer(OutputLayerBase):
"""
Output layer for joint intent classification and slot-filling models.
Intent classification is a document classification problem and slot filling
is a word tagging problem. Thus terms these can be used interchangeably in the
documentation.
Args:
doc_output (ClassificationOutputLayer): Output layer for intent
classification task. See
:class:`~pytext.models.output_layers.ClassificationOutputLayer` for
details.
word_output (WordTaggingOutputLayer): Output layer for slot filling task.
See :class:`~pytext.models.output_layers.WordTaggingOutputLayer` for
details.
Attributes:
doc_output (type): Output layer for intent classification task.
word_output (type): Output layer for slot filling task.
"""
class Config(OutputLayerBase.Config):
doc_output: ClassificationOutputLayer.Config = (
ClassificationOutputLayer.Config()
)
word_output: Union[
WordTaggingOutputLayer.Config, CRFOutputLayer.Config
] = WordTaggingOutputLayer.Config()
@classmethod
def from_config(
cls, config: Config, doc_labels: Vocabulary, word_labels: Vocabulary
):
return cls(
create_module(config.doc_output, labels=doc_labels),
create_module(config.word_output, labels=word_labels),
)
def __init__(
self, doc_output: ClassificationOutputLayer, word_output: WordTaggingOutputLayer
) -> None:
super().__init__()
self.doc_output = doc_output
self.word_output = word_output
log_class_usage(__class__)
def get_loss(
self,
logits: Tuple[torch.Tensor, torch.Tensor],
targets: Tuple[torch.Tensor, torch.Tensor],
context: Dict[str, Any] = None,
*args,
**kwargs,
) -> torch.Tensor:
"""Compute and return the averaged intent and slot-filling loss.
Args:
logit (Tuple[torch.Tensor, torch.Tensor]): Logits returned by
:class:`~pytext.models.joint_model.JointModel`. It is a tuple
containing logits for intent classification and slot filling.
targets (Tuple[torch.Tensor, torch.Tensor]): Tuple of target Tensors
containing true document label/target and true word labels/targets.
context (Dict[str, Any]): Context is a dictionary of items
that's passed as additional metadata. Defaults to None.
Returns:
torch.Tensor: Averaged intent and slot loss.
"""
d_logit, w_logit = logits
if DatasetFieldName.TOKEN_INDICES in context:
w_logit = query_word_reprs(w_logit, context[DatasetFieldName.TOKEN_INDICES])
d_target, w_target = targets
d_weight = context[DatasetFieldName.DOC_WEIGHT_FIELD] # noqa
w_weight = context[DatasetFieldName.WORD_WEIGHT_FIELD] # noqa
d_loss = self.doc_output.get_loss(
d_logit, d_target, context=context, reduce=False
)
w_loss = self.word_output.get_loss(
w_logit, w_target, context=context, reduce=False
)
# w_loss could have been flattened
w_hard_target = w_target[0] if type(w_target) is tuple else w_target
if w_loss.size()[0] != w_hard_target.size()[0]:
w_loss = w_loss.reshape(w_hard_target.size())
w_loss = torch.mean(w_loss, dim=1)
d_weighted_loss = torch.mean(torch.mul(d_loss, d_weight))
w_weighted_loss = torch.mean(torch.mul(w_loss, w_weight))
return d_weighted_loss + w_weighted_loss
def get_pred(
self,
logits: Tuple[torch.Tensor, torch.Tensor],
targets: Optional[torch.Tensor] = None,
context: Optional[Dict[str, Any]] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Compute and return prediction and scores from the model.
Args:
logit (Tuple[torch.Tensor, torch.Tensor]): Logits returned by
:class:`~pytext.models.joint_model.JointModel`. It's tuple
containing logits for intent classification and slot filling.
targets (Optional[torch.Tensor]): Not applicable. Defaults to None.
context (Optional[Dict[str, Any]]): Context is a dictionary of items
that's passed as additional metadata. Defaults to None.
Returns:
Tuple[torch.Tensor, torch.Tensor]: Model prediction and scores.
"""
d_logit, w_logit = logits
if DatasetFieldName.TOKEN_INDICES in context:
w_logit = query_word_reprs(w_logit, context[DatasetFieldName.TOKEN_INDICES])
d_pred, d_score = self.doc_output.get_pred(d_logit, None, context)
w_pred, w_score = self.word_output.get_pred(w_logit, None, context)
return (d_pred, w_pred), (d_score, w_score)
def export_to_caffe2(
self,
workspace: core.workspace,
init_net: core.Net,
predict_net: core.Net,
model_out: List[torch.Tensor],
doc_out_name: str,
word_out_name: str,
) -> List[core.BlobReference]:
"""
Exports the intent slot output layer to Caffe2.
See `OutputLayerBase.export_to_caffe2()` for details.
"""
return self.doc_output.export_to_caffe2(
workspace, init_net, predict_net, model_out[0], doc_out_name
) + self.word_output.export_to_caffe2(
workspace, init_net, predict_net, model_out[1], word_out_name
)
def torchscript_predictions(self):
doc_scores = self.doc_output.torchscript_predictions()
word_scores = self.word_output.torchscript_predictions()
return jit.script(IntentSlotScores(doc_scores, word_scores))
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
429b8aebe0e1a0732958d27d2a34cc2b6c0d64f3
|
b28305dab0be0e03765c62b97bcd7f49a4f8073d
|
/components/cronet/tools/cr_cronet.py
|
50a1c371b6c5044149e25ebb99b3029e2786eb66
|
[
"BSD-3-Clause"
] |
permissive
|
svarvel/browser-android-tabs
|
9e5e27e0a6e302a12fe784ca06123e5ce090ced5
|
bd198b4c7a1aca2f3e91f33005d881f42a8d0c3f
|
refs/heads/base-72.0.3626.105
| 2020-04-24T12:16:31.442851
| 2019-08-02T19:15:36
| 2019-08-02T19:15:36
| 171,950,555
| 1
| 2
|
NOASSERTION
| 2019-08-02T19:15:37
| 2019-02-21T21:47:44
| null |
UTF-8
|
Python
| false
| false
| 7,538
|
py
|
#!/usr/bin/python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
cr_cronet.py - cr - like helper tool for cronet developers
"""
import argparse
import os
import sys
def run(command, extra_options=''):
command = command + ' ' + extra_options
print command
return os.system(command)
def gn(out_dir, gn_args, gn_extra=''):
return run ('gn gen %s --args=\'%s\' %s' % (out_dir, gn_args, gn_extra))
def build(out_dir, build_target, extra_options=''):
return run('ninja -C ' + out_dir + ' ' + build_target,
get_ninja_jobs_option() + extra_options)
def install(out_dir):
cmd = 'build/android/adb_install_apk.py ' + out_dir + '/apks/{0}'
return run(cmd.format('CronetTest.apk')) or \
run(cmd.format('ChromiumNetTestSupport.apk'))
def test(out_dir, extra_options):
return run(out_dir + '/bin/run_cronet_test_instrumentation_apk ' + \
extra_options)
def unittest(out_dir, extra_options):
return run(out_dir + '/bin/run_cronet_unittests_android ' + \
extra_options)
def test_ios(out_dir, extra_options):
return run(out_dir + '/iossim -c "' + extra_options + '" ' + \
out_dir + '/cronet_test.app')
def unittest_ios(out_dir, extra_options):
return run(out_dir + '/iossim -c "' + extra_options + '" ' + \
out_dir + '/cronet_unittests_ios.app')
def debug(extra_options):
return run('build/android/adb_gdb --start ' + \
'--activity=.CronetTestActivity ' + \
'--program-name=CronetTest ' + \
'--package-name=org.chromium.net',
extra_options)
def stack(out_dir):
return run('adb logcat -d | CHROMIUM_OUTPUT_DIR=' + out_dir +
' third_party/android_platform/development/scripts/stack')
def use_goma():
home_goma = os.path.expanduser("~/goma")
if os.path.exists(home_goma) or os.environ.get("GOMA_DIR") or \
os.environ.get("GOMADIR"):
return 'use_goma=true '
return ''
def get_ninja_jobs_option():
if use_goma():
return " -j1000 "
return ""
def get_default_gn_args(target_os, is_release):
gn_args = 'target_os="' + target_os + '" enable_websockets=false '+ \
'disable_file_support=true disable_ftp_support=true '+ \
'disable_brotli_filter=false ' + \
'is_component_build=false ' + \
'use_crash_key_stubs=true ' + \
'ignore_elf32_limitations=true use_partition_alloc=false ' + \
'include_transport_security_state_preload_list=false ' + use_goma()
if (is_release):
gn_args += 'is_debug=false is_official_build=true '
return gn_args
def get_mobile_gn_args(target_os, is_release):
return get_default_gn_args(target_os, is_release) + \
'use_platform_icu_alternatives=true '
def get_ios_gn_args(is_release, target_cpu):
return get_mobile_gn_args('ios', is_release) + \
'is_cronet_build=true ' + \
'use_xcode_clang=true ' + \
'ios_deployment_target="9.0" ' + \
'enable_dsyms=true ' + \
'target_cpu="%s" ' % target_cpu
def get_mac_gn_args(is_release):
return get_default_gn_args('mac', is_release) + \
'disable_histogram_support=true ' + \
'enable_dsyms=true '
def main():
parser = argparse.ArgumentParser()
parser.add_argument('command',
choices=['gn',
'sync',
'build',
'install',
'proguard',
'test',
'build-test',
'unit',
'build-unit',
'stack',
'debug',
'build-debug'])
parser.add_argument('-d', '--out_dir', action='store',
help='name of the build directory')
parser.add_argument('-i', '--iphoneos', action='store_true',
help='build for physical iphone')
parser.add_argument('-x', '--x86', action='store_true',
help='build for Intel x86 architecture')
parser.add_argument('-r', '--release', action='store_true',
help='use release configuration')
parser.add_argument('-a', '--asan', action='store_true',
help='use address sanitizer')
options, extra_options_list = parser.parse_known_args()
print options
print extra_options_list
is_ios = (sys.platform == 'darwin')
if is_ios:
test_target = 'cronet_test'
unit_target = 'cronet_unittests_ios'
gn_extra = '--ide=xcode'
if options.iphoneos:
gn_args = get_ios_gn_args(options.release, 'arm64')
out_dir_suffix = '-iphoneos'
else:
gn_args = get_ios_gn_args(options.release, 'x64')
out_dir_suffix = '-iphonesimulator'
if options.asan:
gn_args += 'is_asan=true '
out_dir_suffix += '-asan'
else:
test_target = 'cronet_test_instrumentation_apk'
unit_target = 'cronet_unittests_android'
gn_args = get_mobile_gn_args('android', options.release) + \
'use_errorprone_java_compiler=true enable_reporting=true '
gn_extra = ''
out_dir_suffix = ''
if options.x86:
gn_args += 'target_cpu="x86" '
out_dir_suffix = '-x86'
else:
gn_args += 'arm_use_neon=false '
if options.asan:
# ASAN on Android requires one-time setup described here:
# https://www.chromium.org/developers/testing/addresssanitizer
gn_args += 'is_asan=true is_clang=true is_debug=false '
out_dir_suffix += '-asan'
extra_options = ' '.join(extra_options_list)
if options.release:
out_dir = 'out/Release' + out_dir_suffix
else:
out_dir = 'out/Debug' + out_dir_suffix
if options.out_dir:
out_dir = options.out_dir
if (options.command=='gn'):
return gn(out_dir, gn_args, gn_extra)
if (options.command=='sync'):
return run('git pull --rebase && gclient sync')
if (options.command=='build'):
return build(out_dir, test_target, extra_options)
if (not is_ios):
if (options.command=='install'):
return install(out_dir)
if (options.command=='proguard'):
return build (out_dir, 'cronet_sample_proguard_apk')
if (options.command=='test'):
return install(out_dir) or test(out_dir, extra_options)
if (options.command=='build-test'):
return build(out_dir, test_target) or install(out_dir) or \
test(out_dir, extra_options)
if (options.command=='stack'):
return stack(out_dir)
if (options.command=='debug'):
return install(out_dir) or debug(extra_options)
if (options.command=='build-debug'):
return build(out_dir, test_target) or install(out_dir) or \
debug(extra_options)
if (options.command=='unit'):
return unittest(out_dir, extra_options)
if (options.command=='build-unit'):
return build(out_dir, unit_target) or unittest(out_dir, extra_options)
else:
if (options.command=='test'):
return test_ios(out_dir, extra_options)
if (options.command=='build-test'):
return build(out_dir, test_target) or test_ios(out_dir, extra_options)
if (options.command=='unit'):
return unittest_ios(out_dir, extra_options)
if (options.command=='build-unit'):
return build(out_dir, unit_target) or unittest_ios(out_dir, extra_options)
parser.print_help()
return 1
if __name__ == '__main__':
sys.exit(main())
|
[
"artem@brave.com"
] |
artem@brave.com
|
75d1e9b373c45d4eeb5da68148b4476d955e7fd8
|
0811cd3a37e82eb5175e0c1ffdc13510b256c7de
|
/bcs-app/backend/apps/configuration/yaml_mode/release.py
|
74c76eec59643c3e951e70340e7b17d81810fa86
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-unicode",
"ICU",
"LicenseRef-scancode-unknown-license-reference",
"Artistic-2.0",
"Zlib",
"LicenseRef-scancode-openssl",
"NAIST-2003",
"ISC",
"NTP",
"BSL-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
QiChangYin/bk-bcs-saas-copy-modify
|
363d08a64dd860329b8ab051a02f4733a29c33dd
|
7ef3b003ff20857a99415d7a3c99d8eb4f5764d9
|
refs/heads/master
| 2023-02-10T02:30:40.616494
| 2020-01-13T09:23:18
| 2020-01-13T09:23:18
| 233,553,174
| 0
| 0
|
NOASSERTION
| 2023-02-02T05:12:22
| 2020-01-13T09:01:09
|
Python
|
UTF-8
|
Python
| false
| false
| 5,381
|
py
|
# -*- coding: utf-8 -*-
#
# Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
# Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
import json
import datetime
from io import StringIO
from collections import OrderedDict
import jinja2
from ruamel.yaml import YAML
from dataclasses import dataclass
from rest_framework.exceptions import ParseError
from backend.apps.configuration.constants import FileResourceName
from backend.bcs_k8s.app import bcs_info_injector
from backend.bcs_k8s.helm import bcs_variable
@dataclass
class ReleaseData:
project_id: str
namespace_info: dict
show_version: OrderedDict
template_files: list
class ReleaseDataProcessor:
def __init__(self, user, raw_release_data):
self.access_token = user.token.access_token
self.username = user.username
self.project_id = raw_release_data.project_id
self.namespace_info = raw_release_data.namespace_info
self.show_version = raw_release_data.show_version
self.template_files = raw_release_data.template_files
def _parse_yaml(self, yaml_content):
try:
yaml = YAML()
resources = list(yaml.load_all(yaml_content))
except Exception as e:
raise ParseError(f'Parse manifest failed: \n{e}\n\nManifest content:\n{yaml_content}')
else:
# ordereddict to dict
return json.loads(json.dumps(resources))
def _join_manifest(self, resources):
try:
yaml = YAML()
s = StringIO()
yaml.dump_all(resources, s)
except Exception as e:
raise ParseError(f'join manifest failed: {e}')
else:
return s.getvalue()
def _get_bcs_variables(self):
sys_variables = bcs_variable.collect_system_variable(
access_token=self.access_token,
project_id=self.project_id,
namespace_id=self.namespace_info['id']
)
bcs_variables = bcs_variable.get_namespace_variables(self.project_id, self.namespace_info['id'])
sys_variables.update(bcs_variables)
return sys_variables
def _render_with_variables(self, raw_content, bcs_variables):
t = jinja2.Template(raw_content)
return t.render(bcs_variables)
def _set_namespace(self, resources):
ignore_ns_res = [FileResourceName.ClusterRole.value,
FileResourceName.ClusterRoleBinding.value,
FileResourceName.StorageClass.value,
FileResourceName.PersistentVolume.value]
try:
for res_manifest in resources:
if res_manifest['kind'] in ignore_ns_res:
continue
metadata = res_manifest['metadata']
metadata['namespace'] = self.namespace_info['name']
except Exception:
raise ParseError('set namespace failed: no valid metadata in manifest')
def _inject_bcs_info(self, yaml_content, inject_configs):
resources = self._parse_yaml(yaml_content)
context = {
'creator': self.username,
'updator': self.username,
'version': self.show_version.name
}
manager = bcs_info_injector.InjectManager(
configs=inject_configs,
resources=resources,
context=context
)
resources = manager.do_inject()
self._set_namespace(resources)
return self._join_manifest(resources)
def _get_inject_configs(self):
now = datetime.datetime.now()
configs = bcs_info_injector.inject_configs(
access_token=self.access_token,
project_id=self.project_id,
cluster_id=self.namespace_info['cluster_id'],
namespace_id=self.namespace_info['id'],
namespace=self.namespace_info['name'],
creator=self.username,
updator=self.username,
created_at=now,
updated_at=now,
version=self.show_version.name,
source_type='template'
)
return configs
def _inject(self, raw_content, inject_configs, bcs_variables):
content = self._render_with_variables(raw_content, bcs_variables)
content = self._inject_bcs_info(content, inject_configs)
return content
def release_data(self):
inject_configs = self._get_inject_configs()
bcs_variables = self._get_bcs_variables()
for res_files in self.template_files:
for f in res_files['files']:
f['content'] = self._inject(f['content'], inject_configs, bcs_variables)
return ReleaseData(self.project_id, self.namespace_info, self.show_version, self.template_files)
|
[
"gejun.coolfriend@gmail.com"
] |
gejun.coolfriend@gmail.com
|
4cd3b162fb3f2dcf97349017329fb4abb9623ee9
|
327d83545dc9f309de74f4e670cc7a92db954f17
|
/python-alg/algorithms/models/node.py
|
7c26f409dc734939ce35dd032ac69f064a338444
|
[] |
no_license
|
felipe-basina/algoritmos
|
eb936c45aaa6ae45c514d43e31cabbad5d8ee874
|
a4ee5b30801f54f42f71f23c963781d7bed899eb
|
refs/heads/master
| 2023-06-10T17:33:56.420717
| 2021-06-24T01:17:56
| 2021-06-24T01:17:56
| 360,247,224
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 497
|
py
|
class Node:
def __init__(self, data):
self.data = data
self.next = None # References to another node
def __str__(self):
return "[data={}:next={}] ".format(self.data, self.next)
class LinkedList:
def __init__(self):
self.head = None
self.tail = None
def insert_node(self, data):
node = Node(data)
if not self.head:
self.head = node
else:
self.tail.next = node
self.tail = node
|
[
"felipe.basina@gmail.com"
] |
felipe.basina@gmail.com
|
1462fd76543b2b6609f1a9102b0c572db25685d5
|
a1080c28573e1a59ec418ad3b0b0bf18e035dc41
|
/LeetCode/Largest Number_366617438.py
|
72e6404214711f50d16b2cd8c159aa203f976682
|
[] |
no_license
|
AumkarG/Algorithms-and-Data-Structures
|
8c6fc21218897d2361fed1512dc6bb13eabd8842
|
03603ad579564ef213c58edd57cb8753cf8f86ba
|
refs/heads/master
| 2023-03-04T09:48:25.167519
| 2021-02-14T17:16:18
| 2021-02-14T17:16:18
| 330,424,446
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
def custom_compare(x, y):
return int(x+y)-int(y+x)
class Solution(object):
def largestNumber(self, nums):
nums=[str(i) for i in nums]
nums=sorted(nums, cmp=custom_compare,reverse=True)
if(nums[0]=='0'):
return '0'
s=""
for i in nums:
s+=i
return s
|
[
"aumkaar,g@gmail.com"
] |
aumkaar,g@gmail.com
|
3fc636e9d82d6fb11367eecaec1349a89d61de26
|
c6969585b7edec377a389da46825a7389b5e8e12
|
/examples/twisted/websocket/echo_wsgi/server.py
|
55d5c779b9fc775e710c53630db196a5f4ba4dc2
|
[
"MIT"
] |
permissive
|
hzruandd/AutobahnPython
|
38b35a9d413d53dde9271ec436ccd3d8d07da74e
|
942d27eb6a87084cb8964fd2c9abaae67807be13
|
refs/heads/master
| 2021-01-16T21:43:49.398537
| 2015-09-13T19:16:21
| 2015-09-13T19:16:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,335
|
py
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import uuid
import sys
from twisted.python import log
from twisted.internet import reactor
from twisted.web.server import Site
from twisted.web.wsgi import WSGIResource
from flask import Flask, render_template
from autobahn.twisted.websocket import WebSocketServerFactory, \
WebSocketServerProtocol
from autobahn.twisted.resource import WebSocketResource, \
WSGIRootResource, \
HTTPChannelHixie76Aware
##
# Our WebSocket Server protocol
##
class EchoServerProtocol(WebSocketServerProtocol):
def onMessage(self, payload, isBinary):
self.sendMessage(payload, isBinary)
##
# Our WSGI application .. in this case Flask based
##
app = Flask(__name__)
app.secret_key = str(uuid.uuid4())
@app.route('/')
def page_home():
return render_template('index.html')
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == 'debug':
log.startLogging(sys.stdout)
debug = True
else:
debug = False
app.debug = debug
if debug:
log.startLogging(sys.stdout)
##
# create a Twisted Web resource for our WebSocket server
##
wsFactory = WebSocketServerFactory(u"ws://127.0.0.1:8080",
debug=debug,
debugCodePaths=debug)
wsFactory.protocol = EchoServerProtocol
wsFactory.setProtocolOptions(allowHixie76=True) # needed if Hixie76 is to be supported
wsResource = WebSocketResource(wsFactory)
##
# create a Twisted Web WSGI resource for our Flask server
##
wsgiResource = WSGIResource(reactor, reactor.getThreadPool(), app)
##
# create a root resource serving everything via WSGI/Flask, but
# the path "/ws" served by our WebSocket stuff
##
rootResource = WSGIRootResource(wsgiResource, {'ws': wsResource})
##
# create a Twisted Web Site and run everything
##
site = Site(rootResource)
site.protocol = HTTPChannelHixie76Aware # needed if Hixie76 is to be supported
reactor.listenTCP(8080, site)
reactor.run()
|
[
"tobias.oberstein@tavendo.de"
] |
tobias.oberstein@tavendo.de
|
ad9332db347bf438e8ee4f9c260de6368599a431
|
223c05418090665e9aedb754783cbb55bc3555c1
|
/277-Find-the-Celebrity.py
|
aa6318bbf4b60a4419c8c132d0522012d1b958c3
|
[] |
no_license
|
dkrotx/leetcode
|
227639030aa62c80adf9412aa50cba4e4ae13034
|
501c347004c140a82a95461e1dbcef6775b3d9da
|
refs/heads/master
| 2021-06-05T23:37:10.369386
| 2019-12-04T17:59:14
| 2019-12-04T17:59:14
| 111,242,268
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 865
|
py
|
# The knows API is already defined for you.
# @param a, person a
# @param b, person b
# @return a boolean, whether a knows b
# def knows(a, b):
class Solution(object):
def findCelebrity(self, n):
"""
:type n: int
:rtype: int
"""
guests = list(range(n))
while len(guests) >= 2:
a = guests.pop()
b = guests.pop()
if knows(a, b):
if not knows(b, a):
guests.append(b)
else:
if knows(b, a):
guests.append(a)
""" check what celebrity doesn't know anyone """
if guests:
x = guests[0]
for i in range(n):
if x != i and knows(x, i):
return -1
return x
return -1
|
[
"kisel@corp.mail.ru"
] |
kisel@corp.mail.ru
|
6ea3185e6c1f56dd935d1047fdc2829d8e96362c
|
62b736eff115a6d9cfd323c1b396c94f8a9302fe
|
/tkinter/simple_grid_window.py
|
3c50166b69166d56d82b070fb5b1d328c7360f03
|
[] |
no_license
|
ccnelson/Python
|
dccbb9a2c00f8124216f2f4d4202b94907134083
|
ebd0c401b23aee7467332d692588f02cda0ff935
|
refs/heads/master
| 2023-04-02T09:09:55.716686
| 2021-03-28T18:16:15
| 2021-03-28T18:16:15
| 184,681,086
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
import tkinter as tk
root = tk.Tk()
tk.Label(root, text="Username").grid(row=0, sticky=tk.W)
tk.Label(root, text="Password").grid(row=1, sticky=tk.W)
tk.Entry(root).grid(row=0, column=1, sticky=tk.E)
tk.Entry(root).grid(row=1, column=1, sticky=tk.E)
tk.Button(root, text="Login").grid(row=2, column=1, sticky=tk.E)
root.mainloop()
|
[
"noreply@github.com"
] |
ccnelson.noreply@github.com
|
03ac8e612ab1fe00f08e2288e8e207e0a263bd26
|
49536aafb22a77a6caf249c7fadef46d63d24dfe
|
/tensorflow/tensorflow/contrib/slim/python/slim/data/dataset_data_provider_test.py
|
9b9f6d8b299b60cd67a647e0dc0d0302815a82fb
|
[
"Apache-2.0"
] |
permissive
|
wangzhi01/deeplearning-1
|
4e5ad93f0d9ecd302b74352f80fe1fa6ae70bf0d
|
46ab82253d956953b8aa98e97ceb6cd290e82288
|
refs/heads/master
| 2020-05-28T03:14:55.687567
| 2018-09-12T16:52:09
| 2018-09-12T16:52:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,177
|
py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.data.dataset_data_provider."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from tensorflow.contrib.slim.python.slim import queues
from tensorflow.contrib.slim.python.slim.data import dataset
from tensorflow.contrib.slim.python.slim.data import dataset_data_provider
from tensorflow.contrib.slim.python.slim.data import test_utils
from tensorflow.contrib.slim.python.slim.data import tfexample_decoder
from tensorflow.python.client import session
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import image_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
def _resize_image(image, height, width):
image = array_ops.expand_dims(image, 0)
image = image_ops.resize_bilinear(image, [height, width])
return array_ops.squeeze(image, [0])
def _create_tfrecord_dataset(tmpdir):
if not gfile.Exists(tmpdir):
gfile.MakeDirs(tmpdir)
data_sources = test_utils.create_tfrecord_files(tmpdir, num_files=1)
keys_to_features = {
'image/encoded':
parsing_ops.FixedLenFeature(
shape=(), dtype=dtypes.string, default_value=''),
'image/format':
parsing_ops.FixedLenFeature(
shape=(), dtype=dtypes.string, default_value='jpeg'),
'image/class/label':
parsing_ops.FixedLenFeature(
shape=[1],
dtype=dtypes.int64,
default_value=array_ops.zeros(
[1], dtype=dtypes.int64))
}
items_to_handlers = {
'image': tfexample_decoder.Image(),
'label': tfexample_decoder.Tensor('image/class/label'),
}
decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
items_to_handlers)
return dataset.Dataset(
data_sources=data_sources,
reader=io_ops.TFRecordReader,
decoder=decoder,
num_samples=100,
items_to_descriptions=None)
class DatasetDataProviderTest(test.TestCase):
def testTFRecordDataset(self):
dataset_dir = tempfile.mkdtemp(prefix=os.path.join(self.get_temp_dir(),
'tfrecord_dataset'))
height = 300
width = 280
with self.test_session():
test_dataset = _create_tfrecord_dataset(dataset_dir)
provider = dataset_data_provider.DatasetDataProvider(test_dataset)
key, image, label = provider.get(['record_key', 'image', 'label'])
image = _resize_image(image, height, width)
with session.Session('') as sess:
with queues.QueueRunners(sess):
key, image, label = sess.run([key, image, label])
split_key = key.decode('utf-8').split(':')
self.assertEqual(2, len(split_key))
self.assertEqual(test_dataset.data_sources[0], split_key[0])
self.assertTrue(split_key[1].isdigit())
self.assertListEqual([height, width, 3], list(image.shape))
self.assertListEqual([1], list(label.shape))
def testTFRecordSeparateGetDataset(self):
dataset_dir = tempfile.mkdtemp(prefix=os.path.join(self.get_temp_dir(),
'tfrecord_separate_get'))
height = 300
width = 280
with self.test_session():
provider = dataset_data_provider.DatasetDataProvider(
_create_tfrecord_dataset(dataset_dir))
[image] = provider.get(['image'])
[label] = provider.get(['label'])
image = _resize_image(image, height, width)
with session.Session('') as sess:
with queues.QueueRunners(sess):
image, label = sess.run([image, label])
self.assertListEqual([height, width, 3], list(image.shape))
self.assertListEqual([1], list(label.shape))
def testConflictingRecordKeyItem(self):
dataset_dir = tempfile.mkdtemp(prefix=os.path.join(self.get_temp_dir(),
'tfrecord_dataset'))
with self.test_session():
with self.assertRaises(ValueError):
dataset_data_provider.DatasetDataProvider(
_create_tfrecord_dataset(dataset_dir), record_key='image')
if __name__ == '__main__':
test.main()
|
[
"hanshuobest@163.com"
] |
hanshuobest@163.com
|
ae5634dc045f83121134040f072c8b310c823b31
|
9f03e20a34599816358891adc6bcce29cd48aed6
|
/test/test_config.py
|
7b1fa6d485b48999903d072b0fe69863aaa2e6d9
|
[
"MIT"
] |
permissive
|
projg2/flaggie
|
9917adf783dca7c18471ad8f822a35e8afec351f
|
4485aed08c4e33347d88736fdd4a9914b8349908
|
refs/heads/master
| 2023-08-08T12:20:58.729741
| 2023-07-27T14:15:01
| 2023-07-27T14:15:01
| 72,996,715
| 14
| 2
|
MIT
| 2023-03-20T06:05:47
| 2016-11-06T14:40:07
|
Python
|
UTF-8
|
Python
| false
| false
| 4,723
|
py
|
# (c) 2022-2023 Michał Górny
# Released under the terms of the MIT license
import dataclasses
import os
import stat
import pytest
from flaggie.config import (TokenType, ConfigLine, find_config_files,
parse_config_file, dump_config_line,
ConfigFile, read_config_files, save_config_files,
)
@pytest.mark.parametrize(
"layout,expected",
[([], ["package.use/99local.conf"]),
(["package.use"], None),
(["package.use/a.conf", "package.use/b.conf"], None),
(["package.use/a/foo.conf", "package.use/b/foo.conf"], None),
# even though "a+" sorts before "a/", directories take precedence
(["package.use/a/foo.conf", "package.use/a+"], None),
# hidden and backup files should be ignored
(["package.use/.foo", "package.use/foo.conf", "package.use/foo.conf~"],
["package.use/foo.conf"]),
# corner case: package.use yielding no valid files
(["package.use/.foo"], ["package.use/99local.conf"]),
])
def test_find_config(tmp_path, layout, expected):
confdir = tmp_path / "etc/portage"
for f in layout:
path = confdir / f
path.parent.mkdir(parents=True, exist_ok=True)
with open(path, "wb"):
pass
if expected is None:
expected = layout
assert find_config_files(tmp_path, TokenType.USE_FLAG
) == [confdir / x for x in expected]
TEST_CONFIG_FILE = [
"#initial comment\n",
" # comment with whitespace\n",
"\n",
"*/* foo bar baz # global flags\n",
"*/* FROBNICATE_TARGETS: frob1 frob2\n",
" dev-foo/bar weird#flag other # actual comment # more comment\n",
"dev-foo/baz mixed LONG: too EMPTY:\n"
]
PARSED_TEST_CONFIG_FILE = [
ConfigLine(comment="initial comment"),
ConfigLine(comment=" comment with whitespace"),
ConfigLine(),
ConfigLine("*/*", ["foo", "bar", "baz"], [], " global flags"),
ConfigLine("*/*", [], [("FROBNICATE_TARGETS", ["frob1", "frob2"])]),
ConfigLine("dev-foo/bar", ["weird#flag", "other"], [],
" actual comment # more comment"),
ConfigLine("dev-foo/baz", ["mixed"], [("LONG", ["too"]), ("EMPTY", [])]),
]
for raw_line, line in zip(TEST_CONFIG_FILE, PARSED_TEST_CONFIG_FILE):
line._raw_line = raw_line
def test_parse_config_file():
assert list(parse_config_file(TEST_CONFIG_FILE)) == PARSED_TEST_CONFIG_FILE
def test_dump_config_line():
assert [dump_config_line(x) for x in parse_config_file(TEST_CONFIG_FILE)
] == [x.lstrip(" ") for x in TEST_CONFIG_FILE]
def test_read_config_files(tmp_path):
with open(tmp_path / "config", "w") as f:
f.write("".join(TEST_CONFIG_FILE))
with open(tmp_path / "config2", "w") as f:
pass
assert list(read_config_files([tmp_path / "config", tmp_path / "config2"])
) == [
ConfigFile(tmp_path / "config", PARSED_TEST_CONFIG_FILE),
ConfigFile(tmp_path / "config2", []),
]
def test_save_config_files_no_modification(tmp_path):
config_files = [
ConfigFile(tmp_path / "config", PARSED_TEST_CONFIG_FILE),
ConfigFile(tmp_path / "config2", []),
]
save_config_files(config_files)
assert all(not config_file.path.exists() for config_file in config_files)
def invalidate_config_lines(lines: list[ConfigLine],
*line_nos: int,
) -> list[ConfigLine]:
out = list(lines)
for x in line_nos:
out[x] = dataclasses.replace(out[x])
out[x].invalidate()
return out
@pytest.mark.parametrize("write", [False, True])
def test_save_config_files(tmp_path, write):
config_files = [
ConfigFile(tmp_path / "config",
invalidate_config_lines(PARSED_TEST_CONFIG_FILE, 1, 5),
modified=True),
ConfigFile(tmp_path / "config2",
[ConfigLine("dev-foo/bar", ["new"], [])],
modified=True),
ConfigFile(tmp_path / "config3", []),
]
for conf in config_files:
with open(conf.path, "w") as f:
os.fchmod(f.fileno(), 0o400)
f.write("<original content>")
save_config_files(config_files,
confirm_cb=lambda orig_file, temp_file: write)
expected = ["<original content>" for _ in config_files]
if write:
expected[:2] = [
"".join(x.lstrip(" ") for x in TEST_CONFIG_FILE),
"dev-foo/bar new\n",
]
assert [conf.path.read_text() for conf in config_files] == expected
assert [stat.S_IMODE(os.stat(conf.path).st_mode) for conf in config_files
] == [0o400 for _ in config_files]
|
[
"mgorny@gentoo.org"
] |
mgorny@gentoo.org
|
00fe334671f8b26e7570ae945446b1944bf41d48
|
9893602fdad77858441c08de15980174e1bd3216
|
/examples/gym/tests/test_mp_speed.py
|
ca36a13a6947aaba69b88c87289009cf7bab731f
|
[
"MIT"
] |
permissive
|
batermj/TensorArtist
|
b61a1fa71325b7dc538318160a0924e1b3d5c3d5
|
7654eb026f6d87f64e28ca152d006ef7625b0f45
|
refs/heads/master
| 2020-03-29T02:28:59.101255
| 2017-11-01T12:37:49
| 2017-11-01T12:37:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,360
|
py
|
# -*- coding:utf8 -*-
# File : test_mp_speed.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 10/08/2017
#
# This file is part of TensorArtist.
from tartist import image
from tartist.app import rl
import time
import multiprocessing.pool as mppool
def make_player(dump_dir=None):
def resize_state(s):
return image.resize(s, (84, 84), interpolation='NEAREST')
p = rl.GymRLEnviron('Enduro-v0', dump_dir=dump_dir)
p = rl.MapStateProxyRLEnviron(p, resize_state)
p = rl.HistoryFrameProxyRLEnviron(p, 4)
p = rl.LimitLengthProxyRLEnviron(p, 4000)
return p
def actor(s):
return 1
def worker(i):
p = make_player()
l = 0
for i in range(1):
p.play_one_episode(func=actor)
l += p.stats['length'][-1]
return l
def test_mp():
pool = mppool.Pool(4)
start_time = time.time()
lengths = pool.map(worker, range(4))
finish_time = time.time()
print('Multiprocessing: total_length={}, time={:.2f}s.'.format(sum(lengths), finish_time - start_time))
def test_mt():
pool = mppool.ThreadPool(4)
start_time = time.time()
lengths = pool.map(worker, range(4))
finish_time = time.time()
print('Multithreading: total_length={}, time={:.2f}s.'.format(sum(lengths), finish_time - start_time))
if __name__ == '__main__':
test_mp()
test_mt()
|
[
"maojiayuan@gmail.com"
] |
maojiayuan@gmail.com
|
38562e9aaea1b41c2e4b85cc909df95320520890
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/e8TFAMbTTaEr7JSgd_24.py
|
1ca289103b566112414504dfb983bcf3e86bd3fb
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 132
|
py
|
def left_digit(num):
num=list(num)
for x in num:
try:
x=int(x)
return x
except ValueError:
continue
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
1440e3fdc04667cea54daecb56015389ec30e3d1
|
1e0a8a929f8ea69e476d8a8c5f3455aaf5317de6
|
/tests/app/crud/test_dog_crud.py
|
afd0b8d1e8870ae8f2425c8541e38e56329efd68
|
[
"MIT"
] |
permissive
|
jearistiz/guane-intern-fastapi
|
aa41400fa22076111e96be695fde0a1ff6f118d0
|
269adc3ee6a78a262b4e19e7df291fd920fae2e1
|
refs/heads/master
| 2023-06-25T08:58:03.729614
| 2023-06-11T15:28:59
| 2023-06-11T15:28:59
| 370,229,796
| 63
| 9
|
MIT
| 2021-06-11T01:28:52
| 2021-05-24T04:45:23
|
Python
|
UTF-8
|
Python
| false
| false
| 625
|
py
|
from sqlalchemy.orm import Session
from app import crud
from mock_data.db_test_data import adopted_dogs_dicts
from tests.utils.handle_db_test import HandleDBTest
from tests.utils.parse_dict import update_dict_fmt_item
class TestDogCrud(HandleDBTest):
def test_get_adopter(self, db: Session):
adopted_dogs_out = crud.dog.get_adopted(db)
for adopted_dog_out in adopted_dogs_out:
adopted_dog_dict = adopted_dog_out._asdict()
adopted_dog_dict.pop('id')
update_dict_fmt_item(adopted_dog_dict, 'create_date', str)
assert adopted_dog_dict in adopted_dogs_dicts
|
[
"jeaz.git@gmail.com"
] |
jeaz.git@gmail.com
|
e9d4a1e15e50708818c91043e99270b983a6336f
|
e66770daf4d1679c735cfab1ac24dd1f5107bd83
|
/Chapter02/Ch02_Code/GUI_add_padding.py
|
ed137f365e58ba1817dfca017a7f893771e77317
|
[] |
no_license
|
CodedQuen/Python-GUI-Programming-Cookbook
|
c038eb6cec4945ff4f2b09e1551f9db712dd2502
|
f02b0f9916fb8272edc7ed4704eecce53ae0231c
|
refs/heads/master
| 2022-05-27T19:35:35.004455
| 2020-05-05T01:00:51
| 2020-05-05T01:00:51
| 261,329,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,147
|
py
|
'''
May 2017
@author: Burkhard A. Meier
'''
#======================
# imports
#======================
import tkinter as tk
from tkinter import ttk
from tkinter import scrolledtext
# Create instance
win = tk.Tk()
# Add a title
win.title("Python GUI")
# Modify adding a Label
a_label = ttk.Label(win, text="A Label")
a_label.grid(column=0, row=0)
# Modified Button Click Function
def click_me():
action.configure(text='Hello ' + name.get() + ' ' +
number_chosen.get())
# Changing our Label
ttk.Label(win, text="Enter a name:").grid(column=0, row=0)
# Adding a Textbox Entry widget
name = tk.StringVar()
name_entered = ttk.Entry(win, width=12, textvariable=name)
name_entered.grid(column=0, row=1)
# Adding a Button
action = ttk.Button(win, text="Click Me!", command=click_me)
action.grid(column=2, row=1) # <= change column to 2
# Creating three checkbuttons
ttk.Label(win, text="Choose a number:").grid(column=1, row=0)
number = tk.StringVar()
number_chosen = ttk.Combobox(win, width=12, textvariable=number, state='readonly')
number_chosen['values'] = (1, 2, 4, 42, 100)
number_chosen.grid(column=1, row=1)
number_chosen.current(0)
chVarDis = tk.IntVar()
check1 = tk.Checkbutton(win, text="Disabled", variable=chVarDis, state='disabled')
check1.select()
check1.grid(column=0, row=4, sticky=tk.W)
chVarUn = tk.IntVar()
check2 = tk.Checkbutton(win, text="UnChecked", variable=chVarUn)
check2.deselect()
check2.grid(column=1, row=4, sticky=tk.W)
chVarEn = tk.IntVar()
check3 = tk.Checkbutton(win, text="Enabled", variable=chVarEn)
check3.deselect()
check3.grid(column=2, row=4, sticky=tk.W)
# GUI Callback function
def checkCallback(*ignoredArgs):
# only enable one checkbutton
if chVarUn.get(): check3.configure(state='disabled')
else: check3.configure(state='normal')
if chVarEn.get(): check2.configure(state='disabled')
else: check2.configure(state='normal')
# trace the state of the two checkbuttons
chVarUn.trace('w', lambda unused0, unused1, unused2 : checkCallback())
chVarEn.trace('w', lambda unused0, unused1, unused2 : checkCallback())
# Using a scrolled Text control
scrol_w = 30
scrol_h = 3
scr = scrolledtext.ScrolledText(win, width=scrol_w, height=scrol_h, wrap=tk.WORD)
scr.grid(column=0, row=5, sticky='WE', columnspan=3) # now row=5
# First, we change our Radiobutton global variables into a list
colors = ["Blue", "Gold", "Red"]
# We have also changed the callback function to be zero-based, using the list
# instead of module-level global variables
# Radiobutton Callback
def radCall():
radSel=radVar.get()
if radSel == 0: win.configure(background=colors[0]) # now zero-based
elif radSel == 1: win.configure(background=colors[1]) # and using list
elif radSel == 2: win.configure(background=colors[2])
# create three Radiobuttons using one variable
radVar = tk.IntVar()
# Next we are selecting a non-existing index value for radVar
radVar.set(99)
# Now we are creating all three Radiobutton widgets within one loop
for col in range(3):
curRad = tk.Radiobutton(win, text=colors[col], variable=radVar,
value=col, command=radCall)
curRad.grid(column=col, row=6, sticky=tk.W) # now row=6
# Create a container to hold labels
buttons_frame = ttk.LabelFrame(win, text=' Labels in a Frame ')
buttons_frame.grid(column=0, row=7, padx=20, pady=40) # padx, pady
# Place labels into the container element - vertically
ttk.Label(buttons_frame, text="Label1").grid(column=0, row=0)
ttk.Label(buttons_frame, text="Label2").grid(column=0, row=1)
ttk.Label(buttons_frame, text="Label3").grid(column=0, row=2)
name_entered.focus() # Place cursor into name Entry
#======================
# Start GUI
#======================
win.mainloop()
|
[
"noreply@github.com"
] |
CodedQuen.noreply@github.com
|
7792841836eb91ce3be5aa927a1a37b5f335c11b
|
ac54aa0127a47fb59211fba9e6cb8431d9d864cd
|
/muscn/settings/base.py
|
be10c07ea832ffd447b82e096349324c124e21c9
|
[] |
no_license
|
xiringlama/manutd.org.np
|
8919e3c1ad0494f88b819089686a756d67d38598
|
f394f16edb96c05e2e864dcec1ec52532cd35ac2
|
refs/heads/master
| 2021-07-12T00:33:17.197706
| 2017-10-16T14:45:10
| 2017-10-16T14:45:10
| 107,222,122
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,820
|
py
|
import os
BASE_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), '..')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
INSTALLED_APPS = (
'jet',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django.contrib.sitemaps',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.facebook',
'allauth.socialaccount.providers.google',
'allauth.socialaccount.providers.twitter',
'froala_editor',
'dj_pagination',
'webstack_django_sorting',
'auditlog',
'versatileimagefield',
'rest_framework',
'rest_framework.authtoken',
'solo',
'fcm',
'anymail',
'adminsortable2',
'apps.core',
'apps.users',
'apps.payment',
'apps.page',
'apps.dashboard',
'apps.stats',
'apps.events',
'apps.post',
'apps.partner',
'apps.team',
'apps.timeline',
'apps.webhook',
'apps.gallery',
'apps.key',
'apps.push_notification',
'apps.contact',
)
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'dj_pagination.middleware.PaginationMiddleware',
'auditlog.middleware.AuditlogMiddleware',
'django.middleware.security.SecurityMiddleware',
]
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
ROOT_URLCONF = 'muscn.urls'
WSGI_APPLICATION = 'muscn.wsgi.application'
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kathmandu'
USE_I18N = True
USE_L10N = True
USE_TZ = True
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
'PAGE_SIZE': 25,
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.JSONRenderer',
),
'DEFAULT_PERMISSION_CLASSES': (
'apps.key.permissions.DistributedKeyAuthentication',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
)
}
from .user_settings import * # noqa
from django.contrib.messages import constants as messages
MESSAGE_TAGS = {
messages.ERROR: 'danger'
}
ESEWA_SCD = 'manutd'
FCM_MAX_RECIPIENTS = 10000
ALIASES = [
'Manchester United',
'Man Utd',
'Man United',
'MUFC',
]
# TEMPLATE_DEBUG = False
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_NAME = 'sci'
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_HTTPONLY = True
CSRF_COOKIE_NAME = 'ct'
CSRF_COOKIE_SECURE = True
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/7",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
}
}
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "default"
SOLO_CACHE = 'default'
SOLO_CACHE_PREFIX = 'solo'
SOLO_CACHE_TIMEOUT = 60 * 60 * 24
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
},
# 'opbeat': {
# 'level': 'WARNING',
# 'class': 'opbeat.contrib.django.handlers.OpbeatHandler',
# },
},
'loggers': {
'django': {
# 'handlers': ['mail_admins', 'opbeat'],
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# E-mail settings
DEFAULT_FROM_EMAIL = 'MUSC Nepal<info@manutd.org.np>'
SERVER_EMAIL = DEFAULT_FROM_EMAIL
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.zoho.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'info@manutd.org.np'
EMAIL_USE_TLS = True
SITE_ID = 1
STATIC_ROOT = os.path.join(BASE_DIR, '..', 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, '..', 'media')
MEDIA_URL = '/media/'
|
[
"xtranophilist@gmail.com"
] |
xtranophilist@gmail.com
|
948de2c172a457ca55617d1ab99b85b378c019ce
|
1840eff4a604161d56fba2747520686a5a008301
|
/src/apps/dh/urls.py
|
6c7f4b97920cad4dcd334b410b7cf1cbfb3c53eb
|
[] |
no_license
|
jinchuika/app-suni
|
f9291fd2e5ecc9915178141039da19444769cb85
|
0e37786d7173abe820fd10b094ffcc2db9593a9c
|
refs/heads/master
| 2023-08-24T21:52:04.490613
| 2023-08-14T21:32:22
| 2023-08-14T21:32:22
| 68,239,483
| 7
| 6
| null | 2023-08-14T21:32:24
| 2016-09-14T20:10:09
|
Python
|
UTF-8
|
Python
| false
| false
| 619
|
py
|
from django.conf.urls import url
from apps.dh.views import *
from django.views.decorators.cache import cache_page
urlpatterns = [
url(r'^evento/add/$', EventoDHCreateView.as_view(), name='evento_dh_add'),
url(r'^evento/(?P<pk>\d+)/$', EventoDHDetailView.as_view(), name='evento_dh_detail'),
url(r'^evento/(?P<pk>\d+)/edit$', EventoDHUpdateView.as_view(), name='evento_dh_update'),
url(r'^evento/calendario/home$', cache_page(5)(EventoDHCalendarHomeView.as_view()), name='evento_dh_calendario_home'),
url(r'^calendario/$', cache_page(5)(CalendarioDHView.as_view()), name='evento_dh_calendario'),
]
|
[
"jinchuika@gmail.com"
] |
jinchuika@gmail.com
|
2afc06ad0c4f9e8735adae24182886d198a029d3
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03284/s870585423.py
|
e77617648221cc20c5db9167bc932d1b07055052
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
import sys
read = sys.stdin.read
readline = sys.stdin.readline
readlines = sys.stdin.readlines
sys.setrecursionlimit(10 ** 9)
INF = 1 << 60
MOD = 1000000007
def main():
N, K = map(int, readline().split())
if N % K == 0:
print(0)
else:
print(1)
return
if __name__ == '__main__':
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
d390872599b775908541f61bca80c5d479995e3e
|
f900a9f48fe24c6a581bcb28ad1885cfe5743f80
|
/Chapter_3/Pg_89_Try_It_Yourself.py
|
18af1c66106cd96e52bca0048c6335ef2c061c01
|
[] |
no_license
|
Anjali-225/PythonCrashCourse
|
76e63415e789f38cee019cd3ea155261ae2e8398
|
f9b9649fe0b758c04861dad4d88058d48837a365
|
refs/heads/master
| 2022-12-03T21:35:07.428613
| 2020-08-18T11:42:58
| 2020-08-18T11:42:58
| 288,430,981
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,879
|
py
|
#3-4
guest = ['Joe','Eric','Sarah','Helly']
message = f"{guest[0]}, you are invited to dinner"
print(message)
message = f"{guest[1]}, you are invited to dinner"
print(message)
message = f"{guest[2]}, you are invited to dinner"
print(message)
message = f"{guest[3]}, you are invited to dinner\n"
print(message)
#3-5
print(f"{guest[1]} can not make it to the dinner\n")
del guest[1]
message = f"{guest[0]}, you are invited to dinner"
print(message)
message = f"{guest[1]}, you are invited to dinner"
print(message)
message = f"{guest[2]}, you are invited to dinner\n"
print(message)
#3-6
guest.insert(0, 'Bob')
guest.insert(2, 'James')
guest.append('Shrek')
message = f"{guest[0]}, you are invited to dinner since we have a bigger table"
print(message)
message = f"{guest[1]}, you are invited to dinner since we have a bigger table"
print(message)
message = f"{guest[2]}, you are invited to dinner since we have a bigger table"
print(message)
message = f"{guest[3]}, you are invited to dinner since we have a bigger table"
print(message)
message = f"{guest[4]}, you are invited to dinner since we have a bigger table"
print(message)
message = f"{guest[5]}, you are invited to dinner since we have a bigger table\n"
print(message)
#3-7
print("We can only add two people for dinner")
popped1 = guest.pop()
print(f"Sorry {popped1}, we can not invite you for dinner anymore")
popped2 = guest.pop()
print(f"Sorry {popped2}, we can not invite you for dinner anymore")
popped3 = guest.pop()
print(f"Sorry {popped3}, we can not invite you for dinner anymore")
popped4 = guest.pop()
print(f"Sorry {popped4}, we can not invite you for dinner anymore\n")
print(f"{guest[0]}, you are still invited for dinner")
print(f"{guest[1]}, you are still invited for dinner\n")
#del guest[1]
#del guest[0]
print(guest)
|
[
"noreply@github.com"
] |
Anjali-225.noreply@github.com
|
03ee2745ee616d850f07343d7d489f52af1d54c3
|
7d1e23982439f530f3c615f3ac94f59861bc2325
|
/controller.py
|
70d1ac19194843aea3eac826c23ea21ef5b3ffdb
|
[] |
no_license
|
daskorod/RPG_project
|
3104afafd19038f1c9da0d9aca6f489f9629093b
|
3b42a7f3131830d3b728f5d65332750fa032ec03
|
refs/heads/master
| 2020-05-24T04:24:33.198371
| 2017-11-30T19:29:48
| 2017-11-30T19:29:48
| 84,820,405
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,385
|
py
|
import pygame
import sys
import sounds
class Holy_Spirit ():
def __init__ (self):
self.auto = True
self.k_space = False
self.stage1_flag = False
self.stage2_flag = True
self.flag = 0
self.left = False
self.right = False
self.up = False
self.down = False
self.k_1 = False
self.k_2 = False
self.k_3 = False
self.k_4 = False
self.k_n = False
self.k_e = False
self.k_i = False
self.k_c = False
self.k_q = False
self.button_up = True
self.clic = False
self.k_j = False
self.k_esc = False
self.e_cntrl = False
self.up_is = False
self.down_is = False
self.k_1_control = False
self.k_2_control = False
self.k_3_control = False
self.k_a = False
self.move_cntrl_a = False
# R = False
# L = False
# U = False
# D = False
# self.direction = [R,L,U,D]
self.current_location = ''
self.move_cntrl = False
self.e_lock = False
def control(self):
    """Poll the pygame event queue and refresh the keyboard state flags.

    Each key is mirrored into a boolean attribute (pressed -> True on
    KEYDOWN, False on KEYUP); the arrow keys additionally drive the
    one-shot movement latch ``move_cntrl``/``move_cntrl_a`` and the
    digit/E keys drive their own edge latches (``k_*_control``,
    ``e_lock``) so holding a key fires only once.
    """
    # Quit request latched on a previous frame (Esc released sets it back).
    if self.k_esc == True:
        sys.exit()
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        if event.type == pygame.KEYDOWN:
            pressed = event.key
            if pressed == pygame.K_SPACE:
                self.k_space = True
            if pressed == pygame.K_LEFT:
                self.left = True
                if self.move_cntrl_a == False:
                    self.move_cntrl = True
                    self.move_cntrl_a = True
            if pressed == pygame.K_RIGHT:
                self.right = True
                if self.move_cntrl_a == False:
                    self.move_cntrl = True
                    self.move_cntrl_a = True
            if pressed == pygame.K_UP:
                self.up = True
                self.up_is = True
                if self.move_cntrl_a == False:
                    self.move_cntrl = True
                    self.move_cntrl_a = True
            if pressed == pygame.K_DOWN:
                self.down = True
                self.down_is = True
                if self.move_cntrl_a == False:
                    self.move_cntrl = True
                    self.move_cntrl_a = True
            # Digit keys: edge-triggered via their *_control latches.
            if pressed == pygame.K_1 and self.k_1_control == False:
                self.k_1 = True
                self.k_1_control = True
                self.clic = True
            if pressed == pygame.K_2 and self.k_2_control == False:
                self.k_2 = True
                self.k_2_control = True
                self.clic = True
            if pressed == pygame.K_3 and self.k_3_control == False:
                self.k_3 = True
                self.k_3_control = True
                self.clic = True
            if pressed == pygame.K_4:
                self.k_4 = True
            if pressed == pygame.K_n:
                self.k_n = True
            if pressed == pygame.K_e and self.e_lock == False:
                self.k_e = True
                sounds.clic2.play()
                self.e_lock = True
            if pressed == pygame.K_i:
                self.k_i = True
            if pressed == pygame.K_a:
                self.k_a = True
            if pressed == pygame.K_c:
                self.k_c = True
            if pressed == pygame.K_j:
                self.k_j = True
            if pressed == pygame.K_q:
                self.k_q = True
            if pressed == pygame.K_ESCAPE:
                self.k_esc = True
        if event.type == pygame.KEYUP:
            released = event.key
            if released == pygame.K_SPACE:
                self.k_space = False
            if released == pygame.K_LEFT:
                self.left = False
                self.move_cntrl_a = False
            if released == pygame.K_RIGHT:
                self.right = False
                self.move_cntrl_a = False
            if released == pygame.K_UP:
                self.up = False
                self.up_is = False
                self.move_cntrl_a = False
            if released == pygame.K_DOWN:
                self.down = False
                self.down_is = False
                self.move_cntrl_a = False
            if released == pygame.K_1:
                self.k_1 = False
                self.button_up = True
                self.k_1_control = False
            if released == pygame.K_2:
                self.k_2 = False
                self.button_up = True
                self.k_2_control = False
            if released == pygame.K_3:
                self.k_3 = False
                self.button_up = True
                self.k_3_control = False
            if released == pygame.K_4:
                self.k_4 = False
                self.button_up = True
            if released == pygame.K_n:
                self.k_n = False
            if released == pygame.K_e:
                self.e_cntrl = True
                self.k_e = False
                self.e_lock = False
            if released == pygame.K_i:
                self.k_i = False
            if released == pygame.K_a:
                self.k_a = False
            if released == pygame.K_c:
                self.k_c = False
            if released == pygame.K_j:
                self.k_j = False
            if released == pygame.K_q:
                self.k_q = False
            if released == pygame.K_ESCAPE:
                self.k_esc = False
|
[
"daskorod@gmail.com"
] |
daskorod@gmail.com
|
cdbded1453eefce5a9932e58c56c62071e4acfa4
|
ac5e52a3fc52dde58d208746cddabef2e378119e
|
/exps-gsn-edf/gsn-edf_ut=3.5_rd=0.65_rw=0.06_rn=4_u=0.075-0.35_p=harmonic-2/sched=RUN_trial=72/params.py
|
3e12d7ff33ac0d25b94cbf315139e7c926365d5c
|
[] |
no_license
|
ricardobtxr/experiment-scripts
|
1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1
|
7bcebff7ac2f2822423f211f1162cd017a18babb
|
refs/heads/master
| 2023-04-09T02:37:41.466794
| 2021-04-25T03:27:16
| 2021-04-25T03:27:16
| 358,926,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
# Scheduling-experiment parameter record (GSN-EDF, trial 72).
# NOTE(review): the file is a bare dict literal with no assignment --
# presumably read and eval'd by the experiment harness; confirm the loader.
{'cpus': 4,
 'duration': 30,
 'final_util': '3.518429',
 'max_util': '3.5',
 'periods': 'harmonic-2',
 'release_master': False,
 'res_distr': '0.65',
 'res_nmb': '4',
 'res_weight': '0.06',
 'scheduler': 'GSN-EDF',
 'trial': 72,
 'utils': 'uni-medium-3'}
|
[
"ricardo.btxr@gmail.com"
] |
ricardo.btxr@gmail.com
|
374137ee28176ba0c2168a43b68a6388dd3a8e5a
|
1dc4fbb38f333c665abd0f7e4f9919ad1b349c37
|
/calories/taskapp/celery.py
|
2931a91b8d7ded7aebc3e8e3d816fafd07496a4b
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
zulfiyagit/calories
|
2dd98a4b160fa88ba592ec150e8e6f640a3fb2e9
|
0719d61891a93eff7c06a9356c26f42b99019444
|
refs/heads/master
| 2020-12-29T18:52:20.372303
| 2016-03-28T20:54:20
| 2016-03-28T20:54:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,358
|
py
|
from __future__ import absolute_import
import os
from celery import Celery
from django.apps import AppConfig
from django.conf import settings
# Make sure Django settings exist before the Celery app is created
# (a plain `celery` invocation has not configured Django yet).
if not settings.configured:
    # set the default Django settings module for the 'celery' program.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")  # pragma: no cover

# Module-level Celery application; tasks register against this instance.
app = Celery('calories')
class CeleryConfig(AppConfig):
    """Django app config that finishes Celery wiring once the app registry is ready."""

    name = 'calories.taskapp'
    verbose_name = 'Celery Config'

    def ready(self):
        """Configure the Celery app and hook up Sentry/Raven when configured."""
        # Using a string here means the worker will not have to
        # pickle the object when using Windows.
        app.config_from_object('django.conf:settings')
        app.autodiscover_tasks(lambda: settings.INSTALLED_APPS, force=True)

        if hasattr(settings, 'RAVEN_CONFIG'):
            # Celery signal registration
            # Imports are kept local so raven is only required when
            # RAVEN_CONFIG is actually present in settings.
            from raven import Client as RavenClient
            from raven.contrib.celery import register_signal as raven_register_signal
            from raven.contrib.celery import register_logger_signal as raven_register_logger_signal

            raven_client = RavenClient(dsn=settings.RAVEN_CONFIG['DSN'])
            raven_register_logger_signal(raven_client)
            raven_register_signal(raven_client)
@app.task(bind=True)
def debug_task(self):
    # bind=True exposes the task instance as `self`, so the raw request
    # context can be printed for debugging worker/routing issues.
    print('Request: {0!r}'.format(self.request))  # pragma: no cover
|
[
"germanilyin@gmail.com"
] |
germanilyin@gmail.com
|
fb329c76b6c441fb363aee300ad275687f9b6472
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/MEDIUM/YW_ZXBMM_SZSJ_081.py
|
5286dc8bfa248725178097270b3c4f214c05c938
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,010
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_ZXBMM_SZSJ_081(xtp_test_case):
    # YW_ZXBMM_SZSJ_081
    def test_YW_ZXBMM_SZSJ_081(self):
        """Best-5-or-cancel BUY order on the Shenzhen A market, expected fully traded."""
        title = '五档即成转撤销买--已成(费用=min)'
        # Define the expected values of this test case.
        # Expected status is one of: initial / not traded / partly traded /
        # fully traded / partial-cancel reported / partly cancelled /
        # reported pending cancel / cancelled / invalid order /
        # cancel-invalid / internally cancelled.
        # xtp_ID and cancel_xtpID default to 0 and need not be changed.
        case_goal = {
            '期望状态': '全成',
            'errorID': 0,
            'errorMSG': '',
            '是否生成报单': '是',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)

        # Define the order (entrust) parameters ------------------------------
        # Args: ticker, market, security type, security status, trading
        # status, side ('B' buy / 'S' sell), expected status, Api.
        stkparm = QueryStkPriceQty('002051', '2', '1', '2', '0', 'B', case_goal['期望状态'], Api)
        # If fetching the order parameters failed, the test case fails.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            self.assertEqual(rs['用例测试结果'], True)
        else:
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':2,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
                'price': stkparm['涨停价'],
                'quantity': 700,
                'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
            self.assertEqual(rs['用例测试结果'], True)  # 0


if __name__ == '__main__':
    unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
662681f8d9d3bea85b90b39c2599f1cef5f7dfc0
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03438/s156659931.py
|
39826d4f4203b8b35ce295fa8eb7b93a219c170d
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
from sys import stdin


def can_transform(a, b):
    """Return True if list ``a`` can be turned into ``b`` under the puzzle's rules.

    Mirrors the original check: for each position where a[i] < b[i] we need
    ceil((b[i]-a[i])/2) "raise" operations, for each position where
    a[i] >= b[i] we need (a[i]-b[i]) removed units; the answer is yes when
    the larger of those two totals fits in the overall budget
    sum(b) - sum(a).
    """
    raises_needed = 0   # positions that must grow (counted in +2 steps)
    drops_needed = 0    # surplus units at positions that must shrink
    for x, y in zip(a, b):
        if x < y:
            raises_needed += (y - x + 1) // 2
        else:
            drops_needed += x - y
    return max(raises_needed, drops_needed) <= sum(b) - sum(a)


if __name__ == '__main__':
    # First line is n (the list length); it is consumed but not needed.
    stdin.readline()
    a = list(map(int, stdin.readline().split()))
    b = list(map(int, stdin.readline().split()))
    print('Yes' if can_transform(a, b) else 'No')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
951bf4d60636cb348271486a74c5e08e4e35b053
|
5d3556043828ec87b4f28b92895904c20b083d2e
|
/electoral_constituencies/models.py
|
0881ebae4ecf60958bfb14f53fce930e1ee322a7
|
[] |
no_license
|
Election-Portal/Election-Portal-Webapp
|
826c926807bb54568c5db4861a01eaba217fc00b
|
90c6a3da19f4bd029f0bd98ea2ca98ab095abbab
|
refs/heads/master
| 2022-12-06T19:38:53.693793
| 2021-05-16T06:16:57
| 2021-05-16T06:16:57
| 139,657,468
| 3
| 3
| null | 2022-11-22T02:48:27
| 2018-07-04T02:16:56
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,683
|
py
|
from django.db import models
from political_divisions.models import Province, District
from political_parties.models import PoliticalParty
# Create your models here.
class Sabha(models.Model):
    """Base model for an electoral constituency (assembly seat).

    NOTE(review): not declared abstract, so Django also creates a table for
    Sabha itself and uses multi-table inheritance for the subclasses --
    confirm this is intended.
    """

    name = models.CharField("Name", max_length=50)
    district = models.ForeignKey(District,on_delete=models.CASCADE, related_name="SabhaDistrictSet")
    province = models.ForeignKey(Province, related_name="sabha_province_set",on_delete=models.CASCADE)
    area = models.IntegerField()  # NOTE(review): units not specified here
    population = models.IntegerField()
    voters = models.IntegerField()
    is_marginal = models.BooleanField(default=False)
class PradeshSabha(Sabha):
    """Provincial-assembly constituency; records the winning member and party."""

    winner = models.CharField("Member of Provincial Assembly", max_length=50)
    won_political_party = models.ForeignKey(PoliticalParty, related_name = "pradeshsabha_won_political_party_set",on_delete=models.CASCADE)

    class Meta:
        verbose_name = "Pradesh Sabha"
        verbose_name_plural = "Pradesh Sabhas"

    def __str__(self):
        return self.name
class PratinidhiSabha(Sabha):
    """Federal (House of Representatives) constituency.

    Each federal constituency is linked to two provincial-assembly
    constituencies, labelled "ka" and "kha".
    """

    winner = models.CharField("Member of House of Representative", max_length=50)
    won_political_party = models.ForeignKey(PoliticalParty, related_name = "pratinidhisabha_won_political_party_set",on_delete=models.CASCADE)
    pradeshsabha_ka = models.ForeignKey(PradeshSabha, related_name="pratinidhisabha_pradeshsabha_ka",on_delete=models.CASCADE)
    pradeshsabha_kha = models.ForeignKey(PradeshSabha, related_name="pratinidhisabha_pradeshsabha_kha",on_delete=models.CASCADE)

    class Meta:
        verbose_name = "Pratinidhi Sabha"
        verbose_name_plural = "Pratinidhi Sabhas"

    def __str__(self):
        return self.name
|
[
"aakrist666@gmail.com"
] |
aakrist666@gmail.com
|
7c874e9bd36d9da166eb7dbe16f7b4208a2ca064
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_55/446.py
|
9f01e2cc099049a3b984b3b6a4598d3fb9b46375
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 872
|
py
|
def ride_coaster(k, groups):
    """Run the roller coaster once and return the number of riders.

    Groups board from the front of the queue, whole groups only, while the
    next group still fits within capacity ``k``.  Boarded groups are moved
    to the back of the queue in their boarding order (``groups`` is mutated
    in place).  Each rider pays one unit, so the return value is also the
    money earned on this run.
    """
    riders = 0
    boarded = 0  # index of the first group that did NOT board
    while boarded < len(groups) and riders + groups[boarded] <= k:
        riders += groups[boarded]
        boarded += 1
    # Rotate the boarded prefix to the back in one O(n) step instead of
    # the original repeated pop(0), which was O(n) per pop.
    groups[:] = groups[boarded:] + groups[:boarded]
    return riders


def main():
    """Solve every case in the input file and write the answers."""
    # 'with' guarantees both files are closed even if parsing fails.
    with open("C-small-attempt0.in", "r") as infile, \
         open("C-small-attempt0.out", "w") as outfile:
        cases = int(infile.readline())
        for case_number in range(cases):
            # r rides per day, capacity k, n groups (n == len(groups)).
            r, k, n = map(int, infile.readline().split())
            groups = [int(token) for token in infile.readline().split()]
            money_made = 0
            for _ in range(r):
                money_made += ride_coaster(k, groups)
            outfile.write('Case #' + str(case_number + 1) + ': ' + str(money_made) + '\n')


if __name__ == "__main__":
    main()
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
ae4b4ce6d77449745f49c95d0a0c4c087506107c
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/10/usersdata/132/9950/submittedfiles/testes.py
|
af2fd0de3a5a21e6725e176347e0e0201d8e9097
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 181
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division


def pertence(a):
    """Return True if a == i*i + i + 2 for some integer i >= 1."""
    i = 1
    # Walk i upward while the expression can still reach a.  Using <= keeps
    # the equality case reachable: the original loop used the strict < a,
    # so the success branch could never fire and 'n' was printed once per
    # iteration instead of a single verdict.
    while i * i + i + 2 <= a:
        if i * i + i + 2 == a:
            return True
        i = i + 1
    return False


if __name__ == '__main__':
    a = input('digite um numero:')
    if pertence(a):
        print('s')
    else:
        print('n')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
a2d9dd9b79e3f5e7b06713e7c0d82ccb5958531c
|
7f4fb112bc9ab2b90f5f2248f43285ce9ac2e0a0
|
/src/igem/neutronics/water/container/borosilicate-glass-backfill/5cm/50wt/plot_all.in.one_cask.thickness_dose.rate_t4045_bottom.py
|
a45cb45d0aafb14cb4f353d47ae77969fc35a8da
|
[] |
no_license
|
TheDoctorRAB/plot
|
dd3b5134c91c8fa7032fcc077c5427b26a80e49d
|
ed6746d511222c03e79f93548fe3ecd4286bf7b1
|
refs/heads/master
| 2021-07-11T10:21:19.347531
| 2020-07-16T17:13:15
| 2020-07-16T17:13:15
| 20,462,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,321
|
py
|
########################################################################
# R.A.Borrelli
# @TheDoctorRAB
# rev.11.March.2015
########################################################################
#
# Plot routine - dose rate at the bottom cask surface vs. wall thickness.
# All in one file, with no separate control input or lib files.
# The plot data file is named on the command line: column 0 holds the
# x values (wall thickness) and odd columns hold the dose/flux series.
#
########################################################################
from sys import argv

import numpy
import matplotlib
import matplotlib.pyplot as plot
from matplotlib.ticker import MultipleLocator

import Tkinter


def screen_geometry():
    """Query the display through Tk and return (width_px, height_px, dpi).

    The returned dpi is the entry of a fixed set of common display DPIs
    closest to the measured horizontal DPI (first entry wins ties).
    Also echoes the measured geometry to stdout, as the original did.
    """
    root = Tkinter.Tk()
    # pixels
    width = root.winfo_screenwidth()
    height = root.winfo_screenheight()
    # mm
    width_mm = root.winfo_screenmmwidth()
    height_mm = root.winfo_screenmmheight()
    # in
    width_in = width_mm / 25.4
    height_in = height_mm / 25.4
    # dpi
    width_dpi = width / width_in
    height_dpi = height / height_in
    # Snap the measured DPI to the nearest standard value.
    current_dpi = min((96, 120, 144, 168, 192), key=lambda dval: abs(dval - width_dpi))
    # output to screen
    print('width: %i px, height: %i px' % (width, height))
    print('width: %i mm, height: %i mm' % (width_mm, height_mm))
    print('width: %0.f in, height: %0.f in' % (width_in, height_in))
    print('width: %0.f dpi, height: %0.f dpi' % (width_dpi, height_dpi))
    print('size is %0.f %0.f' % (width, height))
    print('current DPI is %0.f' % (current_dpi))
    return width, height, current_dpi


def main(plot_datafile):
    """Read the plot data file, render the dose-rate figure, and save it."""
    width, height, current_dpi = screen_geometry()

    # Plot data: column 0 is wall thickness; odd columns are dose-rate series.
    plot_data = numpy.loadtxt(plot_datafile, dtype=float)

    # --- font sizes -------------------------------------------------------
    matplotlib.rcParams.update({'font.size': 48})  # axis numbers
    title_fontsize = 54      # plot title
    axis_fontsize = 48       # axis labels
    annotate_fontsize = 48   # annotation

    # Two-axis scaffold kept from the original template (right axis unused).
    fig, left_axis = plot.subplots()
    # right_axis = left_axis.twinx()

    # --- plot text --------------------------------------------------------
    title = 'Dose rate - Bottom surface'
    xtitle = 'Wall thickness [cm]'
    ytitle = 'Dose rate [$\mu$Sv/h]'

    # one (line color, legend text, data column) triple per curve
    curves = (
        ('blue', '10 wt% $B_4C$', 1),
        ('orange', '30 wt% $B_4C$', 3),
        ('red', '50 wt% $B_4C$', 5),
        ('green', '70 wt% $B_4C$', 7),
        ('cyan', '90 wt% $B_4C$', 9),
    )
    legend_location = 'lower left'  # location of legend on grid
    legend_font = 42

    # annotation text and (x, y) positions in data coordinates
    annotations = (
        ('T-4045', 23, 1100),
        ('Water-Glass backfill', 23, 700),
        ('50 wt% $^{10}B$', 23, 400),
        ('5cm thick concrete', 23, 0.02),
    )

    # axis domain and range; ymin > 0 because the y axis is log scaled
    xmin, xmax = 1, 31
    ymin, ymax = 0.01, 3000

    # axis ticks
    xmajortick, xminortick = 5, 1
    ymajortick, yminortick = 5000, 1000

    # grid and curve line widths
    major_grid_linewidth = 2.5
    minor_grid_linewidth = 2.1
    major_grid_tick_length = 7
    minor_grid_tick_length = 5
    curve_linewidth = 4.0

    # --- titles -----------------------------------------------------------
    plot.title(title, fontsize=title_fontsize)
    left_axis.set_xlabel(xtitle, fontsize=axis_fontsize)
    left_axis.set_ylabel(ytitle, fontsize=axis_fontsize)
    # right_axis.set_ylabel()

    # --- grid -------------------------------------------------------------
    left_axis.grid(which='major', axis='both', linewidth=major_grid_linewidth)
    left_axis.grid(which='minor', axis='both', linewidth=minor_grid_linewidth)
    left_axis.tick_params(axis='both', which='major', direction='inout', length=major_grid_tick_length)
    left_axis.tick_params(axis='both', which='minor', direction='inout', length=minor_grid_tick_length)

    # --- domain, range, ticks, log scale ----------------------------------
    plot.xlim(xmin, xmax)
    left_axis.axis(ymin=ymin, ymax=ymax)
    left_axis.xaxis.set_major_locator(MultipleLocator(xmajortick))
    left_axis.xaxis.set_minor_locator(MultipleLocator(xminortick))
    left_axis.yaxis.set_major_locator(MultipleLocator(ymajortick))
    left_axis.yaxis.set_minor_locator(MultipleLocator(yminortick))
    # left_axis.set_xscale('log')
    left_axis.set_yscale('log')

    # --- annotations ------------------------------------------------------
    for text, x, y in annotations:
        left_axis.annotate(text, xy=(x, y), xytext=(x, y), fontsize=annotate_fontsize)

    # --- plot data and legend ---------------------------------------------
    for color, label, column in curves:
        left_axis.plot(plot_data[:, 0], plot_data[:, column], marker='o', color=color,
                       label=label, linewidth=curve_linewidth, markersize=20)
    # legend needs to be added after all the plot data
    left_axis.legend(loc=legend_location, fontsize=legend_font)

    # --- size and save ----------------------------------------------------
    plot.get_current_fig_manager().resize(width, height)
    plot.gcf().set_size_inches((0.01 * width), (0.01 * height))
    plot.savefig(title, dpi=current_dpi)
    # plot.show()  # uncomment to display the figure on screen


if __name__ == '__main__':
    # command line: the single argument names the plot data file
    script, plot_datafile = argv
    main(plot_datafile)

########################################################################
#
# EOF
#
########################################################################
|
[
"borrelli@localhost.localdomain"
] |
borrelli@localhost.localdomain
|
8c420f57c5fe425f7d0fa0dae48e942a44174687
|
ceeeb927544c474163347254b11485cc945ea951
|
/core/migrations/0002_alter_user_managers.py
|
ce6eb500dce47749534bad897ccb645b67da9adf
|
[] |
no_license
|
alisamadzadeh46/filestore
|
ecc8d84ca16e8a8a51af0b74446a0c3b88cda646
|
4f31e51b2d028cd5f79b6af06d05568a8af7e9e1
|
refs/heads/main
| 2023-06-22T18:38:08.179128
| 2021-07-26T16:03:19
| 2021-07-26T16:03:19
| 377,806,835
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
# Generated by Django 3.2.4 on 2021-06-18 16:17
# Auto-generated migration: resets the declared managers on the `user`
# model to an empty list (Django then falls back to the default manager).

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelManagers(
            name='user',
            managers=[
            ],
        ),
    ]
|
[
"alisamadzadeh46@gmail.com"
] |
alisamadzadeh46@gmail.com
|
eb105e8f898024dd5992253c0332f1e6987e2698
|
875bb84440094ce058a2ec25a661a7da6bb2e129
|
/algo_py/boj/bj1647.py
|
e51a516645ce66ea00558949de82a9b29f3f55b6
|
[] |
no_license
|
shg9411/algo
|
150e4291a7ba15990f17ca043ae8ab59db2bf97b
|
8e19c83b1dbc0ffde60d3a3b226c4e6cbbe89a7d
|
refs/heads/master
| 2023-06-22T00:24:08.970372
| 2021-07-20T06:07:29
| 2021-07-20T06:07:29
| 221,694,017
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 603
|
py
|
import sys
input = sys.stdin.readline
def find(x):
    """Return the root of x's set in the global `parent` table, compressing the path."""
    root = parent[x]
    if root == x:
        return x
    root = find(root)
    parent[x] = root  # path compression: point x straight at the root
    return root
def union(x, y):
    """Merge y's set into x's set: y's root is re-parented onto x's root."""
    root_x = find(x)
    root_y = find(y)
    parent[root_y] = root_x
def kruskal():
    """Kruskal's algorithm over the pre-sorted global edge list `q`.

    Uses the module-level disjoint set `parent`.  Stops after accepting
    N-2 edges -- one short of a full spanning tree -- which leaves the
    nodes split into exactly two connected components (the two villages
    of BOJ 1647).  Returns the total cost of the accepted edges.
    """
    cnt = res = 0
    for c,a,b in q:
        # Skip edges whose endpoints are already connected (would form a cycle).
        if find(a) == find(b):
            continue
        cnt += 1
        res += c
        union(a, b)
        if cnt == N-2:
            break
    return res
# Input: N nodes and M weighted edges, one edge per line as "A B C".
N, M = map(int, input().split())
parent = [i for i in range(N+1)]  # disjoint-set parent table (1-based nodes)
q = []
for _ in range(M):
    A, B, C = map(int, input().split())
    q.append((C,A,B))  # cost first, so the default tuple sort orders by cost
q.sort()
print(kruskal())
|
[
"shg9411@naver.com"
] |
shg9411@naver.com
|
62ac4885dae7b11cc8dde424e4969271cc97bbc6
|
51b838412b7d9d38e398fefff92a0f17b3e040d7
|
/enso/enso/utils/strings.py
|
64d7d425ff3f7eef188c554f110433ea14749552
|
[
"BSD-2-Clause"
] |
permissive
|
thdoan/enso-portable
|
ed87bb30f3fe5d95e8dc6f3c4fa2a1a3a46f37fc
|
2dd6db78f40811d78fe9a162ec95eac14bda2250
|
refs/heads/master
| 2020-04-05T19:01:50.058547
| 2015-01-11T16:46:56
| 2015-01-11T16:46:56
| 28,119,291
| 8
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,804
|
py
|
# Copyright (c) 2008, Humanized, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Enso nor the names of its contributors may
# be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY Humanized, Inc. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Humanized, Inc. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
#
# enso.utils.strings
#
# ----------------------------------------------------------------------------
"""
Various string utility methods.
"""
# ----------------------------------------------------------------------------
# Constants
# ----------------------------------------------------------------------------
# Double "smart quotes".
# Double "smart quotes": U+201C / U+201D.
OPEN_QUOTE = u"\u201C"
CLOSE_QUOTE = u"\u201D"

# Single "smart quotes": U+2018 / U+2019.
OPEN_SINGLE_QUOTE = u"\u2018"
CLOSE_SINGLE_QUOTE = u"\u2019"
# ----------------------------------------------------------------------------
# String utility functions
# ----------------------------------------------------------------------------
def smartQuote( text ):
    """
    Return *text* with straight double and single quotes replaced by
    left/right facing "smart quotes", as a unicode object.

    NOTE: This uses a very simple algorithm; if you are trying to quote
    an arbitrary chunk of text, it is best to apply this function to your
    formatting string, e.g.:

        ' %s ' - output from blah command

    before applying the formatting operation that dumps unknown text.
    """
    # Double quotes first, then singles -- same order as before.
    return _smartSingleQuote( _smartDoubleQuote( text ) )
def _smartSingleQuote( inText ):
    """
    Replaces single quotes with "smart quotes", i.e., forward
    and back facing quotes, except for single quotes that are
    parts of certain contractions.

    NOTE(review): relies on the Python 2 ``unicode`` builtin.
    NOTE(review): input text that already contains a literal escape
    marker like ``<<|s|>>`` would be corrupted by the round-trip below.
    """
    # Explicitly copy the text and cast it to unicode.
    outText = unicode( inText[:] )
    # There are two usages of single quote marks; for
    # quotations, and for contractions.
    # First, we escape the contraction cases. Then,
    # without those pesky apostrophes, we will be free
    # and clear to replace the remaining single quotes
    # with smart quotes.
    cases = [ "'s", "'t", "'nt", "I'm", "'ve", "'re", ]
    for case in cases:
        tempText = "<<|%s|>>" % case.replace( "'", "" )
        outText = outText.replace( case, tempText )
    # Now that there are no apostrophes, we can run through
    # the text, replacing each pair of single quotes with
    # opening and closing 'smart single quotes'.
    while outText.count( "'" ) > 0:
        outText = outText.replace( "'", OPEN_SINGLE_QUOTE, 1)
        outText = outText.replace( "'", CLOSE_SINGLE_QUOTE, 1)
    # Now we have to replace the contraction escape sequences
    # with the original contractions.
    for case in cases:
        tempText = "<<|%s|>>" % case.replace( "'", "" )
        outText = outText.replace( tempText, case )
    return outText
def _smartDoubleQuote( inText ):
    """
    Replaces double quotes with "smart quotes", i.e., forward
    and back facing quotes.

    Quotes are paired left to right: the 1st, 3rd, ... straight quote
    becomes an opening quote and the 2nd, 4th, ... a closing quote.

    NOTE(review): relies on the Python 2 ``unicode`` builtin.
    """
    # Explicitly copy the text and cast it to unicode.
    outText = unicode( inText[:] )
    while outText.count( "\"" ) > 0:
        outText = outText.replace( "\"", OPEN_QUOTE, 1)
        outText = outText.replace( "\"", CLOSE_QUOTE, 1)
    return outText
def stringRatio( a, b ):
    """
    Calculates the string ratio of a to b.

    If the strings are equal, returns 1.0. If they have no similarity
    whatsoever, returns 0.0. Otherwise, returns a number in-between.
    Higher values mean more similar strings.
    """
    if a == b:
        return 1.0
    elif a in b:
        return float( len(a) ) / len(b)
    elif b in a:
        return float( len(b) ) / len(a)
    else:
        # The following code is actually identical to this code:
        #
        #   import difflib
        #   seqMatch = difflib.SequenceMatcher( False, a, b )
        #   ratio = seqMatch.real_quick_ratio()
        #   return ratio
        #
        # But has been copied from difflib and pasted inline here for
        # efficiency purposes.  It is a cheap length-based upper bound
        # on the true similarity ratio.
        la, lb = len(a), len(b)
        length = la + lb
        if length:
            return 2.0 * (min(la, lb)) / length
        return 1.0


def stringRatioBestMatch( item, sequence ):
    """
    Uses a string ratio algorithm to find the best match
    to item among the elements of sequence.

    The best match is the element with the HIGHEST similarity ratio;
    the previous implementation used min(), which returned the LEAST
    similar element (stringRatio returns 1.0 for identical strings).
    """
    ratios = [ stringRatio( item, element ) \
               for element in sequence ]
    return sequence[ ratios.index( max(ratios) ) ]
|
[
"gchristnsn@gmail.com"
] |
gchristnsn@gmail.com
|
3562f25f6517f4afb0b4ec5ceb853d99f9d34116
|
a331ac86bf0dc281b1b819f70110deb873833698
|
/python/higher/test/setup.py
|
e4e45f1aa39b2122f55e2f893e2600f14fb9fc1a
|
[] |
no_license
|
sunhuachuang/study-demo
|
f0c2bbaca78a6735442039a33a051a8b715f8490
|
822dfec043d53678c62f5dce407477f9fdd42873
|
refs/heads/master
| 2020-07-22T06:16:00.361964
| 2018-01-08T09:50:50
| 2018-01-08T09:50:50
| 66,520,404
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
from setuptools import setup, find_packages

# Minimal packaging metadata for the demo "test" package; find_packages()
# picks up every package directory next to this file.
setup(
    name="test",
    version="0.0.1",
    keywords=("test", ),
    description="test package",
    url="https://github.com/sunhuachuang",
    author="sun",
    author_email="huachuang20@gmail.com",
    packages=find_packages()
)
|
[
"huachuang20@gmail.com"
] |
huachuang20@gmail.com
|
2aced5c4391eace15eacc74cf045c411483606cb
|
17993dcca87d490bc9841437309f309a5592ab38
|
/Codes/logistic_regression/mnist_gd_tfdata.py
|
73d698f2f762ef720a43606f260da1deb67e8ee2
|
[] |
no_license
|
dreamlikexin/machine_learning
|
bc86ea15ef8552ad1be78a5bc65fb74a2cdb274e
|
850e87025270847210b6ad188d2da181983a72c7
|
refs/heads/master
| 2022-01-16T09:51:20.538340
| 2019-06-19T16:27:26
| 2019-06-19T16:27:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 760
|
py
|
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
from machine_learning.logistic_regression.lib.logistic_regression_gd import LogisticRegression
import machine_learning.logistic_regression.lib.classification_metrics as metrics
# Load MNIST with integer labels (one_hot=False).
# NOTE(review): tensorflow.examples.tutorials was removed in TF 2.x; this
# script assumes TF 1.x.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=False)
X_train, y_train = mnist.train.images, mnist.train.labels
X_test, y_test = mnist.test.images, mnist.test.labels

# Binarize the task to "is the digit a 6?" and shape labels as column vectors.
# NOTE(review): np.int was removed in NumPy 1.24; requires an older NumPy.
y_train = (y_train == 6).astype(np.int).reshape(-1,1)
y_test = (y_test == 6).astype(np.int).reshape(-1,1)

# Gradient-descent logistic regression: eta = learning rate, N = iterations.
model = LogisticRegression()
model.fit(X_train, y_train, eta=0.01, N=3000)

# Evaluate with cross-entropy on the held-out test set.
proba = model.predict_proba(X_test)
entropy = metrics.cross_entropy(y_test, proba)
print("cross entropy = {}".format(entropy))
|
[
"wanglei@wanglei-mbp.local"
] |
wanglei@wanglei-mbp.local
|
b90c426c5c0356cb66726af358af4424b301208b
|
1c83920efda583d0dcedda2ac9d91235094685e2
|
/web/appauth/urls.py
|
09dc1c47d4ea8a5f66990da9a38e74d9f4098dfe
|
[] |
no_license
|
eshandas/django_project_template
|
d866d2d8c5e206b0430e6130bc470042af50b7fa
|
09786f6201d8e83199a2c0b7a83b6b6b0c8fd285
|
refs/heads/master
| 2022-07-22T14:39:50.521081
| 2019-08-06T11:00:19
| 2019-08-06T11:00:19
| 65,455,207
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 664
|
py
|
from django.urls import path
from .views import (
    Login,
    Logout,
    ForgotPassword,
    ResetPassword,
    ChangePassword,
    UserInfo,
    Dashboard,
)

# URL namespace: reverse these as `appauth:<name>`.
app_name = 'appauth'

# NOTE(review): a tuple works for urlpatterns, though a list is the usual
# Django convention.
urlpatterns = (
    path('login/', Login.as_view(), name='login'),
    path('logout/', Logout.as_view(), name='logout'),
    path('password/forgot/', ForgotPassword.as_view(), name='forgot_password'),
    path('password/reset/', ResetPassword.as_view(), name='reset_password'),
    path('password/change/', ChangePassword.as_view(), name='change_password'),
    path('info/', UserInfo.as_view(), name='userinfo'),
    path('dashboard/', Dashboard.as_view(), name='dashboard'),
)
|
[
"eshandasnit@gmail.com"
] |
eshandasnit@gmail.com
|
44d5f850f17713244033a26d848e397da519eccd
|
af41ca2086f7da6ca036921b2e2cec89e0e5d522
|
/src/Pyro4/utils/flameserver.py
|
8d278f20be023c123746e028a09a4f2d8f4c0f03
|
[
"MIT"
] |
permissive
|
irmen/Pyro4
|
023830905bb0d8fc25aed8e990631268f7fbe52c
|
8ec0db055d76ae1512239710b1e30883ee6bd74b
|
refs/heads/master
| 2023-08-22T10:18:47.878310
| 2023-06-04T16:00:32
| 2023-06-04T16:00:32
| 11,037,154
| 667
| 105
|
MIT
| 2022-06-26T14:23:01
| 2013-06-28T20:25:58
|
Python
|
UTF-8
|
Python
| false
| false
| 2,593
|
py
|
"""
Pyro FLAME: Foreign Location Automatic Module Exposer.
Easy but potentially very dangerous way of exposing remote modules and builtins.
This is the commandline server.
You can start this module as a script from the command line, to easily get a
flame server running:
:command:`python -m Pyro4.utils.flameserver`
or simply: :command:`pyro4-flameserver`
You have to explicitly enable Flame first though by setting the FLAME_ENABLED config item.
Pyro - Python Remote Objects. Copyright by Irmen de Jong (irmen@razorvine.net).
"""
from __future__ import print_function
import sys
import os
import warnings
from Pyro4.configuration import config
from Pyro4 import core
from Pyro4.utils import flame
def main(args=None, returnWithoutLooping=False):
    """Parse command-line options, start a Pyro Flame daemon, and serve requests.

    args: optional argument vector for optparse (None -> sys.argv[1:]).
    returnWithoutLooping: when True, return (daemon, uri) instead of
        entering the request loop; used by the unit tests.
    Returns 0 after the request loop finishes (normal server shutdown).
    """
    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option("-H", "--host", default="localhost", help="hostname to bind server on (default=%default)")
    parser.add_option("-p", "--port", type="int", default=0, help="port to bind server on")
    parser.add_option("-u", "--unixsocket", help="Unix domain socket name to bind server on")
    parser.add_option("-q", "--quiet", action="store_true", default=False, help="don't output anything")
    parser.add_option("-k", "--key", help="the HMAC key to use (deprecated)")
    options, args = parser.parse_args(args)

    if options.key:
        # -k exposes the key in process listings, hence the deprecation.
        warnings.warn("using -k to supply HMAC key on the command line is a security problem "
                      "and is deprecated since Pyro 4.72. See the documentation for an alternative.")

    # The environment variable may supply the key instead; mixing both is an error.
    if "PYRO_HMAC_KEY" in os.environ:
        if options.key:
            raise SystemExit("error: don't use -k and PYRO_HMAC_KEY at the same time")
        options.key = os.environ["PYRO_HMAC_KEY"]

    if not options.quiet:
        print("Starting Pyro Flame server.")

    hmac = (options.key or "").encode("utf-8")
    if not hmac and not options.quiet:
        print("Warning: HMAC key not set. Anyone can connect to this server!")

    config.SERIALIZERS_ACCEPTED = {"pickle"}  # flame requires pickle serializer, doesn't work with the others.
    daemon = core.Daemon(host=options.host, port=options.port, unixsocket=options.unixsocket)
    if hmac:
        daemon._pyroHmacKey = hmac
    uri = flame.start(daemon)

    if not options.quiet:
        print("server uri: %s" % uri)
        print("server is running.")

    if returnWithoutLooping:
        return daemon, uri  # for unit testing
    else:
        daemon.requestLoop()
        daemon.close()
        return 0


if __name__ == "__main__":
    sys.exit(main())
|
[
"irmen@razorvine.net"
] |
irmen@razorvine.net
|
d6a5eef6d6e045d62b9c8acd509ebff2eb2df38e
|
1bd073f585706c31c406bceb81eb400f8ac27c1d
|
/tools/Polygraphy/examples/api/00_inference_with_tensorrt/load_and_run.py
|
e6a2a4542f7d1efcfc9ee553b1ebadc2d5df6384
|
[
"BSD-3-Clause",
"MIT",
"ISC",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
neevaco/TensorRT
|
7b5e54c6a7cc6d0fc545e47ab7cf6656f23d5e19
|
650a4a6ed29403bec1a55663b48ef41a075d0b3c
|
refs/heads/neeva
| 2023-05-29T19:20:26.431716
| 2022-08-19T23:09:26
| 2022-08-26T19:09:39
| 526,771,012
| 0
| 0
|
Apache-2.0
| 2022-08-19T23:09:27
| 2022-08-19T22:49:25
| null |
UTF-8
|
Python
| false
| false
| 1,848
|
py
|
#!/usr/bin/env python3
#
# SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This script loads the TensorRT engine built by `build_and_run.py` and runs inference.
"""
import numpy as np
from polygraphy.backend.common import BytesFromPath
from polygraphy.backend.trt import EngineFromBytes, TrtRunner
def main():
    """Deserialize the engine written by `build_and_run.py` and run one inference."""
    # Compose loaders just like at build time: read the raw bytes from disk,
    # then turn them into a live TensorRT engine.
    engine_loader = EngineFromBytes(BytesFromPath("identity.engine"))

    with TrtRunner(engine_loader) as runner:
        feed = np.ones(shape=(1, 1, 2, 2), dtype=np.float32)

        # NOTE: The runner owns its output buffers and is free to reuse them
        # between infer() calls; use copy.deepcopy() if you need to keep
        # results from several inferences.
        result = runner.infer(feed_dict={"x": feed})

        # The network is an identity, so the output must equal the input.
        assert np.array_equal(result["y"], feed)

        print("Inference succeeded!")
if __name__ == "__main__":
main()
|
[
"rajeevsrao@users.noreply.github.com"
] |
rajeevsrao@users.noreply.github.com
|
c8b052e45e49f5ed4a2b6da595120102c8858fdf
|
aef8eb6681e555ecb61ac67151e4c54d6fdd1023
|
/plots/plotsTommy/reco/limit_from_plot.py
|
af021609fd4aa6c7cba619ecabb02483a7fbc6ee
|
[] |
no_license
|
HephyAnalysisSW/TopEFT
|
0e2dc89f7a43bacf50c77a042f56663e9d4f3404
|
53174807c96dffa6654e4dc63bef92f2b71706ee
|
refs/heads/master
| 2022-11-07T02:41:53.120759
| 2020-03-31T08:08:27
| 2020-03-31T08:08:27
| 98,643,866
| 0
| 3
| null | 2019-10-14T09:02:09
| 2017-07-28T11:38:23
|
Python
|
UTF-8
|
Python
| false
| false
| 6,928
|
py
|
#!/usr/bin/env python
''' Make limit from plot
'''
#
# Standard imports and batch mode
#
import ROOT, os
ROOT.gROOT.SetBatch(True)
ROOT.gROOT.LoadMacro("$CMSSW_BASE/src/StopsDilepton/tools/scripts/tdrstyle.C")
ROOT.setTDRStyle()
import array
from RootTools.core.standard import *
from TopEFT.Tools.user import plot_directory
from TopEFT.Tools.cardFileWriter import cardFileWriter
#
# Arguments
#
import argparse
argParser = argparse.ArgumentParser(description = "Argument parser")
argParser.add_argument('--logLevel', action='store', default='INFO', nargs='?', choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'TRACE', 'NOTSET'], help="Log level for logging")
argParser.add_argument('--input', action='store', default='./mlp1.root', help="Input file.")
argParser.add_argument('--output', action='store', default='./mlp1.txt', help="Output card file.")
argParser.add_argument('--refLumi', action='store', type=float, default=300, help="Lumi used in the input file.")
args = argParser.parse_args()
# Logger
import TopEFT.Tools.logger as logger
import RootTools.core.logger as logger_rt
logger = logger.get_logger( args.logLevel, logFile = None)
logger_rt = logger_rt.get_logger(args.logLevel, logFile = None)
# load histos
import sys  # needed for sys.exit below -- the file header imports only ROOT and os

# Open the input canvas file and extract the stacked TH1F histograms.
gDir = ROOT.gDirectory
rFile = ROOT.TFile.Open( args.input )
if rFile:
    # first & only TObject in file is the canvas
    try:
        canvas = rFile.Get(rFile.GetListOfKeys().At(0).GetName())
        histos = [ canvas.GetListOfPrimitives().At(i) for i in range(canvas.GetListOfPrimitives().GetSize()) ]
        # materialize as a list: the code below indexes/slices `histos`,
        # which would fail with Python 3's lazy filter object
        histos = list(filter( lambda h: type(h)==ROOT.TH1F, histos))
    except Exception:
        # narrowed from a bare `except:` so Ctrl-C still interrupts
        logger.error( "Could not load input file %s", args.input)
        sys.exit(-1)
else:
    logger.error( "Could not load input file %s", args.input)
    sys.exit(-1)
# nicer name
histos[0].SetName( "signal" )
for i_histo, histo in enumerate(histos[1:]):
    histo.SetName("bkg_%i"%i_histo)
# signal is first, the last histo is a copy
logger.info( "Loaded %i histos from file %s", len(histos), args.input)
histos = histos[:-1]
# un-stack: subtract each histogram's successor to undo the cumulative
# stacking of the canvas (presumably drawn as a THStack -- confirm)
for i_histo, histo in enumerate(histos[:-1]):
    histos[i_histo+1].Scale(-1)
    histo.Add( histos[i_histo+1] )
    histos[i_histo+1].Scale(-1)
# compute differences
h_signal, h_backgrounds = histos[0], histos[1:]
logger.info("Total signal %s %f", h_signal.GetName(), h_signal.Integral())
for i_h, h in enumerate(h_backgrounds):
    logger.info( "Total bkg %i %s: %f", i_h, h.GetName(), h.Integral() )

# Build one datacard per tested signal strength and record its NLL.
# NOTE(review): lumi_factor hard-codes a 136/300 rescaling even though a
# --refLumi option (default 300) exists and is never used -- confirm intended.
result = {}
lumi_factor = 136./300.
signal_strengths = [0, 0.25, 0.5, 0.75, 1., 1.5, 2, 2.2]
for signal_strength in signal_strengths:
    c = cardFileWriter.cardFileWriter()
    # flat 10% normalization and up-to-10% shape-like uncertainty per background
    bkg_sys = 1.1
    bkg_shape_sys = 1.1
    for i in range(len(h_backgrounds)):
        c.addUncertainty('bkg_sys_%i'%i, 'lnN')
        c.addUncertainty('bkg_shape_sys_%i'%i, 'lnN')
    c.addUncertainty('sig_sys', 'lnN')
    sig_sys = 1.25
    for i_bin in range(1, 1+h_signal.GetNbinsX()):
        c.addBin('Bin'+str(i_bin), [h.GetName() for h in h_backgrounds], 'Bin'+str(i_bin))
        y_signal = h_signal.GetBinContent(i_bin)
        y_backgrounds = [ h.GetBinContent(i_bin) for h in h_backgrounds ]
        # Assume we observe the background
        # NOTE(review): despite the comment above, the observation also
        # includes the unscaled signal yield (mu=1 Asimov-like) -- confirm.
        c.specifyObservation('Bin'+str(i_bin), int(round(lumi_factor*(y_signal+sum(y_backgrounds)))))
        for i_h, h in enumerate(h_backgrounds):
            c.specifyExpectation('Bin'+str(i_bin), h.GetName(), lumi_factor*h.GetBinContent(i_bin))
            c.specifyUncertainty('bkg_sys_%i'%i_h, 'Bin'+str(i_bin),h.GetName(),bkg_sys)
            # shape-like: the uncertainty grows linearly with the bin index
            c.specifyUncertainty('bkg_shape_sys_%i'%i_h, 'Bin'+str(i_bin),h.GetName(),1+(bkg_shape_sys-1)*(i_bin)/(h_signal.GetNbinsX()))
        c.specifyExpectation('Bin'+str(i_bin), h_signal.GetName(), lumi_factor*signal_strength*h_signal.GetBinContent(i_bin))
        c.specifyUncertainty('sig_sys','Bin'+str(i_bin),h_signal.GetName(),sig_sys)
    c.addUncertainty('Lumi', 'lnN')
    c.specifyFlatUncertainty('Lumi', 1.03)
    c.writeToFile(args.output)
    result[signal_strength] = c.calcNLL(rm=False)
def getIntersections(func, level, x_min=0, x_max=4, stepsize=0.001):
    """Scan [x_min, x_max] in steps and collect x positions where *func* crosses *level*.

    Each step asks ROOT's TF1.GetX for a root of ``func - level`` inside the
    step; the root is kept only if it lies strictly inside the step (with a
    small margin), i.e. GetX actually found a crossing there rather than
    clamping to a boundary.
    """
    crossings = []
    x_val = x_min
    margin = stepsize/10000.
    while x_val < x_max:
        x_val += stepsize
        root = func.GetX(level, x_val-stepsize, x_val)
        if (x_val-stepsize+margin) < root < (x_val-margin):
            crossings.append(root)
    return crossings
# --- plot the likelihood scan q(mu) = -2 ln L -------------------------------
c1 = ROOT.TCanvas()
ROOT.gPad.SetRightMargin(0.15)
x = array.array('d', signal_strengths )
y = array.array('d', [-2*result[strength]['nll'] for strength in signal_strengths] )
g = ROOT.TGraph(len(x),x,y)
#funStr = "[1]*(x-1)+[2]*(x-1)**2+[3]*(x-1)**4"
# smooth the scan with a polynomial centred on mu = 1
funStr = "[0]*(x-1)**2+[1]*(x-1)**3+[2]*(x-1)**4"
fun = ROOT.TF1("name", funStr, 0, signal_strengths[-1])
fun.SetTitle("")
g.Fit(fun)
parameters = [fun.GetParameter(i) for i in range(fun.GetNpar())]
fun.Draw("")
fun.SetLineColor(ROOT.kBlue-2)
fun.GetYaxis().SetRangeUser( 0, 50)
delta = 0.001
x_min, x_max = min(signal_strengths), max(signal_strengths)
# find TF1 segments under threshold level**2
levels = [1, 2, 5]
intervals = {}
for level in levels:
    intersections = getIntersections(fun, level**2, x_min, x_max, delta/20.)
    intervals[level] = []
    for i,v in enumerate(intersections):
        if i > len(intersections)-2: break
        # keep only segments where the fitted curve actually dips below the level
        if fun.GetMinimum(intersections[i], intersections[i+1]) < 0.99:
            #intervals.append((intersections[i], intersections[i+1]))
            intervals[level].append(ROOT.TF1('', funStr, intersections[i], intersections[i+1]))
            intervals[level][-1].SetParameters(*parameters)
# shade the 2-sigma band first so the 1-sigma band is drawn on top of it
for interval in intervals[2]:
    interval.SetFillColorAlpha(ROOT.kYellow,0.9)
    interval.SetLineColor(ROOT.kOrange-2)
    interval.SetFillStyle(1111)
    interval.Draw("f1same")
for interval in intervals[1]:
    interval.SetFillColorAlpha(ROOT.kGreen+1,0.9)
    interval.SetLineColor(ROOT.kCyan-3)
    interval.SetFillStyle(1111)
    interval.Draw("f1same")
# dashed horizontal guide lines at 1..5 sigma (q = level^2), with labels
stuff = []
tex = ROOT.TLatex()
#tex.SetNDC()
tex.SetTextSize(0.03)
tex.SetTextColor(ROOT.kGray+2)
#tex.SetTextAngle(90)
tex.SetTextAlign(12) # align right
for level in [ 1, 2, 3, 4, 5 ]:
    l = ROOT.TLine(x_min, level**2, x_max, level**2)
    l.SetLineStyle( ROOT.kDashed )
    l.SetLineColor(ROOT.kGray+2)
    stuff.append(l)
    l.Draw("same")
    tex.DrawLatex(x_max*2.2/2.5, level**2+1, "%i#sigma"%level)
tex.Draw()
stuff.append(tex)
# axis cosmetics, then write the canvas out in three formats
fun.GetYaxis().SetTitle("q")
fun.GetXaxis().SetTitle("#mu_{tWZ}")
fun.GetXaxis().SetLabelSize(0.04)
fun.GetYaxis().SetLabelSize(0.04)
fun.SetLineWidth(2)
fun.Draw("same")
c1.Print("/afs/hephy.at/user/r/rschoefbeck/www/etc/ll.png")
c1.Print("/afs/hephy.at/user/r/rschoefbeck/www/etc/ll.pdf")
c1.Print("/afs/hephy.at/user/r/rschoefbeck/www/etc/ll.root")
|
[
"robert.schoefbeck@cern.ch"
] |
robert.schoefbeck@cern.ch
|
17d31d1319d4f86626104931e9945e669c8ebd41
|
ef41528b736f0ac7927fb110211f016f51362b9a
|
/Mux_src/aScript_Add_Songs_in_Path.py
|
de61edc3f4d8b63faeb42e08fd424baa9ff9b05d
|
[] |
no_license
|
rduvalwa5/Mux
|
d243edb10121dcd991f5129c367bf918c4bd31e7
|
2dba11861f91e4bdc1ef28279132a6d8dd4ccf54
|
refs/heads/master
| 2023-05-10T06:53:20.249532
| 2023-04-26T23:53:21
| 2023-04-26T23:53:21
| 73,444,154
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 603
|
py
|
'''
Created on Mar 16, 2017

@author: rduvalwa2
'''
from Music_Get_Functions import musicGet_Functions

if __name__ == "__main__":
    # Register one Statler Brothers album (and its artist) in the music DB.
    mux = musicGet_Functions()
    myPath = "/Users/rduvalwa2/Music/Music/Media.localized/The Statler Brothers/The Best of the Statler Bros_"
    album = "The Best of the Statler Bros_"
    artist = "The Statler Brothers"
    genre = "Country"
    inType = "Itunes"
    Medium = 'Download'
    # per-song import is disabled; only album/artist records are added
    # mux.add_songs_in_path(myPath, album, artist, genre, inType, Medium)
    mux.add_album(album, artist, genre, inType, Medium)
    mux.add_artist(artist, genre)
|
[
"rduvalwa5@hotmail.com"
] |
rduvalwa5@hotmail.com
|
9176f63c57c1e41f0f2eda2e4230db2966440a0b
|
64bf39b96a014b5d3f69b3311430185c64a7ff0e
|
/intro-ansible/venv2/lib/python3.8/site-packages/ansible/plugins/test/mathstuff.py
|
b8251ad42ca082239b0a74e1ffec429da98f11f7
|
[
"MIT"
] |
permissive
|
SimonFangCisco/dne-dna-code
|
7072eba7da0389e37507b7a2aa5f7d0c0735a220
|
2ea7d4f00212f502bc684ac257371ada73da1ca9
|
refs/heads/master
| 2023-03-10T23:10:31.392558
| 2021-02-25T15:04:36
| 2021-02-25T15:04:36
| 342,274,373
| 0
| 0
|
MIT
| 2021-02-25T14:39:22
| 2021-02-25T14:39:22
| null |
UTF-8
|
Python
| false
| false
| 1,327
|
py
|
# (c) 2016, Ansible, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import math
def issubset(a, b):
    """Jinja2 test: True when every element of *a* also occurs in *b*."""
    return set(b) >= set(a)
def issuperset(a, b):
    """Jinja2 test: True when *a* contains every element of *b*."""
    return set(b) <= set(a)
def isnotanumber(x):
    """Jinja2 test: True only for a NaN value.

    Inputs that are not numbers at all (strings, lists, ...) make
    math.isnan raise TypeError and are therefore reported as False.
    """
    try:
        nan = math.isnan(x)
    except TypeError:
        # not numeric, hence it cannot be NaN
        return False
    return nan
class TestModule:
    ''' Ansible math jinja2 tests '''

    def tests(self):
        """Return the mapping of exposed jinja2 test names to their callables."""
        # each implementation is published under a long and a short alias
        exported = (
            (('issubset', 'subset'), issubset),
            (('issuperset', 'superset'), issuperset),
            (('isnan', 'nan'), isnotanumber),
        )
        return {alias: fn for aliases, fn in exported for alias in aliases}
|
[
"sifang@cisco.com"
] |
sifang@cisco.com
|
4169ad39d56c6d5f14227d8946f7bbe55d190baf
|
c3432a248c8a7a43425c0fe1691557c0936ab380
|
/2022/two_pointer/1806_부분합.py
|
b9e0e04008b3c43ba286237ccdafd8891c2652f6
|
[] |
no_license
|
Parkyunhwan/BaekJoon
|
13cb3af1f45212d7c418ecc4b927f42615b14a74
|
9a882c568f991c9fed3df45277f091626fcc2c94
|
refs/heads/master
| 2022-12-24T21:47:47.052967
| 2022-12-20T16:16:59
| 2022-12-20T16:16:59
| 232,264,447
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
import sys

# BOJ 1806 "partial sum": length of the shortest contiguous subarray of
# arr whose sum is at least S (prints 0 when no such subarray exists).
N, S = map(int, input().split())
arr = list(map(int, input().split()))

def solve():
    """Two-pointer sweep; returns sys.maxsize when no window qualifies."""
    left = 0
    right = 0
    sum_value = 0  # sum of the current window arr[left:right]
    min_length = sys.maxsize
    while left < N:
        if sum_value < S:
            # window sum too small: grow to the right, or stop at the end
            if right == N:
                break
            sum_value += arr[right]
            right += 1
        elif sum_value >= S:
            # window qualifies: record its length, then shrink from the left
            sum_value -= arr[left]
            min_length = min(min_length, right - left)
            left += 1
    return min_length

result = solve()
if result != sys.maxsize:
    print(result)
else:
    # no contiguous subarray reaches S
    print(0)
|
[
"pyh8618@gmail.com"
] |
pyh8618@gmail.com
|
f8455f61ade57470067945166776dbd8bbb6fabf
|
bec402eb6b6ae6cecf53a6d3190568526584a18c
|
/coursera/models.py
|
cc8c7cf0c31d1d9e816567887ed6636eeebeb5ce
|
[] |
no_license
|
Lozoniuk/bionic_python_django
|
c29ab730bbeacb7d80be935650a3e9b36be3b679
|
80755b7524d650d99effdf69cc243bd0cdf9b7f5
|
refs/heads/master
| 2020-04-01T21:25:09.823735
| 2014-11-20T12:23:27
| 2014-11-20T12:23:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 197
|
py
|
from django.db import models
from django.contrib.auth.models import User
class Student(User):
    """Course participant; extends Django's auth User via multi-table inheritance."""
    # name of the group the student belongs to
    group = models.CharField(max_length=200)

    def __unicode__(self):
        # Python 2-style display name: the login of the underlying User
        return self.username
|
[
"bohdan.korniyenko@gmail.com"
] |
bohdan.korniyenko@gmail.com
|
b243c1eb7fd87338027443c852b9034370f180e6
|
cc81cc2e5b52bbfe4d13ed6b37859965f7e9408a
|
/resources/tests.py
|
0bdef30d6c623104d15067e803172c8249d07683
|
[] |
no_license
|
youpiyoful/pythonclassmates
|
edb966e3cdf71d00277e49efb1e16aa4c16bbb5e
|
08fae9c919b6376239c150697dc9719520c06b1c
|
refs/heads/master
| 2023-02-14T17:22:09.349226
| 2021-01-14T02:56:16
| 2021-01-14T02:56:16
| 323,906,893
| 0
| 1
| null | 2021-01-14T01:21:34
| 2020-12-23T13:17:05
|
Python
|
UTF-8
|
Python
| false
| false
| 2,896
|
py
|
"""Integration tests of the resources app pages."""
from wagtail.images.blocks import ImageChooserBlock
from wagtail.tests.utils import WagtailPageTests
from wagtail.tests.utils.form_data import nested_form_data, streamfield
from blog.models import BlogPage, PostPage
from home.models import HomePage
from events.models import EventPage, EventsPage
from resources.models import ResourcesPage, ResourcePage
# from flex.models import FlexPage
class ResourcesPageTests(WagtailPageTests):
    """Resources page behavior test."""

    def test_can_create_only_resource_page(self):
        """test we can create only resourcePage from ResourcesPage"""
        # a ResourcesPage must not be creatable under any of these parents
        self.assertCanNotCreateAt(ResourcesPage, HomePage)
        self.assertCanNotCreateAt(ResourcesPage, EventPage)
        self.assertCanNotCreateAt(ResourcesPage, EventsPage)
        self.assertCanNotCreateAt(ResourcesPage, BlogPage)
        # self.assertCanNotCreateAt(BlogPage, FlexPage)
        self.assertCanNotCreateAt(ResourcesPage, ResourcesPage)
        # NOTE(review): this line asserts the *blog* hierarchy (BlogPage/PostPage);
        # a resources test would be expected to check ResourcesPage/{ResourcePage}
        # -- confirm it is not a copy-paste leftover from the blog tests.
        self.assertAllowedSubpageTypes(BlogPage, {PostPage})

    def test_can_create_resource_page(self):
        """
        We are testing that it's possible to create
        a resource page from the resources part.
        """
        self.assertCanCreateAt(ResourcesPage, ResourcePage)
class ResourcePageTests(WagtailPageTests):
    """Resource page behavior test."""

    def test_can_not_create_any_page(self):
        """
        we are testing that no child page
        can be created from resourcePage.
        """
        # a ResourcePage is a leaf: nothing may be created beneath it
        self.assertCanNotCreateAt(ResourcePage, HomePage)
        self.assertCanNotCreateAt(ResourcePage, EventPage)
        self.assertCanNotCreateAt(ResourcePage, EventsPage)
        self.assertCanNotCreateAt(ResourcePage, BlogPage)
        self.assertCanNotCreateAt(ResourcePage, PostPage)
        # self.assertCanNotCreateAt(PostPage, FlexPage)
        self.assertCanNotCreateAt(ResourcePage, ResourcesPage)
        self.assertAllowedSubpageTypes(ResourcePage, {})

    def test_can_only_be_created_in_resources_page_parent(self):
        """
        Test that the resource page cannot be
        created in a parent other than the resourcespage.
        """
        self.assertAllowedParentPageTypes(
            ResourcePage, {ResourcesPage}
        )
# def test_can_create_post_page(self):
# """ Test PostPageCreation are ok"""
# # Assert that a ContentPage can be made here, with this POST data
# self.assertCanCreate(BlogPage, PostPage, nested_form_data({
# 'custom_title': 'About us',
# 'content': streamfield([
# ('text', 'Lorem ipsum dolor sit amet'),
# ])
# # 'blog_image': ImageChooserBlock
# }))
# custom_title
# blog_image
# description
# content
# categories
# tags
# content_panels
|
[
"yoanfornari@gmail.com"
] |
yoanfornari@gmail.com
|
2be30b19aaefb84da5148586d9978e42bae4eaf9
|
48e124e97cc776feb0ad6d17b9ef1dfa24e2e474
|
/sdk/python/pulumi_azure_native/insights/get_activity_log_alert.py
|
d81b36925eeb8c51d68e403b066ae1877a85315e
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
bpkgoud/pulumi-azure-native
|
0817502630062efbc35134410c4a784b61a4736d
|
a3215fe1b87fba69294f248017b1591767c2b96c
|
refs/heads/master
| 2023-08-29T22:39:49.984212
| 2021-11-15T12:43:41
| 2021-11-15T12:43:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,196
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetActivityLogAlertResult',
'AwaitableGetActivityLogAlertResult',
'get_activity_log_alert',
'get_activity_log_alert_output',
]
@pulumi.output_type
class GetActivityLogAlertResult:
"""
An Activity Log Alert rule resource.
"""
def __init__(__self__, actions=None, condition=None, description=None, enabled=None, id=None, location=None, name=None, scopes=None, tags=None, type=None):
if actions and not isinstance(actions, dict):
raise TypeError("Expected argument 'actions' to be a dict")
pulumi.set(__self__, "actions", actions)
if condition and not isinstance(condition, dict):
raise TypeError("Expected argument 'condition' to be a dict")
pulumi.set(__self__, "condition", condition)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if enabled and not isinstance(enabled, bool):
raise TypeError("Expected argument 'enabled' to be a bool")
pulumi.set(__self__, "enabled", enabled)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if scopes and not isinstance(scopes, list):
raise TypeError("Expected argument 'scopes' to be a list")
pulumi.set(__self__, "scopes", scopes)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def actions(self) -> 'outputs.ActionListResponse':
"""
The actions that will activate when the condition is met.
"""
return pulumi.get(self, "actions")
@property
@pulumi.getter
def condition(self) -> 'outputs.AlertRuleAllOfConditionResponse':
"""
The condition that will cause this alert to activate.
"""
return pulumi.get(self, "condition")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
A description of this Activity Log Alert rule.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
"""
Indicates whether this Activity Log Alert rule is enabled. If an Activity Log Alert rule is not enabled, then none of its actions will be activated.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def id(self) -> str:
"""
The resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The location of the resource. Since Azure Activity Log Alerts is a global service, the location of the rules should always be 'global'.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def scopes(self) -> Sequence[str]:
"""
A list of resource IDs that will be used as prefixes. The alert will only apply to Activity Log events with resource IDs that fall under one of these prefixes. This list must include at least one item.
"""
return pulumi.get(self, "scopes")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The tags of the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetActivityLogAlertResult(GetActivityLogAlertResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetActivityLogAlertResult(
actions=self.actions,
condition=self.condition,
description=self.description,
enabled=self.enabled,
id=self.id,
location=self.location,
name=self.name,
scopes=self.scopes,
tags=self.tags,
type=self.type)
def get_activity_log_alert(activity_log_alert_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetActivityLogAlertResult:
"""
An Activity Log Alert rule resource.
API Version: 2020-10-01.
:param str activity_log_alert_name: The name of the Activity Log Alert rule.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['activityLogAlertName'] = activity_log_alert_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:insights:getActivityLogAlert', __args__, opts=opts, typ=GetActivityLogAlertResult).value
return AwaitableGetActivityLogAlertResult(
actions=__ret__.actions,
condition=__ret__.condition,
description=__ret__.description,
enabled=__ret__.enabled,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
scopes=__ret__.scopes,
tags=__ret__.tags,
type=__ret__.type)
@_utilities.lift_output_func(get_activity_log_alert)
def get_activity_log_alert_output(activity_log_alert_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetActivityLogAlertResult]:
"""
An Activity Log Alert rule resource.
API Version: 2020-10-01.
:param str activity_log_alert_name: The name of the Activity Log Alert rule.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
|
[
"noreply@github.com"
] |
bpkgoud.noreply@github.com
|
e497ee7c6fc1d99e7cec72d1a9672e3eaba1d033
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p04001/s666000652.py
|
8febb46752a063e6eaf83e2495a6012824ec9336
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,148
|
py
|
# -*- coding: utf-8 -*-
# Insert '+' into the digit string S in every possible way and print the
# sum of all resulting expressions.
S = input()
n = len(S)
sum_formula = 0
# exhaustive bit search: bit k of `bit` decides whether a '+' is inserted
# into the k-th gap between adjacent digits
for bit in range(2 ** (n-1)):
    nums = []   # finished terms of the current expression
    num = S[0]  # digit run currently being extended
    # walk the gaps; the shift selects which bit (gap) is inspected
    for shift in range(n-1):
        if not ((bit >> shift)&1):
            # gap stays closed: the next digit extends the current run
            num = num + S[shift+1]
        else:
            # '+' in this gap: close the run and start a new one
            nums.append(int(num))
            num = S[shift+1]
    nums.append(int(num))
    sum_formula += sum(nums)
print(sum_formula)
# recursive alternative to the bit search above
def calc_all_formula(S, i, lists):
    """Recursively enumerate every way of inserting '+' into the digit string S.

    Each element of *lists* is a tuple ``(numbers, pending)`` where *numbers*
    holds the terms already closed off and *pending* is the digit run still
    being extended.  Position *i* is the gap between S[i] and S[i+1]; every
    partial state either closes *pending* there (a '+' is inserted) or
    appends the next digit to it.  Returns plain lists of integer terms.
    """
    # branch 1: a '+' sits in gap i -> pending becomes a finished term
    plus_here = [(nums + [int(pending)], S[i+1]) for nums, pending in lists]
    # branch 2: no '+' -> the next digit extends the pending run
    extend_here = [(nums, pending + S[i+1]) for nums, pending in lists]
    expanded = plus_here + extend_here
    # base case: last gap reached -- close every pending run
    if i == len(S)-2:
        return [nums + [int(pending)] for nums, pending in expanded]
    return calc_all_formula(S, i+1, expanded)
# base_cand = [([], S[0])]
# if len(S) >= 2:
# sum_formula = sum([sum(lst) for lst in calc_all_formula(S, 0, base_cand)])
# print(sum_formula)
# else:
# print(int(S))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
627bfc86c47a4d66c3aa8d88de13b29fe6301b04
|
2a171178942a19afe9891c2425dce208ae04348b
|
/kubernetes/test/test_v1beta1_job_status.py
|
cfc5fd57f823f4d33a882fee43bf94f753ee86de
|
[
"Apache-2.0"
] |
permissive
|
ouccema/client-python
|
ac3f1dee1c5ad8d82f15aeecb87a2f5f219ca4f4
|
d7f33ec53e302e66674df581904a3c5b1fcf3945
|
refs/heads/master
| 2021-01-12T03:17:54.274888
| 2017-01-03T22:13:14
| 2017-01-03T22:13:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,436
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.5.0-snapshot
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_job_status import V1beta1JobStatus
class TestV1beta1JobStatus(unittest.TestCase):
    """ V1beta1JobStatus unit test stubs """
    # Swagger-generated placeholder suite: setUp/tearDown do nothing and the
    # single test only checks that the model class can be instantiated.

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testV1beta1JobStatus(self):
        """
        Test V1beta1JobStatus
        """
        model = kubernetes.client.models.v1beta1_job_status.V1beta1JobStatus()
if __name__ == '__main__':
unittest.main()
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
c24bb6886a48aaa9e47428e2df5af60d5ed73c55
|
63daf225819636397fda6ef7e52783331c27f295
|
/taobao-sdk/top/api/rest/ItemcatsAuthorizeGetRequest.py
|
fd5db84b60c429ab09a6969487e4d598ee750905
|
[] |
no_license
|
cash2one/language-Python
|
e332ecfb4e9321a11407b29987ee64d44e552b15
|
8adb4f2fd2f023f9cc89b4edce1da5f71a3332ab
|
refs/heads/master
| 2021-06-16T15:15:08.346420
| 2017-04-20T02:44:16
| 2017-04-20T02:44:16
| 112,173,361
| 1
| 0
| null | 2017-11-27T09:08:57
| 2017-11-27T09:08:57
| null |
UTF-8
|
Python
| false
| false
| 310
|
py
|
'''
Created by auto_sdk on 2014.03.04
'''
from top.api.base import RestApi
class ItemcatsAuthorizeGetRequest(RestApi):
    """TOP request wrapper for the taobao.itemcats.authorize.get API."""
    def __init__(self,domain='gw.api.taobao.com',port=80):
        RestApi.__init__(self,domain, port)
        # response-field filter; None means the gateway default
        self.fields = None

    def getapiname(self):
        # remote method name the TOP gateway dispatches on
        return 'taobao.itemcats.authorize.get'
|
[
"a@ie9.org"
] |
a@ie9.org
|
c8085220e7b6b96d11824d7bb4b34a4c007bc174
|
1cceb7c7e6d708dc9de0084739f582ccde2633a3
|
/examples/test_no_sensors.py
|
647a1dd10226b958ce9207ed3fbddf1dcae15cd9
|
[
"BSD-3-Clause"
] |
permissive
|
zjtomy/python-rpi-greenhouse
|
5144d0267aecfa2de240832f3350ec67a5e23955
|
e263f3b1c933e2d5a174f5b1de1cbb9fc3e8e56f
|
refs/heads/master
| 2020-06-28T08:11:48.233302
| 2015-12-04T13:40:25
| 2015-12-04T13:40:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 167
|
py
|
from rpi_greenhouse import GreenhouseIndicator
from time import sleep

# Example without sensor readings: simply refresh the indicator LEDs
# from the stored greenhouse status every 5 seconds, forever.
indicator = GreenhouseIndicator()

while True:
    indicator.show_status_on_leds()
    sleep(5)
|
[
"ben@bennuttall.com"
] |
ben@bennuttall.com
|
72763d657d04cba848de805950347bceacb614cb
|
79aa4b99a48bb16a907916ad63c902443420541a
|
/0022.py
|
d6ffc37f8e6df753e4e3fb2d944ef83a56d7bdfd
|
[] |
no_license
|
mach8686devops/leetcode-100
|
62dec66c719d7cfa120ca9505701df49d8d5b982
|
f90526c9b073165b86b933cdf7d1dc496e68f2c6
|
refs/heads/main
| 2023-04-11T06:28:15.059587
| 2021-04-13T12:11:54
| 2021-04-13T12:11:54
| 329,346,572
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 872
|
py
|
from functools import lru_cache
from typing import List
class Solution:
    """LeetCode 22: generate all well-formed parenthesis strings."""

    def generateParenthesis(self, n: int) -> List[str]:
        """Return every distinct string of *n* balanced parenthesis pairs.

        Memoization lives on a module-level helper instead of the method:
        ``@lru_cache`` on an instance method keys the cache on ``self`` and
        keeps every Solution instance alive for the cache's lifetime, and a
        cached mutable list could be corrupted by a caller mutating the
        returned value.  A fresh list is returned on every call.
        """
        return list(_balanced(n))


@lru_cache(maxsize=None)
def _balanced(n):
    """Tuple of all balanced strings of *n* pairs (cached, immutable)."""
    # Every balanced string of n pairs decomposes uniquely as "(A)B",
    # where A uses c pairs and B uses the remaining n-1-c pairs.
    if n == 0:
        return ('',)
    combos = []
    for c in range(n):
        for left in _balanced(c):
            for right in _balanced(n - 1 - c):
                combos.append('({}){}'.format(left, right))
    return tuple(combos)
# 回溯法的代码套路是使用两个变量: res 和 path,res 表示最终的结果,
# path 保存已经走过的路径。如果搜到一个状态满足题目要求,就把 path 放到 res 中。
#
# 代码后面的判断条件都是 if,而不是 elif,因为是满足两个条件的任意一个就可以继续向下搜索,
# 而不是同时只能满足其中的一个。
print(Solution().generateParenthesis(n=3))
|
[
"zhangjohn202@gmail.com"
] |
zhangjohn202@gmail.com
|
3e2c7d114bfedc67dae2e3410fac2792652dc324
|
a7b66311c2ce113789933ec3162f1128b2862f13
|
/numeric/scripts/ode_ivp.py
|
2441368ba7dc04cf56439a1acc9e817fc837164e
|
[
"MIT"
] |
permissive
|
ChanJeunlam/geolearn
|
214b2c42359ea1164b39117fad2d7470adeb6d35
|
791caa54eb70920823ea7d46714dc8a3e7fa7445
|
refs/heads/master
| 2023-07-16T04:13:15.526364
| 2021-08-16T05:24:18
| 2021-08-16T05:24:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,155
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from numeric import ode
# ODE system using euler forwarding
def F(t):
return np.array([t*np.exp(-2*t), np.exp(-t)])
def dF(t, y):
return np.array([-y[0]*2+y[1]**2, y[0]-y[1]-t*y[1]**2])
ny = 2
t = np.arange(0, 10, 0.01)
y = np.ndarray([len(t), ny])
y[0, :] = [0, 1]
for k in range(1, len(t)):
yy = y[k-1, :]
tt = t[k-1]
dt = t[k]-t[k-1]
y[k, :] = ode.forward2(tt, yy, dt, dF)
fig, ax = plt.subplots(1, 1)
f = F(t)
ax.plot(t, f[0, :], 'b')
ax.plot(t, f[1, :], 'b')
ax.plot(t, y[:, 0], 'r')
ax.plot(t, y[:, 1], 'r')
fig.show()
# throw
g = 10
y0 = [10, 0]
def F(t):
    """Analytic throw trajectory [velocity, height] for the module-level y0.

    NOTE: shadows the earlier F; reads the globals g and y0.
    """
    velocity = -g * t + y0[0]
    height = -1 / 2 * g * t ** 2 + y0[0] * t + y0[1]
    return np.array([velocity, height]).T
def dF(t, y):
    """RHS of the throw ODE: dv/dt = -g, dh/dt = velocity (reads global g)."""
    velocity = y[0]
    return np.array([-g, velocity])
def throw(y0, t1, dt=0.01):
    """Integrate the throw ODE with forward Euler (ode.forward1).

    Parameters
    ----------
    y0 : sequence of 2 floats
        Initial state [velocity, height] (shadows the module-level y0).
    t1 : float
        End time of the integration.
    dt : float, optional
        Step size of the time grid (default 0.01).

    Returns
    -------
    (t, y) : the time grid and the (len(t), 2) state history.
    """
    ny = 2
    # BUG FIX: the grid spacing was hard-coded to 0.01, silently ignoring
    # the `dt` argument; use the parameter so callers can control the step.
    t = np.arange(0, t1, dt)
    y = np.ndarray([len(t), ny])
    y[0, :] = y0
    for k in range(1, len(t)):
        step = t[k] - t[k - 1]
        y[k, :] = ode.forward1(t[k - 1], y[k - 1, :], step, dF)
    return t, y
# Integrate a 1-second throw and plot exact (blue) vs. Euler (red).
t, y = throw(y0, 1)
fig, ax = plt.subplots(1, 1)
f = F(t)
ax.plot(t, f, 'b')
ax.plot(t, y, 'r')
fig.show()
|
[
"geofkwai@gmail.com"
] |
geofkwai@gmail.com
|
efaa18429722ce4503c8ba19e35771809d9e0396
|
e6e57bf7d4eda37f1188ab72ff249675f40029ee
|
/algorithms_and_data_structures/strings/Longest Substring Without Repeating Chars.py
|
eb64961b7d75fe8faebb2d570e7abbd1b9e84eb2
|
[] |
no_license
|
juanpedrovel/bomboclap
|
4e186331ef1c26c8522e44c21d6a33358471786b
|
99db02266c31dd14357ef6a575d35fcf55718617
|
refs/heads/master
| 2020-04-19T21:16:38.141830
| 2019-01-31T00:31:24
| 2019-01-31T00:31:24
| 168,436,881
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 662
|
py
|
class Solution:
    def lengthOfLongestSubstring(self, s):
        """
        Length of the longest substring of s with no repeated characters.

        Sliding-window scan: `start` is the left edge of the current
        duplicate-free window and `last_seen` maps each character to the
        index of its most recent occurrence.  O(len(s)) time, O(alphabet)
        space.

        :type s: str
        :rtype: int
        """
        if not s:
            return 0
        last_seen = {}
        best = 0
        start = 0
        for pos, ch in enumerate(s):
            if ch in last_seen:
                # Window closes here; record its length if it is a record.
                window = pos - start
                if window > best:
                    best = window
                # Move the left edge past the previous occurrence, but
                # never backwards (the old occurrence may predate `start`).
                start = max(last_seen[ch] + 1, start)
            last_seen[ch] = pos
        # The final (still open) window may be the longest one.
        return max(best, len(s) - start)
time = "abba"
k = 2
d = Solution()
print(d.lengthOfLongestSubstring(time))
|
[
"juanpedrovel@gmail.com"
] |
juanpedrovel@gmail.com
|
dd8ba826e8603a45322205319804915132768d87
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/SettleCardInfo.py
|
c423d25f0cd7537346ab3f05842c9ac0b29eede3
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 6,111
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class SettleCardInfo(object):
    """Settlement bank-card info for the Alipay open API.

    Plain data holder: each field is exposed as a read/write property
    backed by a ``_<name>`` attribute, and converted to / from the wire
    ``dict`` shape by :meth:`to_alipay_dict` / :meth:`from_alipay_dict`.
    The wire key of every field is identical to its attribute name.

    The original (apparently generated) class repeated the same property /
    serialize / deserialize boilerplate ten times; the field list below is
    now the single source of truth, which removes ~200 duplicated lines
    while keeping the public attribute API unchanged.
    """

    # Every serializable field, in the original declaration order.
    _FIELD_NAMES = (
        'account_branch_name',
        'account_holder_name',
        'account_inst_city',
        'account_inst_id',
        'account_inst_name',
        'account_inst_province',
        'account_no',
        'account_type',
        'bank_code',
        'usage_type',
    )

    def __init__(self):
        # All fields start unset (None), stored under the '_'-prefixed slot.
        for _name in self._FIELD_NAMES:
            setattr(self, '_' + _name, None)

    def to_alipay_dict(self):
        """Serialize to the dict shape expected by the Alipay gateway.

        Falsy fields (None, '') are omitted, matching the original
        behaviour; values exposing a ``to_alipay_dict`` method (nested
        API models) are serialized recursively.
        """
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue  # unset/empty fields are left out of the payload
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a SettleCardInfo from a response dict; None/empty in, None out."""
        if not d:
            return None
        o = SettleCardInfo()
        for name in SettleCardInfo._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o


def _settle_card_property(name):
    """Return a read/write property proxying the '_<name>' storage slot."""
    private = '_' + name

    def _get(self):
        return getattr(self, private)

    def _set(self, value):
        setattr(self, private, value)

    return property(_get, _set)


# Install one property per field so the public attribute API
# (card.account_no, card.bank_code = ..., etc.) is unchanged.
for _field in SettleCardInfo._FIELD_NAMES:
    setattr(SettleCardInfo, _field, _settle_card_property(_field))
del _field
|
[
"liuqun.lq@alibaba-inc.com"
] |
liuqun.lq@alibaba-inc.com
|
e137ef7c38d70656f375646a2aa4195efccc728c
|
6d54a7b26d0eb82152a549a6a9dfde656687752c
|
/src/controller/python/test/test_scripts/commissioning_window_test.py
|
6a113aede20baf05365072828d9a20694ebdf7a0
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
project-chip/connectedhomeip
|
81a123d675cf527773f70047d1ed1c43be5ffe6d
|
ea3970a7f11cd227ac55917edaa835a2a9bc4fc8
|
refs/heads/master
| 2023-09-01T11:43:37.546040
| 2023-09-01T08:01:32
| 2023-09-01T08:01:32
| 244,694,174
| 6,409
| 1,789
|
Apache-2.0
| 2023-09-14T20:56:31
| 2020-03-03T17:05:10
|
C++
|
UTF-8
|
Python
| false
| false
| 4,141
|
py
|
#!/usr/bin/env python3
#
# Copyright (c) 2022 Project CHIP Authors
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Commissioning test.
import asyncio
import os
import sys
from optparse import OptionParser
from base import BaseTestHelper, FailIfNot, TestFail, TestTimeout, logger
# The Thread network dataset TLV for testing, split into T-L-V chunks.
TEST_THREAD_NETWORK_DATASET_TLV = "0e080000000000010000" + \
    "000300000c" + \
    "35060004001fffe0" + \
    "0208fedcba9876543210" + \
    "0708fd00000000001234" + \
    "0510ffeeddccbbaa99887766554433221100" + \
    "030e54657374696e674e6574776f726b" + \
    "0102d252" + \
    "041081cb3b2efa781cc778397497ff520fa50c0302a0ff"
# Network id for the thread network; currently a constant, to be replaced
# by the XPANID of the thread network.
TEST_THREAD_NETWORK_ID = "fedcba9876543210"
# NOTE(review): the four constants below are not referenced anywhere in
# this script; presumably kept for parity with sibling test scripts --
# confirm before removing.
TEST_DISCRIMINATOR = 3840
ENDPOINT_ID = 0
LIGHTING_ENDPOINT_ID = 1
GROUP_ID = 0
async def main():
    """Commissioning-window test driver.

    Flow: parse CLI options, commission the device-under-test from a first
    controller (PASE + commissioning), create a controller on a second
    fabric, then exercise RevokeCommissioning and the Enhanced
    Commissioning Window.  Force-exits the whole process at the end (see
    the note above os._exit below).
    """
    optParser = OptionParser()
    optParser.add_option(
        "-t",
        "--timeout",
        action="store",
        dest="testTimeout",
        default=75,
        type='int',
        help="The program will return with timeout after specified seconds.",
        metavar="<timeout-second>",
    )
    optParser.add_option(
        "--address",
        action="store",
        dest="deviceAddress",
        default='',
        type='str',
        help="Address of the first device",
    )
    optParser.add_option(
        "-p",
        "--paa-trust-store-path",
        action="store",
        dest="paaTrustStorePath",
        default='',
        type='str',
        help="Path that contains valid and trusted PAA Root Certificates.",
        metavar="<paa-trust-store-path>"
    )
    # remainingArgs is unused; kept only to unpack parse_args' pair.
    (options, remainingArgs) = optParser.parse_args(sys.argv[1:])

    # Watchdog aborting the run after --timeout seconds.
    timeoutTicker = TestTimeout(options.testTimeout)
    timeoutTicker.start()

    test = BaseTestHelper(
        nodeid=112233, paaTrustStorePath=options.paaTrustStorePath, testCommissioner=False)

    # Hand the device the Thread operational dataset it should join.
    FailIfNot(test.SetNetworkCommissioningParameters(dataset=TEST_THREAD_NETWORK_DATASET_TLV),
              "Failed to finish network commissioning")

    logger.info("Commissioning DUT from first commissioner")
    # Establish a PASE session with the well-known test setup PIN, then commission.
    FailIfNot(test.TestPaseOnly(ip=options.deviceAddress, setuppin=20202021, nodeid=1),
              "Unable to establish PASE connection to device")
    FailIfNot(test.TestCommissionOnly(nodeid=1), "Unable to commission device")

    logger.info("Creating controller on a new fabric")
    FailIfNot(test.CreateNewFabricController(), "Unable to create new controller")

    logger.info("Testing RevokeCommissioning")
    FailIfNot(await test.TestRevokeCommissioningWindow(ip=options.deviceAddress,
                                                      setuppin=20202021,
                                                      nodeid=1),
              "RevokeCommissioning test failed")

    logger.info("Test Enhanced Commissioning Window")
    FailIfNot(test.TestEnhancedCommissioningWindow(ip=options.deviceAddress, nodeid=1), "EnhancedCommissioningWindow open failed")

    timeoutTicker.stop()

    logger.info("Test finished")

    # TODO: Python device controller cannot be shutdown clean sometimes and will block on AsyncDNSResolverSockets shutdown.
    # Call os._exit(0) to force close it.
    os._exit(0)
if __name__ == "__main__":
try:
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
except Exception as ex:
logger.exception(ex)
TestFail("Exception occurred when running tests.")
|
[
"noreply@github.com"
] |
project-chip.noreply@github.com
|
d13a948f9a23c48970fce20715c43920dd560cef
|
cb4cfcece4bc14f591b038adbc7fadccaf447a1d
|
/FFL.py
|
ca7c42027fa2499d1ac1f01774358f322ab53de3
|
[] |
no_license
|
psycho-pomp/CodeChef
|
ba88cc8e15b3e87d39ad0c4665c6892620c09d22
|
881edddded0bc8820d22f42b94b9959fd6912c88
|
refs/heads/master
| 2023-03-21T06:46:14.455055
| 2021-03-11T12:07:48
| 2021-03-11T12:07:48
| 275,214,989
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
# cook your dish here
# For each test case: can we afford the cheapest defender plus the cheapest
# forward with the s coins already spent, given a budget of 100?
for _ in range(int(input())):
    n, s = map(int, input().split())
    prices = list(map(int, input().split()))
    roles = list(map(int, input().split()))
    # 101 acts as a "none found" sentinel, since every price is at most 100.
    best_defender = 101
    best_forward = 101
    for price, role in zip(prices, roles):
        if role == 0:
            best_defender = min(best_defender, price)
        else:
            best_forward = min(best_forward, price)
    print('no' if s + best_defender + best_forward > 100 else "yes")
|
[
"noreply@github.com"
] |
psycho-pomp.noreply@github.com
|
ae6f6eb11eddca39b273b09aef1c744440f99616
|
a97fb0584709e292a475defc8506eeb85bb24339
|
/source code/code/ch1713.py
|
6b4765d3aa02f8de2a92852301a82b8975666146
|
[] |
no_license
|
AAQ6291/PYCATCH
|
bd297858051042613739819ed70c535901569079
|
27ec4094be785810074be8b16ef84c85048065b5
|
refs/heads/master
| 2020-03-26T13:54:57.051016
| 2018-08-17T09:05:19
| 2018-08-17T09:05:19
| 144,963,014
| 0
| 0
| null | null | null | null |
BIG5
|
Python
| false
| false
| 1,037
|
py
|
#!/usr/bin/env python
# -*- coding: cp950 -*-
# Load the wx module
import wx
class myApp(wx.App):
    """Application object: builds and shows the main frame at startup."""
    def OnInit(self):
        # Called by wx when the app starts; create the top-level window.
        frame = myFrame()
        frame.Show()
        return True
# Define myFrame, inheriting from wx.Frame.
class myFrame(wx.Frame):
    """Main window: a horizontal progress gauge advanced from idle events."""
    def __init__(self):
        wx.Frame.__init__(
            self,
            None,
            -1,
            'Progress Bar',
            size=(320, 150))
        # Create the panel hosting the gauge.
        panel = wx.Panel(self, -1)
        # Counter holding the current progress value.
        self.count = 0
        # Create the progress-bar widget (value range 0..50).
        self.gauge = wx.Gauge(
            panel,
            -1,
            50,
            pos = (5, 50),
            size = (300, 20),
            style=wx.GA_HORIZONTAL)
        # Listen for idle events to drive the animation.
        self.Bind(wx.EVT_IDLE, self.OnIdle)
    # Idle-event handler: advance the counter and repaint the gauge.
    # NOTE(review): the gauge range is 50 but the counter only resets at
    # 100, so the bar sits at full for half of each cycle -- confirm
    # whether the reset threshold should be 50 instead.
    def OnIdle(self, event):
        self.count += 1
        if self.count >= 100:
            self.count = 0
        # Push the new value to the gauge.
        self.gauge.SetValue(self.count)
def main():
    """Instantiate the app and run the wx event loop until the window closes."""
    app = myApp()
    app.MainLoop()
if __name__ == "__main__":
    main()
|
[
"angelak.tw@gmail.com"
] |
angelak.tw@gmail.com
|
86bd165356c0d04df9db767185f55bfd03bdff46
|
93684882400d0249ad733249f5b2c8dbd230110f
|
/ClassExercise & studio/chapter 10/ex.04.py
|
4c70f122bd5ee54fdb03df271fb9de2af6b7e6e9
|
[] |
no_license
|
SmileShmily/LaunchCode-summerofcode-Unit1
|
c492bbed966547cc8c1be7f15d7a23cb989d407b
|
03474cf77b0dae2bcfaf8513711d3fec72bd4166
|
refs/heads/master
| 2021-01-16T23:19:23.413010
| 2017-06-29T02:49:19
| 2017-06-29T02:49:19
| 95,730,117
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 333
|
py
|
'''Write a function to count how many odd numbers are in a list.
'''
import random
def countOdd(lst):
    """Return how many elements of lst are odd."""
    return sum(1 for value in lst if value % 2 != 0)
# Make a random list to exercise the function; the printed count varies
# from run to run (not a deterministic test).
lst = []
for i in range(100):
    lst.append(random.randint(0, 1000))
print(countOdd(lst))
|
[
"zyxjyaya@gmail.com"
] |
zyxjyaya@gmail.com
|
0b428bafc96d69a7ec8a727903cea42cf0da8fd4
|
3c0f93b707e94b8e1363588a1699ab7d663d8f98
|
/bin/extract-extrema
|
9c4d337bd4e25c292cd676ab2f7aee1be00d2d98
|
[
"MIT"
] |
permissive
|
reedessick/universality
|
3b9c39dbf3d15fe6fe824ac124cbecec5d3f1836
|
d4eaf5ee6ea1ecf79f0ec32ea5e62093c95d4b57
|
refs/heads/master
| 2023-07-13T19:31:38.277188
| 2023-06-25T23:03:14
| 2023-06-25T23:03:14
| 118,136,331
| 2
| 2
|
MIT
| 2023-04-25T17:29:33
| 2018-01-19T14:39:26
|
Python
|
UTF-8
|
Python
| false
| false
| 3,553
|
#!/usr/bin/env python3

"""an executable to extract extrema from a single CSV
"""
__author__ = "Reed Essick (reed.essick@gmail.com)"

#-------------------------------------------------

import os

import numpy as np

from argparse import ArgumentParser

### non-standard libraries
from universality.utils import io
from universality.properties import extrema

#-------------------------------------------------

# NOTE: the original script accidentally repeated the import block and the
# ArgumentParser construction twice (copy/paste slip); the duplicate has
# been removed.
parser = ArgumentParser(description=__doc__)

# required arguments
rgroup = parser.add_argument_group('required arguments')
rgroup.add_argument('inpath', type=str)
rgroup.add_argument('outpath', type=str)

rgroup.add_argument('columns', nargs='+', type=str,
    help='columns to extract')

rgroup.add_argument('--default-values', nargs=3, type=str, action='append', default=[],
    help='set a default value if no extrema are found within the specified ranges. \
eg, "--default-value column default_max_val default_min_val"')

rgroup.add_argument('--new-column', type=str, default=[], action='append', nargs=3,
    help='the name of the new column that will be added to each file. \
Supply the name of the column original and then the names of the new columns for max and min. \
eg, "--new-column old_column max_old_column min_old_column"')

rgroup.add_argument('--column-range', nargs=3, type=str, default=[], action='append',
    help='restrict the extremization to within this range for this column. \
e.g.: "--column-range baryon_density 2.8e14 2.8e15". Can be repeated to specify multiple ranges')

# verbosity arguments
vgroup = parser.add_argument_group('verbosity arguments')
vgroup.add_argument('-v', '--verbose', default=False, action='store_true')
vgroup.add_argument('-V', '--Verbose', default=False, action='store_true')

args = parser.parse_args()

### finish parsing
Ncol = len(args.columns)

# map: original column name -> [max-column name, min-column name]
new_columns = dict((a, [b, c]) for a, b, c in args.new_column)

if args.default_values:
    # BUG FIX: was "default_values = default_values = dict(...)" -- a
    # duplicated (if harmless) assignment.
    default_values = dict((a, (float(b), float(c))) for a, b, c in args.default_values)
    # defaults must be supplied for all requested columns or none at all
    for col in args.columns:
        if col not in default_values.keys():
            raise ValueError('must specify --default-values for either all columns or no columns! missing %s'%col)
    default_values = [default_values[col] for col in args.columns]
else:
    default_values = None

if os.path.dirname(args.outpath) and (not os.path.exists(os.path.dirname(args.outpath))):
    os.makedirs(os.path.dirname(args.outpath))

loadcolumns = [col for col in args.columns]
ranges = dict()
for column, m, M in args.column_range:
    # BUG FIX: this loop referenced an undefined name "key" (NameError as
    # soon as --column-range was used); it must track "column" so that
    # range-only columns are loaded as well.
    if column not in loadcolumns:
        loadcolumns.append(column)
    ranges[loadcolumns.index(column)] = (float(m), float(M))

args.verbose |= args.Verbose

#-------------------------------------------------

if args.verbose:
    print('reading samples from: '+args.inpath)
data, _ = io.load(args.inpath, loadcolumns)
N = len(data)

#------------------------

if args.verbose:
    print('extracting maxima and minima')

ans = extrema.data2extrema(
    data,
    Ncol,
    default_values=default_values,
    static_ranges=ranges,
)

outcols = extrema.outputcolumns(
    args.columns,
    custom_names=new_columns,
)

#------------------------

if args.verbose:
    print('writing samples to: '+args.outpath)
io.write(args.outpath, ans, outcols)
|
[
"reed.essick@ligo.org"
] |
reed.essick@ligo.org
|
|
add0946623b02c0745122b9f79576c4775ae89d3
|
be0898ceaee2a7758ffe0365b976f597b2ad26dd
|
/rls/common/when.py
|
e669f55384cf6128b09564efcdf3616c92ce3403
|
[
"Apache-2.0"
] |
permissive
|
violet712/RLs
|
1edaa6427108e3e36d513cb6038be771837ecca4
|
25cc97c96cbb19fe859c9387b7547cbada2c89f2
|
refs/heads/master
| 2023-08-25T12:04:24.174034
| 2021-10-03T15:37:32
| 2021-10-03T15:37:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,196
|
py
|
# Added from https://github.com/danijar/dreamerv2/blob/main/dreamerv2/common/when.py
class Every:
    """Callable that returns True once per `every` steps.

    The first observed step always fires and anchors the schedule; a falsy
    interval (0 or None) disables the trigger entirely.
    """

    def __init__(self, every):
        self._every = every
        self._last = None

    def __call__(self, step):
        step = int(step)
        if not self._every:
            # Interval of 0/None: never fire.
            return False
        if self._last is None:
            # First call fires and records the anchor step.
            self._last = step
            return True
        if step < self._last + self._every:
            return False
        self._last += self._every
        return True
class Once:
    """Callable that returns True on its first invocation only."""

    def __init__(self):
        self._armed = True

    def __call__(self):
        # Swap-and-return: disarm while handing back the previous state.
        armed, self._armed = self._armed, False
        return armed
class Until:
    """Callable that is True while step < until; always True if no bound."""

    def __init__(self, until=None):
        self._until = until

    def __call__(self, step):
        if not self._until:
            # A falsy bound (None or 0) means "no limit".
            return True
        return int(step) < self._until
if __name__ == '__main__':
    # Manual smoke tests.
    # Every(10) over 0..99 fires at 0, 10, ..., 90.
    e = Every(10)
    for i in range(100):
        if e(i):
            print(i)
    # Once fires exactly once: only 'first' is printed.
    o = Once()
    if o():
        print('first')
    if o():
        print('second')
    # Until(1) holds only while step < 1: prints just 0.
    u = Until(1)
    for i in range(10):
        if u(i):
            print(i)
|
[
"keavnn.wjs@gmail.com"
] |
keavnn.wjs@gmail.com
|
1c8e159794e5cfb1b219726ba7ee4901f03e4a45
|
e4ec5b6cf3cfe2568ef0b5654c019e398b4ecc67
|
/azure-cli/2.0.18/libexec/lib/python3.6/site-packages/azure/mgmt/monitor/models/log_profile_resource.py
|
f9ecb885b923e3bdda411b695f0559bb08ac013c
|
[] |
no_license
|
EnjoyLifeFund/macHighSierra-cellars
|
59051e496ed0e68d14e0d5d91367a2c92c95e1fb
|
49a477d42f081e52f4c5bdd39535156a2df52d09
|
refs/heads/master
| 2022-12-25T19:28:29.992466
| 2017-10-10T13:00:08
| 2017-10-10T13:00:08
| 96,081,471
| 3
| 1
| null | 2022-12-17T02:26:21
| 2017-07-03T07:17:34
| null |
UTF-8
|
Python
| false
| false
| 3,481
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
# NOTE: AutoRest-generated model (see file header); hand edits will be lost
# when the SDK is regenerated, so the code below is left byte-identical.
class LogProfileResource(Resource):
    """The log profile resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Azure resource Id
    :vartype id: str
    :ivar name: Azure resource name
    :vartype name: str
    :ivar type: Azure resource type
    :vartype type: str
    :param location: Resource location
    :type location: str
    :param tags: Resource tags
    :type tags: dict
    :param storage_account_id: the resource id of the storage account to which
     you would like to send the Activity Log.
    :type storage_account_id: str
    :param service_bus_rule_id: The service bus rule ID of the service bus
     namespace in which you would like to have Event Hubs created for streaming
     the Activity Log. The rule ID is of the format: '{service bus resource
     ID}/authorizationrules/{key name}'.
    :type service_bus_rule_id: str
    :param locations: List of regions for which Activity Log events should be
     stored or streamed. It is a comma separated list of valid ARM locations
     including the 'global' location.
    :type locations: list of str
    :param categories: the categories of the logs. These categories are
     created as is convenient to the user. Some values are: 'Write', 'Delete',
     and/or 'Action.'
    :type categories: list of str
    :param retention_policy: the retention policy for the events in the log.
    :type retention_policy: :class:`RetentionPolicy
     <azure.mgmt.monitor.models.RetentionPolicy>`
    """

    # Constraints enforced client-side: read-only ARM metadata plus the
    # fields the service requires on create/update.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'required': True},
        'locations': {'required': True},
        'categories': {'required': True},
        'retention_policy': {'required': True},
    }

    # Attribute -> wire key/type map; 'properties.*' keys are nested inside
    # the resource envelope.  Presumably consumed by the msrest
    # (de)serializer -- verify against the SDK's serialization layer.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'storage_account_id': {'key': 'properties.storageAccountId', 'type': 'str'},
        'service_bus_rule_id': {'key': 'properties.serviceBusRuleId', 'type': 'str'},
        'locations': {'key': 'properties.locations', 'type': '[str]'},
        'categories': {'key': 'properties.categories', 'type': '[str]'},
        'retention_policy': {'key': 'properties.retentionPolicy', 'type': 'RetentionPolicy'},
    }

    def __init__(self, location, locations, categories, retention_policy, tags=None, storage_account_id=None, service_bus_rule_id=None):
        # Resource handles the common id/name/type/location/tags envelope;
        # the remaining attributes are the log-profile-specific properties.
        super(LogProfileResource, self).__init__(location=location, tags=tags)
        self.storage_account_id = storage_account_id
        self.service_bus_rule_id = service_bus_rule_id
        self.locations = locations
        self.categories = categories
        self.retention_policy = retention_policy
|
[
"Raliclo@gmail.com"
] |
Raliclo@gmail.com
|
1374e0b0e8afa74b17c2f850934167d58caa106d
|
1ac87c808c33336338f359f0233593a8e603f45e
|
/allauth/socialaccount/providers/github/models.py
|
9db7841b25cbd32da8ff47e04218751cb5eb7996
|
[
"MIT"
] |
permissive
|
chhabrakadabra/django-allauth
|
140a10de87061f7d60e1fa692b98b64afb9f987e
|
19c7070c1b777b7ad69c2c6fd59571cc5a18f4e4
|
refs/heads/master
| 2021-01-15T20:19:26.777224
| 2012-06-19T04:26:29
| 2012-06-19T04:26:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 373
|
py
|
from allauth.socialaccount import providers
from allauth.socialaccount.providers.base import Provider, ProviderAccount
class GitHubAccount(ProviderAccount):
    # No GitHub-specific account behaviour; inherits everything from
    # ProviderAccount.
    pass
class GitHubProvider(Provider):
    # Identifier under which the provider is registered, and its
    # human-readable display name.
    id = 'github'
    name = 'GitHub'
    # Dotted path of this provider's package -- presumably used by allauth
    # to locate the provider's urls/views modules; verify against Provider.
    package = 'allauth.socialaccount.providers.github'
    # Account wrapper class used for social accounts from this provider.
    account_class = GitHubAccount
# Importing this module registers the provider with the global registry.
providers.registry.register(GitHubProvider)
|
[
"raymond.penners@intenct.nl"
] |
raymond.penners@intenct.nl
|
0ea1c176cc778405f57d3480c00c550dd52c912d
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/070_oop/001_classes/_exercises/_templates/ITDN Python RUS/002_Nasledovanie i Polimorfizm/07-complex-hierarchy-mro.py
|
fdcb6405dbda80c5c907ab54361512d72eeb6716
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,916
|
py
|
# # -*- coding: utf-8 -*-
#
#
# """
# Ещё один пример использования super и построения интерпретатором линеаризации.
# Иерархия классов в данном примере:
# object
# / \
# / \
# A B
# \ /|
# \ / |
# \ / |
# \ / |
# C |
# \ |
# \ |
# \ |
# \ |
# \|
# D
# |
# E
# """
#
#
#
# ___ gen_init ___
# """
# Декоратор gen_init, который добавляет автоматически
# сгенерированный конструктор.
# Декоратор -- это функция, которая принимает функцию или класс
# и возвращает другой объект, который будет привязан к имени изначального.
# Обычно используется для изменения поведения фукнции (путём создания
# новой функции, которая вызывает изначальную) или модификации класса
# (и происходит в данном примере).
#
# :param cls: модифицируемый класс
# :return: класс с добавленным конструктором
# """
#
# ___ -
# print('Entered' ___. -n "constructor")
# s... ___ ____. -
# print('Quit', ___. -n "constructor")
# ___. - _ init
# r_ ___
#
#
# 0?
# c__ A o..
# p___
#
#
# 0?
# c_ B o..
# p___
#
#
# 0?
# c_ C A B
# p_
#
#
# 0?
# c_ D C B
# p_
#
#
# 0?
# c_ E D
# p_
#
#
# print E. -m
# obj _ E
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
3497af07bb08348af9c4922ec8bd53d41bfe1fa4
|
7bd5ca970fbbe4a3ed0c7dadcf43ba8681a737f3
|
/atcoder/other/lang_test_v1/d.py
|
6fd5f61c90ff0b66f7c278d91505477cd77d5fe2
|
[] |
no_license
|
roiti46/Contest
|
c0c35478cd80f675965d10b1a371e44084f9b6ee
|
c4b850d76796c5388d2e0d2234f90dc8acfaadfa
|
refs/heads/master
| 2021-01-17T13:23:30.551754
| 2017-12-10T13:06:42
| 2017-12-10T13:06:42
| 27,001,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 719
|
py
|
# -*- coding: utf-8 -*-
# Python 2 contest solution.  Given N holiday dates (month/day) in a
# 366-day (leap) year starting with weekday index 6, find the longest run
# of consecutive days off.  Weekday indices 5 and 6 are treated as
# non-working days; a holiday falling on one of them banks a substitute
# day off ("furikae") that is spent on the next ordinary working day.
import sys,copy,math,heapq,itertools as it,fractions,re,bisect,collections as coll

N = int(raw_input())
md = [map(int, raw_input().split("/")) for i in xrange(N)]  # holiday [month, day] pairs

days = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]  # month lengths (leap year)
day = 1
yobi = 6      # current weekday index, advanced mod 7 each day
hurikae = 0   # substitute days banked from holidays hitting a weekend
seq = 0       # current run of consecutive days off
ans = 0       # best run seen so far
for month in xrange(1, 13):
    for day in xrange(1, days[month - 1] + 1):
        if [month, day] in md:
            if 5 <= yobi:
                hurikae += 1  # holiday on a weekend: bank a substitute day
            seq += 1
        elif 5 <= yobi:
            seq += 1          # ordinary weekend day off
        else:
            if hurikae:
                seq += 1      # spend a banked substitute day on this weekday
                hurikae -= 1
            else:
                seq = 0       # a working day breaks the run
        ans = max(ans, seq)
        yobi = (yobi + 1) % 7
print ans
|
[
"roiti46@gmail.com"
] |
roiti46@gmail.com
|
bfffbcc498ac8cd37e2b430fca1a96b35ea64a56
|
384d0be5ac54b306b945cf38c10d9b0a44c975ea
|
/devstack/tools/jenkins/jenkins_home/print_summary.py
|
1d71a4a8ebc7b254f088afcd8a132aed0bbd2b19
|
[] |
no_license
|
ashokcse/openstack-bill
|
05ae313637b3cfecba946d2a9b32e8c7609fc721
|
1a3d7575d4b341f64fa1764ed47e47a7504a9bcc
|
refs/heads/master
| 2021-01-18T14:05:24.696165
| 2012-09-12T11:29:20
| 2012-09-12T11:29:20
| 5,424,267
| 5
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,208
|
py
|
#!/usr/bin/python
import urllib
import json
import sys
def print_usage():
    # Print CLI usage (Python 2 print statement) and abort the script.
    print "Usage: %s [jenkins_url (eg. http://50.56.12.202:8080/)]"\
        % sys.argv[0]
    sys.exit()
def fetch_blob(url):
    # Fetch <url>/api/json from the Jenkins REST API and parse it.
    # NOTE(review): no error handling -- network or JSON failures propagate.
    return json.loads(urllib.urlopen(url + '/api/json').read())
# Walk every job on the Jenkins server and emit, as JSON on stdout, the
# per-configuration status of matrix jobs grouped as results[tag][name].
if len(sys.argv) < 2:
    print_usage()

BASE_URL = sys.argv[1]

root = fetch_blob(BASE_URL)
results = {}
for job_url in root['jobs']:
    job = fetch_blob(job_url['url'])
    # Only matrix jobs expose activeConfigurations; job names are assumed
    # to look like "<tag>-<name>" -- TODO confirm (a second '-' in the
    # name would make this split raise).
    if job.get('activeConfigurations'):
        (tag, name) = job['name'].split('-')
        if not results.get(tag):
            results[tag] = {}
        if not results[tag].get(name):
            results[tag][name] = []
        for config_url in job['activeConfigurations']:
            config = fetch_blob(config_url['url'])
            log_url = ''
            if config.get('lastBuild'):
                log_url = config['lastBuild']['url'] + 'console'
            results[tag][name].append({'test': config['displayName'],
                                       'status': config['color'],
                                       'logUrl': log_url,
                                       'healthReport': config['healthReport']})

print json.dumps(results)
|
[
"ashokcse@live.com"
] |
ashokcse@live.com
|
9f79fa06ef66ddb7cd1d963ace3346532d9816b1
|
dfcaf26ef27684c7f1e69b7e90ac55094158115d
|
/paper/pnoise.py
|
d921844ebbea85d25dbd7430145b0a9781503021
|
[] |
no_license
|
msyriac/halofg
|
e038bbacf9332091087be657b39f274cb5507c01
|
d1aaf54624a3d8bae9eeba667c6e895621a06f24
|
refs/heads/master
| 2021-01-21T06:10:04.264115
| 2018-04-02T02:26:23
| 2018-04-02T02:26:23
| 101,939,544
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,538
|
py
|
from __future__ import print_function
from orphics import maps,io,cosmology
from enlib import enmap
import numpy as np
import os,sys
import healpy as hp
proot = "/gpfs01/astro/workarea/msyriac/data/planck/"
import matplotlib as mpl
from cycler import cycler
mpl.rcParams['axes.prop_cycle'] = cycler(color=['#2424f0','#df6f0e','#3cc03c','#d62728','#b467bd','#ac866b','#e397d9','#9f9f9f','#ecdd72','#77becf'])
try:
lss,smica_nls = np.loadtxt(proot+"smica_nls.txt",unpack=True)
lsl,lgmca_nls = np.loadtxt(proot+"lgmca_nls.txt",unpack=True)
except:
mask = hp.read_map(proot+"COM_Mask_Lensing_2048_R2.00.fits")
fsky = mask.sum()*1./mask.size
# smica
smica1 = hp.read_map(proot+"COM_CMB_IQU-smica-field-Int_2048_R2.01_ringhalf-1.fits")*1e6
smica2 = hp.read_map(proot+"COM_CMB_IQU-smica-field-Int_2048_R2.01_ringhalf-2.fits")*1e6
autos = (hp.anafast(smica1*mask)+hp.anafast(smica2*mask))/2.
k12 = hp.anafast(smica1*mask,smica2*mask)
smica_nls = ((autos-k12)/2.)/fsky
print(smica_nls)
lss = np.arange(len(smica_nls))
# lgmca
lgmcan = hp.read_map(proot+"WPR2_CMB_noise_muK.fits") * mask
lgmca_nls = hp.anafast(lgmcan)/fsky
lsl = np.arange(len(lgmca_nls))
io.save_cols(proot+"smica_nls.txt",(lss,smica_nls))
io.save_cols(proot+"lgmca_nls.txt",(lsl,lgmca_nls))
cc = cosmology.Cosmology(lmax=6000,pickling=True,dimensionless=False)
ells = np.arange(0,3000,1)
cltt = cc.theory.lCl('TT',ells)
spbeam = maps.gauss_beam(lss,5.0)
lpbeam = maps.gauss_beam(lsl,5.0)
pl = io.Plotter(yscale='log',xlabel="$\\ell$",ylabel="$\\ell(\\ell+1)C^{TT}_{\\ell}/2\\pi\ (\\mu K-\\mathrm{rad})^2$",ftsize=17)
pl.add(ells,cltt*ells*(ells+1.)/2./np.pi,color="k",lw=2)
pl.add(lss,smica_nls*lss*(lss+1.)/2./np.pi/spbeam**2.,label="SMICA")
pl.add(lsl,lgmca_nls*lsl*(lsl+1.)/2./np.pi/lpbeam**2.,label="LGMCA")
abeam = maps.gauss_beam(ells,1.5)
for noise in [6.,10.,20.]:
lknee = 3000
alpha = -4.
nls = cosmology.white_noise_with_atm_func(ells,noise,lknee,alpha,dimensionless=False,TCMB=2.7255e6)
pl.add(ells,nls*ells*(ells+1.)/2./np.pi/abeam**2.,ls="--",lw=2,label=str(noise)+" $\\mu K$-arcmin")
noise = 45.
abeam = maps.gauss_beam(ells,5.0)
nls = cosmology.white_noise_with_atm_func(ells,noise,0,1,dimensionless=False,TCMB=2.7255e6)
# pl.add(ells,nls*ells*(ells+1.)/2./np.pi/abeam**2.,ls="--",lw=2,label="LGCMA estimate")
pl.legend(loc='lower right',labsize=12)
pl._ax.set_xlim(0,3000)
pl._ax.set_ylim(1,1e4)
pl.done(io.dout_dir+"smicalgmca.pdf")
|
[
"mathewsyriac@gmail.com"
] |
mathewsyriac@gmail.com
|
c4833fd78cb4f1385717b3b9920ad922d0188f62
|
b6b30fb06124883b074144c419b43d9182efcdff
|
/CV/optical_flow.py
|
25910a23c86c039d5993508aecf7a546dac33369
|
[] |
no_license
|
JohnnySunkel/BlueSky
|
da9f5107034289bfbdd3ba40458f9b9bd8d01a13
|
5a20eba9ef7509a5a7b7af86e7be848242e1a72f
|
refs/heads/master
| 2021-07-07T09:57:37.256950
| 2020-09-02T23:06:46
| 2020-09-02T23:06:46
| 166,883,639
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,754
|
py
|
import cv2
import numpy as np
# Define a function to track the object
def start_tracking():
    """Track feature points from the webcam using Lucas-Kanade optical flow.

    Captures frames from the default camera, periodically detects good
    features to track, follows them with a forward/backward optical-flow
    consistency check, and draws the tracked paths in an OpenCV window.
    Runs until the user presses the 'Esc' key or the camera stops
    delivering frames. Returns None; output is purely visual.
    """
    # Initialize the video capture object (default camera, index 0)
    cap = cv2.VideoCapture(0)

    # Define the scaling factor applied to every captured frame
    scaling_factor = 0.5

    # Maximum number of positions kept per tracking path
    num_frames_to_track = 5

    # Re-detect fresh features only every num_frames_jump frames
    num_frames_jump = 2

    # Initialize tracking state
    tracking_paths = []
    frame_index = 0
    prev_gray = None

    # Lucas-Kanade parameters: search window, pyramid depth, stop criteria
    tracking_params = dict(winSize=(11, 11), maxLevel=2,
                           criteria=(cv2.TERM_CRITERIA_EPS |
                                     cv2.TERM_CRITERIA_COUNT, 10, 0.03))

    try:
        # Iterate until the user presses the 'Esc' key
        while True:
            # Capture the current frame; stop cleanly if the camera yields
            # nothing (fixes a crash on cv2.resize(None, ...) when
            # cap.read() fails)
            ret, frame = cap.read()
            if not ret or frame is None:
                break

            # Resize the frame
            frame = cv2.resize(frame, None, fx=scaling_factor,
                               fy=scaling_factor,
                               interpolation=cv2.INTER_AREA)

            # Convert to grayscale for the optical-flow computation
            frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            # Copy of the color frame used for drawing the output
            output_img = frame.copy()

            if len(tracking_paths) > 0:
                # Previous and current grayscale images
                prev_img, current_img = prev_gray, frame_gray

                # Last known position of every tracked point, shaped for
                # calcOpticalFlowPyrLK: (N, 1, 2) float32
                feature_points_0 = np.float32(
                    [tp[-1] for tp in tracking_paths]).reshape(-1, 1, 2)

                # Compute forward optical flow
                feature_points_1, _, _ = cv2.calcOpticalFlowPyrLK(
                    prev_img, current_img, feature_points_0,
                    None, **tracking_params)

                # Compute reverse optical flow
                feature_points_0_rev, _, _ = cv2.calcOpticalFlowPyrLK(
                    current_img, prev_img, feature_points_1,
                    None, **tracking_params)

                # Forward/backward error: reliable points return close to
                # where they started
                diff_feature_points = abs(
                    feature_points_0 -
                    feature_points_0_rev).reshape(-1, 2).max(-1)

                # Keep only points whose round-trip error is below 1 pixel
                good_points = diff_feature_points < 1

                # Rebuild the list of surviving tracking paths
                new_tracking_paths = []

                for tp, (x, y), good_points_flag in zip(
                        tracking_paths,
                        feature_points_1.reshape(-1, 2), good_points):
                    # Drop points that failed the consistency check
                    if not good_points_flag:
                        continue

                    # Append the new position and cap the path length
                    tp.append((x, y))
                    if len(tp) > num_frames_to_track:
                        del tp[0]

                    new_tracking_paths.append(tp)

                    # Bug fix: cv2.circle requires integer pixel
                    # coordinates; x and y are float32 here
                    cv2.circle(output_img, (int(x), int(y)), 3,
                               (0, 255, 0), -1)

                # Update the tracking paths
                tracking_paths = new_tracking_paths

                # Draw the tracked paths
                cv2.polylines(output_img,
                              [np.int32(tp) for tp in tracking_paths],
                              False, (0, 150, 0))

            # Detect fresh features every num_frames_jump frames
            if not frame_index % num_frames_jump:
                # Mask out neighborhoods of points already being tracked
                # so new detections don't duplicate them
                mask = np.zeros_like(frame_gray)
                mask[:] = 255
                for x, y in [np.int32(tp[-1]) for tp in tracking_paths]:
                    cv2.circle(mask, (x, y), 6, 0, -1)

                # Compute good features to track in the unmasked area
                feature_points = cv2.goodFeaturesToTrack(
                    frame_gray, mask=mask, maxCorners=500,
                    qualityLevel=0.3, minDistance=7, blockSize=7)

                # Start a new path for each detected feature point
                if feature_points is not None:
                    for x, y in np.float32(feature_points).reshape(-1, 2):
                        tracking_paths.append([(x, y)])

            # Advance loop state
            frame_index += 1
            prev_gray = frame_gray

            # Display output
            cv2.imshow('Optical Flow', output_img)

            # Exit when the user presses the 'Esc' key (keycode 27)
            c = cv2.waitKey(1)
            if c == 27:
                break
    finally:
        # Resource-leak fix: always release the camera handle, even when
        # the loop exits via Esc or an error
        cap.release()
if __name__ == '__main__':
    # Start the tracker (blocks until Esc is pressed or capture fails)
    start_tracking()

    # Close all OpenCV windows left open by the tracker
    cv2.destroyAllWindows()
|
[
"noreply@github.com"
] |
JohnnySunkel.noreply@github.com
|
a0f701624c8c135034af750da087fe9d8ef8866d
|
54985d2bc6649272d877bfb10df0572a97fb934e
|
/tests/record_expert_demonstrations.py
|
9fcc1349c43f65519a88ba2dd37a699007a2a190
|
[
"MIT"
] |
permissive
|
panlybero/gym-novel-gridworlds
|
d28a9b282c37327d98bfb19e87a9b1d35bf0aae9
|
b375cb674337b6cd3c33b165d323cf98b46095e3
|
refs/heads/master
| 2022-12-04T23:48:10.462777
| 2020-07-19T23:15:12
| 2020-07-19T23:15:12
| 283,211,610
| 0
| 0
|
MIT
| 2020-07-28T12:52:44
| 2020-07-28T12:52:43
| null |
UTF-8
|
Python
| false
| false
| 1,929
|
py
|
import time
import gym
import gym_novel_gridworlds
import keyboard
from stable_baselines import DQN
from stable_baselines.gail import generate_expert_traj
from constant import ENV_KEY
"""
Generate Expert Trajectories from a model
"""
# env_id = 'NovelGridworld-v2'
# model = DQN('MlpPolicy', env_id, verbose=1)
#
# # Train a DQN agent for 1e5 timesteps and generate 10 trajectories
# # data will be saved in a numpy archive named `expert_+env_id.npz`
# generate_expert_traj(model, 'expert_'+env_id, n_timesteps=int(10), n_episodes=5)
"""
Generate Expert Trajectories from a human expert player
"""
# Environment to record human demonstrations for
env_id = 'NovelGridworld-v5'
env = gym.make(env_id)
# Keyboard-key -> action-index mapping for this environment
KEY_ACTION_DICT = ENV_KEY[env_id]
def print_play_keys(action_str):
    """Print each playable keyboard key next to the action name it maps to.

    Looks the action index up in the module-level KEY_ACTION_DICT and uses
    *action_str* (indexable by action id) to resolve the human-readable name.
    """
    print("Press a key to play: ")
    for keyboard_key in KEY_ACTION_DICT:
        action_index = KEY_ACTION_DICT[keyboard_key]
        print(keyboard_key, ": ", action_str[action_index])
def human_expert(_obs):
    """
    Human-expert policy: prompt the player for a keyboard action.

    Renders the environment, shows the valid keys, and blocks until the
    player presses one; the corresponding action index is returned.
    (The original docstring described a random agent — copy-paste error.)

    :param _obs: (np.ndarray) Current observation (unused; the human
        reads the rendered environment instead)
    :return: (int) action index chosen by the player
    """
    while True:
        env.render()
        print_play_keys(env.action_str)
        # Brief pause so a held-down key is not registered repeatedly
        time.sleep(0.2)
        key_pressed = keyboard.read_key()
        # return index of action if valid key is pressed
        if key_pressed:
            if key_pressed in KEY_ACTION_DICT:
                return KEY_ACTION_DICT[key_pressed]
            elif key_pressed == "esc":
                # NOTE(review): breaking out here makes the function
                # implicitly return None, which the caller
                # (generate_expert_traj) will pass to env.step — confirm
                # the caller tolerates a None action.
                print("You pressed esc, exiting!!")
                break
            else:
                print("You pressed wrong key. Press Esc key to exit, OR:")
# Data will be saved in a numpy archive named `expert_+env_id.npz`
# when using something different than an RL expert,
# you must pass the environment object explicitly
env.render()
# Number of demonstration episodes to record from the human player
episodes = 50
# Record episodes by repeatedly querying human_expert for actions
generate_expert_traj(human_expert, 'expert_' + env_id+'_'+str(episodes)+'demos', env, n_episodes=episodes)
|
[
"gtatiya@live.com"
] |
gtatiya@live.com
|
b8f64295f2228f7b120165c28fa4d3c69a7d8e41
|
31780af7a5558523def1aae5f25df3e0b084be9b
|
/reg1.py
|
71aae85831ae6835718ea909d9d9ae25a68bd9aa
|
[] |
no_license
|
sevilaybayatli/PYTHS19
|
1796615ff939f2e98ce657feeaa3efd47a2e66c6
|
ae0607e215a0d8205475d124c0362c39881e5eda
|
refs/heads/master
| 2020-07-23T16:12:17.922548
| 2020-03-23T22:03:00
| 2020-03-23T22:03:00
| 207,624,169
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
import re
def checkOfString(string):
    """Return True if *string* contains only letters, digits, and dots.

    The empty string is considered valid, since it contains no
    disallowed characters.
    """
    # A match means at least one character outside [a-zA-Z0-9.] exists
    disallowed = re.search(r'[^a-zA-Z0-9.]', string)
    return disallowed is None
# Demo: only allowed characters -> True; symbols present -> False
print(checkOfString('AgfsrWCB12.'))
print(checkOfString('*"q@aQ'))
|
[
"sevilaybayatli@gmail.com"
] |
sevilaybayatli@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.