blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6605bf10714ea0486e0502e4fc5f35a2777c12f3 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/MerchantBrandListResult.py | eae81a409220a5d4ea5ff4cfe0ca4568824e6cec | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,655 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.BrandResult import BrandResult
class MerchantBrandListResult(object):
    """Alipay domain model holding a list of BrandResult entries.

    Mirrors the generated-SDK contract: assignment through the property
    converts raw dicts to BrandResult objects, and serialization replaces
    the stored elements with plain dicts in place.
    """

    def __init__(self):
        self._brand_list_result = None

    @property
    def brand_list_result(self):
        # List of BrandResult objects, or None if never assigned.
        return self._brand_list_result

    @brand_list_result.setter
    def brand_list_result(self, value):
        # Non-list inputs are silently ignored (SDK convention).
        if not isinstance(value, list):
            return
        converted = list()
        for item in value:
            if isinstance(item, BrandResult):
                converted.append(item)
            else:
                converted.append(BrandResult.from_alipay_dict(item))
        self._brand_list_result = converted

    def to_alipay_dict(self):
        """Serialize to a plain dict suitable for the alipay wire format."""
        params = dict()
        if self.brand_list_result:
            if isinstance(self.brand_list_result, list):
                # NOTE: elements are converted to dicts *in place*, matching
                # the behaviour of the generated SDK code.
                for idx, element in enumerate(self.brand_list_result):
                    if hasattr(element, 'to_alipay_dict'):
                        self.brand_list_result[idx] = element.to_alipay_dict()
            if hasattr(self.brand_list_result, 'to_alipay_dict'):
                params['brand_list_result'] = self.brand_list_result.to_alipay_dict()
            else:
                params['brand_list_result'] = self.brand_list_result
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a response dict; returns None for empty input."""
        if not d:
            return None
        instance = MerchantBrandListResult()
        if 'brand_list_result' in d:
            instance.brand_list_result = d['brand_list_result']
        return instance
| [
"jiandong.jd@antfin.com"
] | jiandong.jd@antfin.com |
761cf1eac012c2b6fbbe0c3dd6c696eb9b456423 | e312531fab26c5172ceba9a6924850da2b1390e8 | /aliyun-python-sdk-reid/aliyunsdkreid/request/v20190928/GetFootwearEventRequest.py | cdcf6914a4a21d7204342b59b57b8f347aaba22f | [
"Apache-2.0"
] | permissive | heyjie/aliyun-openapi-python-sdk | e63dd8704b27190b6786766469485f8a7c13b1b6 | ea1aa1db98fa5ca709480eeee19a76dcfaba1bb8 | refs/heads/master | 2022-12-30T11:50:31.133033 | 2020-10-13T02:45:37 | 2020-10-13T02:45:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,566 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkreid.endpoint import endpoint_data
class GetFootwearEventRequest(RpcRequest):
	"""RPC request for the aliyun ReID 'GetFootwearEvent' API (product 'reid', API version 2019-09-28)."""
	def __init__(self):
		# product 'reid', API version 2019-09-28, action GetFootwearEvent, SDK version 1.1.8.4
		RpcRequest.__init__(self, 'reid', '2019-09-28', 'GetFootwearEvent','1.1.8.4')
		self.set_method('POST')
		# Attach endpoint resolution data when the installed core supports it.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
	def get_Date(self):
		# 'Date' body parameter of the query.
		return self.get_body_params().get('Date')
	def set_Date(self,Date):
		self.add_body_params('Date', Date)
	def get_StoreId(self):
		# 'StoreId' body parameter identifying the store to query.
		return self.get_body_params().get('StoreId')
def set_StoreId(self,StoreId):
self.add_body_params('StoreId', StoreId) | [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
f84d21bcb1c3dcf50eec8e83c47db0eb90eada0f | e77b92df446f0afed18a923846944b5fd3596bf9 | /Inflearn_algo/section4_BinarySearch/pro3_Music_BS.py | 78e5a18de62527a544f7fb265a1f3d0e25738669 | [] | no_license | sds1vrk/Algo_Study | e40ca8eb348d1fc6f88d883b26195b9ee6f35b2e | fbbc21bb06bb5dc08927b899ddc20e6cde9f0319 | refs/heads/main | 2023-06-27T05:49:15.351644 | 2021-08-01T12:43:06 | 2021-08-01T12:43:06 | 356,512,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,409 | py | # 뮤직비디오 (결정 알고리즘)
# M개 이하로 짜르기
import sys
sys.stdin=open("input.txt","r")
n,m=map(int,input().split())
a=list(map(int,input().split()))
# Parametric (binary) search on the DVD capacity: the answer lies between 1
# (tightened below by the longest single song) and the total length of all songs.
lt=1
rt=sum(a)
res=0
def count(mid):
    # Greedily fill DVDs of capacity `mid` in song order; return how many are needed.
    # Start with a single DVD.
    cnt=1
    # Total length of the songs placed on the current DVD so far.
    hap=0
    for i in a:
        hap+=i
        if hap>mid:
            # Capacity exceeded: open a new DVD...
            cnt+=1
            # ...and make the current song its first track.
            hap=i
    return cnt
maxx=max(a)
while lt<=rt:
    # Candidate capacity to test.
    mid=(lt+rt)//2
    # If capacity `mid` fits everything on at most m DVDs, record it and shrink
    # rt to search for an even smaller feasible capacity.  The extra
    # `mid>=maxx` guard is required because every single song must fit on one
    # DVD (e.g. songs 1..9 with a 9-minute track: capacity below 9 never works).
    if mid>=maxx and count(mid)<=m:
        res=mid
        rt=mid-1
    else :
        # Capacity too small (needs more than m DVDs): grow lt.
        lt=mid+1
print(res) | [
"51287886+sds1vrk@users.noreply.github.com"
] | 51287886+sds1vrk@users.noreply.github.com |
0d007d7be2a8e9d963defe5cdede219ecbc18387 | 5d7ad4d331ee029101fe6a239db9d0e7eebedaae | /2BFS/71. Binary Tree Zigzag Level Order Traversal.py | b42fb0a663f8aa8b984b7c718f98932618353079 | [] | no_license | Iansdfg/9chap | 77587e1c99b34d13b49a86ed575ec89db421551e | d7b42f94c20f6a579ad59d9f894dcc2fbc5a0501 | refs/heads/master | 2022-11-15T02:21:33.791516 | 2022-11-09T06:24:43 | 2022-11-09T06:24:43 | 201,677,377 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | from lintcode import (
TreeNode,
)
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
from collections import deque
class Solution:
    """
    @param root: A Tree
    @return: A list of lists of integer include the zigzag level order traversal of its nodes' values.
    """
    def zigzag_level_order(self, root):
        # Standard BFS by level; every odd-depth level is emitted reversed.
        if not root:
            return []
        result = []
        frontier = deque([root])
        depth = 0
        while frontier:
            level_vals = []
            for _ in range(len(frontier)):
                node = frontier.popleft()
                level_vals.append(node.val)
                if node.left:
                    frontier.append(node.left)
                if node.right:
                    frontier.append(node.right)
            result.append(level_vals[::-1] if depth % 2 else level_vals)
            depth += 1
        return result
| [
"noreply@github.com"
] | Iansdfg.noreply@github.com |
9eb8aa52e4354fd7124a2cbb45bc1af55175c0a1 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03588/s218611558.py | 18c51109a42bf901002b08e02b1cd616be2108ed | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | N = int(input())
AB = [list(map(int,input().split())) for _ in range(N)]
# Sort pairs lexicographically by (A, B).
AB = sorted(AB)
#print(AB)
# Start from N plus the B value of the pair with the largest A.
ans = N + AB[-1][1]
for i in range(N):
    if i == 0:
        # Gap from 1 up to the first A value (exclusive).
        ans += AB[0][0] - 1
    else:
        # Add the smaller of the two coordinate gaps between consecutive pairs.
        ans += min(abs(AB[i][0]-AB[i-1][0])-1,abs(AB[i][1]-AB[i-1][1])-1)
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
5d2ca49923ba3a1183315f8fa2e3b16ee5fdc819 | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/InnerDetector/InDetExample/InDetSLHC_Example/share/ExtrapolationEngineTest_ITk_jobOptions.py | 359c9700d941710604b7d03b1f4f850021f23f80 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,822 | py | ###############################################################
#
# Job options
#
# This is for validation of ITk tracking geometry.
# Modified version of Tracking/TrkExtrapolation/TrkExUnitTests/share/ExtrapolationEngineTest_jobOptions.py. (TrkExUnitTests-00-00-08)
# See https://twiki.cern.ch/twiki/bin/view/Atlas/UpgradeSimulationInnerTrackerMigrationRel20p3p3#Validation_of_database_files
#==============================================================
#--------------------------------------------------------------
# ATLAS default Application Configuration options
#--------------------------------------------------------------
# Use McEventSelector so we can run with AthenaMP
import AthenaCommon.AtlasUnixGeneratorJob
#--------------------------------------------------------------
# Private Application Configuration options
#--------------------------------------------------------------
#--------------------------------------------------------------
# Geometry section
#--------------------------------------------------------------
from AthenaCommon.DetFlags import DetFlags
DetFlags.ID_setOn()
DetFlags.Calo_setOff()
DetFlags.Muon_setOff()
include("InDetSLHC_Example/preInclude.SLHC.py")
include("InDetSLHC_Example/preInclude.SiliconOnly.py")
# Full job is a list of algorithms
from AthenaCommon.AlgSequence import AlgSequence
job = AlgSequence()
# build GeoModel
if 'DetDescrVersion' not in dir():
DetDescrVersion = 'ATLAS-P2-ITK-01-00-00'
from AthenaCommon.GlobalFlags import globalflags
globalflags.DetDescrVersion = DetDescrVersion
from AtlasGeoModel import SetGeometryVersion
from AtlasGeoModel import GeoModelInit
from GeoModelSvc.GeoModelSvcConf import GeoModelSvc
GeoModelSvc = GeoModelSvc()
ServiceMgr += GeoModelSvc
GeoModelSvc.AtlasVersion = DetDescrVersion
from IOVDbSvc.CondDB import conddb
conddb.setGlobalTag('OFLCOND-SIM-00-00-00')
# switch the material loading off
from TrkDetDescrSvc.TrkDetDescrJobProperties import TrkDetFlags
TrkDetFlags.PixelBuildingOutputLevel = INFO
TrkDetFlags.SCT_BuildingOutputLevel = INFO
TrkDetFlags.TRT_BuildingOutputLevel = INFO
TrkDetFlags.ConfigurationOutputLevel = INFO
TrkDetFlags.TRT_BuildStrawLayers = True
TrkDetFlags.SLHC_Geometry = True
TrkDetFlags.MaterialDatabaseLocal = True
if TrkDetFlags.MaterialDatabaseLocal() is True :
TrkDetFlags.MaterialSource = 'COOL'
TrkDetFlags.MaterialVersion = 17
TrkDetFlags.MaterialSubVersion = ""
# splitGeo = DetDescrVersion.split('-')
# TrkDetFlags.MaterialMagicTag = splitGeo[0] + '-' + splitGeo[1] + '-' + splitGeo[2]
TrkDetFlags.MaterialMagicTag = DetDescrVersion
TrkDetFlags.MaterialStoreGateKey = '/GLOBAL/TrackingGeo/LayerMaterialITK'
TrkDetFlags.MaterialDatabaseLocalPath = './'
TrkDetFlags.MaterialDatabaseLocalName = 'AtlasLayerMaterial-'+DetDescrVersion+'.db'
# load the tracking geometry service
from TrkDetDescrSvc.AtlasTrackingGeometrySvc import AtlasTrackingGeometrySvc
#--------------------------------------------------------------
# Event related parameters
#--------------------------------------------------------------
# Number of events to be processed (default is until the end of
# input, or -1, however, since we have no input, a limit needs
# to be set explicitly, here, choose 10)
theApp.EvtMax = 1 # 100
ExToolOutputLevel = VERBOSE # INFO #
ExAlgorithmOutputLevel = INFO #
from AthenaCommon.AppMgr import ServiceMgr
# output level
ServiceMgr.MessageSvc.OutputLevel = INFO
# increase the number of letter reserved to the alg/tool name from 18 to 30
ServiceMgr.MessageSvc.Format = "% F%50W%S%7W%R%T %0W%M"
# to change the default limit on number of message
ServiceMgr.MessageSvc.defaultLimit = 9999999 # all messages
#--------------------------------------------------------------
# Tool setup
#--------------------------------------------------------------
# the magnetic field
from MagFieldServices import SetupField
from IOVDbSvc.CondDB import conddb
conddb.addOverride('/GLOBAL/BField/Map','BFieldMap-FullAsym-09-solTil3')
from TrkExEngine.AtlasExtrapolationEngine import AtlasExtrapolationEngine
ExtrapolationEninge = AtlasExtrapolationEngine(name='Extrapolation', nameprefix='Atlas', ToolOutputLevel=ExToolOutputLevel)
ToolSvc += ExtrapolationEninge
#--------------------------------------------------------------
# Algorithm setup
#--------------------------------------------------------------
# Add top algorithms to be run
from TrkExUnitTests.TrkExUnitTestsConf import Trk__ExtrapolationEngineTest
ExtrapolationEngineTest = Trk__ExtrapolationEngineTest('ExtrapolationEngineTest')
# parameters mode: 0 - neutral tracks, 1 - charged particles
ExtrapolationEngineTest.ParametersMode = 1
# do the full test backwards as well
ExtrapolationEngineTest.BackExtrapolation = False
# pT range for testing
ExtrapolationEngineTest.PtMin = 100000
ExtrapolationEngineTest.PtMax = 100000
# The test range in Eta
ExtrapolationEngineTest.EtaMin = -0.5
ExtrapolationEngineTest.EtaMax = 0.5
# Configure how you wanna run
ExtrapolationEngineTest.CollectSensitive = True
ExtrapolationEngineTest.CollectPassive = True
ExtrapolationEngineTest.CollectBoundary = True
# the path limit to test
ExtrapolationEngineTest.PathLimit = -1.
# give it the engine
ExtrapolationEngineTest.ExtrapolationEngine = ExtrapolationEninge
# output formatting
ExtrapolationEngineTest.OutputLevel = ExAlgorithmOutputLevel
job += ExtrapolationEngineTest # 1 alg, named 'ExtrapolationEngineTest'
#################################################################
theApp.Dlls += [ 'RootHistCnv' ]
theApp.HistogramPersistency = 'ROOT'
# --- load AuditorSvc
from AthenaCommon.ConfigurableDb import getConfigurable
# --- write out summary of the memory usage
# | number of events to be skip to detect memory leak
# | 20 is default. May need to be made larger for complete jobs.
ServiceMgr.AuditorSvc += getConfigurable('ChronoAuditor')()
# --- write out a short message upon entering or leaving each algorithm
#
theApp.AuditAlgorithms = True
theApp.AuditServices = True
#
# --- Display detailed size and timing statistics for writing and reading
ServiceMgr.AthenaPoolCnvSvc.UseDetailChronoStat = True
if not hasattr(ServiceMgr, 'THistSvc'):
from GaudiSvc.GaudiSvcConf import THistSvc
ServiceMgr += THistSvc()
# add the G4 validation output stream
ServiceMgr.THistSvc.Output += [ "val DATAFILE='ExtrapolationEngineTest.root' TYPE='ROOT' OPT='RECREATE'" ]
include("InDetSLHC_Example/postInclude.SLHC_Setup.py")
#==============================================================
#
# End of job options file
#
###############################################################
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
fca0cc8ccedd00d95f5501264f237a8c3a36c9b9 | e8cac4db53b22a28f7421ede9089bd3d4df81c82 | /TaobaoSdk/Request/TraderateAddRequest.py | 05131877e37c9437578f810351d4d96396b6af27 | [] | no_license | wangyu0248/TaobaoOpenPythonSDK | af14e84e2bada920b1e9b75cb12d9c9a15a5a1bd | 814efaf6e681c6112976c58ec457c46d58bcc95f | refs/heads/master | 2021-01-19T05:29:07.234794 | 2012-06-21T09:31:27 | 2012-06-21T09:31:27 | 4,738,026 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,718 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=4 sts=4 sw=4 et:
## @brief 新增单个评价(<font color="red">注:在评价之前需要对订单成功的时间进行判定(end_time),如果超过15天,不能再通过该接口进行评价</font>)
# @author wuliang@maimiaotech.com
# @date 2012-06-21 17:17:38
# @version: 0.0.0
import os
import sys
import time
def __getCurrentPath():
return os.path.normpath(os.path.join(os.path.realpath(__file__), os.path.pardir))
__modulePath = os.path.join(__getCurrentPath(), os.path.pardir)
__modulePath = os.path.normpath(__modulePath)
if __modulePath not in sys.path:
sys.path.insert(0, __modulePath)
## @brief <SPAN style="font-size:16px; font-family:'宋体','Times New Roman',Georgia,Serif;">新增单个评价(<font color="red">注:在评价之前需要对订单成功的时间进行判定(end_time),如果超过15天,不能再通过该接口进行评价</font>)</SPAN>
# <UL>
# </UL>
class TraderateAddRequest(object):
    """Request object for the TOP API ``taobao.traderate.add`` (add one trade rating).

    Note: the trade must have completed (``end_time``) within the last 15
    days; older trades can no longer be rated through this API.

    Request parameters (attributes):
        anony (bool, optional): whether the rating is anonymous
            (``true``/``false``); sellers cannot rate anonymously, and any
            value other than the two listed is coerced to ``false``.
        content (str, optional): rating text, at most 500 Chinese characters.
            Not needed when ``result`` is ``good``; required for ``neutral``
            or ``bad``.
        oid (int, optional): sub-order id.
        result (str, required): rating outcome -- ``good``, ``neutral`` or ``bad``.
        role (str, required): who is rating -- ``seller`` or ``buyer``.
        tid (int, required): trade id.
    """

    def __init__(self):
        super(self.__class__, self).__init__()
        # TOP API method name.
        self.method = "taobao.traderate.add"
        # Request timestamp; defaults to the creation time of the request.
        self.timestamp = int(time.time())
        # All parameters start unset; callers fill in the required ones.
        self.anony = None
        self.content = None
        self.oid = None
        self.result = None
        self.role = None
        self.tid = None
| [
"liyangmin@maimiaotech.com"
] | liyangmin@maimiaotech.com |
595102c2c26d2c6c754214f9360bfce04c596dad | 95c3c587907ae38b11faacc4d2ebe1df8f5b3335 | /Aug-13-ASSGN-NUMBERS-Q1-Lakshmipriya.py | 848179e0f96b514e18ff3af2c050186eb3818b62 | [] | no_license | sandhyalethakula/Iprimed_16_python | d59cb47d2d2a63c04a658c8b302505efc8f24ff4 | c34202ca155819747a5c5ac4a8a5511d425f41a1 | refs/heads/main | 2023-08-16T11:40:11.005919 | 2021-09-29T13:22:32 | 2021-09-29T13:22:32 | 411,956,547 | 0 | 0 | null | 2021-09-30T06:57:13 | 2021-09-30T06:57:12 | null | UTF-8 | Python | false | false | 468 | py | '''1.Write a program that reads a positive integer, n, from the user and then displays the sum of all of the integers from 1 to n.
The sum of the first n positive integers can be computed using the formula: sum = (n)(n + 1) / 2 '''
print('-'*20)
n = int(input("Enter a positive integer: ")) #Read the input from the user
total = n * (n+1) / 2 #Gauss formula for 1+2+...+n (true division yields a float)
print("The sum of the first",n,"positive integers",total)#Display the result
print('-'*20)
| [
"noreply@github.com"
] | sandhyalethakula.noreply@github.com |
fe8c41c0d29dc660a9392cc21375dc8544892ae4 | cffc460605febc80e8bb7c417266bde1bd1988eb | /since2020/ZeroJudge/ZeroJudge e539.py | 8f69b1d47da09e1f2512a1162f47440133be1139 | [] | no_license | m80126colin/Judge | f79b2077f2bf67a3b176d073fcdf68a8583d5a2c | 56258ea977733e992b11f9e0cb74d630799ba274 | refs/heads/master | 2021-06-11T04:25:27.786735 | 2020-05-21T08:55:03 | 2020-05-21T08:55:03 | 19,424,030 | 7 | 4 | null | null | null | null | UTF-8 | Python | false | false | 759 | py | '''
@judge ZeroJudge
@id e539
@name Circular
@source UVa 967
@tag Prime, Range Query
'''
from sys import stdin
from itertools import accumulate
from math import sqrt
# Sieve of Eratosthenes over [0, N): sieve[i] == 1 iff i is prime.
N = 1000000
sieve = [ 1 ] * N
sieve[0] = sieve[1] = 0
for x in range(2, int(sqrt(N) + 0.01) + 1):
  if sieve[x]:
    for y in range(x * x, N, x):
      sieve[y] = 0
def circle(n):
  # n is a circular prime iff every rotation of its digits is prime.
  # Doubling the digit string makes each rotation a length-L slice of it.
  m = str(n) * 2
  L = len(m) // 2
  return all(map(lambda x: sieve[int(m[x:x+L])], range(L)))
# dp[i] = number of circular primes in [0, i] (prefix sums for O(1) range queries).
dp = list(accumulate([ 1 if circle(x) else 0 for x in range(N) ]))
def solve(a, b):
  # Count circular primes in [a, b]; assumes a >= 1 (dp[a-1] would wrap for a == 0).
  res = dp[b] - dp[a - 1]
  if res == 0:
    return 'No Circular Primes.'
  if res == 1:
    return '1 Circular Prime.'
  return f'{res} Circular Primes.'
# Read "a b" query lines until the terminating "-1".
for line in stdin:
  if line.strip() != '-1':
print(solve(*map(int, line.split()))) | [
"m80126colin@gmail.com"
] | m80126colin@gmail.com |
e147389a483460573bbdcdffcce154acfaca0467 | 835fe55f4ea82b4e92fc3a07336c61c9a4726a44 | /conans/server/rest/controllers/file_upload_download_controller.py | 20d28124339ceb95070a2db072e893f7b9a8686a | [
"MIT"
] | permissive | tru/conan | 0b1ed247b4cf4cb4f66dc5c302edabfb3589d37b | b9266be3cd026e4a8ea1262e557f4259ed36e9f1 | refs/heads/develop | 2021-08-27T18:22:19.614272 | 2016-06-07T17:34:23 | 2016-06-07T17:34:23 | 60,776,756 | 1 | 0 | null | 2017-02-07T22:28:10 | 2016-06-09T13:26:31 | Python | UTF-8 | Python | false | false | 2,813 | py | from conans.server.rest.controllers.controller import Controller
from bottle import request, static_file, FileUpload, cached_property
from conans.server.service.service import FileUploadDownloadService
import os
import sys
from unicodedata import normalize
import six
class FileUploadDownloadController(Controller):
    """
    Expose token-signed file download (GET) and upload (PUT) endpoints
    under this controller's route, backed by the server storage directory.
    """
    def attach_to(self, app):
        # Root directory of the server's file store on disk.
        storage_path = app.file_manager.paths.store
        service = FileUploadDownloadService(app.updown_auth_manager, storage_path)
        @app.route(self.route + '/<filepath:path>', method=["GET"])
        def get(filepath):
            # Authorization travels as a signed token in the query string.
            token = request.query.get("signature", None)
            file_path = service.get_file_path(filepath, token)
            # https://github.com/kennethreitz/requests/issues/1586
            mimetype = "x-gzip" if filepath.endswith(".tgz") else "auto"
            return static_file(os.path.basename(file_path),
                               root=os.path.dirname(file_path),
                               mimetype=mimetype)
        @app.route(self.route + '/<filepath:path>', method=["PUT"])
        def put(filepath):
            token = request.query.get("signature", None)
            file_saver = ConanFileUpload(request.body, None,
                                         filename=os.path.basename(filepath), headers=request.headers)
            # NOTE(review): normpath does not strip ".." components; path
            # traversal is presumably rejected by the token check -- confirm.
            abs_path = os.path.abspath(os.path.join(storage_path, os.path.normpath(filepath)))
            # Body is a stringIO (generator)
            service.put_file(file_saver, abs_path, token, request.content_length)
            return
class ConanFileUpload(FileUpload):
    """Code copied from bottle but removing filename normalizing
    FIXME: Review bottle.FileUpload and analyze possible security or general issues """
    @cached_property
    def filename(self):
        ''' Name of the file on the client file system, but normalized to ensure
        file system compatibility. An empty filename is returned as 'empty'.
        Only ASCII letters, digits, dashes, underscores and dots are
        allowed in the final filename. Accents are removed, if possible.
        Whitespace is replaced by a single dash. Leading or tailing dots
        or dashes are removed. The filename is limited to 255 characters.
        '''
        # NOTE(review): the docstring above is inherited from bottle; the
        # whitespace/dash sanitizing it describes is intentionally NOT applied here.
        fname = self.raw_filename
        # On Python 2 the raw filename may arrive as bytes; decode to unicode first.
        if six.PY2:
            if not isinstance(fname, unicode):
                fname = fname.decode('utf8', 'ignore')
        # Strip accents and drop any remaining non-ASCII characters.
        fname = normalize('NFKD', fname).encode('ASCII', 'ignore').decode('ASCII')
        # Keep only the basename (also handles Windows backslash separators).
        fname = os.path.basename(fname.replace('\\', os.path.sep))
        # bottle's extra character sanitizing, deliberately disabled here:
        # fname = re.sub(r'[^a-zA-Z0-9-_.\s]', '', fname).strip()
        # fname = re.sub(r'[-\s]+', '-', fname).strip('.-')
return fname[:255] or 'empty'
| [
"lasote@gmail.com"
] | lasote@gmail.com |
f8e181fb1435b0ee9e82aadf9176c9719f5ac972 | 3a78046505ac496c51978ddcba0f33c6acbeeb98 | /meetings/forms.py | a3fb19ed97b1f4c134c3c7131111680e5cff1c77 | [] | no_license | suryanarayadev/clusil-intranet | 1c46d2508f37050cbb9bd96dad466b24f715d8c2 | cc3fd8d3f35c35e30f72cc501962ea4953ca1945 | refs/heads/master | 2021-01-17T21:40:51.154315 | 2015-09-18T21:04:53 | 2015-09-18T21:04:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | # coding=utf-8
from datetime import date
from django.forms import Form, ModelForm, TextInput, Textarea, HiddenInput, CharField, ModelChoiceField
from django.conf import settings
from .models import Meeting, Location
#location form
class LocationForm(ModelForm):
  """Model form for creating/editing a meeting Location (name + address)."""
  class Meta:
    model = Location
    fields = ( 'name', 'address', )
    widgets = {
      # Multi-line widget for the postal address.
      'address' : Textarea(),
    }
#modify location wizard forms
class ListLocationsForm(Form):
  """Selection form listing every Location (used by the modify-location wizard)."""
  locations = ModelChoiceField(queryset=Location.objects.all())
#meeting form
class MeetingForm(ModelForm):
  """Model form for creating a Meeting (French UI strings).

  Adds a non-model `additional_message` field whose placeholder says it is
  a message to pass along in the invitation -- presumably consumed by the
  view that sends it; confirm there.
  """
  additional_message = CharField(label='Message supplémentaire',widget=Textarea(attrs={'placeholder': "Message à transmettre dans l'inviation.",}),required=False)
  class Meta:
    model = Meeting
    fields = ( 'title', 'when', 'time', 'location', 'additional_message', )
    widgets = {
      # Title is pre-filled and read-only in this form.
      'title' : TextInput(attrs={'readonly': 'readonly', }),
      # Native HTML5 date/time pickers.
      'when' : TextInput(attrs={'type': 'date', }),
      'time' : TextInput(attrs={'type': 'time', }),
    }
#modify wizard forms
class ListMeetingsForm(Form):
  """Selection form listing all meetings ordered by date (modify wizard)."""
  meetings = ModelChoiceField(queryset=Meeting.objects.all().order_by('when'))
class ModifyMeetingForm(ModelForm):
  """Model form for editing an existing Meeting (no extra message field)."""
  class Meta:
    model = Meeting
    fields = ( 'title', 'when', 'time', 'location', )
    widgets = {
      # Native HTML5 date/time pickers.
      'when' : TextInput(attrs={'type': 'date', }),
      'time' : TextInput(attrs={'type': 'time', }),
    }
| [
"pst@libre.lu"
] | pst@libre.lu |
29a1f909aaed754a504a34fa35b48c5730c71bed | b0303d4cd701574d494be4330ab850ed182520a5 | /Defining Classes/Lab/Solutions/01. Rhombus of Stars.py | 28c4df320c6073ec3efe607ec75bb296fdb96d2f | [] | no_license | rimisarK-blue/Python---OOP | 7e9dec781aac8385d45d2e1474afa6980960f4ff | 73dace732eebf7581466a6fb22b941c30255c650 | refs/heads/master | 2023-03-29T17:28:36.477434 | 2021-03-26T14:04:11 | 2021-03-26T14:04:11 | 342,049,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | def draw_line(count, symbol, offset_count=0):
offset = offset_count * ' '
counted = (f"{symbol} "* count).strip()
print(f"{offset}{counted}")
def draw_rhombus(n):
    """Print a rhombus of '*' whose widest (middle) row holds n stars."""
    # Row widths rise 1..n then fall back n-1..1; the offset centres each row.
    widths = list(range(1, n + 1)) + list(range(n - 1, 0, -1))
    for width in widths:
        draw_line(width, '*', n - width)
n = int(input())
draw_rhombus(n) | [
"rimisark92@gmail.com"
] | rimisark92@gmail.com |
641c99b05541aae373077e10268e6323c51d3165 | 971e0efcc68b8f7cfb1040c38008426f7bcf9d2e | /tests/model_control/detailed/transf_Quantization/model_control_one_enabled_Quantization_LinearTrend_BestCycle_LSTM.py | fc40645070b6c9b7dc80e07988908a29182d3e88 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | antoinecarme/pyaf | a105d172c2e7544f8d580d75f28b751351dd83b6 | b12db77cb3fa9292e774b2b33db8ce732647c35e | refs/heads/master | 2023-09-01T09:30:59.967219 | 2023-07-28T20:15:53 | 2023-07-28T20:15:53 | 70,790,978 | 457 | 77 | BSD-3-Clause | 2023-03-08T21:45:40 | 2016-10-13T09:30:30 | Python | UTF-8 | Python | false | false | 158 | py | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Quantization'] , ['LinearTrend'] , ['BestCycle'] , ['LSTM'] ); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
7c3a83c8ebac17237a4c5e636db2c2cc32126d39 | c552bc4a6369654f001cf8ddef66b3079ed26870 | /src/ui/widgets/elevation.py | 54384f1fd2849904de9acb444b30fc9c709dc1a4 | [
"MIT"
] | permissive | SqrtMinusOne/GeoTIFF-3d | a59258fc691365af6efea706bb157b1e60a79854 | cf4bda4b989f0c09fe91f676d4094feb75aa54e7 | refs/heads/master | 2020-05-18T13:55:10.068499 | 2019-10-08T03:52:42 | 2019-10-08T03:53:36 | 184,456,538 | 5 | 0 | MIT | 2019-05-17T15:14:29 | 2019-05-01T17:46:12 | Python | UTF-8 | Python | false | false | 5,017 | py | import sys
import numpy as np
from PyQt5.QtCore import QRect, QRectF, Qt, QLine, QPointF, QPoint
from PyQt5.QtGui import QLinearGradient, QPainter, QPen, QPolygonF
from PyQt5.QtWidgets import (QApplication, QGraphicsItem, QGraphicsScene,
QGraphicsView)
__all__ = ['ElevationGraphWidget']
def getRect(widget):
    """Return the elevation-bar rectangle for `widget`: 10 px top/bottom
    margin, width equal to a quarter of the widget height, centered
    horizontally."""
    full_w, full_h = widget.width(), widget.height()
    top = 10
    bar_h = full_h - 20
    bar_w = int(full_h / 4)
    left = int((full_w - bar_w) / 2)
    return QRect(left, top, bar_w, bar_h)
class ElevationSquare(QGraphicsItem):
    """A red-green filled square with altitudes"""
    def __init__(self, start, end, levels, parent=None):
        # start/end: altitude range mapped to the bottom/top edges of the bar.
        # levels: altitude values drawn as labels to the left of the bar.
        super().__init__(parent)
        self.start, self.end = start, end
        self.levels = levels
        # Placeholder geometry; recomputed from the widget size on every paint().
        self.rect = QRect(0, 0, 100, 100)
    def paint(self, painter: QPainter, option, widget=None):
        painter.setPen(QPen(Qt.black, 0))
        self.rect = getRect(widget)
        # Vertical gradient: red at the top (high altitude), green at the bottom.
        gradient = QLinearGradient(self.rect.topLeft(), self.rect.bottomLeft())
        gradient.setColorAt(0, Qt.red)
        gradient.setColorAt(1, Qt.green)
        painter.setBrush(gradient)
        painter.drawRect(self.rect)
        metrics = painter.fontMetrics()
        for level in self.levels:
            text = str(int(level))
            w, h = metrics.width(text), metrics.height()
            # Map the altitude linearly into the bar's y range, centering
            # the label vertically (h / 2) on its level.
            y = self.rect.height() - (level - self.start) / (
                self.end -
                self.start) * self.rect.height() + self.rect.y() - h / 2
            # Right-align the label 10 px to the left of the bar.
            x = self.rect.x() - w - 10
            text_rect = QRectF(x, y, w, h)
            painter.drawText(text_rect, Qt.AlignRight, text)
    def boundingRect(self):
        # Slightly enlarged rect so repaints cover the outline.
        # NOTE(review): width/height grow by one `adjust` while x/y shift by
        # one -- the right/bottom edges are not padded; confirm intentional.
        adjust = 2
        return QRectF(self.rect.x() - adjust,
                      self.rect.y() - adjust,
                      self.rect.width() + adjust,
                      self.rect.height() + adjust)
class CameraTri(QGraphicsItem):
"""A triangle showing a level of the camera"""
def __init__(self, start, end, pos, parent=None):
super().__init__(parent)
self.start, self.end = start, end
self.pos = pos
self.line = QLine(0, 0, 100, 0)
def updatePos(self, pos):
self.pos = pos
self.update()
def getPoint(self):
if self.pos < self.start:
return self.line.p1()
elif self.pos > self.end:
return self.line.p2()
else:
c = (self.pos - self.start) / (self.end - self.start)
return self.line.p1() * (1 - c) + self.line.p2() * c
def paint(self, painter: QPainter, option, widget=None):
rect = getRect(widget)
delta = QPoint(5, 0)
self.line = QLine(rect.bottomRight() + delta * 2,
rect.topRight() + delta * 2)
point = self.getPoint()
painter.setPen(QPen(Qt.black, 0))
painter.setBrush(Qt.black)
offset_h = QPointF(10, 0)
offset_v = QPointF(0, 10)
points = QPolygonF((
QPointF(point),
QPointF(point + offset_h - offset_v),
QPointF(point + offset_h + offset_v),
QPointF(point)
))
painter.drawPolygon(points, 4)
def boundingRect(self):
offset = QPointF(20, 0)
return QRectF(self.line.p1() - offset, self.line.p2() + offset)
class ElevationGraphWidget(QGraphicsView):
"""A widget to show an elevation of camera relative to the
given altitude"""
def __init__(self, start, end, pos, width=240, height=240,
levels=None, parent=None):
super().__init__(parent)
self.start, self.end = start, end
self.pos = pos
self.levels = np.linspace(start, end, 5) if levels is None else levels
self.resize(width, height)
scene = QGraphicsScene(self)
scene.setItemIndexMethod(QGraphicsScene.NoIndex)
scene.setSceneRect(0, 0, self.width(), self.height())
self.setScene(scene)
self.setCacheMode(self.CacheBackground)
self.setViewportUpdateMode(self.BoundingRectViewportUpdate)
self.setRenderHint(QPainter.Antialiasing)
self.setTransformationAnchor(self.AnchorUnderMouse)
self.setSizeAdjustPolicy(self.AdjustToContents)
self.setAlignment(Qt.AlignLeft | Qt.AlignTop)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
self.setMinimumSize(60, 60)
self.initContent()
def initContent(self):
self.square = ElevationSquare(self.start, self.end, self.levels)
self.tri = CameraTri(self.start, self.end, self.pos)
self.scene().addItem(self.square)
self.scene().addItem(self.tri)
def updatePos(self, pos):
"""Update a position of the camera
"""
self.tri.updatePos(int(pos))
self.scene().update()
if __name__ == "__main__":
app = QApplication(sys.argv)
widget = ElevationGraphWidget(0, 1000, 500)
widget.show()
app.exec_()
| [
"thexcloud@gmail.com"
] | thexcloud@gmail.com |
a79f875b03826b8306266039090b10114425cd2a | c91d029b59f4e6090a523bf571b3094e09852258 | /src/itat/__init__.py | 70eb7622292aa24d3ad4bea7f189356399b1170a | [
"MIT"
] | permissive | anselmobd/fo2 | d51b63ebae2541b00af79448ede76b02638c41f0 | 8e7f8f3d9a296c7da39d0faf38a266e9c6c162ab | refs/heads/master | 2023-08-31T19:59:33.964813 | 2023-08-31T19:50:53 | 2023-08-31T19:50:53 | 92,856,677 | 1 | 0 | MIT | 2023-04-21T21:50:46 | 2017-05-30T17:04:27 | Python | UTF-8 | Python | false | false | 44 | py | default_app_config = 'itat.apps.ItatConfig'
| [
"anselmo.blanco.dominguez+github@gmail.com"
] | anselmo.blanco.dominguez+github@gmail.com |
f5e424239ab2839fe0d0d067878f8928e02544b2 | 16050a733a926cbbfd7207fa8ce00db1a8fee81c | /apps/API_VK/command/commands/AdminCommands/Control.py | b60065962c18e8e53fb2bcbbef1a889226982424 | [] | no_license | thallkeer/xoma163site | bd87ed4924c244b666343ab30e66477adfad84fa | f86f61c060233028dced4ae48d44015c62625358 | refs/heads/master | 2021-05-17T14:27:33.785029 | 2020-03-19T16:32:57 | 2020-03-19T16:32:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | from apps.API_VK.command.CommonCommand import CommonCommand
# ToDo: get chat by id or name
class Control(CommonCommand):
def __init__(self):
names = ["управление", "сообщение"]
help_text = "Управление - отправление сообщение в любую конфу"
detail_help_text = "Управление (N,M) - N - chat_id, M - сообщение"
super().__init__(names, help_text, detail_help_text, access='admin', args=2, int_args=[0])
def start(self):
msg_chat_id = self.vk_event.args[0]
msg = self.vk_event.params_without_keys.split(' ', 1)[1]
self.vk_bot.send_message(self.vk_bot.get_group_id(msg_chat_id), msg)
| [
"Xoma163rus@gmail.com"
] | Xoma163rus@gmail.com |
45aec375bad35993810ff1642d6699b3217130a7 | c18bfe1c2c78962ebcfb4296697324dbadbcca48 | /build/my_personal_robotic_companion/robot_model/collada_parser/catkin_generated/pkg.installspace.context.pc.py | 5ece18fad88f276b8a09448fef516eca9a4157db | [] | no_license | MIsmailKhan/SERO | e1ac693752c4201f987a13d3dff2ece69bb6e59d | da4956682741c14c2eb6c49ea83e9d76987ce1ff | refs/heads/master | 2021-01-17T05:26:01.160822 | 2017-06-28T17:30:44 | 2017-06-28T17:30:44 | 95,693,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 541 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/ismail/catkin_ws2/install/include;/usr/include".split(';') if "/home/ismail/catkin_ws2/install/include;/usr/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;urdf_parser_plugin".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lcollada_parser".split(';') if "-lcollada_parser" != "" else []
PROJECT_NAME = "collada_parser"
PROJECT_SPACE_DIR = "/home/ismail/catkin_ws2/install"
PROJECT_VERSION = "1.11.7"
| [
"ismailkhan777@gmail.com"
] | ismailkhan777@gmail.com |
d5cb86c0a9193341d40321d7fcab8fc9e816be35 | 882de85f0a5e99320848ff2e140723888c075420 | /plugins/houdini/create/create_vbd_cache.py | eb836848ca0d4299d013214b2c636ed243fd265e | [
"MIT"
] | permissive | Panda-Luffy/reveries-config | 60fb9511f9c4ff9bb9f0c74e2a9d2c390684a033 | 09fb5be23816031e1407b02669049322fd6a8c16 | refs/heads/master | 2020-05-16T15:35:47.027217 | 2019-04-23T11:27:21 | 2019-04-23T11:27:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | from avalon import houdini
class CreateVDBCache(houdini.Creator):
"""OpenVDB from Geometry ROP"""
label = "VDB Cache"
family = "reveries.vdbcache"
icon = "cloud"
def __init__(self, *args, **kwargs):
super(CreateVDBCache, self).__init__(*args, **kwargs)
# Remove the active, we are checking the bypass flag of the nodes
self.data.pop("active", None)
# Set node type to create for output
self.data["node_type"] = "geometry"
def process(self):
instance = super(CreateVDBCache, self).process()
parms = {"sopoutput": "$HIP/pyblish/%s.$F4.vdb" % self.name,
"initsim": True}
if self.nodes:
node = self.nodes[0]
parms.update({"soppath": node.path()})
instance.setParms(parms)
| [
"davidlatwe@gmail.com"
] | davidlatwe@gmail.com |
dbb6d5cb1e195b116fb37bd4c544da4e49b82da1 | ea96ef74bc93762a0ed5d616a831259daf24992e | /knesset_data/dataservice/tests/base/test_exceptions.py | fb074ac802766e8d7a512d55097d6591e061ea20 | [] | no_license | alonisser/knesset-data-python | ca41675a31c6ea1a228fb43ad50b90dfa5c8d60b | 39aeb40edbb6c3b7af5967a90ac14bbf262fa6ad | refs/heads/master | 2021-01-22T20:44:28.734243 | 2017-07-02T08:38:53 | 2017-07-02T08:38:53 | 85,351,225 | 0 | 0 | null | 2017-03-17T20:16:18 | 2017-03-17T20:16:18 | null | UTF-8 | Python | false | false | 3,394 | py | import unittest
from datetime import datetime
from knesset_data.dataservice.committees import Committee, CommitteeMeeting
from knesset_data.dataservice.exceptions import KnessetDataServiceRequestException, KnessetDataServiceObjectException
from knesset_data.dataservice.mocks import MockMember
from knesset_data.utils.testutils import data_dependant_test
class CommitteeWithVeryShortTimeoutAndInvalidService(Committee):
DEFAULT_REQUEST_TIMEOUT_SECONDS = 1
METHOD_NAME = "Invalid Method Name"
class CommitteeMeetingWithVeryShortTimeoutAndInvalidService(CommitteeMeeting):
DEFAULT_REQUEST_TIMEOUT_SECONDS = 1
METHOD_NAME = "FOOBARBAZBAX"
class TestDataServiceRequestExceptions(unittest.TestCase):
def test_member_exception(self):
# get_page - raises an exception as soon as it's encountered
exception = None
try:
list(MockMember.get_page())
except Exception, e:
exception = e
self.assertEqual(exception.message, "member with exception on init")
# get - raises an exception as soon as it's encountered
exception = None
try:
MockMember.get(215)
except Exception, e:
exception = e
self.assertEqual(exception.message, "member with exception on get")
def test_member_skipped_exceptions(self):
# get_page with skip_exceptions - yields exception objects on error
self.assertEqual([o.message if isinstance(o, KnessetDataServiceObjectException) else o.id
for o in MockMember.get_page(skip_exceptions=True)],
[200, 201, 202, 'member with exception on init', 'member with exception on parse'])
@data_dependant_test()
def test_committee(self):
exception = None
try:
CommitteeWithVeryShortTimeoutAndInvalidService.get(1)
except KnessetDataServiceRequestException as e:
exception = e
self.assertIsInstance(exception, KnessetDataServiceRequestException)
self.assertListEqual([
exception.knesset_data_method_name,
exception.knesset_data_service_name,
exception.url,
str(exception.message)
], [
'Invalid Method Name',
'committees',
'http://online.knesset.gov.il/WsinternetSps/KnessetDataService/CommitteeScheduleData.svc/Invalid%20Method%20Name(1)',
"('Connection aborted.', error(104, 'Connection reset by peer'))",
])
@data_dependant_test()
def test_committee_meeting(self):
exception = None
try:
CommitteeMeetingWithVeryShortTimeoutAndInvalidService.get(1, datetime(2016, 1, 1))
except KnessetDataServiceRequestException as e:
exception = e
self.assertIsInstance(exception, KnessetDataServiceRequestException)
self.assertListEqual([
exception.knesset_data_method_name,
exception.knesset_data_service_name,
exception.url,
str(exception.message)
], [
'FOOBARBAZBAX',
'committees',
'http://online.knesset.gov.il/WsinternetSps/KnessetDataService/CommitteeScheduleData.svc/FOOBARBAZBAX?CommitteeId=%271%27&FromDate=%272016-01-01T00%3A00%3A00%27',
"('Connection aborted.', error(104, 'Connection reset by peer'))",
])
| [
"ori@uumpa.com"
] | ori@uumpa.com |
127c9acd8a9765018edf85f3ff4d9c51c329f2f6 | f0a0aa6a5fe0ded715bb65c78dbedd9b2ca24e2f | /100DaysOfPython/Day23/TurtleCrossingProject/car_manager.py | 41842e2f967a34cc57b00e5231eeb43ea972cf9f | [] | no_license | DeepanshuSarawagi/python | bb911dd8dfc2d567a2c51679bb10c3f37f11e500 | a7b4ea3a0b6f1f373df3220ca655e575ae401d09 | refs/heads/master | 2022-04-26T17:46:39.301757 | 2021-08-25T15:08:14 | 2021-08-25T15:08:14 | 228,613,467 | 1 | 0 | null | 2022-04-22T23:32:41 | 2019-12-17T12:38:01 | Python | UTF-8 | Python | false | false | 848 | py | from turtle import Turtle
import random
COLORS = ["red", "orange", "yellow", "green", "blue", "purple"]
STARTING_MOVE_DISTANCE = 5
MOVE_INCREMENT = 5
class CarManager:
def __init__(self):
self.all_cars = []
self.car_speed = STARTING_MOVE_DISTANCE
def create_car(self):
random_chance = random.randint(1, 6)
if random_chance == 1:
new_car = Turtle("square")
new_car.shapesize(stretch_wid=1, stretch_len=2)
new_car.penup()
new_car.color(random.choice(COLORS))
random_y = random.randint(-250, 250)
new_car.goto(300, random_y)
self.all_cars.append(new_car)
def move_cars(self):
for car in self.all_cars:
car.backward(self.car_speed)
def level_up(self):
self.car_speed += MOVE_INCREMENT
| [
"deepanshusarawagi@gmail.com"
] | deepanshusarawagi@gmail.com |
5b4ba8b693c93e992fdc3aea4e2d9e67832dea69 | f63028878311f21f73ed21f9bc88a0fd2ba8ba88 | /01.python/ch09/ex21.py | 6b1414f677e5d2c1f87989df1df683d52416e500 | [] | no_license | nugeat23/workspace | ac12b93b0cb826206138aa2262382b0e6389977b | 221344b95daa40c3ba66d27e04cbf9dae3172edc | refs/heads/master | 2023-07-14T20:37:32.851769 | 2021-09-01T08:55:01 | 2021-09-01T08:55:01 | 383,780,263 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | score = 88,95,70,100,99
print(score)
print(type(score))
score = 88,
print(score)
print(type(score))
score = 88
print(score)
print(type(score))
# tuple() --->tuple 변환 함수
# list() --->list 변환 함수
| [
"nugeat23@gmail.com"
] | nugeat23@gmail.com |
e4d66bd82a67e533b833a4f380e67ced94e9c950 | 8016e033484d3cb88a4ee9b82bd3ca08557c12aa | /teclado_flow/oop/employee_with_decorator.py | 8bf063ea537f272bfd325b187915cbc9bb737313 | [] | no_license | keys4words/python | 72ecf5de80b14ad3a94abe1d48e82035a2f0fa3d | 08431836498e6caed8e01cbc3548b295b69056fe | refs/heads/master | 2021-06-16T19:42:21.294976 | 2020-04-30T14:40:24 | 2020-04-30T14:40:24 | 187,210,896 | 0 | 0 | null | 2021-03-20T01:25:04 | 2019-05-17T12:16:40 | Python | UTF-8 | Python | false | false | 796 | py | class Student:
def __init__(self, first, last):
self.first = first
self.last = last
# self.email = first.lower() + '.' + last.lower() + '@gmail.com'
@property
def email(self):
return '{}.{}@gmail.com'.format(self.first.lower(), self.last.lower())
@property
def fullname(self):
return '{} {}'.format(self.first, self.last)
@fullname.setter
def fullname(self, name):
first, last = name.split(' ')
self.first = first
self.last = last
@fullname.deleter
def fullname(self):
print('Delete Name!')
self.first = None
self.last = None
emp1 = Student('John', 'Rembo')
emp1.first = 'Jim'
emp1.fullname = 'James Bond'
print(emp1.fullname)
print(emp1.email)
del emp1.fullname
| [
"keys4words@gmail.com"
] | keys4words@gmail.com |
bb3f848cbd7e7206f778f4b3e89beca4f54cd461 | 67d8173a716da10a7350213d98938aae9f2115ce | /LeetCode/LC_PY_ANSWERS/random-pick-with-blacklist.py | 951cb20be9569d8fe879dea6da79554abcb45a75 | [
"MIT"
] | permissive | jxie0755/Learning_Python | 94490d41bdf93acf8396f843328e38b6da310b0f | 143422321cbc3715ca08f6c3af8f960a55887ced | refs/heads/master | 2021-11-02T22:47:35.790239 | 2021-09-26T04:26:23 | 2021-09-26T04:26:23 | 101,445,132 | 0 | 2 | null | 2019-02-19T15:48:44 | 2017-08-25T22:00:16 | Python | UTF-8 | Python | false | false | 1,366 | py | # Time: ctor: O(b)
# pick: O(1)
# Space: O(b)
import random
class Solution(object):
def __init__(self, N, blacklist):
"""
:type N: int
:type blacklist: List[int]
"""
self.__n = N - len(blacklist)
self.__lookup = {}
white = iter(set(range(self.__n, N)) - set(blacklist))
for black in blacklist:
if black < self.__n:
self.__lookup[black] = next(white)
def pick(self):
"""
:rtype: int
"""
index = random.randint(0, self.__n - 1)
return self.__lookup[index] if index in self.__lookup else index
# Time: ctor: O(blogb)
# pick: O(logb)
# Space: O(b)
import random
class Solution2(object):
def __init__(self, N, blacklist):
"""
:type N: int
:type blacklist: List[int]
"""
self.__n = N - len(blacklist)
blacklist.sort()
self.__blacklist = blacklist
def pick(self):
"""
:rtype: int
"""
index = random.randint(0, self.__n - 1)
left, right = 0, len(self.__blacklist) - 1
while left <= right:
mid = left + (right - left) // 2
if index + mid < self.__blacklist[mid]:
right = mid - 1
else:
left = mid + 1
return index + left
| [
"30805062+jxie0755@users.noreply.github.com"
] | 30805062+jxie0755@users.noreply.github.com |
24ac38582013f5f4e58677f7f4c5ec41d47a34df | bf7c60d6bb4c3f35dd3d4114a3457026c6f6b95c | /blog/forms.py | 38901f11dbe5f9bb27a734edade7db4214fec740 | [] | no_license | zerofuxor/ContentQ-CMS | 2a093de904b332134abf851bd22f1a70c47e87dc | db7154910d3776ba3daf90f97fccb1cc51e5bf94 | refs/heads/master | 2021-01-17T07:45:49.614678 | 2011-01-07T18:16:32 | 2011-01-07T18:55:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,443 | py | # Copyright 2010 Jose Maria Zambrana Arze <contact@josezambrana.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from google.appengine.ext.db import djangoforms
from django import forms
from blog import models
from common import forms as common_forms
TYPE_CHOICES = (('page','Page'),('post','Post'))
class PostCategoryForm(common_forms.CategoryForm):
class Meta:
model = models.PostCategory
exclude = ['uuid', 'slug', 'deleted_at']
class PostItemForm(common_forms.BaseContentForm):
class Meta:
model = models.PostItem
exclude = ['uuid', 'slug', 'plain_description','created_at', 'updated_at', 'deleted_at']
def __init__(self, *args, **kwargs):
super(PostItemForm, self).__init__(*args, **kwargs)
self.fields['category'].widget = forms.Select(choices=models.PostCategory.get_choices())
self.fields.keyOrder = ["name", "category", "status","description", "body", "tags", "meta_desc"]
| [
"contact@josezambrana.com"
] | contact@josezambrana.com |
15fb0a5f474eae9654c88030017361c3699aee95 | 9184e230f8b212e8f686a466c84ecc89abe375d1 | /arcseventdata/applications/make-tobyfit-par.py | da6c3877893937b116b75b899abe6c2270c007ab | [] | no_license | danse-inelastic/DrChops | 75b793d806e6351dde847f1d92ab6eebb1ef24d2 | 7ba4ce07a5a4645942192b4b81f7afcae505db90 | refs/heads/master | 2022-04-26T17:37:41.666851 | 2015-05-02T23:21:13 | 2015-05-02T23:21:13 | 34,094,584 | 0 | 1 | null | 2020-09-10T01:50:10 | 2015-04-17T03:30:52 | Python | UTF-8 | Python | false | false | 3,694 | py | #!/usr/bin/env python
import journal
info = journal.info('make-merlin-par')
def getpixelinfo(ARCSxml):
info.log('parsing acrs xml: %s' % ARCSxml)
from instrument.nixml import parse_file
instrument = parse_file(ARCSxml)
info.log('getting detector axes')
from arcseventdata.GetDetectorAxesInfo import getDetectorAxes
detaxes = getDetectorAxes(instrument)
npacks, ndetsperpack, npixelsperdet = [axis.size() for axis in detaxes]
info.log('getting pixel radii and heights')
from arcseventdata.getpixelsizes import getpixelsizes
radii, heights = getpixelsizes(
instrument, npacks, ndetsperpack, npixelsperdet)
widths = radii*2.
info.log('getting pixel L2, phis, psis')
from arcseventdata import getinstrumentinfo
ii = getinstrumentinfo(ARCSxml)
dists = ii['dists']
phis = ii['phis']
psis = ii['psis']
sas = ii['solidangles']
from reduction.units import length, angle
dists = dists.I
phis = phis.I
psis = psis.I
sas = sas.I
# 06/30/2009: was told that the last two columns are angles
da1 = widths/dists
da2 = sas/da1
dists.shape = phis.shape = psis.shape = widths.shape = heights.shape = -1,
da1.shape = da2.shape = -1,
#return dists, phis, psis, widths, heights
return dists, phis, psis, da1, da2
def getpixelinfo_mergedpixels(ARCSxml, pixelresolution):
npixels = 128
assert npixels%pixelresolution==0
from arcseventdata.combinepixels import combinepixels, geometricInfo, geometricInfo_MergedPixels
from histogram import axis
pixelaxis = axis('pixelID', range(0, 128, pixelresolution))
info.log('merging pixels')
phi_p, psi_p, dist_p, sa_p, dphi_p, dpsi_p = combinepixels(ARCSxml, pixelaxis, pixelresolution)
positions, radii, heights = geometricInfo(ARCSxml)
positions, radii, heights = geometricInfo_MergedPixels(positions, radii, heights, pixelresolution)
widths = radii*2
phis = phi_p.I
psis = psi_p.I
dists = dist_p.I
sas = sa_p.I
# 06/30/2009: was told that the last two columns are angles
da1 = widths/dists
da2 = sas/da1
dists.shape = phis.shape = psis.shape = widths.shape = heights.shape = -1,
da1.shape = da2.shape = -1,
#return dists, phis, psis, widths, heights
return dists, phis, psis, da1, da2
def writePar(stream, dists, phis, psis, widths, heights):
info.log('writing to par file')
n = len(dists)
assert n==len(phis) and n==len(psis) and n==len(widths) and n==len(heights)
def format(f):
return '%8.3f' % f
stream.write(str(n)+'\n')
for line in zip(dists, phis, psis, widths, heights):
s = ''.join(map(format, line))
s += '\n'
stream.write(s)
continue
return
from pyre.applications.Script import Script
class App(Script):
class Inventory(Script.Inventory):
import pyre.inventory
arcsxml = pyre.inventory.str('x', default='ARCS.xml')
outfile = pyre.inventory.str('o', default='ARCS.par')
resolution = pyre.inventory.int('r', default=1)
def main(self):
arcsxml = self.inventory.arcsxml
outfile = self.inventory.outfile
resolution = self.inventory.resolution
if resolution == 1:
dists, phis, psis, widths, heights = getpixelinfo(arcsxml)
else:
dists, phis, psis, widths, heights = getpixelinfo_mergedpixels(arcsxml, resolution)
writePar(open(outfile, 'w'), dists, phis, psis, widths, heights)
return
def main():
info.activate()
app = App('make-merlin-par')
app.run()
return
if __name__ == '__main__': main()
| [
"linjiao@caltech.edu"
] | linjiao@caltech.edu |
ffdd53afd9bef80915daae81253f58ffa97d52a2 | 1e168ced1a4bdb53967021e082b98027aea9d38a | /3.해커랭크/PythonString/TextWrap/hacker.py | 7c950f4d5a5d65b82f1504f4cf2b6c85f1ca3353 | [] | no_license | vvspearlvvs/CodingTest | 3ebf921308570ac11eb87e6660048ccfcaf90ce4 | fc61b71d955f73ef8710f792d008bc671614ef7a | refs/heads/main | 2023-07-13T15:57:11.312519 | 2021-08-25T02:15:28 | 2021-08-25T02:15:28 | 354,232,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 320 | py | def wrap(string,max_width):
result=[]
for i in range(0,len(string),max_width):
result.append(string[i:i+max_width])
return '\n'.join(result)
string="ABCDEFGHIJKLIMNOQRSTUVWXYZ"
max_width=4
r=wrap(string,max_width)
print(r)
'''
총길이26
0-3 4*1-1
4-7 4*2-1
8-11
12-15
16-19
20-23
24-27
24 25
'''
| [
"gg66477@gmail.com"
] | gg66477@gmail.com |
2c422b8ebd1397f4b9c53bebe2ededabcab3119d | 0c03fcf9b3bdb43473a740935c4bf028b40e5ec4 | /AegisServer/message.py | da4ac0a66dbaa4f373913828105715940060f455 | [] | no_license | Bowserinator/AegisServer2 | afe6b7fbd826e031a00aec94501db7f88a133b5f | 06fc64e54adcaf82ff9a62f1047f5aee5c07672a | refs/heads/master | 2021-01-01T03:56:38.310579 | 2016-05-24T22:21:19 | 2016-05-24T22:21:19 | 59,434,565 | 0 | 2 | null | 2016-10-28T23:47:40 | 2016-05-22T21:10:50 | Python | UTF-8 | Python | false | false | 1,083 | py |
#:IndigoTiger!Brad@botters/IndigoTiger MODE ##BWBellairs-bots -o+b Bowserinator *!*@unaffiliated/bowserinator
#MULTI MODE EXAMPLE on one user
#:IndigoTiger!Brad@botters/IndigoTiger MODE ##BWBellairs-bots +oooo RadioNeat IovoidBot wolfy1339 Andromeda-dev
#MULTI OP EXAMPLE
def phraseModeUser(ircmsg): #Gets modes on users
message = ircmsg
ircmsg = ircmsg.split(" MODE ")[1]
channel = ircmsg.split(" ")[0]
modes = ircmsg.split(" ")[1]
users = ircmsg.split(" ",2)[2]
if len(users) == 0:
return None
currentMode = ""; modes2 = []
for i in modes:
if i=="+" or i=="-":
currentMode = i; continue
if i in ['e','I','b','q','o','v' ]: #Modes with user parameters
modes2.append(currentMode+i)
if len(users.split(" ")) == 1: #1 user
return [message.split(" MODE ")[0], [[users,modes2]], channel]
userA = []
for i in users.split(" "):
userA.append([i,modes2[users.split(" ").index(i)] ])
return [message.split(" MODE ")[0], userA , channel]
| [
"bowserinator@gmail.com"
] | bowserinator@gmail.com |
ed2674f00f099d8225bcc2a8ea8f6830ef935e78 | fde186bd141ed055ba8ab915b2ad25355f8f3fb6 | /ABC/111/py/B.py | 73d5183e6546cf60e8fcf5a3b020f01075dad8ca | [] | no_license | Tsukumo3/Atcoder | 259ea6487ad25ba2d4bf96d3e1cf9be4a427d24e | 5f8d5cf4c0edee5f54b8e78bc14a62e23cab69cb | refs/heads/master | 2020-12-20T05:04:39.222657 | 2020-10-17T01:39:04 | 2020-10-17T01:39:04 | 235,969,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | n = input()
first = n[0]
length = len(n)
guess = int(''.join([first]*length))
if guess >= int(n):
print(guess)
else:
next = int(first)+1
guess = int(''.join([str(next)]*length))
print(guess)
| [
"53821328+Tsukumo3@users.noreply.github.com"
] | 53821328+Tsukumo3@users.noreply.github.com |
af7975f0ce7ddd2e1f93104e71e1a5e5505653ce | a2cdf6290ddbe897bff0778991bdbaff5adbf1e5 | /Stub REST API implementation/NLP_Analysis/nlpanalysis.py | fe5b0c063ed4ed6ae0dafaba1d996acc6cf4d87b | [] | no_license | BUEC500C1/news-analyzer-JimY233 | 90796cc09ae3565c0ad1724d90cb549161dee488 | 3dcb3dc6ae7c882751dc385a7b6e8615563ebaf9 | refs/heads/main | 2023-04-14T15:44:25.237727 | 2021-04-28T12:50:09 | 2021-04-28T12:50:09 | 337,237,874 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,538 | py | """
NLP analysis
Procedure-based Api
"""
from flask import Flask
from flask_restful import Resource, Api
import json
app = Flask(__name__)
api = Api(app)
#data in json file for example
TEXT = {"TEXT_ID": "text_id",
"TEXT": "text",
"Sentiment": "semtiment",
"NLP": ["nlp1","nlp2","nlp3"]
}
'''
Events
Event_Convert: File is converted to text in English
Event_Process: NLP analysis request and process the analysis
Event_Update: File is updated and re-analyze again
'''
def convert_text():
"convert the input to text in English"
pass
def create_data():
'''
check if it is text and in English, if not, use convert_text()
if convert event not successful, log error message
return success or failure
'''
pass
def update_data(data,message):
'''
update the data with requirement and log the message
return success or failure
'''
pass
def delete_data(data):
'''
delete the data and log the message
return success or failure
'''
pass
class NLPanalysis(Resource):
def post(self): #create: upload file and create the data for this file
'''
if Event_upload:
create_data() #create data
'''
return {'Hello': 'world'}
def delete(self): #delete: delete the file and relating data
pass
def put(self): #update: update the data record
pass
def get(self): #read: read data json file and return information
pass
api.add_resource(NLPanalysis,'/')
if __name__ == '__main__':
app.run(debug = True)
| [
"noreply@github.com"
] | BUEC500C1.noreply@github.com |
1d6fb51b992dd0221609f01ea891b56d2ff09c56 | 7f52618136c8d9b9ba0ce8f89f3fcc90c4e6feb7 | /csa_new/csa_new/doctype/umpire_level/umpire_level.py | 1161b6944d2e90d3760fea509076f58023729916 | [
"MIT"
] | permissive | Jishnu70055/user_management | 7ade7f196f974ea0b3ddb220e3fca49665d9de3b | 82d3d2c85a62c7f1162633c164cb7d50e229d2fd | refs/heads/main | 2023-07-06T14:03:00.213723 | 2021-08-10T12:42:10 | 2021-08-10T12:42:10 | 394,649,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | # Copyright (c) 2021, sd and contributors
# For license information, please see license.txt
# import frappe
from frappe.model.document import Document
class Umpirelevel(Document):
pass
| [
"jishnudq70055@gmail.com"
] | jishnudq70055@gmail.com |
27634e512c63dc9efe3e6be85a7a590c2451d939 | b33d585897d6ebdcb3a0b8efce44fb3576dae7d6 | /aiortc/rtcrtpreceiver.py | 9ecf9573e257a77e85efe1ab96f9e1a27db1756d | [
"BSD-3-Clause"
] | permissive | sfrost23/aiortc | 5af8ece2e2ac3d3b24eb8d04ce974b6449372e67 | ca23567ca74cd45ad1d507a9fe8c4a5e61ca6ae8 | refs/heads/master | 2020-04-15T03:24:09.285113 | 2019-01-06T16:42:36 | 2019-01-06T16:42:36 | 164,347,258 | 0 | 0 | BSD-3-Clause | 2019-01-06T21:30:06 | 2019-01-06T21:30:06 | null | UTF-8 | Python | false | false | 17,715 | py | import asyncio
import datetime
import logging
import queue
import random
import threading
import time
import attr
from . import clock
from .codecs import depayload, get_capabilities, get_decoder, is_rtx
from .exceptions import InvalidStateError
from .jitterbuffer import JitterBuffer
from .mediastreams import MediaStreamError, MediaStreamTrack
from .rate import RemoteBitrateEstimator
from .rtcrtpparameters import RTCRtpReceiveParameters
from .rtp import (RTCP_PSFB_APP, RTCP_PSFB_PLI, RTCP_RTPFB_NACK, RtcpByePacket,
RtcpPsfbPacket, RtcpReceiverInfo, RtcpRrPacket,
RtcpRtpfbPacket, RtcpSrPacket, RtpPacket, clamp_packets_lost,
pack_remb_fci, unwrap_rtx)
from .stats import (RTCInboundRtpStreamStats, RTCRemoteOutboundRtpStreamStats,
RTCStatsReport)
from .utils import uint16_add, uint16_gt
logger = logging.getLogger('rtp')
def decoder_worker(loop, input_q, output_q):
    """
    Thread target that decodes encoded frames and hands the results back
    to the asyncio world.

    Items are pulled from `input_q` (a thread-safe `queue.Queue`) as
    `(codec, encoded_frame)` pairs; decoded frames are put onto
    `output_q` (an `asyncio.Queue` belonging to `loop`).  A `None` item
    on `input_q` signals end-of-stream and terminates the worker.
    """
    active_codec_name = None
    active_decoder = None

    while True:
        item = input_q.get()
        if item is None:
            # inform the track that it has ended
            asyncio.run_coroutine_threadsafe(output_q.put(None), loop)
            break

        codec, encoded_frame = item

        # (re)create the decoder whenever the negotiated codec changes
        if active_codec_name != codec.name:
            active_decoder = get_decoder(codec)
            active_codec_name = codec.name

        # pass every decoded frame over to the track's asyncio queue
        for decoded in active_decoder.decode(encoded_frame):
            asyncio.run_coroutine_threadsafe(output_q.put(decoded), loop)

    # drop the decoder reference promptly so its resources are released
    if active_decoder is not None:
        del active_decoder
class NackGenerator:
    """
    Tracks incoming RTP sequence numbers and records gaps, so that NACK
    feedback can be generated for packets believed to be lost.
    """

    def __init__(self):
        # highest sequence number seen so far (None until the first packet)
        self.max_seq = None
        # sequence numbers currently believed to be missing
        self.missing = set()

    def add(self, packet):
        """
        Record `packet`'s sequence number.

        Returns True when the packet reveals a gap, i.e. one or more
        intermediate sequence numbers were skipped over.
        """
        seq = packet.sequence_number

        # first packet: just establish the reference point
        if self.max_seq is None:
            self.max_seq = seq
            return False

        if not uint16_gt(seq, self.max_seq):
            # late arrival (e.g. a retransmission): no longer missing
            self.missing.discard(seq)
            return False

        # newer packet: everything skipped over is now considered missing
        found_gap = False
        expected = uint16_add(self.max_seq, 1)
        while uint16_gt(seq, expected):
            self.missing.add(expected)
            found_gap = True
            expected = uint16_add(expected, 1)
        self.max_seq = seq
        return found_gap
class StreamStatistics:
    """
    Per-stream reception statistics for incoming RTP, following the
    receiver report algorithms of RFC 3550 (Appendix A).
    """

    def __init__(self, clockrate):
        # `clockrate` is the codec's RTP clock rate; arrival times are
        # expressed in these units for the jitter computation.
        self.base_seq = None
        self.max_seq = None
        self.cycles = 0
        self.packets_received = 0

        # jitter
        self._clockrate = clockrate
        self._jitter_q4 = 0
        self._last_arrival = None
        self._last_timestamp = None

        # fraction lost
        self._expected_prior = 0
        self._received_prior = 0

    def add(self, packet):
        """
        Update the statistics with a received RTP `packet`.
        """
        in_order = self.max_seq is None or uint16_gt(packet.sequence_number, self.max_seq)
        self.packets_received += 1

        if self.base_seq is None:
            self.base_seq = packet.sequence_number

        if in_order:
            # arrival time expressed in RTP timestamp units (RFC 3550 A.8)
            arrival = int(time.time() * self._clockrate)

            # a numerically smaller sequence number which is still "newer"
            # means the 16-bit counter wrapped around
            if self.max_seq is not None and packet.sequence_number < self.max_seq:
                self.cycles += (1 << 16)
            self.max_seq = packet.sequence_number

            if packet.timestamp != self._last_timestamp and self.packets_received > 1:
                # interarrival jitter estimate, kept with 4 extra fractional
                # bits for precision (RFC 3550 A.8)
                diff = abs((arrival - self._last_arrival) -
                           (packet.timestamp - self._last_timestamp))
                self._jitter_q4 += diff - ((self._jitter_q4 + 8) >> 4)

            self._last_arrival = arrival
            self._last_timestamp = packet.timestamp

    @property
    def fraction_lost(self):
        """
        Fraction of packets lost since this property was last read, as a
        fixed-point value with 8 fractional bits (RFC 3550 A.3).

        NOTE: reading this property advances the interval bookkeeping,
        so it is intended to be read once per RTCP report.
        """
        expected_interval = self.packets_expected - self._expected_prior
        self._expected_prior = self.packets_expected

        received_interval = self.packets_received - self._received_prior
        self._received_prior = self.packets_received

        lost_interval = expected_interval - received_interval
        if (expected_interval == 0 or lost_interval <= 0):
            return 0
        else:
            return (lost_interval << 8) // expected_interval

    @property
    def jitter(self):
        # drop the 4 fractional bits to report whole timestamp units
        return self._jitter_q4 >> 4

    @property
    def packets_expected(self):
        # extended highest sequence number minus the first one seen
        return self.cycles + self.max_seq - self.base_seq + 1

    @property
    def packets_lost(self):
        # clamped to the range representable in an RTCP receiver report
        return clamp_packets_lost(self.packets_expected - self.packets_received)
class RemoteStreamTrack(MediaStreamTrack):
    """
    A media track fed with frames decoded from an incoming RTP stream.

    Frames are pushed onto an internal asyncio queue by the receiver; a
    `None` entry marks the end of the stream.
    """

    def __init__(self, kind):
        super().__init__()
        self.kind = kind
        self._queue = asyncio.Queue()

    async def recv(self):
        """
        Receive the next frame.
        """
        if self.readyState != 'live':
            raise MediaStreamError

        next_frame = await self._queue.get()
        if next_frame is not None:
            return next_frame

        # a None sentinel means the stream has ended
        self.stop()
        raise MediaStreamError
class TimestampMapper:
    """
    Maps raw 32-bit RTP timestamps onto an unbounded timeline starting
    at zero, compensating for wrap-around of the 32-bit counter.
    """

    def __init__(self):
        self._previous = None
        self._zero = None

    def map(self, timestamp):
        """
        Return `timestamp` expressed relative to the first timestamp seen.
        """
        if self._zero is None:
            # anchor the timeline on the very first timestamp
            self._zero = timestamp
        elif timestamp < self._previous:
            # the 32-bit RTP timestamp wrapped: push the anchor back by
            # one full cycle so mapped values keep increasing
            self._zero -= (1 << 32)
        self._previous = timestamp
        return timestamp - self._zero
@attr.s
class RTCRtpContributingSource:
    """
    The :class:`RTCRtpContributingSource` dictionary contains information about
    a contributing source (CSRC).
    """
    # `@attr.s` generates __init__, __repr__ and comparison methods; the
    # bare strings after each attribute serve as its documentation.
    timestamp = attr.ib(type=datetime.datetime)  # type: datetime.datetime
    "The timestamp associated with this source."

    source = attr.ib(type=int)  # type: int
    "The CSRC identifier associated with this source."
@attr.s
class RTCRtpSynchronizationSource:
    """
    The :class:`RTCRtpSynchronizationSource` dictionary contains information about
    a synchronization source (SSRC).
    """
    # The bare strings below the attributes are Sphinx attribute docs;
    # attrs field order is significant (it defines the __init__ signature).
    timestamp = attr.ib(type=datetime.datetime) # type: datetime.datetime
    "The timestamp associated with this source."
    source = attr.ib(type=int) # type: int
    "The SSRC identifier associated with this source."
class RTCRtpReceiver:
    """
    The :class:`RTCRtpReceiver` interface manages the reception and decoding
    of data for a :class:`MediaStreamTrack`.
    :param: kind: The kind of media (`'audio'` or `'video'`).
    :param: transport: An :class:`RTCDtlsTransport`.
    """
    def __init__(self, kind, transport):
        if transport.state == 'closed':
            raise InvalidStateError
        self.__active_ssrc = {}  # ssrc -> datetime of last packet (for getSynchronizationSources)
        self.__codecs = {}  # payload type -> codec parameters
        self.__decoder_queue = queue.Queue()
        self.__decoder_thread = None
        self.__kind = kind
        if kind == 'audio':
            # Audio: small jitter buffer with prefetch; no NACK or remote
            # bitrate estimation feedback.
            self.__jitter_buffer = JitterBuffer(capacity=16, prefetch=4)
            self.__nack_generator = None
            self.__remote_bitrate_estimator = None
        else:
            # Video: larger jitter buffer plus NACK and REMB feedback.
            self.__jitter_buffer = JitterBuffer(capacity=128)
            self.__nack_generator = NackGenerator()
            self.__remote_bitrate_estimator = RemoteBitrateEstimator()
        self._track = None
        self.__rtcp_exited = asyncio.Event()
        self.__rtcp_task = None
        self.__rtx_ssrc = {}  # RTX ssrc -> original media ssrc
        self.__started = False
        self.__stats = RTCStatsReport()
        self.__timestamp_mapper = TimestampMapper()
        self.__transport = transport
        # RTCP
        self.__lsr = {}  # ssrc -> middle 32 bits of the last SR's NTP timestamp
        self.__lsr_time = {}  # ssrc -> local wall-clock time that SR was received
        self.__remote_streams = {}  # ssrc -> StreamStatistics
        self.__rtcp_ssrc = None

    @property
    def transport(self):
        """
        The :class:`RTCDtlsTransport` over which the media for the receiver's
        track is received.
        """
        return self.__transport

    @classmethod
    def getCapabilities(self, kind):
        """
        Returns the most optimistic view of the system's capabilities for
        receiving media of the given `kind`.
        :rtype: :class:`RTCRtpCapabilities`
        """
        # NOTE(review): the first parameter of this classmethod is
        # conventionally named ``cls``; it is unused here anyway.
        return get_capabilities(kind)

    async def getStats(self):
        """
        Returns statistics about the RTP receiver.
        :rtype: :class:`RTCStatsReport`
        """
        for ssrc, stream in self.__remote_streams.items():
            # NOTE(review): the same ``id`` is reused for every ssrc, so with
            # multiple remote streams later entries may overwrite earlier
            # ones in the report -- confirm against RTCStatsReport.add().
            self.__stats.add(RTCInboundRtpStreamStats(
                # RTCStats
                timestamp=clock.current_datetime(),
                type='inbound-rtp',
                id='inbound-rtp_' + str(id(self)),
                # RTCStreamStats
                ssrc=ssrc,
                kind=self.__kind,
                transportId=self.transport._stats_id,
                # RTCReceivedRtpStreamStats
                packetsReceived=stream.packets_received,
                packetsLost=stream.packets_lost,
                jitter=stream.jitter,
                # RTPInboundRtpStreamStats
            ))
        self.__stats.update(self.transport._get_stats())
        return self.__stats

    def getSynchronizationSources(self):
        """
        Returns a :class:`RTCRtpSynchronizationSource` for each unique SSRC identifier
        received in the last 10 seconds.
        """
        cutoff = clock.current_datetime() - datetime.timedelta(seconds=10)
        sources = []
        for source, timestamp in self.__active_ssrc.items():
            if timestamp >= cutoff:
                sources.append(RTCRtpSynchronizationSource(source=source, timestamp=timestamp))
        return sources

    async def receive(self, parameters: RTCRtpReceiveParameters):
        """
        Attempt to set the parameters controlling the receiving of media.
        :param: parameters: The :class:`RTCRtpParameters` for the receiver.
        """
        if not self.__started:
            for codec in parameters.codecs:
                self.__codecs[codec.payloadType] = codec
            for encoding in parameters.encodings:
                if encoding.rtx:
                    # Remember which RTX stream retransmits which media stream.
                    self.__rtx_ssrc[encoding.rtx.ssrc] = encoding.ssrc
            # start decoder thread
            self.__decoder_thread = threading.Thread(
                target=decoder_worker,
                name=self.__kind + '-decoder',
                args=(asyncio.get_event_loop(), self.__decoder_queue, self._track._queue))
            self.__decoder_thread.start()
            self.__transport._register_rtp_receiver(self, parameters)
            self.__rtcp_task = asyncio.ensure_future(self._run_rtcp())
            self.__started = True

    def setTransport(self, transport):
        # Swap the underlying DTLS transport (used during ICE restarts).
        self.__transport = transport

    async def stop(self):
        """
        Irreversibly stop the receiver.
        """
        if self.__started:
            self.__transport._unregister_rtp_receiver(self)
            self.__stop_decoder()
            self.__rtcp_task.cancel()
            # Wait until _run_rtcp() has acknowledged the cancellation.
            await self.__rtcp_exited.wait()

    def _handle_disconnect(self):
        self.__stop_decoder()

    async def _handle_rtcp_packet(self, packet):
        """Process an incoming RTCP packet (sender reports and BYE)."""
        self.__log_debug('< %s', packet)
        if isinstance(packet, RtcpSrPacket):
            self.__stats.add(RTCRemoteOutboundRtpStreamStats(
                # RTCStats
                timestamp=clock.current_datetime(),
                type='remote-outbound-rtp',
                id='remote-outbound-rtp_' + str(id(self)),
                # RTCStreamStats
                ssrc=packet.ssrc,
                kind=self.__kind,
                transportId=self.transport._stats_id,
                # RTCSentRtpStreamStats
                packetsSent=packet.sender_info.packet_count,
                bytesSent=packet.sender_info.octet_count,
                # RTCRemoteOutboundRtpStreamStats
                remoteTimestamp=clock.datetime_from_ntp(packet.sender_info.ntp_timestamp)
            ))
            # Record the "last SR" data needed for the LSR/DLSR fields of our
            # receiver reports (RFC 3550 6.4.1).
            self.__lsr[packet.ssrc] = ((packet.sender_info.ntp_timestamp) >> 16) & 0xffffffff
            self.__lsr_time[packet.ssrc] = time.time()
        elif isinstance(packet, RtcpByePacket):
            self.__stop_decoder()

    async def _handle_rtp_packet(self, packet: RtpPacket, arrival_time_ms: int):
        """
        Handle an incoming RTP packet.
        """
        self.__log_debug('< %s', packet)
        # feed bitrate estimator
        if self.__remote_bitrate_estimator is not None:
            if packet.extensions.abs_send_time is not None:
                remb = self.__remote_bitrate_estimator.add(
                    abs_send_time=packet.extensions.abs_send_time,
                    arrival_time_ms=arrival_time_ms,
                    payload_size=len(packet.payload) + packet.padding_size,
                    ssrc=packet.ssrc,
                )
                if self.__rtcp_ssrc is not None and remb is not None:
                    # send Receiver Estimated Maximum Bitrate feedback
                    rtcp_packet = RtcpPsfbPacket(
                        fmt=RTCP_PSFB_APP, ssrc=self.__rtcp_ssrc, media_ssrc=0)
                    rtcp_packet.fci = pack_remb_fci(*remb)
                    await self._send_rtcp(rtcp_packet)
        # keep track of sources
        self.__active_ssrc[packet.ssrc] = clock.current_datetime()
        # check the codec is known
        codec = self.__codecs.get(packet.payload_type)
        if codec is None:
            self.__log_debug('x RTP packet with unknown payload type %d', packet.payload_type)
            return
        # feed RTCP statistics
        if packet.ssrc not in self.__remote_streams:
            self.__remote_streams[packet.ssrc] = StreamStatistics(codec.clockRate)
        self.__remote_streams[packet.ssrc].add(packet)
        # unwrap retransmission packet
        if is_rtx(codec):
            original_ssrc = self.__rtx_ssrc.get(packet.ssrc)
            if original_ssrc is None:
                self.__log_debug('x RTX packet from unknown SSRC %d', packet.ssrc)
                return
            # The "apt" parameter maps the RTX payload type back to the
            # associated media codec.
            codec = self.__codecs[codec.parameters['apt']]
            packet = unwrap_rtx(packet,
                                payload_type=codec.payloadType,
                                ssrc=original_ssrc)
        # send NACKs for any missing any packets
        if self.__nack_generator is not None and self.__nack_generator.add(packet):
            await self._send_rtcp_nack(packet.ssrc, sorted(self.__nack_generator.missing))
        # parse codec-specific information
        try:
            if packet.payload:
                packet._data = depayload(codec, packet.payload)
            else:
                packet._data = b''
        except ValueError as exc:
            self.__log_debug('x RTP payload parsing failed: %s', exc)
            return
        # try to re-assemble encoded frame
        encoded_frame = self.__jitter_buffer.add(packet)
        # if we have a complete encoded frame, decode it
        if encoded_frame is not None and self.__decoder_thread:
            # Map the 32-bit RTP timestamp onto a monotonic timeline before
            # handing the frame to the decoder thread.
            encoded_frame.timestamp = self.__timestamp_mapper.map(encoded_frame.timestamp)
            self.__decoder_queue.put((codec, encoded_frame))

    async def _run_rtcp(self):
        """Periodically send RTCP receiver reports until cancelled."""
        self.__log_debug('- RTCP started')
        try:
            while True:
                # The interval between RTCP packets is varied randomly over the
                # range [0.5, 1.5] times the calculated interval.
                await asyncio.sleep(0.5 + random.random())
                # RTCP RR
                reports = []
                for ssrc, stream in self.__remote_streams.items():
                    lsr = 0
                    dlsr = 0
                    if ssrc in self.__lsr:
                        lsr = self.__lsr[ssrc]
                        delay = time.time() - self.__lsr_time[ssrc]
                        if delay > 0 and delay < 65536:
                            # DLSR is expressed in units of 1/65536 seconds.
                            dlsr = int(delay * 65536)
                    reports.append(RtcpReceiverInfo(
                        ssrc=ssrc,
                        fraction_lost=stream.fraction_lost,
                        packets_lost=stream.packets_lost,
                        highest_sequence=stream.max_seq,
                        jitter=stream.jitter,
                        lsr=lsr,
                        dlsr=dlsr))
                if self.__rtcp_ssrc is not None and reports:
                    packet = RtcpRrPacket(ssrc=self.__rtcp_ssrc, reports=reports)
                    await self._send_rtcp(packet)
        except asyncio.CancelledError:
            pass
        self.__log_debug('- RTCP finished')
        self.__rtcp_exited.set()

    async def _send_rtcp(self, packet):
        self.__log_debug('> %s', packet)
        try:
            await self.transport._send_rtp(bytes(packet))
        except ConnectionError:
            # The transport went away; dropping RTCP feedback is acceptable.
            pass

    async def _send_rtcp_nack(self, media_ssrc, lost):
        """
        Send an RTCP packet to report missing RTP packets.
        """
        if self.__rtcp_ssrc is not None:
            packet = RtcpRtpfbPacket(
                fmt=RTCP_RTPFB_NACK, ssrc=self.__rtcp_ssrc, media_ssrc=media_ssrc)
            packet.lost = lost
            await self._send_rtcp(packet)

    async def _send_rtcp_pli(self, media_ssrc):
        """
        Send an RTCP packet to report picture loss.
        """
        if self.__rtcp_ssrc is not None:
            packet = RtcpPsfbPacket(fmt=RTCP_PSFB_PLI, ssrc=self.__rtcp_ssrc, media_ssrc=media_ssrc)
            await self._send_rtcp(packet)

    def _set_rtcp_ssrc(self, ssrc):
        self.__rtcp_ssrc = ssrc

    def __stop_decoder(self):
        """
        Stop the decoder thread, which will in turn stop the track.
        """
        if self.__decoder_thread:
            # ``None`` is the sentinel that makes decoder_worker exit.
            self.__decoder_queue.put(None)
            self.__decoder_thread.join()
            self.__decoder_thread = None

    def __log_debug(self, msg, *args):
        logger.debug('receiver(%s) ' + msg, self.__kind, *args)
| [
"jeremy.laine@m4x.org"
] | jeremy.laine@m4x.org |
1e77ea0f2585d378b5f441157e9e6ff618ea0b73 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/452/usersdata/302/104113/submittedfiles/avenida.py | 73284b82cb60a4041aaa2603048d546eb5cb7a7c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | # -*- coding: utf-8 -*-
# Read an m x n matrix of floats from user input, echo it, then sum a
# subset of its entries (student exercise; prompts are in Portuguese).
m = int(input('Digite o número de linhas: '))
n = int(input('DIgite o número de colunas: '))
matriz = []
for i in range(m):
    linha = []
    for j in range(n):
        linha.append(float(input('Digite o elemento %d de %d: ' %((j+1),(i+1)))))
    matriz.append(linha)
print(matriz)
a = 0
b = 0  # NOTE(review): b and c are never used below.
c = 0
for i in range(m-1):
    for j in range(n-1):
        # NOTE(review): matriz[j][i] uses j (bounded by the column count) as
        # the ROW index: this raises IndexError when n exceeds m+1 and sums
        # transposed entries otherwise -- matriz[i][j] was likely intended.
        # Confirm against the exercise statement before changing.
        a = a + matriz[j][i]
print(a)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
aaec94386ce3465b76f2926ae313d521e83025a8 | 97d36ccecc679acf9353a8ee52a4dd50bdb57b11 | /okooospiderman/okooospiderman5.1mogu.py | c1f277db6b387dca84fa9f1b02ac31d439515606 | [] | no_license | github4n/pythonwork | ebc6a2cfdfc501873f26469c9d8af3b0dd44151f | fa582c7efdc904ce83e5a0319277bcc366a086f2 | refs/heads/master | 2020-04-16T22:18:45.372528 | 2019-01-16T00:04:26 | 2019-01-16T00:04:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,336 | py | #此版本是在okooospiderman5.1_forWin的基础上用蘑菇代理的版本,用于双开终端加快爬取速度
#把文件保存的位置放到E盘,试验成功后会放到移动硬盘上
#把api改成蘑菇代理的api
#不知道是不是蘑菇代理还是多开的原因,这个程序的ip特别容易被封
#换成了讯代理,再测试一下
#改成两场比赛换一次ip看看效率
#两场比赛换一次ip速度比三场比赛换一场ip稍快一点,并且出错率低一些
from gevent import monkey;monkey.patch_all()
import os
import re
import gevent
import time
import random#导入随机数模块
from bs4 import BeautifulSoup#在提取代码的时候还是要用到beautifulsoup来提取标签
from datetime import datetime, timedelta, timezone#用来把时间字符串转换成时间
import pytz#用来设置时区信息
import os#用来获取文件名列表
import requests
import urllib
import YDM
import time
import csv
import json#用来将字典写入json文件
import psutil#用来获取内存使用信息以方便释放
import copy #用来复制对象
def checkip(ip):
    """Probe every proxy in ``ip`` against okooo.com and drop the dead ones.

    Each proxy gets up to 3 attempts; proxies that never respond are
    blanked out, the blanks are stripped, and the surviving list (which is
    the same list object that was passed in) is returned.
    """
    global header
    global UAlist
    # NOTE(review): header4 is an alias of the global ``header`` dict, so the
    # User-Agent rotation below also mutates the global headers.
    header4 = header
    iplist = ip
    for i in range(0,len(iplist)):
        error4 = True
        mal3 = 1
        while (error4 ==True and mal3 <= 3):  # 3 attempts in total: 1 initial + 2 retries
            try:
                header4['User-Agent'] = random.choice(UAlist)  # rotate the UA on every attempt
                check = requests.get('http://www.okooo.com/jingcai/',headers = header4,proxies = {"http":"http://"+ iplist[i]},timeout = 6.5)
            except Exception as e:
                error4 = True
                mal3 = mal3 + 1
                if mal3 > 3:
                    # All attempts failed: blank this slot for removal below.
                    iplist[i] = ''
                    print('第' + str(i) + '个IP不合格,已去除')
            else:
                error4 = False
                print('第' + str(i) + '个IP合格')
    while '' in iplist:
        iplist.remove('')
    return iplist
def dateRange(start, end, step=1, format="%Y-%m-%d"):
    """Return every ``step``-th date from ``start`` to ``end`` (inclusive)
    as strings formatted with ``format``; empty if ``end`` precedes ``start``."""
    first = datetime.strptime(start, format)
    last = datetime.strptime(end, format)
    span = (last - first).days + 1
    dates = []
    for offset in range(0, span, step):
        dates.append((first + timedelta(offset)).strftime(format))
    return dates
def ydm(filename):
    """Recognize the captcha image stored at ``filename`` via the yundama
    (YDM) service and return the decoded text."""
    # Account credentials and application key for the yundama API.
    client = YDM.YDMHttp('921202jsy', '921202jay', 1, '22cc5376925e9387a23cf797cb9ba745')
    # Captcha type 1005, 60 second timeout; only the decoded text is needed.
    _cid, text = client.decode(filename, 1005, 60)
    return text
def randomdatas(filename):
    """Build a login POST payload using a random account and a solved captcha.

    ``filename`` is the path of the captcha image to recognise; the account
    list itself is always read from the hard-coded CSV path below.
    """
    User = list()
    with open('F:\\data\\okoookonto_new.csv',"r") as f:  # one [username, password] row per line
        reader = csv.reader(f)
        for row in reader:
            User.append(row)
    datas = {
        'UserName':'',
        'PassWord':'',
        'LoginType':'okooo',
        'RememberMe':'1',
        'AuthType':'okooo',
        'AuthCode':'',
    }  # AuthCode is filled in by the yundama captcha solver below
    suiji = random.randint(0,len(User)-1)  # pick a random account index
    datas['UserName'] = User[suiji][0]
    datas['PassWord'] = User[suiji][1]
    datas['AuthCode'] = ydm(filename)  # captcha text recognised by the YDM module
    return datas
def login(datas):
    """POST the login form ``datas``, then walk to the football calendar page.

    Each request is retried forever, switching to a random proxy on
    failure; the session object ``r`` itself is never recreated here.
    """
    global header
    global r
    global proxylist
    # NOTE(review): header2 aliases the global ``header``; the Referer and
    # Upgrade-Insecure-Requests entries added below persist globally.
    header2 = header
    error = True
    while error == True:
        try:
            denglu = r.post('http://www.okooo.com/I/?method=ok.user.login.login',headers = header2,verify=False,data = datas,allow_redirects=False,timeout = 16)  # submit the credentials
            error = False
        except Exception as e:
            print('login超时,正在重拨')
            r.proxies = random.choice(proxylist)  # switch to another proxy
            error = True
    error = True
    while error == True:
        try:
            zuqiuzhongxin = r.get('http://www.okooo.com/soccer/',headers = header2,verify=False,allow_redirects=False,timeout = 16)  # enter the football center
            error = False
        except Exception as e:
            print('login超时,正在重拨')
            r.proxies = random.choice(proxylist)  # switch to another proxy
            error = True
    header2['Referer'] = 'http://www.okooo.com/soccer/'  # required to reach the football calendar
    header2['Upgrade-Insecure-Requests'] = '1'  # this one is required as well
    error = True
    while error == True:
        try:
            zuqiurili = r.get('http://www.okooo.com/soccer/match/',headers = header2,verify=False,allow_redirects=False,timeout = 16)  # enter the football calendar
            error = False
        except Exception as e:
            print('login超时,正在重拨')
            r.proxies = random.choice(proxylist)  # switch to another proxy
            error = True
def coprocess(urllist,date):
    """Scrape every bookmaker URL in ``urllist`` concurrently with gevent
    greenlets and wait for all of them to finish."""
    greenlets = [gevent.spawn(datatofile, company_url, date) for company_url in urllist]
    gevent.joinall(greenlets)
def datatofile(url,date):
    """Scrape one bookmaker's odds-history page and append its records to
    the day's data file.

    Runs inside a gevent greenlet (see coprocess); it works on a *copy* of
    the global session so switching that copy's proxy does not affect the
    other greenlets sharing the original session.
    """
    global header
    global r
    global proxylist
    global UAlist
    proxyzanshi = proxylist.copy()  # must copy, otherwise edits here would mutate the global list
    copyr = copy.copy(r)  # shallow-copy the session so changing copyr.proxies leaves r untouched
    header4 = header
    header4['Referer'] = 'http://www.okooo.com/soccer/'  # required to reach the odds pages
    header4['Upgrade-Insecure-Requests'] = '1'  # this one is required as well
    header4['User-Agent'] = random.choice(UAlist)
    error3 = True
    mal = 1
    while (error3 == True and mal <= 5):  # 1 initial attempt plus retries
        try:
            firma = copyr.get(url,headers = header4,verify=False,allow_redirects=False,timeout = 9.5)  # open this bookmaker's odds page
            content3 = firma.content.decode('GB18030')  # page source
            firma.close()  # close the connection
            del(firma)  # release memory
            # Extracting with a mix of BeautifulSoup and regexes is the most
            # reliable approach for this page layout.
            sucker3 = '<a class="bluetxt" href="/soccer/match/(.*?)/odds/change/(.*?)/">'
            sucker4 = '> <b>(.*?)</b>'
            sucker5 = '/schedule/">(.*?)</a>'
            sucker6 = 'odds/">(.*?) vs (.*?)</a>'
            cid = re.search(sucker3,content3).group(2)
            urlnum = re.search(sucker3,content3).group(1)
            companyname = re.search(sucker4,content3).group(1)
            league = re.search(sucker5,content3).group(1)
            zhudui = re.search(sucker6,content3).group(1)  # home team
            kedui = re.search(sucker6,content3).group(2)  # away team
            soup = BeautifulSoup(content3,"html5lib")  # 'html5lib' is the most fault-tolerant parser
            table = soup.table
            tr = table.find_all('tr')
            # Drop the header/separator rows.
            del tr[0],tr[0],tr[1]
            s1 = list()
            for x in range(0,len(tr)):
                s1.append(str(tr[x]))
            sucker7 = '(>)(.*?)(<)'
            s2 = list()  # s2 holds the [time, remaining, odds...] row of every odds change
            for u in range(0,len(s1)):
                uu = re.findall(sucker7,s1[u])
                uuu = list()
                for w in range(0,len(uu)):
                    uuu.append(uu[w][1])
                while '' in uuu:
                    uuu.remove('')  # strip empty elements
                for i in range(0,len(uuu)):
                    if uuu[i][-1] == '↑':  # strip the up/down trend arrows
                        uuu[i] = uuu[i][:-1]
                    elif uuu[i][-1] == '↓':
                        uuu[i] = uuu[i][:-1]
                for i in range(2,len(uuu)):
                    uuu[i] = float(uuu[i])
                s2.append(uuu)
            tzinfo = pytz.timezone('Etc/GMT-8')  # Etc/GMT-8 is Beijing time (UTC+8)
            for i in range(0,len(s2)):  # convert the timestamps in s2 to UTC
                s2[i][0] = datetime.strptime(s2[i][0][:16],'%Y/%m/%d %H:%M')  # parse as a naive datetime (Beijing time)
                s2[i][0] = s2[i][0].replace(tzinfo = tzinfo)  # tag it as Beijing time
                s2[i][0] = s2[i][0].astimezone(timezone(timedelta(hours=0)))  # convert to UTC
            for i in range(0,len(s2)):  # convert the percentage columns to fractions
                s2[i][5] = round(s2[i][5]*0.01,4)  # rounding needed to keep clean decimals
                s2[i][6] = round(s2[i][6]*0.01,4)
                s2[i][7] = round(s2[i][7]*0.01,4)
            for i in range(0,len(s2)):  # convert the remaining-time column to minutes
                match = re.match('赛前(.*?)小时(.*?)分',s2[i][1])
                s2[i][1] = int(match.group(1))*60 + int(match.group(2))  # minutes remaining before kick-off
            filepath = 'F:\\data\\okooofile\\'+date+'.txt'
            with open(filepath,'a') as f:
                for i in range(0,len(s2)):  # one record per odds change
                    record = {}
                    record['date'] = date
                    record['urlnum'] = urlnum
                    record['league'] = league
                    record['cid'] = cid
                    record['zhudui'] = zhudui
                    record['kedui'] = kedui
                    record['companyname'] = companyname
                    record['timestamp'] = s2[i][0]
                    record['resttime'] = s2[i][1]
                    record['peilv'] = [s2[i][2],s2[i][3],s2[i][4]]
                    record['gailv'] = [s2[i][5],s2[i][6],s2[i][7]]
                    record['kailizhishu'] = [s2[i][8],s2[i][9],s2[i][10]]
                    record['fanhuanlv'] = s2[i][11]
                    record_str = str(record)
                    f.write(record_str)
            print(url)
            error3 = False
        except Exception as e:
            # Classify the failure from the exception text and decide whether
            # to skip (after logging) or to retry with a new proxy/UA.
            if re.search('.*?赛前.*?',str(e)):
                # The "time before match" cell did not parse: unexpected row
                # format, skip this URL.
                print('Error:',e)
                print(url + '出错,跳过并写入Errorlog文件,格式不符')
                with open('F:\\data\\Errorlog.txt','a') as f:
                    f.write(url + '出错,跳过并写入Errorlog文件,格式不符')
                    f.write('\n')
                error3 = False
            elif re.search('.*?NoneType.*?',str(e)) and mal <= 4:
                # A regex search returned None: unexpected page layout, skip.
                print('Error:',e)
                print(url + '出错,跳过并写入Errorlog文件,NoneType')
                with open('F:\\data\\Errorlog.txt','a') as f:
                    f.write(url + '出错,跳过并写入Errorlog文件,NoneType')
                    f.write('\n')
                error3 = False
            elif re.search('.*?Read timed out.*?',str(e)) and mal <= 4:
                print('Error:',e)
                print('datatofile超时或出错,2到3秒后进行第'+ str(mal) + '次重拨')
                copyr.proxies = random.choice(proxyzanshi)  # plain timeout: no need to discard the proxy
                header4['User-Agent'] = random.choice(UAlist)  # rotate the UA only on failure
                mal = mal + 1
                time.sleep(random.uniform(2,3))  # random pause
                error3 = True
            elif re.search('.*?Max retries exceeded.*?',str(e)) and mal <= 4:
                print('Error:',e)
                print('datatofile超时或出错,2到3秒后进行第'+ str(mal) + '次重拨')
                proxyzanshi.remove(copyr.proxies)  # drop the proxy that just failed
                copyr.proxies = random.choice(proxyzanshi)  # rotate the proxy only on failure
                header4['User-Agent'] = random.choice(UAlist)  # rotate the UA only on failure
                mal = mal + 1
                time.sleep(random.uniform(2,3))  # random pause
                error3 = True
            else:
                # Unknown error or retries exhausted: log and give up on this URL.
                print(url + '出错,跳过并写入Errorlog文件,重拨4次')
                with open('F:\\data\\Errorlog.txt','a') as f:
                    f.write(url + '出错,跳过并写入Errorlog文件,重拨4次')
                    f.write('\n')
                error3 = False
def dangtianbisai(date,startgame = 0):
    """Crawl every match listed on the calendar page for ``date``.

    ``startgame`` allows resuming from a match index recorded in the log
    file.  Every two matches a fresh batch of proxies is fetched.  Progress
    is written to okooolog.txt, failures to Errorlog.txt.

    Bug fixes versus the original version:
    * the retry guard inside the William Hill loop tested the undefined
      name ``mal`` (a local of datatofile) instead of ``mal2``; the
      resulting NameError was swallowed by the except clause and
      mis-handled as a timeout,
    * ``companyurl`` is initialised before the retry loop so the post-loop
      check cannot raise UnboundLocalError when every attempt failed.
    """
    global header
    global r
    global proxylist
    global UAlist
    starttime = time.time()
    # NOTE(review): header3 aliases the global ``header``; the entries set
    # below persist globally.
    header3 = header
    header3['Referer'] = 'http://www.okooo.com/soccer/'  # required to reach the calendar
    header3['Upgrade-Insecure-Requests'] = '1'  # this one is required as well
    header3['User-Agent'] = random.choice(UAlist)
    error = True
    while error == True:
        try:
            wangye = r.get('http://www.okooo.com/soccer/match/?date=' + date,headers = header3,verify=False,allow_redirects=False,timeout = 31)
            error = False
        except Exception as e:
            print('dangtianbisai超时1,10秒后重拨')
            header3['User-Agent'] = random.choice(UAlist)  # rotate the UA only on failure
            r.proxies = random.choice(proxylist)  # rotate the proxy only on failure
            time.sleep(10)
            error = True
    print('进入日期:'+ date)
    content1 = wangye.content.decode('gb18030')  # calendar page source
    sucker1 = '/soccer/match/.*?/odds/'
    bisaiurl = re.findall(sucker1,content1)  # the day's match URLs
    print('从'+ date +'第'+ str(startgame) + '场比赛开始爬取')
    print(str(bisaiurl))
    for i in range(startgame,len(bisaiurl)):  # resume from the recorded match (if any)
        if (i%2 == 0 and i != 0):  # every 2 matches, fetch a fresh proxy batch
            print('已经爬了2场比赛,需要重新提取新ip')
            proxycontent = requests.get('http://api.xdaili.cn/xdaili-api//privateProxy/applyStaticProxy?spiderId=4f85e66b7f9f4297b146af4df281cd13&returnType=1&count=1') # xdaili proxy extraction API
            print('已获取IP')
            proxylist = re.findall('(.*?)\\r\\n',proxycontent.text)
            print('正在检查IP')
            proxylist = checkip(proxylist)
            for j in range(0,len(proxylist)):
                proxylist[j] = {"http":"http://" + proxylist[j],}
            print(proxylist)
            r.proxies = random.choice(proxylist)
            while (len(proxylist) <=4):
                print('有效ip数目不足,需等待10秒重新提取')
                time.sleep(10)
                proxycontent = requests.get('http://api.xdaili.cn/xdaili-api//privateProxy/applyStaticProxy?spiderId=4f85e66b7f9f4297b146af4df281cd13&returnType=1&count=1')
                print('已获取IP')
                proxylist = re.findall('(.*?)\\r\\n',proxycontent.text)
                print('正在检查IP')
                proxylist = checkip(proxylist)
                for j in range(0,len(proxylist)):
                    proxylist[j] = {"http":"http://" + proxylist[j],}
                print(proxylist)
                r.proxies = random.choice(proxylist)
        time.sleep(random.uniform(1,3))  # pause 1-3 seconds between matches
        error2 = True
        mal2 = 1
        proxyzanshi = proxylist.copy()
        companyurl = []  # FIX: guarantee the name exists even if all retries fail
        while (error2 == True and mal2 <= 4):  # 1 initial attempt + 3 retries
            try:
                william = r.get('http://www.okooo.com' + bisaiurl[i] + 'change/14/',headers = header3,timeout = 31)  # open William Hill's odds page
                content2 = william.content.decode('gb18030')
                sucker2 = bisaiurl[i] + 'change/.*?/'
                companyurl = re.findall(sucker2,content2)  # the other bookmakers' links, taken from William Hill's page
                if (len(companyurl) < 3 and mal2 <= 3):  # FIX: was ``mal`` (undefined in this function)
                    print('日期' + date + '第' + str(i) +'场比赛出错,无法从威廉源码中获取其他公司链接,10秒后重拨第'+ str(mal2) +'次')
                    mal2 = mal2 + 1
                    header3['User-Agent'] = random.choice(UAlist)  # rotate the UA only on failure
                    proxyzanshi.remove(r.proxies)
                    r.proxies = random.choice(proxyzanshi)  # rotate the proxy only on failure
                    time.sleep(10)
                    error2 = True
                else:
                    error2 = False
            except Exception as e:
                print('dangtianbisai' + '进入' + bisaiurl[i] + '超时,10秒后重拨第' + str(mal2) +'次')
                mal2 = mal2 + 1
                header3['User-Agent'] = random.choice(UAlist)  # rotate the UA only on failure
                proxyzanshi.remove(r.proxies)
                r.proxies = random.choice(proxyzanshi)  # rotate the proxy only on failure
                time.sleep(10)
                error2 = True
        if (len(companyurl) < 3):
            print('日期' + date + '第' + str(i) +'场比赛出错,无法从威廉源码中获取其他公司链接,跳过并写入Errorlog文件')
            with open('F:\\data\\Errorlog.txt','a') as f:
                f.write(bisaiurl[i] + ',日期' + date + '第' + str(i) +'场比赛出错,没有威廉')
                f.write('\n')
            with open('F:\\data\\okooolog.txt','w') as f:
                f.write(date+str(i))  # record progress even for skipped matches
            continue
        for j in range(0,len(companyurl)):
            companyurl[j] = 'http://www.okooo.com' + companyurl[j]
        coprocess(companyurl,date)  # scrape all bookmakers of this match concurrently
        print('日期' + date + '第' + str(i) +'场比赛爬取成功')
        with open('F:\\data\\okooolog.txt','w') as f:
            f.write(date+str(i))  # record crawl progress
    endtime = time.time()
    print('日期:' + date + ',当天比赛爬取成功' + '用时:' + str(endtime - starttime) + '秒' + '\n')
    with open('F:\\data\\finished.txt',"at") as f:
        f.write('日期:' + date + ',当天比赛爬取成功' + '用时:' + str(endtime - starttime) + '秒' + '\n')
        f.write('\n')
class Startpoint(object):
    """Crawler resume point parsed from the progress log at ``logpath``.

    The log holds a single record like ``2020-01-0512``: the first 10
    characters are the 'YYYY-MM-DD' date, the remainder the match index.

    Attributes:
        startdate: date string to resume from; today's date when the log is
            empty or malformed.
        startgame: match index to resume from -- an ``int`` when parsed from
            the log, the string ``'0'`` in the fallback cases (callers wrap
            it in ``int(...)``, so both work; kept for compatibility).

    Raises:
        OSError: if the log file cannot be opened (same as the original).
    """
    def __init__(self,logpath):
        self.logpath = logpath
        # ``with`` guarantees the handle is closed even when parsing fails;
        # the previous version leaked the handle on the error path.
        with open(self.logpath,'r') as log:
            try:
                logrecord = log.readline().strip('\n')
                if logrecord != '':
                    # First 10 characters: date; remainder: match index.
                    self.startdate = logrecord[0:10]
                    self.startgame = int(logrecord[10:])
                else:
                    # Empty log: start from today, first match.
                    self.startdate = datetime.now().strftime('%Y-%m-%d')
                    self.startgame = '0'
            except Exception as e:
                # Malformed record: fall back to today, first match.
                print('Error:',e)
                self.startdate = datetime.now().strftime('%Y-%m-%d')
                self.startgame = '0'
def main():
    """Open the home page, fetch the login captcha, solve it and log in.

    Keeps retrying -- rotating proxies/UA and, every few failures,
    restarting the whole session with a freshly extracted proxy batch --
    until a 5-character captcha has been recognised and the login request
    has been submitted via login().
    """
    global header
    global r
    global proxylist
    error = True
    while error == True:
        try:
            r.get('http://www.okooo.com/jingcai/',headers = header,verify=False,allow_redirects=False,timeout = 31)  # start the session from the home page
            error = False
        except Exception as e:
            print('Error:',e)
            print('main超时,正在重拨1')
            r.proxies = random.choice(proxylist)
            error = True
    error = True
    mal3 = 0
    while error == True:
        try:
            yanzhengma = r.get('http://www.okooo.com/I/?method=ok.user.settings.authcodepic',headers = header,verify=False,allow_redirects=False,timeout = 31)  # GET the login captcha image
            error = False
        except Exception as e:
            if (mal3%3 != 0 or mal3 == 0):
                # Fewer than three consecutive failures: just switch proxy.
                mal3 = mal3 + 1
                print('Error:',e)
                print('main超时,正在进行第'+str(mal3)+'次重拨2,')
                r.proxies = random.choice(proxylist)
                error = True
            else:
                # Every third failure: restart the session with a brand new
                # proxy batch and a fresh User-Agent.
                print('main获取验证码失败,10秒后重启回话,重新提取ip')
                r.close()
                time.sleep(10)
                header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}  # pretend to be a browser
                header['User-Agent'] = random.choice(UAlist)
                proxycontent = requests.get('http://api.xdaili.cn/xdaili-api//privateProxy/applyStaticProxy?spiderId=4f85e66b7f9f4297b146af4df281cd13&returnType=1&count=1') # xdaili proxy extraction API
                print('已获取IP')
                proxylist = re.findall('(.*?)\\r\\n',proxycontent.text)
                print('正在检查IP')
                proxylist = checkip(proxylist)
                for j in range(0,len(proxylist)):
                    proxylist[j] = {"http":"http://" + proxylist[j],}
                print(proxylist)
                while (len(proxylist) <=4):
                    print('有效ip数目不足,需等待15秒重新提取')
                    time.sleep(10)
                    proxycontent = requests.get('http://api.xdaili.cn/xdaili-api//privateProxy/applyStaticProxy?spiderId=4f85e66b7f9f4297b146af4df281cd13&returnType=1&count=1')
                    print('已获取IP')
                    proxylist = re.findall('(.*?)\\r\\n',proxycontent.text)
                    print('正在检查IP')
                    proxylist = checkip(proxylist)
                    for j in range(0,len(proxylist)):
                        proxylist[j] = {"http":"http://" + proxylist[j],}
                    print(proxylist)
                r = requests.Session()  # start a fresh session
                r.proxies = random.choice(proxylist)
                error = True
    filepath = 'F:\\data\\yanzhengma.png'
    with open(filepath,"wb") as f:
        f.write(yanzhengma.content)  # save the captcha image locally
    print('已获得验证码')
    datas = randomdatas(filepath)  # build the login payload for a random account
    while len(datas['AuthCode']) != 5:  # captcha was mis-recognised: start over
        r = requests.Session()  # start a fresh session
        r.proxies = random.choice(proxylist)  # use a random proxy
        error = True
        while error == True:
            try:
                r.get('http://www.okooo.com/jingcai/',headers = header,verify=False,allow_redirects=False,timeout = 31)
                error = False
            except Exception as e:
                print('Error:',e)
                print('main超时,正在重拨3')
                r.proxies = random.choice(proxylist)
                error = True
        error = True
        while error == True:
            try:
                yanzhengma = r.get('http://www.okooo.com/I/?method=ok.user.settings.authcodepic',headers = header,verify=False,allow_redirects=False,timeout = 31)  # GET the login captcha image
                error = False
            except Exception as e:
                print('main超时,正在重拨4')
                r.proxies = random.choice(proxylist)
                error = True
        with open(filepath,"wb") as f:
            f.write(yanzhengma.content)  # save the captcha image locally
        print('已重新获得验证码')
        datas = randomdatas(filepath)  # build a new payload for another random account
        print('云打码已尝试一次')
    login(datas)  # log the account in
    print('正在登录下面账户:')
    print(str(datas))
#################################### Above: function definitions ##########################################
#################################### Below: main program ###########################################
start = time.time()
# Build a pool of User-Agent strings from a local useragentswitcher XML dump.
UAcontent = urllib.request.urlopen('file:///F:/data/useragentswitcher.xml').read()
UAcontent = str(UAcontent)
UAname = re.findall('(useragent=")(.*?)(")',UAcontent)
UAlist = list()
for z in range(0,int(len(UAname))):
    UAlist.append(UAname[z][1])
UAlist = UAlist[0:586]  # this leaves a pool of 586 User-Agents
UAlist.append('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36')  # plus one more
logpath = 'F:\\data\\okooolog.txt'
beginpoint = Startpoint(logpath)  # resume point read from the progress log
datelist = dateRange("2010-04-10", beginpoint.startdate)  # dates from 2010-04-10 up to the resume date
datelist.reverse()  # crawl backwards, most recent day first
error = True
n = 0
# NOTE(review): when any day raises, the except below restarts the for loop
# from the beginning of datelist, so already-crawled days are revisited.
while error == True:
    try:
        for i in datelist:  # a fresh UA, proxy batch and account for every day
            header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}  # pretend to be a browser
            header['User-Agent'] = random.choice(UAlist)
            proxycontent = requests.get('http://api.xdaili.cn/xdaili-api//privateProxy/applyStaticProxy?spiderId=4f85e66b7f9f4297b146af4df281cd13&returnType=1&count=1') # xdaili proxy extraction API
            print('已获取IP')
            proxylist = re.findall('(.*?)\\r\\n',proxycontent.text)
            print('正在检查IP')
            proxylist = checkip(proxylist)
            for j in range(0,len(proxylist)):
                proxylist[j] = {"http":"http://" + proxylist[j],}
            print(proxylist)
            while (len(proxylist) <=4):
                print('有效ip数目不足,需等待15秒重新提取')
                time.sleep(10)
                proxycontent = requests.get('http://api.xdaili.cn/xdaili-api//privateProxy/applyStaticProxy?spiderId=4f85e66b7f9f4297b146af4df281cd13&returnType=1&count=1')
                print('已获取IP')
                proxylist = re.findall('(.*?)\\r\\n',proxycontent.text)
                print('正在检查IP')
                proxylist = checkip(proxylist)
                for j in range(0,len(proxylist)):
                    proxylist[j] = {"http":"http://" + proxylist[j],}
                print(proxylist)
            r = requests.Session()  # start the session
            r.proxies = random.choice(proxylist)
            main()
            # Probe a known calendar page: a redirect means we are not logged in.
            ceshi = r.get('http://www.okooo.com/soccer/match/?date=2017-01-01',headers = header,verify=False,allow_redirects=False,timeout = 31)
            while (ceshi.status_code != 200) and (ceshi.status_code != 203):  # != 200 means a redirect to the login page, whose captcha lives at another encrypted URL, so we cannot log in from there
                print(str(ceshi.status_code))
                print('登录失败,正在重新登录')
                time.sleep(10)
                proxycontent = requests.get('http://api.xdaili.cn/xdaili-api//privateProxy/applyStaticProxy?spiderId=4f85e66b7f9f4297b146af4df281cd13&returnType=1&count=1') # xdaili proxy extraction API
                print('已获取IP')
                proxylist = re.findall('(.*?)\\r\\n',proxycontent.text)
                print('正在检查IP')
                proxylist = checkip(proxylist)
                for l in range(0,len(proxylist)):
                    proxylist[l] = {"http":"http://"+ proxylist[l],}
                print(proxylist)
                while (len(proxylist) <=4):
                    print('有效ip数目不足,需等待15秒重新提取')
                    time.sleep(10)
                    proxycontent = requests.get('http://api.xdaili.cn/xdaili-api//privateProxy/applyStaticProxy?spiderId=4f85e66b7f9f4297b146af4df281cd13&returnType=1&count=1')
                    print('已获取IP')
                    proxylist = re.findall('(.*?)\\r\\n',proxycontent.text)
                    print('正在检查IP')
                    proxylist = checkip(proxylist)
                    for j in range(0,len(proxylist)):
                        proxylist[j] = {"http":"http://" + proxylist[j],}
                    print(proxylist)
                header = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}  # pretend to be a browser
                header['User-Agent'] = random.choice(UAlist)
                r = requests.Session()  # start a fresh session
                r.proxies = random.choice(proxylist)
                main()
                ceshi = r.get('http://www.okooo.com/soccer/match/?date=2017-01-01',headers = header,verify=False,allow_redirects=False,timeout = 31)
            print('登录成功')
            print('准备进入:' + i)
            if n == 0:
                dangtianbisai(i,int(beginpoint.startgame))  # resume the first day from the recorded match
            else:
                dangtianbisai(i)
            n = 1
            r.close()  # close the session
            error = False
    except Exception as e:
        print('Error:',e)
        print('IP不可用,需要重新提取')
        time.sleep(15)
        error = True
end = time.time()
print('任务完毕,总用时' + str(end-start) + '秒,任务日期:' + str(datelist[-1]) + '——' + str(datelist[0]))
| [
"littlecat.j@hotmail.com"
] | littlecat.j@hotmail.com |
1f8869417410dde70157bbd60e97c6e6b31bd854 | 4e1aa596a601f684f1f46058fbd91b2040e69cf0 | /scrolltwo/scrolltwo/wsgi.py | 575fd8d9926fd0c07df66f7cef88dca8a5785793 | [] | no_license | dpuman/cs50sui | 584b99f2580ddc4fa68454a9aa00d2e2a675ce03 | a019178d5319a56eae7b3fc5855dfd3adf427f0a | refs/heads/master | 2023-01-31T20:01:55.770764 | 2020-12-15T18:00:14 | 2020-12-15T18:00:14 | 318,615,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for scrolltwo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'scrolltwo.settings')
application = get_wsgi_application()
| [
"dpumail.in@gmail.com"
] | dpumail.in@gmail.com |
94a16418a44c7c93ab1561beab6aeb8798bb1701 | d9e8b7d5c468b38cdf18cece9dff12ad1188a71b | /Models/project_Clothes/Cloth_proj/FirstApp/migrations/0001_initial.py | fc6e1d9c9644f1af71cd698e3db2147011f152cc | [] | no_license | Ruchika-Munde/Django_Task | f14e0497a4f8045a68dbe58bbd772abf606369d3 | 7fa549842a544527b9f78cbfcf52c26dde31463c | refs/heads/master | 2022-12-16T17:53:12.577323 | 2020-09-09T07:34:43 | 2020-09-09T07:34:43 | 294,036,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 574 | py | # Generated by Django 2.2.7 on 2019-11-28 16:38
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Clothes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Catagory', models.TextField()),
('Price', models.FloatField()),
('Pattern', models.TextField()),
],
),
]
| [
"ruchamunde@gmail.com"
] | ruchamunde@gmail.com |
380e0a119079ff1619313228729d7cc6057e2410 | eae2fb8556f62722e4467f4554747785aaf907be | /gan/plotting/epochs.py | 4f40f01ccdb5eb67a2beeb9b2d53392a31a34ccb | [] | no_license | aminnj/DY-GAN | a78fc9c4cb75e9f3238af735a92615c4b3586fe7 | a2a3f4e293b25ce2edcbdfd677270bc81ab14838 | refs/heads/master | 2020-05-24T23:09:36.553106 | 2018-10-17T22:13:11 | 2018-10-17T22:13:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,202 | py | import sys
sys.path.extend(["../../","../"])
import os
import numpy as np
from physicsfuncs import *
import glob
import ROOT as r
import plottery.plottery as ply
import plottery.utils as plu
def get_quantities(fname):
    """Load one generated-sample .npy dump and summarize its kinematics.

    The epoch number is recovered from the filename suffix
    ("..._<epoch>.npy").  Non-finite invariant masses are filtered out.
    """
    sample = np.load(fname)
    epoch = int(fname.rsplit("_", 1)[-1].split(".")[0])
    inv_mass = Minv(sample)
    quantities = {"epoch": epoch}
    quantities["masses"] = inv_mass[np.isfinite(inv_mass)]
    quantities["Z_pZ"] = Z_pZ(sample)
    quantities["Z_pT"] = Z_pT(sample)
    quantities["phis"] = get_phis(sample)
    quantities["dphis"] = get_dphis(sample)
    quantities["etas"] = get_etas(sample)
    quantities["detas"] = get_detas(sample)
    return quantities
# fnames = glob.glob("../progress/vtestadam/*npy")
# BUGFIX: the previous pattern ".../npy" had no wildcard, so it could only
# match a single file literally named "npy".  The commented line above and
# the "_<epoch>.npy" parsing below show the intent is all generated dumps.
fnames = glob.glob("../progress/vdecaynoise/*npy")
data = np.load("../data_xyz.npy")
# Per-epoch [epoch, mean, std] triples for each kinematic quantity.
points_mz = []
points_zpt = []
points_zpz = []
points_phis = []
points_dphis = []
points_etas = []
points_detas = []
# Sort checkpoints chronologically by the epoch number in the filename.
fnames = sorted(fnames, key=lambda x: int(x.rsplit("_", 1)[-1].split(".")[0]))
# Collect one (epoch, mean, std) point per checkpoint, skipping checkpoints
# whose invariant-mass distribution has non-finite moments.
for fname in fnames:
    quantities = get_quantities(fname)
    if not np.isfinite(quantities["masses"].mean()): continue
    if not np.isfinite(quantities["masses"].std()): continue
    points_mz.append([quantities["epoch"], quantities["masses"].mean(), quantities["masses"].std()])
    points_zpt.append([quantities["epoch"], quantities["Z_pT"].mean(), quantities["Z_pT"].std()])
    points_zpz.append([quantities["epoch"], quantities["Z_pZ"].mean(), quantities["Z_pZ"].std()])
    points_phis.append([quantities["epoch"], quantities["phis"].mean(), quantities["phis"].std()])
    points_dphis.append([quantities["epoch"], quantities["dphis"].mean(), quantities["dphis"].std()])
    points_etas.append([quantities["epoch"], quantities["etas"].mean(), quantities["etas"].std()])
    points_detas.append([quantities["epoch"], quantities["detas"].mean(), quantities["detas"].std()])
# Reference ("truth") distributions from the training data.  Column 0 is the
# invariant mass; columns 1-8 are presumably the two lepton four-vectors
# (cf. the l1/l2 plot labels below) -- confirm against data_xyz.npy layout.
mZs = data[:,0]
zpz = Z_pZ(data[:,range(1,9)])
zpt = Z_pT(data[:,range(1,9)])
phis = get_phis(data[:,range(1,9)])
dphis = get_dphis(data[:,range(1,9)])
etas = get_etas(data[:,range(1,9)])
detas = get_detas(data[:,range(1,9)])
# zcent = 90.9925
# zstd = 5.2383
def moving_average(a, n=3):
    """Return the simple moving average of ``a`` over windows of length ``n``.

    The result has ``len(a) - n + 1`` entries; a cumulative sum keeps the
    whole computation vectorized.
    """
    csum = np.cumsum(a, dtype=float)
    window_sums = csum[n - 1:].copy()
    window_sums[1:] -= csum[:-n]
    return window_sums / n
def make_plot(points, truth, label_truth, label_pred, fname):
    """Plot the generated mean/std per epoch against the truth mean/std band.

    points: list of [epoch, mean, std]; truth: 1-D array of reference values.
    When ``smooth`` is on, a moving average (window=15) smooths the curves.
    """
    truth_cent = truth.mean()
    truth_std = truth.std()
    points = sorted(points)
    smooth = True
    window = 15
    points = np.array(points)
    if not smooth:
        xvals = points[:,0]
        yvals = points[:,1]
        ydown = points[:,2]
        yup = points[:,2]
    else:
        # Drop the first window-1 epochs so x aligns with the smoothed y.
        xvals = points[:,0][window-1:]
        yvals = moving_average(points[:,1],n=window)
        ydown = moving_average(points[:,2],n=window)
        yup = moving_average(points[:,2],n=window)
    # hpred = r.TH1F("hpred",100,truth.min(),truth.max())
    # htruth = r.TH1F("htruth",100,truth.min(),truth.max())
    # fill_fast(hpred, yvals)
    # fill_fast(htruth, truth)
    # First tuple draws the flat truth band across the x range; the second
    # draws the generated curve with its std as the error band.
    ply.plot_graph(
        [
            ([0.,max(xvals)],[truth_cent,truth_cent],[truth_std,truth_std],[truth_std,truth_std]),
            (xvals,yvals,ydown,yup),
        ],
        colors = [r.kAzure+2,r.kRed-2],
        legend_labels = [label_truth, label_pred],
        options = {
            "legend_alignment": "bottom right",
            "legend_scalex": 0.7,
            "xaxis_label": "epoch",
            "yaxis_label": label_pred,
            "output_name": fname,
            "output_ic": True,
        }
    )
# One summary plot per quantity: generated mean/std vs epoch against truth.
make_plot(points_mz, mZs, "m_{Z}", "#mu(inv. mass)", "plots/epoch_mz.png")
make_plot(points_zpt, zpt, "p_{T}^{Z}", "p_{T}^{Z} generated", "plots/epoch_zpt.png")
make_plot(points_zpz, zpz, "p_{z}^{Z}", "p_{z}^{Z} generated", "plots/epoch_zpz.png")
make_plot(points_phis, phis, "#phi(lep)", "#phi(lep) generated", "plots/epoch_phis.png")
make_plot(points_dphis, dphis, "#delta#phi(l1,l2)", "#delta#phi(l1,l2) generated", "plots/epoch_dphis.png")
make_plot(points_etas, etas, "#eta(lep)", "#eta(lep) generated", "plots/epoch_etas.png")
make_plot(points_detas, detas, "#delta#eta(l1,l2)", "#delta#eta(l1,l2) generated", "plots/epoch_detas.png")
| [
"amin.nj@gmail.com"
] | amin.nj@gmail.com |
2cc0e7cd02d52e631303ca9340ee8a22a5c7bcfe | 44722fb1541645937f17e8e920f4954ff99cc046 | /src/gamesbyexample/rainbow.py | 553517b98e511d9caf24f98f17260bc85c482dfa | [] | no_license | asweigart/gamesbyexample | a065d21be6c2e05a4c17643986b667efae0bc6de | 222bfc3b15ade1cf3bde158ba72a8b7a969ccc5a | refs/heads/main | 2023-07-16T12:12:58.541597 | 2021-09-01T21:24:35 | 2021-09-01T21:24:35 | 343,331,493 | 89 | 10 | null | null | null | null | UTF-8 | Python | false | false | 1,673 | py | """Rainbow, by Al Sweigart al@inventwithpython.com
Shows a simple rainbow animation. Press Ctrl-C to stop.
This code is available at https://nostarch.com/big-book-small-python-programming
Tags: tiny, artistic, bext, beginner, scrolling"""
__version__ = 0
import time, sys
try:
    import bext  # cross-platform terminal color module
except ImportError:
    print('This program requires the bext module, which you')
    print('can install by following the instructions at')
    print('https://pypi.org/project/Bext/')
    sys.exit()
print('Rainbow, by Al Sweigart al@inventwithpython.com')
print('Press Ctrl-C to stop.')
time.sleep(3)
indent = 0 # How many spaces to indent.
indentIncreasing = True # Whether the indentation is increasing or not.
try:
    while True: # Main program loop.
        print(' ' * indent, end='')
        # Print one band of '##' per color so the stripes scroll as a rainbow.
        bext.fg('red')
        print('##', end='')
        bext.fg('yellow')
        print('##', end='')
        bext.fg('green')
        print('##', end='')
        bext.fg('blue')
        print('##', end='')
        bext.fg('cyan')
        print('##', end='')
        bext.fg('purple')
        print('##')
        if indentIncreasing:
            # Increase the number of spaces:
            indent = indent + 1
            if indent == 60: # (!) Change this to 10 or 30.
                # Change direction:
                indentIncreasing = False
        else:
            # Decrease the number of spaces:
            indent = indent - 1
            if indent == 0:
                # Change direction:
                indentIncreasing = True
        time.sleep(0.02) # Add a slight pause.
except KeyboardInterrupt:
    sys.exit() # When Ctrl-C is pressed, end the program.
| [
"asweigart@gmail.com"
] | asweigart@gmail.com |
1312438de18b6fd9bacc2273fba281dfd62f8874 | c1960138a37d9b87bbc6ebd225ec54e09ede4a33 | /adafruit-circuitpython-bundle-py-20210402/lib/adafruit_ble_eddystone/uid.py | fe5807d34308cdf0e9866a52eadba92ba46edf29 | [] | no_license | apalileo/ACCD_PHCR_SP21 | 76d0e27c4203a2e90270cb2d84a75169f5db5240 | 37923f70f4c5536b18f0353470bedab200c67bad | refs/heads/main | 2023-04-07T00:01:35.922061 | 2021-04-15T18:02:22 | 2021-04-15T18:02:22 | 332,101,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,563 | py | # SPDX-FileCopyrightText: 2020 Scott Shawcroft for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
`adafruit_ble_eddystone.uid`
================================================================================
Static Eddystone UID advertisement. Documented by Google here:
https://github.com/google/eddystone/tree/master/eddystone-uid
"""
from . import EddystoneAdvertisement, EddystoneFrameStruct, EddystoneFrameBytes
__version__ = "1.0.5"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_BLE_Eddystone.git"
class EddystoneUID(EddystoneAdvertisement):  # pylint: disable=too-few-public-methods
    """Static Eddystone unique identifier.

    :param bytes instance_id: instance component of the id. 6 bytes long
    :param bytes namespace_id: namespace component of the id. 10 bytes long
    :param int tx_power: TX power at the beacon
    """

    # Fixed-docstring note: the frame layout below defines a 10-byte
    # namespace at offset 1 and a 6-byte instance at offset 11.
    match_prefixes = (b"\x03\xaa\xfe", b"\x16\xaa\xfe\x00")
    frame_type = b"\x00"

    tx_power = EddystoneFrameStruct("<B", offset=0)
    """TX power at the beacon in dBm"""

    namespace_id = EddystoneFrameBytes(length=10, offset=1)
    """10 byte namespace id"""

    instance_id = EddystoneFrameBytes(length=6, offset=11)
    """6 byte instance id"""

    reserved = EddystoneFrameBytes(length=2, offset=17)

    def __init__(self, instance_id, *, namespace_id=b"CircuitPy!", tx_power=0):
        super().__init__(minimum_size=20)
        # Only advertisements we construct ourselves are writable; parsed
        # (received) advertisements are read-only.
        if self.mutable:
            self.tx_power = tx_power
            self.namespace_id = namespace_id
            self.instance_id = instance_id
| [
"55570902+apalileo@users.noreply.github.com"
] | 55570902+apalileo@users.noreply.github.com |
131eac026aadd6a26cf792f42e57ceddf82a57cb | 3cb7dd2da84141168aee63a76b11eb0d9f75e6aa | /pretrained_tfmodels.py | 9c77d88842d235076ceb07d643ace6e7a0331104 | [] | no_license | OlgaBelitskaya/kaggle_tfpractice | 8428935d2736c15ea1b56d482ed04a66d01537e6 | ffcee547ea7817ffc5c81eb4817f03a4915f9009 | refs/heads/main | 2023-03-27T07:59:51.459085 | 2021-03-25T16:28:05 | 2021-03-25T16:28:05 | 308,240,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,837 | py | # -*- coding: utf-8 -*-
"""pretrained-tfmodels.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1auXjQuWrUv42OIACAXzYaSZb316dSI-t
"""
# Commented out IPython magic to ensure Python compatibility.
from IPython.display import display,HTML
def dhtml(st):
    # Render `st` as a styled, clickable section header in the notebook:
    # clicking the paragraph toggles all headers to the Ewert font via the
    # embedded <script>.  The first %s is filled with the header text.
    display(HTML("""<style>
    @import url('https://fonts.googleapis.com/css?family=Roboto|Ewert&effect=3d');
    </style><p class='font-effect-3d' onclick='setStyle(this,"#00ff66")'
    style='font-family:Roboto; font-size:25px; color:#ff355e;'>
    %s</p>"""%st+"""<script>
    function setStyle(element,c) {
      var docs=document.getElementsByClassName('font-effect-3d');
      for (var i=0; i<docs.length; i++) {
        docs[i].style='font-family:Ewert; font-size:22px;';
        docs[i].style.color=c;}; };
    </script>"""))
# Section header for the notebook output.
dhtml('Code Modules & Helpful Functions')
import numpy as np,pylab as pl,pandas as pd
import h5py,tensorflow as tf
import tensorflow_hub as th
def premodel(pixels,dense,mh,labels):
    # Build a transfer-learning classifier: a TF-Hub feature extractor `mh`
    # (fine-tuned, trainable=True) followed by a Dense/Dropout head.
    # pixels: input image side length; dense: hidden layer width;
    # labels: number of output classes.  Labels are integer-encoded,
    # hence sparse_categorical_crossentropy.
    model=tf.keras.Sequential([
        tf.keras.layers.Input((pixels,pixels,3),
                              name='input'),
        th.KerasLayer(mh,trainable=True),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(dense,activation='relu'),
        tf.keras.layers.Dropout(rate=.5),
        tf.keras.layers.Dense(labels,activation='softmax')])
    model.compile(optimizer='adam',metrics=['accuracy'],
                  loss='sparse_categorical_crossentropy')
    display(model.summary())
    return model
# Checkpoint path shared by ModelCheckpoint and model.load_weights below.
fw='weights.best.hdf5'
def cb(fw):
    """Build the standard callback list for a training run.

    fw -- checkpoint file path for the best-validation weights.
    Returns [checkpoint saver, early stopping, LR reducer].
    """
    callbacks = tf.keras.callbacks
    saver = callbacks.ModelCheckpoint(filepath=fw, save_best_only=True, verbose=2)
    stopper = callbacks.EarlyStopping(monitor='val_loss', patience=20, verbose=2)
    lr_schedule = callbacks.ReduceLROnPlateau(monitor='val_loss', verbose=2,
                                              patience=5, factor=.8)
    return [saver, stopper, lr_schedule]
def display_resize(x_train,x_valid,x_test,
                   y_valid,pixels):
    # Resize all three splits to (pixels, pixels) for the chosen TF-Hub
    # model, show one validation example as a sanity check, and return the
    # resized splits as [train, valid, test].
    x_train=tf.image.resize(x_train,[pixels,pixels])
    x_valid=tf.image.resize(x_valid,[pixels,pixels])
    x_test=tf.image.resize(x_test,[pixels,pixels])
    img=x_valid[1]
    lbl='One example of resized images \nlabel: '+\
        str(y_valid[1][0])+'\nshape: '+str(img.shape)
    pl.imshow(img); pl.title(lbl)
    return [x_train,x_valid,x_test]
dhtml('Data Loading & Preprocessing')
# Load the handwritten-letter images; scale pixels to [0,1] and shift
# labels to be 0-based.
fpath='../input/classification-of-handwritten-letters/'
f='LetterColorImages_123.h5'
f=h5py.File(fpath+f,'r')
keys=list(f.keys()); print(keys)
x=np.array(f[keys[1]],dtype='float32')/255
y=np.array(f[keys[2]],dtype='int32')\
.reshape(-1,1)-1
# Fixed-seed shuffle, then 10%/10%/80% test/valid/train split.
N=len(y); n=int(.1*N)
shuffle_ids=np.arange(N)
np.random.RandomState(23).shuffle(shuffle_ids)
x,y=x[shuffle_ids],y[shuffle_ids]
x_test,x_valid,x_train=x[:n],x[n:2*n],x[2*n:]
y_test,y_valid,y_train=y[:n],y[n:2*n],y[2*n:]
x_valid.shape,y_valid.shape
dhtml('Pre-Trained Saved Models')
# Models #1/#2 share 96x96 inputs; #3/#4 share 128x128 inputs, so the
# resized tensors are reused between consecutive models.
[handle_base,pixels]=["mobilenet_v2_050_96",96]
dhtml('#1 '+handle_base)
mhandle="https://tfhub.dev/google/imagenet/{}/feature_vector/4"\
.format(handle_base)
[x_train1,x_valid1,x_test1]=\
display_resize(x_train,x_valid,x_test,
               y_valid,pixels)
model=premodel(pixels,1024,mhandle,33)
history=model.fit(x=x_train1,y=y_train,batch_size=64,
                  epochs=50,callbacks=cb(fw),
                  validation_data=(x_valid1,y_valid))
model.load_weights(fw)
model.evaluate(x_test1,y_test)
[handle_base,pixels]=["mobilenet_v2_075_96",96]
dhtml('#2 '+handle_base)
mhandle="https://tfhub.dev/google/imagenet/{}/feature_vector/4"\
.format(handle_base)
model=premodel(pixels,1024,mhandle,33)
history=model.fit(x=x_train1,y=y_train,batch_size=64,
                  epochs=50,callbacks=cb(fw),
                  validation_data=(x_valid1,y_valid))
model.load_weights(fw)
model.evaluate(x_test1,y_test)
[handle_base,pixels]=["mobilenet_v1_100_128",128]
dhtml('#3 '+handle_base)
mhandle="https://tfhub.dev/google/imagenet/{}/feature_vector/4"\
.format(handle_base)
[x_train1,x_valid1,x_test1]=\
display_resize(x_train,x_valid,x_test,
               y_valid,pixels)
model=premodel(pixels,1024,mhandle,33)
history=model.fit(x=x_train1,y=y_train,batch_size=64,
                  epochs=50,callbacks=cb(fw),
                  validation_data=(x_valid1,y_valid))
model.load_weights(fw)
model.evaluate(x_test1,y_test)
[handle_base,pixels]=["mobilenet_v2_050_128",128]
dhtml('#4 '+handle_base)
mhandle="https://tfhub.dev/google/imagenet/{}/feature_vector/4"\
.format(handle_base)
model=premodel(pixels,1024,mhandle,33)
history=model.fit(x=x_train1,y=y_train,batch_size=64,
                  epochs=50,callbacks=cb(fw),
                  validation_data=(x_valid1,y_valid))
model.load_weights(fw)
model.evaluate(x_test1,y_test)
"safuolga@gmail.com"
] | safuolga@gmail.com |
7824041c43a79311186c6802442e4a2d26292730 | fb00b570251ba52df467e4cc030a30e778f8a970 | /Atividade 02 - semana 09/questão1_semana9_atividade02_runcodes.py | 6893a0b29843828af8d9941aa6aed51063808c08 | [] | no_license | SirLeonardoFerreira/Atividades-ifpi | 7379f9df4640fd1ee3623d80e4341f495e855895 | e366ee3f801dc9a1876c7399a2eefd37a03d0a55 | refs/heads/master | 2023-01-05T04:03:30.774277 | 2020-11-02T00:56:10 | 2020-11-02T00:56:10 | 287,967,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,441 | py | from random import randint, seed
seed()
def gerar_matriz(linhas, colunas):
    """Read linhas*colunas integers from stdin (one per line, row by row)
    and return them as a list of row lists."""
    return [[int(input()) for _ in range(colunas)] for _ in range(linhas)]
def main():
    """Read an N x N integer matrix and print the (row, col) positions of
    its largest and smallest elements.

    On ties the first occurrence (row-major order) wins.  This replaces the
    previous index()-based search, which could report the wrong position
    when the matrix contained duplicate values (list.index returns the
    first match at/after an auxiliary cursor whose reset logic was broken).
    """
    numero = int(input())
    matriz = gerar_matriz(numero, numero)
    maior = menor = matriz[0][0]
    pos_maior = pos_menor = (0, 0)
    for i, linha in enumerate(matriz):
        for j, valor in enumerate(linha):
            if valor > maior:
                maior = valor
                pos_maior = (i, j)
            elif valor < menor:
                menor = valor
                pos_menor = (i, j)
    # Same output format as before: one tuple per line, e.g. "(0, 1)".
    print(f'{pos_maior}')
    print(f'{pos_menor}')


if __name__ == '__main__':
    main()
| [
"lleoalves02@gmail.com"
] | lleoalves02@gmail.com |
2a26ace393d5196c245e912229e343c128df3fb8 | 98d61512fdf7f8426d4634a86edd25669944ab9e | /algorithms/DailyTemperatures/solution.py | 2b030a3ca012c45739680ce2a4dc8556e3ca46c6 | [] | no_license | P-ppc/leetcode | 145102804320c6283fa653fc4a7ae89bf745b2fb | 0d90db3f0ca02743ee7d5e959ac7c83cdb435b92 | refs/heads/master | 2021-07-12T02:49:15.369119 | 2018-11-13T05:34:51 | 2018-11-24T12:34:07 | 132,237,265 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 536 | py | class Solution(object):
def dailyTemperatures(self, temperatures):
"""
:type temperatures: List[int]
:rtype: List[int]
"""
length = len(temperatures)
res = [0] * length
stack = []
for i in xrange(length):
while len(stack) > 0 and stack[-1]['value'] < temperatures[i]:
e = stack.pop()
res[e['index']] = i - e['index']
stack.append({ 'index': i, 'value': temperatures[i] })
return res | [
"ppc-user@foxmail.com"
] | ppc-user@foxmail.com |
86fdb2307fbaf9aa02aa603604f2b03812fbd954 | bd7887ed09185178331b8579a0f01690241f0daf | /Chapter-05/classification-labeledpoint.py | eba3916dbd3ada6b244bfa4e57ef7e19505a5ba8 | [] | no_license | nanfengpo/MachineLearningSpark | a9622ef355e10f3697462eec5168078d01e689d7 | de68154bd08908dc91ca9776c21dc2c6e441d242 | refs/heads/master | 2021-01-21T15:13:54.243853 | 2016-09-29T03:46:43 | 2016-09-29T03:46:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,225 | py | # coding=utf-8
from pyspark import SparkContext, SparkConf
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.classification import LogisticRegressionWithLBFGS
from pyspark.mllib.classification import SVMWithSGD
from pyspark.mllib.classification import NaiveBayes
from pyspark.mllib.tree import DecisionTree
from pyspark.mllib.linalg.distributed import RowMatrix
from pyspark.mllib.linalg import Vectors
from pyspark.mllib.feature import StandardScaler
# Local 2-core Spark context with a 5 GB executor for this example.
conf = SparkConf().setAppName("Classification-LabeledPoint").setMaster("local[2]").set("spark.executor.memory", "5g")
sc = SparkContext(conf= conf)
#get data source
# Tab-separated training file without a header row; each record becomes a
# list of string fields.
raw_data = sc.textFile("/user-program/python/MachineLearningSpark/Data/train-noheader.tsv")
records = raw_data.map(lambda line : line.split("\t"))
records_first_data = records.first()
print("the first data of records :")
print(records_first_data)
print("the number of records fields :")
print(len(records_first_data))
#get data feature
def labeled_point(r):
    """Turn one raw tab-split record into a LabeledPoint.

    The last column is the integer label; columns 4..-2 are the numeric
    features.  Double quotes are replaced by spaces and '?' (missing
    value) becomes 0.
    """
    # List comprehensions instead of map(): on Python 3, map() returns a
    # lazy iterator that cannot be indexed or sliced as the old code did,
    # so this also fixes a Py3 incompatibility without changing the values.
    trimmed = [field.replace('\"', ' ') for field in r]
    label = int(trimmed[-1])
    features = [float(field.replace('?', '0')) for field in trimmed[4:-1]]
    return LabeledPoint(label, Vectors.dense(features))
# Convert every record to a LabeledPoint and count the dataset.
data = records.map(lambda r : labeled_point(r))
num_data = data.count()
print("the number of data :")
print(num_data)
def labeled_point_nb(r):
    """Like labeled_point, but clamps negative feature values to 0.0
    (presumably because MLlib NaiveBayes rejects negative features --
    confirm against the MLlib docs).
    """
    # List comprehensions instead of map() for Python-3 compatibility;
    # max(v, 0.0) is equivalent to the old in-loop "if v < 0.0: v = 0.0".
    trimmed = [field.replace('\"', ' ') for field in r]
    label = int(trimmed[-1])
    features = [max(float(field.replace('?', '0')), 0.0)
                for field in trimmed[4:-1]]
    return LabeledPoint(label, Vectors.dense(features))
nb_data = records.map(lambda r : labeled_point_nb(r))
print("the first data of nb data and the count of nb data:")
print(nb_data.first())
#start train model
num_iterations = 10
max_tree_depth = 5
# Train four classifiers on the same data for comparison.
lr_model = LogisticRegressionWithLBFGS().train(data, num_iterations)
print("logistic regression model :")
print(lr_model)
svm_model = SVMWithSGD().train(data, num_iterations)
print("svm model :")
print(svm_model)
nb_model = NaiveBayes().train(nb_data)
print("naive bayes model :")
print(nb_model)
dt_model = DecisionTree().trainClassifier(data, 2, {})
print("decision tree model :")
print(dt_model)
#start predict
data_point = data.first()
lr_prediction = lr_model.predict(data_point.features)
print("logistic model prediction :" + str(lr_prediction))
print("the true label :" + str(data_point.label))
#analyze data
# Column-wise summary statistics of the raw feature matrix.
vectors = data.map(lambda lp : lp.features)
matrix = RowMatrix(vectors)
matrix_summary = matrix.computeColumnSummaryStatistics()
print("the col mean of matrix :")
print(matrix_summary.mean())
print("the col min of matrix :")
print(matrix_summary.min())
print("the col max of matrix :")
print(matrix_summary.max())
print("the col variance of matrix :")
print(matrix_summary.variance())
print("the col num non zero of matrix :")
print(matrix_summary.numNonzeros())
#transform data from data to standard scalar
# Standardize features (zero mean, unit variance) and rebuild LabeledPoints.
scaler = StandardScaler(withMean = True, withStd = True).fit(vectors)
labels = data.map(lambda lp : lp.label)
features_transformed = scaler.transform(vectors)
scaled_data = (labels.zip(features_transformed).map(lambda p : LabeledPoint(p[0], p[1])))
print("transformation before :")
print(data.first().features)
print("transformation after :")
print(scaled_data.first().features)
#train logistic regression use scaled data
lr_model_scaled = LogisticRegressionWithLBFGS().train(scaled_data, num_iterations)
print("logistic regression model use scaled data :")
print(lr_model_scaled)
# def total_correct_scaled(sd):
#     if lr_model_scaled.predict(sd.features) == sd.label:
#         return 1
#     else:
#         return 0
# lr_total_correct_scaled = scaled_data.map(lambda sd : total_correct_scaled(sd)).sum()
# print(lr_total_correct_scaled)
# lr_accuracy_scaled = float(lr_total_correct_scaled)/float(num_data)
# print("logistic regression accuracy scaled :")
# print(lr_accuracy_scaled) #the memory is enough
sc.stop()
"lovejing0306@gmail.com"
] | lovejing0306@gmail.com |
ab6a93a7699cad2d1b540dd5d51fa8a2cf133dd1 | 83956acc942a3c0e537b474e7fe80af2819658db | /devops-console/apps/test/migrations/0003_auto_20190916_1544.py | f430cb476290fbbafa8ebce6fb1b51882e628374 | [
"Apache-2.0"
] | permissive | lilinghell/devops | e3312d77a5b970550491d86c4befb59d5193c2dd | 1b2890d3f2d9f6e15e5b32d0910bc4768f065adc | refs/heads/master | 2022-12-10T04:43:37.764577 | 2020-08-04T08:59:24 | 2020-08-04T08:59:24 | 225,258,838 | 5 | 1 | Apache-2.0 | 2022-12-08T11:23:22 | 2019-12-02T01:24:04 | Vue | UTF-8 | Python | false | false | 8,414 | py | # Generated by Django 2.1.5 on 2019-09-16 07:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 2.1.5.  Creates the Test, TestAutoPlan,
    # TestCase and TestPlan tables and wires up their relations.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('orgs', '0009_organization_logo'),
        ('features', '0017_auto_20190911_1550'),
        ('designs', '0004_auto_20190916_1544'),
        ('test', '0002_auto_20190916_1130'),
    ]

    operations = [
        migrations.CreateModel(
            name='Test',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('prerequisites', models.TextField(blank=True, null=True, verbose_name='前置条件')),
                ('body', models.TextField(verbose_name='请求body')),
                ('expected', models.TextField(blank=True, null=True, verbose_name='预期结果')),
                ('response', models.TextField(blank=True, null=True, verbose_name='response')),
            ],
            options={
                'ordering': ['-created_at'],
                'db_table': 'test',
            },
        ),
        migrations.CreateModel(
            name='TestAutoPlan',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=128, verbose_name='名称')),
                ('time', models.CharField(max_length=64, verbose_name='时刻,1-24逗号隔开')),
                ('week', models.CharField(max_length=14, verbose_name='星期几,1-7逗号隔开')),
                ('description', models.TextField(null=True, verbose_name='描述')),
            ],
            options={
                'ordering': ['-created_at'],
                'db_table': 'test_auto_plan',
            },
        ),
        migrations.CreateModel(
            name='TestCase',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=128, verbose_name='用例名')),
                ('status', models.CharField(choices=[('0', '0'), ('1', '1'), ('2', '2'), ('3', '3')], max_length=2, verbose_name='状态')),
                ('type', models.CharField(choices=[('0', '0'), ('1', '1')], max_length=2, verbose_name='用例类型')),
                ('level', models.CharField(choices=[('P0', 'P0'), ('P1', 'P1'), ('P2', 'P2'), ('P3', 'P3')], max_length=2, verbose_name='用例等级')),
                ('prerequisites', models.TextField(blank=True, null=True, verbose_name='前置条件')),
                ('desc', models.TextField(blank=True, null=True, verbose_name='描述')),
                ('expected', models.TextField(blank=True, null=True, verbose_name='预期结果')),
                ('created_by', models.ForeignKey(on_delete=models.SET(-999), to=settings.AUTH_USER_MODEL)),
                ('feature', models.ManyToManyField(blank=True, related_name='test_case_feature', to='features.Feature', verbose_name='归属需求')),
                ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='test_case_group', to='test.TestGroup', verbose_name='归属用例组')),
                ('org', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orgs.Organization')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='test_case_user', to=settings.AUTH_USER_MODEL, verbose_name='归属用例组')),
            ],
            options={
                'ordering': ['-created_at'],
                'db_table': 'test_case',
            },
        ),
        migrations.CreateModel(
            name='TestPlan',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=128, verbose_name='名称')),
                ('start_date', models.CharField(max_length=8, verbose_name='开始日期YYYYMMDD')),
                ('end_date', models.CharField(max_length=8, verbose_name='结束日期YYYYMMDD')),
                ('description', models.TextField(null=True, verbose_name='描述')),
                ('case', models.ManyToManyField(blank=True, related_name='test_plan_case', to='test.TestCase', verbose_name='用例')),
                ('created_by', models.ForeignKey(on_delete=models.SET(-999), to=settings.AUTH_USER_MODEL)),
                ('org', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orgs.Organization')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='test_plan_user', to=settings.AUTH_USER_MODEL, verbose_name='负责人')),
            ],
            options={
                'ordering': ['-created_at'],
                'db_table': 'test_plan',
            },
        ),
        migrations.AddField(
            model_name='testautoplan',
            name='case',
            field=models.ManyToManyField(blank=True, related_name='test_auto_plan_case', to='test.TestCase', verbose_name='用例'),
        ),
        migrations.AddField(
            model_name='testautoplan',
            name='created_by',
            field=models.ForeignKey(on_delete=models.SET(-999), to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='testautoplan',
            name='env',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='test_auto_plan_env', to='test.TestEnv', verbose_name='测试环境'),
        ),
        migrations.AddField(
            model_name='testautoplan',
            name='org',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orgs.Organization'),
        ),
        migrations.AddField(
            model_name='testautoplan',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='test_auto_plan_user', to=settings.AUTH_USER_MODEL, verbose_name='负责人'),
        ),
        migrations.AddField(
            model_name='test',
            name='case',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='test_case', to='test.TestCase', verbose_name='用例'),
        ),
        migrations.AddField(
            model_name='test',
            name='created_by',
            field=models.ForeignKey(on_delete=models.SET(-999), to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='test',
            name='env',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='test_env', to='test.TestEnv', verbose_name='测试环境'),
        ),
        migrations.AddField(
            model_name='test',
            name='interface',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='test_interface', to='designs.Interfaces', verbose_name='接口'),
        ),
        migrations.AddField(
            model_name='test',
            name='org',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='orgs.Organization'),
        ),
        migrations.AlterUniqueTogether(
            name='testplan',
            unique_together={('name',)},
        ),
        migrations.AlterUniqueTogether(
            name='testautoplan',
            unique_together={('name',)},
        ),
        migrations.AlterUniqueTogether(
            name='test',
            unique_together={('case',)},
        ),
    ]
| [
"lilinghell@163.com"
] | lilinghell@163.com |
663cfd01044341ba3397f1eb66fd1007ef15ba9a | a7b07e14f58008e4c9567a9ae67429cedf00e1dc | /docs/jnpr_healthbot_swagger/swagger_client/models/rule_schema_formula_stddev.py | 77fe0829a2dbee849bd20fa490be0d4d15af7329 | [
"Apache-2.0"
] | permissive | dmontagner/healthbot-py-client | 3750d8375bc4fa7bedcdbc6f85f17fb812c19ea9 | 0952e0a9e7ed63c9fe84879f40407c3327735252 | refs/heads/master | 2020-08-03T12:16:38.428848 | 2019-09-30T01:57:24 | 2019-09-30T01:57:24 | 211,750,200 | 0 | 0 | Apache-2.0 | 2019-09-30T01:17:48 | 2019-09-30T01:17:47 | null | UTF-8 | Python | false | false | 4,780 | py | # coding: utf-8
"""
Healthbot APIs
API interface for Healthbot application # noqa: E501
OpenAPI spec version: 1.0.0
Contact: healthbot-hackers@juniper.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RuleSchemaFormulaStddev(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'field_name': 'str',
        'time_range': 'str'
    }

    attribute_map = {
        'field_name': 'field-name',
        'time_range': 'time-range'
    }

    def __init__(self, field_name=None, time_range=None):  # noqa: E501
        """RuleSchemaFormulaStddev - a model defined in Swagger"""  # noqa: E501
        self._field_name = None
        self._time_range = None
        self.discriminator = None
        # Both attributes are required: the property setters below raise
        # ValueError when given None.
        self.field_name = field_name
        self.time_range = time_range

    @property
    def field_name(self):
        """Gets the field_name of this RuleSchemaFormulaStddev.  # noqa: E501

        Field name on which standard deviation operation needs to be performed  # noqa: E501

        :return: The field_name of this RuleSchemaFormulaStddev.  # noqa: E501
        :rtype: str
        """
        return self._field_name

    @field_name.setter
    def field_name(self, field_name):
        """Sets the field_name of this RuleSchemaFormulaStddev.

        Field name on which standard deviation operation needs to be performed  # noqa: E501

        :param field_name: The field_name of this RuleSchemaFormulaStddev.  # noqa: E501
        :type: str
        """
        if field_name is None:
            raise ValueError("Invalid value for `field_name`, must not be `None`")  # noqa: E501

        self._field_name = field_name

    @property
    def time_range(self):
        """Gets the time_range of this RuleSchemaFormulaStddev.  # noqa: E501

        How much back in time should we look for data. Specify positive integer followed by s/m/h/d/w/y representing seconds/minutes/hours/days/weeks/years. Eg: 2s  # noqa: E501

        :return: The time_range of this RuleSchemaFormulaStddev.  # noqa: E501
        :rtype: str
        """
        return self._time_range

    @time_range.setter
    def time_range(self, time_range):
        """Sets the time_range of this RuleSchemaFormulaStddev.

        How much back in time should we look for data. Specify positive integer followed by s/m/h/d/w/y representing seconds/minutes/hours/days/weeks/years. Eg: 2s  # noqa: E501

        :param time_range: The time_range of this RuleSchemaFormulaStddev.  # noqa: E501
        :type: str
        """
        if time_range is None:
            raise ValueError("Invalid value for `time_range`, must not be `None`")  # noqa: E501
        # Validated against e.g. "2s", "15m", "1y" (positive int + unit).
        if time_range is not None and not re.search('^[1-9][0-9]*[smhdwy]$', time_range):  # noqa: E501
            raise ValueError("Invalid value for `time_range`, must be a follow pattern or equal to `/^[1-9][0-9]*[smhdwy]$/`")  # noqa: E501

        self._time_range = time_range

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, RuleSchemaFormulaStddev):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"nitinkr@juniper.net"
] | nitinkr@juniper.net |
f08cb6551ae9483477d79afa14b9d3a1152bdb97 | 3a512b1ef962a77cc358747e51a565b0311fb5d3 | /tests/run_tests.py | c2e2de73bcb479d957f4067c2e61158b279dcb44 | [] | no_license | gokererdogan/rllib | 2f19ce8bcb011f213e66b61e5fd3ab3d68a8fc2a | 3052a9c6c95d3c8d5dc833bff0d8a8a01d8f360a | refs/heads/master | 2021-01-09T20:26:23.181937 | 2016-10-09T00:16:18 | 2016-10-09T00:16:18 | 62,526,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py | """
rllib - Reinforcement Learning Library
Script for running tests. Enables the user to skip slow tests and run only the selected test modules.
Run using
python run_tests.py module1 module2 ... --skipslow
To run coverage analysis run (requires coverage.py to be installed)
coverage run --source ../ run_tests.py module1 module 2 ... --skipslow
Goker Erdogan
https://github.com/gokererdogan/
"""
import unittest
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Run rllib unittests.")
parser.add_argument('modules', type=str, nargs='+', help='Test module names to run. If discover, '
'uses unittest.discover to find tests in '
'the current folder.')
# note that --skipslow parameter seems to have no effect here but it is checked in TestCase classes using
# unittest skipIf decorator.
parser.add_argument('--skipslow', action='store_true', help='Do not run slow tests.')
args = parser.parse_args()
loader = unittest.TestLoader()
if 'discover' in args.modules:
tests = loader.discover('./')
else:
tests = loader.loadTestsFromNames(args.modules)
unittest.TextTestRunner(verbosity=2).run(tests)
| [
"gokererdogan@gmail.com"
] | gokererdogan@gmail.com |
763f837f2946f23e4779eb98a1dd18d31f76bad3 | 3f66c9877f0c8a394dbc1fa98dedb9273316b175 | /services/github.py | c9fd6b5f5763e372632c8f203c01c1affbfc053a | [
"MIT"
] | permissive | onejgordon/flow-dashboard | c06f6760d0087cebebe75102b543ac35d7aa8469 | b8d85d9313e51cf386f6d2e5944fc958a7d96769 | refs/heads/develop | 2023-09-03T12:20:57.223724 | 2023-07-02T15:55:02 | 2023-07-02T15:55:02 | 84,657,014 | 1,801 | 250 | MIT | 2023-04-01T02:06:25 | 2017-03-11T14:46:24 | Python | UTF-8 | Python | false | false | 2,131 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# API calls to interact with Github
from google.appengine.api import urlfetch
import base64
import json
import logging
import urllib
from datetime import datetime, timedelta, time
from google.appengine.api import memcache
import tools
from bs4 import BeautifulSoup
BASE = 'https://api.github.com'
REPO_MEMKEY = "GITHUB:%s"
GH_DATE = "%Y-%m-%dT%H:%M:%SZ"
class GithubClient(object):
def __init__(self, user):
self.user = user
self.pat = self.user.get_integration_prop('github_pat')
self.github_username = self.user.get_integration_prop('github_username')
def _can_run(self):
return self.pat and self.github_username
def _parse_raw_date(self, date):
return datetime.strptime(date, GH_DATE)
def api_call(self, url):
'''
Return tuple (response_object, json parsed response)
'''
if not url.startswith('http'):
url = BASE + url
auth_header = {"Authorization": "Basic %s" % base64.b64encode("%s:%s" % (self.github_username, self.pat))}
logging.debug("GET %s" % url)
response = urlfetch.fetch(url, method="GET", deadline=60, headers=auth_header)
if response.status_code == 200:
return (response, json.loads(response.content))
else:
logging.debug(response.content)
return (response, None)
def get_contributions_on_date_range(self, date_range):
'''
Currently scraping Github public overview page (no API yet)
'''
response = urlfetch.fetch("https://github.com/%s?tab=overview" % self.github_username, deadline=30)
if response.status_code == 200:
bs = BeautifulSoup(response.content, "html.parser")
commits_dict = {}
for date in date_range:
iso_date = tools.iso_date(date)
commits_on_day = bs.find('rect', {'data-date': iso_date}).get('data-count', 0)
commits_dict[date] = commits_on_day
return commits_dict
else:
logging.error("Error getting contributions")
| [
"onejgordon@gmail.com"
] | onejgordon@gmail.com |
c270c18498a91bafae0c1cd098cfe070ee348d68 | 868e3f5c10a8043134aab7bc3d546b62e3d158c7 | /caffe2/python/operator_test/reduce_ops_test.py | 8595cc0d355c56c54f81892c12c96ed49c93641e | [
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla"
] | permissive | youyou3/caffe2 | 583cf3c96b5b8bbaa5891aae18b0a07289a14183 | 4f6b8a0cf984ef7ac908f2ec7b3d292ed915ede8 | refs/heads/master | 2021-01-20T15:26:36.207631 | 2018-08-04T08:55:38 | 2018-08-04T08:55:38 | 82,814,821 | 0 | 0 | null | 2017-02-22T14:38:36 | 2017-02-22T14:38:36 | null | UTF-8 | Python | false | false | 1,569 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
class TestReduceFrontSum(hu.HypothesisTestCase):
def reduce_op_test(self, op_name, op_ref, in_data, num_reduce_dims, device):
op = core.CreateOperator(
op_name,
["inputs"],
["outputs"],
num_reduce_dim=num_reduce_dims
)
self.assertReferenceChecks(
device_option=device,
op=op,
inputs=[in_data],
reference=op_ref
)
self.assertGradientChecks(
device, op, [in_data], 0, [0], stepsize=1e-2, threshold=1e-2)
@given(num_reduce_dim=st.integers(1, 3), **hu.gcs)
def test_reduce_from_sum(self, num_reduce_dim, gc, dc):
X = np.random.rand(7, 4, 3, 5).astype(np.float32)
def ref_sum(X):
return [np.sum(X, axis=(tuple(range(num_reduce_dim))))]
self.reduce_op_test("ReduceFrontSum", ref_sum, X, num_reduce_dim, gc)
@given(num_reduce_dim=st.integers(1, 3), **hu.gcs)
def test_reduce_from_mean(self, num_reduce_dim, gc, dc):
X = np.random.rand(6, 7, 8, 2).astype(np.float32)
def ref_mean(X):
return [np.mean(X, axis=(tuple(range(num_reduce_dim))))]
self.reduce_op_test("ReduceFrontMean", ref_mean, X, num_reduce_dim, gc)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
3e88bef79d59e2ae15ff41cf4f3de18b26aa0e2a | 1af49694004c6fbc31deada5618dae37255ce978 | /tools/android/build_speed/benchmark.py | 9b769d72d23cf1e454c328f37b94fc2b78b8c386 | [
"LGPL-2.0-or-later",
"Zlib",
"BSD-3-Clause",
"MIT",
"LGPL-2.1-only",
"LicenseRef-scancode-unknown-license-reference",
"MPL-1.1",
"GPL-2.0-only",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-unknown",
"APSL-2.0"
] | permissive | sadrulhc/chromium | 59682b173a00269ed036eee5ebfa317ba3a770cc | a4b950c23db47a0fdd63549cccf9ac8acd8e2c41 | refs/heads/master | 2023-02-02T07:59:20.295144 | 2020-12-01T21:32:32 | 2020-12-01T21:32:32 | 317,678,056 | 3 | 0 | BSD-3-Clause | 2020-12-01T21:56:26 | 2020-12-01T21:56:25 | null | UTF-8 | Python | false | false | 9,892 | py | #!/usr/bin/env python3
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tool to run build benchmarks (e.g. incremental build time).
Example Command:
tools/android/build_speed/benchmark.py all_incremental
Example Output:
Summary
gn args: target_os="android" use_goma=true android_fast_local_dev=true
gn gen: 6.7s
chrome_java_nosig: 36.1s avg (35.9s, 36.3s)
chrome_java_sig: 38.9s avg (38.8s, 39.1s)
chrome_java_res: 22.5s avg (22.5s, 22.4s)
base_java_nosig: 41.0s avg (41.1s, 40.9s)
base_java_sig: 93.1s avg (93.1s, 93.2s)
Note: This tool will make edits on files in your local repo. It will revert the
edits afterwards.
"""
import argparse
import contextlib
import logging
import os
import pathlib
import re
import subprocess
import sys
import time
import shutil
USE_PYTHON_3 = f'This script will only run under python3.'
_SRC_ROOT = os.path.normpath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir))
sys.path.append(os.path.join(_SRC_ROOT, 'build', 'android'))
from pylib import constants
# pylint: disable=line-too-long
_URL_BAR = 'chrome/android/java/src/org/chromium/chrome/browser/omnibox/UrlBar.java'
_COMMON_ARGS = [
'target_os="android"',
'use_goma=true',
]
_GN_ARG_PRESETS = {
'fast_local_dev': _COMMON_ARGS + ['android_fast_local_dev=true'],
'incremental_install': _COMMON_ARGS + ['incremental_install=true'],
}
_BENCHMARKS = {
'all_incremental': {
'suite': [
'chrome_java_nosig',
'chrome_java_sig',
'chrome_java_res',
'base_java_nosig',
'base_java_sig',
],
},
'extra_incremental': {
'suite': [
'turbine_headers',
'compile_java',
],
},
'chrome_java_nosig': {
'kind': 'incremental',
'target': 'chrome_public_apk',
'from_string': '"Url',
'to_string': '"Url1',
'change_file': _URL_BAR,
},
'chrome_java_sig': {
'kind': 'incremental',
'target': 'chrome_public_apk',
'from_string': 'UrlBar";',
'to_string': 'UrlBar";public void NewInterfaceMethod(){}',
'change_file': _URL_BAR,
},
'chrome_java_res': {
'kind': 'incremental',
'target': 'chrome_public_apk',
'from_string': '14181C',
'to_string': '14181D',
'change_file': 'chrome/android/java/res/values/colors.xml',
},
'base_java_nosig': {
'kind': 'incremental',
'target': 'chrome_public_apk',
'from_string': '"SysUtil',
'to_string': '"SysUtil1',
'change_file': 'base/android/java/src/org/chromium/base/SysUtils.java',
},
'base_java_sig': {
'kind': 'incremental',
'target': 'chrome_public_apk',
'from_string': 'SysUtils";',
'to_string': 'SysUtils";public void NewInterfaceMethod(){}',
'change_file': 'base/android/java/src/org/chromium/base/SysUtils.java',
},
'turbine_headers': {
'kind': 'incremental',
'target': 'chrome_public_apk',
'from_string': '# found in the LICENSE file.',
'to_string': '#temporary_edit_for_benchmark.py',
'change_file': 'build/android/gyp/turbine.py',
},
'compile_java': {
'kind': 'incremental',
'target': 'chrome_public_apk',
'from_string': '# found in the LICENSE file.',
'to_string': '#temporary_edit_for_benchmark.py',
'change_file': 'build/android/gyp/compile_java.py',
},
}
@contextlib.contextmanager
def _backup_file(file_path):
file_backup_path = file_path + '.backup'
logging.info('Creating %s for backup', file_backup_path)
# Move the original file and copy back to preserve metadata.
shutil.move(file_path, file_backup_path)
try:
shutil.copy(file_backup_path, file_path)
yield
finally:
shutil.move(file_backup_path, file_path)
def _run_and_time_cmd(cmd):
logging.debug('Running %s', cmd)
start = time.time()
try:
# Since output can be verbose, only show it for debug/errors.
show_output = logging.getLogger().isEnabledFor(logging.DEBUG)
subprocess.run(cmd,
cwd=_SRC_ROOT,
capture_output=not show_output,
check=True,
text=True)
except subprocess.CalledProcessError as e:
logging.error('Output was: %s', e.output)
raise
return time.time() - start
def _run_gn_gen(out_dir):
return _run_and_time_cmd(['gn', 'gen', '-C', out_dir])
def _run_autoninja(out_dir, *args):
return _run_and_time_cmd(['autoninja', '-C', out_dir] + list(args))
def _run_incremental_benchmark(*, out_dir, target, from_string, to_string,
change_file):
# This ensures that the only change is the one that this script makes.
prep_time = _run_autoninja(out_dir, target)
logging.info(f'Took {prep_time:.1f}s to prep this test')
change_file_path = os.path.join(_SRC_ROOT, change_file)
with _backup_file(change_file_path):
with open(change_file_path, 'r') as f:
content = f.read()
with open(change_file_path, 'w') as f:
new_content = re.sub(from_string, to_string, content)
assert content != new_content, (
f'Need to update {from_string} in {change_file}')
f.write(new_content)
yield _run_autoninja(out_dir, target)
# Since we are restoring the original file, this is the same incremental
# change, just reversed, so do a second run to save on prep time. This
# ensures a minimum of two runs.
pathlib.Path(change_file_path).touch()
yield _run_autoninja(out_dir, target)
def _run_benchmark(*, kind, **kwargs):
if kind == 'incremental':
return _run_incremental_benchmark(**kwargs)
else:
raise NotImplementedError(f'Benchmark type {kind} is not defined.')
def _format_result(time_taken):
avg_time = sum(time_taken) / len(time_taken)
list_of_times = ', '.join(f'{t:.1f}s' for t in time_taken)
result = f'{avg_time:.1f}s'
if len(time_taken) > 1:
result += f' avg ({list_of_times})'
return result
def _parse_benchmarks(benchmarks):
for benchmark in benchmarks:
assert benchmark in _BENCHMARKS, (
f'{benchmark} is not a valid benchmark/suite.')
info = _BENCHMARKS[benchmark]
if 'suite' in info:
yield from _parse_benchmarks(info['suite'])
else:
yield benchmark, info
def run_benchmarks(benchmarks, gn_args, output_directory, repeat):
out_dir = os.path.relpath(output_directory, _SRC_ROOT)
args_gn_path = os.path.join(out_dir, 'args.gn')
with _backup_file(args_gn_path):
with open(args_gn_path, 'w') as f:
# Use newlines instead of spaces since autoninja.py uses regex to
# determine whether use_goma is turned on or off.
f.write('\n'.join(gn_args))
yield f'gn gen', [_run_gn_gen(out_dir)]
for name, info in _parse_benchmarks(benchmarks):
logging.info(f'Starting {name}...')
time_taken = []
for run_num in range(repeat):
logging.info(f'Run number: {run_num + 1}')
for elapsed in _run_benchmark(out_dir=out_dir, **info):
logging.info(f'Time: {elapsed:.1f}s')
time_taken.append(elapsed)
logging.info(f'Completed {name}')
logging.info('Result: %s', _format_result(time_taken))
yield name, time_taken
def _list_benchmarks():
strs = ['\nBenchmarks:']
for name in _BENCHMARKS.keys():
strs.append(f' {name}')
return '\n'.join(strs)
def main():
parser = argparse.ArgumentParser(
description=__doc__ + _list_benchmarks(),
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('benchmark',
nargs='+',
metavar='BENCHMARK',
choices=_BENCHMARKS.keys(),
help='Names of default benchmark(s) to run.')
parser.add_argument('-A',
'--args',
choices=_GN_ARG_PRESETS.keys(),
default='fast_local_dev',
help='The set of GN args to use for these benchmarks.')
parser.add_argument('-r',
'--repeat',
type=int,
default=1,
help='Number of times to repeat the benchmark.')
parser.add_argument(
'-C',
'--output-directory',
help='If outdir is not provided, will attempt to guess.')
parser.add_argument('-v',
'--verbose',
action='count',
default=0,
help='1 to print logging, 2 to print ninja output.')
args = parser.parse_args()
if args.output_directory:
constants.SetOutputDirectory(args.output_directory)
constants.CheckOutputDirectory()
out_dir = constants.GetOutDirectory()
if args.verbose >= 2:
level = logging.DEBUG
elif args.verbose == 1:
level = logging.INFO
else:
level = logging.WARNING
logging.basicConfig(
level=level, format='%(levelname).1s %(relativeCreated)6d %(message)s')
gn_args = _GN_ARG_PRESETS[args.args]
results = run_benchmarks(args.benchmark, gn_args, out_dir, args.repeat)
print('Summary')
print(f'gn args: {" ".join(gn_args)}')
for name, result in results:
print(f'{name}: {_format_result(result)}')
if __name__ == '__main__':
sys.exit(main())
| [
"commit-bot@chromium.org"
] | commit-bot@chromium.org |
5a272cb0ea381fa3d256e53a54d6d6973a340be9 | a140fe192fd643ce556fa34bf2f84ddbdb97f091 | /.history/리스트_20200628150947.py | 5e4a0efaa2b29576d9569f7e2fcb75e79e9b810b | [] | no_license | sangha0719/py-practice | 826f13cb422ef43992a69f822b9f04c2cb6d4815 | 6d71ce64bf91cc3bccee81378577d84ba9d9c121 | refs/heads/master | 2023-03-13T04:40:55.883279 | 2021-02-25T12:02:04 | 2021-02-25T12:02:04 | 342,230,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 148 | py | # 리스트 []
# 지하철 칸별로 10명, 20명, 30명
# subway1 = 10
# subway2 = 20
# subway3 = 30
subway = [10, 20, 30]
print(subway)
subway
| [
"sangha0719@gmail.com"
] | sangha0719@gmail.com |
0f43ddb205e564af927fdbd2a8373be27ac57c82 | 010279e2ba272d09e9d2c4e903722e5faba2cf7a | /contrib/python/pyzmq/py2/zmq/green/device.py | 4b070237e312255dd3815cc49cf58b4f5529ca47 | [
"Apache-2.0",
"LGPL-3.0-only",
"LicenseRef-scancode-zeromq-exception-lgpl-3.0",
"BSD-3-Clause"
] | permissive | catboost/catboost | 854c1a1f439a96f1ae6b48e16644be20aa04dba2 | f5042e35b945aded77b23470ead62d7eacefde92 | refs/heads/master | 2023-09-01T12:14:14.174108 | 2023-09-01T10:01:01 | 2023-09-01T10:22:12 | 97,556,265 | 8,012 | 1,425 | Apache-2.0 | 2023-09-11T03:32:32 | 2017-07-18T05:29:04 | Python | UTF-8 | Python | false | false | 950 | py | # Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import zmq
from zmq.green import Poller
def device(device_type, isocket, osocket):
"""Start a zeromq device (gevent-compatible).
Unlike the true zmq.device, this does not release the GIL.
Parameters
----------
device_type : (QUEUE, FORWARDER, STREAMER)
The type of device to start (ignored).
isocket : Socket
The Socket instance for the incoming traffic.
osocket : Socket
The Socket instance for the outbound traffic.
"""
p = Poller()
if osocket == -1:
osocket = isocket
p.register(isocket, zmq.POLLIN)
p.register(osocket, zmq.POLLIN)
while True:
events = dict(p.poll())
if isocket in events:
osocket.send_multipart(isocket.recv_multipart())
if osocket in events:
isocket.send_multipart(osocket.recv_multipart())
| [
"akhropov@yandex-team.com"
] | akhropov@yandex-team.com |
bbafea7bc0e26f656674d86b73ec98bea6ca6bc5 | 9573a059adc7e5524cfdc4578ac5440be1878a62 | /examples/benchmarks/json/parsers/parsimonious_json.py | 90f96a73f8e5691fe9c5320865967900e6ae8495 | [
"MIT"
] | permissive | eerimoq/textparser | a1764fa06262b1355927a6573ebc7a8f4c51d482 | 1ef809eb283da3c3ec7b8bc682f11eeada3a81d6 | refs/heads/master | 2022-04-29T22:42:30.462809 | 2022-04-16T09:00:09 | 2022-04-16T09:00:09 | 141,811,843 | 32 | 9 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | """Based on
https://gist.github.com/goodmami/686385b4b39a3bac00fbbe78a5cda6c8, by
Michael Wayne Goodman.
"""
import timeit
from parsimonious.grammar import Grammar
grammar = Grammar(
r"""
Start = ~"\s*" ( Object / Array ) ~"\s*"
Object = ~"{\s*" Members? ~"\s*}"
Members = MappingComma* Mapping
MappingComma = Mapping ~"\s*,\s*"
Mapping = DQString ~"\s*:\s*" Value
Array = ~"\[\s*" Items? ~"\s*\]"
Items = ValueComma* Value
ValueComma = Value ~"\s*,\s*"
Value = Object / Array / DQString
/ TrueVal / FalseVal / NullVal / Float / Integer
TrueVal = "true"
FalseVal = "false"
NullVal = "null"
DQString = ~"\"[^\"\\\\]*(?:\\\\.[^\"\\\\]*)*\""
Float = ~"[-+]?(\d+(\.\d*)?|\.\d+)([eE][-+]?\d+)?"
Integer = ~"[-+]?\d+"
""")
def parse_time(json_string, iterations):
def _parse():
grammar.parse(json_string)
return timeit.timeit(_parse, number=iterations)
def parse(json_string):
return grammar.parse(json_string)
def version():
return 'unknown'
| [
"erik.moqvist@gmail.com"
] | erik.moqvist@gmail.com |
d29a70b5ace0f73e09a5fdd23a57fd314c755741 | 849a174efea976d4daed419b85668c2ba05fd2b9 | /algorithms/dynamic/binomial_coefficient_2.py | 97d61b41fa2af5a1780910a98b525b1ae8fa6082 | [] | no_license | samyuktahegde/Python | 61e6fedbdd2a94b29e4475621afa6d5e98bf49b8 | b02fa6e908661a918e0024f508df0192d5553411 | refs/heads/master | 2018-09-18T20:27:55.980689 | 2018-08-09T05:49:33 | 2018-08-09T05:49:33 | 116,491,078 | 0 | 0 | null | 2018-02-05T05:33:53 | 2018-01-06T14:52:16 | null | UTF-8 | Python | false | false | 403 | py | def binomial_coeffient(n, k):
c = [[0 for x in range(k+1)] for x in range(n+1)]
for i in range(n+1):
for j in range(min(i, k)+1):
if j==0 or j==i:
c[i][j] = 1
else:
c[i][j] = c[i-1][j-1]+c[i-1][j]
return c[n][k]
n = 4
k = 2
print("Value of C[" + str(n) + "][" + str(k) + "] is " + str(binomial_coeffient(n,k)))
| [
"noreply@github.com"
] | samyuktahegde.noreply@github.com |
dcdeadf0874314325430c005262caf34f5d57e95 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_chequebooks.py | a64bccb00541ab2be6bdffd65c502995c5fc16bc | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py |
from xai.brain.wordbase.nouns._chequebook import _CHEQUEBOOK
#calss header
class _CHEQUEBOOKS(_CHEQUEBOOK, ):
def __init__(self,):
_CHEQUEBOOK.__init__(self)
self.name = "CHEQUEBOOKS"
self.specie = 'nouns'
self.basic = "chequebook"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
43683693c70eb2d9afe8f668fac78a3d2a457a53 | 20674c17d815214bf66b75be686bb8a45c0f5914 | /version1/884_Uncommon_Words_from_Two_Sentences.py | 8b901dd8a2004bae8830fb7a37a2c6d05e11ca06 | [] | no_license | moontree/leetcode | e7b670969fe20785b15aae82996875fd66de1b08 | f2bf9b13508cd01c8f383789569e55a438f77202 | refs/heads/master | 2021-05-20T20:36:45.615420 | 2020-04-02T09:15:26 | 2020-04-02T09:15:26 | 252,408,563 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,652 | py | """
We are given two sentences A and B.
(A sentence is a string of space separated words.
Each word consists only of lowercase letters.)
A word is uncommon if it appears exactly once in one of the sentences,
and does not appear in the other sentence.
Return a list of all uncommon words.
You may return the list in any order.
Example 1:
Input:
A = "this apple is sweet",
B = "this apple is sour"
Output:
["sweet","sour"]
Example 2:
Input:
A = "apple apple",
B = "banana"
Output:
["banana"]
Note:
0 <= A.length <= 200
0 <= B.length <= 200
A and B both contain only spaces and lowercase letters.
"""
class Solution(object):
def uncommonFromSentences(self, A, B):
"""
:type A: str
:type B: str
:rtype: List[str]
"""
cache = {}
for word in A.split():
cache[word] = cache.get(word, 0) + 1
for word in B.split():
cache[word] = cache.get(word, 0) + 1
return [key for key in cache if cache[key] == 1]
examples = [
{
"input": {
"A": "this apple is sweet",
"B": "this apple is sour"
},
"output": ["sweet", "sour"]
}, {
"input": {
"A": "apple apple",
"B": "banana"
},
"output": ["banana"]
}
]
if __name__ == '__main__':
solution = Solution()
for n in dir(solution):
if not n.startswith('__'):
func = getattr(solution, n)
print(func)
for example in examples:
print '----------'
v = func(**example['input'])
print v, v == example['output'] | [
"zhangchao@zhangchaodeMacBook-Pro.local"
] | zhangchao@zhangchaodeMacBook-Pro.local |
186b4e5def4f17606a3f1234fae8c50ac9b8dfb9 | d5125ccc1ef9915ffd72c575225a620aac5cb347 | /study_scripts/python_learning_book/timeseqs.py | b235e262e790b0fcbadc02727377627af9a9e387 | [] | no_license | yurui829/stefanbo | 2231074e0e4f04438aff647563299ad1947bd760 | 449f862c81a3b4ae3e079ecb4a15b3a5cbcca701 | refs/heads/master | 2021-01-24T23:42:52.064783 | 2014-07-02T03:05:04 | 2014-07-02T03:05:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 716 | py | #!/usr/bin/env python
# File timeseqs.py
import sys, mytimer
reps = 10000
repslist = range(reps)
def forLoop():
res = []
for x in repslist:
res.append(abs(x))
return res
def listComp():
return [abs(x) for x in repslist]
def mapCall():
return list(map(abs, repslist))
def genExpr():
return list(abs(x) for x in repslist)
def genFunc():
def gen():
for x in repslist:
yield abs(x)
return list(gen())
print(sys.version)
for tester in (mytimer.timer, mytimer.best):
print('<%s>' % tester.__name__)
for test in (forLoop, listComp, mapCall, genExpr, genFunc):
elapsed, result = tester(test)
print ('-' * 35)
print ('%-9s: %.5f => [%s...%s]') % (test.__name__, elapsed, result[0], result[-1])
| [
"stefan_bo@163.com"
] | stefan_bo@163.com |
df0043829249d008722b8d6b71f36bc0d9f853ff | cf43a84a9f1ea5983c63a14f55a60c2c6f5bbedb | /setup.py | cc0e1a8f1ca2ea5fb3daf9267f5baae0e3561852 | [] | no_license | research-core/core-people | 66f6862527755c09d537cc7740af5ebc470b9b83 | f017002d39d1112415ce326d6aeb0b86fba6293b | refs/heads/master | 2020-06-29T03:16:20.229962 | 2019-08-29T14:30:39 | 2019-08-29T14:30:39 | 200,423,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,240 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import re
from setuptools import setup, find_packages
version, license = None, None
with open('people/__init__.py', 'r') as fd:
content = fd.read()
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', content, re.MULTILINE).group(1)
license = re.search(r'^__license__\s*=\s*[\'"]([^\'"]*)[\'"]', content, re.MULTILINE).group(1)
if version is None: raise RuntimeError('Cannot find version information')
if license is None: raise RuntimeError('Cannot find license information')
with open('README.md', 'r') as fd:
long_description = fd.read()
setup(
name='core-people',
version=version,
description='Research CORE ERM - people module',
author='Ricardo Ribeiro, Hugo Cachitas',
author_email='ricardojvr@gmail.com, hugo.cachitas@research.fchampalimaud.org',
url='https://github.com/research-core/core-people',
long_description=long_description,
long_description_content_type='text/markdown',
packages=find_packages(),
license=license,
install_requires=['core-common'],
package_data={
'people': [
'fixtures/initial_data.yaml',
'static/img/*.png',
'static/*.png',
]
},
)
| [
"ricardojvr@gmail.com"
] | ricardojvr@gmail.com |
5e22581eb4731d03a947c87cd853a93c1fbe0dab | 5a6d7fb808de598a9a13f90855d13ea7c8212f92 | /deeptech/training/optimizers/_smart_optimizer.py | d9f6fbc4d4a1581f8d2b9235aac483443d057d7e | [
"MIT"
] | permissive | penguinmenac3/deeptech | 0b4f48c59b6e4b1d00a2d5b3d876c27ab3f4f2da | 0c7fb170d62f193dbbb2018f7b8d42f713178bb8 | refs/heads/main | 2023-03-08T17:44:30.222433 | 2021-03-02T21:28:46 | 2021-03-02T21:28:46 | 302,860,310 | 1 | 0 | MIT | 2021-01-11T22:12:58 | 2020-10-10T09:04:01 | Python | UTF-8 | Python | false | false | 1,019 | py | """doc
# deeptech.training.optimizers._smart_optimizer
> Automatically create an optimizer with the parameters of the model.
"""
from deeptech.core.config import inject_kwargs
def smart_optimizer(optimizer, *args, **kwargs):
"""
Convert a pytorch optimizer into a lambda function that expects the config, model and loss as parameters, to instantiate the optimizer with all trainable parameters.
:param optimizer: A pytorch optimizer that should be made smart.
:param *args: Any ordered arguments the original optimizer expects.
:param **kwargs: Any named arguments the original optimizer expects.
"""
def _join_parameters(model, loss):
model_params = list(model.parameters())
loss_params = list(loss.parameters())
return model_params + loss_params
@inject_kwargs()
def create_optimizer(model, loss, training_initial_lr=None):
return optimizer(_join_parameters(model, loss), training_initial_lr, *args, **kwargs)
return create_optimizer
| [
"mail@michaelfuerst.de"
] | mail@michaelfuerst.de |
4c7ba0f6cd28b20062f6c3264106a480b402adcd | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /kjph2fGDWmLKY2n2J_3.py | b6224dd24a7a90e281db3dff5da27cfb06543ce7 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py |
import re
def valid_color (color):
return bool(re.match("rgb(a)?\((,?((?=\d{1,3}\%)(100|\d{1,2})\%|(25[0-5]|1?\d{1,2}))){3}(?(1),(1\.0*|0?\.?\d+))\)", "".join(color.split()))) and (color.startswith("rgb(") or color.startswith("rgba("))
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
33afdf2528b843227fee0aca8c696b9cebe8cfff | 2e8ff2eb86f34ce2fc330766906b48ffc8df0dab | /tensorflow_probability/python/experimental/inference_gym/internal/test_util.py | 8a1c2f1fb612430d44087d7416fe1d0f4bce8497 | [
"Apache-2.0"
] | permissive | wataruhashimoto52/probability | 9613f9a3cc685ff1a20643c4a05a48f9cf0fe1ae | 12e3f256544eadea6e863868da825614f4423eb0 | refs/heads/master | 2021-07-16T18:44:25.970036 | 2020-06-14T02:48:29 | 2020-06-14T02:51:59 | 146,873,495 | 0 | 0 | Apache-2.0 | 2018-08-31T09:51:20 | 2018-08-31T09:51:20 | null | UTF-8 | Python | false | false | 16,143 | py | # Lint as: python2, python3
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test utilities for the Inference Gym."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl import flags
from absl import logging
import numpy as np
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util
flags.DEFINE_bool('use_tfds', False, 'Whether to run tests that use TFDS.')
FLAGS = flags.FLAGS
__all__ = [
'InferenceGymTestCase',
'run_hmc_on_model',
'MCMCResults',
'uses_tfds',
]
def uses_tfds(test_fn):
def _new_test_fn(self, *args, **kwargs):
if FLAGS.use_tfds:
test_fn(self, *args, **kwargs)
else:
self.skipTest('Uses TensorFlow Datasets. Enable using --use_tfds')
return _new_test_fn
class MCMCResults(
collections.namedtuple('MCMCResults', [
'chain',
'accept_rate',
'ess',
'r_hat',
])):
"""Results of an MCMC run.
Attributes:
chain: A possibly nested structure of Tensors, representing the HMC chain.
accept_rate: Acceptance rate of MCMC proposals.
ess: Effective sample size.
r_hat: Potential scale reduction.
"""
def run_hmc_on_model(
model,
num_chains,
num_steps,
num_leapfrog_steps,
step_size,
target_accept_prob=0.9,
seed=None,
dtype=tf.float32,
use_xla=False,
):
"""Runs HMC on a target.
Args:
model: The model to validate.
num_chains: Number of chains to run in parallel.
num_steps: Total number of steps to take. The first half are used to warm up
the sampler.
num_leapfrog_steps: Number of leapfrog steps to take.
step_size: Step size to use.
target_accept_prob: Target acceptance probability.
seed: Optional seed to use. By default, `test_util.test_seed()` is used.
dtype: DType to use for the algorithm.
use_xla: Whether to use XLA.
Returns:
mcmc_results: `MCMCResults`.
"""
step_size = tf.convert_to_tensor(step_size, dtype)
def target_log_prob_fn(*x):
x = tf.nest.pack_sequence_as(model.dtype, x)
return model.unnormalized_log_prob(x)
if seed is None:
seed = test_util.test_seed()
if tf.executing_eagerly():
# TODO(b/68017812,b/141368747): remove once eager correctly supports seed.
tf.random.set_seed(seed)
seed = None
current_state = tf.nest.map_structure(
lambda b, e: b( # pylint: disable=g-long-lambda
tf.zeros([num_chains] + list(e), dtype=dtype)),
model.default_event_space_bijector,
model.event_shape)
# tfp.mcmc only works well with lists.
current_state = tf.nest.flatten(current_state)
hmc = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
num_leapfrog_steps=num_leapfrog_steps,
step_size=[tf.fill(s.shape, step_size) for s in current_state],
seed=seed)
hmc = tfp.mcmc.TransformedTransitionKernel(
hmc, tf.nest.flatten(model.default_event_space_bijector))
hmc = tfp.mcmc.DualAveragingStepSizeAdaptation(
hmc,
num_adaptation_steps=int(num_steps // 2 * 0.8),
target_accept_prob=target_accept_prob)
chain, is_accepted = tf.function(
lambda: tfp.mcmc.sample_chain( # pylint: disable=g-long-lambda
current_state=current_state,
kernel=hmc,
num_results=num_steps // 2,
num_burnin_steps=num_steps // 2,
trace_fn=lambda _, pkr: # pylint: disable=g-long-lambda
(pkr.inner_results.inner_results.is_accepted),
parallel_iterations=1),
autograph=False,
experimental_compile=use_xla)()
accept_rate = tf.reduce_mean(tf.cast(is_accepted, dtype))
ess = tf.nest.map_structure(
lambda c: tfp.mcmc.effective_sample_size( # pylint: disable=g-long-lambda
c,
cross_chain_dims=1,
filter_beyond_positive_pairs=True),
chain)
r_hat = tf.nest.map_structure(tfp.mcmc.potential_scale_reduction, chain)
mcmc_results = MCMCResults(
chain=tf.nest.pack_sequence_as(model.default_event_space_bijector, chain),
accept_rate=accept_rate,
ess=ess,
r_hat=r_hat,
)
return mcmc_results
class InferenceGymTestCase(test_util.TestCase):
  """A TestCase mixin for common tests on inference gym targets."""

  def validate_log_prob_and_transforms(
      self,
      model,
      sample_transformation_shapes,
      check_ground_truth_mean=False,
      check_ground_truth_mean_standard_error=False,
      check_ground_truth_standard_deviation=False,
      check_ground_truth_standard_deviation_standard_error=False,
      seed=None,
  ):
    """Validate that the model's log probability and sample transformations run.
    This checks that unconstrained values passed through the event space
    bijectors into `unnormalized_log_prob` and sample transformations yield
    finite values. This also verifies that the transformed values have the
    expected shape.
    Args:
      model: The model to validate.
      sample_transformation_shapes: Shapes of the transformation outputs.
      check_ground_truth_mean: Whether to check the shape of the ground truth
        mean.
      check_ground_truth_mean_standard_error: Whether to check the shape of the
        ground truth standard error.
      check_ground_truth_standard_deviation: Whether to check the shape of the
        ground truth standard deviation.
      check_ground_truth_standard_deviation_standard_error: Whether to check the
        shape of the ground truth standard deviation standard error.
      seed: Optional seed to use. By default, `test_util.test_seed()` is used.
    """
    # A batch of test points is drawn at once so every check below also
    # exercises batched evaluation of the model.
    batch_size = 16
    if seed is not None:
      seed = tfp.util.SeedStream(seed, 'validate_log_prob_and_transforms')
    else:
      seed = test_util.test_seed_stream()
    # Sample unconstrained Gaussian noise and push it through the model's
    # default event-space bijector so the points lie in the model's support.
    def _random_element(shape, dtype, default_event_space_bijector):
      unconstrained_shape = default_event_space_bijector.inverse_event_shape(
          shape)
      unconstrained_shape = tf.TensorShape([batch_size
                                           ]).concatenate(unconstrained_shape)
      return default_event_space_bijector.forward(
          tf.random.normal(unconstrained_shape, dtype=dtype, seed=seed()))
    test_points = tf.nest.map_structure(_random_element, model.event_shape,
                                        model.dtype,
                                        model.default_event_space_bijector)
    # The log-prob must be finite and have one scalar per batch member.
    log_prob = self.evaluate(model.unnormalized_log_prob(test_points))
    self.assertAllFinite(log_prob)
    self.assertEqual((batch_size,), log_prob.shape)
    for name, sample_transformation in model.sample_transformations.items():
      transformed_points = self.evaluate(sample_transformation(test_points))
      # Transformed values must be finite and carry the expected per-sample
      # shape (prefixed by the batch dimension).
      def _assertions_part(expected_shape, transformed_part):
        self.assertAllFinite(transformed_part)
        self.assertEqual(
            (batch_size,) + tuple(expected_shape),
            tuple(list(transformed_part.shape)))
      self.assertAllAssertsNested(
          _assertions_part,
          sample_transformation_shapes[name],
          transformed_points,
          shallow=transformed_points,
          msg='Checking outputs of: {}'.format(name))
      # Ground-truth statistics (when present) carry no batch dimension.
      def _ground_truth_shape_check_part(expected_shape, ground_truth):
        self.assertEqual(
            tuple(expected_shape),
            tuple(ground_truth.shape))
      if check_ground_truth_mean:
        self.assertAllAssertsNested(
            _ground_truth_shape_check_part,
            sample_transformation_shapes[name],
            sample_transformation.ground_truth_mean,
            shallow=transformed_points,
            msg='Checking ground truth mean of: {}'.format(name))
      if check_ground_truth_mean_standard_error:
        self.assertAllAssertsNested(
            _ground_truth_shape_check_part,
            sample_transformation_shapes[name],
            sample_transformation.ground_truth_mean_standard_error,
            shallow=transformed_points,
            msg='Checking ground truth mean standard error: {}'.format(name))
      if check_ground_truth_standard_deviation:
        self.assertAllAssertsNested(
            _ground_truth_shape_check_part,
            sample_transformation_shapes[name],
            sample_transformation.ground_truth_standard_deviation,
            shallow=transformed_points,
            msg='Checking ground truth standard deviation: {}'.format(name))
      if check_ground_truth_standard_deviation_standard_error:
        self.assertAllAssertsNested(
            _ground_truth_shape_check_part,
            sample_transformation_shapes[name],
            sample_transformation
            .ground_truth_standard_deviation_standard_error,
            shallow=transformed_points,
            msg='Checking ground truth standard deviation strandard error: {}'
            .format(name))

  def validate_ground_truth_using_hmc(
      self,
      model,
      num_chains,
      num_steps,
      num_leapfrog_steps,
      step_size,
      target_accept_prob=0.9,
      seed=None,
      dtype=tf.float32,
  ):
    """Validates the ground truth of a model using HMC.
    Args:
      model: The model to validate.
      num_chains: Number of chains to run in parallel.
      num_steps: Total number of steps to take. The first half are used to warm
        up the sampler.
      num_leapfrog_steps: Number of leapfrog steps to take.
      step_size: Step size to use.
      target_accept_prob: Target acceptance probability.
      seed: Optional seed to use. By default, `test_util.test_seed()` is used.
      dtype: DType to use for the algorithm.
    """
    # Run the sampler defined at module level; the second half of the chain
    # (post warm-up) is what gets compared against the ground truth.
    mcmc_results = self.evaluate(
        run_hmc_on_model(
            model,
            num_chains=num_chains,
            num_steps=num_steps,
            num_leapfrog_steps=num_leapfrog_steps,
            step_size=step_size,
            target_accept_prob=target_accept_prob,
            seed=seed,
            dtype=dtype))
    logging.info('Acceptance rate: %s', mcmc_results.accept_rate)
    logging.info('ESS: %s', mcmc_results.ess)
    logging.info('r_hat: %s', mcmc_results.r_hat)
    for name, sample_transformation in model.sample_transformations.items():
      transformed_chain = self.evaluate(
          tf.nest.map_structure(tf.identity,
                                sample_transformation(mcmc_results.chain)))
      # tfp.mcmc.effective_sample_size only works well with lists.
      flat_transformed_chain = tf.nest.flatten(transformed_chain)
      # Chains live along axis 1; ESS pools information across them.
      cross_chain_dims = [1] * len(flat_transformed_chain)
      flat_ess = self.evaluate(
          tfp.mcmc.effective_sample_size(
              flat_transformed_chain,
              cross_chain_dims=cross_chain_dims,
              filter_beyond_positive_pairs=True))
      # Use the ESS (not the raw draw count) as the effective sample count
      # for the Z-test, since MCMC draws are autocorrelated.
      self._z_test(
          name=name,
          sample_transformation=sample_transformation,
          transformed_samples=transformed_chain,
          num_samples=tf.nest.pack_sequence_as(transformed_chain, flat_ess),
          sample_dims=(0, 1),
      )

  def validate_ground_truth_using_monte_carlo(
      self,
      model,
      num_samples,
      seed=None,
      dtype=tf.float32,
  ):
    """Validates the ground truth of a model using forward sampling.
    This requires a model to have a `sample` method. This is typically only
    applicable to synthetic models.
    Args:
      model: The model to validate. It must have a `sample` method.
      num_samples: Number of samples to generate.
      seed: Optional seed to use. By default, `test_util.test_seed()` is used.
      dtype: DType to use for the algorithm.
    """
    if seed is None:
      seed = test_util.test_seed()
    samples = model.sample(num_samples, seed=seed)
    for name, sample_transformation in model.sample_transformations.items():
      transformed_samples = self.evaluate(
          tf.identity(sample_transformation(samples)))
      # Forward samples are i.i.d., so the raw draw count is the effective
      # sample size for every leaf of the transformed structure.
      nested_num_samples = tf.nest.map_structure(lambda _: num_samples,
                                                 transformed_samples)
      self._z_test(
          name=name,
          sample_transformation=sample_transformation,
          transformed_samples=transformed_samples,
          num_samples=nested_num_samples,
          sample_dims=0,
      )

  def _z_test(
      self,
      name,
      sample_transformation,
      transformed_samples,
      num_samples,
      sample_dims=0,
  ):
    """Does a two-sided Z-test between some samples and the ground truth."""
    # Per-leaf sample moments, reduced over the sample (and chain) axes.
    sample_mean = tf.nest.map_structure(
        lambda transformed_samples: np.mean(  # pylint: disable=g-long-lambda
            transformed_samples,
            axis=sample_dims),
        transformed_samples)
    sample_variance = tf.nest.map_structure(
        lambda transformed_samples: np.var(  # pylint: disable=g-long-lambda
            transformed_samples,
            axis=sample_dims),
        transformed_samples)
    # TODO(b/144524123): As written, this does a two sided Z-test at an
    # alpha=O(1e-7). It definitely has very little power as a result.
    # Currently it also uses the sample variance to compute the Z-score. In
    # principle, we can use the ground truth variance, but it's unclear
    # whether that's appropriate. Heuristically, a typical error that HMC has
    # is getting stuck, meaning that the sample variance is too low,
    # causing the test to fail more often. HMC can also in principle
    # over-estimate the variance, but that seems less typical.
    #
    # We should re-examine the literature for Z-testing and justify these
    # choices on formal grounds.
    if sample_transformation.ground_truth_mean is not None:
      # Tolerance is 5 standard errors of the mean (sqrt(var / n)).
      def _mean_assertions_part(ground_truth_mean, sample_mean, sample_variance,
                                num_samples):
        self.assertAllClose(
            ground_truth_mean,
            sample_mean,
            # TODO(b/144290399): Use the full atol vector.
            atol=np.array(5. * np.sqrt(sample_variance / num_samples)).max(),
        )
      self.assertAllAssertsNested(
          _mean_assertions_part,
          sample_transformation.ground_truth_mean,
          sample_mean,
          sample_variance,
          num_samples,
          msg='Comparing mean of "{}"'.format(name))
    if sample_transformation.ground_truth_standard_deviation is not None:
      # From https://math.stackexchange.com/q/72975
      fourth_moment = tf.nest.map_structure(
          lambda transformed_samples, sample_mean: np.mean(  # pylint: disable=g-long-lambda
              (transformed_samples - sample_mean)**4,
              axis=tuple(tf.nest.flatten(sample_dims))),
          transformed_samples,
          sample_mean)
      # Tolerance is 5 standard errors of the sample variance, derived from
      # the fourth central moment (see the link above).
      def _var_assertions_part(ground_truth_standard_deviation, sample_variance,
                               fourth_moment, num_samples):
        self.assertAllClose(
            np.square(ground_truth_standard_deviation),
            sample_variance,
            # TODO(b/144290399): Use the full atol vector.
            atol=np.array(
                5. * np.sqrt(fourth_moment / num_samples - sample_variance**2 *
                             (num_samples - 3) / num_samples /
                             (num_samples - 1))).max(),
        )
      self.assertAllAssertsNested(
          _var_assertions_part,
          sample_transformation.ground_truth_standard_deviation,
          sample_variance,
          fourth_moment,
          num_samples,
          msg='Comparing variance of "{}"'.format(name),
      )
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
9539b719de78e4f40c061393f9735ce08c61d9fe | f797aecc0a7847aefc228c097a86ffc9cc1f5cc3 | /hongkong/hongkong/spiders/HKEX_delisted_company_list.py | a12c31e656ad7825a62a1af59965e0e2c9dde4cf | [] | no_license | waynecanfly/spiderItemV2 | b359ac773bb9fbfbf4f893704d542654bd3994e3 | 972a5fb002d051a2630b40c9e6582392daf22d0f | refs/heads/master | 2020-10-01T08:35:35.888512 | 2019-12-14T02:52:06 | 2019-12-14T02:52:06 | 227,500,698 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,502 | py | # -*- coding: utf-8 -*-
import time
import scrapy
from hongkong.items import HongKongDelistedCompanyItem
from samples.base_rule import HKEXIsNewDelistedSec
class HkexDelistedCompanyListSpider(scrapy.Spider):
    """Crawl webb-site.com for the list of companies delisted from HKEX.

    One request is issued per market board (Main Board / GEM); each table
    row of the result page is parsed into a ``HongKongDelistedCompanyItem``.
    """
    name = 'HKEX_delisted_company_list'
    allowed_domains = ['webb-site.com']
    start_urls = ['http://webb-site.com/']
    # Listing page per market board; the key doubles as the market-type label.
    market_url_dict = {
        'main': 'https://webb-site.com/dbpub/delisted.asp?s=nameup&t=s&e=m',
        'gem': 'https://webb-site.com/dbpub/delisted.asp?s=nameup&t=s&e=g'
    }

    def start_requests(self):
        # Invert the mapping so the callback can recover the market type
        # from the URL the request was scheduled with.
        url_to_market = {v: k for k, v in self.market_url_dict.items()}
        for url in self.market_url_dict.values():
            yield scrapy.Request(url=url, callback=self.parse, meta={
                'bond_info': url_to_market[url],
            })

    @staticmethod
    def _first_or_null(extracted):
        """Return the first extracted xpath value, or the literal 'Null'.

        Replaces the previous copy-pasted ``if len(...) == 0`` blocks that
        the old inline comment itself flagged as duplicated code.
        """
        return extracted[0] if extracted else 'Null'

    def parse(self, response):
        """Parse one delisted-companies table and yield one item per row."""
        market_type = response.meta['bond_info']
        infos = response.xpath("//body/div[@class='mainbody']/table[@class='numtable']")
        # NOTE(review): "//tr" is an absolute xpath, so this selects every
        # <tr> in the document rather than only rows under `infos`; kept
        # as-is to preserve the original behaviour.
        info_list = infos.xpath("//tr")
        info_list.pop(0)  # drop the header row
        for info in info_list:
            stock_code = self._first_or_null(info.xpath("./td[2]/a/text()").extract())
            issuer = self._first_or_null(info.xpath("./td[4]/a/text()").extract())
            first_trade = self._first_or_null(info.xpath("./td[5]/text()").extract())
            last_trade = self._first_or_null(info.xpath("./td[6]/text()").extract())
            delisted_date = self._first_or_null(info.xpath("./td[7]/text()").extract())
            trading_life_years = self._first_or_null(info.xpath("./td[8]/text()").extract())
            reason = self._first_or_null(info.xpath("./td[9]/text()").extract())
            gmt_create = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            # Only emit securities not yet recorded as delisted in the DB.
            if not HKEXIsNewDelistedSec(stock_code):
                item = HongKongDelistedCompanyItem()
                item['country_code'] = 'HKG'
                item['exchange_market_code'] = 'HKEX'
                item['security_code'] = stock_code
                item['issuer'] = issuer
                item['first_trade'] = first_trade
                item['market_type'] = market_type
                item['last_trade'] = last_trade
                item['delisting_date'] = delisted_date
                item['trading_life_years'] = trading_life_years
                item['status'] = -2
                item['reason'] = reason
                item['gmt_create'] = gmt_create
                item['user_create'] = 'cf'
                yield item
"1370153124@qq.com"
] | 1370153124@qq.com |
2529c9992a9b2928cc03b54996a982008abac123 | f516b7561b93f640bcb376766a7ecc3440dcbb99 | /leetcode/easy/remove-duplicates-from-sorted-array.py | d220514e45221042eecda3ade154765409ac3fa4 | [
"Apache-2.0"
] | permissive | vtemian/interviews-prep | c41e1399cdaac9653c76d09598612f7450e6d302 | ddef96b5ecc699a590376a892a804c143fe18034 | refs/heads/master | 2020-04-30T15:44:42.116286 | 2019-09-10T19:41:41 | 2019-09-10T19:41:41 | 176,928,167 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | class Solution:
def removeDuplicates(self, nums: List[int]) -> int:
if len(nums) < 2:
return len(nums)
start = 0
moving = 1
while moving < len(nums):
while moving < len(nums) and nums[start] == nums[moving]:
moving += 1
if moving == len(nums):
break
nums[start + 1] = nums[moving]
start += 1
moving += 1
return start + 1
| [
"vladtemian@gmail.com"
] | vladtemian@gmail.com |
54d9fd3cbc0666b68171746496af21098c4d479a | b26674cda3264ad16af39333d79a700b72587736 | /corehq/apps/change_feed/producer.py | 7f48ff3b7aa3399ac0b571e40e188b2d41e61c2b | [] | no_license | tlwakwella/commcare-hq | 2835206d8db84ff142f705dbdd171e85579fbf43 | a3ac7210b77bea6c2d0392df207d191496118872 | refs/heads/master | 2021-01-18T02:07:09.268150 | 2016-03-24T14:12:49 | 2016-03-24T14:12:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,560 | py | from __future__ import unicode_literals
import json
import time
from corehq.util.soft_assert import soft_assert
from kafka import KeyedProducer
from kafka.common import LeaderNotAvailableError, FailedPayloadsError, KafkaUnavailableError
from corehq.apps.change_feed.connection import get_kafka_client_or_none
import logging
def send_to_kafka(producer, topic, change_meta):
    """Publish a ChangeMeta to the given Kafka topic, keyed by domain.

    Retries transient failures a few times; on persistent failure fires a
    soft assert (email) and re-raises so the caller sees the error.
    """
    def _send_to_kafka():
        # Key by domain so all changes for one domain land on one partition.
        producer.send_messages(
            bytes(topic),
            bytes(change_meta.domain.encode('utf-8') if change_meta.domain is not None else None),
            bytes(json.dumps(change_meta.to_json())),
        )
    try:
        tries = 3
        for i in range(tries):
            # try a few times because the python kafka libraries can trigger timeouts
            # if they are idle for a while.
            try:
                _send_to_kafka()
                break
            except (FailedPayloadsError, KafkaUnavailableError):
                if i == (tries - 1):
                    # if it's the last try, fail hard
                    raise
            except LeaderNotAvailableError:
                # kafka seems to be down. sleep a bit to avoid crazy amounts of error spam
                # then propagate — the outer handler reports it.
                time.sleep(15)
                raise
    except Exception as e:
        # Best-effort alert before re-raising; the change is NOT silently lost.
        _assert = soft_assert(to='@'.join(['czue', 'dimagi.com']))
        _assert(False, 'Problem sending change to kafka {}: {} ({})'.format(
            change_meta.to_json(), e, type(e)
        ))
        raise
class ChangeProducer(object):
    """Lazily-initialised wrapper around a Kafka ``KeyedProducer``.

    The Kafka client and producer are only created on first use; if Kafka
    is unavailable the failure is remembered so later calls become no-ops
    instead of retrying on every access.
    """

    def __init__(self, kafka=None):
        self._kafka = kafka
        self._producer = None
        self._has_error = False

    @property
    def kafka(self):
        # Fast path: already resolved, or a previous attempt failed.
        if self._kafka is not None or self._has_error:
            return self._kafka
        self._kafka = get_kafka_client_or_none()
        if self._kafka is None:
            logging.warning('Kafka is not available! Change producer is doing nothing.')
            self._has_error = True
        return self._kafka

    @property
    def producer(self):
        # Fast path: producer built already, or we are in the error state.
        if self._producer is not None or self._has_error:
            return self._producer
        kafka_client = self.kafka
        if kafka_client is not None:
            self._producer = KeyedProducer(kafka_client)
        else:
            # The client lookup failed, so the error flag must be set.
            assert self._has_error
        return self._producer

    def send_change(self, topic, change_meta):
        """Publish a change if a producer is available; otherwise do nothing."""
        keyed_producer = self.producer
        if keyed_producer:
            send_to_kafka(keyed_producer, topic, change_meta)


producer = ChangeProducer()
| [
"czue@dimagi.com"
] | czue@dimagi.com |
c9c4998b73d718599e53a27fadaacedca0945866 | 38b8bceafb4d80afc7c77196eb9ee99694191bcf | /scrapy/study/urllib2_test01.py | 16dec1b14848300fd229a9f07cd209c82123f51c | [] | no_license | tangc1986/PythonStudy | f6c5b384874e82fbf0b5f51cfb7a7a89a48ec0ff | 1ed1956758e971647426e7096ac2e8cbcca585b4 | refs/heads/master | 2021-01-23T20:39:23.930754 | 2017-10-08T07:40:32 | 2017-10-08T07:42:38 | 42,122,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | import urllib2
# NOTE: Python 2 code — `urllib2` and the bare `print` statement below do
# not exist in Python 3 (there it would be urllib.request and print(html)).
response = urllib2.urlopen('http://www.baidu.com')
# Read the entire response body (the page's HTML) into memory.
html = response.read()
print html
| [
"tangc1986@gmail.com"
] | tangc1986@gmail.com |
fca11b46debbd03649c2ea39df9ed1f363eb9fa5 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-as/huaweicloudsdkas/v1/model/bandwidth_result.py | 7ed77075e6464937dd2977fd447ada089a4289e8 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,440 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class BandwidthResult:
    """
    Auto-generated SDK model; keep edits consistent with the generator.

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attributes whose values are masked as "****" in to_dict().
    sensitive_list = []
    openapi_types = {
        'size': 'int',
        'share_type': 'str',
        'charging_mode': 'str',
        'id': 'str'
    }
    attribute_map = {
        'size': 'size',
        'share_type': 'share_type',
        'charging_mode': 'charging_mode',
        'id': 'id'
    }
    def __init__(self, size=None, share_type=None, charging_mode=None, id=None):
        """BandwidthResult - a model defined in huaweicloud sdk"""
        self._size = None
        self._share_type = None
        self._charging_mode = None
        self._id = None
        self.discriminator = None
        # Route every given value through its property setter.
        if size is not None:
            self.size = size
        if share_type is not None:
            self.share_type = share_type
        if charging_mode is not None:
            self.charging_mode = charging_mode
        if id is not None:
            self.id = id
    @property
    def size(self):
        """Gets the size of this BandwidthResult.
        Bandwidth size in Mbit/s.
        :return: The size of this BandwidthResult.
        :rtype: int
        """
        return self._size
    @size.setter
    def size(self, size):
        """Sets the size of this BandwidthResult.
        Bandwidth size in Mbit/s.
        :param size: The size of this BandwidthResult.
        :type: int
        """
        self._size = size
    @property
    def share_type(self):
        """Gets the share_type of this BandwidthResult.
        Bandwidth share type. Enum: PER means dedicated; only dedicated is currently supported.
        :return: The share_type of this BandwidthResult.
        :rtype: str
        """
        return self._share_type
    @share_type.setter
    def share_type(self, share_type):
        """Sets the share_type of this BandwidthResult.
        Bandwidth share type. Enum: PER means dedicated; only dedicated is currently supported.
        :param share_type: The share_type of this BandwidthResult.
        :type: str
        """
        self._share_type = share_type
    @property
    def charging_mode(self):
        """Gets the charging_mode of this BandwidthResult.
        Billing mode: "bandwidth" bills by bandwidth, "traffic" bills by traffic.
        :return: The charging_mode of this BandwidthResult.
        :rtype: str
        """
        return self._charging_mode
    @charging_mode.setter
    def charging_mode(self, charging_mode):
        """Sets the charging_mode of this BandwidthResult.
        Billing mode: "bandwidth" bills by bandwidth, "traffic" bills by traffic.
        :param charging_mode: The charging_mode of this BandwidthResult.
        :type: str
        """
        self._charging_mode = charging_mode
    @property
    def id(self):
        """Gets the id of this BandwidthResult.
        Bandwidth ID; the shared bandwidth specified when creating an EIP with a WHOLE-type bandwidth.
        :return: The id of this BandwidthResult.
        :rtype: str
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this BandwidthResult.
        Bandwidth ID; the shared bandwidth specified when creating an EIP with a WHOLE-type bandwidth.
        :param id: The id of this BandwidthResult.
        :type: str
        """
        self._id = id
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask sensitive attributes instead of serialising them.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
    def __repr__(self):
        """For `print`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, BandwidthResult):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
593e23c23bff91b3481cdb1e25deadc33108d1dc | 3a28b1a12d0710c06f6360381ad8be6cf3707907 | /modular_model/triHPC/triHPCThermo/HPCAllTrays13CstmLiqEtlp_pP.py | 684b0d0f0124a7bc311424edfc52f261b9f1d358 | [] | no_license | WheatZhang/DynamicModelling | 6ce1d71d3b55176fd4d77a6aedbaf87e25ce4d02 | ea099245135fe73e8c9590502b9c8b87768cb165 | refs/heads/master | 2020-06-15T14:12:50.373047 | 2019-07-05T01:37:06 | 2019-07-05T01:37:06 | 195,319,788 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | def LiqEtlp_pP(P,T,x_N2):
x = (P-5.56380000e+02)/3.71707300e-01
y = (T--1.77068007e+02)/5.35543333e-02
z = (x_N2-9.17851528e-01)/8.26935123e-03
output = \
1*2.41243980e+01
liq_etlp = output*1.00000000e+00+0.00000000e+00
return liq_etlp | [
"1052632241@qq.com"
] | 1052632241@qq.com |
ee43bb18ca6eafa1bfb32ed0af4d657f0fad1558 | 33211b03bc7c0e13ad2b39938e99851ad206332e | /ragdoll/__init__.py | 506256fa48165e55e0b48b94f152cb273f8974be | [] | no_license | wangsen992/ragdoll | fe341f0c6acc9842e9b81851cf8ca864f87b0352 | cdd4bd8a7fa5286af8749c0157fe51653543a033 | refs/heads/master | 2021-06-23T00:23:54.738709 | 2017-07-25T04:26:05 | 2017-07-25T04:26:05 | 94,606,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | """
Ragdoll initialisation.
"""
# Re-export the public names of every submodule at the package level.
# NOTE(review): star imports make the package namespace implicit; defining
# __all__ in each submodule would make the exported API explicit.
from .db import *
from .composite import *
from .nutrient import *
from .flyweight import *
from .dictionary import *
from .human import *
from .req import *
from .plots import *
# Package version (kept as a float; a semantic-version string is more common).
__version__=0.1
"wangsen992@gmail.com"
] | wangsen992@gmail.com |
d43f17b2a9272f382d3b9edea82ed5030ed82918 | 17be0e9275082c3239fedc11bc617ecd5856136c | /letor/offline/query_characterizer.py | ba3ab26b9b27e8d9bd178eff043fddddbf869187 | [] | no_license | mdkmongo/semantichealth.github.io | 8bb814bfd3b0b3a71828625a2acebfd8013e2eef | 6462ba2cc406967b0371b09822e4c26860e96c91 | refs/heads/master | 2021-01-21T08:24:07.128484 | 2016-08-19T05:35:04 | 2016-08-19T05:35:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,029 | py | from sklearn.feature_extraction.text import CountVectorizer
from get_query_clusters import *
import numpy as np
def query_characterizer(queries, log, similarity_limit = 0.9):
    '''
    queries - list of string for queries
    return - list of integers to indicate cluster for each query
    '''
    # vectorize queries into a bag-of-words matrix
    log.trace('characterizing %d queries' %queries.shape[0])
    vectorizer = CountVectorizer()
    bag_of_words = vectorizer.fit_transform(queries)
    # flatten term counts to 1 (duplicate terms within a query are ignored)
    bag_of_words.data = np.ones(bag_of_words.data.size)
    # grow the cluster count until the minimum within-cluster similarity
    # clears the requested limit
    min_sim, cluster_count = 0, 0
    while min_sim < similarity_limit:
        cluster_count += 1
        clusters, min_sim, centroids = get_query_clusters(bag_of_words, cluster_count, log)
        log.trace('characterizing queries with k = %d, minimum similarity is %.4f' %(cluster_count, min_sim))
    return clusters, vectorizer.vocabulary_, centroids.toarray()
| [
"ynglei@gmail.com"
] | ynglei@gmail.com |
ea4fb014273cdd117adbdffd61693eb7335a22b3 | 8324db17c426d83d95ce668ee6c7914eec4c7cc1 | /app/user/v1/services/otp_service.py | 44e93ee257a4d04001b22875deeff64064e8f431 | [] | no_license | iCodeIN/whatsapp_clone_backend | 21224f994a7d6e901aeff16f3c461bab72d720a3 | abcbb8fad81feb5e697af61277a21bc99e3ca81b | refs/heads/master | 2023-02-23T17:28:15.960392 | 2021-01-25T16:29:06 | 2021-01-25T16:29:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,351 | py | from helpers.cache_adapter import CacheAdapter
from helpers.misc_helper import get_random_number
class OTPService():
def __init__(self, mobile_number):
self.mobile_number = mobile_number
self.OTP_PREFIX = 'OTP_'
self.OTP_EXPIRY = 600 # in seconds
self.cache_adapter = CacheAdapter()
def get_otp(self):
"""
Returns the OTP stored in cache for
the given number
"""
key = self.OTP_PREFIX + self.mobile_number
return self.cache_adapter.get(key)
def clear_otp(self):
"""
Clears the OTP from the cache
"""
key = self.OTP_PREFIX + self.mobile_number
self.cache_adapter.delete(key)
def generate_otp(self):
"""
Generates OTP for the given mobile number
"""
key = self.OTP_PREFIX + self.mobile_number
# Gets a new OTP if not present in cache,
# and if it's already present, returns the same
# OTP
one_time_password = self.cache_adapter.get(key)
if one_time_password is None:
one_time_password = get_random_number()
# setting the OTP in cache
self.cache_adapter.set(
key,
one_time_password,
self.OTP_EXPIRY
)
return one_time_password
| [
"sajal.4591@gmail.com"
] | sajal.4591@gmail.com |
7d5b92bd2af2037c9ecef1441beb9ad0fb39fa58 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/classification/GoogleNet_ID1623_for_PyTorch/demo.py | 61e10d8b90967fcf483c8eac545a688daeaa25ac | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 4,723 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# -*- coding: utf-8 -*-
"""demo.py
"""
import os
import torch
import numpy as np
from googlenet import googlenet
import argparse
from apex import amp
import apex
import torch.distributed as dist
parser = argparse.ArgumentParser(description=' googlenet demo ')
parser.add_argument('--device', default='npu', type=str,
help='npu or gpu')
parser.add_argument('--device-list', default='0,1,2,3,4,5,6,7', type=str, help='device id list')
parser.add_argument('--dist-backend', default='hccl', type=str,
help='distributed backend')
parser.add_argument('--addr', default='192.168.88.3', type=str,
help='master addr')
'''
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
if args.amp:
amp.load_state_dict(checkpoint['amp'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
'''
def device_id_to_process_device_map(device_list):
    """Map consecutive process ranks to sorted device ids.

    Args:
        device_list: comma-separated device ids, e.g. "0,1,2,3".
    Returns:
        dict mapping process rank (0..n-1) to device id, ids in ascending order.
    """
    ordered_ids = sorted(int(token) for token in device_list.split(","))
    return {rank: device_id for rank, device_id in enumerate(ordered_ids)}
def build_model():
    """Build the distributed GoogleNet model on an Ascend NPU and load weights.

    Side effects: parses CLI args, sets MASTER_ADDR/PORT, initialises the
    HCCL process group, and publishes the chosen device string via the
    module-level global `loc` (read later by the __main__ block).
    """
    global loc
    # Build the model and load the pretrained weights (customize as needed).
    args = parser.parse_args()
    args.process_device_map = device_id_to_process_device_map(args.device_list)
    os.environ['MASTER_ADDR'] = args.addr
    os.environ['MASTER_PORT'] = '29688'
    ngpus_per_node = len(args.process_device_map)
    # Single-process group (world_size=1, rank=0) — demo runs on one device.
    dist.init_process_group(backend=args.dist_backend, # init_method=args.dist_url,
                            world_size=1, rank=0)
    args.gpu = args.process_device_map[0]
    loc = 'npu:{}'.format(args.gpu)
    torch.npu.set_device(loc)
    model = googlenet().to(loc)
    optimizer = apex.optimizers.NpuFusedSGD(model.parameters(), 0.5,
                                            momentum=0.9,
                                            weight_decay=1.0e-04)
    # Mixed precision (O2) must wrap the model before DDP.
    model, optimizer = amp.initialize(model, optimizer, opt_level="O2", loss_scale=1024)
    model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], broadcast_buffers=False)
    checkpoint = torch.load('./checkpoint.pth.tar')
    model.load_state_dict(checkpoint['state_dict'])
    model.eval() # remember to switch to eval mode for inference
    return model
def get_raw_data():
    """Download a sample image and return it as an RGB PIL image."""
    # Customize the data-fetching logic; do not commit raw data to the repo.
    from PIL import Image
    from urllib.request import urlretrieve
    IMAGE_URL = 'https://bbs-img.huaweicloud.com/blogs/img/thumb/1591951315139_8989_1363.png'
    # Saved under a fixed local name; overwritten on every run.
    urlretrieve(IMAGE_URL, 'tmp.jpg')
    img = Image.open("tmp.jpg")
    # Normalise to 3-channel RGB (source PNG may carry an alpha channel).
    img = img.convert('RGB')
    return img
def pre_process(raw_data):
    """Apply standard ImageNet preprocessing and add a batch dimension."""
    # Customize the model preprocessing here.
    from torchvision import transforms
    # Standard ImageNet channel statistics.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    transforms_list = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        normalize
    ])
    input_data = transforms_list(raw_data)
    # unsqueeze(0) turns the single image into a batch of one.
    return input_data.unsqueeze(0)
return input_data.unsqueeze(0)
def post_process(output_tensor):
    """Print the raw logits, then reduce them to per-sample class indices."""
    # Custom post-processing step for the demo.
    print(output_tensor)
    predicted_classes = output_tensor.argmax(dim=1)
    return predicted_classes
if __name__ == '__main__':
    # 1. Fetch the raw input data.
    raw_data = get_raw_data()
    # 2. Build the model (also sets the global `loc` device string).
    model = build_model()
    # 3. Preprocess the image into a batched tensor.
    input_tensor = pre_process(raw_data)
    # 4. Run the forward pass on the target device.
    output_tensor = model(input_tensor.to(loc))
    # 5. Post-process the logits into class indices.
    result = post_process(output_tensor)
    # 6. Print the prediction.
    print(result)
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
d7c061e7a434f605159fe0952b9fbb9232f37412 | 6e7aa175667d08d8d285fd66d13239205aff44ff | /libs/pyglet/baby/entity.py | d799609c37fe7f8d8f458034e4882b32c3f80c75 | [] | no_license | jaredly/GameCC | c2a9d7b14fc45813a27bdde86e16a3e3594396e2 | babadbe9348c502d0f433fb82e72ceff475c3a3b | refs/heads/master | 2016-08-05T10:53:03.794386 | 2010-06-25T03:13:02 | 2010-06-25T03:13:02 | 269,735 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | #!/usr/bin/env python
import pyglet
from pyglet import *
class Entity(object):
def __init__(self, id, size, x, y, rot):
self.id = id
self.size = size
self.x = x
self.y = y
self.rot = rot
def draw(self):
glLoadIdentity()
glTranslatef(self.x, self.y, 0.0)
glRotatef(self.rot, 0, 0, 1)
glScalef(self.size, self.size, 1.0)
glBegin(GL_TRIANGLES)
glColor4f(1.0, 0.0, 0.0, 0.0)
glVertex2f(0.0, 0.5)
glColor4f(0.0, 0.0, 1.0, 1.0)
glVertex2f(0.2, -0.5)
glColor4f(0.0, 0.0, 1.0, 1.0)
glVertex2f(-0.2, -0.5)
glEnd()
# vim: et sw=4 sts=4
| [
"jared@jaredforsyth.com"
] | jared@jaredforsyth.com |
1e03619a331e14654fa38e66498e961c5be32f57 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/4308/codes/1635_2445.py | a6e6742d111f1323488ecfe4d2d366cb270ef44c | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | t = input("temp em celsius c / F: ")
vt = float(input("valor da temp: "))
# NOTE(review): the prompt offers lowercase "c" but only uppercase "C"/"F"
# are matched, and `v` stays undefined (NameError below) for any other input.
if (t == "F"):
	v = 5 / 9 * (vt - 32)
if (t == "C"):
	v = (vt *(9 / 5)) + 32
# Print the converted temperature rounded to 2 decimal places.
print(round(v , 2))
"jvlo@icomp.ufam.edu.br"
] | jvlo@icomp.ufam.edu.br |
b1f8ede7e93347ddc25e9082984a18a3e6fc2ee9 | 41c26da9c57052a3c9cd17b81d91f41ef074cf8d | /MyLeetCode/python/Remove Duplicates from Sorted Array II.py | fe24b6747a5ccd8a30d9d99b4a59544ec3d72026 | [] | no_license | ihuei801/leetcode | a82f59a16574f4781ce64a5faa099b75943de94e | fe79161211cc08c269cde9e1fdcfed27de11f2cb | refs/heads/master | 2021-06-08T05:12:53.934029 | 2020-05-07T07:22:25 | 2020-05-07T07:22:25 | 93,356,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | ###
# Time Complexity: O(n)
# Space Complexity: O(1)
###
class Solution(object):
    """LeetCode 80: Remove Duplicates from Sorted Array II.

    Compacts a sorted list in place so that every value appears at most
    twice, returning the new logical length.
    """
    def removeDuplicates(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        # A list of up to two elements can never hold more than two copies.
        if len(nums) < 3:
            return len(nums)
        # `l` is the next write slot; `i` scans the remainder of the list.
        i = l = 2
        while i < len(nums):
            # Keeping nums[i] only when it exceeds the element two slots
            # behind the write cursor permits exactly two copies of each
            # value (input is sorted ascending).
            if nums[i] > nums[l-2]:
                nums[l] = nums[i]
                l += 1
            i += 1
        return l | [
"hhuang@pinterest.com"
] | hhuang@pinterest.com |
fb671f0b336b329f3ccd3d3bb5bd17cb48bb1a92 | 029aa4fa6217dbb239037dec8f2e64f5b94795d0 | /Python算法指南/48_矩阵元素ZigZag返回_图形轨迹_难.py | c9d061da4948446bc195b407f14364f528fcccfc | [] | no_license | tonyyo/algorithm | 5a3f0bd4395a75703f9ee84b01e42a74283a5de9 | 60dd5281e7ce4dfb603b795aa194a67ff867caf6 | refs/heads/master | 2022-12-14T16:04:46.723771 | 2020-09-23T06:59:33 | 2020-09-23T06:59:33 | 270,216,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,988 | py | class Solution:
def printZMatrix(self, matrix):
x, y= 0, 0
xLen = len(matrix[0])
yLen = len(matrix)
dx = [-1, 1]
dy = [1, -1]
ans = [matrix[x][y]]
direct = 1
for i in range(xLen * yLen - 1): # 因为提前加入了一个元素, 所以长度减1
nextX = x + dx[direct]
nextY = y + dy[direct]
if nextX >= 0 and nextX < xLen and nextY >= 0 and nextY < yLen:
x = x + dx[direct]
y = y + dy[direct]
ans.append(matrix[y][x])
else:
if direct == 1:
if nextY < 0:
x = x + 1
ans.append(matrix[y][x])
else:
y = y + 1
ans.append(matrix[y][x])
direct = 0
else:
if nextX < 0:
y = y + 1
ans.append(matrix[y][x])
else:
x = x + 1
ans.append(matrix[y][x])
direct = 1
return ans
# Main entry point: demo run of the zig-zag traversal.
if __name__ == "__main__":
    matrim = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
    # Create the solver object.
    solution = Solution()
    print("输入的数组为:", matrim)
    print("ZigZag顺序返回矩阵的所有元素是:", solution.printZMatrix(matrim))
# class Solution:
# def printZMatrix(self, matrix):
# if len(matrix) == 0:
# return []
# x, y = 0, 0
# n, m = len(matrix), len(matrix[0])
# rows, cols = range(n), range(m)
# dx = [1, -1] #x的左右方向
# dy = [-1, 1] #y的上下方向
# direct = 1 # 1 = 左下方移动, 0 = 右上方移动,
# result = [] #轨迹上的点序
# for i in range(len(matrix) * len(matrix[0])): # 循环二维列表
# result.append(matrix[x][y])
# nextX = x + dx[direct] # 试探
# nextY = y + dy[direct]
# if nextX not in rows or nextY not in cols: # 当x和y都在集合中时, 不需要进入该判断条件
# if direct == 1: #左下方移动, 只有可能, 左边小于0, 下面超出范围.
# if nextY >= m: # m是最大行号
# nextX, nextY = x + 1, y # 向左下移动超过最大行, 则向右横向移动
# else:
# nextX, nextY = x, y + 1 # 否则, 向下移动
# else:
# if nextX >= n: # n是最大列号
# nextX, nextY = x, y + 1 # 向右上移动超过最大列, 则向下移动
# else:
# nextX, nextY = x + 1, y #否则, 向右移动
# direct = 1 - direct # 超出列表的最大行数或列数, 改变方向
# x, y = nextX, nextY
# return result | [
"1325338208@qq.com"
] | 1325338208@qq.com |
d043c6f4acf1b99b655fbe8953cdfe77b06d817f | 2c942aec676880cd1a80251005f6d8a3f6fe605a | /learning/sqlalchemy/firebird.py | a0b0e2028105727a853d16176d09ffe5206ae96b | [] | no_license | rsprenkels/python | 53ac1c89b5d60642f1a0c692a8bbac6b6d99d4c0 | 1e621ed146bdc52b735613c124a1bab36a36a39a | refs/heads/master | 2021-06-24T03:37:05.056202 | 2020-12-21T17:19:24 | 2020-12-21T17:19:24 | 151,983,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,113 | py | import logging
import time
from sqlalchemy import create_engine
import re
# need to have FDB python firebird driver installed: https://pypi.org/project/fdb/
# and some libraries: sudo apt-get install firebird3.0-common firebird3.0-utils firebird-dev firebird3.0-doc
# engine = create_engine('mysql+pymysql://etl:fN9GwzhXrYtcrj@dev-reportingdb001.m.int1-dus.dg-ao.de/dwsta')
#engine = create_engine('mysql+pymysql://jdbc:firebirdsql://10.100.211.55:3050/D:\\variobill\\production_dg\\data\\DG_VARIOBILL.FDB?lc_ctype=UTF8')
# NOTE(review): host and SYSDBA credentials are hard-coded in this DSN --
# consider reading them from configuration or environment variables.
engine = create_engine('firebird+fdb://SYSDBA:Guiffez9@10.100.211.55:3050/D:\\variobill\\production_dg\\data\\DG_VARIOBILL.FDB')
query = """
SELECT
T.*,
-- lots of work is available, but throughput in the last minute less than <threshhold>
CASE WHEN T.num_old_notcompleted_orders >= 1 AND T.num_recently_finished < 5
THEN 1
ELSE 0
END AS Document_service_is_down
FROM (
SELECT (
SELECT
count(*) AS num_old_notcompleted_orders
FROM
DOCUMENT_ORDERS
WHERE
TS_WORK_FINISHED IS NULL -- order not completed
AND TS_ORDER_CREATED > dateadd( -10 DAY TO CAST('Now' AS DATE)) -- orders created in last 10 days
AND datediff(SECOND, TS_ORDER_CREATED, CAST('NOW' AS timestamp)) >= 60 -- orders older than 60 seconds
), (
SELECT
count(*) AS num_recently_finished
FROM
DOCUMENT_ORDERS
WHERE
TS_WORK_FINISHED IS NOT NULL -- order completed
AND TS_ORDER_CREATED > dateadd( -10 DAY TO CAST('Now' AS DATE)) -- orders created in last 10 days
AND datediff(SECOND, TS_WORK_FINISHED, CAST('NOW' AS timestamp)) < 30 -- finished within the last 30 seconds
)
FROM
RDB$DATABASE rd
) T
"""
# Timestamped INFO-level log lines for this queue monitor.
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
log = logging.getLogger('queue_moni')
log.info("Starting ...")
# Poll the database forever: run the health-check query every 5 seconds
# and log each result row as a column-name -> value dict.
while True:
    connection = engine.connect()
    result = connection.execute(query)
    for row in result:
        res = {}
        # Pair column names with the row's values.
        for k, v in zip(result.keys(), row):
            res[k] = v
        log.info(res)
    connection.close()
    time.sleep(5)
# NOTE(review): unreachable -- the `while True` loop above never exits.
connection.close() | [
"ron.sprenkels@gmail.com"
] | ron.sprenkels@gmail.com |
d27f9f0db14090d79918969d17c5555b04d42283 | 578db86c51d44ebddd0dc7b1738985b3dc69eb74 | /corehq/apps/app_manager/migrations/0014_create_exchangeapplication.py | e35359020468e6455be7cbabadb3bff0b2f9ed1d | [
"BSD-3-Clause"
] | permissive | dimagi/commcare-hq | a43c7dd32b5f89c89fd5aa1b1359ab7301f4ff6b | e7391ddae1af1dbf118211ecb52c83fc508aa656 | refs/heads/master | 2023-08-16T22:38:27.853437 | 2023-08-16T19:07:19 | 2023-08-16T19:07:19 | 247,278 | 499 | 203 | BSD-3-Clause | 2023-09-14T19:03:24 | 2009-07-09T17:00:07 | Python | UTF-8 | Python | false | false | 816 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-03-12 20:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Must run after the app_manager migration that renamed
    # SQLGlobalAppConfig.
    dependencies = [
        ('app_manager', '0013_rename_sqlglobalappconfig'),
    ]
    operations = [
        # New table holding (domain, app_id) references.
        migrations.CreateModel(
            name='ExchangeApplication',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('domain', models.CharField(max_length=255)),
                ('app_id', models.CharField(max_length=255)),
            ],
        ),
        # An app can be listed at most once per domain.
        migrations.AlterUniqueTogether(
            name='exchangeapplication',
            unique_together=set([('domain', 'app_id')]),
        ),
    ]
| [
"orange.jenny@gmail.com"
] | orange.jenny@gmail.com |
8c8481a5aa61b668a41404a7d40c3abef74520ab | bb83b8e085d74dc2cd7e32e2688b103410309c3b | /tests/03-opencv-test-slow.py | 2d08a5edbbc370e24a3d02aa9a6f9720ddb69a58 | [] | no_license | praveen-palanisamy/duckietown-slimremote | 7fe21db47de9e68edd6bdf70b0f5e81ec6919fff | 3e4ec7e2995b82b618d20c580d44cf207dd47540 | refs/heads/master | 2020-03-27T08:09:43.455059 | 2018-08-21T18:34:33 | 2018-08-21T18:34:33 | 146,225,722 | 0 | 0 | null | 2018-08-26T23:58:55 | 2018-08-26T23:58:54 | null | UTF-8 | Python | false | false | 681 | py | import random
import time
import numpy as np
import cv2
# Open the default camera (device index 0).
cap = cv2.VideoCapture(0)
# Dump the first 19 capture properties for inspection.
for i in range(19):
    print(i, cap.get(i))
# properties are listed here:
# https://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#videocapture-get
cap.set(3, 320)  # property 3 = frame width
cap.set(4, 240)  # property 4 = frame height
cap.set(5, 90) # framerate (90Hz is PiCam limit on old firmware)
tests = 10   # number of frames to grab before exiting
timings = 0  # NOTE(review): never updated or read below
i = 0
while (True):
    # Capture frame-by-frame
    start = time.time()  # NOTE(review): `start` is never read afterwards
    ret, frame = cap.read()
    if not ret:
        print("something wrong")
    else:
        print("got frame")
    i += 1
    if i == tests:
        break
    # Sleep 0-3 s so frames are requested at irregular intervals.
    time.sleep(random.random()*3)
cap.release()
| [
"fgolemo@gmail.com"
] | fgolemo@gmail.com |
6c18ec2a9320cc3a969de22f2e5ffaa73575cebe | 69a327a2af65d7252b624fe7cadd537eb51ca6d6 | /Greedy/BOJ_12915.py | 6988d3195bc8af0fb54e3b0d0475f6eb2c6756c1 | [] | no_license | enriver/algorithm_python | 45b742bd17c6a2991ac8095d13272ec4f88d9bf5 | 77897f2bf0241756ba6fd07c052424a6f4991090 | refs/heads/master | 2023-09-03T23:28:23.975609 | 2021-10-29T09:25:32 | 2021-10-29T09:25:32 | 278,907,031 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | # 대회 개최 - S2
import sys
if __name__=="__main__":
    # Greedily count how many contests can be held.  Each contest needs
    # one easy, one medium and one hard problem; EM problems can fill the
    # easy OR medium slot, MH problems the medium OR hard slot.
    E,EM,M,MH,H=map(int,sys.stdin.readline().split())
    count=0
    while True:
        # check[i] marks whether slot i (easy/medium/hard) was filled.
        check=[False]*3
        # Easy slot: prefer a dedicated E problem, fall back to EM.
        if E>0:
            E-=1
            check[0]=True
        else:
            if EM>0:
                EM-=1
                check[0]=True
            else:
                break
        # Medium slot: prefer M; otherwise draw from whichever of EM/MH
        # currently has more remaining (greedy balancing).
        if M>0:
            M-=1
            check[1]=True
        else:
            if EM>0 and MH>0:
                if EM>=MH:
                    EM-=1
                else:
                    MH-=1
            elif EM==0 and MH>0:
                MH-=1
            elif EM>0 and MH==0:
                EM-=1
            else:
                break
            check[1]=True
        # Hard slot: prefer a dedicated H problem, fall back to MH.
        if H>0:
            H-=1
            check[2]=True
        else:
            if MH>0:
                MH-=1
                check[2]=True
            else:
                break
        if False in check:
            break
        count+=1
    print(count) | [
"riverkeh@naver.com"
] | riverkeh@naver.com |
10d080ce6227bf8d9ed60804bfc7694c2aab1388 | af8f0d50bb11279c9ff0b81fae97f754df98c350 | /src/book/api/serializers/bookrent.py | 6150b5b433197328d3a8ea1364681b88f89caa6d | [
"Apache-2.0"
] | permissive | DmytroKaminskiy/ltt | 592ed061efe3cae169a4e01f21d2e112e58714a1 | d08df4d102e678651cd42928e2343733c3308d71 | refs/heads/master | 2022-12-18T09:56:36.077545 | 2020-09-20T15:57:35 | 2020-09-20T15:57:35 | 292,520,616 | 0 | 0 | Apache-2.0 | 2020-09-20T15:49:58 | 2020-09-03T09:09:26 | HTML | UTF-8 | Python | false | false | 719 | py | from book.models import BookRent
from rest_framework import serializers
class BookRentSerializer(serializers.ModelSerializer):
    """Serializer for BookRent records.

    `price`, `price_period`, `days_period`, `created`, `end` and
    `days_period_initial` are exposed read-only; `book` is required
    on input.
    """

    class Meta:
        model = BookRent
        fields = (
            'id',
            'price', 'price_period', 'days_period',
            'user_id', 'book', 'created', 'end',
            'status', 'days_period_initial',
        )
        extra_kwargs = {
            'price': {'read_only': True},
            'price_period': {'read_only': True},
            'days_period': {'read_only': True},
            'book': {'required': True},
            'created': {'read_only': True},
            'end': {'read_only': True},
            'days_period_initial': {'read_only': True},
        }
| [
"dmytro.kaminskyi92@gmail.com"
] | dmytro.kaminskyi92@gmail.com |
2d07f87480d88e8e632428cfd92799b42dc34c4e | c531778b6b568e5924fcf438dce274067b6e1d31 | /resources/lib/common/fileops.py | 9747eb0db8fdded067bf1e3ca6134db8c778c7c0 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | CastagnaIT/plugin.video.netflix | a5180fbbaea244a490f750a2dd417b4e7303321a | ece10d24449faaccd7d65a4093c6b5679ee0b383 | refs/heads/master | 2023-07-01T23:32:20.442923 | 2023-06-27T06:42:18 | 2023-06-27T06:42:18 | 164,314,803 | 2,019 | 456 | MIT | 2023-09-13T13:34:06 | 2019-01-06T14:27:56 | Python | UTF-8 | Python | false | false | 5,675 | py | # -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Helper functions for file operations
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
import os
import xml.etree.ElementTree as ET
import xbmc
import xbmcvfs
from resources.lib.globals import G
from .misc_utils import build_url
def check_folder_path(path):
    """
    Ensure a folder path ends with its path delimiter.

    Appends '/' or '\\' when the path uses that separator but does not
    already end with it (xbmcvfs.exists needs the trailing delimiter).
    """
    suffix = ''
    for sep in ('/', '\\'):
        if sep in path and not path.endswith(sep):
            suffix = sep
    return path + suffix
def folder_exists(path):
    """
    Check if a given folder path exists
    :param path: The path (a trailing delimiter is appended if missing)
    :return: True if it exists
    """
    return xbmcvfs.exists(check_folder_path(path))


def create_folder(path):
    """
    Create a folder if it does not exist yet
    :param path: The path
    """
    if not folder_exists(path):
        xbmcvfs.mkdirs(path)


def file_exists(file_path):
    """
    Check if a given file exists
    :param file_path: File path to check
    :return: True if it exists
    """
    return xbmcvfs.exists(xbmcvfs.translatePath(file_path))
def copy_file(from_path, to_path):
    """
    Copy a file to destination
    :param from_path: File path to copy
    :param to_path: Destination file path
    :return: True if copied
    """
    # The previous `try/finally: pass` wrapper did nothing (an empty
    # finally suppresses no errors), so the call is made directly.
    return xbmcvfs.copy(xbmcvfs.translatePath(from_path),
                        xbmcvfs.translatePath(to_path))
def save_file_def(filename, content, mode='wb'):
    """
    Saves the given content under given filename, in the default add-on data folder
    :param filename: The filename
    :param content: The content of the file (bytes-like)
    :param mode: optional open mode (default: binary write)
    """
    save_file(os.path.join(G.DATA_PATH, filename), content, mode)


def save_file(file_path, content, mode='wb'):
    """
    Saves the given content under given filename path
    :param file_path: The filename path
    :param content: The content of the file (bytes-like)
    :param mode: optional open mode (default: binary write)
    """
    with xbmcvfs.File(xbmcvfs.translatePath(file_path), mode) as file_handle:
        file_handle.write(bytearray(content))


def load_file_def(filename, mode='rb'):
    """
    Loads the content of a given filename, from the default add-on data folder
    :param filename: The file to load
    :param mode: optional open mode (default: binary read)
    :return: The content of the file, decoded as UTF-8 text
    """
    return load_file(os.path.join(G.DATA_PATH, filename), mode)


def load_file(file_path, mode='rb'):
    """
    Loads the content of a given filename
    :param file_path: The file path to load
    :param mode: optional open mode (default: binary read)
    :return: The content of the file, decoded as UTF-8 text
    """
    with xbmcvfs.File(xbmcvfs.translatePath(file_path), mode) as file_handle:
        return file_handle.readBytes().decode('utf-8')
def delete_file_safe(file_path):
    """Delete `file_path` if it exists; no-op when the file is absent."""
    # The previous `try/finally: pass` wrapper was a no-op and has been
    # removed; any xbmcvfs error still propagates, exactly as before.
    if xbmcvfs.exists(file_path):
        xbmcvfs.delete(file_path)


def delete_file(filename):
    """Delete `filename` from the add-on data folder (G.DATA_PATH)."""
    file_path = xbmcvfs.translatePath(os.path.join(G.DATA_PATH, filename))
    xbmcvfs.delete(file_path)
def list_dir(path):
    """
    List the contents of a folder
    :param path: The folder path to list
    :return: The contents of the folder as tuple (directories, files)
    """
    return xbmcvfs.listdir(path)
def delete_folder_contents(path, delete_subfolders=False):
    """
    Delete all files in a folder
    :param path: Path to perform delete contents
    :param delete_subfolders: If True delete also all subfolders (recursively)
    """
    directories, files = list_dir(xbmcvfs.translatePath(path))
    for filename in files:
        xbmcvfs.delete(os.path.join(path, filename))
    if not delete_subfolders:
        return
    for directory in directories:
        delete_folder_contents(os.path.join(path, directory), True)
        # Give the system time to finish the previous deletes, otherwise
        # removing the (now empty) folder can fail
        xbmc.sleep(80)
        xbmcvfs.rmdir(os.path.join(path, directory))


def delete_folder(path):
    """Delete a folder with all of its contents"""
    delete_folder_contents(path, True)
    # Give the system time to finish the previous deletes, otherwise
    # removing the (now empty) folder can fail
    xbmc.sleep(80)
    xbmcvfs.rmdir(xbmcvfs.translatePath(path))
def write_strm_file(videoid, file_path):
    """Write a playable add-on URL for `videoid` to a STRM file."""
    # Context-manager form for consistency with save_file/load_file above;
    # the file is closed on success and on error, as with try/finally.
    with xbmcvfs.File(xbmcvfs.translatePath(file_path), 'wb') as filehandle:
        filehandle.write(bytearray(build_url(videoid=videoid,
                                             mode=G.MODE_PLAY_STRM).encode('utf-8')))


def write_nfo_file(nfo_data, file_path):
    """Write `nfo_data` (an ElementTree element) to an NFO file as XML."""
    with xbmcvfs.File(xbmcvfs.translatePath(file_path), 'wb') as filehandle:
        # Emit an explicit XML declaration ahead of the serialized tree.
        filehandle.write(bytearray('<?xml version=\'1.0\' encoding=\'UTF-8\'?>'.encode('utf-8')))
        filehandle.write(bytearray(ET.tostring(nfo_data, encoding='utf-8', method='xml')))
def join_folders_paths(*args):
    """Join multiple folder path fragments in a safe way"""
    # Avoid os.path.join: with special chars like % it can break the path
    return xbmcvfs.makeLegalFilename('/'.join(args))
def get_xml_nodes_text(nodelist):
    """Concatenate the character data of every text node in `nodelist`."""
    return ''.join(node.data for node in nodelist
                   if node.nodeType == node.TEXT_NODE)
| [
"gottardo.stefano.83@gmail.com"
] | gottardo.stefano.83@gmail.com |
788e6db343a9cecdc688f28d7679566f6d75ae21 | db575f3401a5e25494e30d98ec915158dd7e529b | /BIO_Stocks/ACHV.py | 54d786c8ecbb6e82a8972b7e44fa20ff5367c587 | [] | no_license | andisc/StockWebScraping | b10453295b4b16f065064db6a1e3bbcba0d62bad | 41db75e941cfccaa7043a53b0e23ba6e5daa958a | refs/heads/main | 2023-08-08T01:33:33.495541 | 2023-07-22T21:41:08 | 2023-07-22T21:41:08 | 355,332,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,107 | py | import requests
from lxml import html
from bs4 import BeautifulSoup
import os
from datetime import date, datetime
from ValidationTools import validateday
from Database_Connections import InsertData, Insert_Logging
def main(id_control):
    """Scrape the latest ACHV press release and store it if dated today.

    :param id_control: control id forwarded to Insert_Logging on failure
    """
    try:
        url = 'http://ir.achievelifesciences.com/news-releases'
        headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
        result = requests.get(url, headers=headers)
        #print(result.content.decode())
        html_content = result.content.decode()
        soup = BeautifulSoup(html_content, 'html.parser')
        #print(soup)
        # Each press release is rendered as an <li class="wd_item">.
        articles = soup.findAll('li', attrs={'class':'wd_item'})
        # get first article in the list
        FIRST_ARTICLE = articles[0]
        article_date = FIRST_ARTICLE.find('div', attrs={'class':'wd_date'})
        article_desc = FIRST_ARTICLE.find('div', attrs={'class':'wd_title'})
        v_article_date = article_date.text.lstrip().rstrip()
        # if the process finds any article carrying today's date
        istoday, v_art_date = validateday(v_article_date)
        if (istoday == True):
            # Ticker symbol is derived from this file's name (ACHV).
            v_ticker = os.path.basename(__file__).replace(".py", "")
            v_url = article_desc.a.get('href')
            v_description = article_desc.text.lstrip().rstrip()
            now = datetime.now()
            print("URL: " + v_url)
            print("DESCRIPTION: " + v_description)
            print("ARTICLE_DATE: " + str(now))
            # Insert articles; use the listing URL when the link is not absolute.
            if "https://" in v_url:
                InsertData(v_ticker, v_description, v_url, v_art_date)
            else:
                InsertData(v_ticker, v_description, url, v_art_date)
    except Exception:
        # Errors are logged and swallowed so one bad ticker does not
        # abort the surrounding batch run.
        error_message = "Entrou na excepção ao tratar " + os.path.basename(__file__) + "..."
        print(error_message)
        Insert_Logging(id_control, 'Detail', error_message)
        pass
#InsertData()
if __name__ == "__main__":
    # NOTE(review): main() requires an `id_control` argument, so this
    # direct invocation raises TypeError -- confirm the intended call.
    main()
| [
"andisc_3@hotmail.com"
] | andisc_3@hotmail.com |
d969ab824e08e75a16eeb90aaee42be1aa6b5858 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_24750.py | 3c1abfc799213a15517ec3fcd462723ed8f6473a | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | # Convert value to row index in NumPy array
# Map A[0] to its insertion index within the sorted `categories` array.
# NOTE(review): assumes `categories` is sorted ascending -- required by
# np.searchsorted; confirm at the call site.
cat_index = np.searchsorted(categories, A[0])
# Store A[2] at row A[1], column cat_index of matrix B.
B[A[1], cat_index] = A[2]
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
18a8fc714c96568c2445ca1875ce8a2ac25eaa47 | 2612f336d667a087823234daf946f09b40d8ca3d | /python/helpers/typeshed/stdlib/3.5/zipapp.pyi | 9fac5a026afff523bc4f6860c2b92dce62e420c9 | [
"MIT",
"Apache-2.0"
] | permissive | tnorbye/intellij-community | df7f181861fc5c551c02c73df3b00b70ab2dd589 | f01cf262fc196bf4dbb99e20cd937dee3705a7b6 | refs/heads/master | 2021-04-06T06:57:57.974599 | 2018-03-13T17:37:00 | 2018-03-13T17:37:00 | 125,079,130 | 2 | 0 | Apache-2.0 | 2018-03-13T16:09:41 | 2018-03-13T16:09:41 | null | UTF-8 | Python | false | false | 363 | pyi | # Stubs for zipapp (Python 3.5+)
from pathlib import Path
from typing import BinaryIO, Optional, Union
# Path-like inputs accepted by zipapp: a str path, a pathlib.Path, or an
# already-open binary stream.
_Path = Union[str, Path, BinaryIO]

class ZipAppError(Exception): ...

def create_archive(source: _Path, target: Optional[_Path] = ..., interpreter: Optional[str] = ..., main: Optional[str] = ...) -> None: ...
def get_interpreter(archive: _Path) -> str: ...
| [
"andrey.vlasovskikh@jetbrains.com"
] | andrey.vlasovskikh@jetbrains.com |
873c78875fe1bf5a9557aea8427a692b110ac7a0 | e7a56f1f086352a45947a7ab3cecd71828d21f50 | /tovp/promotions/migrations/0005_auto_20150302_1323.py | f64b2f62600beeece25c1687f6fc8be5aa6f4fe1 | [
"MIT"
] | permissive | nrsimha/tovp | af2df2967a47e43c5378dc52c99652e8242c429b | 311bc957c95c294811d737f5df30b0a218d35610 | refs/heads/master | 2023-05-26T05:50:52.405855 | 2017-05-10T13:40:59 | 2017-05-10T13:40:59 | 27,473,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,745 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):

    dependencies = [
        ('promotions', '0004_auto_20150302_1036'),
    ]

    # Adds the same `brick_status` workflow field (need_to_send /
    # name_given / brick_made, defaulting to need_to_send) to each of
    # the four promotion models.
    operations = [
        migrations.AddField(
            model_name='goldenbrick',
            name='brick_status',
            field=models.CharField(verbose_name='Brick Status', max_length=100, default='need_to_send', choices=[('need_to_send', 'Need to send to DC'), ('name_given', 'Name given to DC'), ('brick_made', 'Brick is made')]),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='guruparamparabrick',
            name='brick_status',
            field=models.CharField(verbose_name='Brick Status', max_length=100, default='need_to_send', choices=[('need_to_send', 'Need to send to DC'), ('name_given', 'Name given to DC'), ('brick_made', 'Brick is made')]),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='nrsimhatile',
            name='brick_status',
            field=models.CharField(verbose_name='Brick Status', max_length=100, default='need_to_send', choices=[('need_to_send', 'Need to send to DC'), ('name_given', 'Name given to DC'), ('brick_made', 'Brick is made')]),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='radhamadhavabrick',
            name='brick_status',
            field=models.CharField(verbose_name='Brick Status', max_length=100, default='need_to_send', choices=[('need_to_send', 'Need to send to DC'), ('name_given', 'Name given to DC'), ('brick_made', 'Brick is made')]),
            preserve_default=True,
        ),
    ]
| [
"pnd@mayapurmedia.com"
] | pnd@mayapurmedia.com |
354524169b42cadba1eed445e88ac79ae684b416 | 9ffabcaef668b1c0ec8f9451e2d02b472ca6c61d | /compute/compute_provisioner/compute_provisioner/allocator.py | 684862cf43b20da1cdbcc0dc1c61183335c2d3a6 | [] | no_license | ESGF/esgf-compute-wps | 704ee5940e9cbc7d12ef41e0a724202c69fffc67 | 82fb3e79f8e43367fa31a6dba7127da4b744a944 | refs/heads/devel | 2021-07-15T06:52:30.731608 | 2021-03-11T01:28:40 | 2021-03-11T01:28:40 | 29,711,838 | 9 | 6 | null | 2020-02-12T18:12:47 | 2015-01-23T02:09:58 | Python | UTF-8 | Python | false | false | 3,829 | py | import logging
import yaml
from jinja2 import DebugUndefined
from jinja2 import Template
from kubernetes import client
from kubernetes import config
logger = logging.getLogger(__name__)
class KubernetesAllocator(object):
    """Thin wrapper over the Kubernetes client APIs used to create,
    list and delete namespaced resources for provisioned jobs."""

    def __init__(self):
        # Loads in-cluster credentials: the provisioner must itself run
        # inside a Kubernetes pod.
        config.load_incluster_config()

        self.core = client.CoreV1Api()
        self.apps = client.AppsV1Api()
        self.extensions = client.ExtensionsV1beta1Api()

    def create_pod(self, namespace, body, **kwargs):
        return self.core.create_namespaced_pod(namespace, body, **kwargs)

    def list_pods(self, namespace, label_selector, **kwargs):
        return self.core.list_namespaced_pod(namespace, label_selector=label_selector, **kwargs)

    def create_deployment(self, namespace, body, **kwargs):
        return self.apps.create_namespaced_deployment(namespace, body, **kwargs)

    def create_service(self, namespace, body, **kwargs):
        return self.core.create_namespaced_service(namespace, body, **kwargs)

    def create_ingress(self, namespace, body, **kwargs):
        return self.extensions.create_namespaced_ingress(namespace, body, **kwargs)

    def create_config_map(self, namespace, body, **kwargs):
        return self.core.create_namespaced_config_map(namespace, body, **kwargs)

    def delete_resources(self, namespace, label_selector, **kwargs):
        """Delete every pod/deployment/service/ingress/config map in
        `namespace` that matches `label_selector`."""
        # Maps resource kind -> API object exposing its list/delete calls.
        api_mapping = {
            'pod': self.core,
            'deployment': self.apps,
            'service': self.core,
            'ingress': self.extensions,
            'config_map': self.core,
        }

        for name, api in api_mapping.items():
            # Method names follow the client's list/delete naming scheme.
            list_name = f'list_namespaced_{name!s}'
            delete_name = f'delete_namespaced_{name!s}'

            output = getattr(api, list_name)(namespace, label_selector=label_selector, **kwargs)

            logger.info(f'Removing {len(output.items)!r} {name!s}')

            for x in output.items:
                getattr(api, delete_name)(x.metadata.name, namespace, **kwargs)

    def create_resources(self, request, namespace, labels, service_account_name, image_pull_secret, **kwargs):
        """Render each Jinja-templated manifest in `request`, stamp it
        with `labels`, and create the resource in `namespace`."""
        for item in request:
            template = Template(item, undefined=DebugUndefined)

            # NOTE(review): this local `config` shadows the module-level
            # `kubernetes.config` import within this method.
            config = {
                'image_pull_secret': image_pull_secret,
                'labels': [f'{x}: {y}' for x, y in labels.items()],
            }

            rendered_item = template.render(**config)

            yaml_data = yaml.safe_load(rendered_item)

            try:
                yaml_data['metadata']['labels'].update(labels)
            except KeyError:
                # Manifest had no labels section yet; create one.
                yaml_data['metadata'].update({
                    'labels': labels
                })

            kind = yaml_data['kind']

            logger.info(f'Allocating {kind!r} with labels {yaml_data["metadata"]["labels"]!r}')

            if kind == 'Pod':
                # Pods/deployments get the service account and pull
                # secret injected before creation.
                yaml_data['spec']['serviceAccountName'] = service_account_name
                yaml_data['spec']['imagePullSecrets'] = [
                    {'name': image_pull_secret},
                ]

                self.create_pod(namespace, yaml_data)
            elif kind == 'Deployment':
                yaml_data['spec']['template']['spec']['serviceAccountName'] = service_account_name
                yaml_data['spec']['template']['spec']['imagePullSecrets'] = [
                    {'name': image_pull_secret},
                ]

                self.create_deployment(namespace, yaml_data)
            elif kind == 'Service':
                self.create_service(namespace, yaml_data)
            elif kind == 'Ingress':
                self.create_ingress(namespace, yaml_data)
            elif kind == 'ConfigMap':
                self.create_config_map(namespace, yaml_data)
            else:
                raise Exception('Requested an unsupported resource')
| [
"boutte.jason@gmail.com"
] | boutte.jason@gmail.com |
670730c7a29ef8f6c8e267f16949943f8b56d7af | d9f3fd0661bcf13416eb0d3d7bfbc545706af3e0 | /dev_bot.py | f6d2f13d63f3404838ddeda7016d2663ba7ab7c4 | [
"MIT"
] | permissive | jayrav13/njit-events-api | a26edffd145f4820e53933f4555d8b4a4ca601fc | 0027c0741601170d4806e45dbf4c08eecfb2cecc | refs/heads/master | 2021-01-10T08:59:38.574026 | 2016-01-30T23:33:56 | 2016-01-30T23:33:56 | 44,531,107 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,984 | py | # By Jay Ravaliya
# Imports
from twython import Twython
from secret import consumer_key, consumer_secret, access_token, access_secret
from model import Posted, db
import requests
import json
import datetime
import random
import math
import sys
# NOTE(review): this script uses Python 2 `print` statements below.
# Set up Twitter keys
twitter = Twython(consumer_key, consumer_secret, access_token, access_secret)

# Set up payload for analytics.
payload = {
    "userid" : "TwitterBot",
    "device" : "TwitterBot"
}

# Send post request to API to get data, convert it to JSON right away.
r = requests.post("http://eventsatnjit.jayravaliya.com/api/v0.2/events", json=payload).json()

# Retrieve current time.
currenttime = datetime.datetime.now()

# Total number of events, counted.
total = 0

# At 8:00 AM, post morning tweet.
if(currenttime.hour == 8):
    # Count total elements that are taking place today. Post it.
    # Else, post that there are no events going on.
    for elem in r["response"]:
        if elem["datetime"]["is_today"] == True:
            total = total + 1
    if total > 0:
        tweet = "There are " + str(total) + " events taking place today! Be sure to stop by and check some out! via @EventsAtNJIT"
    else:
        tweet = "Ah - no events going on today! Be sure to check back tomorrow to see what's going on!"
    print(tweet)
    twitter.update_status(status=tweet)

# If posting at night, post # of events going on tomorrow.
elif(currenttime.hour == 22):
    tweet = "That's all for today! Visit back tomorrow to learn about the awesome events taking place on campus! via @EventsAtNJIT"
    twitter.update_status(status=tweet)

# Posting every two hours:
else:
    # Starting text.
    starters = [
        "Awesome event coming up: ",
        "Did you know? ",
        "Check this out: ",
        "Stop by: "
    ]
    # Categories to include.
    # NOTE(review): `categories` is never read below -- confirm intent.
    categories = [
        "Intramurals & Recreation",
        "Reception, Banquet, Party",
        "Lecture, Seminar, Workshop",
        "Conference, Fair",
        "Other"
    ]
    # Count the number of events. Exit if there are no events left.
    num_events = 0
    def today_events():
        """Count events flagged for today or tomorrow into num_events."""
        global num_events
        for elem in r["response"]:
            if (elem["datetime"]["is_today"] == True or elem["datetime"]["is_tomorrow"]):
                num_events = num_events + 1
    today_events()
    if (num_events == 0):
        print "NO EVENTS"
        sys.exit()
    # Input JSON element - output validity.
    def valid_event(elem):
        """True for single-day events today/tomorrow that have not yet
        started (or are starting right now)."""
        if (elem["datetime"]["is_today"] == True or elem["datetime"]["is_tomorrow"] == True):
            if (elem["datetime"]["multiday"] == False and (elem["datetime"]["currently_happening"] == False or elem["datetime"]["starting_now"] == True)):
                return True
        return False
    # Input JSON element - output tweet.
    def generate_tweet(elem):
        """Build a tweet string for the event, or None if it cannot fit
        within 140 characters."""
        print("Element Id: " + str(elem["id"]))
        # Random intro, unless happening now.
        if elem["datetime"]["currently_happening"] == True:
            intro = "Happening Now: "
        else:
            intro = starters[int(math.floor(random.random() * len(starters)))]
        # Add basic data.
        tweet = "\"" + elem["name"] + "\"" + " hosted by " + elem["organization"] + " "
        if elem["datetime"]["is_today"] == True:
            tweet = tweet + "starts today "
        elif elem["datetime"]["is_tomorrow"] == True:
            tweet = tweet + "starts tomorrow "
        elif elem["datetime"]["currently_happening"] == True:
            tweet = tweet + "started "
        else:
            tweet = tweet + "starts on " + elem["datetime"]["start"]["common_formats"]["date"] + " "
        # Finalize tweet, return.
        tweet = tweet + "at " + elem["datetime"]["start"]["common_formats"]["time"] + " in " + elem["location"] + "."
        # Prefer the version with the intro if it fits the 140-char limit.
        if len(intro + tweet) <= 140:
            return intro + tweet
        elif len(tweet) <= 140:
            return tweet
        else:
            return None
    # Loop through events, tweet the first valid, not-yet-posted one.
    for elem in r["response"]:
        if valid_event(elem) == True:
            try:
                tweet = generate_tweet(elem)
                # Skip events already recorded in the Posted table.
                p = Posted.query.filter_by(event_id=elem["id"]).first()
                if tweet != None and p == None:
                    print tweet + " / " + str(len(tweet))
                    p = Posted(elem["id"])
                    db.session.add(p)
                    db.session.commit()
                    twitter.update_status(status=tweet)
                    break
            except:
                # NOTE(review): bare except silently swallows all errors
                # (including the DB/Twitter failures) -- confirm intent.
                pass
| [
"jayrav13@gmail.com"
] | jayrav13@gmail.com |
cd157bf03647a3a30c7fc5d919c7b066c4747813 | 43f9cfd3761171ab59742d7a5b768b73e81eb973 | /lang/femtocode/thirdparty/meta/asttools/visitors/copy_tree.py | 09fe4891fcb2028d07ab600999f95c448f4c1035 | [
"Apache-2.0",
"BSD-2-Clause"
] | permissive | diana-hep/femtocode | 2980c4d39f941506d345651ee56ddb00a63320d5 | bfde538a99f35345eec8b5a0db670f29f83e1cc5 | refs/heads/master | 2021-01-19T12:13:04.303112 | 2017-08-15T05:42:49 | 2017-08-15T05:42:49 | 69,881,392 | 26 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,255 | py | '''
Created on Dec 12, 2011
@author: sean
'''
from . import Visitor
import ast
#FIXME: add tests
class CopyVisitor(Visitor):
    '''
    Copy only ast nodes and lists

    Child AST nodes are copied recursively; any non-AST field value
    (identifiers, constants, context objects) is shared by reference.
    '''
    def visitDefault(self, node):
        # Instantiate a fresh node of the same concrete AST class.
        Node = type(node)
        new_node = Node()
        for _field in Node._fields:
            if hasattr(node, _field):
                field = getattr(node, _field)
                if isinstance(field, (list, tuple)):
                    # Rebuild the child sequence, recursing into AST items.
                    # NOTE(review): tuples come back as lists here.
                    new_list = []
                    for item in field:
                        if isinstance(item, ast.AST):
                            new_item = self.visit(item)
                        else:
                            new_item = item
                        new_list.append(new_item)
                    setattr(new_node, _field, new_list)
                elif isinstance(field, ast.AST):
                    setattr(new_node, _field, self.visit(field))
                else:
                    # Plain value: copied by reference.
                    setattr(new_node, _field, field)
        # Preserve positional attributes (lineno, col_offset, ...).
        for _attr in node._attributes:
            if hasattr(node, _attr):
                setattr(new_node, _attr, getattr(node, _attr))
        return new_node

def copy_node(node):
    """Return a recursive copy of *node* (see CopyVisitor)."""
    return CopyVisitor().visit(node)
| [
"jpivarski@gmail.com"
] | jpivarski@gmail.com |
a327de873746d6c5b6eedee78c3955284df9f7b7 | 14913a0fb7e1d17318a55a12f5a181dddad3c328 | /07.garosero1.py | 55135979754b8aa2ae2388e221ad3cecc621ca44 | [] | no_license | Jesuisjavert/Algorithm | 6571836ec23ac3036565738c2bee94f416595f22 | 730549d19e66e20b3474a235a600958a8e036a0e | refs/heads/master | 2023-02-16T06:34:50.984529 | 2020-09-25T09:40:30 | 2020-09-25T09:40:30 | 330,849,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | for test_case in range(0, 10):
T = int(input())
N = 100
arr = [list(map(int, input().split())) for i in range(N)]
MAX = 0
eorkrtjs1 = eorkrtjs2 = 0
garo = 0
sero = 0
for i in range(N):
eorkrtjs1 += arr[i][i]
eorkrtjs2 += arr[i][99-i]
for j in range(N):
garo += arr[i][j]
sero += arr[j][i]
MAX = max(garo, sero, eorkrtjs1, eorkrtjs2)
print(f'{T},{Max}') | [
"jesuisjavert@gmail.com"
] | jesuisjavert@gmail.com |
d674081c8c96e211f3cd7a9640cd5819afa9a6fd | ca55dcaa64ea9db4068e13091321cfebecc0ff41 | /baekjoon/arithmeticOperations/2588.py | 70af070c2ec93c05f7df2f2c6e64bc1f58c44df7 | [] | no_license | gomtinQQ/algorithm-python | 8fb8343594b945099ae2a4dfa794ecb47e54ab0b | 751562922b66e335f621d366bb73dacdc7125140 | refs/heads/master | 2022-12-07T23:05:44.535593 | 2020-08-21T12:29:58 | 2020-08-21T12:29:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | '''
2588: A+B-2
'''
# Read two integers (one per line); their sum is printed below.
# NOTE(review): "A+B - 2" is Baekjoon problem 2558 -- the 2588 in the
# docstring looks like a typo; confirm the intended problem id.
A = int(input())
B = int(input())
print(A+B) | [
"minhyeonlee1@gmail.com"
] | minhyeonlee1@gmail.com |
0dcd15026a5b0eae2c404317ff80ccb29922343f | 1531f1bfe739bd978c5a5bc2ac07a8f5854f982a | /high freq 2.py | 00ada86080e63c6c011d175a877129ca6744c33b | [] | no_license | liyi0206/leetcode-self-practice | ad8da596e505e000aee8fdea3479269b55d066e3 | 577e07e6602390bfe6ddd9ab0e12ddc2bc58c0b3 | refs/heads/master | 2020-12-26T03:10:22.474843 | 2016-05-26T05:24:56 | 2016-05-26T05:24:56 | 59,178,916 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,276 | py | #94. Binary Tree Inorder Traversal
class TreeNode(object):
    """A single binary-tree node: a value plus optional left/right children."""
    def __init__(self, x):
        # Children start empty; callers wire them up after construction.
        self.val, self.left, self.right = x, None, None
class Solution(object):
    def inorderTraversal(self, root):
        """Iterative in-order traversal: left subtree, node, right subtree."""
        values, pending = [], []
        node = root
        while node or pending:
            while node:                 # slide down the left spine
                pending.append(node)
                node = node.left
            node = pending.pop()        # leftmost unvisited ancestor
            values.append(node.val)
            node = node.right           # then continue in its right subtree
        return values
#144. Binary Tree Preorder Traversal
class Solution(object):
    def preorderTraversal(self, root):
        """Iterative pre-order traversal: node, left subtree, right subtree."""
        if root is None:
            return []
        values, stack = [], [root]
        while stack:
            node = stack.pop()
            values.append(node.val)
            # Push right first so the left child is processed first.
            if node.right:
                stack.append(node.right)
            if node.left:
                stack.append(node.left)
        return values
#235. Lowest Common Ancestor of a Binary Search Tree
class Solution(object):
    def lowestCommonAncestor(self, root, p, q):
        """LCA in a BST: walk down until the value falls inside [p, q]."""
        lo, hi = sorted((p.val, q.val))
        node = root
        while node:
            if node.val < lo:
                node = node.right       # both targets are larger
            elif node.val > hi:
                node = node.left        # both targets are smaller
            else:
                return node             # split point == lowest common ancestor
#236. Lowest Common Ancestor of a Binary Tree
class Solution(object):
    def lowestCommonAncestor(self, root, p, q):
        """Recursive LCA in a general binary tree (the nodes themselves,
        not their values; p and q are assumed to be present)."""
        if root is None or root is p or root is q:
            return root
        in_left = self.lowestCommonAncestor(root.left, p, q)
        in_right = self.lowestCommonAncestor(root.right, p, q)
        if in_left and in_right:
            return root                 # p and q straddle this node
        return in_left or in_right      # whichever side found something
#270. Closest Binary Search Tree Value
class Solution(object):
    def closestValue(self, root, target):
        """Return the BST value closest to target (exact hit short-circuits)."""
        best = root.val          # seed with a real tree value, not a sentinel
        node = root
        while node:
            if node.val == target:
                return node.val
            if abs(node.val - target) < abs(best - target):
                best = node.val
            node = node.right if node.val < target else node.left
        return best
#272. Closest Binary Search Tree Value II
class Solution(object):
    def closestKValues(self, root, target, k):
        """Return the k BST values closest to ``target``.

        An iterative in-order walk (ascending order) splits values into
        array1 (<= target) and array2 (> target); the answer is merged from
        the inner boundary of the two arrays.  Assumes k <= total node
        count, as the problem guarantees.
        """
        array1,array2=[],[]
        stack=[]
        cur=root
        # Standard iterative in-order traversal.
        while cur or stack:
            if cur:
                stack.append(cur)
                cur=cur.left
            else:
                parent=stack.pop()
                if parent.val<=target:
                    array1.append(parent.val)
                else:
                    # append here and pop(0) later is more efficient,
                    # as not all vals needs to pop.
                    array2.append(parent.val)
                    # More than k values above target can never all be
                    # needed, so stop the traversal early.
                    if len(array2)>k: break
                cur=parent.right
        #print array1, array2
        res=[]
        # Merge: repeatedly take whichever boundary value is closer --
        # the tail of array1 or the head of array2.
        for i in range(k):
            if not array1 and not array2: break #not going to happen
            elif not array2: res.append(array1.pop())
            elif not array1: res.append(array2.pop(0))
            elif target-array1[-1]<array2[0]-target: res.append(array1.pop())
            else: res.append(array2.pop(0))
        ### best - assume k is always valid, that is: k <= total nodes.
        #for i in range(k):
        #    if not res2 or (res1 and target-res1[-1]<res2[-1]-target):
        #        res.append(res1.pop())
        #    else: res.append(res2.pop())
        return res
#root=TreeNode(2)
#root.left =TreeNode(1)
#root.right=TreeNode(4)
#root.right.left =TreeNode(3)
#root.right.right=TreeNode(5)
#a=Solution()
#print a.closestKValues(root,6,3) #[5,4,3]
#a=Solution()
#print a.closestKValues(TreeNode(1),0,1)
#146. LRU Cache - see ds implementation
#308. Range Sum Query 2D - Mutable
class NumMatrix(object):
    """Mutable 2D range sums via a 2D binary indexed (Fenwick) tree.

    ``update`` and ``sumRegion`` both cost O(log n * log m).
    """
    def __init__(self, matrix):
        if not matrix: return ### o/w runtime error
        self.matrix=matrix
        self.n,self.m=len(matrix),len(matrix[0])
        # The tree is 1-indexed in both dimensions, hence the +1 sizes.
        self.tree=[[0]*(self.m+1) for i in range(self.n+1)]
        ### initialize tree
        for x in range(self.n):
            for y in range(self.m):
                self.add(x+1,y+1,matrix[x][y])
        #for l in self.tree: print l
    def lowbit(self,x):
        # Lowest set bit of x: the span covered by tree node x.
        return x&(-x)
    def add(self,x,y,val):
        # Add ``val`` at 1-based cell (x, y), propagating to covering nodes.
        while x<=self.n: ### not x<n
            z=y
            while z<=self.m: ### not z<n
                self.tree[x][z]+=val
                z+=self.lowbit(z)
            x+=self.lowbit(x)
    def sum(self,x,y):
        # Prefix sum of the rectangle (1,1)..(x,y), 1-based inclusive.
        res=0
        while x>0: # not matter if x>=0
            z=y
            while z>0: # not matter if z>=0
                res+=self.tree[x][z]
                z-=self.lowbit(z)
            x-=self.lowbit(x)
        return res
    def update(self,i,j,val):
        # Set 0-based cell (i, j) to ``val`` by adding the delta to the tree.
        delta=val-self.matrix[i][j]
        self.add(i+1,j+1,delta)
        self.matrix[i][j]=val ###
    def sumRegion(self,row1,col1,row2,col2):
        # Inclusion-exclusion over four prefix sums (0-based corners).
        return self.sum(row2+1,col2+1) \
               - self.sum(row2+1,col1) \
               - self.sum(row1,col2+1) \
               + self.sum(row1,col1)
#matrix = [
# [3, 0, 1, 4, 2],
# [5, 6, 3, 2, 1],
# [1, 2, 0, 1, 5],
# [4, 1, 0, 1, 7],
# [1, 0, 3, 0, 5]]
#numMatrix=NumMatrix(matrix)
#print numMatrix.sumRegion(2,1,4,3) #8
#numMatrix.update(3,2,2)
#print numMatrix.sumRegion(2,1,4,3) #10
#251. Flatten 2D Vector
class Vector2D(object):
    """Iterator over a list of lists, flattening it element by element."""
    def __init__(self, vec2d):
        self.v = vec2d
        self.x = 0   # index of the current inner list
        self.y = 0   # position inside that list
    def next(self):
        """Return the next element, or None when the vector is exhausted."""
        if not self.hasNext():
            return None
        value = self.v[self.x][self.y]
        self.y += 1
        return value
    def hasNext(self):
        """Advance past exhausted (possibly empty) inner lists, then report."""
        while self.x < len(self.v) and self.y == len(self.v[self.x]):
            self.x += 1
            self.y = 0
        return True if self.x < len(self.v) else False
#281. Zigzag Iterator
class ZigzagIterator(object):
    """Alternate elements of v1 and v2; once one list runs out, the rest of
    the other list is produced in order."""
    def __init__(self, v1, v2):
        self.v = [v1, v2]
        self.x, self.y = 0, 0           # current list index / column index
        self.flag0, self.flag1 = 0, 0   # set once v1 / v2 is exhausted
    def next(self):
        """Return the next element, or None when both lists are exhausted."""
        if not self.hasNext():
            return None
        res = self.v[self.x][self.y]
        if self.flag0 == 0 and self.flag1 == 0:
            # Both lists alive: alternate; the column only advances after
            # the second list has been served for this round.
            if self.x == 0:
                self.x = 1
            else:
                self.x, self.y = 0, self.y + 1
        else:
            # Only one list left: consume it sequentially.
            self.y += 1
        return res
    def hasNext(self):
        while self.y == len(self.v[self.x]) and (self.flag0 == 0 or self.flag1 == 0):
            if self.x == 0:
                # v1 exhausted at column y: v2 has only consumed columns
                # < y, so its next element is at y -- do NOT advance y.
                # (Fix: the original advanced y on every switch, skipping
                # one element of v2 whenever v1 ran out first.)
                self.flag0 = 1
                self.x = 1
            else:
                # v2 exhausted at column y: v1 already consumed column y
                # this round, so its next element is at y + 1.
                self.flag1 = 1
                self.x = 0
                self.y += 1
        if self.flag0 == 0 or self.flag1 == 0:
            return True
        else:
            return False
#v1 = [1, 2]
#v2 = [3, 4, 5, 6]
#a=ZigzagIterator(v1,v2)
#while a.hasNext():
# print a.next(),
#20. Valid Parentheses
class Solution(object):
    def isValid(self, s):
        """True iff every bracket in ``s`` is closed in the correct order.

        Any character that is not a matching closer for the stack top makes
        the string invalid.
        """
        closes = {')': '(', ']': '[', '}': '{'}
        opened = []
        for ch in s:
            if ch in closes:
                # A closer must match the most recent unmatched opener.
                if not opened or opened.pop() != closes[ch]:
                    return False
            else:
                opened.append(ch)
        return not opened
#a=Solution()
#print a.isValid("()[]{}")
#print a.isValid("([)]")
#66. Plus One
class Solution(object):
    def plusOne(self, digits):
        """Return the digits (most-significant first) of ``digits`` + 1.

        Right-to-left addition with a carry; a leading 1 is prepended on
        overflow, e.g. [9, 9] -> [1, 0, 0].
        """
        res = [0] * len(digits)  # pre-sized: faster than insert(0, ...)
        carry = 1
        for i in reversed(range(len(digits))):
            cur = carry + digits[i]
            # Fix: floor division -- plain ``/`` turned carry (and then the
            # produced digits) into floats under Python 3.
            carry = cur // 10
            res[i] = cur % 10
        if carry: res.insert(0, 1)
        return res
#279. Perfect Squares
class Solution(object):
    def numSquares(self,n):
        """Minimum count of perfect squares summing to n (bottom-up DP).

        dp[i] is the answer for i; each i tries every square j*j <= i.
        """
        if n < 2:
            return n  # 0 -> 0 squares, 1 -> [1]; also avoids dp[1] overrun
        dp = [100] * (n + 1)  # 100 acts as infinity (true answers are small)
        dp[0] = 0
        dp[1] = 1
        for i in range(2, n + 1):
            j = 1
            # Fix: bound j by i, not n -- the original scanned every square
            # up to n for each i, wasting work and relying on harmless but
            # fragile negative dp indices when j*j > i.
            while j * j <= i:
                dp[i] = min(dp[i], dp[i - j * j] + 1)
                j += 1
        return dp[-1]
#a=Solution()
#print a.numSquares(12) #3
#print a.numSquares(13) #2
#print a.numSquares(7691) #3
#print a.numSquares(8829) #2
#print a.numSquares(9975) #4
| [
"ly.protegee@gmail.com"
] | ly.protegee@gmail.com |
08fb7d4a8b60f9656534c8c19aa38ab3d8a6a448 | abeec076f89231c4dd589e84def8301e653d6e20 | /orders/migrations/0009_remove_order_cook.py | f119d17a90f1126e4fe5228f032a2bb623ef00a1 | [] | no_license | gibil5/pcm_restaurant | 1cde6ee2780d3aa39dbc26dd9583f8465a1ff13a | a56ec01c533ed2b6e198de9813f9518a3eca2d14 | refs/heads/master | 2020-08-29T20:10:13.606229 | 2019-12-01T19:48:47 | 2019-12-01T19:48:47 | 218,160,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | # Generated by Django 2.2.6 on 2019-11-15 20:09
from django.db import migrations
class Migration(migrations.Migration):
    """Remove the ``cook`` field from the ``order`` model."""
    dependencies = [
        ('orders', '0008_auto_20191115_1359'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='order',
            name='cook',
        ),
    ]
| [
"jrevilla55@gmail.com"
] | jrevilla55@gmail.com |
a6eddbcfb2a9ca0bb96202a98f76663c6b28ed92 | 88de1855cddc294bf7e23e000738b97e2ce8fe5d | /peek_core_user/server/UserImportApiABC.py | 93c4a9b1cb214e31ccc4ded908c89d4457828b92 | [] | no_license | Synerty/peek-core-user | cea121a5bc37552055eff7d9c25e621531435631 | 89c9b782a9f5c7ae042a1498062c30cc07efa8c8 | refs/heads/master | 2020-03-18T17:07:18.765974 | 2020-02-24T03:32:40 | 2020-02-24T03:32:40 | 135,007,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,509 | py | from abc import ABCMeta, abstractmethod
from twisted.internet.defer import Deferred
class UserImportApiABC(metaclass=ABCMeta):
    """Abstract bulk-import API for the internal user/group database."""
    @abstractmethod
    def importInternalUsers(self, importHash: str, usersEncodedPayload: bytes) -> Deferred:
        """ Import Internal Users

        Add, replace and remove users in the internal DB.

        :param importHash: A string representing this group of items to import
        :param usersEncodedPayload: A List[ImportInternalUserTuple] to import,
            wrapped in a serialised payload.

            Wrap the tuples list with ::

                dispsVortexMsg = Payload(tuples=users).toVortexMsg()

            Calling this method with no tuples will delete all items with
            this importHash.
        :return: A deferred that fires when the users are loaded
        """
    @abstractmethod
    def importInternalGroups(self, importHash: str, groupsEncodedPayload: bytes) -> Deferred:
        """ Import Internal Groups

        Add, replace and remove groups in the internal DB.

        :param importHash: A string representing this group of items to import
        :param groupsEncodedPayload: A List[ImportInternalGroupTuple] to import,
            wrapped in a serialised payload.

            Wrap the tuples list with ::

                dispsVortexMsg = Payload(tuples=groups).toVortexMsg()

            Calling this method with no tuples will delete all items with
            this importHash.
        :return: A deferred that fires when the groups are loaded
        """
| [
"jarrod.chesney@synerty.com"
] | jarrod.chesney@synerty.com |
80b4417afbb29d716ffbdac552a7a325410ed080 | 25c531d2acc0218cc8fc3e275db4c2042dbc3a96 | /exam2/min_max_valid.py | 71429f561fd5401c5e2b1dd5c3842876b9877c95 | [] | no_license | anaswara-97/python_project | 230242287886479ec134cb48cdfbacb70e9c9228 | efd0156d0c67b9686f52638b8b3264eb6bdef23d | refs/heads/master | 2023-08-16T16:16:11.063927 | 2021-09-20T14:24:50 | 2021-09-20T14:24:50 | 402,699,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py |
import re
x=input("enter string: ")
n='[A-Z]\w[a-zA-Z0-9][A-Z]{5,10}$'
m=re.fullmatch(n,x)
if m is not None:
print("valid")
else:
print("invalid") | [
"warrior123@gmail.com"
] | warrior123@gmail.com |
dc5c0a4ef33f5f4687f9d3e60f82b0952c5ad268 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/managednetworkfabric/azure-mgmt-managednetworkfabric/generated_samples/internet_gateways_list_by_subscription_maximum_set_gen.py | 17d2cd72ae957d32e3c0592b68efc2a6404ab9b0 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,648 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.managednetworkfabric import ManagedNetworkFabricMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-managednetworkfabric
# USAGE
python internet_gateways_list_by_subscription_maximum_set_gen.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """List every internet gateway in the subscription and print each item."""
    client = ManagedNetworkFabricMgmtClient(
        credential=DefaultAzureCredential(),
        # Placeholder subscription id from the generated sample; replace
        # with a real subscription before running.
        subscription_id="1234ABCD-0A1B-1234-5678-123456ABCDEF",
    )
    # list_by_subscription returns an iterable of gateway resources.
    response = client.internet_gateways.list_by_subscription()
    for item in response:
        print(item)
# x-ms-original-file: specification/managednetworkfabric/resource-manager/Microsoft.ManagedNetworkFabric/stable/2023-06-15/examples/InternetGateways_ListBySubscription_MaximumSet_Gen.json
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | Azure.noreply@github.com |
a99189c5c97d330b6b9d22b893860bde4226486a | 5d2f4c05ba0ac80370ed1d03dc1fde6c2a6d53b3 | /common/urls.py | 23e6e24078ba7d95f9d4f7bf99fe1e7411558010 | [] | no_license | yindashan/dwz | af7f1d2a0ca5edad0beac15a71861701c7f0c8ab | eca18a91d882facae93e7d3ec66f4c943f9eba32 | refs/heads/master | 2020-04-18T20:07:31.519664 | 2014-10-15T02:31:28 | 2014-10-15T02:31:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | #!usr/bin/env python
#coding: utf-8
from django.conf.urls import patterns
from django.conf.urls import url
# Route table for the ``common`` app (old-style dotted-string view refs).
# NOTE(review): the name "common_index" is registered twice (for /index/
# and for /nav_index/); reversing that name resolves only one of them --
# confirm which is intended and rename the other.
urlpatterns = patterns('',
    url(r'^$', 'common.views.index',name="index"),
    url(r'^index/$', 'common.views.index',name="common_index"),
    url(r'^login/$', 'common.views.login', name="common_login"),
    url(r'^logout/$', 'common.views.logout', name="common_logout"),
    url(r'^success/$', 'common.views.success',name="common_success"),
    url(r'^nav_index/$', 'common.views.nav_index', name="common_index"),
    url(r'^nav_resource/$', 'common.views.nav_resource', name="common_resource"),
    url(r'^nav_log/$', 'common.views.nav_log', name="common_log"),
    url(r'^nav_ippool/$', 'common.views.nav_ippool', name="common_ippool"),
    url(r'^nav_user/$', 'common.views.nav_user', name="common_user"),
    url(r'^nav_authority/$', 'common.views.nav_authority', name="common_authority"),
    url(r'^main/$', 'common.views.main', name="common_main"),
) | [
"="
] | = |
d5ff5fb556f4adb297f78cc050836eea87ea143c | 9868f287cfa54a8ed6c67b91b59d4f09bbd9410c | /large_language_model/paxml/utils/select_text.py | 30115f754f6bce47da31347bddc489f62936f902 | [
"Apache-2.0"
] | permissive | mlcommons/training | 41c7e21ea074b5f5bb040d3602e621c3e987cc0e | 2f4a93fb4888180755a8ef55f4b977ef8f60a89e | refs/heads/master | 2023-09-05T12:45:53.020925 | 2023-08-03T15:43:54 | 2023-08-03T15:43:54 | 127,351,529 | 431 | 162 | Apache-2.0 | 2023-09-07T23:35:53 | 2018-03-29T21:56:06 | Python | UTF-8 | Python | false | false | 2,240 | py | """Script to randomly pick certain number of text from C4 dataset.
"""
import argparse
import time
import tensorflow as tf
import tensorflow_datasets as tfds
# Command-line flags: where to read the TFDS C4 split from and where to
# write the sampled raw text.
parser = argparse.ArgumentParser(
    description="Randomly pick text from C4 dataset.")
parser.add_argument(
    "--data_dir",
    type=str,
    default="",
    help="Path to tfds directory, which contains C4/../x.y.z.")
parser.add_argument(
    "--language",
    type=str,
    default="en",
    help="Language of dataset.")
parser.add_argument(
    "--version",
    type=str,
    default="3.0.1",
    help="Version of dataset.")
parser.add_argument(
    "--split",
    type=str,
    default="train",
    help="Split of dataset.")
parser.add_argument(
    "--num_examples",
    type=int,
    default=40000000,
    help="Number of examples to pick from dataset.")
parser.add_argument(
    "--output_text_file",
    type=str,
    default="",
    help="Path for output text file.")
args = parser.parse_args()
if __name__ == '__main__':
  tic = time.time()  # NOTE(review): captured but never reported
  ds_name = "c4/" + args.language + ":" + args.version
  # shuffle_files randomises which shards (and therefore examples) are read.
  ds = tfds.load(
      ds_name,
      split=args.split,
      shuffle_files=True,
      data_dir=args.data_dir)
  num_examples = 0
  max_text_length = 0
  total_text_length = 0
  num_lines = 0
  max_line_length = 0
  total_line_length = 0
  # Output is raw bytes: examples separated by a blank line.
  # NOTE(review): not closed on an exception path; a ``with`` would be safer.
  fout = open(args.output_text_file, "wb")
  for example in ds:
    text = example["text"].numpy()
    length = len(text)
    if length > max_text_length:
      max_text_length = length
    total_text_length += length
    fout.write(text)
    fout.write(b"\n\n")
    num_examples += 1
    if (num_examples % 10000) == 0:
      print(num_examples)  # progress heartbeat
    lines = text.split(b"\n")
    for line in lines:
      line_length = len(line)
      if line_length > max_line_length:
        max_line_length = line_length
      total_line_length += line_length
      num_lines += 1
    if num_examples >= args.num_examples:
      break
  fout.close()
  # NOTE(review): both averages divide by the observed counts and would
  # raise ZeroDivisionError on an empty dataset.
  print(
      "num_examples = ", num_examples,
      "max_length = ", max_text_length,
      "avg_length = ", total_text_length / num_examples)
  print(
      "num_lines = ", num_lines,
      "max_length = ", max_line_length,
      "avg_length = ", total_line_length / num_lines)
| [
"noreply@github.com"
] | mlcommons.noreply@github.com |
88d8c278e5cb2bcc73aba96487f527d9802dcc1f | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/containerservice/v20200101/list_managed_cluster_admin_credentials.py | a4415be37a63f657ee0531031637b6bb9de397f8 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,176 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'ListManagedClusterAdminCredentialsResult',
'AwaitableListManagedClusterAdminCredentialsResult',
'list_managed_cluster_admin_credentials',
'list_managed_cluster_admin_credentials_output',
]
@pulumi.output_type
class ListManagedClusterAdminCredentialsResult:
    """
    The list of credential result response (admin kubeconfigs for a
    managed cluster).
    """
    def __init__(__self__, kubeconfigs=None):
        # Guard the generated constructor against a wrongly-typed argument.
        if kubeconfigs and not isinstance(kubeconfigs, list):
            raise TypeError("Expected argument 'kubeconfigs' to be a list")
        pulumi.set(__self__, "kubeconfigs", kubeconfigs)
    @property
    @pulumi.getter
    def kubeconfigs(self) -> Sequence['outputs.CredentialResultResponse']:
        """
        Base64-encoded Kubernetes configuration file.
        """
        return pulumi.get(self, "kubeconfigs")
class AwaitableListManagedClusterAdminCredentialsResult(ListManagedClusterAdminCredentialsResult):
    """Awaitable wrapper: ``await``-ing it yields the plain result object."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` makes __await__ a generator function,
        # which is what the await protocol requires.
        if False:
            yield self
        return ListManagedClusterAdminCredentialsResult(
            kubeconfigs=self.kubeconfigs)
def list_managed_cluster_admin_credentials(resource_group_name: Optional[str] = None,
                                           resource_name: Optional[str] = None,
                                           opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListManagedClusterAdminCredentialsResult:
    """
    Fetch the admin credentials (kubeconfigs) of a managed cluster via the
    ``listManagedClusterAdminCredentials`` invoke.

    :param str resource_group_name: The name of the resource group.
    :param str resource_name: The name of the managed cluster resource.
    """
    __args__ = dict()
    __args__['resourceGroupName'] = resource_group_name
    __args__['resourceName'] = resource_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default the provider version from the SDK utilities.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:containerservice/v20200101:listManagedClusterAdminCredentials', __args__, opts=opts, typ=ListManagedClusterAdminCredentialsResult).value
    return AwaitableListManagedClusterAdminCredentialsResult(
        kubeconfigs=__ret__.kubeconfigs)
@_utilities.lift_output_func(list_managed_cluster_admin_credentials)
def list_managed_cluster_admin_credentials_output(resource_group_name: Optional[pulumi.Input[str]] = None,
                                                  resource_name: Optional[pulumi.Input[str]] = None,
                                                  opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListManagedClusterAdminCredentialsResult]:
    """
    Output-typed variant of :func:`list_managed_cluster_admin_credentials`;
    the body is supplied by the ``lift_output_func`` decorator.

    :param str resource_group_name: The name of the resource group.
    :param str resource_name: The name of the managed cluster resource.
    """
    ...
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
3c5f03457f2f60fe39e5750b059b7bc60beee531 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02817/s229193665.py | 34f777ef254ec35ad793fc0a38161836c51398eb | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 88 | py | from sys import stdin
import math
s,t = stdin.readline().rstrip().split()
print(t + s) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
9e1bc8cffb5fd42b2baafbb9a70995f6cd284795 | 39329ae5773c9b4c1f9c91eec393507f5e8ae1c0 | /server/.history/server_20200307204844.py | c8fab0782eb750f449ebba0c2641d732d187e834 | [] | no_license | dobreandrei1/legal | 083267aae7faa10775e5a634679869fce0ac3136 | dd05fad8df599f9fc34f56628ebd8861f7a004b4 | refs/heads/master | 2021-09-08T20:16:29.926214 | 2020-03-08T09:24:04 | 2020-03-08T09:24:04 | 245,785,262 | 0 | 0 | null | 2021-09-03T00:42:33 | 2020-03-08T09:22:37 | Python | UTF-8 | Python | false | false | 1,681 | py | from pathlib import Path
from flask import Flask, render_template, request, send_file, send_from_directory, safe_join, abort, current_app
# from werkzeug import secure_filename
import pandas as pd
import os
import time
import json
from flask_cors import CORS
from haikunator import Haikunator
import unidecode
import PyPDF2
import unidecode
haikunator = Haikunator()
app = Flask(__name__)
CORS(app)
applicationVersion = 0
@app.route('/upload')
def upload_file():
    """Serve the upload form page (renders upload.html)."""
    return render_template('upload.html')
@app.route('/api/titles', methods = ['GET', 'POST'])
def get_titles():
    """Accept a PDF upload (POST) and dump page-0 text to the console.

    NOTE(review): work in progress -- it returns the integer 1 (not a
    valid Flask response body), and GET requests reach that return too.
    """
    if request.method == 'POST':
        f = request.files['file']
        filename = request.form['filename']
        # TODO: maybe check if file already exists and not save multiple times
        # - get list of all files
        # - if filename variable is a substr of any file name in folder: compare their contents
        # - if match don`t save file again but use that one
        name = filename + '.pdf'
        # NOTE(review): both branches assign the identical value, so this
        # exists-check currently has no effect.
        if Path(name).exists():
            name = filename + '.pdf'
        f.save(name)
        # NOTE(review): reads the hard-coded 'clauze.pdf', not the file
        # just saved as ``name`` -- confirm which document is intended.
        # The handle is never closed.
        pdfFileObject = open('clauze.pdf', 'rb')
        pdfReader = PyPDF2.PdfFileReader(pdfFileObject)
        pages = pdfReader.numPages
        pageObject = pdfReader.getPage(0)
        print(unidecode.unidecode(pageObject.extractText()))
        # NOTE(review): duplicate of the block above, and the print below
        # still uses ``pageObject`` from the first read, so the *1 objects
        # are effectively unused.
        pdfFileObject1 = open('clauze.pdf', 'rb')
        pdfReader1 = PyPDF2.PdfFileReader(pdfFileObject1)
        pages1 = pdfReader1.numPages
        pageObject1 = pdfReader1.getPage(0)
        print(unidecode.unidecode(pageObject.extractText()))
    return 1
if __name__ == '__main__':
app.run(debug = False, host='0.0.0.0')
| [
"dobreandrei1@yahoo.com"
] | dobreandrei1@yahoo.com |
4b4bc1e6daf1efdf2056529b8c8fc3498f56d5d1 | 304e75224229786ba64c6ef2124007c305019b23 | /src/easy/answer/decompress_run_length_encoded_list.py | aebe45e26dd4e6ae7aee6574b4b462cd68ecac45 | [] | no_license | Takuma-Ikeda/other-LeetCode | 9179a8100e07d56138fd3f3f626951195e285da2 | 499616d07011bee730b9967e9861e341e62d606d | refs/heads/master | 2023-04-14T06:09:35.341039 | 2023-04-10T02:29:18 | 2023-04-10T02:29:18 | 226,260,312 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,008 | py | from typing import List
class Solution:
    def decompressRLElist(self, nums: List[int]) -> List[int]:
        """Decode run-length pairs [freq1, val1, freq2, val2, ...] into the
        expanded flat list."""
        decoded = []
        # Step over the pairs: even index = repeat count, odd index = value.
        for idx in range(0, len(nums), 2):
            decoded.extend([nums[idx + 1]] * nums[idx])
        return decoded
# 模範解答
# https://leetcode.com/problems/decompress-run-length-encoded-list/discuss/478426/Python-3-(one-line)-(beats-100)
'''
class Solution:
def decompressRLElist(self, nums: List[int]) -> List[int]:
l, result = len(nums), []
# range ※ 引数 3 つバージョン
# 第一引数: start
# 題ニ引数: stop
# 第三引数: step (オプション) 数字を刻み方を指定できる
for i in range(0, l, 2):
# 繰り返し回数 * [値]
result.extend(nums[i] * [nums[i + 1]])
return result
'''
| [
"el.programdear@gmail.com"
] | el.programdear@gmail.com |
124b5269c241ba565b5083c8361be7607ad63332 | 6a33cb94d4af1d8a7329ddc6c9d42f870c35bb2f | /python/euler39.py | 2de1687b9949fd54f3d78119582b7a029ffaeed8 | [] | no_license | vochong/project-euler | 836321cc8e7d2e7cdf22b3b136d44dcba74a8701 | 6a0c7103861ff825bf84800b6e2e62819a41e36d | refs/heads/master | 2020-04-29T10:41:48.487159 | 2018-09-19T00:13:34 | 2018-09-19T00:13:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | def euler39():
"""
If p is the perimeter of a right angle triangle with integral length
sides, {a,b,c}, there are exactly three solutions for p = 120.
{20,48,52}, {24,45,51}, {30,40,50}
For which value of p <= 1000, is the number of solutions maximised?
"""
count = [0] * 1001
for a in range(1, 333):
for b in range(a+1, 500):
c = (a**2 + b**2) ** 0.5
p = a + b + int(c)
if int(c) != c: continue
if p > 1000: break
count[p] += 1
return count.index(max(count))
if __name__ == "__main__":
print euler39()
| [
"kueltz.anton@gmail.com"
] | kueltz.anton@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.