blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bf7898ea922ae54fcc5710394cda47509e7d9b00
|
5ffdf4ddee5700e6bb3b062a07c1a9cf7e6adbc1
|
/Algorithms/Implementation/drawing_book.py
|
4fe8eda68f7768cb7f6dd22dcff00be2ab616c74
|
[
"MIT"
] |
permissive
|
byung-u/HackerRank
|
23df791f9460970c3b4517cb7bb15f615c5d47d0
|
4c02fefff7002b3af774b99ebf8d40f149f9d163
|
refs/heads/master
| 2021-05-05T13:05:46.722675
| 2018-03-30T08:07:36
| 2018-03-30T08:07:36
| 104,960,152
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 192
|
py
|
#!/usr/bin/env python3
import sys
def solve(n, p):
    """Return the minimum number of page turns to reach page p in an
    n-page book, opening from either the front or the back.

    Each turn reveals two pages, so turns-from-front is p // 2 and
    turns-from-back is n // 2 - p // 2.
    """
    turns_from_front = p // 2
    turns_from_back = n // 2 - p // 2
    return min(turns_from_front, turns_from_back)
# Read the book size and the target page from stdin, then print the answer.
n = int(input().strip())  # total number of pages in the book
p = int(input().strip())  # page the reader wants to open to
result = solve(n, p)
print(result)
|
[
"iam.byungwoo@gmail.com"
] |
iam.byungwoo@gmail.com
|
8f617fa52dcc342dbaa43fd68f2228f83369ceb1
|
e5e82783627e934809d59c3ac9eebee2f032555b
|
/build/kobuki_bumper2pc/catkin_generated/generate_cached_setup.py
|
0ae9bc3df2a185fa36e524e215549e7664f15eee
|
[] |
no_license
|
xy919/my_simulation
|
b5f312e811afe627186c050950b5b5a3f087f9c1
|
1258e6480ec6c572440e48cd2b4eb7124005f603
|
refs/heads/master
| 2023-03-19T15:17:52.925713
| 2021-03-01T12:25:02
| 2021-03-01T12:25:02
| 332,885,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,332
|
py
|
# -*- coding: utf-8 -*-
# Auto-generated by catkin: writes a cached environment setup script
# (setup_cached.sh) for the kobuki_bumper2pc package and marks it executable.
from __future__ import print_function

import os
import stat
import sys

# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in '/home/xy/simulation_ws/devel;/opt/ros/melodic'.split(';'):
        python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script

# `code` is a list of shell lines capturing the package's environment.
code = generate_environment_script('/home/xy/simulation_ws/devel/.private/kobuki_bumper2pc/env.sh')

output_filename = '/home/xy/simulation_ws/build/kobuki_bumper2pc/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    # print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))

# add the user-executable bit so the generated script can be run directly
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
[
"xinyin919@gmail.com"
] |
xinyin919@gmail.com
|
4ea9deba416abbdc26d547e948fd1ae9db3887bf
|
3b728b25f77b5b00a8e0f6acdd6a9ef066e29cd0
|
/annotation.py
|
ef0e8b40e7c1b7110eb21a6d1fb23979a9c45e4c
|
[] |
no_license
|
iceshade000/VrelationT
|
58d05a340bc366b5dd5a5e02168dce74dc4fb6d8
|
f675f5c80bfc75cfc9449bef0acebeb8c54789db
|
refs/heads/master
| 2021-01-23T07:51:24.338181
| 2017-03-28T13:02:31
| 2017-03-28T13:02:31
| 86,460,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,094
|
py
|
#coding:utf-8
# Python 2 script: extracts image names and (bbox0, bbox1, entity0,
# relation, entity1) tuples from MATLAB annotation files and saves each
# field as a .npy array. The deep [i][0][0][...] chains index into the
# nested cell-array structure produced by scipy.io.loadmat.
import scipy.io as sio
import numpy as np

# NOTE(review): "annotaion_train" is a typo kept as-is (renaming is a code change).
annotaion_train='./dataset/annotation_train.mat'
annotation_test='./dataset/annotation_test.mat'
train=sio.loadmat(annotaion_train)['annotation_train'][0]
test=sio.loadmat(annotation_test)['annotation_test'][0]

# Collect image file names; counts 4000/1000 are hard-coded dataset sizes.
train_img_name=[]
test_img_name=[]
for i in range(0,4000):
    train_img_name.append(train[i][0][0][0][0])
for i in range(0,1000):
    test_img_name.append(test[i][0][0][0][0])
print train_img_name[0]
np.save('./dataset/train_img_name.npy',train_img_name)
np.save('./dataset/test_img_name.npy',test_img_name)
'''
print np.shape(train[4][0][0][1][0])[0]
temp= train[4][0][0][1][0]
print temp[11]
#print np.shape(temp)[0]
train_bbox0=temp[11][0][0][0][0]
train_bbox1=temp[11][0][0][1][0]
train_entity0=temp[11][0][0][2][0][0][0]
train_relation=temp[11][0][0][2][0][1][0]
train_entity1=temp[11][0][0][2][0][2][0]
print train_entity1
print np.shape(temp)[0]
train_bbox0.append(temp[1][0][0][0][0])
train_bbox1.append(temp[1][0][0][1][0])
train_entity0.append(temp[1][0][0][2][0][0][0])
train_relation.append(temp[1][0][0][2][0][1][0])
train_entity1.append(temp[1][0][0][2][0][2][0])
'''
# Flatten every relation record of the training set into parallel lists.
train_relation_img=[]
train_bbox0=[]
train_bbox1=[]
train_entity0=[]
train_entity1=[]
train_relation=[]
for i in range (0,4000):
    # only entries with a relation sub-array (length 2) carry relations
    if len(train[i][0][0])==2:
        temp=train[i][0][0][1][0]
        # NOTE(review): range(0, len(temp)-1) skips the LAST relation of each
        # image — possible off-by-one; confirm whether that was intentional.
        for j in range(0,len(temp)-1):
            train_relation_img.append(i) #label
            train_bbox0.append(temp[j][0][0][0][0])
            train_bbox1.append(temp[j][0][0][1][0])
            train_entity0.append(temp[j][0][0][2][0][0][0])
            train_relation.append(temp[j][0][0][2][0][1][0])
            train_entity1.append(temp[j][0][0][2][0][2][0])
np.save('./dataset/train_relation_img.npy',train_relation_img)
np.save('./dataset/train_bbox0.npy',train_bbox0)
np.save('./dataset/train_bbox1.npy',train_bbox1)
np.save('./dataset/train_entity0.npy',train_entity0)
np.save('./dataset/train_entity1.npy',train_entity1)
np.save('./dataset/train_relation.npy',train_relation)

# Same flattening for the test set.
test_relation_img=[]
test_bbox0=[]
test_bbox1=[]
test_entity0=[]
test_entity1=[]
test_relation=[]
for i in range (0,1000):
    if len(test[i][0][0])==2:
        temp=test[i][0][0][1][0]
        # NOTE(review): same possible off-by-one as the training loop above.
        for j in range(0,len(temp)-1):
            test_relation_img.append(i) #label
            test_bbox0.append(temp[j][0][0][0][0])
            test_bbox1.append(temp[j][0][0][1][0])
            test_entity0.append(temp[j][0][0][2][0][0][0])
            test_relation.append(temp[j][0][0][2][0][1][0])
            test_entity1.append(temp[j][0][0][2][0][2][0])
np.save('./dataset/test_relation_img.npy',test_relation_img)
np.save('./dataset/test_bbox0.npy',test_bbox0)
np.save('./dataset/test_bbox1.npy',test_bbox1)
np.save('./dataset/test_entity0.npy',test_entity0)
np.save('./dataset/test_entity1.npy',test_entity1)
np.save('./dataset/test_relation.npy',test_relation)

# Sanity prints of the first flattened record.
print train_relation_img[0]
print train_bbox0[0]
print train_bbox1[0]
print train_entity0[0]
print train_relation[0]
#np.save('./dataset/relation2vec.npy',relation2vec)
|
[
"857332641@qq.com"
] |
857332641@qq.com
|
a3f4a3764a42b6ee37636c45e0920a7a19fee253
|
687928e5bc8d5cf68d543005bb24c862460edcfc
|
/nssrc/com/citrix/netscaler/nitro/resource/config/filter/filterhtmlinjectionparameter.py
|
6b6cbf7bfbed029b7b9528ec317458d2c799b8c2
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] |
permissive
|
mbs91/nitro
|
c6c81665d6abd04de8b9f09554e5e8e541f4a2b8
|
be74e1e177f5c205c16126bc9b023f2348788409
|
refs/heads/master
| 2021-05-29T19:24:04.520762
| 2015-06-26T02:03:09
| 2015-06-26T02:03:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,945
|
py
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class filterhtmlinjectionparameter(base_resource) :
    """Configuration for the HTML injection parameter resource.

    Exposes rate/frequency/strict/htmlsearchlen as plain pass-through
    properties over private backing fields, plus classmethod wrappers
    around the generic NITRO update/unset/get operations.
    """

    def __init__(self) :
        # backing fields for the four configurable attributes
        self._rate = 0
        self._frequency = 0
        self._strict = ""
        self._htmlsearchlen = 0

    @property
    def rate(self) :
        """For a rate of x, HTML injection is done for 1 out of x policy
        matches. Default value: 1. Minimum length: 1."""
        return self._rate

    @rate.setter
    def rate(self, rate) :
        self._rate = rate

    @property
    def frequency(self) :
        """For a frequency of x, HTML injection is done at least once per
        x milliseconds. Default value: 1. Minimum length: 1."""
        return self._frequency

    @frequency.setter
    def frequency(self, frequency) :
        self._frequency = frequency

    @property
    def strict(self) :
        """Searching for <html> tag. If enabled, HTML injection does not
        insert the prebody or postbody content unless the <html> tag is
        found. Default: ENABLED. Possible values: ENABLED, DISABLED."""
        return self._strict

    @strict.setter
    def strict(self, strict) :
        self._strict = strict

    @property
    def htmlsearchlen(self) :
        """Number of characters in the HTTP body in which to search for the
        <html> tag if strict mode is set. Default: 1024. Minimum length: 1."""
        return self._htmlsearchlen

    @htmlsearchlen.setter
    def htmlsearchlen(self, htmlsearchlen) :
        self._htmlsearchlen = htmlsearchlen

    def _get_nitro_response(self, service, response) :
        """Convert a raw NITRO response into resource objects; raise
        nitro_exception on API errors (clearing the session on code 444)."""
        result = service.payload_formatter.string_to_resource(filterhtmlinjectionparameter_response, response, self.__class__.__name__)
        if result.errorcode != 0 :
            if result.errorcode == 444 :
                service.clear_session(self)
            if result.severity :
                # only severity ERROR is fatal
                if result.severity == "ERROR" :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            else :
                raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
        return result.filterhtmlinjectionparameter

    def _get_object_name(self) :
        """This global resource has no object identifier."""
        return None

    @classmethod
    def update(cls, client, resource) :
        """Use this API to update filterhtmlinjectionparameter.

        Note: a list argument is silently ignored (returns None), matching
        the original behaviour.
        """
        if type(resource) is not list :
            updateresource = filterhtmlinjectionparameter()
            updateresource.rate = resource.rate
            updateresource.frequency = resource.frequency
            updateresource.strict = resource.strict
            updateresource.htmlsearchlen = resource.htmlsearchlen
            return updateresource.update_resource(client)

    @classmethod
    def unset(cls, client, resource, args) :
        """Use this API to unset the properties of filterhtmlinjectionparameter.
        Properties that need to be unset are specified in the args array."""
        if type(resource) is not list :
            unsetresource = filterhtmlinjectionparameter()
            return unsetresource.unset_resource(client, args)

    @classmethod
    def get(cls, client, name="", option_="") :
        """Fetch all filterhtmlinjectionparameter resources configured on
        the NetScaler. A non-empty name returns None (original behaviour)."""
        if not name :
            obj = filterhtmlinjectionparameter()
            response = obj.get_resources(client, option_)
            return response

    class Strict:
        # allowed values for the `strict` attribute
        ENABLED = "ENABLED"
        DISABLED = "DISABLED"
class filterhtmlinjectionparameter_response(base_response) :
    """Response envelope for filterhtmlinjectionparameter API calls.

    Holds the NITRO status fields plus a list of `length` pre-allocated
    resource objects. (The original assigned an empty list to
    `filterhtmlinjectionparameter` and immediately overwrote it — the
    redundant first assignment is removed here.)
    """
    def __init__(self, length=1) :
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # pre-populate with `length` empty resource objects
        self.filterhtmlinjectionparameter = [filterhtmlinjectionparameter() for _ in range(length)]
|
[
"bensassimaha@gmail.com"
] |
bensassimaha@gmail.com
|
64f07323eadfa86a13e53dee8c2cf7b9c5f8cc5d
|
126a17b567fe9657340270cd57b11e7c91909edb
|
/EFA/efa_files/run_efa/run_efa_10obs_Z500_12Z.py
|
9b69a59703fd7fd1699fe3707bd85067ddf1907e
|
[] |
no_license
|
bopopescu/EFA-code
|
06d072e1138eae39e5bf75538e8d19256ae7c4c0
|
4f4ceb10b354b21770a28af3b61d2399343686b7
|
refs/heads/master
| 2022-11-24T09:48:30.731060
| 2018-09-21T01:19:27
| 2018-09-21T01:19:27
| 282,699,352
| 0
| 0
| null | 2020-07-26T17:27:25
| 2020-07-26T17:27:25
| null |
UTF-8
|
Python
| false
| false
| 6,577
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 16 09:56:29 2017

@author: stangen

Reads a GEFS ensemble netCDF file, assimilates ten synthetic Z500
observations over the southeastern US with EnSRF (ensemble square-root
filter), and writes the posterior ensemble to a new netCDF file.
"""
from netCDF4 import Dataset, num2date, date2num
import numpy as np
from nicks_files.operational_cfsv2 import get_cfsv2_ensemble
from datetime import datetime, timedelta
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import efa_files.cfs_utilities_st as ut
import time
import os
#from old_ensemble_verification import error_vs_spread
# Luke's (super useful) assimilation tools:
from efa_xray.state.ensemble import EnsembleState
from efa_xray.observation.observation import Observation
from efa_xray.assimilation.ensrf import EnSRF

# directory where the ensemble of all times is
infile = '/home/disk/hot/stangen/Documents/GEFS/ensembles' + \
         '/2017090600/2017090600_21mem_10days.nc'

# full set of state variables loaded into the ensemble state
# (only the Z500 observations below are assimilated, but every listed
# variable is updated by the filter)
vrbls=['Z500','T500','RH500','U500','V500','Z700','T700','RH700','U700','V700', \
       'Z850','T850','RH850','U850','V850','Z925','T925','RH925','U925','V925', \
       'Z1000','T1000','RH1000','U1000','V1000','T2M','RH2M','U10M','V10M', \
       'PWAT','MSLP','P6HR']

# loading/accessing the netcdf data
with Dataset(infile,'r') as ncdata:
    times = ncdata.variables['time']
    ftimes = num2date(times[:],
                      times.units)
    lats = ncdata.variables['lat'][:]
    lons = ncdata.variables['lon'][:]
    mems = ncdata.variables['ens'][:]
    #print(ncdata.variables)

    # storing the variable data in a dict (state?)
    allvars = {}
    for var in vrbls:
        allvars[var] = (['validtime','y','x','mem'],
                        ncdata.variables[var][:])

lonarr, latarr = np.meshgrid(lons, lats)

# Package into an EnsembleState object knowing the state and metadata
statecls = EnsembleState.from_vardict(allvars,
                                      {'validtime' : ftimes,
                                       'lat' : (['y','x'], latarr),
                                       'lon' : (['y','x'], lonarr),
                                       'mem' : mems,
                                       })

# Ten synthetic Z500 obs (value in m, 2000 km localization radius).
# Coordinates are slightly off station points: if exact coordinates in the
# lat/lon, interpolate will fail.
#key west
ob1 = Observation(value=5900, time=datetime(2017,9,6,12),lat=24.55,lon=278.21,
                  obtype = 'Z500', localize_radius=2000, assimilate_this=True,
                  error=10)
#Miami
ob2 = Observation(value=5900, time=datetime(2017,9,6,12),lat=25.75,lon=279.62,
                  obtype = 'Z500', localize_radius=2000, assimilate_this=True,
                  error=10)
#Tampa Bay
ob3 = Observation(value=5870, time=datetime(2017,9,6,12),lat=27.70,lon=277.6,
                  obtype = 'Z500', localize_radius=2000, assimilate_this=True,
                  error=10)
#Tallahassee
ob4 = Observation(value=5850, time=datetime(2017,9,6,12),lat=30.45,lon=275.7,
                  obtype = 'Z500', localize_radius=2000, assimilate_this=True,
                  error=10)
#Jacksonville
ob5 = Observation(value=5860, time=datetime(2017,9,6,12),lat=30.50,lon=278.3,
                  obtype = 'Z500', localize_radius=2000, assimilate_this=True,
                  error=10)
#BMX
ob6 = Observation(value=5780, time=datetime(2017,9,6,12),lat=33.16,lon=273.24,
                  obtype = 'Z500', localize_radius=2000, assimilate_this=True,
                  error=10)
#Charleston
ob7 = Observation(value=5840, time=datetime(2017,9,6,12),lat=32.90,lon=279.97,
                  obtype = 'Z500', localize_radius=2000, assimilate_this=True,
                  error=10)
#LIX Slidell
ob8 = Observation(value=5850, time=datetime(2017,9,6,12),lat=30.34,lon=270.17,
                  obtype = 'Z500', localize_radius=2000, assimilate_this=True,
                  error=10)
#Jackson
ob9 = Observation(value=5820, time=datetime(2017,9,6,12),lat=32.32,lon=269.92,
                  obtype = 'Z500', localize_radius=2000, assimilate_this=True,
                  error=10)
#Nashville
ob10 = Observation(value=5690, time=datetime(2017,9,6,12),lat=36.25,lon=273.43,
                   obtype = 'Z500', localize_radius=2000, assimilate_this=True,
                   error=10)

# Put the observations into a list for EnSRF
observations = []
observations.append(ob1)
observations.append(ob2)
observations.append(ob3)
observations.append(ob4)
observations.append(ob5)
observations.append(ob6)
observations.append(ob7)
observations.append(ob8)
observations.append(ob9)
observations.append(ob10)

# Put the state class object and observation objects into EnSRF object
# ('GC' = Gaspari-Cohn localization)
assimilator = EnSRF(statecls, observations, loc='GC')
print(assimilator)

# Update the prior with EFA- post_state is an EnsembleState object
post_state, post_obs = assimilator.update()
state=post_state

outfile = '/home/disk/hot/stangen/Documents/GEFS/posterior/' + \
          '2017090600/2017090600_21mem_10days_Z500_12Z_efa.nc'
tunit='seconds since 1970-01-01'

# Write ensemble forecast to netcdf
with Dataset(outfile,'w') as dset:
    dset.createDimension('time',None)
    dset.createDimension('lat',state.ny())
    dset.createDimension('lon',state.nx())
    dset.createDimension('ens',state.nmems())
    dset.createVariable('time','i4',('time',))
    dset.createVariable('lat','f8',('lat',))
    # NOTE(review): ('lon') is a plain string, not a 1-tuple; netCDF4 accepts
    # a string dimension name, but ('lon',) would match the other calls.
    dset.createVariable('lon','f8',('lon'))
    dset.createVariable('ens','i4',('ens',))
    dset.variables['time'].units = tunit
    dset.variables['lat'].units = 'degrees_north'
    dset.variables['lon'].units = 'degrees_east'
    dset.variables['ens'].units = 'member_number'
    dset.variables['time'][:] = date2num(state.ensemble_times(),tunit)
    dset.variables['lat'][:] = state['lat'].values[:,0]
    dset.variables['lon'][:] = state['lon'].values[0,:]
    dset.variables['ens'][:] = state['mem'].values
    for var in state.vars():
        print('Writing variable {}'.format(var))
        dset.createVariable(var, 'f8', ('time','lat','lon','ens',))
        dset.variables[var].units = ut.get_units(var)
        dset.variables[var][:] = state[var].values

#Get the required packages
#from netCDF4 import Dataset
#import numpy as np
#import matplotlib.pyplot as plt
#from mpl_toolkits.basemap import Basemap
#
##Import the ncfile, assign a file handle, indicate read-only
#my_example_nc_file = '/home/disk/hot/stangen/Documents/GEFS/ensembles/2017081400_21mem_1days.nc'
#fh = Dataset(my_example_nc_file, mode='r')
##Print the variables to see what we have available
#print(fh.variables)
|
[
"noreply@github.com"
] |
bopopescu.noreply@github.com
|
43c7c6ac1baef896e5789872e7d827d7ecb4bc12
|
78f3fe4a148c86ce9b80411a3433a49ccfdc02dd
|
/2019/01/graphics/iran-sites-20190111/graphic_config.py
|
56c2d07dff01c61202071cbe56a957059120a489
|
[] |
no_license
|
nprapps/graphics-archive
|
54cfc4d4d670aca4d71839d70f23a8bf645c692f
|
fe92cd061730496cb95c9df8fa624505c3b291f8
|
refs/heads/master
| 2023-03-04T11:35:36.413216
| 2023-02-26T23:26:48
| 2023-02-26T23:26:48
| 22,472,848
| 16
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
#!/usr/bin/env python
# Per-graphic configuration consumed by the NPR graphics rig.

import base_filters

# Google Doc that supplies this graphic's copy text.
COPY_GOOGLE_DOC_KEY = '10stgNr7Ft4jtW0e0myavT73aN8cRK2Nr4mKDyvCfcy0'

# whether the graphic ships a per-graphic assets bundle
USE_ASSETS = False

# Use these variables to override the default cache timeouts for this graphic
# DEFAULT_MAX_AGE = 20
# ASSETS_MAX_AGE = 300

# Jinja filters made available to the graphic's templates.
JINJA_FILTER_FUNCTIONS = base_filters.FILTERS
|
[
"ahurt@npr.org"
] |
ahurt@npr.org
|
aeacb6d509ed05af792bea60aeff5c2d067d0906
|
ee9655d3ffcdb70ae68692f400096b479b39d0f7
|
/Python/FindTheDivisors.py
|
f38eb6f14c3a39a31c1246611b15c24b401c02c4
|
[] |
no_license
|
yaelBrown/Codewars
|
4f123387b8c4ea6e55ec1ff5d2ae9b1d674c06cf
|
efa10770b593e48579c256b9d6b69deede64e9ba
|
refs/heads/master
| 2020-11-27T16:02:43.409465
| 2020-03-20T00:59:49
| 2020-03-20T00:59:49
| 229,521,981
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,195
|
py
|
"""
Create a function named divisors/Divisors that takes an integer n > 1 and returns an array with all of the integer's divisors(except for 1 and the number itself), from smallest to largest. If the number is prime return the string '(integer) is prime' (null in C#) (use Either String a in Haskell and Result<Vec<u32>, String> in Rust).
Example:
divisors(12); #should return [2,3,4,6]
divisors(25); #should return [5]
divisors(13); #should return "13 is prime"
"""
def divisors(n):
    """Return all divisors of n (excluding 1 and n itself), smallest to largest.

    If n is prime (no such divisors exist), return the string "<n> is prime".

    Fixes from the original: the parameter no longer shadows the builtin
    `int`, and the count-down loop with `del out[0]` + `reverse()` is
    replaced by a direct ascending comprehension with identical results.
    """
    found = [d for d in range(2, n) if n % d == 0]
    if not found:
        return "{} is prime".format(n)
    return found
# Ad-hoc demo / scratch lines left by the author.
# aa = 12
# print(aa,"this is the number:")
print(divisors(13))
empty = []
# print(empty == [])
"""
def divisors(n):
return [i for i in xrange(2, n) if not n % i] or '%d is prime' % n
def divisors(num):
l = [a for a in range(2,num) if num%a == 0]
if len(l) == 0:
return str(num) + " is prime"
return l
def divisors(integer):
return [n for n in range(2, integer) if integer % n == 0] or '{} is prime'.format(integer)
"""
|
[
"yaelrbrown@gmail.com"
] |
yaelrbrown@gmail.com
|
725c32768eaf5cb663de12aeb13f3cc23ab227c3
|
98cae8f4bfcab16a49e1f17ecf5324adf71cecf3
|
/TUScheduler_v0.6/TUScheduler.py
|
61f7a29ddd2337fb3391f44ae1f5cc0687ba5212
|
[] |
no_license
|
dknife/GUI_wxPython
|
b58b483db6a1278d7f98da9c48dba29ea2d6f705
|
86f8ee0bbed9339c839af9b3529ae809404d8daa
|
refs/heads/master
| 2020-04-08T11:21:56.011367
| 2019-01-18T05:07:46
| 2019-01-18T05:07:46
| 159,303,301
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,567
|
py
|
import wx
# Define the tab content as classes:
import TabIntro
import TabBasicInfo
import TabRoomInfo
import TabSolve # notebook page for solving the problem for generating the schedule
class CoreData:
    """Mutable state shared between all notebook tabs of the scheduler."""

    def __init__(self):
        # becomes True once the class-data entry step is complete
        self.bClassDataFinished = False
        # counters filled in by the basic-info tab
        self.nProfessors = 0
        self.nClassRooms = 0
        # per-class and per-professor records
        self.ClassUnits, self.ProfInfo = [], []
        # back-reference to the basic-info tab, set by MainFrame
        self.tabBasic = None
class MainFrame(wx.Frame):
    """Top-level window: a notebook with one tab per scheduling step."""

    def __init__(self):
        # Korean title: "Tongmyong Univ. Game Engineering timetable builder"
        wx.Frame.__init__(self, None, title="동명대학교 게임공학과 시간표 작성기")
        self.SetSize((1400,1024))

        # Create a panel and notebook (tabs holder)
        p = wx.Panel(self)
        nb = wx.Notebook(p)

        # shared state object handed to every tab
        self.CoreData = CoreData()

        # Create the tab windows
        self.tabIntro = TabIntro.TabIntro(nb)
        # the basic-info tab is also stored on CoreData so other tabs can reach it
        self.CoreData.tabBasic = self.tabBasic = TabBasicInfo.TabBasicInfo(nb, self.CoreData)
        self.tabRooms = TabRoomInfo.TabRoomInfo(nb, self.CoreData)
        self.tabSolve = TabSolve.TabSolve(nb, self.CoreData)

        # Add the windows to tabs and name them (Korean labels:
        # intro / basic info / blocked hours / generate timetable).
        nb.AddPage(self.tabIntro, "초기화면")
        nb.AddPage(self.tabBasic, "기본정보 입력")
        nb.AddPage(self.tabRooms, "강의불가 시간 입력")
        nb.AddPage(self.tabSolve, "강의 시간표 생성")

        # Set noteboook in a sizer to create the layout
        sizer = wx.BoxSizer()
        sizer.Add(nb, 1, wx.EXPAND)
        p.SetSizer(sizer)
# Script entry point: start the wx event loop and show the main window.
if __name__ == "__main__":
    app = wx.App()
    MainFrame().Show()
    app.MainLoop()
|
[
"young.min.kang@gmail.com"
] |
young.min.kang@gmail.com
|
78bf0a1fc5326bb6021a7f083099152d6b4ca6ba
|
71e18daf9e567792a6ce1ae243ba793d1c3527f0
|
/JaeKwi/gcd.py
|
207625581973656a1c2502362ea02c137925d3c4
|
[] |
no_license
|
ohjooyeong/python_algorithm
|
67b18d92deba3abd94f9e239227acd40788140aa
|
d63d7087988e61bc72900014b0e72603d0150600
|
refs/heads/master
| 2020-06-22T18:10:41.613155
| 2020-05-27T13:12:03
| 2020-05-27T13:12:03
| 197,767,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 733
|
py
|
# Brute-force greatest-common-divisor algorithm.
def gcd(a, b):
    """Return the GCD of positive integers a and b by counting down
    from min(a, b) until a common divisor is found."""
    candidate = min(a, b)
    # 1 divides everything, so the loop always terminates
    while a % candidate != 0 or b % candidate != 0:
        candidate -= 1
    return candidate
# Quick demos of gcd().
print(gcd(10, 12))
print(gcd(3, 6))
print(gcd(60, 24))
# Euclidean GCD algorithm:
# gcd(a, b) equals gcd(b, a % b), and gcd(x, 0) is x itself.
def ugcd(a, b):
    """Return the GCD of a and b via Euclid's algorithm.

    Prints each (a, b) pair as it goes, matching the recursive
    original's per-call trace.
    """
    while True:
        print(a, b)
        if b == 0:
            return a
        a, b = b, a % b
# Demo of the Euclidean version (also prints its trace).
print(ugcd(10, 12))
# n-th Fibonacci number
# [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144 ---]
def fibo(n):
    """Return the n-th Fibonacci number (fibo(0)=0, fibo(1)=1).

    Iterative O(n) replacement for the original double recursion,
    which was O(phi^n). For n <= 1 it returns n, exactly as before.
    """
    if n <= 1:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
# Fibonacci demos.
print(fibo(7))
print(fibo(10))
|
[
"brb1111@naver.com"
] |
brb1111@naver.com
|
50c9356a07e3bda83163c31d013afb98c6582e21
|
20f90c522b05d0c14ba58ec0d3d783b0ebe4b4e5
|
/gps/gps_server/map/PCDao.py
|
5838dee97259fc2d9ddb497b06820717b0ff7d67
|
[
"Apache-2.0"
] |
permissive
|
gustavodsf/js_projects
|
73d0e1973a57c333a07e48aa9cb17e10ff253973
|
91f045084b73b307b2932eb5e58b9ec60bcaca3b
|
refs/heads/main
| 2023-04-28T02:43:58.646124
| 2021-05-15T21:02:39
| 2021-05-15T21:02:39
| 367,727,516
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,392
|
py
|
import PostgresDB
import PC
class PCDao:
    """Data-access object for registered points (PCs) in the life_pc tables."""

    def __init__(self):
        # open one PostgreSQL connection for the DAO's lifetime
        self.pg = PostgresDB.PostgresDB()
        self.pg.connect()

    def pontosCadastrados(self):
        """Return a dict {pccadcodigo: short description} of all registered points."""
        listPC = {}
        query = "select pccadcodigo,pccaddescresumida from life_pc"
        cur = self.pg.executeQuery(query)
        rows = cur.fetchall()
        for row in rows:
            listPC[row[0]] = row[1]
        return listPC

    def pcFromLinhas(self,idCodigoLinha):
        """Return the ordered list of PC objects belonging to line idCodigoLinha.

        Each row also carries a boolean (first column) telling whether the PC
        appears on exactly one line.
        """
        listPC = []
        # NOTE(security): the query is built by string-concatenating
        # idCodigoLinha — SQL-injection risk if the id can come from untrusted
        # input. Prefer a parameterized query if executeQuery supports one.
        query = "select (SELECT count(llp.id_linha_fk)=1 FROM life_linha_pc llp WHERE llp.pccadcodigo = lpc.pccadcodigo),lpc.linpcsequencia,pc.pccaddescricao,pc.latitude,pc.longitude, pc.pccadauxiliar, pccadchave, tpc.pctdescricao, lpc.pccadcodigo from life_linha_pc lpc JOIN life_pc pc ON lpc.pccadcodigo = pc.pccadcodigo JOIN life_tipo_pc tpc on tpc.pctcodigo = pc.pctcodigo where lincodigo = '"+str(idCodigoLinha)+"' order by linpcsequencia"
        cur = self.pg.executeQuery(query)
        rows = cur.fetchall()
        for row in rows:
            # map positional columns onto a PC value object
            pc = PC.PC()
            pc.caracteristico = row[0]
            pc.sequencia = row[1]
            pc.descricao = row[2]
            pc.latitude = row[3]
            pc.longitude = row[4]
            pc.auxiliar = row[5]
            pc.chave = row[6]
            pc.tipo = row[7]
            pc.codigo = row[8]
            listPC.append(pc)
        return listPC
|
[
"prover@easymine.com.br"
] |
prover@easymine.com.br
|
43d428165bda8b444cd11770569cbabf8878dc4a
|
f2fcf807b441aabca1ad220b66770bb6a018b4ae
|
/coderbyte/Wave_Sorting.py
|
a5e6111e316f57d534229316d5b34ded22b0364b
|
[] |
no_license
|
gokou00/python_programming_challenges
|
22d1c53ccccf1f438754edad07b1d7ed77574c2c
|
0214d60074a3b57ff2c6c71a780ce5f9a480e78c
|
refs/heads/master
| 2020-05-17T15:41:07.759580
| 2019-04-27T16:36:56
| 2019-04-27T16:36:56
| 183,797,459
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 790
|
py
|
import itertools
def WaveSorting(arr):
    """Return True if some permutation of arr is a strict "wave":
    a[0] > a[1] < a[2] > a[3] ...

    Fixes vs. the original: permutations are iterated lazily instead of
    materialising list(itertools.permutations(arr)) — O(n!) memory — and
    the leftover debug print of the matching permutation is removed.
    Worst-case time is still O(n! * n); intended for small inputs.
    """
    for candidate in itertools.permutations(arr):
        if checker(list(candidate)):
            return True
    return False


# Zig Zag checker
def checker(arr):
    """Return True if arr strictly alternates starting downward:
    arr[0] > arr[1], arr[1] < arr[2], ... Lists of length <= 1 pass.
    """
    for idx in range(len(arr) - 1):
        if idx % 2 == 0:
            # even position: must step down
            if arr[idx] <= arr[idx + 1]:
                return False
        # odd position: must step up
        elif arr[idx] >= arr[idx + 1]:
            return False
    return True
# NOTE(review): 14 elements means up to 14! candidate permutations — this
# demo may be infeasible to run to completion on the brute-force search.
print(WaveSorting([1, 1, 1, 1, 5, 2, 5, 1, 1, 3, 5, 6, 8, 3]))
|
[
"gamblecua@gmail.com"
] |
gamblecua@gmail.com
|
95f2e42218ca0fe67d97ea59393957c2b697f965
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02880/s734961242.py
|
f2db82b88bab727ec3a005fb2338dca6d1cceedb
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 168
|
py
|
# Read n from stdin and print 'Yes' if n is the product of two one-digit
# numbers (1..9), otherwise 'No'.
n = int(input())
a = 0  # falsy sentinel; set to True when a factor pair is found
for x in range(1, 9 + 1):
    for y in range(1, 9 + 1):
        if x * y == n:
            a = True
# relies on truthiness: 0 is falsy, True is truthy
if a:
    print('Yes')
else:
    print('No')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
9d46ef04fb2dab03201181ff3e984852eb91e32f
|
0e1245a588be591e7a5752cbe23774c172929f81
|
/73.py
|
67a97ce1d1495d60fe1dedd79d58849620716361
|
[] |
no_license
|
Phantom1911/leetcode
|
9e41c82f712c596dc58589afb198acedd9351e6b
|
b9789aa7f7d5b99ff41f2791a292a0d0b57af67f
|
refs/heads/master
| 2022-07-10T22:00:01.424841
| 2022-06-08T09:00:32
| 2022-06-08T09:00:32
| 207,652,984
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,175
|
py
|
class Solution:
    """LeetCode 73: zero out every row and column that contains a zero."""

    def hashed(self, i, j):
        """Encode a (row, col) pair as a "r:c" string key."""
        return ":".join((str(i), str(j)))

    def setZeroes(self, matrix: List[List[int]]) -> None:
        """
        Do not return anything, modify matrix in-place instead.
        """
        row_count = len(matrix)
        col_count = len(matrix[0])
        # first pass: remember where the ORIGINAL zeros are, so zeros we
        # write later don't cascade
        original_zeros = [(r, c)
                          for r in range(row_count)
                          for c in range(col_count)
                          if matrix[r][c] == 0]
        # second pass: blank the full row and column of each original zero
        for r, c in original_zeros:
            for cc in range(col_count):
                matrix[r][cc] = 0
            for rr in range(row_count):
                matrix[rr][c] = 0
|
[
"aastik.koshta@flipkart.com"
] |
aastik.koshta@flipkart.com
|
d7be41837c1795a29c6d16ddc882a3df84312ee7
|
23a1faa037ddaf34a7b5db8ae10ff8fa1bb79b94
|
/GFG/Arrays/Non-Repeating Element/solution.py
|
64a6dec04b807bee89b95b9e0964e997ae9122cb
|
[] |
no_license
|
Pyk017/Competetive-Programming
|
e57d2fe1e26eeeca49777d79ad0cbac3ab22fe63
|
aaa689f9e208bc80e05a24b31aa652048858de22
|
refs/heads/master
| 2023-04-27T09:37:16.432258
| 2023-04-22T08:01:18
| 2023-04-22T08:01:18
| 231,229,696
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 482
|
py
|
class Solution:
    """Find the first non-repeating element of an array."""

    def firstNonRepeating(self, arr, n):
        """Return the first value in arr occurring exactly once, or 0 if none.

        n is the declared length of arr (kept for interface parity; the
        implementation iterates arr directly).
        """
        counts = {}
        for value in arr:
            counts[value] = counts.get(value, 0) + 1
        for value in arr:
            if counts[value] == 1:
                return value
        return 0
from collections import defaultdict  # NOTE(review): imported but never used

# Driver: read array length and elements from stdin, print the answer.
n = int(input())
arr = list(map(int,input().strip().split()))
ob = Solution()
print(ob.firstNonRepeating(arr, n))
|
[
"prakharkumar506978@gmail.com"
] |
prakharkumar506978@gmail.com
|
4ce1e8a2a69742a3efd7c8738ed7614b7e389af6
|
497291126f711206d430d3a4f4898e04e88650da
|
/imbh/run_duty_cycle_hyperpe.py
|
9745ba663e96e844390e18ef35d325c58ec7c9a1
|
[] |
no_license
|
avivajpeyi/imbh_pe
|
0688f9fc3a83ca4e9b0bb92a76240efa5bb6155e
|
a3641a592a66bb42354525704757279f75e37d0b
|
refs/heads/master
| 2022-07-08T01:44:47.895128
| 2019-10-11T02:44:54
| 2019-10-11T02:44:54
| 183,551,205
| 0
| 0
| null | 2022-06-21T22:03:42
| 2019-04-26T03:35:29
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,565
|
py
|
# #!/usr/bin/env python3
import argparse
import logging
import os

import pandas as pd

from hyper_pe.duty_cycle import sample_duty_cycle_likelihood

logging.basicConfig(level=logging.DEBUG)

# Canonical column names expected in the evidence CSV.
INJECTION_NUMBER = "InjNum"
LOG_BF = "log_bayes_factor"
LOG_EVIDENCE = "log_evidence"
LOG_NOISE_EVIDENCE = "log_noise_evidence"
LOG_GLITCH_H_EVIDENCE = "log_glitchH_evidence"
LOG_GLITCH_L_EVIDENCE = "log_glitchL_evidence"
def start_duty_cycle_sampling(evid_csv_path):
    """Load the evidence CSV and run the duty-cycle hyper-PE sampler.

    Results are written next to the CSV (the sampler receives the CSV's
    directory as its output location). Falls back to legacy column names
    (lnZs/lnBF/lnZg_*) when the canonical ones are absent.
    """
    csv_df = pd.read_csv(evid_csv_path)
    try:
        # preferred path: canonical column names already present
        evid_df = csv_df[
            [
                LOG_EVIDENCE,
                LOG_NOISE_EVIDENCE,
                LOG_GLITCH_H_EVIDENCE,
                LOG_GLITCH_L_EVIDENCE,
            ]
        ]
    except KeyError:
        # legacy CSV: derive noise evidence from signal evidence and the
        # Bayes factor, then rename columns to the canonical names
        logging.warning(f"df keys: {csv_df.columns}")
        csv_df["lnZn"] = csv_df["lnZs"] - csv_df["lnBF"]
        evid_df = csv_df.copy()
        evid_df = evid_df.rename(
            columns={
                "lnZs": LOG_EVIDENCE,
                "lnZn": LOG_NOISE_EVIDENCE,
                "lnZg_H1": LOG_GLITCH_H_EVIDENCE,
                "lnZg_L1": LOG_GLITCH_L_EVIDENCE,
            }
        )
        logging.warning(f"df keys: {evid_df.columns}")
    sample_duty_cycle_likelihood(evid_df, os.path.dirname(evid_csv_path))
def main():
parser = argparse.ArgumentParser(description="Generates duty cyckle from evid csv")
parser.add_argument("--csv", "-c", type=str, help="path to csv of evid'")
args = parser.parse_args()
start_duty_cycle_sampling(args.csv)
if __name__ == "__main__":
main()
|
[
"avi.vajpeyi@gmail.com"
] |
avi.vajpeyi@gmail.com
|
465159c5322cd6e2489132082dbea37da599957a
|
f8dd8d046100f1223713e047074f30c7ce5a59cd
|
/testing/epilogue/mappers.py
|
5c5a5b399ca13f484d9e91c72551c26c0bf3fef5
|
[] |
no_license
|
dotslash227/98fitcortex
|
57aed99270799eff68fdff62db0b8c1d9aabd4a2
|
bd4002151e5def00c3dea1f5a1abfb06ba3e809a
|
refs/heads/master
| 2022-12-17T00:51:20.302948
| 2019-02-27T13:54:22
| 2019-02-27T13:54:22
| 197,362,824
| 0
| 0
| null | 2022-12-08T00:02:42
| 2019-07-17T09:55:14
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,200
|
py
|
from dietplan.goals import Goals
from django.db import models
QUANTITY_MANIPULATE = [
"Parantha",
"Roti",
"Dosa",
"Cheela",
"Uttapam"
]
UNCHANGABLE_ITEMS = [
'Boiled Egg White',
'Salad'
]
fieldMapper = {
Goals.WeightLoss : "squared_diff_weight_loss",
Goals.MaintainWeight : "squared_diff_weight_maintain",
Goals.WeightGain : "squared_diff_weight_gain",
Goals.MuscleGain : "squared_diff_muscle_gain"
}
exclusionMapper = {
'wheat' : models.Q(wheat = 0),
'nuts' : models.Q(nuts = 0),
'nut' : models.Q(nut = 0),
'dairy' : models.Q(dairy = 0),
'lamb_mutton' : models.Q(lamb_mutton = 0),
'beef' : models.Q(dairy = 0),
'seafood' : models.Q(seafood = 0),
'poultary' : models.Q(poultary = 0),
'meat' : models.Q(meat = 0),
'egg' : models.Q(egg = 0)
}
food_category_exclusion_mapper = {
'veg' : models.Q(poultary = 0) & models.Q(seafood = 0) & models.Q(pork = 0) & models.Q(meat = 0) & models.Q(lamb_mutton = 0) & models.Q(beef = 0) & models.Q(other_meat = 0) & models.Q(egg = 0),
'nonveg' : models.Q(),
'egg' : models.Q(poultary = 0) & models.Q(seafood = 0) & models.Q( pork = 0) & models.Q(meat = 0) & models.Q( lamb_mutton = 0) & models.Q(beef = 0) & models.Q(other_meat = 0)
}
|
[
"shikhar.chauhan@live.com"
] |
shikhar.chauhan@live.com
|
702bbc35033be5dceb6ce00b936b4454b6967fa7
|
92af0d7e1d0c6b17e80ee249bb133d8e1f1c7852
|
/ABC032/C.py
|
ebe841040fc3a4d67560536ab8150eebca8e2840
|
[] |
no_license
|
ohshige15/AtCoder
|
6157089f4672d8497789db02db3bfce334ec0152
|
c0d1e979631d9df62e70a2b1066bc670fccae1ec
|
refs/heads/master
| 2020-04-23T03:25:43.325538
| 2019-09-19T11:18:57
| 2019-09-19T11:18:57
| 170,878,361
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 366
|
py
|
N, K = map(int, input().split())
S = [int(input()) for _ in range(N)]
if 0 in S:
print(N)
exit()
right = 0
x = 1
result = 0
num = 0
for left in range(N):
while right < N and x * S[right] <= K:
x *= S[right]
num += 1
right += 1
if num > 0:
result = max(result, num)
x //= S[left]
num -= 1
print(result)
|
[
"satoshi.o.xxx@gmail.com"
] |
satoshi.o.xxx@gmail.com
|
047633c791be1f00e0a04427c35678b27a32def8
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02394/s187065290.py
|
4fe9336d1106bc1613fb5a630b4ef0149cf87dee
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 238
|
py
|
W, H, x, y, r = map(int, input().split())
flg = False
for X in [x - r, x + r]:
for Y in [y - r, y + r]:
if not 0 <= X <= W:
flg = True
if not 0 <= Y <= H:
flg = True
print(["Yes", "No"][flg])
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
28047072122faeb0a8ff271caa8b794daca31443
|
2a54e8d6ed124c64abb9e075cc5524bb859ba0fa
|
/.history/1-Python-Basics/1-data-type_20200410234228.py
|
4b390bbdb256dd67e36b8d768d844c87fe689b0b
|
[] |
no_license
|
CaptainStorm21/Python-Foundation
|
01b5fbaf7a913506518cf22e0339dd948e65cea1
|
a385adeda74f43dd7fb2d99d326b0be23db25024
|
refs/heads/master
| 2021-05-23T01:29:18.885239
| 2020-04-23T19:18:06
| 2020-04-23T19:18:06
| 253,171,611
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 239
|
py
|
# Fundamental Data Types
# int
# float
# bool
# str
# list
# tuple
# set
# dict
# Classes -> custom types
# Specialized Data Types
# None
# Fundamentals Data Types
# integer
print(type( 2+ 4 ))
print( 2% 4 )
print( 2* 4 )
print( 2/ 4 )
|
[
"tikana4@yahoo.com"
] |
tikana4@yahoo.com
|
ce06e61c6efee40e31bae635ca9e9e2ad6ac741d
|
46ac0965941d06fde419a6f216db2a653a245dbd
|
/sdks/python/appcenter_sdk/models/AppleLoginRequest.py
|
c7b3f491fbed5f33675bbbef853bfc07d3d8d1bf
|
[
"MIT",
"Unlicense"
] |
permissive
|
b3nab/appcenter-sdks
|
11f0bab00d020abb30ee951f7656a3d7ed783eac
|
bcc19c998b5f648a147f0d6a593dd0324e2ab1ea
|
refs/heads/master
| 2022-01-27T15:06:07.202852
| 2019-05-19T00:12:43
| 2019-05-19T00:12:43
| 187,386,747
| 0
| 3
|
MIT
| 2022-01-22T07:57:59
| 2019-05-18T17:29:21
|
Python
|
UTF-8
|
Python
| false
| false
| 5,930
|
py
|
# coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class AppleLoginRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'username': 'string',
'password': 'string',
'team_identifier': 'string',
'cookie': 'string'
}
attribute_map = {
'username': 'username',
'password': 'password',
'team_identifier': 'team_identifier',
'cookie': 'cookie'
}
def __init__(self, username=None, password=None, team_identifier=None, cookie=None): # noqa: E501
"""AppleLoginRequest - a model defined in Swagger""" # noqa: E501
self._username = None
self._password = None
self._team_identifier = None
self._cookie = None
self.discriminator = None
self.username = username
self.password = password
if team_identifier is not None:
self.team_identifier = team_identifier
if cookie is not None:
self.cookie = cookie
@property
def username(self):
"""Gets the username of this AppleLoginRequest. # noqa: E501
The username for the Apple Developer account. # noqa: E501
:return: The username of this AppleLoginRequest. # noqa: E501
:rtype: string
"""
return self._username
@username.setter
def username(self, username):
"""Sets the username of this AppleLoginRequest.
The username for the Apple Developer account. # noqa: E501
:param username: The username of this AppleLoginRequest. # noqa: E501
:type: string
"""
if username is None:
raise ValueError("Invalid value for `username`, must not be `None`") # noqa: E501
self._username = username
@property
def password(self):
"""Gets the password of this AppleLoginRequest. # noqa: E501
The password for the Apple Developer account. # noqa: E501
:return: The password of this AppleLoginRequest. # noqa: E501
:rtype: string
"""
return self._password
@password.setter
def password(self, password):
"""Sets the password of this AppleLoginRequest.
The password for the Apple Developer account. # noqa: E501
:param password: The password of this AppleLoginRequest. # noqa: E501
:type: string
"""
if password is None:
raise ValueError("Invalid value for `password`, must not be `None`") # noqa: E501
self._password = password
@property
def team_identifier(self):
"""Gets the team_identifier of this AppleLoginRequest. # noqa: E501
Identifier of the team to use when logged in. # noqa: E501
:return: The team_identifier of this AppleLoginRequest. # noqa: E501
:rtype: string
"""
return self._team_identifier
@team_identifier.setter
def team_identifier(self, team_identifier):
"""Sets the team_identifier of this AppleLoginRequest.
Identifier of the team to use when logged in. # noqa: E501
:param team_identifier: The team_identifier of this AppleLoginRequest. # noqa: E501
:type: string
"""
self._team_identifier = team_identifier
@property
def cookie(self):
"""Gets the cookie of this AppleLoginRequest. # noqa: E501
The 30-day session cookie for multi-factor authentication backed accounts. # noqa: E501
:return: The cookie of this AppleLoginRequest. # noqa: E501
:rtype: string
"""
return self._cookie
@cookie.setter
def cookie(self, cookie):
"""Sets the cookie of this AppleLoginRequest.
The 30-day session cookie for multi-factor authentication backed accounts. # noqa: E501
:param cookie: The cookie of this AppleLoginRequest. # noqa: E501
:type: string
"""
self._cookie = cookie
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AppleLoginRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"b3nab@users.noreply.github.com"
] |
b3nab@users.noreply.github.com
|
29c8ce4b10808ec246f7fca15551a28845b40238
|
102a33464fd3a16ceedd134e9c64fea554ca5273
|
/apps/achievements/models.py
|
e4843e4d6ac88ff6e4ce360080417aaf44be3ced
|
[] |
no_license
|
pythonguru101/django-ecommerce
|
b688bbe2b1a53c906aa80f86f764cf9787e6c2fe
|
f94de9c21223716db5ffcb86ba87219da88d2ff4
|
refs/heads/master
| 2020-07-24T14:57:02.047702
| 2020-06-10T06:06:23
| 2020-06-10T06:06:23
| 207,961,132
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,051
|
py
|
# encoding: utf-8
import datetime
from django.db import models
from django.urls import reverse, reverse_lazy
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from imagekit.models import ImageSpecField
from imagekit.processors import ResizeToCover, ResizeToFill, ResizeToFit, \
ResizeCanvas, Anchor
from apps.shop.models import Product
from apps.utils import upload_to
class Category(models.Model):
"""Категория соревнований"""
name = models.CharField(_(u'название'), blank=False, max_length=80)
description = models.TextField(_(u'описание'), blank=True)
slug = models.SlugField(_(u'Слаг'), unique=True)
sort = models.PositiveSmallIntegerField(_(u'сортировка'), default=1)
is_active = models.BooleanField(_(u'вкл/выкл'), default=True)
class Meta:
verbose_name = _(u'тип соревнования')
verbose_name_plural = _(u'типы соревнований')
def __unicode__(self):
return self.name
def records_by_date(self):
records = self.records.all()
# @models.permalink
def get_absolute_url(self):
return reverse('achievements-category', kwargs={'slug': self.slug})
class RecordManager(models.Manager):
def approved(self):
"""только одобренные админом"""
return self.filter(is_confirmed=True)
def pending(self):
"""только ожидающие"""
return self.filter(is_confirmed=False)
class Record(models.Model):
user = models.ForeignKey(User, verbose_name=_(u'рекордсмен'),
related_name='records', on_delete=models.CASCADE)
category = models.ForeignKey(Category, verbose_name=_(u'категория'),
related_name='records', on_delete=models.CASCADE)
powerball = models.ForeignKey(Product, verbose_name=_(u'модель'),
related_name='records', blank=True, null=True, on_delete=models.CASCADE)
value = models.IntegerField(_(u'скорость вращения(пользователь)'),
blank=False, null=False)
is_confirmed = models.BooleanField(_(u'подтверждён'), default=True)
comment = models.TextField(_(u'комментарий'), blank=True)
created_at = models.DateTimeField(_(u'дата'), blank=False, editable=False,
default=datetime.datetime.now)
approved_at = models.DateTimeField(_(u'дата подтверждения'), blank=True,
null=True,
editable=False)
objects = RecordManager()
class Meta:
verbose_name = _(u'рекорд')
verbose_name_plural = _(u'рекорды')
ordering = ['-created_at']
def __unicode__(self):
return "%s %s" % (self.value, self.created_at.strftime('%Y.%m.%d'))
# @models.permalink
def get_absolute_url(self):
return reverse('achievements-record', kwargs={'category_slug': self.category.slug, 'id': self.id})
class RecordProof(models.Model):
record = models.ForeignKey(Record, verbose_name=_(u'рекорд'),
related_name='proofs', on_delete=models.CASCADE)
image = models.ImageField(_(u'изображение'),
upload_to=upload_to('achievements'))
image_photo = ImageSpecField(source='image',
processors=[ResizeToCover(580, 580),
ResizeCanvas(580, 580,
anchor=Anchor.CENTER)],
format='JPEG',
options={'quality': 90})
class Meta:
verbose_name = _(u'картинка с рекордом')
verbose_name_plural = _(u'картинка с рекордами')
|
[
"pythonguru101@gmail.com"
] |
pythonguru101@gmail.com
|
44bf97a7387695c03b03dcadc2cc8af396310a5f
|
30569618ec13465ee323f27797933ba85035711a
|
/test/test_body45.py
|
b6617635fc03af5523117f5b2eb72c7734db7e89
|
[
"MIT"
] |
permissive
|
ike709/tgs4-api-pyclient
|
c0fdd7e648fd4fb77f0caf3253a7e1daafc0477a
|
97918cfe614cc4ef06ef2485efff163417a8cd44
|
refs/heads/main
| 2023-03-14T08:11:06.146596
| 2021-03-01T18:21:33
| 2021-03-01T18:21:33
| 336,353,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 812
|
py
|
# coding: utf-8
"""
TGS API
A production scale tool for BYOND server management # noqa: E501
OpenAPI spec version: 9.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.body45 import Body45 # noqa: E501
from swagger_client.rest import ApiException
class TestBody45(unittest.TestCase):
"""Body45 unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testBody45(self):
"""Test Body45"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.body45.Body45() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
[
"sparebytes@protonmail.com"
] |
sparebytes@protonmail.com
|
b9dbbdcf8aabb4a43e2bf9ab40c79574aabae8b5
|
57b4ee27801c23cdd6a6d974dbc278f49740f770
|
/easyctf_sum.py
|
db3111c2f8ff777a15a36c1f10b48a2d8da72772
|
[] |
no_license
|
zwhubuntu/CTF-chal-code
|
4de9fc0fe9ee85eab3906b36b8798ec959db628c
|
8c912e165f9cc294b3b85fab3d776cd63acc203e
|
refs/heads/master
| 2021-01-20T18:39:26.961563
| 2017-09-25T14:07:56
| 2017-09-25T14:07:56
| 62,563,092
| 7
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
sum_lst = []
N = raw_input('please input the numbers(1-99):')
if int(N) <= 0 or int(N) >= 100:
exit('N error')
for i in xrange(int(N)):
tmp = raw_input('enter the numbers(-999-999):')
if int(tmp) <= -1000 or int(tmp) >= 1000:
exit('input error')
sum_lst.append(int(tmp))
print sum_lst
print "the sum of your input is %s" % sum(sum_lst)
|
[
"zwhubuntu@hotmail.com"
] |
zwhubuntu@hotmail.com
|
9638971dd529b93751bed146bd1e3d1d6d93a4dd
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03380/s425959610.py
|
31aca42fa9e84f2d82c80aee4d054ec74ccfb8c5
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 753
|
py
|
import bisect,collections,copy,itertools,math,string
import sys
def I(): return int(sys.stdin.readline().rstrip())
def LI(): return list(map(int,sys.stdin.readline().rstrip().split()))
def S(): return sys.stdin.readline().rstrip()
def LS(): return list(sys.stdin.readline().rstrip().split())
def main():
n = I()
a = LI()
mx = max(a)
a.remove(mx)
r = (float("inf"), -1)
half = mx//2
if mx%2 == 0:
for x in a:
sa = abs(x-half)
if sa < r[0]:
r = (sa, x)
else:
for x in a:
sa = min(abs(x-half), abs(x-(half+1)))
if sa < r[0]:
r = (sa, x)
ans = (mx, r[1])
print(*ans)
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
9f296804ff9e989e9205c36736e5ef5b1a3119e9
|
ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f
|
/Sourcem8/pirates/friends/PCPlayerFriendsManager.py
|
636ac3e97ed5dec977e51e17263be6847a0e1e44
|
[] |
no_license
|
BrandonAlex/Pirates-Online-Retribution
|
7f881a64ec74e595aaf62e78a39375d2d51f4d2e
|
980b7448f798e255eecfb6bd2ebb67b299b27dd7
|
refs/heads/master
| 2020-04-02T14:22:28.626453
| 2018-10-24T15:33:17
| 2018-10-24T15:33:17
| 154,521,816
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,946
|
py
|
from direct.distributed.DistributedObjectGlobal import DistributedObjectGlobal
from direct.directnotify.DirectNotifyGlobal import directNotify
from otp.otpbase import OTPGlobals
from otp.friends.PlayerFriendsManager import PlayerFriendsManager
from pirates.friends.PCFriendPlayerInfo import PCFriendPlayerInfo
class PCPlayerFriendsManager(PlayerFriendsManager):
notify = directNotify.newCategory('PCPlayerFriendsManager')
def __init__(self, cr):
PlayerFriendsManager.__init__(self, cr)
self.playerId2ShipState = { }
self.playerId2ShipId = { }
self.shipId2ShipState = { }
def updatePlayerFriend(self, id, info, isNewFriend):
pcinfo = PCFriendPlayerInfo.makeFromFriendInfo(info)
PlayerFriendsManager.updatePlayerFriend(self, id, pcinfo, isNewFriend)
def removePlayerFriend(self, id):
PlayerFriendsManager.removePlayerFriend(self, id)
self.playerId2ShipState.pop(id, None)
shipId = self.playerId2ShipId.get(id, 0)
if shipId:
self.shipId2ShipState.pop(id, None)
self.playerId2ShipId.pop(id, None)
def setShipState(self, playerId, onShip, shipId):
self.playerId2ShipState[playerId] = onShip
self.playerId2ShipId[playerId] = shipId
self.shipId2ShipState[shipId] = onShip
localAvatar.guiMgr.socialPanel.updateAll()
def getShipState(self, playerId):
return self.playerId2ShipState.get(playerId, 0)
def getShipId2State(self, shipId):
return self.shipId2ShipState.get(shipId, 0)
def getShipId(self, playerId):
return self.playerId2ShipId.get(playerId, 0)
def setBandId(self, playerId, bandMgrId, bandId):
info = self.playerId2Info.get(playerId)
if info:
info.setBandId(bandMgrId, bandId)
def getBandId(self, playerId):
info = self.playerId2Info.get(playerId)
if info:
return info.getBandId()
|
[
"brandoncarden12345@gmail.com"
] |
brandoncarden12345@gmail.com
|
71508c2ec1d4c71972e976a99db2c00182518c60
|
78d5a6e0846cb6b03544e4f717651ca59dfc620c
|
/treasury-admin/transfert/migrations/0017_auto_20180110_0901.py
|
b694b0fe2963aefd1b9bbafd7b5f0c1488f4ef67
|
[] |
no_license
|
bsca-bank/treasury-admin
|
8952788a9a6e25a1c59aae0a35bbee357d94e685
|
5167d6c4517028856701066dd5ed6ac9534a9151
|
refs/heads/master
| 2023-02-05T12:45:52.945279
| 2020-12-13T08:07:41
| 2020-12-13T08:07:41
| 320,323,196
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 677
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-01-10 08:01
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('transfert', '0016_auto_20180110_0857'),
]
operations = [
migrations.AlterField(
model_name='virementctrl',
name='date_go',
field=models.DateTimeField(blank=True, default=datetime.datetime.now),
),
migrations.AlterField(
model_name='virementctrl',
name='date_val',
field=models.DateField(blank=True, null=True),
),
]
|
[
"cn.makodo@gmail.com"
] |
cn.makodo@gmail.com
|
895dd3ff39d919619df1ebd2d2be6f9c95e96aaf
|
ab1287568346bfbac9c383d67793d19f2a415971
|
/easy/1_two_sum_1.py
|
b600912f7ffed249c079df5c3ed70a49be94b655
|
[] |
no_license
|
richwandell/lc_python
|
6080e9d0967e853695ff5619d94c512e9908fb68
|
772824ef40cb0011713dba489d40c62b3577db14
|
refs/heads/master
| 2021-11-18T19:43:42.217991
| 2021-10-03T17:47:17
| 2021-10-03T17:47:17
| 238,301,509
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 893
|
py
|
from typing import List
class Solution:
"""
Given an array of integers nums and an integer target, return indices of the two numbers such that they
add up to target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
You can return the answer in any order.
Example 1:
Input: nums = [2,7,11,15], target = 9
Output: [0,1]
Output: Because nums[0] + nums[1] == 9, we return [0, 1].
Example 2:
Input: nums = [3,2,4], target = 6
Output: [1,2]
Example 3:
Input: nums = [3,3], target = 6
Output: [0,1]
"""
def twoSum(self, nums: List[int], target: int) -> List[int]:
c = {}
for i, n in enumerate(nums):
if target - n in c:
return [c[target - n], i]
c[n] = i
s = Solution()
print(s.twoSum([2,7,11,15], 9))
print(s.twoSum([3,2,4], 6))
print(s.twoSum([3,3], 6))
|
[
"richwandell@gmail.com"
] |
richwandell@gmail.com
|
2ef867123d2bf54310095ddc4c17fb882ea0e808
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-vpc/huaweicloudsdkvpc/v3/model/remove_vpc_extend_cidr_request.py
|
668aabe967a0ef394c18ab98a3e6b3dc0a6f1712
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,914
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class RemoveVpcExtendCidrRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'vpc_id': 'str',
'body': 'RemoveVpcExtendCidrRequestBody'
}
attribute_map = {
'vpc_id': 'vpc_id',
'body': 'body'
}
def __init__(self, vpc_id=None, body=None):
"""RemoveVpcExtendCidrRequest
The model defined in huaweicloud sdk
:param vpc_id: VPC资源ID
:type vpc_id: str
:param body: Body of the RemoveVpcExtendCidrRequest
:type body: :class:`huaweicloudsdkvpc.v3.RemoveVpcExtendCidrRequestBody`
"""
self._vpc_id = None
self._body = None
self.discriminator = None
self.vpc_id = vpc_id
if body is not None:
self.body = body
@property
def vpc_id(self):
"""Gets the vpc_id of this RemoveVpcExtendCidrRequest.
VPC资源ID
:return: The vpc_id of this RemoveVpcExtendCidrRequest.
:rtype: str
"""
return self._vpc_id
@vpc_id.setter
def vpc_id(self, vpc_id):
"""Sets the vpc_id of this RemoveVpcExtendCidrRequest.
VPC资源ID
:param vpc_id: The vpc_id of this RemoveVpcExtendCidrRequest.
:type vpc_id: str
"""
self._vpc_id = vpc_id
@property
def body(self):
"""Gets the body of this RemoveVpcExtendCidrRequest.
:return: The body of this RemoveVpcExtendCidrRequest.
:rtype: :class:`huaweicloudsdkvpc.v3.RemoveVpcExtendCidrRequestBody`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this RemoveVpcExtendCidrRequest.
:param body: The body of this RemoveVpcExtendCidrRequest.
:type body: :class:`huaweicloudsdkvpc.v3.RemoveVpcExtendCidrRequestBody`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RemoveVpcExtendCidrRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
4b55aa81ccd27d9b65e650d10ff8bd5e1c4e1d62
|
d24a6e0be809ae3af8bc8daa6dacfc1789d38a84
|
/other_contests/zone2021_a/D.py
|
e46420949401d799239c856ab3b869ffdd11a64f
|
[] |
no_license
|
k-harada/AtCoder
|
5d8004ce41c5fc6ad6ef90480ef847eaddeea179
|
02b0a6c92a05c6858b87cb22623ce877c1039f8f
|
refs/heads/master
| 2023-08-21T18:55:53.644331
| 2023-08-05T14:21:25
| 2023-08-05T14:21:25
| 184,904,794
| 9
| 0
| null | 2023-05-22T16:29:18
| 2019-05-04T14:24:18
|
Python
|
UTF-8
|
Python
| false
| false
| 754
|
py
|
from collections import deque
def solve(s):
r = deque()
flag = 1
for c in s:
if c == "R":
flag *= -1
else:
if flag == 1:
r.append(c)
else:
r.appendleft(c)
res = ""
while r:
c = r.popleft()
if len(res) == 0:
res = c
elif c == res[-1]:
res = res[:-1]
else:
res = res + c
if flag == -1:
res = "".join(list(reversed(res)))
# print(res)
return res
def main():
s = input()
res = solve(s)
print(res)
def test():
assert solve("ozRnonnoe") == "zone"
assert solve("hellospaceRhellospace") == ""
if __name__ == "__main__":
test()
main()
|
[
"cashfeg@gmail.com"
] |
cashfeg@gmail.com
|
ad568708af39b811f08c7c3ed0b9d358bbd726e2
|
cbe264842df4eae3569b28ed4aae9489014ed23c
|
/deep-learning-from-scratch/ch03/sigmoid.py
|
5aefc89779482688db2dc7ad69903faf06204bf2
|
[
"MIT"
] |
permissive
|
zeroam/TIL
|
31e176c2f4c3e1ef72b1155353690cc2f7160f96
|
43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1
|
refs/heads/master
| 2021-07-23T01:43:34.135033
| 2021-07-10T06:47:17
| 2021-07-10T06:47:17
| 167,952,375
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
import numpy as np
import matplotlib.pylab as plt
def sigmoid(x):
return 1 / (1 + np.exp(-x))
if __name__ == "__main__":
x = np.arange(-5.0, 5.0, 0.1)
y = sigmoid(x)
plt.plot(x, y)
plt.ylim(-0.1, 1.1) # y축 범위 지정
plt.show()
|
[
"imdff0803@gmail.com"
] |
imdff0803@gmail.com
|
e7e60143d9aaa90cf6e5943e789b9e5890f89fc6
|
a140fe192fd643ce556fa34bf2f84ddbdb97f091
|
/.history/class맴버변수_20200708171019.py
|
44bda9c141515ac179155ab41781d659c16b544b
|
[] |
no_license
|
sangha0719/py-practice
|
826f13cb422ef43992a69f822b9f04c2cb6d4815
|
6d71ce64bf91cc3bccee81378577d84ba9d9c121
|
refs/heads/master
| 2023-03-13T04:40:55.883279
| 2021-02-25T12:02:04
| 2021-02-25T12:02:04
| 342,230,484
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 714
|
py
|
class Unit:
def __init__(self, name, hp, damage):
self.name = name
self.hp = hp
self.damage = damage
print("{0} 유닛이 생성 되었습니다.".format(self.name))
print("체력 {0}, 공격력 {1}".format(self.hp, self.damage))
# 레이스 : 공중 유닛, 비행기, 클로킹 (상대방에게 보이지 않음)
wraith1 = Unit("레이스", 80, 5)
print("유닛 이름 : {0}, 공격력 : {1}".format(wraith1.name, wraith1.damage))
# 마인드 컨트롤 : 상대방 유닛을 내 것으로 만드는 것 (빼앗음)
wraith2 = Unit("레이스", 80, 5)
wraith2.clocking = True
if wraith2.clocking == True:
print("{0}는 형재 클로킹 상태입니다.".format)
|
[
"sangha0719@gmail.com"
] |
sangha0719@gmail.com
|
75caced479952f3f4a02d5a74e91bbe1cfeccd0a
|
00689951be97b3e9e3a036aca64efaa1ee59134a
|
/aula019 - DICIONARIOS/ex093-guanabara.py
|
9ffb53154a44b6f447ea35257eec577a11f082f1
|
[
"MIT"
] |
permissive
|
miradouro/CursoEmVideo-Python
|
4826cf387cc9424e675f2b115842a643f2d67c8d
|
cc7b05a9a4aad8e6ef3b29453d83370094d75e41
|
refs/heads/main
| 2023-03-24T08:51:34.183169
| 2021-03-20T22:15:02
| 2021-03-20T22:15:02
| 349,843,991
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 776
|
py
|
#guanabara fez um pouco diferente
#mas o resultado foi exatamente o mesmo
jogador = dict()
partidas = list()
jogador['nome']= str(input('Nome do jogador: ')).strip().title()
tot = int(input(f'Quantas partidas {jogador["nome"]} jogou? '))
for c in range(0, tot):
partidas.append((int(input(f'Quantos gols na partida {c+1}? '))))
jogador['gols'] = partidas[:]
jogador['total'] = sum(partidas)
print('-='*30)
print(jogador)
print('-='*30)
for k, v in jogador.items():
print(f'O campo {k} tem o valor {v}.')
print('-='*30)
print(f'O jogador {jogador["nome"]} jogou {len(jogador["gols"])} partidas.')
print('-='*30)
for i, v in enumerate(jogador['gols']):
print(f' => Na partida {i}, fez {v} gols.')
print('-='*30)
print(f'Foi um total de {jogador["total"]} gols.')
|
[
"rafaelmiradouro@gmail.com"
] |
rafaelmiradouro@gmail.com
|
658f0cc40bf342b9c626ea9c325d5b563c7809f8
|
19201b7ef6fa2c3f2b56fb9d03fd9cfcc8ef3c28
|
/__init__.py
|
61882ef054c3a42a9c0785be9a962eb7a7fb1f31
|
[
"MIT"
] |
permissive
|
cheery/textended
|
244bc37806579da2a5de3792338ec12cb698d1a4
|
8d4240f1b2257ac55ddb4894c275b3bef16f650c
|
refs/heads/master
| 2021-03-12T21:49:44.502521
| 2015-03-27T20:49:36
| 2015-03-27T20:49:36
| 27,974,892
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 336
|
py
|
import common
import stream
import decoding
import encoding
def load(fd, transform=(lambda tag, ident, contents: (tag, ident, contents))):
rd = stream.ReadStream(fd, transform)
return decoding.file(rd)
def dump(contents, fd, transform=(lambda x: x)):
wr = stream.WriteStream(fd, transform)
encoding.file(wr, contents)
|
[
"cheery@boxbase.org"
] |
cheery@boxbase.org
|
051a43bb6e03f493131a8ef1439012e278e92a4a
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/462/usersdata/321/105188/submittedfiles/avenida.py
|
55095eda84d8606e4e569422f2a3f9f2dc4073bf
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 150
|
py
|
# -*- coding: utf-8 -*-
m= int(input('Número de quadras no sentido Norte-Sul: '))
n= int(input('Número de quadras no sentido Leste-Oeste: '))
a= []
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
958208db1acc443a3fd9507117c0dbd7e8e575a5
|
61d216e7ebc601a2584e679f6322e8e20eea4ed4
|
/python-tldextract/lilac.py
|
df5680b7c9768ef14059b0ed987fbee3313a78d6
|
[] |
no_license
|
relcodego/repo
|
a43103ec0148f35b0c6f03bb3243ae18a82ed5dd
|
c0be6f5c8a99474eb34a4abe8c17e5c412831627
|
refs/heads/master
| 2020-04-06T04:38:55.725050
| 2016-03-06T18:31:26
| 2016-03-06T18:31:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 982
|
py
|
#!/usr/bin/env python3
import fileinput
from lilaclib import *
build_prefix = 'extra-x86_64'
def pre_build():
info = get_pypi_info('tldextract')
pkgver = info['info']['version']
release = [x for x in info['releases'][pkgver] if x['packagetype'] == 'sdist'][0]
md5sum = release['md5_digest']
url = release['url']
oldver = None
for line in edit_file('PKGBUILD'):
line = line.rstrip('\n')
if line.startswith('pkgver='):
oldver = line.split('=', 1)[-1]
line = 'pkgver=' + pkgver
elif line.startswith('pkgrel='):
oldrel = int(line.split('=', 1)[-1])
if oldver != pkgver:
line = 'pkgrel=1'
# else we're rebuilding, leave as it is
elif line.startswith('source='):
line = 'source=(%s)' % url
elif line.startswith('md5sums='):
line = 'md5sums=(%s)' % md5sum
print(line)
def post_build():
    # Post-build hook: stage the rewritten PKGBUILD, commit it, and push
    # the result to the AUR repository (helpers star-imported from lilaclib).
    git_add_files('PKGBUILD')
    git_commit()
    update_aur_repo()
# single_main() comes from lilaclib (star-imported above): runs the
# standard single-package build workflow when invoked as a script.
if __name__ == '__main__':
    single_main()
|
[
"lilydjwg@gmail.com"
] |
lilydjwg@gmail.com
|
3f6260d2509dcdec264f7b4ea609a6f79f791d31
|
9edaf93c833ba90ae9a903aa3c44c407a7e55198
|
/autosar/models/diagnostic_data_identifier_set_subtypes_enum.py
|
7564190d363a4be71f86e4f1020fcf5e4167b8ea
|
[] |
no_license
|
tefra/xsdata-samples
|
c50aab4828b8c7c4448dbdab9c67d1ebc519e292
|
ef027fe02e6a075d8ed676c86a80e9647d944571
|
refs/heads/main
| 2023-08-14T10:31:12.152696
| 2023-07-25T18:01:22
| 2023-07-25T18:01:22
| 222,543,692
| 6
| 1
| null | 2023-06-25T07:21:04
| 2019-11-18T21:00:37
|
Python
|
UTF-8
|
Python
| false
| false
| 197
|
py
|
from enum import Enum
# XML namespace of the AUTOSAR r4.0 schema these bindings were generated from.
__NAMESPACE__ = "http://autosar.org/schema/r4.0"
class DiagnosticDataIdentifierSetSubtypesEnum(Enum):
    """Subtype discriminator with its single legal XML value,
    DIAGNOSTIC-DATA-IDENTIFIER-SET (generated schema binding)."""
    DIAGNOSTIC_DATA_IDENTIFIER_SET = "DIAGNOSTIC-DATA-IDENTIFIER-SET"
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
f9be86d77d5420d42f33ea887f09dd4ce926440e
|
08c73d76d4f933bae76b5f8519bc0883d2ba184a
|
/src/test/bee_view.py
|
b0d0e1900bb1b899fbd0df0226249dd4505d033a
|
[] |
no_license
|
palencia77/social-core
|
fa17df4d48d07d2f97041491599f08bcddfb4e20
|
f7a0812b70c476ce073f8bdb54bbde4d517658cf
|
refs/heads/master
| 2021-09-16T01:01:24.109023
| 2018-05-28T03:36:10
| 2018-05-28T03:36:10
| 85,777,596
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
'''
Created on 20/06/2014
@author: palencia77
'''
import json
import simplejson
import requests
import base64
# NOTE(review): hard-coded credentials committed to source — move to
# environment variables or a config file.  Python 2 syntax (`print x`).
data = {}
data['login'] = 'rvalera@najoconsultores.com'
data['password'] = 'admin'
# Authenticate against the locally running service; response is JSON.
result = requests.post("http://localhost:5000/user/validate", data=json.dumps(data))
validate_result = result.json()
# A 'token' key in the response signals a successful login.
if 'token' in validate_result:
    data = {}
    data['access_token'] = validate_result['token']
    print data
    # Query the bee view endpoint with the freshly issued token.
    result = requests.get("http://localhost:5000/bee/view", params=data )
    q_result = result.json()
    print q_result
|
[
"jpalencia@technisys.com"
] |
jpalencia@technisys.com
|
250e96b2bc7c17cced4114c4671053ae7a82a0c0
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-network/azure/mgmt/network/v2018_07_01/models/virtual_hub_paged.py
|
40bb1179c20157ba6936a11dfa4b7c17bffab222
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 942
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
# AutoRest-generated (see file header): do not hand-edit — changes are
# lost on regeneration.
class VirtualHubPaged(Paged):
    """
    A paging container for iterating over a list of :class:`VirtualHub <azure.mgmt.network.v2018_07_01.models.VirtualHub>` objects.
    """
    # msrest serialization map: 'nextLink' carries the continuation URL,
    # 'value' carries the current page of VirtualHub items.
    _attribute_map = {
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[VirtualHub]'}
    }
    def __init__(self, *args, **kwargs):
        super(VirtualHubPaged, self).__init__(*args, **kwargs)
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
c19947bd2058ee8b4cf3ac15d3b7c40157e9e674
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part009672.py
|
df07675cf90fd5d7281c8aa16282b03d8d9f76e7
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,663
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher127150(CommutativeMatcher):
    """Auto-generated matchpy many-to-one matcher for the pattern ``x**n``
    (see the ``# 0: x**n`` marker below); do not edit by hand."""
    _instance = None
    # One registered pattern (id 0) with an optional multiplied-in
    # coefficient variable 'i3.1.3.1.0' defaulting to S(1).
    patterns = {
        0: (0, Multiset({0: 1}), [
            (VariableWithCount('i3.1.3.1.0', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()
    def __init__(self):
        self.add_subject(None)
    @staticmethod
    def get():
        # Lazily constructed process-wide singleton.
        if CommutativeMatcher127150._instance is None:
            CommutativeMatcher127150._instance = CommutativeMatcher127150()
        return CommutativeMatcher127150._instance
    @staticmethod
    def get_match_iter(subject):
        # Hand-unrolled matching automaton: each "State NNNNNN" comment marks
        # one generated state.  Elements are popped while descending and
        # appended back afterwards, so the deques end up unchanged.
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 127149
        if len(subjects) >= 1 and isinstance(subjects[0], Pow):
            tmp1 = subjects.popleft()
            subjects2 = deque(tmp1._args)
            # State 127151
            if len(subjects2) >= 1:
                tmp3 = subjects2.popleft()
                subst1 = Substitution(subst0)
                try:
                    subst1.try_add_variable('i3.1.3.1.1', tmp3)
                except ValueError:
                    pass
                else:
                    pass
                    # State 127152
                    if len(subjects2) >= 1:
                        tmp5 = subjects2.popleft()
                        subst2 = Substitution(subst1)
                        try:
                            subst2.try_add_variable('i3.1.3.1.2', tmp5)
                        except ValueError:
                            pass
                        else:
                            pass
                            # State 127153
                            if len(subjects2) == 0:
                                pass
                                # State 127154
                                if len(subjects) == 0:
                                    pass
                                    # 0: x**n
                                    yield 0, subst2
                        subjects2.appendleft(tmp5)
                subjects2.appendleft(tmp3)
            subjects.appendleft(tmp1)
        return
        yield  # unreachable; forces this function to be a generator
from collections import deque
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
871e86e845ab9354cfa2e634e49eae7b17823a11
|
d41d18d3ea6edd2ec478b500386375a8693f1392
|
/plotly/validators/histogram2dcontour/_stream.py
|
1fd236754e22d7a592d44fb179ef197e9f916640
|
[
"MIT"
] |
permissive
|
miladrux/plotly.py
|
38921dd6618650d03be9891d6078e771ffccc99a
|
dbb79e43e2cc6c5762251537d24bad1dab930fff
|
refs/heads/master
| 2020-03-27T01:46:57.497871
| 2018-08-20T22:37:38
| 2018-08-20T22:37:38
| 145,742,203
| 1
| 0
|
MIT
| 2018-08-22T17:37:07
| 2018-08-22T17:37:07
| null |
UTF-8
|
Python
| false
| false
| 865
|
py
|
import _plotly_utils.basevalidators
class StreamValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Validator for the `stream` property of `histogram2dcontour` traces
    # (appears machine-generated).  `data_docs` is runtime help text used in
    # plotly's error messages, so its wording is deliberately left unchanged.
    def __init__(
        self, plotly_name='stream', parent_name='histogram2dcontour', **kwargs
    ):
        super(StreamValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str='Stream',
            data_docs="""
            maxpoints
                Sets the maximum number of points to keep on
                the plots from an incoming stream. If
                `maxpoints` is set to *50*, only the newest 50
                points will be displayed on the plot.
            token
                The stream id number links a data trace on a
                plot with a stream. See
                https://plot.ly/settings for more details.""",
            **kwargs
        )
|
[
"adam.kulidjian@gmail.com"
] |
adam.kulidjian@gmail.com
|
15cfabbf9a36f9eb5d7f3f5807902e9f8dd38ca1
|
b3c1055f7579c4099deb9df91bab085ebf9719cc
|
/testsite/settings.py
|
e87d21062415394d7f54bd71b75f67ff6efc4e5e
|
[
"BSD-2-Clause"
] |
permissive
|
knivets/djaodjin-multitier
|
8412f1a468bd29c835e1dfba30a94b697b4f8b7a
|
faef56e9424ab493c9e0fca0b6fd56231a648070
|
refs/heads/master
| 2020-03-22T16:33:49.120554
| 2019-01-31T04:09:27
| 2019-01-31T04:09:27
| 140,334,951
| 0
| 0
|
BSD-2-Clause
| 2018-07-09T19:51:00
| 2018-07-09T19:51:00
| null |
UTF-8
|
Python
| false
| false
| 5,967
|
py
|
"""
Django settings for testsite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
APP_NAME = os.path.basename(BASE_DIR)
def load_config(confpath):
    '''
    Given a path to a file, parse its lines in ini-like format, and then
    set them as UPPER-CASE attributes of this settings module.

    Lines starting with '#' are skipped; anything else matching
    ``NAME = value`` is expanded with %(LOCALSTATEDIR)s -> BASE_DIR/var
    and evaluated.  A missing file is reported on stderr, not an error.
    '''
    # todo: consider using something like ConfigObj for this:
    # http://www.voidspace.org.uk/python/configobj.html
    import re, sys
    if not os.path.isfile(confpath):
        sys.stderr.write('warning: config file %s does not exist.\n' % confpath)
        return
    sys.stderr.write('config loaded from %s\n' % confpath)
    with open(confpath) as conffile:
        # Iterate the file directly instead of the readline()/while loop;
        # the previous no-op ``try: ... except Exception: raise`` has been
        # removed — it re-raised unconditionally and changed nothing.
        for line in conffile:
            if line.startswith('#'):
                continue
            look = re.match(r'(\w+)\s*=\s*(.*)', line)
            if not look:
                continue
            value = look.group(2) % {'LOCALSTATEDIR': BASE_DIR + '/var'}
            # Once Django 1.5 introduced ALLOWED_HOSTS (a tuple definitely
            # in the site.conf set), we had no choice other than using eval.
            # The {} restrict the globals and locals eval has access to.
            # pylint: disable=eval-used
            setattr(sys.modules[__name__],
                look.group(1).upper(), eval(value, {}, {}))
load_config(os.path.join(BASE_DIR, 'credentials'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django_extensions',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'multitier',
'testsite',
)
MIDDLEWARE_CLASSES = (
'multitier.middleware.SiteMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'testsite.urls'
WSGI_APPLICATION = 'testsite.wsgi.application'
# Templates
# ---------
TEMPLATE_DEBUG = True
# Django 1.7 and below
TEMPLATE_LOADERS = (
'multitier.template_loader.Loader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.media',
'django.core.context_processors.static',
'multitier.context_processors.site',
'multitier.context_processors.features_debug'
)
TEMPLATE_DIRS = (
BASE_DIR + '/testsite/templates',
)
# Django 1.8+
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': TEMPLATE_DIRS,
'OPTIONS': {
'context_processors': [proc.replace(
'django.core.context_processors',
'django.template.context_processors')
for proc in TEMPLATE_CONTEXT_PROCESSORS],
'loaders': TEMPLATE_LOADERS},
},
]
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASE_ROUTERS = ('multitier.routers.SiteRouter',)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite')
}
}
if os.getenv('MULTITIER_DB_FILE'):
MULTITIER_DB_FILE = os.getenv('MULTITIER_DB_FILE')
MULTITIER_DB_NAME = os.path.splitext(
os.path.basename(MULTITIER_DB_FILE))[0]
DATABASES.update({MULTITIER_DB_NAME: {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': MULTITIER_DB_FILE,
}})
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_URL = '/media/'
MEDIA_ROOT = BASE_DIR + '/testsite/media'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_ROOT = BASE_DIR + '/testsite/static'
STATIC_URL = '/static/'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'logfile':{
'level':'DEBUG',
'class':'logging.StreamHandler',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'multitier': {
'handlers': ['logfile'],
'level': 'DEBUG',
'propagate': False,
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
# 'django.db.backends': {
# 'handlers': ['logfile'],
# 'level': 'DEBUG',
# 'propagate': True,
# },
}
}
LOGIN_REDIRECT_URL = 'accounts_profile'
|
[
"smirolo@djaodjin.com"
] |
smirolo@djaodjin.com
|
9b26830525fb30f00ea11fd317b1fcfd7398f3cd
|
6a63a3b241e161d1e69f1521077617ad86f31eab
|
/python/ray/data/datasource/torch_datasource.py
|
a6de55fad34a002c5b34ea5121c0f5d2d9472ff6
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
jovany-wang/ray
|
47a9df67e8ea26337517d625df50eb0b8b892135
|
227aef381a605cb1ebccbba4e84b840634196a35
|
refs/heads/master
| 2023-09-03T23:53:00.050619
| 2022-08-20T21:50:52
| 2022-08-20T21:50:52
| 240,190,407
| 1
| 1
|
Apache-2.0
| 2023-03-04T08:57:04
| 2020-02-13T06:13:19
|
Python
|
UTF-8
|
Python
| false
| false
| 2,424
|
py
|
import logging
from typing import TYPE_CHECKING, Callable, Iterator, List
from ray.data.block import Block, BlockMetadata, T
from ray.data.datasource import Datasource, ReadTask
from ray.util.annotations import PublicAPI
if TYPE_CHECKING:
import torch.utils.data
logger = logging.getLogger(__name__)
@PublicAPI
class SimpleTorchDatasource(Datasource[T]):
    """A datasource that lets you use Torch datasets with Ray Data.

    .. warning::
        ``SimpleTorchDatasource`` doesn't support parallel reads. You should only use
        this datasource for small datasets like MNIST or CIFAR.

    Example:
        >>> import ray
        >>> from ray.data.datasource import SimpleTorchDatasource
        >>>
        >>> dataset_factory = lambda: torchvision.datasets.MNIST("data", download=True)
        >>> dataset = ray.data.read_datasource(  # doctest: +SKIP
        ...     SimpleTorchDatasource(), parallelism=1, dataset_factory=dataset_factory
        ... )
        >>> dataset.take(1)  # doctest: +SKIP
        (<PIL.Image.Image image mode=L size=28x28 at 0x1142CCA60>, 5)
    """

    def prepare_read(
        self,
        parallelism: int,
        dataset_factory: Callable[[], "torch.utils.data.Dataset"],
    ) -> List[ReadTask]:
        """Return a read task that loads a Torch dataset.

        Arguments:
            parallelism: This argument isn't used.
            dataset_factory: A no-argument function that returns the Torch dataset to
                be read.
        """
        import torch.utils.data

        # Guard against the common mistake of passing the dataset itself.
        if isinstance(dataset_factory, torch.utils.data.Dataset):
            raise ValueError(
                "Expected a function that returns a Torch dataset, but got a "
                "`torch.utils.data.Dataset` instead."
            )
        if parallelism > 1:
            # Fix: logging.Logger.warn is a deprecated alias — use warning().
            logger.warning(
                "`SimpleTorchDatasource` doesn't support parallel reads. The "
                "`parallelism` argument will be ignored."
            )

        def read_fn() -> Iterator[Block]:
            # Load the entire dataset into memory.
            block = list(dataset_factory())
            # Store the data in a single block.
            yield block

        # No stats are known up front for an arbitrary Torch dataset.
        metadata = BlockMetadata(
            num_rows=None,
            size_bytes=None,
            schema=None,
            input_files=None,
            exec_stats=None,
        )
        return [ReadTask(read_fn, metadata)]
|
[
"noreply@github.com"
] |
jovany-wang.noreply@github.com
|
c40d8d8be1066750f7be1e796730e58bbf4277f6
|
585e04dbc338efb5a9f8861e9970bd9bfc224f44
|
/src/SpecialPointSelector/PointBlock.py
|
d2e1936c9ac41017a3ce5f039ec461d29db1cf8a
|
[] |
no_license
|
CONNJUR/PyScheduler
|
34dba79baf881216dfd06a1421849603f35b145f
|
150f60495d5a0b86bb211f4c5d691e7d79a9e0b7
|
refs/heads/master
| 2021-01-23T08:52:46.891768
| 2014-04-26T15:37:03
| 2014-04-26T15:37:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
'''
@author: mattf
required parameters:
per dimension:
range for point block (low and high)
algorithm:
generate a table of n-dimensional points containing all grid points within the given block ranges
'''
import ListGenerators as lg
def getSelector(blockRanges):
    """Return a zero-argument callable that yields the table of grid points
    falling inside the given per-dimension block ranges."""
    def func():
        # Defer to ListGenerators so the point table is (re)built each
        # time the selector is invoked.
        return lg.multipleDimensions(blockRanges)
    return func
|
[
"mfenwick100@gmail.com"
] |
mfenwick100@gmail.com
|
b4feb38bda3f725e1fb7b89c53d2f282c612023e
|
f65f755fd6568cbd56d789c45ceba4f46ea82327
|
/commons/requests/advanced.py
|
0fcaee746a9c386e19a477ddb32a0d3f95a60b5d
|
[] |
no_license
|
tmorayan007/python3-cookbook
|
bd77b749bc4e6519d6ae85471ed140fa44398bef
|
b00c545fdf26a03ee9504f1c79c402d055168d5d
|
refs/heads/master
| 2020-05-21T10:12:01.776694
| 2014-12-07T14:15:26
| 2014-12-07T14:15:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,212
|
py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: 高级主题
"""
import requests
import re
from PIL import Image
from io import StringIO
import json
from requests import Request, Session
from contextlib import closing
from requests.auth import AuthBase
from requests.auth import HTTPBasicAuth
def advanced():
    """Commented-out walkthrough of the advanced features of ``requests``.

    This function is a study notebook: every example below is deliberately
    left commented out, and the only executable statement is ``pass``.
    """
    # # Session objects
    # with requests.Session() as s:
    #     s.get('http://httpbin.org/cookies/set/sessioncookie/123456789')
    #     r = s.get("http://httpbin.org/cookies")
    #     print(r.text) # '{"cookies": {"sessioncookie": "123456789"}}'
    # s = requests.Session()
    # s.auth = ('user', 'pass')
    # s.headers.update({'x-test': 'true'})
    # # both 'x-test' and 'x-test2' are sent
    # s.get('http://httpbin.org/headers', headers={'x-test2': 'true'})
    # # Session-level values can be overridden per call; to drop a session
    # # parameter for one request, pass None for that key in the method call.
    # # Request and Response objects
    # r = requests.get('http://en.wikipedia.org/wiki/Monty_Python')
    # # headers the server sent back to us
    # print(r.headers)
    # # headers we sent to the server
    # print(r.request.headers)
    # # Prepared Requests: massage the body or headers before sending
    # s = Session()
    # req = Request('GET', url,
    #     data=data,
    #     headers=header
    # )
    # prepped = s.prepare_request(req)
    # # do something with prepped.body
    # # do something with prepped.headers
    # resp = s.send(prepped,
    #     stream=stream,
    #     verify=verify,
    #     proxies=proxies,
    #     cert=cert,
    #     timeout=timeout
    # )
    # print(resp.status_code)
    # # SSL certificate verification; verify defaults to True
    # requests.get('https://kennethreitz.com', verify=True)
    # requests.get('https://github.com', verify=True)
    # requests.get('https://kennethreitz.com', cert=('/path/server.crt', '/path/key'))
    # # Response body streaming
    # # By default the body downloads immediately; stream=True defers it
    # tarball_url = 'https://github.com/kennethreitz/requests/tarball/master'
    # r = requests.get(tarball_url, stream=True) # only headers fetched; connection stays open
    # if int(r.headers['content-length']) < TOO_LONG:
    #     content = r.content
    # # afterwards iterate with Response.iter_content / Response.iter_lines,
    # # or read the raw undecoded bytes via urllib3.HTTPResponse at Response.raw.
    # # Better still, scope the open connection with closing():
    # with closing(requests.get('http://httpbin.org/get', stream=True)) as r:
    #     if int(r.headers['content-length']) < TOO_LONG:
    #         content = r.content
    # # Streaming uploads: large files need not be read into memory first
    # with open('massive-body', 'rb') as f:
    #     requests.post('http://some.url/streamed', data=f)
    # # Multi-file POST upload
    # # <input type="file" name="images" multiple="true" required="true"/>
    # url = 'http://httpbin.org/post'
    # multiple_files = [('images', ('foo.png', open('foo.png', 'rb'), 'image/png')),
    #     ('images', ('bar.png', open('bar.png', 'rb'), 'image/png'))]
    # r = requests.post(url, files=multiple_files)
    # print(r.text)
    #
    # # Custom authentication
    # requests.get('http://pizzabin.org/admin', auth=PizzaAuth('kenneth'))
    # # Streaming requests
    # r = requests.get('http://httpbin.org/stream/20', stream=True)
    # for line in r.iter_lines():
    #     # filter out keep-alive new lines
    #     if line:
    #         print(json.loads(line.decode('utf-8')))
    # # Proxies
    # proxies = {
    #     "http": "http://10.10.1.10:3128",
    #     "https": "http://10.10.1.10:1080",
    # }
    # # Proxy with HTTP basic auth embedded in the URL
    # proxies = {
    #     "http": "http://user:pass@10.10.1.10:3128/",
    # }
    # requests.get("http://example.org", proxies=proxies)
    # # GitHub comment-posting example
    # body = json.dumps({"body": "Sounds great! I'll get right on it!"})
    # url = "https://api.github.com/repos/kennethreitz/requests/issues/482/comments"
    # auth = HTTPBasicAuth('fake@example.com', 'not_a_real_password')
    # r = requests.post(url=url, data=body, auth=auth)
    # print(r.status_code)
    # content = r.json().decode('utf-8')
    # print(content['body'])
    # # Link Headers
    # url = 'https://api.github.com/users/kennethreitz/repos?page=1&per_page=10'
    # r = requests.head(url=url)
    # print(r.headers['link'])
    # print(r.links["next"])
    # print(r.links["last"])
    # # Timeouts: first value is the connect timeout, second the read timeout.
    # r = requests.get('https://github.com', timeout=(3.05, 27))
    pass
class PizzaAuth(AuthBase):
    """requests auth hook that stamps each outgoing request with an
    ``X-Pizza`` header carrying the configured username."""

    def __init__(self, username):
        # Remembered here and attached to every request sent with this auth.
        self.username = username

    def __call__(self, r):
        # requests invokes the auth object with the prepared request;
        # mutate it in place and hand it back.
        r.headers['X-Pizza'] = self.username
        return r
# Run the (fully commented-out) walkthrough when executed as a script.
if __name__ == '__main__':
    advanced()
|
[
"yidao620@gmail.com"
] |
yidao620@gmail.com
|
c5444fa6167a7d2a3aa8f04ce909bc56b920bc33
|
d6a152b8662af82ec604fa63c5c415dc6b59699b
|
/courses/migrations/0031_building_remove_course_recitations_course_room_and_more.py
|
b3803bd066cf5dd883a56508c7cb17c1957452cb
|
[] |
no_license
|
rybesh/aeshin
|
7cf433ba93309f49e2ff676c2d4568244f81ee52
|
292867a8b80031cacfce70c67387c656c3cb191b
|
refs/heads/master
| 2023-08-19T00:17:40.042842
| 2023-08-17T17:47:55
| 2023-08-17T17:47:55
| 22,109,808
| 0
| 0
| null | 2023-09-05T14:05:34
| 2014-07-22T15:40:33
|
Python
|
UTF-8
|
Python
| false
| false
| 3,607
|
py
|
# Generated by Django 4.2.4 on 2023-08-15 19:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the Building and Recitation models, remove Course.recitations,
    and give Course a room plus an optional building reference."""

    dependencies = [
        ("courses", "0030_assignment_is_inclass"),
    ]

    operations = [
        migrations.CreateModel(
            name="Building",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("name", models.CharField(max_length=80)),
                ("url", models.URLField()),
            ],
        ),
        migrations.RemoveField(
            model_name="course",
            name="recitations",
        ),
        migrations.AddField(
            model_name="course",
            name="room",
            field=models.CharField(blank=True, max_length=20),
        ),
        migrations.AlterField(
            model_name="course",
            name="year",
            # Identical (2011, "2011") ... (2027, "2027") pairs as before,
            # expressed as a comprehension instead of 17 hand-written tuples.
            field=models.IntegerField(
                choices=[(year, str(year)) for year in range(2011, 2028)]
            ),
        ),
        migrations.CreateModel(
            name="Recitation",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("times", models.CharField(max_length=64)),
                ("number", models.CharField(max_length=20)),
                ("room", models.CharField(max_length=20)),
                (
                    "building",
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.PROTECT,
                        to="courses.building",
                    ),
                ),
                (
                    "course",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.PROTECT,
                        related_name="recitations",
                        to="courses.course",
                    ),
                ),
                (
                    "instructor",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.PROTECT,
                        to="courses.instructor",
                    ),
                ),
            ],
        ),
        migrations.AddField(
            model_name="course",
            name="building",
            field=models.ForeignKey(
                blank=True,
                null=True,
                on_delete=django.db.models.deletion.PROTECT,
                to="courses.building",
            ),
        ),
    ]
|
[
"ryanshaw@unc.edu"
] |
ryanshaw@unc.edu
|
ffb44f0a9fd857ab3507d770761b563fad1597c2
|
a3a3e1298db9555eda37f8da0c74a437d897cb1f
|
/compiled/Python2/Euler_Problem-014.py
|
b6918329c20a65cd0488ccd346af8202aef2a9dc
|
[
"MIT"
] |
permissive
|
LStepanek/Project-Euler_Befunge
|
58f52254ee039ef6a5204fc65e62426c5e9d473a
|
f35fb2adecd737e410dee7b89b456cd61b25ce78
|
refs/heads/master
| 2021-01-01T17:51:52.413415
| 2017-05-03T17:23:01
| 2017-05-03T17:26:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,451
|
py
|
#!/usr/bin/env python2
# transpiled with BefunCompile v1.1.0 (c) 2015
import sys
def td(a, b):
    """Befunge-style division: floor-divide a by b, with division by
    zero defined to yield 0."""
    if b == 0:
        return 0
    return a // b
def tm(a, b):
    """Befunge-style modulo: a mod b, with modulo by zero defined as 0."""
    if b == 0:
        return 0
    return a % b
# The Befunge data stack shared by sp/sa/sr.
s = []


def sp():
    """Pop the top of the stack; an empty stack yields 0 (Befunge rule)."""
    global s
    return s.pop() if s else 0
def sa(v):
    """Push *v* onto the shared Befunge stack."""
    global s
    s.append(v)
def sr():
    """Peek at the top of the shared stack without popping; empty -> 0."""
    global s
    return s[-1] if s else 0
# Storage cells of the transpiled Befunge program (Project Euler 14 per the
# file name).  Presumably x0 holds the best chain length seen and x1 the
# starting number that produced it — TODO confirm against the Befunge source.
x0=0
x1=32
# Each _N function below is one basic block of the compiled program; it
# returns the index of the next block to run.  Returning 12 stops the
# dispatcher loop at the bottom of this file.
def _0():
    sa(4)
    sa(1)
    sa(2)
    return 1
def _1():
    v0=sp()
    v1=sp()
    sa(v0)
    sa(v1)
    sa(sp()+1);
    v0=sp()
    v1=sp()
    sa(v0)
    sa(v1)
    return (11)if(sr()!=1)else(2)
def _2():
    sp();
    return (3)if(sr()<x0)else(10)
def _3():
    sp();
    return 4
def _4():
    return (6)if(sr()<=1000000)else(5)
def _5():
    # Terminal block: writes "<x1> :<x0>" to stdout and halts (returns 12).
    global x1
    global t0
    global x0
    sys.stdout.write(str(x1))
    sys.stdout.flush()
    t0=x0
    sys.stdout.write(" :")
    sys.stdout.flush()
    sys.stdout.write(str(t0))
    sys.stdout.flush()
    return 12
def _6():
    sa(sp()+1);
    sa(sr());
    sa(1)
    v0=sp()
    v1=sp()
    sa(v0)
    sa(v1)
    sa(tm(sr(),2))
    return 7
def _7():
    # Branch on parity flag left on the stack: odd -> _9 (3n+1), even -> _8.
    return (9)if(sp()!=0)else(8)
def _8():
    sa(td(sp(),2))
    return 1
def _9():
    sa(sp()*3);
    sa(sp()+1);
    return 1
def _10():
    # Record a new best (length, start) pair from the stack.
    global x0
    global x1
    x0=sp()
    x1=sr()
    return 4
def _11():
    sa(td(sp(),1))
    sa(tm(sr(),2))
    return 7
# Dispatcher: jump table indexed by the block id each _N returns.
m=[_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11]
c=0
while c<12:
    c=m[c]()
|
[
"mailport@mikescher.de"
] |
mailport@mikescher.de
|
876b2c535297256a3cc1025594131167dfffa2ab
|
03fe3e8201f8d490af2f4acd03f986bf45e97f8e
|
/binary_search_tree.py
|
9ab5215db1aa9406c77358baca94268077024f58
|
[] |
no_license
|
samarthhegdekalgar/PythonDataStructure
|
f1eb8a11374643cdb59e9af7d1008f4b5ecafdd1
|
fae60e3f6290a8a9d1287cc8eae5ded2bd85e711
|
refs/heads/master
| 2020-08-08T18:32:21.828779
| 2019-10-29T18:10:55
| 2019-10-29T18:10:55
| 213,889,455
| 0
| 0
| null | 2019-10-29T18:10:56
| 2019-10-09T10:32:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,091
|
py
|
class node:
    """One node of a binary search tree: a payload and two child links."""

    def __init__(self, value=None):
        self.value = value
        self.left_child = None
        self.right_child = None


class binary_search_tree:
    """Unbalanced binary search tree with insert, in-order print,
    height, and membership search."""

    def __init__(self):
        self.root = None

    def insert(self, value):
        """Insert *value*; duplicates are reported and dropped."""
        if self.root is None:
            self.root = node(value)
        else:
            self._insert(value, self.root)

    def _insert(self, value, current):
        # Recurse down until a free slot on the correct side appears.
        if value < current.value:
            if current.left_child is None:
                current.left_child = node(value)
            else:
                self._insert(value, current.left_child)
        elif value > current.value:
            if current.right_child is None:
                current.right_child = node(value)
            else:
                self._insert(value, current.right_child)
        else:
            print('Value already present!')

    def print_tree(self):
        """Print each stored value on its own line, in ascending order."""
        if self.root is not None:
            self._print_tree(self.root)

    def _print_tree(self, current):
        # In-order traversal: left subtree, node, right subtree.
        if current is None:
            return
        self._print_tree(current.left_child)
        print(str(current.value))
        self._print_tree(current.right_child)

    def height(self):
        """Return the number of levels; an empty tree has height 0."""
        return self._height(self.root, 0) if self.root is not None else 0

    def _height(self, current, depth):
        if current is None:
            return depth
        return max(self._height(current.left_child, depth + 1),
                   self._height(current.right_child, depth + 1))

    def search(self, value):
        """Return True iff *value* is stored in the tree."""
        if self.root is None:
            return False
        return self._search(value, self.root)

    def _search(self, value, current):
        # Iterative descent: equal hit wins, missing child means absent.
        while current is not None:
            if value == current.value:
                return True
            current = (current.left_child if value < current.value
                       else current.right_child)
        return False
def fill_tree(tree, num_elements=100, max_int=1000):
    """Insert *num_elements* uniformly random ints from [0, max_int] into
    *tree* (duplicates possible) and return the same tree object."""
    from random import randint
    for _ in range(num_elements):
        tree.insert(randint(0, max_int))
    return tree
# Demo driver: fill a tree with 100 random ints in [0, 1000], print it
# in sorted (in-order) order, then report its height and a membership probe.
tree = binary_search_tree()
tree = fill_tree(tree)
tree.print_tree()
print('tree height is:',str(tree.height()))
print(tree.search(999))
|
[
"samarthhegdekalgar@gmail.com"
] |
samarthhegdekalgar@gmail.com
|
902014dc161f12e20fa6bff50b6eb6c16379587d
|
5d434b255037add0268f73914cf3fa7e63f3a320
|
/orchestra/migrations/0009_auto_20150528_1910.py
|
dc040ab9b16a1bb7811a9c9ed294cd8475d8d177
|
[
"Apache-2.0",
"CC-BY-3.0"
] |
permissive
|
ksbek/orchestra
|
781c7cc599ca85c347772a241330c7261203d25b
|
07556717feb57efcf8fb29a1e2e98eebe2313b8c
|
refs/heads/master
| 2021-01-01T18:52:43.068533
| 2017-07-19T18:45:19
| 2017-07-19T18:45:19
| 98,458,290
| 0
| 1
| null | 2017-07-26T19:24:48
| 2017-07-26T19:24:48
| null |
UTF-8
|
Python
| false
| false
| 680
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds Project.review_document_url
    and restricts Task.step_slug to two fixed choices."""
    dependencies = [
        ('orchestra', '0008_auto_20150520_1953'),
    ]
    operations = [
        migrations.AddField(
            model_name='project',
            name='review_document_url',
            field=models.URLField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='task',
            name='step_slug',
            # NOTE(review): the label ' Content Extraction' carries a leading
            # space — looks accidental, but it must stay in sync with the
            # model definition, so it is left untouched here.
            field=models.CharField(choices=[(
                'content_extraction', ' Content Extraction'), ('copy_pass', 'Copy Pass')], max_length=200),
        ),
    ]
|
[
"marcua@marcua.net"
] |
marcua@marcua.net
|
e51f1cd7dc469b6beb66cb2925073094e2718eb3
|
192b4fe6cc696664488d36dc8ed503cedfc51724
|
/askdjango/settings/common.py
|
c66622f47bf1d7d30adea8c2c3f15d6f588513f6
|
[] |
no_license
|
jucie15/askdjango-by-p.rogramming
|
302cc956c317abdd04495f3dc4ee55dd0c545c05
|
3d7988e6a7eebcb22a2b6bc5b56b8fa6875b83d5
|
refs/heads/master
| 2021-01-19T09:37:08.011398
| 2017-02-16T02:54:15
| 2017-02-16T02:54:15
| 82,130,898
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,803
|
py
|
"""
Django settings for askdjango project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# 환경변수로 설정함으로써 문자열로 받아온다.
#os.environ['MYPASSWORD']
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#c&7-l(!to8^@nn+93!#wgth1_o==q)++$3pfj*akx7lc1(o=a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
#django apps
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#thrid party apps
'django_extensions',
#local apps
'blog.apps.BlogConfig',
'webtoon.apps.WebtoonConfig',
'accounts.apps.AccountsConfig',
'shop.apps.ShopConfig',
'journal.apps.JournalConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'askdjango.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'askdjango', 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'askdjango.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'

# IANA time zone names are case-sensitive; the previous value 'ASIA/SEOUL'
# is not a valid zone and makes zoneinfo/pytz lookups fail as soon as
# Django localizes a datetime.  The canonical name is 'Asia/Seoul'.
TIME_ZONE = 'Asia/Seoul'

USE_I18N = True
USE_L10N = True
USE_TZ = True
from django.contrib.messages import constants
MESSAGE_TAGS = {constants.ERROR: 'danger'}
MESSAGE_LEVEL = constants.DEBUG
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'askdjango', 'static'),
]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, '..', 'media')
|
[
"jucie15@nate.com"
] |
jucie15@nate.com
|
9eea1130f56f5ed45872d330d9569ba768003b4b
|
9bc318535bbcaaa7fb15a18929fc11a2bbf531d1
|
/satori-rules/plugin/libs/pymongo/server.py
|
f1bb181b5e7f3ab468ed6bb7f45ab881cd07995f
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
leancloud/satori
|
dcab126548a54fde6d02d79053b239456439d211
|
701caccbd4fe45765001ca60435c0cb499477c03
|
refs/heads/master
| 2022-12-10T23:33:53.046905
| 2021-04-08T08:20:45
| 2021-04-08T08:20:45
| 67,022,336
| 259
| 89
|
Apache-2.0
| 2022-12-08T02:12:01
| 2016-08-31T09:13:02
|
Python
|
UTF-8
|
Python
| false
| false
| 6,414
|
py
|
# Copyright 2009-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Communicate with one MongoDB server in a topology."""
import contextlib
from datetime import datetime
from pymongo.errors import ConfigurationError
from pymongo.message import _Query, _convert_exception
from pymongo.response import Response, ExhaustResponse
from pymongo.server_type import SERVER_TYPE
class Server(object):
def __init__(self, server_description, pool, monitor):
"""Represent one MongoDB server."""
self._description = server_description
self._pool = pool
self._monitor = monitor
def open(self):
"""Start monitoring, or restart after a fork.
Multiple calls have no effect.
"""
self._monitor.open()
def reset(self):
"""Clear the connection pool."""
self.pool.reset()
def close(self):
"""Clear the connection pool and stop the monitor.
Reconnect with open().
"""
self._monitor.close()
self._pool.reset()
def request_check(self):
"""Check the server's state soon."""
self._monitor.request_check()
def send_message(self, message, all_credentials):
"""Send an unacknowledged message to MongoDB.
Can raise ConnectionFailure.
:Parameters:
- `message`: (request_id, data).
- `all_credentials`: dict, maps auth source to MongoCredential.
"""
_, data, max_doc_size = self._split_message(message)
with self.get_socket(all_credentials) as sock_info:
sock_info.send_message(data, max_doc_size)
def send_message_with_response(
self,
operation,
set_slave_okay,
all_credentials,
listeners,
exhaust=False):
"""Send a message to MongoDB and return a Response object.
Can raise ConnectionFailure.
:Parameters:
- `operation`: A _Query or _GetMore object.
- `set_slave_okay`: Pass to operation.get_message.
- `all_credentials`: dict, maps auth source to MongoCredential.
- `exhaust` (optional): If True, the socket used stays checked out.
It is returned along with its Pool in the Response.
"""
with self.get_socket(all_credentials, exhaust) as sock_info:
duration = None
publish = listeners.enabled_for_commands
if publish:
start = datetime.now()
use_find_cmd = False
if sock_info.max_wire_version >= 4:
if not exhaust:
use_find_cmd = True
elif (isinstance(operation, _Query) and
not operation.read_concern.ok_for_legacy):
raise ConfigurationError(
'read concern level of %s is not valid '
'with a max wire version of %d.'
% (operation.read_concern.level,
sock_info.max_wire_version))
message = operation.get_message(
set_slave_okay, sock_info.is_mongos, use_find_cmd)
request_id, data, max_doc_size = self._split_message(message)
if publish:
encoding_duration = datetime.now() - start
cmd, dbn = operation.as_command()
listeners.publish_command_start(
cmd, dbn, request_id, sock_info.address)
start = datetime.now()
try:
sock_info.send_message(data, max_doc_size)
response_data = sock_info.receive_message(1, request_id)
except Exception as exc:
if publish:
duration = (datetime.now() - start) + encoding_duration
failure = _convert_exception(exc)
listeners.publish_command_failure(
duration, failure, next(iter(cmd)), request_id,
sock_info.address)
raise
if publish:
duration = (datetime.now() - start) + encoding_duration
if exhaust:
return ExhaustResponse(
data=response_data,
address=self._description.address,
socket_info=sock_info,
pool=self._pool,
duration=duration,
request_id=request_id,
from_command=use_find_cmd)
else:
return Response(
data=response_data,
address=self._description.address,
duration=duration,
request_id=request_id,
from_command=use_find_cmd)
@contextlib.contextmanager
def get_socket(self, all_credentials, checkout=False):
with self.pool.get_socket(all_credentials, checkout) as sock_info:
yield sock_info
@property
def description(self):
return self._description
@description.setter
def description(self, server_description):
assert server_description.address == self._description.address
self._description = server_description
@property
def pool(self):
return self._pool
def _split_message(self, message):
"""Return request_id, data, max_doc_size.
:Parameters:
- `message`: (request_id, data, max_doc_size) or (request_id, data)
"""
if len(message) == 3:
return message
else:
# get_more and kill_cursors messages don't include BSON documents.
request_id, data = message
return request_id, data, 0
def __str__(self):
d = self._description
return '<Server "%s:%s" %s>' % (
d.address[0], d.address[1],
SERVER_TYPE._fields[d.server_type])
|
[
"feisuzhu@163.com"
] |
feisuzhu@163.com
|
225b8847d41d7f2dbb23d207ab7e3f2e008d09ba
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03448/s351490464.py
|
8ef0a650aa3bf51b9136e3d78884bc1dc1ebead4
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
import numpy as np
# 複数個格納
# A B = map(int, input().split())
# 行列化
# A = np.array(A)
# A=A.reshape(1,-1)
# A=A.T
#行列の比較
#C=((A%2 == vector0).all())
A = int(input())
B = int(input())
C = int(input())
X = int(input())
gohyaku=(np.arange(0,500*A+1,500)).reshape(1,-1)
hyaku=(np.arange(0,100*B+1,100)).reshape(1,-1)
goju=(np.arange(0,50*C+1,50)).reshape(1,-1)
gohyaku1=X-(gohyaku+hyaku.T)
count=0
for i in range(A+1):
for k in range(B+1):
if 0 <= gohyaku1[k,i] <= C*50:
count=count+1
print(count)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
75450ca6792fd930c509be53015f24c85ebf1160
|
a7cddd66c2c1c444c3ad4cd9729efeaf76d0616f
|
/tests/utils/test_keyspaces.py
|
76667c0c46443bc2462adc9913fd04481b42a2c5
|
[
"MIT"
] |
permissive
|
hunse/nengo_spinnaker
|
e9a51ea63b87265757721c97572ec769bcaa8f32
|
ad9f62f1b03437881a13836300648291f6e0ca22
|
refs/heads/master
| 2021-01-15T17:41:59.052633
| 2015-06-30T13:35:37
| 2015-06-30T13:35:37
| 38,339,348
| 1
| 0
| null | 2015-07-01T00:01:50
| 2015-07-01T00:01:49
| null |
UTF-8
|
Python
| false
| false
| 1,427
|
py
|
import pytest
from rig.bitfield import BitField
from nengo_spinnaker.utils import keyspaces
def test_get_derived_keyspaces():
"""Test creation of derived keyspaces."""
ks = BitField()
ks.add_field("index")
ks.add_field("spam")
# General usage
kss = keyspaces.get_derived_keyspaces(ks, (slice(5), 5, 6, 7))
for i, x in enumerate(kss):
assert x.index == i
# Specify a field
kss = keyspaces.get_derived_keyspaces(ks, slice(1, 3),
field_identifier="spam")
for x, i in zip(kss, (1, 2)):
assert x.spam == i
# Fail when no maximum is specified
with pytest.raises(ValueError):
list(keyspaces.get_derived_keyspaces(ks, (slice(None))))
def test_Keyspaces_and_is_nengo_keyspace():
"""Test the dictionary-like getter for keyspaces."""
kss = keyspaces.KeyspaceContainer()
default_ks = kss["nengo"]
default_ks(object=0, cluster=0, connection=0, index=0)
other_ks = kss["other"]
assert kss.routing_tag is not None
assert kss.filter_routing_tag is not None
# Can easily determine what is and isn't a default keyspace
assert keyspaces.is_nengo_keyspace(default_ks)
assert not keyspaces.is_nengo_keyspace(other_ks)
# Assigning fields fixes sizing and positioning
with pytest.raises(Exception):
other_ks.get_mask()
kss.assign_fields()
other_ks.get_mask()
|
[
"andrew.mundy@ieee.org"
] |
andrew.mundy@ieee.org
|
3ac5f17b4ea6bfb2f0915e9c5b2472082e5ad133
|
c51d1722bcbcf083e3ddfae7a573f75c59762e9e
|
/sublime/tpost.py
|
2d31a3e6024cf1945e3a2abc0a6cdf67a6451094
|
[] |
no_license
|
etc-rc6/t-arch-working
|
805d342c73ec3c54e6037399098d8a3e940cbd11
|
6ed49a23e6510c36870c4cd3398ebd4633b635fc
|
refs/heads/master
| 2021-01-10T12:39:16.301420
| 2016-01-06T14:11:59
| 2016-01-06T14:11:59
| 49,134,479
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,196
|
py
|
"""Sublime Text plugin for post.py"""
import sublime, sublime_plugin, subprocess, time, json, os
MYPATH = os.path.dirname(os.path.abspath(__file__))
POST = os.path.join(MYPATH, 'post.py')
class WorkOrder(object):
def __init__(self):
self.wd = os.path.join(MYPATH, 'failures')
def delete(self):
open(self.wd, 'w').close()
def open(self, vstring=None):
self.todo_list = []
# extraneous
if not vstring:
try:
with open(self.wd, 'r') as nctn:
self.todo_list = json.load(nctn)
print('workorder: read failures')
except: pass
# /extraneous
else:
self.todo_list.append({'order':vstring})
def close(self, failed):
for value in failed:
del value['obj']
with open(self.wd, 'rw') as nctn:
prev = json.load(nctn)
prev += failed
nctn.write(json.dumps(failed))
class TPostCommand(sublime_plugin.TextCommand):
def run(self, edit):
self.post()
def post(self, use_vstring=True):
self.vstring = '' # "view" string
if use_vstring:
self.get_vstring()
self.work_order = WorkOrder()
self.work_order.open(self.vstring)
for value in self.work_order.todo_list:
p = subprocess.Popen(['python', POST, value['order']], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
value['obj'] = p
time.sleep(1)
self.handle_threads()
if self.failed:
self.work_order.close(self.failed)
def get_vstring(self):
"""About: get all the selected regions or get whole open file"""
if self.view.has_non_empty_selection_region():
sels = self.view.sel()
for x in sels:
self.vstring += self.view.substr(x)
else:
self.vstring += self.view.substr(sublime.Region(0,self.view.size()))
def handle_threads(self):
self.failed = []
for value in self.work_order.todo_list:
print(value)
value['obj'].wait()
if value['obj'].returncode == 11:
print('success')
else:
print (value['obj'].communicate()[1])
self.failed.append(value)
class ClearBufferCommand(WorkOrder, sublime_plugin.ApplicationCommand):
def run(self):
self.delete()
class BufferOnlyCommand(TPostCommand):
def run(self, edit):
self.post(False)
|
[
"me@me.com"
] |
me@me.com
|
179c23d04db416463887d7040f753a49ac52a73c
|
b39d9ef9175077ac6f03b66d97b073d85b6bc4d0
|
/Zutectra_WC500073708.py
|
6c4c21645eae398c1007cea465dfff759e743906
|
[] |
no_license
|
urudaro/data-ue
|
2d840fdce8ba7e759b5551cb3ee277d046464fe0
|
176c57533b66754ee05a96a7429c3e610188e4aa
|
refs/heads/master
| 2021-01-22T12:02:16.931087
| 2013-07-16T14:05:41
| 2013-07-16T14:05:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
{'_data': [[u'Unknown',
[['GI', u'Sm\xe4rta i \xf6vre delen av buken Mindre vanliga'],
['General', u'Sm\xe4rta, urticaria, hematom Vanliga']]]],
'_pages': [4, 5],
u'_rank': 2,
u'_type': u'LFSU'}
|
[
"urudaro@gmail.com"
] |
urudaro@gmail.com
|
80e863581a50b13c8748b51b8898b692cd8b3053
|
9edaf93c833ba90ae9a903aa3c44c407a7e55198
|
/travelport/models/search_event_2.py
|
969f4c131cb84079b26ec6cb5eb91dc4c88475b4
|
[] |
no_license
|
tefra/xsdata-samples
|
c50aab4828b8c7c4448dbdab9c67d1ebc519e292
|
ef027fe02e6a075d8ed676c86a80e9647d944571
|
refs/heads/main
| 2023-08-14T10:31:12.152696
| 2023-07-25T18:01:22
| 2023-07-25T18:01:22
| 222,543,692
| 6
| 1
| null | 2023-06-25T07:21:04
| 2019-11-18T21:00:37
|
Python
|
UTF-8
|
Python
| false
| false
| 654
|
py
|
from __future__ import annotations
from dataclasses import dataclass, field
from travelport.models.type_event_type_2 import TypeEventType2
from travelport.models.type_time_range_2 import TypeTimeRange2
__NAMESPACE__ = "http://www.travelport.com/schema/common_v32_0"
@dataclass
class SearchEvent2(TypeTimeRange2):
"""
Search for various reservation events.
"""
class Meta:
name = "SearchEvent"
namespace = "http://www.travelport.com/schema/common_v32_0"
type_value: None | TypeEventType2 = field(
default=None,
metadata={
"name": "Type",
"type": "Attribute",
}
)
|
[
"chris@komposta.net"
] |
chris@komposta.net
|
a4e41ce5ee9ab479335f405232b0374d7e7d5414
|
f51c281d823870e7dbbe1f871cd981bb0ec8c07e
|
/rank-transform-of-an-array/rank-transform-of-an-array.py
|
be43cd55c3b8ee990d2de962c9a917add78d33f3
|
[] |
no_license
|
Arxtage/leetcode
|
e5e02fc400afaa6c216e835a0ee74296e3134646
|
95fcea1fc810a13ca7ecaa1cde6d3609cc695a9d
|
refs/heads/main
| 2023-08-27T19:37:17.691898
| 2021-10-23T23:48:51
| 2021-10-23T23:48:51
| 383,264,226
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
class Solution:
def arrayRankTransform(self, arr: List[int]) -> List[int]:
# O(n)
hashmap = collections.defaultdict(int)
arr_sorted = sorted(list(set(arr)))
for i in range(len(arr_sorted)):
hashmap[arr_sorted[i]] = i + 1
for i in range(len(arr)):
arr[i] = hashmap[arr[i]]
return arr
|
[
"31474005+Arxtage@users.noreply.github.com"
] |
31474005+Arxtage@users.noreply.github.com
|
c61a1dcbc77b77c087170b0acc3b3964bbae8213
|
9edaf93c833ba90ae9a903aa3c44c407a7e55198
|
/netex/models/wire_link_ref_structure.py
|
49c4aa39bce03581476eb1f2d11e81bf9c601adf
|
[] |
no_license
|
tefra/xsdata-samples
|
c50aab4828b8c7c4448dbdab9c67d1ebc519e292
|
ef027fe02e6a075d8ed676c86a80e9647d944571
|
refs/heads/main
| 2023-08-14T10:31:12.152696
| 2023-07-25T18:01:22
| 2023-07-25T18:01:22
| 222,543,692
| 6
| 1
| null | 2023-06-25T07:21:04
| 2019-11-18T21:00:37
|
Python
|
UTF-8
|
Python
| false
| false
| 243
|
py
|
from dataclasses import dataclass
from .infrastructure_link_ref_structure import InfrastructureLinkRefStructure
__NAMESPACE__ = "http://www.netex.org.uk/netex"
@dataclass
class WireLinkRefStructure(InfrastructureLinkRefStructure):
pass
|
[
"chris@komposta.net"
] |
chris@komposta.net
|
ba7379ee6ee55b3a3d417119d9ffcd16f92d17f0
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03434/s231043811.py
|
58725b3b18c929136bcc39545b671da97998758f
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 540
|
py
|
import sys
import math
import itertools
import collections
import heapq
import re
import numpy as np
from functools import reduce
rr = lambda: sys.stdin.readline().rstrip()
rs = lambda: sys.stdin.readline().split()
ri = lambda: int(sys.stdin.readline())
rm = lambda: map(int, sys.stdin.readline().split())
rl = lambda: list(map(int, sys.stdin.readline().split()))
inf = float('inf')
mod = 10**9 + 7
n = ri()
li = sorted(rl(), reverse=True)
a = 0
b = 0
for i in range(n):
if i & 1 == 0:
a += li[i]
else:
b += li[i]
print(a-b)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
41ce20e169fcbe87426ff7cea186435b73c50ae0
|
9a05e1e8c950b091124d805ea70f24d2837b827c
|
/daydayup/cema_python/oneday/1_2change1.py
|
550314a509fd5c9dc9babee41a9aed8f25204e44
|
[] |
no_license
|
fanzongpeng/mywork
|
20676a9fe0e0599461a756ad194e4bd35aad4668
|
aa6d044bbab3c0288de48888b2cc7dbd7785c91b
|
refs/heads/master
| 2022-05-31T06:03:26.826914
| 2020-04-30T09:50:22
| 2020-04-30T09:50:22
| 257,189,615
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 815
|
py
|
my_str = "I love Python"
my_list = ["python", "java", "lanuage", "age"]
my_list2 = [24, 12, 2.3, 9.7]
my_tuple = ("python", 33, "java", 8.8)
my_dict = {"name": "linda", "age": 88}
my_list1 = ['a', 'a', 1, 1, 2, 3]
my_set = {1, 2, 3}
a = 10
print(type(a))
# 强制转换从int到str
a1 = str(a)
print(type(a1))
# str 转 int
print(type(int(a1)))
# list与tuple元组转换
print(tuple(my_list))
print(list(my_tuple))
# 列表转成set变成不重复的
print(set(my_list1))
# 字典类型转成set只有key值
print(set(my_dict))
# 字典转成列表,key,value可以单转
print(list(my_dict.values()))
print(list(my_dict))
# my_tuple1 = ('one', 1), ('two', 2), ('three', 3)
# my_list_tuple = [('one', 1), ('two', 2), ('three', 3)]
# # print(my_tuple1)
# print(type(my_list_tuple))
# print(dict(my_list_tuple))
|
[
"18210023228.com"
] |
18210023228.com
|
7736b0d581aa5a48107b2970929e39106a017b0b
|
7ec92031e28b1a92a10a9f252f99211663e0d8f9
|
/src/py/l0404.py
|
f10c6fd9600478f89fa456420ae59688baa37fea
|
[] |
no_license
|
SS4G/leetcode_2020
|
4eb63f6afd59f84e44334e78cb06c7b33a89dd15
|
9a9a8fc779e7456db77f88e7dcdcc1f5cae92c62
|
refs/heads/master
| 2020-06-29T17:12:39.488350
| 2020-02-08T01:07:08
| 2020-02-08T01:07:08
| 200,575,620
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 620
|
py
|
class Solution(object):
def isLeaf(self, root):
if root is None:
return False
return root.left is None and root.right is None
def helper(self, root, leftSum):
if root is None or self.isLeaf(root):
return
if self.isLeaf(root.left):
leftSum[0] += root.left.val
self.helper(root.left, leftSum)
self.helper(root.right, leftSum)
def sumOfLeftLeaves(self, root):
"""
:type root: TreeNode
:rtype: int
"""
leftSum = [0,]
self.helper(root, leftSum)
return leftSum[0]
|
[
"zihengs@opera.com"
] |
zihengs@opera.com
|
e3e74dbde5aa2e2be661c09d14954b7a98652ccb
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_309/ch95_2019_11_27_18_50_15_020676.py
|
2000a50c89ba9a929a27109ee30d898bc4254ee3
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
def mais_populoso(dic):
k = dic.keys()
for v[1],v[4] in dic.values():
soma = v[1] + v[4]
maispop[k] = max(soma)
return k
|
[
"you@example.com"
] |
you@example.com
|
f80268864677608dbee3b2b7ecd1bb4ba1dc5af0
|
5bce1118b13289308d23510f323c79aa972ddc27
|
/src/modules/darknight/darknightHelper.py
|
603a5835a327eb7035ca5a911afd98adc4c90f14
|
[] |
no_license
|
anupsl/pyApps
|
62b64b90723de32684bbabee402220317a4fe817
|
2651d502c366b87449a0c977a9876cc32521c57c
|
refs/heads/master
| 2022-07-03T05:49:12.828630
| 2020-05-10T17:25:26
| 2020-05-10T17:25:26
| 255,157,123
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,315
|
py
|
import traceback,random, pymongo
from datetime import datetime
from src.utilities.logger import Logger
from src.Constant.constant import constant
from src.modules.darknight.darknightThrift import DarknightThrift
from src.modules.darknight.darknightObject import DarknightObject
from src.utilities.utils import Utils
from src.utilities.dbhelper import dbHelper
from src.utilities.mongoHelper import MongoHelper
class DarknightHelper():
@staticmethod
def checkDarknightConn(ignoreConnectionError=False):
Utils.checkServerConnection('DARK_KNIGHT_THRIFT_SERVICE', DarknightThrift, 'darknightPort', ignoreConnectionError)
@staticmethod
def getConnObj(newConnection=False):
port = constant.config['darknightPort']
connPort = str(port) + '_obj'
if connPort in constant.config:
if newConnection:
constant.config[connPort].close()
constant.config[connPort] = DarknightThrift(port)
return constant.config[connPort]
else:
return DarknightThrift(port)
@staticmethod
def getEmailStatus(email):
query = 'select status from email_status where email = "'+email+'"'
result = dbHelper.queryDB(query, "darknight")
if result:
return result[0][0]
else:
return 0
@staticmethod
def generateSmsWhitelistingData(tmpValue, mobile = '918660430751'):
if constant.config['cluster'] in ['nightly', 'staging']:
for i in range(0, 3):
try:
testCol = constant.config['mongoConn']
value = {
"mobile": mobile,
"delivered": 0,
"not_delivered": 0
}
value.update(tmpValue)
value['total'] = value['delivered'] + value['not_delivered']
batchReq = []
batchReq.append(pymongo.ReplaceOne({'mobile': mobile}, value, upsert=True))
testCol.bulk_write(batchReq)
Logger.log(testCol.find({'mobile' : mobile})[0])
return
except pymongo.errors.ConnectionFailure as e:
Logger.log(e)
port = constant.config['INTOUCH_DB_MONGO_MASTER']
if Utils.restartTunnel(port):
DarknightHelper.getMongoConnection('whitelisting', 'mobile_status')
else:
break
except Exception as e:
break
raise Exception(e)
@staticmethod
def monthlyDelta():
monthList = []
date = datetime.now()
for delta in range(0, 8):
m, y = (date.month-delta) % 12, date.year + ((date.month)-delta-1) // 12
if not m: m = 12
d = min(date.day, [31,29 if y%4==0 and not y%400==0 else 28,31,30,31,30,31,31,30,31,30,31][m-1])
monthList.append(date.replace(day=d,month=m, year=y))
return monthList
@staticmethod
def getMongoConnection(database, collection):
port = constant.config['INTOUCH_DB_MONGO_MASTER']
m = MongoHelper(database, collection, port)
constant.config['mongoConn'] = m.mongodb
|
[
"anup@CAP-LAP-450.local"
] |
anup@CAP-LAP-450.local
|
46d063dfbd564d8ef90b175f6753adb82a364ec0
|
fc3d16b7a195652d4276d3112c8be856bd908f9a
|
/news_app/source_class.py
|
9c72380aa29892333fd4d9981523f34d8ab838cd
|
[
"MIT"
] |
permissive
|
petermirithu/pyra-s_news_centre
|
eb0f1735a62763a86c289facabc3985c1398ad6c
|
c8726f2db2d007b5584685a969d66df41be50ba5
|
refs/heads/master
| 2021-06-30T22:28:51.639786
| 2019-11-18T14:27:03
| 2019-11-18T14:27:03
| 221,890,761
| 0
| 0
|
MIT
| 2021-03-20T02:11:35
| 2019-11-15T09:31:49
|
Python
|
UTF-8
|
Python
| false
| false
| 212
|
py
|
class Source:
'''
source class to define how news sources look
'''
def __init__(self,id,name,language,country):
self.id=id
self.name=name
self.language=language
self.country=country
|
[
"pyra_m.k@yahoo.com"
] |
pyra_m.k@yahoo.com
|
f2893378baeddcab8d17ddf72b21c3ed6cd59617
|
2a70521e76564ff14c63100adaecc87fee40f8f4
|
/profiles/views.py
|
c6213b66adf442c56a31d9681a5f5d72b15c6348
|
[] |
no_license
|
Telling/bornhack-website
|
6f6bef9ea632c675b1a7e10dae69acd00def0d42
|
18a9ae27867c046a2b9fac46aa886c2788b139e7
|
refs/heads/master
| 2020-12-01T11:48:34.633206
| 2016-08-03T12:06:46
| 2016-08-03T12:06:46
| 64,944,894
| 1
| 0
| null | 2016-08-04T15:14:34
| 2016-08-04T15:14:34
| null |
UTF-8
|
Python
| false
| false
| 1,147
|
py
|
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views.generic import DetailView, UpdateView
from django.core.urlresolvers import reverse_lazy
from django.contrib import messages
from . import models, forms
class ProfileDetail(LoginRequiredMixin, DetailView):
model = models.Profile
def get_object(self, queryset=None):
return models.Profile.objects.get(user=self.request.user)
class ProfileUpdate(LoginRequiredMixin, UpdateView):
model = models.Profile
form_class = forms.ProfileForm
success_url = reverse_lazy('profiles:detail')
def get_object(self, queryset=None):
return models.Profile.objects.get(user=self.request.user)
def get_form_kwargs(self):
kwargs = super(ProfileUpdate, self).get_form_kwargs()
kwargs['initial'] = {'email': self.object.user.email}
return kwargs
def form_valid(self, form, **kwargs):
self.object.user.email = form.cleaned_data['email']
self.object.user.save()
messages.info(self.request, 'Your profile has been updated.')
return super(ProfileUpdate, self).form_valid(form, **kwargs)
|
[
"valberg@orn.li"
] |
valberg@orn.li
|
2d103b89fc1095dd48685e70e5488433471a6d7c
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03671/s288171235.py
|
5a407677fafb20f7d42eccf5001ae17587259711
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
a,b,c=map(int,input().split())
if a<b<c:
print(a+b)
elif a<c<b:
print(a+c)
elif b<a<c:
print(b+a)
elif b<c<a:
print(b+c)
elif c<a<b:
print(c+a)
else:
print(c+b)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
1f4dc6b15a35654f4fd7cbffe57fb09b08ec4292
|
565ae8473c545c43341f5511b9633e97f0e4da8b
|
/course3_python_advanced/Advanced EXAM/PRACTICE/2020.02.18 - 18 feb - 300/02_book_worm_ab_40%-100%.py
|
5ffe46f0b9e7b363acc9cf6207cf7537dfacd5ae
|
[] |
no_license
|
andriiburka/Web-Development-with-Python
|
3934c1a3945bd983ab39d38b97f1af16fe784207
|
b6927653a2c6a9cc10a8768395233e347624c49a
|
refs/heads/master
| 2022-11-21T21:42:04.898254
| 2020-07-29T22:59:56
| 2020-07-29T22:59:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,704
|
py
|
from collections import deque
word = input()
matrix = deque([list(map(str, "".join(input()))) for row in range(int(input()))])
p_position = [[row, col] for row in range(len(matrix)) for col in range(len(matrix[row])) if matrix[row][col] == 'P']
r, c = p_position[0]
for i in range(int(input())):
cmd = input()
if cmd == 'up':
if r - 1 > -1:
tmp = matrix[r][c]
matrix[r][c] = '-'
if matrix[r - 1][c].isalpha():
word += matrix[r - 1][c]
matrix[r - 1][c] = tmp
r -= 1
continue
elif r - 1 < 0:
word = word[:-1]
elif cmd == 'down':
if r + 1 < len(matrix):
tmp = matrix[r][c]
matrix[r][c] = '-'
if matrix[r + 1][c].isalpha():
word += matrix[r + 1][c]
matrix[r + 1][c] = tmp
r += 1
continue
elif r + 1 > len(matrix) - 1:
word = word[:-1]
elif cmd == 'left':
if c - 1 >= 0:
tmp = matrix[r][c]
matrix[r][c] = '-'
if matrix[r][c - 1].isalpha():
word += matrix[r][c - 1]
matrix[r][c - 1] = tmp
c -= 1
continue
elif c - 1 < 0:
word = word[:-1]
elif cmd == 'right':
if c + 1 < len(matrix):
tmp = matrix[r][c]
matrix[r][c] = '-'
if matrix[r][c + 1].isalpha():
word += matrix[r][c + 1]
matrix[r][c + 1] = tmp
c += 1
continue
elif c + 1 > len(matrix) - 1:
word = word[:-1]
print(word)
[print("".join(row)) for row in matrix]
|
[
"andriiburka@gmail.com"
] |
andriiburka@gmail.com
|
2f92923a11d152fe876581c9aa12ada7aca0867d
|
0309bd25cdd8e89297f507be202634b07f5f6e85
|
/LeetCode/Easy/Python3/tests/test_findpivotindex.py
|
58071d2723b3c74b2a0b101e3193af71ebb68f08
|
[] |
no_license
|
AmyShackles/algo-practice
|
10fc4a5c5926232ff2b0aed6183cec9f21bf15f3
|
876e3be57357651348465f70ab312d4ac98d667a
|
refs/heads/main
| 2023-04-03T08:29:33.725236
| 2021-04-13T19:20:18
| 2021-04-13T19:20:18
| 338,672,757
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
import unittest
from Python3.findpivotindex import Solution
class TestpivotIndex(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_1(self):
# For sanity checking:
# Input: nums = [1,7,3,6,5,6]
# Output: 3
self.assertEqual(Solution.pivotIndex([1, 7, 3, 6, 5, 6]), 3)
def test_2(self):
# For sanity checking:
# Input: nums = [1,2,3]
# Output: -1
self.assertEqual(Solution.pivotIndex([1, 2, 3]), -1)
def test_3(self):
# For sanity checking:
# Input: nums = [2,1,-1]
# Output: 0
self.assertEqual(Solution.pivotIndex([2, 1, -1]), 0)
if __name__ == "__main__":
unittest.main()
|
[
"amyshackles@gmail.com"
] |
amyshackles@gmail.com
|
1323a5fcd18009582e6eb70f125a050d19ab596d
|
781e2692049e87a4256320c76e82a19be257a05d
|
/assignments/python/wc/src/934.py
|
5bd1ce553337e88919cc9f3a7dad7be6a3849492
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
import collections
from collections import Counter
def word_count(word):
cnt = Counter()
temp = []
tempStr = ""
tempLen = len(word)
tempCount = 0
print tempLen
for l in word:
if l != " " and l != "\n":
tempStr += l
print tempStr
else:
if tempStr != "":
temp.append(tempStr)
tempStr = ""
if tempCount == tempLen-1:
temp.append(tempStr)
print tempCount
tempCount += 1
for l in temp:
cnt[l] += 1
return cnt
print(word_count("yo"))
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
6139dcf79ba85c68ed2f324e8a92a94503733e64
|
5fc8864b934ae90b438375b4c705ed2d350d4afc
|
/sheep/utils/qiniu/cache.py
|
4f26ff0ef542572342e77f5ed77515efe23546f6
|
[] |
no_license
|
L-BraveDog/sheep
|
71a1aabfc756a8458055c52936d2713f9aab24c8
|
4d6eda7c6358571d6680a1f2a2949ee3ac4220e7
|
refs/heads/master
| 2023-07-29T07:43:24.164143
| 2020-08-12T11:33:05
| 2020-08-12T11:33:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,308
|
py
|
# -*- coding: utf-8 -*-
# author:CY
# datetime:2020/7/22 15:48
import json
import hashlib
from rest_framework.response import Response
from rest_framework_extensions.cache.decorators import CacheResponse
class QiNiuCacheResponse(CacheResponse):
def process_cache_response(self,
view_instance,
view_method,
request,
args,
kwargs):
key = self.generate_key(request)
data = self.cache.get(key)
if not data:
response = view_method(view_instance, request, *args, **kwargs)
if not response.status_code >= 400 or self.cache_errors:
self.cache.set(key, response.data, self.timeout)
else:
response = Response(data=data)
if not hasattr(response, '_closable_objects'):
response._closable_objects = []
return response
@staticmethod
def generate_key(request):
""" 加密影响性能.去除"""
# hl = hashlib.md5()
# hl.update(json.dumps(request.data).encode(encoding='utf-8'))
# return hl.hexdigest()
return f'qiniu_{request.data.get("bucket", None)}'
qi_niu_cache_response = QiNiuCacheResponse
|
[
"907031027@qq.com"
] |
907031027@qq.com
|
12f32d4f3f4bb1de9257cc030b7cddf990cc172f
|
500bca3e22bd0c30c79b74918e9847742b3c428e
|
/cli/jobs/pipelines/add-column-and-word-count-using-spark/src/add_greeting_column.py
|
2afc93333d7b05cbae3195d47f1536e7fd2d811b
|
[
"MIT"
] |
permissive
|
Azure/azureml-examples
|
2304c862fd2e36e6640ecc4d09f69c5ed93b48ab
|
e5f7b247d4753f115a8f7da30cbe25294f71f9d7
|
refs/heads/main
| 2023-08-31T00:10:14.107509
| 2023-08-30T17:29:22
| 2023-08-30T17:29:22
| 289,334,021
| 1,219
| 1,074
|
MIT
| 2023-09-14T16:00:55
| 2020-08-21T18:04:26
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 474
|
py
|
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf
import argparse
from utils import util
spark = SparkSession.builder.getOrCreate()
sc = spark.sparkContext
parser = argparse.ArgumentParser()
parser.add_argument("--file_input")
args = parser.parse_args()
greeting_udf = udf(util.greeting)
df = spark.read.option("header", "true").csv(args.file_input)
df = df.withColumn("greeting", greeting_udf(df.species)).show()
print(sc.getConf().getAll())
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
9a13e896e1e705299c0e6cce39a6869ed0000f39
|
647b5eb4bdcd8cbc903a8576cf50385219905a00
|
/euler/python/35.py
|
d2011060dfb1f11b21f1171e68d13ee14ff79cb1
|
[] |
no_license
|
kradalby/programming-tasks
|
149ef4a62903a940c7297196e6984e17dee28011
|
96685634a8fea87cacda3e75be377383ac67a0ef
|
refs/heads/master
| 2021-04-29T10:17:21.707426
| 2020-01-09T17:16:52
| 2020-01-09T17:16:52
| 77,645,677
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 990
|
py
|
from math import floor, sqrt
def is_prime(n):
    """Deterministic 6k±1 trial-division primality test.

    Bug fix: the original's `n < 4` branch returned True for 0 and every
    negative number (it only special-cased 1 and 2 first). Non-positive
    inputs now correctly return False.
    """
    if n < 2:
        return False
    if n < 4:
        return True          # 2 and 3 are prime
    if n % 2 == 0 or n % 3 == 0:
        return False
    # Remaining candidates have the form 6k±1, up to floor(sqrt(n)).
    limit = floor(sqrt(n))
    f = 5
    while f <= limit:
        if n % f == 0 or n % (f + 2) == 0:
            return False
        f += 6
    return True
def get_primes(n):
    """Return every prime in [1, n], in ascending order."""
    return [candidate for candidate in range(1, n + 1) if is_prime(candidate)]
def is_circular_prime(n):
    """Return True iff every rotation of n's decimal digits is prime.

    Bug fix: removed the debug `print(ns)` that executed on every rotation
    (for the Euler-35 driver below, that printed millions of lines).
    """
    digits = str(n)
    for _ in range(len(digits)):
        digits = digits[1:] + digits[:1]   # rotate left by one digit
        # After len(digits) rotations we have checked n itself as well.
        if not is_prime(int(digits)):
            return False
    return True
# Project Euler 35: count circular primes below one million.
primes = get_primes(1000000)
print(len(primes))
circ = [p for p in primes if is_circular_prime(p)]
print(len(circ))
|
[
"kradalby@kradalby.no"
] |
kradalby@kradalby.no
|
eb182742ba237c0829e394079c6126094edb1ed2
|
119a85a388fe436361530fbb47932e704d749557
|
/PEAK-0.5a4dev_r2085/build/lib.macosx-10.6-x86_64-2.7/peak/metamodels/UML13/model/Behavioral_Elements/Use_Cases.py
|
efb8d979100cb09f4d8b6afecba259544f901264
|
[
"Python-2.0"
] |
permissive
|
chrisrgunn/cs156project
|
014d5b05c6bf0e08ab8bd0dea525057d0e65b9a7
|
e5414a37f9793c8b0674695b948482b559b18ea6
|
refs/heads/master
| 2021-01-19T14:09:49.046539
| 2017-05-24T02:10:29
| 2017-05-24T02:10:29
| 88,128,762
| 0
| 2
| null | 2017-05-04T23:49:09
| 2017-04-13T05:36:10
|
Python
|
UTF-8
|
Python
| false
| false
| 3,294
|
py
|
# ------------------------------------------------------------------------------
# Package: peak.metamodels.UML13.model.Behavioral_Elements.Use_Cases
# File: peak\metamodels\UML13\model\Behavioral_Elements\Use_Cases.py
# ------------------------------------------------------------------------------
from peak.util.imports import lazyModule as _lazy
_model = _lazy('peak.model.api')
#_config = _lazy('peak.config.api')
Core = _lazy(__name__, '../../Foundation/Core')
Common_Behavior = _lazy(__name__, '../Common_Behavior')
# ------------------------------------------------------------------------------
class UseCase(Core.Classifier):
    """UML 1.3 'UseCase' metaclass (declarative peak.model definition).

    Each nested class declares one association end: `referencedEnd` names
    the opposite end on the target metaclass and `sortPosn` fixes the
    declared feature order.
    """
    class extend(_model.StructuralFeature):
        # Extend relationships in which this use case is the extension.
        referencedType = 'Extend'
        referencedEnd = 'extension'
        sortPosn = 0
    class extend2(_model.StructuralFeature):
        # Extend relationships in which this use case is the base.
        referencedType = 'Extend'
        referencedEnd = 'base'
        sortPosn = 1
    class include(_model.StructuralFeature):
        # Include relationships contributing this use case's additions.
        referencedType = 'Include'
        referencedEnd = 'addition'
        sortPosn = 2
    class include2(_model.StructuralFeature):
        # Include relationships in which this use case is the base.
        referencedType = 'Include'
        referencedEnd = 'base'
        sortPosn = 3
    class extensionPoint(_model.StructuralFeature):
        # Extension points owned by this use case.
        referencedType = 'ExtensionPoint'
        referencedEnd = 'useCase'
        sortPosn = 4
class Actor(Core.Classifier):
    """UML 1.3 'Actor' metaclass; adds no features beyond Classifier."""
    pass
class UseCaseInstance(Common_Behavior.Instance):
    """UML 1.3 'UseCaseInstance' metaclass; adds no features beyond Instance."""
    pass
class Extend(Core.Relationship):
    """UML 1.3 'Extend' relationship from an extension use case to its base."""
    class condition(_model.StructuralFeature):
        # Guard condition under which the extension applies (exactly one).
        referencedType = 'Foundation/Data_Types/BooleanExpression'
        upperBound = 1
        lowerBound = 1
        sortPosn = 0
    class base(_model.StructuralFeature):
        # The use case being extended (opposite end: UseCase.extend2).
        referencedType = 'UseCase'
        referencedEnd = 'extend2'
        upperBound = 1
        lowerBound = 1
        sortPosn = 1
    class extension(_model.StructuralFeature):
        # The use case providing the extension (opposite end: UseCase.extend).
        referencedType = 'UseCase'
        referencedEnd = 'extend'
        upperBound = 1
        lowerBound = 1
        sortPosn = 2
    class extensionPoint(_model.StructuralFeature):
        # At least one extension point in the base use case.
        referencedType = 'ExtensionPoint'
        referencedEnd = 'extend'
        lowerBound = 1
        sortPosn = 3
class Include(Core.Relationship):
    """UML 1.3 'Include' relationship from a base use case to an included one."""
    class addition(_model.StructuralFeature):
        # The included use case (opposite end: UseCase.include).
        referencedType = 'UseCase'
        referencedEnd = 'include'
        upperBound = 1
        lowerBound = 1
        sortPosn = 0
    class base(_model.StructuralFeature):
        # The including use case (opposite end: UseCase.include2).
        referencedType = 'UseCase'
        referencedEnd = 'include2'
        upperBound = 1
        lowerBound = 1
        sortPosn = 1
class ExtensionPoint(Core.ModelElement):
    """UML 1.3 'ExtensionPoint': a named location in a use case where
    Extend relationships may attach behavior."""
    class location(_model.StructuralFeature):
        # Where in the owning use case the extension is inserted.
        referencedType = 'Foundation/Data_Types/LocationReference'
        upperBound = 1
        lowerBound = 1
        sortPosn = 0
    class useCase(_model.StructuralFeature):
        # Owning use case (opposite end: UseCase.extensionPoint).
        referencedType = 'UseCase'
        referencedEnd = 'extensionPoint'
        upperBound = 1
        lowerBound = 1
        sortPosn = 1
    class extend(_model.StructuralFeature):
        # Extend relationships attached at this point.
        referencedType = 'Extend'
        referencedEnd = 'extensionPoint'
        sortPosn = 2
# ------------------------------------------------------------------------------
#_config.setupModule()
|
[
"chrisrgunn@gmail.com"
] |
chrisrgunn@gmail.com
|
fed5d4acae7285a1eebaad8d868f82fae5a50334
|
4b4df51041551c9a855468ddf1d5004a988f59a2
|
/leetcode_python/Bit_Manipulation/binary-number-with-alternating-bits.py
|
54a6df3e0278a36e092633aa43846cee89ba652e
|
[] |
no_license
|
yennanliu/CS_basics
|
99b7ad3ef6817f04881d6a1993ec634f81525596
|
035ef08434fa1ca781a6fb2f9eed3538b7d20c02
|
refs/heads/master
| 2023-09-03T13:42:26.611712
| 2023-09-03T12:46:08
| 2023-09-03T12:46:08
| 66,194,791
| 64
| 40
| null | 2022-08-20T09:44:48
| 2016-08-21T11:11:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,643
|
py
|
# V0
# V1
# https://blog.csdn.net/fuxuemingzhu/article/details/79089937
# IDEA : GREEDY
# TO CHECK IF 0 ALWAYS NEXT/BEFORE 1 (1 ALWAYS NEXT/BEFORE 0)
# IDEA : ALL FUN IN PYTHON
# https://www.programiz.com/python-programming/methods/built-in/all
# Return Value from all()
# The all() method returns:
# True - If all elements in an iterable are true
# False - If any element in an iterable is false
class Solution(object):
    def hasAlternatingBits(self, n):
        """
        :type n: int
        :rtype: bool

        Walk the binary string pairwise; any pair of equal neighbours
        breaks the alternating pattern.
        """
        bits = bin(n)[2:]
        for left, right in zip(bits, bits[1:]):
            if left == right:
                return False
        return True
# V1'
# https://blog.csdn.net/fuxuemingzhu/article/details/79089937
# IDEA : PATTERN
class Solution(object):
    def hasAlternatingBits(self, n):
        """
        :type n: int
        :rtype: bool

        Every alternating-bit number is a right-shifted suffix of the
        64-bit pattern 0b1010...10; compare n against each suffix.
        """
        pattern = 0b1010101010101010101010101010101010101010101010101010101010101010
        while pattern > 0:
            if n == pattern:
                return True
            pattern >>= 1
        return False
# V1''
# https://blog.csdn.net/fuxuemingzhu/article/details/79089937
# IDEA : BIT MANIPULATION
class Solution(object):
    def hasAlternatingBits(self, n):
        """
        :type n: int
        :rtype: bool

        XOR with the shifted self turns an alternating pattern into all
        ones, and an all-ones value x satisfies x & (x + 1) == 0.
        """
        folded = n ^ (n >> 1)
        return (folded & (folded + 1)) == 0
# V2
# Time: O(1)
# Space: O(1)
class Solution(object):
    def hasAlternatingBits(self, n):
        """
        :type n: int
        :rtype: bool

        Peel bits off the low end; adjacent bits must always differ.
        """
        n, prev_bit = divmod(n, 2)
        while n > 0:
            n, bit = divmod(n, 2)
            if bit == prev_bit:
                return False
            prev_bit = bit
        return True
|
[
"f339339@gmail.com"
] |
f339339@gmail.com
|
614ff2ef1cd74d9d05909ec0c30ad60bffdc6e0e
|
30291450c064006f1bd9bc5c432b8a869e2166bb
|
/tags/1.3/zhpy/info.py
|
11b9f7e04ad0e89c86cb8dab2c053353bdebcd40
|
[
"MIT"
] |
permissive
|
BGCX261/zhpy-svn-to-git
|
96f04e2f72c61671324219a85939137ff5cd9ef6
|
70da095393fe13543433ab5115cb6c1a519d64b0
|
refs/heads/master
| 2021-01-22T22:49:04.898314
| 2015-08-25T15:44:00
| 2015-08-25T15:44:00
| 41,587,073
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,498
|
py
|
"""
zhpy package and plugin information
This is the MIT license:
http://www.opensource.org/licenses/mit-license.php
Copyright (c) 2007 Fred Lin and contributors. zhpy is a trademark of Fred Lin.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import pkg_resources
import sys
entrypoints = {"Traditional Chinese Keywords":"plugtw.tools",
"Simplified Chinese Keywords":"plugcn.tools"}
def retrieve_info():
    """
    retrieve package and plugins info

    Returns a list of str(distribution) for every distribution that the
    installed 'zhpy' package requires, per pkg_resources. The plugin
    enumeration over `entrypoints` is currently disabled (commented out).
    """
    packages=['%s' % i for i in pkg_resources.require("zhpy")]
    #plugins = {}
    #for name, pointname in entrypoints.items():
    #    plugins[name] = ["%s (%s) - %d" % (entrypoint.name, \
    #                                 str(entrypoint.dist), \
    #                                 len(entrypoint.load().keyword))
    #        for entrypoint in pkg_resources.iter_entry_points(pointname)
    #        ]
    return packages#, plugins
def info():
    """
    show zhpy informations including version and plugins

    ported from TurboGears2 tginfo command

    NOTE(review): uses Python 2 print statements; this module is not
    Python 3 compatible as written.
    """
    print """
Complete zhpy Version Information
zhpy requires:
"""
    print " * python",sys.version.split()[0]
    #packages, plugins = retrieve_info()
    packages = retrieve_info()
    for p in packages:
        print ' *', p
    # print """\nzhpy extends:"""
    # for name, pluginlist in plugins.items():
    #     print "\n", name, "\n"
    #     for plugin in pluginlist:
    #         print ' *', plugin
    print ""
|
[
"you@example.com"
] |
you@example.com
|
8a01907224c2e522b023d5e25c32cc5d0c980401
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Projects/pyinstaller/PyInstaller/hooks/pre_find_module_path/hook-distutils.py
|
501afb283c000e166bbb745cdda79f4e8589c51b
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:a57a0c2455f448f73674516e52b7af5089c4865777cbdaec011240a51f08272c
size 1744
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
e13c7088fa38705604685a9e8160543ba1c225f3
|
e039a739bb0580befef599bb71c72e64838b9924
|
/Exp_Hyperparams/run_dSVDD.py
|
ff99d808a0a13d4aa4511e1c18879c79e15b83aa
|
[] |
no_license
|
ddatta-DAC/AD_7
|
ab16785f543b8390731cab3195921ca6cbbc4f0a
|
e894b01712e6ad66f6b5715fcb0afa94a49ccf94
|
refs/heads/master
| 2023-04-16T15:07:09.274543
| 2021-04-27T16:12:40
| 2021-04-27T16:12:40
| 290,634,353
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,294
|
py
|
import torch
import random
import numpy as np
import os
import sys
import pandas as pd
sys.path.append('../../../.')
sys.path.append('../../.')
sys.path.append('../')
import yaml
from tqdm import tqdm
import argparse
from joblib import Parallel, delayed
try:
from .deepsvdd import deepsvdd as deepsvdd
except:
import deepsvdd as deepsvdd
try:
from .deepsvdd import base
except:
from deepsvdd import base
try:
from .deepsvdd import optim
except:
from deepsvdd import optim
try:
from .deepsvdd import *
except:
from deepsvdd import *
try:
from .deepsvdd.networks.AE import FC_dec
from .deepsvdd.AE import FC_enc
from .deepsvdd.deepSVDD import DeepSVDD
except:
from deepsvdd.networks.AE import FC_dec
from deepsvdd.networks.AE import FC_enc
from deepsvdd.deepSVDD import DeepSVDD
try:
from eval import eval
except:
from .eval import eval
try:
from . import logger_utils
except:
import logger_utils
try:
from .data_fetcher_v2 import data_fetcher
except:
from data_fetcher_v2 import data_fetcher
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print('Device ::', DEVICE)
def train_model(
        data_dict,
        config,
        objective='soft-boundary',
        nu = 0.01
):
    """Build and fit a DeepSVDD model on data_dict['train'].

    config supplies layer_dims, LR, num_epochs, batch_size,
    warm_up_epochs and ae_epochs. Returns the trained DeepSVDD object.
    """
    global DEVICE
    train_X = data_dict['train']
    # Network input width = feature count, followed by configured hidden dims.
    network_dims = [train_X.shape[1]] + list(config['layer_dims'])
    model = DeepSVDD(
        DEVICE,
        objective=objective,
        nu=nu
    )
    model.set_network(network_dims)
    model.train(
        train_X,
        LR=config['LR'],
        num_epochs=config['num_epochs'],
        batch_size=config['batch_size'],
        ae_epochs=config['ae_epochs'],
        warm_up_epochs=config['warm_up_epochs']
    )
    return model
def test_eval(model_obj, data_dict, num_anomaly_sets):
    """Score the test split against each anomaly set; return (mean, std) auPR."""
    normal_scores = model_obj.test(data_dict['test'])
    aupr_values = []
    for set_no in range(1, num_anomaly_sets + 1):
        anomaly_scores = model_obj.test(data_dict['anom_' + str(set_no)])
        aupr = eval.eval(anomaly_scores, normal_scores, order='descending')
        aupr_values.append(aupr)
        print("AUC : {:0.4f} ".format(aupr))
    mean_aupr = np.mean(aupr_values)
    std_aupr = np.std(aupr_values)
    print(' Mean AUC ', mean_aupr)
    print(' AUC std', std_aupr)
    return mean_aupr, std_aupr
def execute(DATA_SET, nu, objective, id , config, anom_perc, num_anomaly_sets ):
    """One full train/evaluate cycle for anomaly-set `id`; returns (mean auPR, std)."""
    data_dict, _ = data_fetcher.get_data(
        DATA_SET,
        set_id=id,
        num_anom_sets=num_anomaly_sets,
        anomaly_perc=anom_perc
    )
    svdd = train_model(data_dict, config=config, nu=nu, objective=objective)
    return test_eval(svdd, data_dict, num_anomaly_sets)
# Command-line driver: sweep nu over [0.025, 0.2] and log mean auPR per value.
parser = argparse.ArgumentParser(description='Run the model ')
parser.add_argument(
    '--DATA_SET',
    type=str,
    help=' Which data set ?',
    default='kddcup',
    choices=['kddcup', 'kddcup_neptune', 'nsl_kdd', 'nb15', 'gureKDD']
)
parser.add_argument(
    '--num_runs',
    type=int,
    default=1,
    help='Number of runs'
)
parser.add_argument(
    '--anom_perc',
    type=int,
    help='Percentage of anomalies',
    default=None
)
parser.add_argument(
    '--objective',
    type=str,
    default='one-class',
    help='objective',
    choices=['one-class', 'soft-boundary']
)
# =========================================
args = parser.parse_args()
DATA_SET = args.DATA_SET
num_runs = args.num_runs
LOG_FILE = 'log_results_{}.txt'.format(DATA_SET)
LOGGER = logger_utils.get_logger(LOG_FILE, 'deepSVDD')
LOGGER.info(DATA_SET)
config_file = 'config.yaml'
anom_perc = args.anom_perc
with open(config_file, 'r') as fh:
    config = yaml.safe_load(fh)
num_anomaly_sets = config[DATA_SET]['num_anomaly_sets']
anomaly_ratio = config[DATA_SET]['anomaly_ratio']
step = 0.025
nu_values = np.arange(0.025, 0.2 + step, step)
nu_vs_auc = []
objective = args.objective
# BUG FIX: anom_perc was previously overwritten from the config
# unconditionally right after loading it, which silently ignored the
# --anom_perc CLI option and made this `is None` check unreachable.
# Now the config-derived value is used only when the option is absent.
if anom_perc is None:
    anom_perc = 100 * anomaly_ratio / (1 + anomaly_ratio)
LOGGER.info(' Setting anomaly percentage to {} %'.format(anom_perc))
LOGGER.info(' Setting objective to {} '.format(objective))
model_config = config[DATA_SET]['dsvdd']
for nu in nu_values:
    LOGGER.info('Setting nu :: {}'.format(nu))
    # One independent run per anomaly-set id, in parallel.
    _res_ = Parallel(n_jobs=num_runs)(delayed(execute)(
        DATA_SET, nu, objective, id, model_config, anom_perc, num_anomaly_sets) for id in range(1, num_runs + 1)
    )
    results = np.array(_res_)
    mean_all_runs = np.mean(results[:, 0])
    _std = np.std(results[:, 0])
    LOGGER.info(' Runs {}: Mean: {:4f} | Std {:4f}'.format(num_runs, mean_all_runs, _std))
    print('Mean AuPR over {} runs {:4f}'.format(num_runs, mean_all_runs))
    print('Details: ', results[:, 0])
    nu_vs_auc.append((nu, mean_all_runs))
nu_vs_auc = np.array(nu_vs_auc)
LOGGER.info('nu vs AuPR ' + str(nu_vs_auc[:, 0]) + str(nu_vs_auc[:, 1]))
logger_utils.close_logger(LOGGER)
|
[
"ddatta@vt.edu"
] |
ddatta@vt.edu
|
d6e8f0d5efead89c44dddb8d6ccd4759b14870c7
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-2/6df5a87840c9f271ab86792449dc945cadc82f12-<get>-bug.py
|
8034b8985edb4a0b14bb3eca635326fdb5537a2a
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,775
|
py
|
def get(self, request, organization, integration_id):
    """Autocomplete endpoint for a GitHub integration.

    Query params: ``field`` ('externalIssue' or 'repo', required) and
    ``query`` (search text, required); field='externalIssue' also needs
    ``repo``. Returns a list of {label, value} dicts; 400 on missing or
    unknown params, 404 when the integration does not exist.
    """
    try:
        integration = Integration.objects.get(organizations=organization, id=integration_id, provider='github')
    except Integration.DoesNotExist:
        return Response(status=404)
    field = request.GET.get('field')
    query = request.GET.get('query')
    if (field is None):
        return Response({
            'detail': 'field is a required parameter',
        }, status=400)
    if (not query):
        return Response({
            'detail': 'query is a required parameter',
        }, status=400)
    installation = integration.get_installation(organization.id)
    if (field == 'externalIssue'):
        repo = request.GET.get('repo')
        if (repo is None):
            return Response({
                'detail': 'repo is a required parameter',
            }, status=400)
        try:
            # GitHub issue search scoped to one repository.
            response = installation.search_issues(query=('repo:%s %s' % (repo, query)).encode('utf-8'))
        except Exception as e:
            return self.handle_api_error(e)
        return Response([{
            'label': ('#%s %s' % (i['number'], i['title'])),
            'value': i['number'],
        } for i in response.get('items', [])])
    if (field == 'repo'):
        # Scope repo search to the installed account; GitHub uses
        # 'user:' vs 'org:' qualifiers depending on the account type.
        account_type = ('user' if (integration.metadata['account_type'] == 'User') else 'org')
        full_query = ('%s:%s %s' % (account_type, integration.name, query)).encode('utf-8')
        try:
            response = installation.get_client().search_repositories(full_query)
        except Exception as e:
            return self.handle_api_error(e)
        return Response([{
            'label': i['name'],
            'value': i['full_name'],
        } for i in response.get('items', [])])
    # Unknown `field` value.
    return Response(status=400)
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
67ec96965335573a4257c08e53f071a3e550aafc
|
3a38e0ae9eef4a00ff7206c85c0366be141c73b3
|
/edu_smart_server/threads/migrations/0001_initial.py
|
d038c835c43b4a1311266371e25cd64daf0efeb6
|
[] |
no_license
|
CodeNicely/edusmart_final
|
a32dac680b6343f4822624394b06db334e51ff61
|
64b2d14abbbf94e396f3d11529de437cf7561cad
|
refs/heads/master
| 2021-01-09T06:06:14.422656
| 2017-02-05T08:29:18
| 2017-02-05T08:29:18
| 80,914,385
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,097
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-02-05 00:45
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the threads app: message_data and thread_data.

    Auto-generated by Django 1.9 makemigrations; thread_data references
    the classes and department apps, message_data references thread_data.
    """
    initial = True

    dependencies = [
        ('classes', '0001_initial'),
        ('department', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='message_data',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author_id', models.CharField(max_length=120, null=True)),
                ('author_name', models.CharField(max_length=120, null=True)),
                ('message', models.CharField(max_length=120)),
                ('teacher', models.BooleanField(default=False)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='thread_data',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('access_level', models.IntegerField()),
                ('title', models.CharField(max_length=120, null=True)),
                ('description', models.CharField(max_length=120, null=True)),
                ('author', models.CharField(max_length=120, null=True)),
                ('created', models.DateTimeField(auto_now_add=True, null=True)),
                ('class_id', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='classes.class_data')),
                ('department', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='department.department_data')),
            ],
        ),
        # ForeignKey added separately because thread_data is created after
        # message_data in this migration.
        migrations.AddField(
            model_name='message_data',
            name='thread_id',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='threads.thread_data'),
        ),
    ]
|
[
"aditya999123@gmail.com"
] |
aditya999123@gmail.com
|
15f39983e23350dc4c36b142edf378d99df662ba
|
d5820207ff265362743a1a2d833fdaf5187cc308
|
/src/cern/jpymad/tools_optics.py
|
eee53b56036711ab319657afd127c960e90e24fb
|
[
"Apache-2.0"
] |
permissive
|
pymad/jpymad
|
54925b14e2fa34b07b17999d3ca05934f4b2309b
|
6372ada76de400ed949ff5161a30699a7adedaba
|
refs/heads/master
| 2021-01-10T20:47:01.991269
| 2014-06-28T14:08:57
| 2014-06-28T14:08:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,516
|
py
|
#-------------------------------------------------------------------------------
# This file is part of PyMad.
#
# Copyright (c) 2011, CERN. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#-------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 16 20:20:15 2010
@author: kaifox
"""
from __future__ import absolute_import
from .conversions import tofl, tostr
def get_values(optic, madxvarname):
    """
    extract the values for the given madx-variable from the optcs object

    PARAMETERS:
    ===========
    optic: the object from which to extract the values
    madxvarname: the name of the madx-variable for which to extract the values
    """
    # NOTE(review): `pms` is never imported in this module, so this call
    # raises NameError at runtime — confirm the intended import (likely
    # the JMad service wrapper) and add it.
    madxvar = pms.enums.MadxTwissVariable.fromMadxName(madxvarname) #@UndefinedVariable
    values = optic.getAllValues(madxvar)
    # Convert the Java value list to a Python float list.
    return tofl(values)
def get_names(optic):
    '''
    extracts the element names from the optics

    Returns the element names of *optic* converted to Python strings via
    conversions.tostr.
    '''
    return tostr(optic.getNames())
|
[
"t_glaessle@gmx.de"
] |
t_glaessle@gmx.de
|
9518dbd5c0b43537d56acbc0c6d1d96bd5c035b6
|
d79c978cc60afc6ffae1c7fc7730ed4d1e2eb77a
|
/app.py
|
7cf24266880151663a0307c165dd5c5bd3506047
|
[] |
no_license
|
qhuydtvt/tk-vids
|
9ea5af7ea81d735c9baf5830a493ce879ac6d735
|
71225cea0bf67bf4b843b4fb6aa1641bfc9ad10b
|
refs/heads/master
| 2021-09-13T05:48:24.842104
| 2018-04-25T16:41:12
| 2018-04-25T16:41:12
| 105,641,975
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,383
|
py
|
from flask import Flask, render_template, request
from flask_cors import CORS
from urllib.request import urlopen
import json
import mlab
from models.audio import Audio
from flask_restful import Resource, Api
app = Flask(__name__)
cors = CORS(app, resources={r'/api/*': {"origins": "*"}})
api = Api(app)
mlab.connect()
class ApiAudio(Resource):
    """GET /api/audio?search_terms=... — look up a cached Audio document.

    The search terms are lower-cased and stripped before the MongoDB
    lookup; responds with {'success': 1, 'data': ...} on a hit or
    {'success': 0, 'message': 'Not found'} on a miss.
    """
    def get(self):
        search_terms = request.args["search_terms"].lower().strip()
        audio = Audio.objects(search_terms=search_terms).first()
        if audio is not None:
            return {
                'success': 1,
                'data': mlab.item2json(audio)
            }
        else:
            return {
                'success': 0,
                'message': 'Not found'
            }
def webpage_str(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    Bug fix: the original wrote ``urlopen(url).read.decode('utf-8')`` —
    missing the call parentheses on ``read`` — so it accessed ``.decode``
    on the bound method object and raised AttributeError on every call.
    """
    return urlopen(url).read().decode('utf-8')
# Canned JSON error payload.
# NOTE(review): the key "sucess" is misspelled (the API handlers use
# 'success'), and this constant appears unused in this file — confirm no
# external consumer matches the current key before fixing the spelling.
not_found_message = json.dumps ({
    "sucess": 0,
    "data": "not_found"
})
@app.route('/')
def index():
    """Render the landing page with a short usage guide for the API."""
    guide_list = [
        {
            "title": "Pure audio search",
            "example": "api/audio?search_terms=thunder+imagine+dragons",
            "format": "api/audio?search_terms=<Enter song|artist here>",
            "parse_xml": "http://bit.ly/tk-xml-parser"
        }
    ]
    return render_template("index.html", guide_list=guide_list)
api.add_resource(ApiAudio, '/api/audio')
if __name__ == '__main__':
app.run(port=1212)
|
[
"qhuydtvt@gmail.com"
] |
qhuydtvt@gmail.com
|
17b962c6c281ee6f291f1520e6448f9c088dd12e
|
3d7dece5254e42059e8a2cb1e72b295460284983
|
/components/py_engine/micropython-lib/micropython/umqtt.robust/setup.py
|
f0f23ed8cc7dc9a25d7aae0acd27d4cfe92f24ab
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown",
"MIT",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"GPL-1.0-or-later",
"Python-2.0"
] |
permissive
|
windowxia/AliOS-Things
|
172639d6e0d2b2e2e816bce757cf95e89187c132
|
a99f20706f9c666903a12a205edce13263b1fadb
|
refs/heads/master
| 2023-09-01T06:03:57.853390
| 2023-07-04T05:51:52
| 2023-07-04T06:49:36
| 149,751,180
| 0
| 0
|
Apache-2.0
| 2018-09-21T10:56:09
| 2018-09-21T10:56:08
| null |
UTF-8
|
Python
| false
| false
| 719
|
py
|
import sys
# Remove current dir from sys.path, otherwise setuptools will peek up our
# module instead of system's.
sys.path.pop(0)
from setuptools import setup
sys.path.append("..")
import sdist_upip
setup(
name="micropython-umqtt.robust",
version="1.0.1",
description='Lightweight MQTT client for MicroPython ("robust" version).',
long_description=open("README.rst").read(),
url="https://github.com/micropython/micropython-lib",
author="Paul Sokolovsky",
author_email="micro-python@googlegroups.com",
maintainer="micropython-lib Developers",
maintainer_email="micro-python@googlegroups.com",
license="MIT",
cmdclass={"sdist": sdist_upip.sdist},
packages=["umqtt"],
)
|
[
"yilu.myl@alibaba-inc.com"
] |
yilu.myl@alibaba-inc.com
|
6e99309c21dc75f92af4fbcfeb21e488e84a0537
|
228fd55571b31cdcf54ef42a7338ca42ab399588
|
/battery_client.py
|
0e885f3c27579ae3896668884b5f49661a95bf00
|
[] |
no_license
|
benthomasson/ev3-play
|
12aae6f846be11ad3fe029148eb084edccd7b991
|
d416a0fd2eebc1a13bef601ccfe98bcf0c17ef61
|
refs/heads/master
| 2022-09-04T00:37:10.463877
| 2022-08-22T12:50:35
| 2022-08-22T12:50:35
| 253,309,405
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 691
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Echo client program
import socket
import json
import psutil
import time
def send_message(s, msg_type, message_data):
    """Send a framed JSON message over socket-like *s*.

    Wire format: 4-digit lowercase-hex length prefix, then the JSON
    payload ``[msg_type, message_data]``, both UTF-8 encoded.
    """
    payload = json.dumps([msg_type, message_data])
    s.sendall('{:04x}'.format(len(payload)).encode())
    s.sendall(payload.encode())
HOST = 'localhost'
PORT = 50007  # The same port as used by the server
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((HOST, PORT))
    send_message(s, 'Message', dict(msg='Hello world'))
    # Report battery charge once per second, forever (Ctrl-C to stop).
    while True:
        battery = psutil.sensors_battery()
        send_message(s, 'Battery', dict(percent=battery.percent))
        time.sleep(1)
|
[
"ben.thomasson@gmail.com"
] |
ben.thomasson@gmail.com
|
8059eea3c4fe11d53b16161e256869544a1f7b8a
|
eafabc5e332f5fc0153e166d992ac0711cf90cd6
|
/BOJ/2644/2644번(촌수계산).py
|
3753969c178fbd1055026ee542090ce391404eee
|
[] |
no_license
|
PARKINHYO/Algorithm
|
96038ce21bd9f66208af0886208ef6ed925c23e2
|
0ed8687fe971fc2b05e2f50f62c0d0e47c368a6c
|
refs/heads/master
| 2021-12-23T23:48:25.247979
| 2021-08-20T01:52:50
| 2021-08-20T01:52:50
| 196,219,508
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,156
|
py
|
from collections import defaultdict
from sys import stdin
class Graph():
    """Undirected graph; BFS prints the edge distance between two nodes
    (BOJ 2644 "degree of kinship")."""
    def __init__(self):
        # Adjacency list: node -> list of neighbours.
        self.graph = defaultdict(list)

    def addEdge(self, u, v):
        # Undirected edge: record both directions.
        self.graph[u].append(v)
        self.graph[v].append(u)

    def BFS(self, s, v):
        """Print the shortest number of edges from s to v, or -1 when v is
        unreachable.

        Quirk preserved from the original: v == s also prints -1, because
        a distance of 0 is treated as "no path".
        Improvements: collections.deque gives O(1) pops (list.pop(0) was
        O(n)), and distances live in a dict instead of fixed-size 105-entry
        arrays, so node labels are no longer capped at 104.
        """
        from collections import deque
        dist = {s: 0}
        pending = deque([s])
        while pending:
            cur = pending.popleft()
            for nxt in self.graph[cur]:
                if nxt not in dist:
                    dist[nxt] = dist[cur] + 1
                    pending.append(nxt)
        d = dist.get(v, 0)
        print(-1 if d == 0 else d)
if __name__ == '__main__':
    # Input: total people n, query pair (a, b), edge count m, then m
    # parent-child pairs. Output: kinship distance between a and b, or -1.
    g = Graph()
    # f = open('input.txt', 'r')
    # file_txt = []
    # for line in f:
    #
    #     file_txt.append(line[:-1])
    # f.close()
    n = int(stdin.readline())
    a, b = map(int, stdin.readline().split(" "))
    m = int(stdin.readline())
    for i in range(m):
        tmp1, tmp2 = map(int, stdin.readline().split(" "))
        g.addEdge(tmp1, tmp2)
    g.BFS(a, b)
|
[
"inhyopark122@gmail.com"
] |
inhyopark122@gmail.com
|
a8abd28e470a50db39911b213d6efe1374a7962b
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/fbs_1148+444/sdB_FBS_1148+444_coadd.py
|
0254387ab7054d564d646cf529283b7c20657047
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
from gPhoton.gMap import gMap
def main():
    # Single gPhoton NUV coadd for target sdB_FBS_1148+444:
    # 0.0333 deg (2') box at (RA, Dec) = (177.84925, 44.212194), 30 s bins.
    # NOTE(review): cntfile is under .../sdBs/... but cntcoaddfile under
    # .../sdB/... — confirm the differing directory is intentional.
    gMap(band="NUV", skypos=[177.84925,44.212194], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_FBS_1148+444/sdB_FBS_1148+444_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_FBS_1148+444/sdB_FBS_1148+444_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
    main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
36055e4e753fdcc3fdb4cb14e6b24e2dc38ddaa7
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2535/48090/315890.py
|
c1bf75e0cf13230b181db8b8e4608c8c69acc14b
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 322
|
py
|
arr=eval(input())
class Solution:
    def maxBlockToSorted(self, arr):
        """Count the chunks of *arr* (a permutation of 0..len-1) that can
        be sorted independently so the concatenation is fully sorted.

        A chunk boundary exists at index i when the running maximum of
        arr[0..i] equals i.
        """
        chunks = 0
        running_max = arr[0]
        for idx, value in enumerate(arr):
            running_max = max(running_max, value)
            if running_max == idx:
                chunks += 1
        return chunks
c=Solution()
print(c.maxBlockToSorted(arr))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
747ecb88556de97d63232ef35d142e151af9ca44
|
50aa9303450e06d1172f78c0478a58e5113d9bb9
|
/988 arranging-coions.py
|
69f2c7dd8cf72e28b804be49986218f4e6b11bce
|
[] |
no_license
|
zlldt/LintCode
|
6e1041b78a301651378833caf7fd7db9ce112ec5
|
e5012161131a8c8557bdb0296980b2a0b712c620
|
refs/heads/master
| 2021-06-27T05:24:08.471072
| 2019-03-02T12:56:26
| 2019-03-02T12:56:26
| 105,424,020
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 378
|
py
|
class Solution:
    """
    @param n: a non-negative integer
    @return: the total number of full staircase rows that can be formed
    """
    def arrangeCoins(self, n):
        """Return the largest k with k*(k+1)/2 <= n.

        Bug fix: the original returned start-1 even when the running sum
        reached n *exactly* (a perfect triangular number), so e.g.
        arrangeCoins(3) gave 1 instead of 2 and arrangeCoins(6) gave 2
        instead of 3 (n=1 was only correct via a special case). Integer
        arithmetic also avoids the original's float division.
        """
        rows = 0    # complete rows built so far
        used = 0    # coins consumed by those rows
        # The next row needs rows+1 coins; keep adding rows while they fit.
        while used + rows + 1 <= n:
            rows += 1
            used += rows
        return rows
|
[
"noreply@github.com"
] |
zlldt.noreply@github.com
|
88ffbba45ab17a0c77184fb4c8d033d5dbe545b5
|
a22f0ae4b4674f29449cc7f5aa9bd335e06c12eb
|
/MPDA_cfg_txt.py
|
9403f2b34016a9386b4fbcf3bc4b808488df17d7
|
[] |
no_license
|
Tesla2fox/MPDA_OPERATION
|
e2a4a0a49e7d8a2bacaca8191cb9a5b58e74b06a
|
c33784a28f49b8e7333b74b86191d958e66ff021
|
refs/heads/master
| 2020-03-13T11:34:11.117054
| 2018-04-26T05:15:51
| 2018-04-26T05:15:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,810
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 30 09:01:23 2018
@author: robot
"""
#import classDefine
import classDefine as cd
import plotly.plotly as py
import plotly.graph_objs as go
import math
import plotly
import copy
import random
import time
import datetime
import subprocess
#__import__ ('classDefine')
#py.sign_in('tesla_fox', 'HOTRQ3nIOdYUUszDIfgN')
#RobInfoMat=[1 2 1 0.2 3 1 1 0.3 5 2 1 0.4]
#TaskInfoMat= [7 8 0.15 5;5 9 0.25 6;10 12 0.12 4];
# Generate 100 random MPDA problem instances: write each to MPDA_cfg.txt
# and invoke the C++ solver on it via subprocess.
for runTimes in range(0,100):
    robotNum = 50
    taskNum = 50
    fileDir = 'D://VScode//MPDA_orgDecode//data//'
    f = open(fileDir+'MPDA_cfg.txt','w')
    # write generation timestamp and instance sizes
    f.write('time ' +datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")+'\n')
    f.write('AgentNum ' + str(robotNum)+'\n')
    f.write('TaskNum '+ str(taskNum)+'\n')
    robotList = []
    taskPntList = []
    #increaseRatioList = []
    #initStateList = []
    drawData = []
    # create the random task points
    for i in range(0,taskNum):
        taskpnt = cd.TaskPnt()
        taskpnt.random()
        # taskpnt.displayTaskPnt()
        taskPntList.append(copy.deepcopy(taskpnt))
        # increaseRatioList.append(taskpnt.increaseRatio)
        # initStateList.append(taskpnt.initState)
        taskPntTrace = taskpnt.getTrace()
        drawData.append(copy.deepcopy(taskPntTrace))
        # print('<<<<<<<<<<<<<<<<')
    # write per-task growth rate and initial state rows
    f.write('TaskIncreaseRatio ')
    for j in range(0,taskNum):
        f.write(' ')
        f.write(str(taskPntList[j].increaseRatio))
    f.write('\n')
    f.write('TaskInitState ')
    for j in range (0,taskNum):
        f.write(' ')
        f.write(str(taskPntList[j].initState))
    f.write('\n')
    # create the random robots and write their abilities
    f.write('AgentAbility ')
    for i in range(0,robotNum):
        rob = cd.Robot()
        rob.random()
        # rob.displayRobot()
        robotList.append(copy.deepcopy(rob))
        f.write(' '+str(rob.ability))
    f.write('\n')
    # upper-triangular task-to-task distance matrix
    f.write('TaskDisMat ')
    for i in range (0,taskNum):
        for j in range(i+1,taskNum):
            dis = cd.distance_point(taskPntList[i].pnt,taskPntList[j].pnt)
            f.write(' '+str(dis))
    f.write('\n')
    # full robot-to-task distance matrix
    f.write('Ag2TaskDisMat ')
    for i in range (0,robotNum):
        for j in range (0,taskNum):
            dis = cd.distance_point(robotList[i].pnt,taskPntList[j].pnt)
            f.write(' ' +str(dis))
    f.write('\n')
    # one random task permutation (initial encoding) per robot
    f.write('Encode ')
    for i in range(0,robotNum):
        permutationList =[]
        for j in range (0,taskNum):
            permutationList.append(j)
        random.shuffle(permutationList)
        for j in range (0,taskNum):
            f.write(' '+ str(permutationList[j]))
        # print(disOrderPermutation)
    f.write('\n')
    # trailing human-readable section the C++ side does not parse
    f.write('robotPosstion\n')
    f.write('<<<<<<<<<\n')
    for i in range(0,robotNum):
        f.write('index '+ str(i))
        f.write(' pntx = ' +str(robotList[i].pnt.x) + ' pnty = ' +
                str(robotList[i].pnt.y) + '\n')
    f.write('taskPosition\n')
    f.write('<<<<<<<<<\n')
    for i in range (0,taskNum):
        f.write('index '+ str(i))
        f.write(' pntx = ' +str(taskPntList[i].pnt.x) + ' pnty = ' +
                str(taskPntList[i].pnt.y) + '\n')
    # end writing the data to the configure txt
    f.close()
    # run the external solver on the freshly written instance and wait
    pname = "D:\\VScode\\MPDA_StaticConstrn\\bin\\mpda_StaticConstrn\\Debug\\mpda_StaticConstrn.exe"
    p = subprocess.Popen(pname,stdin =subprocess.PIPE,stdout = subprocess.PIPE)
    o = p.communicate()
    print(runTimes,'<<<<<<<<<<<<<<<<')
|
[
"stef_leon_gao@outlook.com"
] |
stef_leon_gao@outlook.com
|
fe95e473675f636834b33370b5a053d90ad799da
|
c7846ee0828539c2a2019928c1cbf3abd35665bf
|
/1861.py
|
5591737795d0cc329e37af61c882a6104f49553c
|
[] |
no_license
|
whiteblue0/sw_problems
|
10476601c8d6d68d42e2f30af87fcde1e5dbbcc5
|
1cefc6236cccc20477bf4eadb458a0fd06b09126
|
refs/heads/master
| 2020-06-20T10:44:57.463275
| 2020-05-03T07:27:57
| 2020-05-03T07:27:57
| 197,098,448
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 929
|
py
|
# Direction vectors: right, down, left, up (original comment: 우,하,좌,상).
dx = [1,0,-1,0]
dy = [0,1,0,-1]
def ispass(y,x):
    # True when (y, x) lies inside the N x N board; N is a module-level
    # global assigned per test case in the main loop below.
    return 0<=y<N and 0<=x<N
def findpath(sy, sx):
    """Follow cells whose value increases by exactly 1 per orthogonal step,
    starting from (sy, sx), and record the best run in the module-level
    ``result`` list as [start value, run length] (ties go to the later call,
    because the comparison uses >=)."""
    global result
    frontier = [(sy, sx)]
    length = 1
    while frontier:
        cy, cx = frontier.pop(0)
        for step_y, step_x in zip(dy, dx):
            ny, nx = cy + step_y, cx + step_x
            if ispass(ny, nx) and data[ny][nx] == data[cy][cx] + 1:
                length += 1
                frontier.append((ny, nx))
    # Keep this start if its run is at least as long as the current best.
    if length >= result[1]:
        result[0], result[1] = data[sy][sx], length
T = int(input())                      # number of test cases
for tc in range(1,T+1):
    N = int(input())                  # board is N x N
    data = [list(map(int,input().split())) for _ in range(N)]
    nums = [0]*(N**2+1)               # nums[v] -> (row, col) holding value v
    result = [N**2, 0]                # [best start value, best run length]
    for i in range(N):
        for j in range(N):
            nums[data[i][j]] = (i,j)
    # Try every value as a starting cell, from largest to smallest; because
    # findpath keeps ties (>=), the smallest qualifying start value wins.
    for i in range(N**2,0,-1):
        sy,sx = nums[i][0], nums[i][1]
        findpath(sy,sx)
    print("#{}".format(tc),result[0],result[1])
|
[
"21port@naver.com"
] |
21port@naver.com
|
6b25a67d62f342c36269ccdf5ef1219aa9acdecf
|
2aace9bb170363e181eb7520e93def25f38dbe5c
|
/build/idea-sandbox/system/python_stubs/cache/85131b28f01c2734886292b5f908bd346db627d78ffb686c086516a89e3ff520/xml/parsers/expat/errors.py
|
e407b956b7e12afd5a9207878324138d166d668f
|
[] |
no_license
|
qkpqkp/PlagCheck
|
13cb66fd2b2caa2451690bb72a2634bdaa07f1e6
|
d229904674a5a6e46738179c7494488ca930045e
|
refs/heads/master
| 2023-05-28T15:06:08.723143
| 2021-06-09T05:36:34
| 2021-06-09T05:36:34
| 375,235,940
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,558
|
py
|
# encoding: utf-8
# module xml.parsers.expat.errors calls itself pyexpat.errors
# from C:\Users\Doly\Anaconda3\lib\site-packages\numba\jitclass\_box.cp37-win_amd64.pyd
# by generator 1.147
# NOTE(review): this is an IDE-generated type stub for the C pyexpat.errors
# module, not hand-written code; do not edit values by hand.
""" Constants used to describe error conditions. """
# no imports
# Variables with simple values
XML_ERROR_ABORTED = 'parsing aborted'
XML_ERROR_ASYNC_ENTITY = 'asynchronous entity'
XML_ERROR_ATTRIBUTE_EXTERNAL_ENTITY_REF = 'reference to external entity in attribute'
XML_ERROR_BAD_CHAR_REF = 'reference to invalid character number'
XML_ERROR_BINARY_ENTITY_REF = 'reference to binary entity'
XML_ERROR_CANT_CHANGE_FEATURE_ONCE_PARSING = 'cannot change setting once parsing has begun'
XML_ERROR_DUPLICATE_ATTRIBUTE = 'duplicate attribute'
XML_ERROR_ENTITY_DECLARED_IN_PE = 'entity declared in parameter entity'
XML_ERROR_EXTERNAL_ENTITY_HANDLING = 'error in processing external entity reference'
XML_ERROR_FEATURE_REQUIRES_XML_DTD = 'requested feature requires XML_DTD support in Expat'
XML_ERROR_FINISHED = 'parsing finished'
XML_ERROR_INCOMPLETE_PE = 'incomplete markup in parameter entity'
XML_ERROR_INCORRECT_ENCODING = 'encoding specified in XML declaration is incorrect'
XML_ERROR_INVALID_TOKEN = 'not well-formed (invalid token)'
XML_ERROR_JUNK_AFTER_DOC_ELEMENT = 'junk after document element'
XML_ERROR_MISPLACED_XML_PI = 'XML or text declaration not at start of entity'
XML_ERROR_NOT_STANDALONE = 'document is not standalone'
XML_ERROR_NOT_SUSPENDED = 'parser not suspended'
XML_ERROR_NO_ELEMENTS = 'no element found'
XML_ERROR_NO_MEMORY = 'out of memory'
XML_ERROR_PARAM_ENTITY_REF = 'illegal parameter entity reference'
XML_ERROR_PARTIAL_CHAR = 'partial character'
XML_ERROR_PUBLICID = 'illegal character(s) in public id'
XML_ERROR_RECURSIVE_ENTITY_REF = 'recursive entity reference'
XML_ERROR_SUSPENDED = 'parser suspended'
XML_ERROR_SUSPEND_PE = 'cannot suspend in external parameter entity'
XML_ERROR_SYNTAX = 'syntax error'
XML_ERROR_TAG_MISMATCH = 'mismatched tag'
XML_ERROR_TEXT_DECL = 'text declaration not well-formed'
XML_ERROR_UNBOUND_PREFIX = 'unbound prefix'
XML_ERROR_UNCLOSED_CDATA_SECTION = 'unclosed CDATA section'
XML_ERROR_UNCLOSED_TOKEN = 'unclosed token'
XML_ERROR_UNDECLARING_PREFIX = 'must not undeclare prefix'
XML_ERROR_UNDEFINED_ENTITY = 'undefined entity'
XML_ERROR_UNEXPECTED_STATE = 'unexpected parser state - please send a bug report'
XML_ERROR_UNKNOWN_ENCODING = 'unknown encoding'
XML_ERROR_XML_DECL = 'XML declaration not well-formed'
__loader__ = None
__spec__ = None
# no functions
# no classes
# variables with complex values
# codes maps an error-message string to its numeric expat error code;
# it is the inverse of the `messages` mapping below.
codes = {
    'XML declaration not well-formed': 30,
    'XML or text declaration not at start of entity': 17,
    'asynchronous entity': 13,
    'cannot change setting once parsing has begun': 26,
    'cannot suspend in external parameter entity': 37,
    'document is not standalone': 22,
    'duplicate attribute': 8,
    'encoding specified in XML declaration is incorrect': 19,
    'entity declared in parameter entity': 24,
    'error in processing external entity reference': 21,
    'illegal character(s) in public id': 32,
    'illegal parameter entity reference': 10,
    'incomplete markup in parameter entity': 29,
    'junk after document element': 9,
    'mismatched tag': 7,
    'must not undeclare prefix': 28,
    'no element found': 3,
    'not well-formed (invalid token)': 4,
    'out of memory': 1,
    'parser not suspended': 34,
    'parser suspended': 33,
    'parsing aborted': 35,
    'parsing finished': 36,
    'partial character': 6,
    'recursive entity reference': 12,
    'reference to binary entity': 15,
    'reference to external entity in attribute': 16,
    'reference to invalid character number': 14,
    'requested feature requires XML_DTD support in Expat': 25,
    'syntax error': 2,
    'text declaration not well-formed': 31,
    'unbound prefix': 27,
    'unclosed CDATA section': 20,
    'unclosed token': 5,
    'undefined entity': 11,
    'unexpected parser state - please send a bug report': 23,
    'unknown encoding': 18,
}
# messages maps a numeric expat error code to its message string.
messages = {
    1: 'out of memory',
    2: 'syntax error',
    3: 'no element found',
    4: 'not well-formed (invalid token)',
    5: 'unclosed token',
    6: 'partial character',
    7: 'mismatched tag',
    8: 'duplicate attribute',
    9: 'junk after document element',
    10: 'illegal parameter entity reference',
    11: 'undefined entity',
    12: 'recursive entity reference',
    13: 'asynchronous entity',
    14: 'reference to invalid character number',
    15: 'reference to binary entity',
    16: 'reference to external entity in attribute',
    17: 'XML or text declaration not at start of entity',
    18: 'unknown encoding',
    19: 'encoding specified in XML declaration is incorrect',
    20: 'unclosed CDATA section',
    21: 'error in processing external entity reference',
    22: 'document is not standalone',
    23: 'unexpected parser state - please send a bug report',
    24: 'entity declared in parameter entity',
    25: 'requested feature requires XML_DTD support in Expat',
    26: 'cannot change setting once parsing has begun',
    27: 'unbound prefix',
    28: 'must not undeclare prefix',
    29: 'incomplete markup in parameter entity',
    30: 'XML declaration not well-formed',
    31: 'text declaration not well-formed',
    32: 'illegal character(s) in public id',
    33: 'parser suspended',
    34: 'parser not suspended',
    35: 'parsing aborted',
    36: 'parsing finished',
    37: 'cannot suspend in external parameter entity',
}
|
[
"qinkunpeng2015@163.com"
] |
qinkunpeng2015@163.com
|
60f18d36c09eaf99c386081dad6ff20c40293d18
|
66c94b5e427c0b8f8f7101de9c17af1423f00682
|
/keras2/keras66_3_hyper_lstm.py
|
b669f1052d34a3af94193a216b6ad0a57a9fccf6
|
[] |
no_license
|
NamkyuHan/bit_seoul
|
a34ea3c49666ee2183026e960e45092778643d55
|
3112eb576089cdf906c4f326337b4d2b5e5e4c29
|
refs/heads/master
| 2023-01-30T19:02:53.323592
| 2020-12-17T01:05:17
| 2020-12-17T01:05:17
| 311,277,610
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,501
|
py
|
import numpy as np
from tensorflow.keras.models import load_model, Sequential, Model
from tensorflow.keras.layers import Conv2D, Dense, MaxPooling2D, Flatten, Input, LSTM
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.layers import Dropout
#### 1. Data
# Load MNIST and reshape each 28x28 image into a (196, 4) sequence so it can
# be fed to an LSTM; scale pixel values into [0, 1].
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# x_predict=x_test[:10, :, :, :]
x_train = x_train.reshape(60000, 196, 4).astype('float32')/255.
x_test = x_test.reshape(10000, 196, 4).astype('float32')/255.
# x_predict=x_predict.astype('float32')/255.
# one-hot encode the labels
y_train=to_categorical(y_train)
y_test=to_categorical(y_test)
# 2. Model definition
def build_model(drop=0.5, optimizer='adam'):
    """Build and compile a small LSTM classifier for (196, 4) sequences.

    Used as the build_fn of a KerasClassifier; `drop` and `optimizer` are
    tunable hyperparameters.
    """
    inp = Input(shape=(196, 4), name='input')
    h = LSTM(30, activation='relu', name='hidden1')(inp)
    h = Dropout(drop)(h)
    h = Dense(20, activation='relu', name='hidden2')(h)
    h = Dropout(drop)(h)
    h = Dense(10, activation='relu', name='hidden3')(h)
    h = Dropout(drop)(h)
    out = Dense(10, activation='softmax', name='output')(h)
    net = Model(inputs=inp, outputs=out)
    net.compile(optimizer=optimizer, metrics=['acc'], loss='categorical_crossentropy')
    return net
# This function defines the search space handed to RandomizedSearchCV, so the
# layers in build_model above must accept every value listed here.
def create_hyperparameters():
    """Return the hyperparameter search space for the Keras/sklearn search.

    Keys match KerasClassifier fit/build arguments: 'batch_size',
    'optimizer' and 'drop' (the dropout rate passed to build_model).
    """
    batchs = [10]  # e.g. [10, 20, 30, 40, 50]
    optimizers = ('rmsprop', 'adam', 'adadelta')
    # BUG FIX: the previous list [0.1, 0.5, 5] contained 5, which is not a
    # valid dropout rate (rates must lie in [0, 1); Dropout(5) raises).
    # Restore the intended np.linspace(0.1, 0.5, 5) grid.
    dropout = [0.1, 0.2, 0.3, 0.4, 0.5]
    return{'batch_size' : batchs, "optimizer" : optimizers, "drop" : dropout}
hyperparamaters = create_hyperparameters()
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier #케라스를 사이킷런으로 감싸겠다
model = KerasClassifier(build_fn=build_model, verbose=1)
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
search = RandomizedSearchCV(model, hyperparamaters, cv=3)
search.fit(x_train, y_train)
print(search.best_params_)
acc = search.score(x_test, y_test)
print("최종 스코어 : ", acc)
# 그리드 서치를 랜덤 서치로 바꿔보자
'''
{'batch_size': 10, 'drop': 0.5, 'optimizer': 'rmsprop'}
1000/1000 [==============================] - 15s 15ms/step - loss: 301713288744930553888768.0000 - acc: 0.2320
최종 스코어 : 0.23199999332427979
'''
|
[
"rksh333@naver.com"
] |
rksh333@naver.com
|
40d92d8d26105c813762bbaaf0ca6165692e5266
|
8993469765f9e504c388eeba2c940f40ec971cb7
|
/labs1/24.py
|
da9f4f061d5748aadf5564408ee46328ed80785b
|
[] |
no_license
|
Sens3ii/ICT2020
|
293f8df77aeaba66b33c08540e037365b18246a7
|
c0f68680b13534bd5697a9f95fae9dc7e2ed4089
|
refs/heads/master
| 2023-02-25T17:51:55.459518
| 2021-02-03T09:27:53
| 2021-02-03T09:27:53
| 296,038,252
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 149
|
py
|
def to_seconds(days, hours, minutes, seconds):
    """Return the total number of seconds in days/hours/minutes/seconds.

    BUG FIX: the original expression used ``Days + 60*60*24`` (the raw day
    count plus one day's worth of seconds) instead of multiplying the day
    count by 86400.
    """
    return seconds + minutes * 60 + hours * 60 * 60 + days * 60 * 60 * 24


if __name__ == "__main__":
    # One integer per line on stdin: days, hours, minutes, seconds.
    days = int(input())
    hours = int(input())
    minutes = int(input())
    seconds = int(input())
    print(to_seconds(days, hours, minutes, seconds), "sec")
|
[
"noreply@github.com"
] |
Sens3ii.noreply@github.com
|
02bfe76934653f7d299dcb398b654b19e587b33e
|
4e96f383d4703ad8ee58869ed91a0c8432c8a051
|
/Cura/fdm_materials/scripts/check_material_profiles.py
|
bad82e1ed75536a491b3ea2ab4a6e5b63c09cfcc
|
[
"CC0-1.0",
"GPL-3.0-only"
] |
permissive
|
flight7788/3d-printing-with-moveo-1
|
b2dba26010c4fa31815bc1d2d0966161a8600081
|
7fcb9c6b5da9245d54ac917de8c2a7f5148e42b0
|
refs/heads/Feature_Marlin_with_AlanBoy
| 2022-08-30T18:36:44.785058
| 2020-05-30T07:52:58
| 2020-05-30T07:52:58
| 212,583,912
| 0
| 0
|
MIT
| 2020-05-16T07:39:47
| 2019-10-03T13:13:01
|
C
|
UTF-8
|
Python
| false
| false
| 3,103
|
py
|
# This script is dedicated to the public domain under the terms of the CC0 license.
from collections import OrderedDict
import os
import sys
import re
class MaterialProfilesValidator:
    """Checks that every material profile (*.xml.fdm_material) under a root
    directory carries a unique, non-empty GUID."""

    def __init__(self, root_dir: str):
        self._repo_dir = os.path.abspath(root_dir)
        self._materials_dir = self._repo_dir
        # Capture the text between the tags with a group.  BUG FIX: the old
        # extraction used line.strip("<GUID>").strip("</GUID>"), but str.strip
        # removes a *set of characters*, so any GUID beginning or ending with
        # one of <, >, /, G, U, I or D was silently truncated.
        self._guid_pattern = re.compile(r"<GUID>(.*)</GUID>")

    def _get_guid(self, content: str) -> str:
        """Return the GUID found on a ``<GUID>...</GUID>`` line of *content*,
        or None when no such line exists."""
        for line in content.splitlines():
            match = self._guid_pattern.match(line.strip())
            if match:
                return match.group(1)
        return None

    def get_materials_dir(self, dirpath: str):
        """Descend into *dirpath* until a directory containing at least one
        ``.xml.fdm_material`` file is found; return that directory."""
        for root_dir, dirnames, filenames in os.walk(dirpath):
            has_materials_file = any(fn.endswith(".xml.fdm_material") for fn in filenames)
            if not has_materials_file:
                for dirname in dirnames:
                    full_dir_path = os.path.join(root_dir, dirname)
                    return self.get_materials_dir(full_dir_path)
        return dirpath

    ## Validates the preset settings files and returns ``True`` or ``False``
    #  indicating whether all files are valid (True means no duplicates and
    #  no missing GUIDs).
    def validate(self) -> bool:
        # map GUID -> list of files carrying it (None key = files w/o GUID)
        guid_dict = OrderedDict()
        materials_dir = self.get_materials_dir(self._materials_dir)
        # go through all the preset settings files
        for _, _, filenames in os.walk(materials_dir):
            for filename in filenames:
                file_path = os.path.join(materials_dir, filename)
                if not filename.endswith(".xml.fdm_material"):
                    continue
                with open(file_path, "r", encoding = "utf-8") as f:
                    content = f.read()
                guid = self._get_guid(content)
                if guid not in guid_dict:
                    guid_dict[guid] = []
                item_list = guid_dict[guid]
                item_list.append({"file_name": filename,
                                  "file_path": file_path})
            break  # only the top level of materials_dir is scanned
        has_invalid_files = False
        for guid, file_item_list in guid_dict.items():
            if len(file_item_list) <= 1:
                continue
            has_invalid_files = True
            if guid is not None:
                print("-> The following files contain the same GUID [%s]:" % guid)
            else:
                print("-> The following files DO NOT contain any GUID:")
            for file_item in file_item_list:
                print("  -- [%s]" % file_item["file_name"])
            print("-> PLEASE make sure to generate unique GUIDs for each material.")
        return not has_invalid_files
if __name__ == "__main__":
    # Validate the repository root (the parent of this script's directory)
    # and exit 0 on success, 1 when any profile is invalid.
    here = os.path.dirname(os.path.realpath(__file__))
    repo_root = os.path.abspath(os.path.join(here, ".."))
    all_valid = MaterialProfilesValidator(repo_root).validate()
    sys.exit(0 if all_valid else 1)
|
[
"t106360212@ntut.org.tw"
] |
t106360212@ntut.org.tw
|
b06374f5b30ecf289a54899c6656ebc17d0ebfad
|
18a281c772550d174fc903f35f70e27ee09bb89a
|
/web/config/wsgi.py
|
8d0b154dc4018687b0e7031b5f7a4890542fefe5
|
[] |
no_license
|
quinceleaf/implementing-webhooks
|
60ef33091a7254a509965cc4cc4de635709f8ec4
|
1cebfc3cdabd85e503fbdb60f418321a906b83ad
|
refs/heads/main
| 2023-07-11T01:20:23.156674
| 2021-08-06T03:54:13
| 2021-08-06T03:54:13
| 393,219,446
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
"""
WSGI config for web project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings")
application = get_wsgi_application()
|
[
"brian@quinceleaf.dev"
] |
brian@quinceleaf.dev
|
cfb0ba5f2f127c5af9c61326c3d79fe9574f9dd7
|
1760e87ada878d3d016b68eac4194701fada19d4
|
/piGAN_lib/fid_evaluation.py
|
82804971b482f250b54b48f9d505cd5926bca4ca
|
[
"MIT"
] |
permissive
|
tonywork/CIPS-3D
|
5fd04b56fafeb46e9f3396314dec1d6f302da740
|
9244193048c73f55270d2df28fb160f42d5953ad
|
refs/heads/main
| 2023-08-26T04:52:40.484046
| 2021-11-01T06:17:12
| 2021-11-01T06:17:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,692
|
py
|
"""
Contains code for logging approximate FID scores during training.
If you want to output ground-truth images from the training dataset, you can
run this file as a script.
"""
import os
import shutil
import torch
import copy
import argparse
from torchvision.utils import save_image
from pytorch_fid import fid_score
from tqdm import tqdm
import curriculums
# import datasets
from exp.pigan import datasets
def output_real_images(dataloader, num_imgs, real_dir):
    """Save roughly *num_imgs* ground-truth images from *dataloader* into
    *real_dir* as zero-padded JPEGs (floor(num_imgs / batch_size) batches)."""
    batch_size = dataloader.batch_size
    batches = iter(dataloader)
    saved = 0
    for _ in range(num_imgs // batch_size):
        real_imgs, _labels = next(batches)
        for img in real_imgs:
            save_image(img, os.path.join(real_dir, f'{saved:0>5}.jpg'), normalize=True, range=(-1, 1))
            saved += 1
def setup_evaluation(dataset_name, generated_dir, target_size=128, num_imgs=8000, outdir=None, **kwargs):
    """Ensure the cached directory of real reference images exists (creating
    and populating it on first use) and return its path; also creates
    *generated_dir* when given.  Extra kwargs go to datasets.get_dataset."""
    # Only make real images if they haven't been made yet
    if outdir:
        real_dir = os.path.join(outdir, 'evaluation', dataset_name + '_real_images_' + str(target_size))
    else:
        real_dir = os.path.join('EvalImages', dataset_name + '_real_images_' + str(target_size))
    # an empty cache directory counts as "not made yet": remove and rebuild
    if os.path.exists(real_dir) and len(os.listdir(real_dir)) == 0:
        os.rmdir(real_dir)
    if not os.path.exists(real_dir):
        os.makedirs(real_dir)
        dataloader, CHANNELS = datasets.get_dataset(dataset_name, img_size=target_size, shuffle=False, **kwargs)
        print('outputting real images...')
        output_real_images(dataloader, num_imgs, real_dir)
        print('...done')
    if generated_dir is not None:
        os.makedirs(generated_dir, exist_ok=True)
    return real_dir
def output_images(generator, input_metadata, rank, world_size, output_dir, num_imgs=2048):
    """Render *num_imgs* generated images into *output_dir*.

    Work is striped across distributed workers: worker *rank* writes image
    indices rank, rank+world_size, rank+2*world_size, ...  The input metadata
    is deep-copied and overridden with fixed evaluation settings.
    """
    metadata = copy.deepcopy(input_metadata)
    metadata['img_size'] = 128
    metadata['batch_size'] = 4
    # use the *_eval variants of the camera-pose settings when present
    metadata['h_stddev'] = metadata.get('h_stddev_eval', metadata['h_stddev'])
    metadata['v_stddev'] = metadata.get('v_stddev_eval', metadata['v_stddev'])
    metadata['sample_dist'] = metadata.get('sample_dist_eval', metadata['sample_dist'])
    metadata['psi'] = 1
    img_counter = rank
    generator.eval()
    img_counter = rank  # duplicate assignment kept from the original
    if rank == 0: pbar = tqdm("generating images", total = num_imgs)
    with torch.no_grad():
        while img_counter < num_imgs:
            z = torch.randn((metadata['batch_size'], generator.module.z_dim), device=generator.module.device)
            generated_imgs, _ = generator.module.staged_forward(z, **metadata)
            for img in generated_imgs:
                save_image(img, os.path.join(output_dir, f'{img_counter:0>5}.jpg'), normalize=True, range=(-1, 1))
                img_counter += world_size
            if rank == 0: pbar.update(world_size)
    if rank == 0: pbar.close()
def calculate_fid(dataset_name, generated_dir, target_size=256, outdir=None):
    """Compute the FID between the cached real-image directory (as laid out
    by setup_evaluation) and *generated_dir*."""
    cache_name = dataset_name + '_real_images_' + str(target_size)
    if outdir:
        real_dir = os.path.join(outdir, 'evaluation', cache_name)
    else:
        real_dir = os.path.join('EvalImages', cache_name)
    fid = fid_score.calculate_fid_given_paths([real_dir, generated_dir], 128, 'cuda', 2048)
    torch.cuda.empty_cache()
    return fid
if __name__ == '__main__':
    # Stand-alone use: populate the real-image cache for the given dataset.
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, default='CelebA')
    parser.add_argument('--img_size', type=int, default=128)
    parser.add_argument('--num_imgs', type=int, default=8000)
    opt = parser.parse_args()
    real_images_dir = setup_evaluation(opt.dataset, None, target_size=opt.img_size, num_imgs=opt.num_imgs)
|
[
"zhoupengcv@sjtu.edu.cn"
] |
zhoupengcv@sjtu.edu.cn
|
c016c10d81333bd078fbdb4fbe5d3567e161c4d5
|
ea285978bd60c8de8783a729effa7c92eeeb98e8
|
/DBcm.py
|
31cf80fdf2abed2c7b846332bf60a70b6e17c242
|
[] |
no_license
|
detalikota/website1
|
2426667ff8e838e0c4609a6694795c22d088a59d
|
7eb75c52697ed7fbe77a2b5178117049bdd1bdd6
|
refs/heads/master
| 2023-01-11T16:03:52.456786
| 2020-11-02T19:20:04
| 2020-11-02T19:20:04
| 304,236,859
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 442
|
py
|
import mysql.connector
class UseDatabase:
    """Context manager that opens a MySQL connection and yields a cursor.

    Usage::

        with UseDatabase(config) as cursor:
            cursor.execute(...)
    """

    def __init__(self, config: dict) -> None:
        """Store the keyword arguments later passed to mysql.connector.connect."""
        self.configuration = config

    def __enter__(self) -> 'Cursor':
        """Open the connection and return a cursor for the with-block."""
        self.conn = mysql.connector.connect(**self.configuration)
        self.cursor = self.conn.cursor()
        return self.cursor

    def __exit__(self, exc_type, exc_value, exc_trace) -> None:
        """Commit on success, roll back on error, and always release resources.

        BUG FIX: the original committed unconditionally, persisting partial
        work even when the with-block raised.  Returning None lets any
        exception propagate, as before.
        """
        try:
            if exc_type is None:
                self.conn.commit()
            else:
                self.conn.rollback()
        finally:
            self.cursor.close()
            self.conn.close()
|
[
"detalikota@gmail.com"
] |
detalikota@gmail.com
|
f660dc9314f35d14fc9c3c1466520c7b209c5e3b
|
641fa8341d8c436ad24945bcbf8e7d7d1dd7dbb2
|
/components/font_service/DEPS
|
a32f59c6a0f4be10e379efea183a5c36e3ed2e7d
|
[
"BSD-3-Clause"
] |
permissive
|
massnetwork/mass-browser
|
7de0dfc541cbac00ffa7308541394bac1e945b76
|
67526da9358734698c067b7775be491423884339
|
refs/heads/master
| 2022-12-07T09:01:31.027715
| 2017-01-19T14:29:18
| 2017-01-19T14:29:18
| 73,799,690
| 4
| 4
|
BSD-3-Clause
| 2022-11-26T11:53:23
| 2016-11-15T09:49:29
| null |
UTF-8
|
Python
| false
| false
| 163
|
include_rules = [
"+services/service_manager",
"+mojo/common",
"+mojo/public",
"+services/tracing/public/cpp",
"+skia",
"+third_party/skia/include",
]
|
[
"xElvis89x@gmail.com"
] |
xElvis89x@gmail.com
|
|
d6b6bc26a9dbc253af21a1a6c574c56a67df447f
|
50ca6df816baeeb59e2cfb0320d46d621df165d3
|
/Python/201910/191023/cksession.py
|
c7b17dbe8534d4bc95694425e3887f9cc17743d5
|
[] |
no_license
|
96no3/PythonStudy
|
6606342e788c63ca35e2a6cf21a432fc5274a343
|
2bf21081dd4803c7f4702b6cfccbaca3d2aa6f7b
|
refs/heads/master
| 2020-08-07T04:19:35.341606
| 2019-12-18T05:35:10
| 2019-12-18T05:35:10
| 213,292,674
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,941
|
py
|
#!/usr/bin/env python3
# クッキーを使ったセッション
from http import cookies
import os,json
import datetime,random,hashlib
import cgitb
class CookieSession:
    """Cookie-based session store for CGI scripts.

    The session id travels in a cookie; session values are persisted as JSON
    files under SESSION_DIR, one file per session id.  Supports dict-style
    item access (s[key], key in s).
    (Comments translated from the original Japanese.)
    """
    SESSION_ID = "CookieSessionId"
    # where session data files are stored (next to this script)
    SESSION_DIR = os.path.dirname(os.path.abspath(__file__)) + "/SESSION"
    def __init__(self):
        # make sure the session-data directory exists
        if not os.path.exists(self.SESSION_DIR):
            os.mkdir(self.SESSION_DIR)
        # read the session id from the request cookie
        rc = os.environ.get("HTTP_COOKIE","")
        self.cookie = cookies.SimpleCookie(rc)
        if self.SESSION_ID in self.cookie:
            self.sid = self.cookie[self.SESSION_ID].value
        else:
            # first visit: generate a fresh session id
            self.sid = self.gen_sid()
        # load any previously saved session values
        self.modified = False
        self.values = {}
        path = self.SESSION_DIR + "/" + self.sid
        if os.path.exists(path):
            with open(path,"r",encoding="utf-8") as f:
                a_json = f.read()
                # restore the JSON-serialized data
                self.values = json.loads(a_json)
    def gen_sid(self):
        """Generate a new session id (SHA-256 of a secret token + timestamp
        + random number)."""
        token = ":#sa$2jAiN"
        now = datetime.datetime.now().strftime("%Y%m%d%H%M%S%f")
        rnd = random.randint(0,100000)
        key = (token + now + str(rnd)).encode("utf-8")
        sid = hashlib.sha256(key).hexdigest()
        return sid
    def output(self):
        """Persist the session and return the Set-Cookie header line(s)."""
        self.cookie[self.SESSION_ID] = self.sid
        self.save_data()
        return self.cookie.output()
    def save_data(self):
        """Write the session values to disk (no-op when nothing changed)."""
        if not self.modified:
            return
        path = self.SESSION_DIR + "/" + self.sid
        # serialize to JSON and save
        a_json = json.dumps(self.values)
        with open(path,"w",encoding="utf-8") as f:
            f.write(a_json)
    # special methods for dict-style access
    def __getitem__(self,key):
        return self.values[key]
    def __setitem__(self,key,value):
        self.modified = True
        self.values[key] = value
    def __contains__(self,key):
        return key in self.values
    def clear(self):
        # drop all values; note this does not mark the session modified
        self.values = {}
if __name__ == "__main__":
    cgitb.enable()
    # smoke test: a visit counter backed by the cookie session
    ck = CookieSession()
    counter = 1
    if "counter" in ck:
        counter = int(ck["counter"]) + 1
    ck["counter"] = counter
    # CGI response: headers (including Set-Cookie), blank line, then body
    print("Content-Type: text/html; charset=utf-8")
    print(ck.output())
    print("")
    print("counter=",counter)
|
[
"44739759+96no3@users.noreply.github.com"
] |
44739759+96no3@users.noreply.github.com
|
ba17a8097673f1389fb4890fb8b41fcd93bd6d19
|
209aae9f40657d48461bed5e081c4f235f86090a
|
/2020/day11-2.py
|
b0cf3c89cba7f1a4458990d2cb39256d430e06a9
|
[] |
no_license
|
scheidguy/Advent_of_Code
|
6e791132157179928e1415f49467ad221ef1e258
|
fbc09e4d26502b9a77e0c8d2840b11ec85a3c478
|
refs/heads/main
| 2023-03-05T12:34:15.343642
| 2021-02-20T00:27:58
| 2021-02-20T00:27:58
| 329,106,711
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,656
|
py
|
import copy
def neighbors(R, C, grid):
    """Count occupied seats ('#') visible from (R, C) along the 8 ray
    directions (AoC 2020 day 11, part 2 rules).

    Each ray stops at the first seat it meets: an occupied seat '#' counts,
    an empty seat 'L' blocks the ray; floor '.' is looked through.  Assumes
    a rectangular grid of equal-length rows (the caller pads it).

    Refactor: the original repeated the same scan loop eight times, once per
    direction; a data-driven loop over direction vectors is equivalent.
    """
    directions = ((-1, 0), (1, 0), (0, -1), (0, 1),      # N, S, W, E
                  (-1, -1), (-1, 1), (1, -1), (1, 1))    # NW, NE, SW, SE
    rows, cols = len(grid), len(grid[0])
    num = 0
    for dr, dc in directions:
        r, c = R + dr, C + dc
        while 0 <= r < rows and 0 <= c < cols:
            if grid[r][c] == '#':
                num += 1
                break
            if grid[r][c] == 'L':
                break
            r += dr
            c += dc
    return num
# Read the seat layout; use a context manager so the file is always closed.
with open('day11-1_input.txt') as f:
    prevgrid = f.readlines()

# encircle the grid with floor spaces to simplify processing
rows = len(prevgrid)
for i in range(rows):
    prevgrid[i] = '.' + prevgrid[i].strip() + '.'
cols = len(prevgrid[0])
prevgrid.append(cols * '.')
prevgrid.insert(0, cols * '.')
nowgrid = copy.deepcopy(prevgrid)
rows = len(prevgrid)

# Iterate the seating rules until the layout stops changing:
#   empty seat 'L' becomes '#' when it sees no occupied seats;
#   occupied seat '#' becomes 'L' when it sees 5 or more.
# BUG FIX: removed a debugging leftover
# (`if row == 90 and col == 20: print('')`) that printed stray blank lines.
unstable = True
while unstable:
    for row in range(rows):
        for col in range(cols):
            seat = prevgrid[row][col]
            if seat == 'L':
                neigh = neighbors(row, col, prevgrid)
                if neigh == 0:
                    updated = list(nowgrid[row])
                    updated[col] = '#'
                    nowgrid[row] = "".join(updated)
            elif seat == '#':
                neigh = neighbors(row, col, prevgrid)
                if neigh >= 5:
                    updated = list(nowgrid[row])
                    updated[col] = 'L'
                    nowgrid[row] = "".join(updated)
    if prevgrid == nowgrid:
        unstable = False
        # answer: total occupied seats once the layout is stable
        print(sum([row.count('#') for row in nowgrid]))
    else:
        prevgrid = copy.deepcopy(nowgrid)
|
[
"scheidguy@gmail.com"
] |
scheidguy@gmail.com
|
99143087e6840facd871adef59dd5d9989058001
|
61673ab9a42f7151de7337608c442fa6247f13bb
|
/peewee/execute_sql-to-dictionary/main-2-list-comprehension.py
|
11bdabfb5d57af845048800c1572af940ceac702
|
[
"MIT"
] |
permissive
|
furas/python-examples
|
22d101670ecd667a29376d7c7d7d86f8ec71f6cf
|
95cb53b664f312e0830f010c0c96be94d4a4db90
|
refs/heads/master
| 2022-08-23T23:55:08.313936
| 2022-08-01T14:48:33
| 2022-08-01T14:48:33
| 45,575,296
| 176
| 91
|
MIT
| 2021-02-17T23:33:37
| 2015-11-04T23:54:32
|
Python
|
UTF-8
|
Python
| false
| false
| 491
|
py
|
# date: 2019.05.20
# author: bartłomiej 'furas' Burek
# https://stackoverflow.com/questions/56219520/peewee-how-do-i-execute-raw-query-and-map-it-to-the-dictionary/56219996#56219996
# Demonstrates mapping a raw peewee query result to a list of dicts.
import peewee

db = peewee.MySQLDatabase('my_database', user='my_user', password='my_password')

cursor = db.execute_sql('show table status from my_database')
# cursor.description holds one 7-item sequence per result column;
# position 0 is the column name
column_names = [x[0] for x in cursor.description]
# pair every row with the column names -> one dict per table
all_tables = [dict(zip(column_names, row)) for row in cursor.fetchall()]
print(all_tables)
|
[
"furas@tlen.pl"
] |
furas@tlen.pl
|
7bca9e6f78163b6d2e52f659b87b9562245ae0f0
|
cf7d96bdd34205ede987f0985dfc9e3ab415ee06
|
/ad_bank_loan/voucher.py
|
5e064d94136a648862eb9cf1db431f9e95edd32a
|
[] |
no_license
|
hendrasaputra0501/btxjalan
|
afc93467d54a6f20ef6ac46f7359e964ad5d42a0
|
d02bc085ad03efc982460d77f7af1eb5641db729
|
refs/heads/master
| 2020-12-30T11:02:05.416120
| 2017-07-31T01:34:08
| 2017-07-31T01:34:08
| 98,836,234
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
from osv import osv, fields
from tools.translate import _
import openerp.addons.decimal_precision as dp
import netsvc
from datetime import datetime
import time
from dateutil.relativedelta import relativedelta
class account_voucher_writeoff(osv.Model):
    """Extend account.voucher.writeoff with a link to the bank-loan interest
    record it writes off; deleting the interest cascades to this line."""
    _inherit = "account.voucher.writeoff"
    _columns = {
        "interest_id" : fields.many2one("account.bank.loan.interest","Interest",ondelete="cascade"),
    }
|
[
"hendrasaputra0501@gmail.com"
] |
hendrasaputra0501@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.