| column | dtype | stats |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–288 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 684 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, nullable |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable |
| gha_language | string | 147 classes |
| src_encoding | string | 25 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 128 – 12.7k |
| extension | string | 142 classes |
| content | string | length 128 – 8.19k |
| authors | list | length 1–1 |
| author_id | string | length 1–132 |
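Each row pairs one Python source file (`content`) with its repository provenance, rendered below as labeled fields plus the file itself. As a quick orientation, here is a minimal sketch of iterating rows with this schema, assuming the dump comes from a Hugging Face `datasets`-style dataset; the dataset path `org/python-files` is a placeholder, not the real name:

```python
# Minimal sketch, not an official loader: stream rows with the schema above.
# Assumes the Hugging Face `datasets` library; "org/python-files" is a
# placeholder dataset path.
from datasets import load_dataset

ds = load_dataset("org/python-files", split="train", streaming=True)
for row in ds:
    # One row = one source file plus its repository metadata.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    break
```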

blob_id: 322d450662d582c2c1f19c213517f29168d4ec15
directory_id: a9f676c06bacee1f8b27e08d3c411c89a69cfd40
path: /falmer/events/migrations/0031_auto_20180928_1223.py
content_id: f7d76238b0effcfd8ed14b357d772ca4cd902f86
detected_licenses: ["MIT"]
license_type: permissive
repo_name: sussexstudent/falmer
snapshot_id: 1b877c3ac75a0477f155ce1a9dee93a5ada686d6
revision_id: ae735bd9d6177002c3d986e5c19a78102233308f
branch_name: refs/heads/master
visit_date: 2022-12-11T19:40:12.232488
revision_date: 2020-03-20T13:01:47
committer_date: 2020-03-20T13:01:47
github_id: 88,043,958
star_events_count: 2
fork_events_count: 3
gha_license_id: MIT
gha_event_created_at: 2022-12-08T03:17:26
gha_created_at: 2017-04-12T11:24:02
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 537
extension: py
content:

```python
# Generated by Django 2.0.8 on 2018-09-28 11:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('events', '0030_brandingperiod_override_listings_root'),
]
operations = [
migrations.AlterField(
model_name='event',
name='ticket_type',
field=models.CharField(choices=[('NA', 'n/a'), ('NT', 'Native'), ('EB', 'Eventbrite'), ('AC', 'ACCA'), ('GN', 'Generic'), ('MSL', 'MSL')], default='NA', max_length=3),
),
]
```
authors: ["james@brudil.com"]
author_id: james@brudil.com

blob_id: eaa42c766189d48ffb00f361d854aead4aac7002
directory_id: 1534531d248728e583310214c84cd329cfeb243b
path: /accelerator/examples/a_dsexample_multipledatasets.py
content_id: 1539fa2b787d90fd2240e9cfee47148b70491c9b
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: eBay/accelerator
snapshot_id: 415a006d18283940661c0f3cbae2c311acc1ffaa
revision_id: 8376d289e39cd90562de7dc2e3cdaa0bf080587b
branch_name: refs/heads/master
visit_date: 2023-03-10T11:08:58.828517
revision_date: 2022-07-14T19:15:46
committer_date: 2022-07-14T19:15:46
github_id: 130,265,539
star_events_count: 146
fork_events_count: 30
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 459
extension: py
content:

```python
def prepare(job):
dw1 = job.datasetwriter(name='first')
dw2 = job.datasetwriter(name='second')
dw3 = job.datasetwriter(name='third')
dw1.add('col1', 'int64')
dw2.add('col1', 'json')
dw3.add('col1', 'number')
dw3.add('col2', 'ascii')
dw3.add('col3', 'bool')
return dw1, dw2, dw3
def analysis(sliceno, prepare_res):
dw1, dw2, dw3 = prepare_res
dw1.write(sliceno)
dw2.write({'sliceno': sliceno})
dw3.write(sliceno, str(sliceno), sliceno % 2 == 0)
```
authors: ["anders@berkeman.org"]
author_id: anders@berkeman.org

blob_id: 41423559ea1814593584b0719e067271b835e2f5
directory_id: 36de14c6b188886df6a284ee9ce4a464a5ded433
path: /Solutions/0481/0481.py
content_id: 9cb5c5f805a1753547fb9a793374fca17c61eb5e
detected_licenses: []
license_type: no_license
repo_name: washing1127/LeetCode
snapshot_id: 0dca0f3caa5fddd72b299e6e8f59b5f2bf76ddd8
revision_id: b910ddf32c7e727373449266c9e3167c21485167
branch_name: refs/heads/main
visit_date: 2023-03-04T23:46:40.617866
revision_date: 2023-02-21T03:00:04
committer_date: 2023-02-21T03:00:04
github_id: 319,191,720
star_events_count: 3
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 481
extension: py
content:

```python
# -*- coding:utf-8 -*-
# Author: washing
# DateTime: 2022/10/31 07:56
# File: 0481.py
# Desc:
class Solution:
def magicalString(self, n: int) -> int:
if n <= 3: return 1
l = [1,2,2]
gai = 1
idx = 2
for _ in range(2, n):
l.extend([gai] * l[idx])
idx += 1
if gai == 1: gai = 2
else: gai = 1
if len(l) >= n:
return sum([i%2 for i in l[:n]])
```
authors: ["1014585392@qq.com"]
author_id: 1014585392@qq.com

blob_id: 174372ef8d2ca43068f5360b308aef75060ce3fb
directory_id: 4766d241bbc736e070f79a6ae6a919a8b8bb442d
path: /archives/20190519python/0977. Squares of a Sorted Array.py
content_id: 31e803b0f0a7a1799a41547fd8cdb2db90103d5c
detected_licenses: []
license_type: no_license
repo_name: yangzongwu/leetcode
snapshot_id: f7a747668b0b5606050e8a8778cc25902dd9509b
revision_id: 01f2edd79a1e922bfefecad69e5f2e1ff3a479e5
branch_name: refs/heads/master
visit_date: 2021-07-08T06:45:16.218954
revision_date: 2020-07-18T10:20:24
committer_date: 2020-07-18T10:20:24
github_id: 165,957,437
star_events_count: 10
fork_events_count: 8
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,211
extension: py
content:

```python
from typing import List  # needed for the List[int] annotations below

class Solution:
def sortedSquares(self, A: List[int]) -> List[int]:
if not A:
return A
if A[0]>=0:
return self.getSquares(A)
if A[0]<0:
if A[-1]<0:
A=self.getSquares(A)
return A[::-1]
k=0
while k<len(A) and A[k]<0:
k+=1
nums1=self.getSquares(A[:k][::-1])
nums2=self.getSquares(A[k:])
return self.getsortedArray(nums1,nums2)
def getsortedArray(self,nums1,nums2):
if not nums1:
return nums2
if not nums2:
return nums1
rep=[]
k1,k2=0,0
while k1<len(nums1) and k2<len(nums2):
if nums1[k1]<nums2[k2]:
rep.append(nums1[k1])
k1+=1
else:
rep.append(nums2[k2])
k2+=1
while k1<len(nums1):
rep.append(nums1[k1])
k1+=1
while k2<len(nums2):
rep.append(nums2[k2])
k2+=1
return rep
def getSquares(self,A):
for k in range(len(A)):
A[k]=A[k]*A[k]
return A
```
authors: ["noreply@github.com"]
author_id: yangzongwu.noreply@github.com

blob_id: f2751c6ffd13b7ca049b44caf699c03881be8ee1
directory_id: 6e34d59a5220d42b8baa39bd5bc49d69f77103b6
path: /timelapse_stack.py
content_id: faa41545ac02dbba0480be8e54f29d69fd136595
detected_licenses: []
license_type: no_license
repo_name: pbmanis/timelapse
snapshot_id: 034dc6633fa98d43cba03faf68eb9d2636da8120
revision_id: 6a989545cbf83e2a40e2d8f87120860104ee24b6
branch_name: refs/heads/master
visit_date: 2021-06-29T18:38:41.726219
revision_date: 2020-07-14T01:33:03
committer_date: 2020-07-14T01:33:03
github_id: 66,783,357
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 6,952
extension: py
content:

```python
"""
Script used to convert timelapse+zstack data into a max-filtered video.
Luke Campagnola and Paul Manis, 4-2015 and 3, 4-2016.
Input data structures:
1. 'auto': ImageSequence_nnn has a number if image_nnn.ma files; each of those files is a single
time point in the sequence.
2. 'manual': Each ImageSequence_nnn has a single image_000.ma file; the ImageSequence itself is
the individual time point; the slice directory handles the
"""
from acq4.util.metaarray import MetaArray
import acq4.util.DataManager as DataManager
import imreg_dft
import scipy.stats
import re
import os
import numpy as np
import pyqtgraph as pg
from collections import OrderedDict
import argparse
parser = argparse.ArgumentParser(description='Analyze time lapse stacks')
parser.add_argument('Experiment', type=int,
help='Select an Experiment number')
args = parser.parse_args()
expt = args.Experiment
app = pg.mkQApp()
basedir = '/Volumes/Backup2B/Sullivan_Chelsea/Chelsea/'
basedir = '/Volumes/Promise Pegasus/ManisLab_Data3/Sullivan_Chelsea/'
# man.setBaseDir(basedir)
#
# Analysis is driven by the filelist data structure
#
# 'filelist' is a dictionary, which contains a dict of parameters to guide the analysis.
# 'refframes' is a list of the matching frames from each of the z-stacks in the successive
# time points
# 'mode' is either 'auto', or 'manual'. If the data are collected as a time-lapse sequence of
# z stacks, and appear as a set of "ImageSequence_000" directories, then the mode should be
# 'auto'. If the time-lapse points were manually collected, but the stacks are automatic,
# then the mode should be 'manual'.
# 'datalist' is a list of the records to include. If 'datalist' is set to None, then all
# recordings will be included. Note that if mode is "auto", then datalist should be None.
#
filelist = OrderedDict([('2015.04.17_000/slice_001/ImageSequence_000',
{'refframes': [40, 37, 33, 30, 28, 26, 23, 21, 19, 17, 16, 14],
'mode': 'auto', 'datalist': None}),
('2016.03.22_000/slice_000',
{'refframes': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'mode': 'manual', 'datalist': None}),
('2016.03.23_000',
{'refframes': [0]*39,
'mode': 'manual', 'datalist': None}),
('2016.03.28_000/slice_000',
{'refframes': [0]*len(range(0, 9)),
'mode': 'manual', 'datalist': range(0, 9)}),
('2016.04.11_000/slice_000',
{'refframes': [0]*len(range(0, 13)),
'mode': 'manual', 'datalist': range(0, 13)}),
('2016.04.13_000/slice_000',
{'refframes': [0]*len(range(2, 16)),
'mode': 'manual', 'datalist': range(2, 16)}),
('2016.04.15_000/slice_000',
{'refframes': [0]*len(range(14, 34)),
'mode': 'manual', 'datalist': range(14, 34)}),
])
# select a dataset to analyze:
ff = filelist.keys()[expt] # gets the dataset name
fullpath = os.path.join(basedir, ff)
print 'File: ', fullpath
dh = DataManager.getDirHandle(fullpath, create=False)
# collect all data with depth corrected
#dh = man.currentFile
found = False
for n in filelist.keys():
if n in dh.name():
found = True
break
if not found:
raise ValueError('Unknown file: %s' % dh.name())
print 'Dataset found.'
indexes = filelist[n]['refframes']
if filelist[n]['mode'] == 'auto':
z_length = len(dh.info()['zStackValues'])
offsets = [-min(indexes), z_length - max(indexes)]
print 'Analyzing in Auto mode'
print '\tTimes in timelapse: ', z_length
print '\tIndexes: ', indexes
print '\tOffsets: ', offsets
data = [dh['image_%03d.ma'%i].read()[indexes[i]+offsets[0]:indexes[i]+offsets[1]].
asarray()[np.newaxis, ...] for i in range(len(indexes))]
elif filelist[n]['mode'] == 'manual':
nframes = dh['ImageSequence_%03d' % filelist[n]['datalist'][0]]['image_000.ma'].read().shape[0]
ts = []
if filelist[n]['datalist'] != None :
sequence = filelist[n]['datalist']
else:
sequence = range(len(indexes))
for i in sequence:
th = dh['ImageSequence_%03d'%i]['image_000.ma']
if th.exists() and th.read().shape[0] == nframes:
ts.append(i)
z_length = len(ts)
offsets = [-min(indexes), z_length - max(indexes)]
print 'Analyzing in Manual mode'
print '\t# of depths in timelapse: ', z_length
print '\t# of frames in each: ', nframes
print '\tIndexes: ', indexes
print '\tOffsets: ', offsets
try:
print indexes
print offsets
print 'list of indexes reading: ', [[indexes[i]+offsets[0],indexes[i]+offsets[1]] for i in ts]
data = [dh['ImageSequence_%03d'%i]['image_000.ma'].read()[indexes[i]+offsets[0]:indexes[i]+offsets[1]].
asarray()[np.newaxis, ...] for i in range(len(ts[:-2]))]
except:
print 'error'
print 'len ts: ', len(ts)
print 'ts: ', ts
print 'i: ', i
print 'index[i], o: ', indexes[i], offsets[0], offsets[1]
raise ValueError('Indexing error for ImageSequence image data set %d' % i)
else:
raise ValueError('Unknown data mode: %s' % filelist[n]['mode'])
print 'data shape: ', [len(k) for k in data]
data = np.concatenate(data, axis=0)
# print 'data shape (t, z, x, y): ', data.shape
# dim edges to avoid artifacts at the edges of depth range
dim = data.copy()
dim[:,0] *= 0.33
dim[:,1] *= 0.66
dim[:,-1] *= 0.33
dim[:,-2] *= 0.66
# flatten stacks
m = dim.max(axis=1)
nreg = m.shape[0]
ireg = int(nreg/2) # get one near the middle of the sequence.
# correct for lateral motion
off = [imreg_dft.translation(m[ireg], m[i])[0] for i in range(0, m.shape[0])]
offt = np.array(off).T
# find boundaries of outer rectangle including all images as registered
minx = np.min(offt[0])
maxx = np.max(offt[0])
miny = np.min(offt[1])
maxy = np.max(offt[1])
# build canvas
canvas = np.zeros(shape=(m.shape[0], m.shape[1]-minx+maxx,
m.shape[2]-miny+maxy), dtype=m.dtype)
# set initial image (offsets were computed relative to this, so it has no offset)
# canvas[0, -minx:-minx+m.shape[1], -miny:-miny+m.shape[2]] = m[0]
for i in range(0, m.shape[0]):
ox = offt[0][i] - minx
oy = offt[1][i] - miny
canvas[i, ox:(ox+m.shape[1]), oy:(oy+m.shape[2])] = m[i]
# print 'canvas %d set' % i
# correct for bleaching
levels = np.array([np.median(m[m>scipy.stats.scoreatpercentile(m[i], 95)]) for i in range(m.shape[0])])
norm = canvas / levels[:, np.newaxis, np.newaxis]
w = pg.image()
w.setImage(norm)
# write the resulting compressed z-stacks to a file in the original directory.
ma = MetaArray(norm, info=[{'name': 'Time'}, {'name': 'X'}, {'name': 'Y'}, {}])
ma.write(dh.name() + '/max_stack.ma')
pg.show()
import sys
if sys.flags.interactive == 0:
app.exec_()
```
authors: ["pmanis@med.unc.edu"]
author_id: pmanis@med.unc.edu

blob_id: a2912aa8082dd0c86f50d80a953f898ece522e01
directory_id: e733d07a1492f6e9b762d9ca496ec59668aedb95
path: /qcloudsdkcvm/InquiryInstancePriceHourRequest.py
content_id: 906b50e9717e953b7d3cb28273d3cd7f2de595fb
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: QcloudApi/qcloudcli
snapshot_id: 1f67d8467b81ac8964362491cd4f3104f8e59161
revision_id: ba16161f65df5f621d9f1c5587b9900dca600cb5
branch_name: refs/heads/master
visit_date: 2023-08-15T01:51:05.236254
revision_date: 2018-07-11T08:07:29
committer_date: 2018-07-11T08:07:29
github_id: 100,922,202
star_events_count: 8
fork_events_count: 6
gha_license_id: null
gha_event_created_at: 2018-03-29T11:57:26
gha_created_at: 2017-08-21T06:55:45
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,262
extension: py
content:

```python
# -*- coding: utf-8 -*-
from qcloudsdkcore.request import Request
class InquiryInstancePriceHourRequest(Request):
def __init__(self):
super(InquiryInstancePriceHourRequest, self).__init__(
'cvm', 'qcloudcliV1', 'InquiryInstancePriceHour', 'cvm.api.qcloud.com')
def get_bandwidth(self):
return self.get_params().get('bandwidth')
def set_bandwidth(self, bandwidth):
self.add_param('bandwidth', bandwidth)
def get_bandwidthType(self):
return self.get_params().get('bandwidthType')
def set_bandwidthType(self, bandwidthType):
self.add_param('bandwidthType', bandwidthType)
def get_cpu(self):
return self.get_params().get('cpu')
def set_cpu(self, cpu):
self.add_param('cpu', cpu)
def get_goodsNum(self):
return self.get_params().get('goodsNum')
def set_goodsNum(self, goodsNum):
self.add_param('goodsNum', goodsNum)
def get_imageId(self):
return self.get_params().get('imageId')
def set_imageId(self, imageId):
self.add_param('imageId', imageId)
def get_imageType(self):
return self.get_params().get('imageType')
def set_imageType(self, imageType):
self.add_param('imageType', imageType)
def get_instanceModel(self):
return self.get_params().get('instanceModel')
def set_instanceModel(self, instanceModel):
self.add_param('instanceModel', instanceModel)
def get_mem(self):
return self.get_params().get('mem')
def set_mem(self, mem):
self.add_param('mem', mem)
def get_rootSize(self):
return self.get_params().get('rootSize')
def set_rootSize(self, rootSize):
self.add_param('rootSize', rootSize)
def get_storageSize(self):
return self.get_params().get('storageSize')
def set_storageSize(self, storageSize):
self.add_param('storageSize', storageSize)
def get_storageType(self):
return self.get_params().get('storageType')
def set_storageType(self, storageType):
self.add_param('storageType', storageType)
def get_zoneId(self):
return self.get_params().get('zoneId')
def set_zoneId(self, zoneId):
self.add_param('zoneId', zoneId)
```
authors: ["zhiqiangfan@tencent.com"]
author_id: zhiqiangfan@tencent.com

blob_id: 24de9783acf09079e0e372ead54d08b82db5e567
directory_id: 91d1a6968b90d9d461e9a2ece12b465486e3ccc2
path: /storagegateway_write_f/smb-file-share_update.py
content_id: 3357133385bf1dc97e35672040d499ce1326752f
detected_licenses: []
license_type: no_license
repo_name: lxtxl/aws_cli
snapshot_id: c31fc994c9a4296d6bac851e680d5adbf7e93481
revision_id: aaf35df1b7509abf5601d3f09ff1fece482facda
branch_name: refs/heads/master
visit_date: 2023-02-06T09:00:33.088379
revision_date: 2020-12-27T13:38:45
committer_date: 2020-12-27T13:38:45
github_id: 318,686,394
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 676
extension: py
content:

```python
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
"""
create-smb-file-share : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/storagegateway/create-smb-file-share.html
describe-smb-file-shares : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/storagegateway/describe-smb-file-shares.html
"""
write_parameter("storagegateway", "update-smb-file-share")
```
authors: ["hcseo77@gmail.com"]
author_id: hcseo77@gmail.com

blob_id: 2dd74bd70f39713191de8dc6a0ece7478c6387db
directory_id: 6e158a54409937515b14676730adfadfd457d4ae
path: /gaussian_spheres/pwl.py
content_id: 829e513a721f8fe6245d003f3e220aab7e410ea8
detected_licenses: []
license_type: no_license
repo_name: Tjstretchalot/machinelearning
snapshot_id: e2b277efd99f6e45005cb92a0cc17e90bf7d37e4
revision_id: 5a3b17c49211a63f71cdf40ca35e00a3af4b198a
branch_name: refs/heads/master
visit_date: 2020-05-02T09:25:25.032430
revision_date: 2019-07-25T14:37:43
committer_date: 2019-07-25T14:37:43
github_id: 177,871,307
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,665
extension: py
content:

```python
"""A gaussian spheres input technique. Randomly places cluster centers in an n-dimensional cube.
Then assigns each cluster a label. To generate points, a cluster is selected uniformly at random,
then a radius is selected from a normal distribution, then a point is selected uniformly from
within a sphere centered at the cluster center with the given radius."""
import torch
import typing
import scipy.spatial.distance as distance
import numpy as np
from shared.pwl import PointWithLabelProducer, PointWithLabel
class GaussianSpheresPWLP(PointWithLabelProducer):
"""Produces points selected from gaussian spheres. Marks are ignored.
Attributes:
clusters (list[PointWithLabel])
radius_dist (torch.distributions.distribution.Distribution)
"""
def __init__(self, epoch_size: int, input_dim: int, output_dim: int,
clusters: typing.List[PointWithLabel], std_dev: float, mean: float):
super().__init__(epoch_size, input_dim, output_dim)
self.clusters = clusters
self.radius_dist = torch.distributions.normal.Normal(
torch.tensor([float(mean)]), torch.tensor([float(std_dev)])) #pylint: disable=not-callable
@classmethod
def create(cls, epoch_size: int, input_dim: int, output_dim: int, cube_half_side_len: float,
num_clusters: int, std_dev: float, mean: float, min_sep: float,
force_split: bool = False):
"""Creates a new gaussian spheres pwlp, pulling points from the cube with a side length
of 2*cube_half_side_len centered at the origin
Arguments:
epoch_size (int): the number of points we will consider 1 epoch
input_dim (int): the input dimension (i.e., number of coordinates per point)
output_dim (int): the output dimension (i.e., number of unique labels)
cube_half_side_len (float): if '1', each coordinate is uniform from [-1, 1]
num_clusters (int): the number of clusters
std_dev (float): standard deviation of the radius
mean (float): mean of the radius
min_sep (float): minimum separation between points
force_split (bool, optional): if True then there will be an even as possible
distribution of cluster labels. if False then there will be a multinomial
distribution of cluster labels with the same probability for each
"""
# rejection sampling
clust_centers = np.zeros((num_clusters, input_dim), dtype='double')
clusters = []
if force_split:
next_label = 0
for i in range(num_clusters):
rejections = 0
center = torch.zeros((input_dim,), dtype=torch.double)
while True:
torch.rand(input_dim, out=center)
center = (center - 0.5) * 2 * cube_half_side_len
distances = distance.cdist(center.reshape(1, -1).numpy(), clust_centers)
if np.min(distances) < min_sep:
rejections += 1
if rejections > 10000:
raise ValueError('rejected too many points!')
else:
break
clust_centers[i, :] = center.numpy()
if force_split:
clust_label = next_label
next_label = (next_label + 1) % output_dim
else:
clust_label = torch.randint(output_dim, (1,)).item()
clusters.append(PointWithLabel(point=center, label=clust_label))
return cls(epoch_size, input_dim, output_dim, clusters, std_dev, mean)
def _fill_with_clusters(self, points: torch.tensor, labels: torch.tensor,
cluster_inds: torch.tensor):
vec = torch.zeros((self.input_dim,), dtype=torch.double)
for i in range(points.shape[0]):
clust = self.clusters[cluster_inds[i].item()]
radius = torch.abs(self.radius_dist.sample()).double()
torch.randn(self.input_dim, out=vec)
vec *= (radius / torch.norm(vec))
labels[i] = clust.label
points[i, :] = clust.point + vec
def _fill(self, points: torch.tensor, labels: torch.tensor):
batch_size = points.shape[0]
cluster_inds = torch.randint(len(self.clusters), (batch_size,), dtype=torch.long)
self._fill_with_clusters(points, labels, cluster_inds)
def fill_uniform(self, points: torch.tensor, labels: torch.tensor):
"""Fills the specified points and labels such that the labels are spread
evenly"""
batch_size = points.shape[0]
num_per_label = batch_size // self.output_dim
if num_per_label * self.output_dim != batch_size:
raise ValueError(f'cannot fill {batch_size} uniformly when output dim is {self.output_dim}')
cluster_lbls = np.zeros(len(self.clusters), dtype='int32')
for ind, clust in enumerate(self.clusters):
cluster_lbls[ind] = clust.label
cluster_inds = torch.zeros(0, dtype=torch.long)
for lbl in range(self.output_dim):
mask = cluster_lbls == lbl
viable_clust_inds = np.arange(len(self.clusters), dtype='int64')[mask]
lbl_clust_inds = np.random.choice(viable_clust_inds, (num_per_label,))
cluster_inds = torch.cat((cluster_inds, torch.from_numpy(lbl_clust_inds)))
self._fill_with_clusters(points, labels, cluster_inds)
def _position(self, pos: int):
pass
```
authors: ["mtimothy984@gmail.com"]
author_id: mtimothy984@gmail.com

blob_id: e9ad0d1f8948db6b00d4c77f6d1d720bfbf254d9
directory_id: 6b699b7763a0ff8c32b85014d96f6faf02514a2e
path: /models/research/object_detection/models/embedded_ssd_mobilenet_v1_feature_extractor.py
content_id: 96586e17f65a9ead1cb9d1c93540cd46477f3b2f
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: leizeling/Base_tensorflow-object_detection_2Dcord
snapshot_id: df7c195685fed21fd456f1dd79881a198cf8b6e0
revision_id: d07418eb68543adc2331211ccabbc27137c8676e
branch_name: refs/heads/master
visit_date: 2020-03-19T11:51:57.961688
revision_date: 2018-06-07T14:47:16
committer_date: 2018-06-07T14:47:16
github_id: 136,481,479
star_events_count: 0
fork_events_count: 1
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 7,185
extension: py
content:

```python
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Embedded-friendly SSDFeatureExtractor for MobilenetV1 features."""
import tensorflow as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import context_manager
from object_detection.utils import ops
from nets import mobilenet_v1
slim = tf.contrib.slim
class EmbeddedSSDMobileNetV1FeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
"""Embedded-friendly SSD Feature Extractor using MobilenetV1 features.
This feature extractor is similar to SSD MobileNetV1 feature extractor, and
it fixes input resolution to be 256x256, reduces the number of feature maps
used for box prediction and ensures convolution kernel to be no larger
than input tensor in spatial dimensions.
This feature extractor requires support of the following ops if used in
embedded devices:
- Conv
- DepthwiseConv
- Relu6
All conv/depthwiseconv use SAME padding, and no additional spatial padding is
needed.
"""
def __init__(self,
is_training,
depth_multiplier,
min_depth,
pad_to_multiple,
conv_hyperparams_fn,
reuse_weights=None,
use_explicit_padding=False,
use_depthwise=False,
override_base_feature_extractor_hyperparams=False):
"""MobileNetV1 Feature Extractor for Embedded-friendly SSD Models.
Args:
is_training: whether the network is in training mode.
depth_multiplier: float depth multiplier for feature extractor.
min_depth: minimum feature extractor depth.
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to. For EmbeddedSSD it must be set to 1.
conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
and separable_conv2d ops in the layers that are added on top of the
base feature extractor.
reuse_weights: Whether to reuse variables. Default is None.
use_explicit_padding: Whether to use explicit padding when extracting
features. Default is False.
use_depthwise: Whether to use depthwise convolutions. Default is False.
override_base_feature_extractor_hyperparams: Whether to override
hyperparameters of the base feature extractor with the one from
`conv_hyperparams_fn`.
Raises:
ValueError: upon invalid `pad_to_multiple` values.
"""
if pad_to_multiple != 1:
raise ValueError('Embedded-specific SSD only supports `pad_to_multiple` '
'of 1.')
super(EmbeddedSSDMobileNetV1FeatureExtractor, self).__init__(
is_training, depth_multiplier, min_depth, pad_to_multiple,
conv_hyperparams_fn, reuse_weights, use_explicit_padding, use_depthwise,
override_base_feature_extractor_hyperparams)
def preprocess(self, resized_inputs):
"""SSD preprocessing.
Maps pixel values to the range [-1, 1].
Args:
resized_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
"""
return (2.0 / 255.0) * resized_inputs - 1.0
def extract_features(self, preprocessed_inputs):
"""Extract features from preprocessed inputs.
Args:
preprocessed_inputs: a [batch, height, width, channels] float tensor
representing a batch of images.
Returns:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
Raises:
ValueError: if image height or width are not 256 pixels.
"""
image_shape = preprocessed_inputs.get_shape()
image_shape.assert_has_rank(4)
image_height = image_shape[1].value
image_width = image_shape[2].value
if image_height is None or image_width is None:
shape_assert = tf.Assert(
tf.logical_and(tf.equal(tf.shape(preprocessed_inputs)[1], 256),
tf.equal(tf.shape(preprocessed_inputs)[2], 256)),
['image size must be 256 in both height and width.'])
with tf.control_dependencies([shape_assert]):
preprocessed_inputs = tf.identity(preprocessed_inputs)
elif image_height != 256 or image_width != 256:
raise ValueError('image size must be = 256 in both height and width;'
' image dim = %d,%d' % (image_height, image_width))
feature_map_layout = {
'from_layer': [
'Conv2d_11_pointwise', 'Conv2d_13_pointwise', '', '', ''
],
'layer_depth': [-1, -1, 512, 256, 256],
'conv_kernel_size': [-1, -1, 3, 3, 2],
'use_explicit_padding': self._use_explicit_padding,
'use_depthwise': self._use_depthwise,
}
with tf.variable_scope('MobilenetV1',
reuse=self._reuse_weights) as scope:
with slim.arg_scope(
mobilenet_v1.mobilenet_v1_arg_scope(is_training=None)):
with (slim.arg_scope(self._conv_hyperparams_fn())
if self._override_base_feature_extractor_hyperparams
else context_manager.IdentityContextManager()):
# TODO(skligys): Enable fused batch norm once quantization supports it.
with slim.arg_scope([slim.batch_norm], fused=False):
_, image_features = mobilenet_v1.mobilenet_v1_base(
ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
final_endpoint='Conv2d_13_pointwise',
min_depth=self._min_depth,
depth_multiplier=self._depth_multiplier,
use_explicit_padding=self._use_explicit_padding,
scope=scope)
with slim.arg_scope(self._conv_hyperparams_fn()):
# TODO(skligys): Enable fused batch norm once quantization supports it.
with slim.arg_scope([slim.batch_norm], fused=False):
feature_maps = feature_map_generators.multi_resolution_feature_maps(
feature_map_layout=feature_map_layout,
depth_multiplier=self._depth_multiplier,
min_depth=self._min_depth,
insert_1x1_conv=True,
image_features=image_features)
return feature_maps.values()
```
authors: ["1072113944@qq.comm"]
author_id: 1072113944@qq.comm

blob_id: 1bcc583b18dbe1c149df61636316daf19ebb3da8
directory_id: bc3bd7601fa427d638f872b4ddfdebe4ce23a25c
path: /bitbucketopenapi/models/branching_model_all_of_branch_types.py
content_id: 5c19cc4a2bfc8439d01eead784bcecbd8c6be7a7
detected_licenses: []
license_type: no_license
repo_name: magmax/bitbucket-openapi
snapshot_id: 59ef55ab3aa42940c8211d3ecd16ef7d6fc74c21
revision_id: 836ae762735ae5b1ececcee5287fa271d7d8de5b
branch_name: refs/heads/master
visit_date: 2020-07-28T16:10:32.736169
revision_date: 2019-09-19T04:17:09
committer_date: 2019-09-19T04:17:09
github_id: 209,460,884
star_events_count: 3
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 4,858
extension: py
content:

```python
# coding: utf-8
"""
Bitbucket API
Code against the Bitbucket API to automate simple tasks, embed Bitbucket data into your own site, build mobile or desktop apps, or even add custom UI add-ons into Bitbucket itself using the Connect framework. # noqa: E501
The version of the OpenAPI document: 2.0
Contact: support@bitbucket.org
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class BranchingModelAllOfBranchTypes(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'kind': 'str',
'prefix': 'str'
}
attribute_map = {
'kind': 'kind',
'prefix': 'prefix'
}
def __init__(self, kind=None, prefix=None): # noqa: E501
"""BranchingModelAllOfBranchTypes - a model defined in OpenAPI""" # noqa: E501
self._kind = None
self._prefix = None
self.discriminator = None
self.kind = kind
self.prefix = prefix
@property
def kind(self):
"""Gets the kind of this BranchingModelAllOfBranchTypes. # noqa: E501
The kind of branch. # noqa: E501
:return: The kind of this BranchingModelAllOfBranchTypes. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this BranchingModelAllOfBranchTypes.
The kind of branch. # noqa: E501
:param kind: The kind of this BranchingModelAllOfBranchTypes. # noqa: E501
:type: str
"""
if kind is None:
raise ValueError("Invalid value for `kind`, must not be `None`") # noqa: E501
allowed_values = ["feature", "bugfix", "release", "hotfix"] # noqa: E501
if kind not in allowed_values:
raise ValueError(
"Invalid value for `kind` ({0}), must be one of {1}" # noqa: E501
.format(kind, allowed_values)
)
self._kind = kind
@property
def prefix(self):
"""Gets the prefix of this BranchingModelAllOfBranchTypes. # noqa: E501
The prefix for this branch type. A branch with this prefix will be classified as per `kind`. The prefix must be a valid prefix for a branch and must always exist. It cannot be blank, empty or `null`. # noqa: E501
:return: The prefix of this BranchingModelAllOfBranchTypes. # noqa: E501
:rtype: str
"""
return self._prefix
@prefix.setter
def prefix(self, prefix):
"""Sets the prefix of this BranchingModelAllOfBranchTypes.
The prefix for this branch type. A branch with this prefix will be classified as per `kind`. The prefix must be a valid prefix for a branch and must always exist. It cannot be blank, empty or `null`. # noqa: E501
:param prefix: The prefix of this BranchingModelAllOfBranchTypes. # noqa: E501
:type: str
"""
if prefix is None:
raise ValueError("Invalid value for `prefix`, must not be `None`") # noqa: E501
self._prefix = prefix
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BranchingModelAllOfBranchTypes):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
authors: ["miguelangel.garcia@gmail.com"]
author_id: miguelangel.garcia@gmail.com

blob_id: 00d30d1d0c99da6ea3aafe35fc7e3c3e88eb6f3e
directory_id: 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
path: /alipay/aop/api/domain/ZMGOCycleFlexConfig.py
content_id: a346c3b220c6ea3812a72dfb56fd69a64303cdc3
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: alipay/alipay-sdk-python-all
snapshot_id: 8bd20882852ffeb70a6e929038bf88ff1d1eff1c
revision_id: 1fad300587c9e7e099747305ba9077d4cd7afde9
branch_name: refs/heads/master
visit_date: 2023-08-27T21:35:01.778771
revision_date: 2023-08-23T07:12:26
committer_date: 2023-08-23T07:12:26
github_id: 133,338,689
star_events_count: 247
fork_events_count: 70
gha_license_id: Apache-2.0
gha_event_created_at: 2023-04-25T04:54:02
gha_created_at: 2018-05-14T09:40:54
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,868
extension: py
content:

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ZMGOCycleFlexConfig(object):
def __init__(self):
self._cycle_flex_withhold_fee_name = None
self._cycle_flex_withhold_max_price = None
self._cycle_flex_withhold_total_period_count = None
@property
def cycle_flex_withhold_fee_name(self):
return self._cycle_flex_withhold_fee_name
@cycle_flex_withhold_fee_name.setter
def cycle_flex_withhold_fee_name(self, value):
self._cycle_flex_withhold_fee_name = value
@property
def cycle_flex_withhold_max_price(self):
return self._cycle_flex_withhold_max_price
@cycle_flex_withhold_max_price.setter
def cycle_flex_withhold_max_price(self, value):
self._cycle_flex_withhold_max_price = value
@property
def cycle_flex_withhold_total_period_count(self):
return self._cycle_flex_withhold_total_period_count
@cycle_flex_withhold_total_period_count.setter
def cycle_flex_withhold_total_period_count(self, value):
self._cycle_flex_withhold_total_period_count = value
def to_alipay_dict(self):
params = dict()
if self.cycle_flex_withhold_fee_name:
if hasattr(self.cycle_flex_withhold_fee_name, 'to_alipay_dict'):
params['cycle_flex_withhold_fee_name'] = self.cycle_flex_withhold_fee_name.to_alipay_dict()
else:
params['cycle_flex_withhold_fee_name'] = self.cycle_flex_withhold_fee_name
if self.cycle_flex_withhold_max_price:
if hasattr(self.cycle_flex_withhold_max_price, 'to_alipay_dict'):
params['cycle_flex_withhold_max_price'] = self.cycle_flex_withhold_max_price.to_alipay_dict()
else:
params['cycle_flex_withhold_max_price'] = self.cycle_flex_withhold_max_price
if self.cycle_flex_withhold_total_period_count:
if hasattr(self.cycle_flex_withhold_total_period_count, 'to_alipay_dict'):
params['cycle_flex_withhold_total_period_count'] = self.cycle_flex_withhold_total_period_count.to_alipay_dict()
else:
params['cycle_flex_withhold_total_period_count'] = self.cycle_flex_withhold_total_period_count
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ZMGOCycleFlexConfig()
if 'cycle_flex_withhold_fee_name' in d:
o.cycle_flex_withhold_fee_name = d['cycle_flex_withhold_fee_name']
if 'cycle_flex_withhold_max_price' in d:
o.cycle_flex_withhold_max_price = d['cycle_flex_withhold_max_price']
if 'cycle_flex_withhold_total_period_count' in d:
o.cycle_flex_withhold_total_period_count = d['cycle_flex_withhold_total_period_count']
return o
```
authors: ["jishupei.jsp@alibaba-inc.com"]
author_id: jishupei.jsp@alibaba-inc.com

blob_id: 33142582fc29000c271ed5a172c7f98479c6dbda
directory_id: 2f09a5d75343702a0aecf10112b77b00c2063816
path: /setup.py
content_id: 466ca0e3e205500ff3041ffbb29c2b4101f5c4d1
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: tracer0tong/statsitemap
snapshot_id: 8ac963d03ab53a61c942eeb7c1d63d4fb03c0c24
revision_id: 0e0cc4387b98cd91ffc717f5494e0a2168127992
branch_name: refs/heads/master
visit_date: 2016-09-10T21:36:22.734268
revision_date: 2015-04-21T22:02:49
committer_date: 2015-04-21T22:02:49
github_id: 34,352,916
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 524
extension: py
content:

```python
from distutils.core import setup
setup(
name='statsitemap',
packages=['statsitemap'],
version='0.1',
description='Library for building statistical graph (sitemap) from called URI/referer pairs',
author='Yury Leonychev (@tracer0tong)',
author_email='yuriy.leonychev@gmail.com',
url='https://github.com/tracer0tong/statsitemap',
download_url='https://github.com/tracer0tong/statsitemap/tarball/0.1',
keywords=['statsitemap', 'graph', 'nginx', 'apache', 'accesslog'],
classifiers=[],
)
```
authors: ["johndoe@example.com"]
author_id: johndoe@example.com

blob_id: c8f7add0004abb00bdec5a84216d5e250182acc9
directory_id: 54f352a242a8ad6ff5516703e91da61e08d9a9e6
path: /Source Codes/AtCoder/agc007/B/4738230.py
content_id: 5ff3a4324a3c6b6e9adee2311585322f49bfc888
detected_licenses: []
license_type: no_license
repo_name: Kawser-nerd/CLCDSA
snapshot_id: 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb
revision_id: aee32551795763b54acb26856ab239370cac4e75
branch_name: refs/heads/master
visit_date: 2022-02-09T11:08:56.588303
revision_date: 2022-01-26T18:53:40
committer_date: 2022-01-26T18:53:40
github_id: 211,783,197
star_events_count: 23
fork_events_count: 9
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 252
extension: py
content:

```python
n = int(input())
x = list(map(int, input().split()))
a = [20001*i for i in range(1,n+1)]
b = [20001*(n+1-i) for i in range(1,n+1)]
for i in range(n):
b[x[i]-1] += i
for x in a:print(x, end=' ')
print()
for x in b:print(x, end=' ')
```
authors: ["kwnafi@yahoo.com"]
author_id: kwnafi@yahoo.com

blob_id: e914f312a676793d37bbd0b6ebd106a5a1ed8467
directory_id: a46d135ba8fd7bd40f0b7d7a96c72be446025719
path: /packages/python/plotly/plotly/validators/sankey/node/line/_widthsrc.py
content_id: f4b333792ea5fe507c7ee8f990eed4194966516e
detected_licenses: ["MIT"]
license_type: permissive
repo_name: hugovk/plotly.py
snapshot_id: 5e763fe96f225d964c4fcd1dea79dbefa50b4692
revision_id: cfad7862594b35965c0e000813bd7805e8494a5b
branch_name: refs/heads/master
visit_date: 2022-05-10T12:17:38.797994
revision_date: 2021-12-21T03:49:19
committer_date: 2021-12-21T03:49:19
github_id: 234,146,634
star_events_count: 0
fork_events_count: 0
gha_license_id: MIT
gha_event_created_at: 2020-01-15T18:33:43
gha_created_at: 2020-01-15T18:33:41
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 419
extension: py
content:

```python
import _plotly_utils.basevalidators
class WidthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="widthsrc", parent_name="sankey.node.line", **kwargs
):
super(WidthsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
```
authors: ["noreply@github.com"]
author_id: hugovk.noreply@github.com

blob_id: 5cd9a52daf3835b8c6d129f6f036940adfa985e3
directory_id: 700bf615913fa5dd8686fac51b444f691023a035
path: /igrins/recipes/recipe_wvlsol_sky2.py
content_id: 2ccbf18967d717ff08c39a6b8788abb53a01a2d4
detected_licenses: []
license_type: no_license
repo_name: shihyuntang/plp
snapshot_id: e24da3d23debabb61edfca5416a72577717c6195
revision_id: 842a5db71dfe57d1b3bf8ac292dabdb69830ac7f
branch_name: refs/heads/master
visit_date: 2023-03-09T06:13:50.874801
revision_date: 2021-02-26T20:03:20
committer_date: 2021-02-26T20:03:20
github_id: 285,095,289
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: 2020-08-04T20:42:51
gha_created_at: 2020-08-04T20:42:50
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 6,854
extension: py
content:

```python
# This is to use new framework. Let's use this to measure flexure
# between emission spectra (e.g., sky, UNe, etc.)
#import os
import numpy as np
import pandas as pd
def save_qa(obsset):
df = obsset.load_data_frame("SKY_FITTED_PIXELS_JSON", orient="split")
msk_ = df["slit_center"] == 0.5
dfm_ = df[msk_]
msk = np.isfinite(dfm_["pixels"])
dfm = dfm_[msk]
lines_map = dict((o, (_["pixels"].values, _["wavelength"].values))
for o, _ in dfm.groupby("order"))
from matplotlib.figure import Figure
from igrins.libs.ecfit import check_fit
d = obsset.load_item("SKY_WVLSOL_JSON")
orders = d["orders"]
fit_results = obsset.load_item("SKY_WVLSOL_FIT_RESULT_JSON")
# fit_results = dict(xyz=[xl[msk], yl[msk], zl[msk]],
# fit_params=fit_params,
# fitted_model=poly_2d)
# xl, yl, zl = get_ordered_line_data(reidentified_lines_map)
xl, yl, zlo = fit_results["xyz"]
xl, yl, zlo = [np.array(_) for _ in [xl, yl, zlo]]
zl = zlo
m = np.array(fit_results["fitted_mask"])
lines_map_filtered = dict((o, (_["pixels"].values,
_["wavelength"].values))
for o, _ in dfm[m].groupby("order"))
    module_name, class_name, serialized = fit_results["fitted_model"]
from igrins.libs.astropy_poly_helper import deserialize_poly_model
    p = deserialize_poly_model(module_name, class_name, serialized)
if 1:
fig1 = Figure(figsize=(12, 7))
check_fit(fig1, xl, yl, zl, p,
orders,
lines_map)
fig1.tight_layout()
fig2 = Figure(figsize=(12, 7))
check_fit(fig2, xl[m], yl[m], zl[m], p,
orders,
lines_map_filtered)
fig2.tight_layout()
from igrins.libs.qa_helper import figlist_to_pngs
dest_dir = obsset.query_item_path("qa_sky_fit2d_dir",
subdir="sky_fit2d")
figlist_to_pngs(dest_dir, [fig1, fig2])
# sky_basename = helper.get_basename(band, obsids[0])
# sky_figs = helper.get_section_filename_base("QA_PATH",
# "oh_fit2d",
# "oh_fit2d_"+sky_basename)
# figlist_to_pngs(sky_figs, [fig1, fig2])
def save_distortion_db(obsset):
db = obsset.load_db("distortion")
db.update(obsset.band, obsset.basename)
def save_wvlsol_db(obsset):
db = obsset.load_db("wvlsol")
db.update(obsset.band, obsset.basename)
# if 1:
# thar_db.update(band, thar_basename)
# 20151003 : Below is an attempt to modularize the recipes, which has
# not finished. Initial solution part is done, but the distortion part
# is not.
def save_ordermap_slitposmap(obsset):
from aperture_helper import get_simple_aperture_from_obsset
wvlsol_v0 = obsset.load_resource_for("wvlsol_v0")
orders = wvlsol_v0["orders"]
ap = get_simple_aperture_from_obsset(obsset,
orders=orders)
order_map = ap.make_order_map()
slitpos_map = ap.make_slitpos_map()
order_map2 = ap.make_order_map(mask_top_bottom=True)
obsset.store_image("ordermap_fits", order_map)
obsset.store_image("slitposmap_fits", slitpos_map)
obsset.store_image("ordermap_masked_fits", order_map2)
def save_wavelength_map(obsset):
fit_results = obsset.load_item("SKY_WVLSOL_FIT_RESULT_JSON")
from igrins.libs.astropy_poly_helper import deserialize_poly_model
module_name, klass_name, serialized = fit_results["fitted_model"]
poly_2d = deserialize_poly_model(module_name, klass_name, serialized)
order_map = obsset.load_item("ordermap_fits")[0].data
# slitpos_map = caldb.load_item_from(basename, "slitposmap_fits")
offset_map = obsset.load_item("slitoffset_fits")[0].data
msk = order_map > 0
_, pixels = np.indices(msk.shape)
orders = order_map[msk]
wvl = poly_2d(pixels[msk] - offset_map[msk], orders) / orders
wvlmap = np.empty(msk.shape, dtype=float)
wvlmap.fill(np.nan)
wvlmap[msk] = wvl
obsset.store_image("WAVELENGTHMAP_FITS", wvlmap)
from igrins.libs.recipe_helper import RecipeHelper
from process_wvlsol_v0 import extract_spectra_multi
from process_wvlsol_v0 import make_combined_image
def process_band(utdate, recipe_name, band,
groupname,
obsids, frametypes,
aux_infos, config_name):
from igrins import get_caldb, get_obsset
caldb = get_caldb(config_name, utdate)
obsset = get_obsset(caldb, band, recipe_name, obsids, frametypes)
# STEP 1 :
## make combined image
make_combined_image(obsset)
# Step 2
extract_spectra_multi(obsset)
from process_identify_multiline import identify_multiline
identify_multiline(obsset)
from process_wvlsol_volume_fit import volume_fit, generate_slitoffsetmap
volume_fit(obsset)
save_distortion_db(obsset)
save_ordermap_slitposmap(obsset)
generate_slitoffsetmap(obsset)
from process_derive_wvlsol import derive_wvlsol
derive_wvlsol(obsset)
save_wvlsol_db(obsset)
save_wavelength_map(obsset)
from process_save_wat_header import save_wat_header
save_wat_header(obsset)
# save_wavelength_map(helper, band, obsids)
# #fit_wvl_sol(helper, band, obsids)
save_qa(obsset)
# some of the fugures are missing.
# save_figures()
from igrins.libs.recipe_factory import new_recipe_class, new_recipe_func
# If the recipe is != "SKY", the resulting combined image will be A-B.
_recipe_class_wvlsol_sky = new_recipe_class("RecipeWvlsolSky",
["SKY", "SKY_AB"], process_band)
wvlsol_sky = new_recipe_func("wvlsol_sky",
_recipe_class_wvlsol_sky)
sky_wvlsol = new_recipe_func("sky_wvlsol",
_recipe_class_wvlsol_sky)
__all__ = wvlsol_sky, sky_wvlsol
# if 0:
# # Step 3:
# identify_lines(helper, band, obsids)
# get_1d_wvlsol(helper, band, obsids)
# save_1d_wvlsol(extractor,
# orders_w_solutions, wvl_sol, p)
# save_qa(extractor, orders_w_solutions,
# reidentified_lines_map, p, m)
# save_figures(helper, band, obsids)
# save_db(helper, band, obsids)
if __name__ == "__main__":
utdate = "20140709"
obsids = [62, 63]
utdate = "20140525"
obsids = [29]
utdate = "20150525"
obsids = [52]
recipe_name = "SKY"
# utdate = "20150525"
# obsids = [32]
# recipe_name = "THAR"
band = "K"
#helper = RecipeHelper("../recipe.config", utdate)
config_name = "../recipe.config"
process_band(utdate, recipe_name, band, obsids, config_name)
```
authors: ["lee.j.joon@gmail.com"]
author_id: lee.j.joon@gmail.com

blob_id: 06470ed428c9e68e74635aeeb5e1e3f853111727
directory_id: 5686100c4ed0436347107f4e9faae30fca609c09
path: /leetcode/1030. Matrix Cells in Distance Order/Solution.py
content_id: 926b64215552cf842d6465f0b2786a2cc0af72cb
detected_licenses: []
license_type: no_license
repo_name: adamqddnh/algorithm-questions
snapshot_id: 7d4f56b7e5ac2ff9460774d43ecf8cba2cd7b0cb
revision_id: 93a1b082e10ade0dd464deb80b5df6c81552f534
branch_name: refs/heads/master
visit_date: 2023-06-29T04:51:26.635740
revision_date: 2021-07-23T09:11:45
committer_date: 2021-07-23T09:11:45
github_id: 252,675,682
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 608
extension: py
content:

```python
class Solution(object):
def allCellsDistOrder(self, R, C, r0, c0):
"""
:type R: int
:type C: int
:type r0: int
:type c0: int
:rtype: List[List[int]]
"""
maxLength = R + C + 1
distance = [[] for i in range(0, maxLength)]
for i in range(0, R):
for j in range(0, C):
temp = abs(r0 - i) + abs(c0 - j)
distance[temp].append([i, j])
result = []
for i in range(0, maxLength):
for temp in distance[i]:
result.append(temp)
return result
```
authors: ["noreply@github.com"]
author_id: adamqddnh.noreply@github.com

blob_id: f35da223ba059f5824b45b5398b78811b900ca89
directory_id: aef40813a1b92cec0ea4fc25ec1d4a273f9bfad4
path: /Q15__/04_Count_Submatrices_With_All_Ones/Solution.py
content_id: c0f071e4eb738bc0cdf16e003c0295732600aa05
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: hsclinical/leetcode
snapshot_id: e9d0e522e249a24b28ab00ddf8d514ec855110d7
revision_id: 48a57f6a5d5745199c5685cd2c8f5c4fa293e54a
branch_name: refs/heads/main
visit_date: 2023-06-14T11:28:59.458901
revision_date: 2021-07-09T18:57:44
committer_date: 2021-07-09T18:57:44
github_id: 319,078,569
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,238
extension: py
content:

```python
from typing import List
class Solution:
def numSubmat(self, mat: List[List[int]]) -> int:
rowLen = len(mat)
colLen = len(mat[0])
medium = [ [0] * colLen for _ in range(rowLen) ]
#stack to bottom
for j in range(colLen):
for i in range(rowLen):
if mat[i][j] == 1:
if i == 0 or mat[i-1][j] == 0:
medium[i][j] = 1
else:
medium[i][j] = medium[i-1][j] + 1
#print(medium)
result = [ [0] * colLen for _ in range(rowLen) ]
total = 0
for i in range(rowLen):
for j in range(colLen):
if mat[i][j] == 1:
cellTotal = medium[i][j]
# backward column
minStack = cellTotal
for k in range(j-1, -1, -1):
if mat[i][k] == 0:
break
else:
minStack = min(minStack, medium[i][k])
cellTotal += minStack
total += cellTotal
result[i][j] = cellTotal
#print(result)
return total
```
authors: ["luhongisu@gmail.com"]
author_id: luhongisu@gmail.com

blob_id: dc9f823aea13cfa9dee9a53a698656d62586ac18
directory_id: d50d24a111f7fc078ef98bc5059355793fe7dd37
path: /tao_bao/db/dbhelper.py
content_id: 97d38ade4af4772e1f66711aeb87dcddf3af6dc3
detected_licenses: []
license_type: no_license
repo_name: liangxuCHEN/scrapy_taobao
snapshot_id: f2cbd38b5d746052bac3366aa035edf988cb1115
revision_id: 595fb9a9dcd45a32b43e0478580a7a936d1b55a2
branch_name: refs/heads/master
visit_date: 2021-05-08T11:50:12.854647
revision_date: 2018-04-11T03:20:17
committer_date: 2018-04-11T03:20:17
github_id: 119,913,389
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,986
extension: py
content:

```python
# -*- coding: utf-8 -*-
from scrapy.utils.project import get_project_settings  # import the project settings
from sqlalchemy import create_engine, Column, String, DateTime, Integer, Float, func
from sqlalchemy.ext.declarative import declarative_base
import redis
# Initialize the database connection:
# engine = create_engine('mysql+pymysql://root:123asd@localhost:3306/utf_sys?charset=utf8')
engine = create_engine('postgresql+psycopg2://postgres:123asd@192.168.0.186/execdb')
# Initialize the Redis connection
Redis = redis.StrictRedis(host='localhost',port=6379,db=0)
Base = declarative_base()
class TaoBaoModel(Base):
__tablename__ = 'tab_taobao_item'
id = Column(Integer, primary_key=True)
page_number = Column(Integer)
job_id = Column(String(50))
item_id = Column(String(50))
name = Column(String(100))
main_pic = Column(String(200))
price = Column(Float)
pay_person = Column(Integer)
province = Column(String(20))
city = Column(String(20))
shop_name = Column(String(50))
detail_url = Column(String(200))
category_id = Column(String(50))
category = Column(String(50))
is_tmall = Column(Integer)
user_id = Column(String(50))
market = Column(String(20))
record_date = Column(DateTime)
# A table that records the search history
class TaoBaoProjectModel(Base):
"""
    Parameters:
    _id: ID of this search project (TODO: generate it in the database later)
    project_name: project name
    market: 1 -> Taobao, 2 -> Tmall
    keyword: keyword entered in the search box
    pageNumber: number of pages to crawl, 100 at most
    min_price: optional; lower bound on the price of returned items
    max_price: optional; upper bound on the price of returned items
    status: 1: new task, 2: in progress, 3: finished
    created: creation time
"""
__tablename__ = 'tab_project'
id = Column(Integer, primary_key=True)
market = Column(String(10))
project_name = Column(String(50))
key_word = Column(String(200))
page_number = Column(Integer)
min_price = Column(String(20))
max_price = Column(String(20))
status = Column(String(20), server_default='new')
created_at = Column(DateTime, server_default=func.now())
updated_at = Column(DateTime, server_default=func.now(), server_onupdate=func.now())
def to_json(self):
return {
'id': self.id,
'project_name': self.project_name,
'market': self.market,
'key_word': self.key_word,
'page_number': self.page_number,
'min_price': self.min_price,
'max_price': self.max_price,
'status': self.status,
'created_at': self.created_at.strftime("%Y-%m-%d %H:%M:%S") if self.created_at is not None else "",
'updated_at': self.updated_at.strftime("%Y-%m-%d %H:%M:%S") if self.updated_at is not None else ""
}
# Create the tables; tables that already exist are left untouched!!!
Base.metadata.create_all(engine)
```
authors: ["chenliangxu68@gmail.com"]
author_id: chenliangxu68@gmail.com

blob_id: 09dc5e4b00b0196437111230fc9b278edca31401
directory_id: cb6f9cf1901b68cad07def3dd0fad75ab046e330
path: /constructor/migrations/0037_auto_20210824_0700.py
content_id: 28b5d8a020dee9473cd9b792f1def7f8a2828737
detected_licenses: []
license_type: no_license
repo_name: Junaid522/Cons
snapshot_id: 631dd9e0e3f1249bd911196ba4a2fef8357bd8fb
revision_id: cdceb1d07728209dad827917c8ba88e1319c94ad
branch_name: refs/heads/master
visit_date: 2023-08-10T17:23:28.942917
revision_date: 2021-09-20T07:30:02
committer_date: 2021-09-20T07:30:02
github_id: 408,348,598
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 731
extension: py
content:

```python
# Generated by Django 3.1.2 on 2021-08-24 07:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('constructor', '0036_currency_description'),
]
operations = [
migrations.AddField(
model_name='course',
name='career_prospects',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='course',
name='overview',
field=models.TextField(blank=True, null=True),
),
migrations.AddField(
model_name='course',
name='structure',
field=models.TextField(blank=True, null=True),
),
]
```
authors: ["junaidtariq166@gmail.com"]
author_id: junaidtariq166@gmail.com

blob_id: be03a987402a3e0420168cf4f91c126ffd69c9de
directory_id: d7641647d67d110e08997767e85bbea081c2537b
path: /bitmovin_api_sdk/encoding/inputs/udp_multicast/__init__.py
content_id: 82c7c6f74bb923c3eaaf1fb60471039f9d174982
detected_licenses: ["MIT"]
license_type: permissive
repo_name: aachenmax/bitmovin-api-sdk-python
snapshot_id: d3ded77c459852cbea4927ff28c2a4ad39e6026a
revision_id: 931bcd8c4695a7eb224a7f4aa5a189ba2430e639
branch_name: refs/heads/master
visit_date: 2022-11-16T08:59:06.830567
revision_date: 2020-07-06T07:16:51
committer_date: 2020-07-06T07:16:51
github_id: 267,538,689
star_events_count: 0
fork_events_count: 1
gha_license_id: MIT
gha_event_created_at: 2020-07-06T07:16:52
gha_created_at: 2020-05-28T08:44:44
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 322
extension: py
content:

```python
from bitmovin_api_sdk.encoding.inputs.udp_multicast.udp_multicast_api import UdpMulticastApi
from bitmovin_api_sdk.encoding.inputs.udp_multicast.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.inputs.udp_multicast.udp_multicast_input_list_query_params import UdpMulticastInputListQueryParams
```
authors: ["openapi@bitmovin.com"]
author_id: openapi@bitmovin.com

blob_id: 8a51a15dd22f14caa28b6fec7f2520ee774af67f
directory_id: ed06ef44c944707276a2fca16d61e7820596f51c
path: /Python/path-with-minimum-effort.py
content_id: 46d3f4ed6f06ca05828dba1b5a76a7f30589242a
detected_licenses: []
license_type: no_license
repo_name: sm2774us/leetcode_interview_prep_2021
snapshot_id: 15842bef80637c6ff43542ed7988ec4b2d03e82c
revision_id: 33b41bea66c266b733372d9a8b9d2965cd88bf8c
branch_name: refs/heads/master
visit_date: 2023-05-29T14:14:49.074939
revision_date: 2021-06-12T19:52:07
committer_date: 2021-06-12T19:52:07
github_id: 374,725,760
star_events_count: 0
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 7,239
extension: py
content:

```python
# Time: O(m * n * log(m * n))
# Space: O(m * n)
import heapq
# Dijkstra algorithm solution
class Solution(object):
def minimumEffortPath(self, heights):
"""
:type heights: List[List[int]]
:rtype: int
"""
directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
dst = (len(heights)-1, len(heights[0])-1)
dist = [[float("inf")]*len(heights[0]) for _ in range(len(heights))]
min_heap = [(0, 0, 0)]
lookup = [[False]*len(heights[0]) for _ in range(len(heights))]
while min_heap:
d, r, c = heapq.heappop(min_heap)
if lookup[r][c]:
continue
lookup[r][c] = True
if (r, c) == dst:
return d
for dr, dc in directions:
nr, nc = r+dr, c+dc
if not (0 <= nr < len(heights) and 0 <= nc < len(heights[0]) and not lookup[nr][nc]):
continue
nd = max(d, abs(heights[nr][nc]-heights[r][c]))
if nd < dist[nr][nc]:
dist[nr][nc] = nd
heapq.heappush(min_heap, (nd, nr, nc))
return -1
# Time: O(m * n * log(m * n) + m * n * α(m * n)) = O(m * n * log(m * n))
# Space: O(m * n)
import collections
class UnionFind(object): # Time: (n * α(n)), Space: O(n)
def __init__(self, n):
        self.set = list(range(n))  # a list, so path compression below can assign into it
self.rank = [0]*n
def find_set(self, x):
stk = []
while self.set[x] != x: # path compression
stk.append(x)
x = self.set[x]
while stk:
self.set[stk.pop()] = x
return x
def union_set(self, x, y):
x_root, y_root = map(self.find_set, (x, y))
if x_root == y_root:
return False
if self.rank[x_root] < self.rank[y_root]: # union by rank
self.set[x_root] = y_root
elif self.rank[x_root] > self.rank[y_root]:
self.set[y_root] = x_root
else:
self.set[y_root] = x_root
self.rank[x_root] += 1
return True
# union find solution
class Solution2(object):
def minimumEffortPath(self, heights):
"""
:type heights: List[List[int]]
:rtype: int
"""
def index(n, i, j):
return i*n + j
diffs = []
for i in range(len(heights)):
for j in range(len(heights[0])):
if i > 0:
diffs.append((abs(heights[i][j]-heights[i-1][j]), index(len(heights[0]), i-1, j), index(len(heights[0]), i, j)))
if j > 0:
diffs.append((abs(heights[i][j]-heights[i][j-1]), index(len(heights[0]), i, j-1), index(len(heights[0]), i, j)))
diffs.sort()
union_find = UnionFind(len(heights)*len(heights[0]))
for d, i, j in diffs:
if union_find.union_set(i, j):
if union_find.find_set(index(len(heights[0]), 0, 0)) == \
union_find.find_set(index(len(heights[0]), len(heights)-1, len(heights[0])-1)):
return d
return 0
# Time: O(m * n * logh)
# Space: O(m * n)
# bi-bfs solution
class Solution3(object):
def minimumEffortPath(self, heights):
"""
:type heights: List[List[int]]
:rtype: int
"""
directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
def check(heights, x): # bi-bfs
lookup = [[False]*len(heights[0]) for _ in range(len(heights))]
left, right = {(0, 0)}, {(len(heights)-1, len(heights[0])-1)}
while left:
for r, c in left:
lookup[r][c] = True
new_left = set()
for r, c in left:
if (r, c) in right:
return True
for dr, dc in directions:
nr, nc = r+dr, c+dc
if not (0 <= nr < len(heights) and
0 <= nc < len(heights[0]) and
abs(heights[nr][nc]-heights[r][c]) <= x and
not lookup[nr][nc]):
continue
new_left.add((nr, nc))
left = new_left
if len(left) > len(right):
left, right = right, left
return False
left, right = 0, 10**6
while left <= right:
mid = left + (right-left)//2
if check(heights, mid):
right = mid-1
else:
left = mid+1
return left
# Time: O(m * n * logh)
# Space: O(m * n)
import collections
# bfs solution
class Solution4(object):
def minimumEffortPath(self, heights):
"""
:type heights: List[List[int]]
:rtype: int
"""
directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
def check(heights, x):
lookup = [[False]*len(heights[0]) for _ in range(len(heights))]
q = collections.deque([(0, 0)])
while q:
r, c = q.popleft()
if (r, c) == (len(heights)-1, len(heights[0])-1):
return True
for dr, dc in directions:
nr, nc = r+dr, c+dc
if not (0 <= nr < len(heights) and
0 <= nc < len(heights[0]) and
abs(heights[nr][nc]-heights[r][c]) <= x and
not lookup[nr][nc]):
continue
lookup[nr][nc] = True
q.append((nr, nc))
return False
left, right = 0, 10**6
while left <= right:
mid = left + (right-left)//2
if check(heights, mid):
right = mid-1
else:
left = mid+1
return left
# Time: O(m * n * logh)
# Space: O(m * n)
# dfs solution
class Solution5(object):
def minimumEffortPath(self, heights):
"""
:type heights: List[List[int]]
:rtype: int
"""
directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
def check(heights, x):
lookup = [[False]*len(heights[0]) for _ in range(len(heights))]
stk = [(0, 0)]
while stk:
r, c = stk.pop()
if (r, c) == (len(heights)-1, len(heights[0])-1):
return True
for dr, dc in directions:
nr, nc = r+dr, c+dc
if not (0 <= nr < len(heights) and
0 <= nc < len(heights[0]) and
abs(heights[nr][nc]-heights[r][c]) <= x and
not lookup[nr][nc]):
continue
lookup[nr][nc] = True
stk.append((nr, nc))
return False
left, right = 0, 10**6
while left <= right:
mid = left + (right-left)//2
if check(heights, mid):
right = mid-1
else:
left = mid+1
return left
```
authors: ["sm2774us@gmail.com"]
author_id: sm2774us@gmail.com

blob_id: 5e8af7a5f3c6134790c205055461a82ddd53a5a9
directory_id: 292437b85108504a7ca91571f26a639a313501b6
path: /venv/lib/python2.7/site-packages/oslo_middleware/tests/test_correlation_id.py
content_id: 6dde5d8af681454e6d686cb8ed827fed79d6f0df
detected_licenses: []
license_type: no_license
repo_name: heekof/monitoring-agent
snapshot_id: c86bebcf77091490df7a6b8c881b85fdb2b9e4eb
revision_id: b1c079efdf2dabe854f2aa3d96f36d2ec7021070
branch_name: refs/heads/master
visit_date: 2021-01-15T15:39:01.512801
revision_date: 2016-08-31T20:53:38
committer_date: 2016-08-31T20:53:38
github_id: 58,620,098
star_events_count: 1
fork_events_count: 0
gha_license_id: null
gha_event_created_at: null
gha_created_at: null
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 1,713
extension: py
content:

```python
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import mock
from oslotest import base as test_base
from oslotest import moxstubout
from oslo_middleware import correlation_id
class CorrelationIdTest(test_base.BaseTestCase):
def setUp(self):
super(CorrelationIdTest, self).setUp()
self.stubs = self.useFixture(moxstubout.MoxStubout()).stubs
def test_process_request(self):
app = mock.Mock()
req = mock.Mock()
req.headers = {}
mock_uuid4 = mock.Mock()
mock_uuid4.return_value = "fake_uuid"
self.stubs.Set(uuid, 'uuid4', mock_uuid4)
middleware = correlation_id.CorrelationId(app)
middleware(req)
self.assertEqual(req.headers.get("X_CORRELATION_ID"), "fake_uuid")
def test_process_request_should_not_regenerate_correlation_id(self):
app = mock.Mock()
req = mock.Mock()
req.headers = {"X_CORRELATION_ID": "correlation_id"}
middleware = correlation_id.CorrelationId(app)
middleware(req)
self.assertEqual(req.headers.get("X_CORRELATION_ID"), "correlation_id")
|
[
"bendriss-jaafar@live.fr"
] |
bendriss-jaafar@live.fr
|
6497240502fa621dc2ea8c4dcbce6f85011972b3
|
e8b6a669bdec937a4226e749a98c7e3905e327db
|
/rainbow/settings.py
|
445b701eab004e8de35ade8739bda4dbea1d130e
|
[] |
no_license
|
danielmoniz/Rainbow
|
bef52a7bd18f225d48822aa563af03bbba862b9e
|
a9085476dc83a582b87927251cc269f228ecf557
|
refs/heads/master
| 2016-09-11T03:58:41.528607
| 2012-06-13T19:39:58
| 2012-06-13T19:39:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,675
|
py
|
# Django settings for rainbow project.
from private_settings import get_database_settings
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
# This data comes from private_settings.py
DATABASES = get_database_settings()
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = '/home/daniel/python_practice/rainbow/sitestatic/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# The URL all users will be redirected to after login.
# @TODO Make this dynamic! Users should be redirected to their last location.
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/login'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
"/home/daniel/projects/rainbow/static",
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '$w&7dt1vzfgex6d0(_jzrf&&k^7j8gm&18r9kawiufns*59(e3'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'rainbow.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'rainbow.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
"/home/daniel/projects/rainbow/django_templates",
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.webdesign',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# @TODO Surely I need to put the installed apps here? Eg. build_world and users
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.request",
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
[
"daniel.moniz@gmail.com"
] |
daniel.moniz@gmail.com
|
b65f1abafd197004b408adad6774a73815be6aa0
|
6b9adefb8c3730e1b9edab5605e86ee4f1cfe53c
|
/treedb/__init__.py
|
610918df1417c888efe026a930ef6e857065fd8c
|
[
"MIT"
] |
permissive
|
glottolog/treedb
|
8ac4b5dd6923a196ceb02f191200cd8053a2cd88
|
81e6a855e5d69bebc86e1fca05c938621c87ba7c
|
refs/heads/master
| 2023-07-21T04:04:27.709761
| 2023-07-17T20:10:20
| 2023-07-17T20:10:20
| 194,383,732
| 5
| 2
|
MIT
| 2022-05-24T17:48:32
| 2019-06-29T08:41:10
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,066
|
py
|
"""Load Glottolog lanuoid tree ``md.ini`` files into SQLite3 database."""
from ._globals import SESSION as Session # noqa: N811
from ._tools import sha256sum
from .backend import (print_versions,
set_engine,
connect,
scalar,
iterrows)
from .backend.export import (print_dataset,
print_schema,
print_query_sql,
backup,
dump_sql,
csv_zipfile,
print_rows,
write_csv,
hash_csv)
from .backend.load import main as load
from .backend.models import Dataset, Producer, Config
from .backend.pandas import pd_read_sql, pd_read_json_lines
from .backend.sqlite_master import print_table_sql, select_tables_nrows
from .backend.views import TABLES as views # noqa: N811
from .languoids import set_root, iterfiles
from .checks import check, compare_languoids
from .export import (print_languoid_stats,
iterlanguoids,
checksum,
write_json_lines as write_languoids,
pd_read_languoids,
write_files)
from .glottolog import glottolog_version, checkout_or_clone
from .logging_ import configure_logging
from .models import LEVEL, Languoid
from .queries import (get_example_query,
get_json_query as get_languoids_query,
iterdescendants)
from .settings import configure, get_default_root
__all__ = ['Session',
'sha256sum',
'print_versions',
'set_engine', 'connect', 'scalar', 'iterrows',
'print_dataset',
'print_schema', 'print_query_sql',
'backup', 'dump_sql', 'csv_zipfile',
'print_rows', 'write_csv', 'hash_csv',
'load',
'Dataset', 'Producer', 'Config',
'pd_read_sql', 'pd_read_json_lines',
'print_table_sql', 'select_tables_nrows',
'views',
'set_root', 'iterfiles',
'check', 'compare_languoids',
'print_languoid_stats',
'iterlanguoids',
'checksum',
'write_languoids',
'pd_read_languoids',
'write_files',
'glottolog_version', 'checkout_or_clone',
'configure_logging',
'LEVEL', 'Languoid',
'get_example_query',
'get_languoids_query',
'iterdescendants',
'configure',
'engine', 'root']
__title__ = 'treedb'
__version__ = '2.6.3.dev0'
__author__ = 'Sebastian Bank <sebastian.bank@uni-leipzig.de>'
__license__ = 'MIT, see LICENSE.txt'
__copyright__ = 'Copyright (c) 2017-2023 Sebastian Bank'
# default engine: in-memory database
engine = set_engine(None)
# default root: GLOTTOLOG_REPO_ROOT, or treedb.ini glottolog:repo_root, or ./glottolog
root = set_root(get_default_root(env_var='GLOTTOLOG_REPO_ROOT'))
|
[
"sebastian.bank@uni-leipzig.de"
] |
sebastian.bank@uni-leipzig.de
|
188529fb0dbd729ac43830eb4e0ca46e6b0fad6a
|
88be4d5657d19462eb1d74d2d4d98180b423a889
|
/scripts/plot_experiment.py
|
6579ab2bdda9aaa2117fdcbaab143a8dce51aafd
|
[
"BSD-3-Clause"
] |
permissive
|
domingoesteban/robolearn
|
bc58278fe38894f4ca9ec9e657ee13a479a368b7
|
0d20125425c352b80ef2eeed1c0b11ab6497b11a
|
refs/heads/master
| 2020-04-15T22:38:25.343229
| 2019-01-29T17:01:42
| 2019-01-29T17:01:42
| 165,080,647
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,524
|
py
|
import os
import argparse
import numpy as np
import matplotlib.pyplot as plt
from robolearn.utils.plots import plot_process_iu_returns
from robolearn.utils.plots import plot_process_iu_avg_rewards
from robolearn.utils.plots import plot_process_iu_policies
from robolearn.utils.plots import plot_process_iu_values_errors
from robolearn.utils.plots import plot_process_iu_alphas
from robolearn.utils.plots import plot_process_general_data
from robolearn.utils.plots.learning_process_plots import plot_process_haarnoja
import json
def main(args):
# Load environment
dirname = os.path.dirname(args.file)
with open(os.path.join(dirname, 'variant.json')) as json_data:
algo_name = json.load(json_data)['algo_name']
# Plot according to RL algorithm
if algo_name in ['HIUSAC', 'HIUSACNEW', 'SAC', 'HIUSACEpisodic']:
plot_process_iu_values_errors(csv_file=args.file, n_unintentional=args.un,
block=False)
plot_process_iu_policies(csv_file=args.file, n_unintentional=args.un,
block=False, plot_intentional=args.no_in,
deterministic=False)
plot_process_iu_alphas(csv_file=args.file, n_unintentional=args.un,
block=False)
plot_process_iu_returns(csv_file=args.file, n_unintentional=args.un,
block=False)
plot_process_iu_avg_rewards(csv_file=args.file,
n_unintentional=args.un,
block=False)
elif algo_name in ['HIUDDPG']:
plot_process_iu_policies(csv_file=args.file, n_unintentional=args.un,
block=False, plot_intentional=args.no_in,
deterministic=True)
plot_process_iu_returns(csv_file=args.file, n_unintentional=args.un,
block=False)
else:
plot_process_general_data(csv_file=args.file, block=False)
# plot_process_haarnoja(csv_file=args.file)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str, default='./progress.csv',
help='path to the progress.csv file')
parser.add_argument('--un', type=int, default=-1,
help='Unintentional id')
parser.add_argument('--no_in', action='store_false')
args = parser.parse_args()
main(args)
input('Press a key to close script')
|
[
"domingo.esteban@iit.it"
] |
domingo.esteban@iit.it
|
9b5418cd23ca662fe1c45fcca5e76495bc07df0a
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-iotanalytics/huaweicloudsdkiotanalytics/v1/model/obs_content_req.py
|
22822ea39ffd91a7bf569bf3a12ad4b92bc758b9
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,178
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ObsContentReq:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'bucket_name': 'str',
'ak': 'str',
'sk': 'str'
}
attribute_map = {
'bucket_name': 'bucket_name',
'ak': 'ak',
'sk': 'sk'
}
def __init__(self, bucket_name=None, ak=None, sk=None):
"""ObsContentReq
The model defined in huaweicloud sdk
        :param bucket_name: Bucket name
        :type bucket_name: str
        :param ak: Tenant's access key (AK)
        :type ak: str
        :param sk: Tenant's secret key (SK)
        :type sk: str
"""
self._bucket_name = None
self._ak = None
self._sk = None
self.discriminator = None
self.bucket_name = bucket_name
self.ak = ak
self.sk = sk
@property
def bucket_name(self):
"""Gets the bucket_name of this ObsContentReq.
        Bucket name
:return: The bucket_name of this ObsContentReq.
:rtype: str
"""
return self._bucket_name
@bucket_name.setter
def bucket_name(self, bucket_name):
"""Sets the bucket_name of this ObsContentReq.
        Bucket name
:param bucket_name: The bucket_name of this ObsContentReq.
:type bucket_name: str
"""
self._bucket_name = bucket_name
@property
def ak(self):
"""Gets the ak of this ObsContentReq.
        Tenant's access key (AK)
:return: The ak of this ObsContentReq.
:rtype: str
"""
return self._ak
@ak.setter
def ak(self, ak):
"""Sets the ak of this ObsContentReq.
        Tenant's access key (AK)
:param ak: The ak of this ObsContentReq.
:type ak: str
"""
self._ak = ak
@property
def sk(self):
"""Gets the sk of this ObsContentReq.
        Tenant's secret key (SK)
:return: The sk of this ObsContentReq.
:rtype: str
"""
return self._sk
@sk.setter
def sk(self, sk):
"""Sets the sk of this ObsContentReq.
        Tenant's secret key (SK)
:param sk: The sk of this ObsContentReq.
:type sk: str
"""
self._sk = sk
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ObsContentReq):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
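# Usage sketch (illustrative; the credential values below are placeholders,
# not real keys):
#   req = ObsContentReq(bucket_name="my-bucket", ak="my-ak", sk="my-sk")
#   req.to_dict()  # -> {'bucket_name': 'my-bucket', 'ak': 'my-ak', 'sk': 'my-sk'}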
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
65ae635d43801f0ac9401fab6afbe228040b58f9
|
8f8ac99fd3ed9ceb36778b404f6fdd0b6899d3f4
|
/pyobjc-framework-Cocoa/PyObjCTest/test_cfuuid.py
|
12c65667350ddf8c8be73389156dd615e6a3126b
|
[
"MIT"
] |
permissive
|
strogo/pyobjc
|
ac4201c7742eb75348328eeecb7eedf4e3458de3
|
2579c5eaf44b0c5af77ee195c417d2c65e72dfda
|
refs/heads/master
| 2023-07-13T00:41:56.448005
| 2021-08-24T06:42:53
| 2021-08-24T06:42:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,982
|
py
|
import re
import CoreFoundation
from PyObjCTools.TestSupport import TestCase
class TestCFUUIDAPI(TestCase):
def testTypes(self):
self.assertIsCFType(CoreFoundation.CFUUIDRef)
def testTypeID(self):
v = CoreFoundation.CFUUIDGetTypeID()
self.assertIsInstance(v, int)
def testCreate(self):
self.assertResultIsCFRetained(CoreFoundation.CFUUIDCreate)
uuid = CoreFoundation.CFUUIDCreate(None)
self.assertIsNot(uuid, None)
self.assertIsInstance(uuid, CoreFoundation.CFUUIDRef)
text = CoreFoundation.CFUUIDCreateString(None, uuid)
self.assertIsInstance(text, str)
m = re.match("^[0-9A-Z]{8}(-[0-9A-Z]{4}){3}-[0-9A-Z]{12}$", text)
self.assertIsNot(m, None)
def testCreateWithBytes(self):
self.assertResultIsCFRetained(CoreFoundation.CFUUIDCreateWithBytes)
uuid = CoreFoundation.CFUUIDCreateWithBytes(
None, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
)
self.assertIsNot(uuid, None)
self.assertIsInstance(uuid, CoreFoundation.CFUUIDRef)
self.assertResultIsCFRetained(CoreFoundation.CFUUIDCreateString)
text = CoreFoundation.CFUUIDCreateString(None, uuid)
self.assertEqual(text, "01020304-0506-0708-090A-0B0C0D0E0F10")
self.assertRaises(
ValueError,
CoreFoundation.CFUUIDCreateWithBytes,
None,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
300,
)
self.assertRaises(
ValueError,
CoreFoundation.CFUUIDCreateWithBytes,
None,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
300,
16,
)
def testCreateFromString(self):
self.assertResultIsCFRetained(CoreFoundation.CFUUIDCreateFromString)
uuid1 = CoreFoundation.CFUUIDCreateFromString(
None, "01020304-0506-0708-090A-0B0C0D0E0F10"
)
self.assertIsNot(uuid1, None)
self.assertIsInstance(uuid1, CoreFoundation.CFUUIDRef)
text = CoreFoundation.CFUUIDCreateString(None, uuid1)
self.assertEqual(text, "01020304-0506-0708-090A-0B0C0D0E0F10")
uuid2 = CoreFoundation.CFUUIDCreateFromString(
None, "01020304-0506-0708-090A-0B0C0D0E0F10"
)
text = CoreFoundation.CFUUIDCreateString(None, uuid2)
self.assertEqual(text, "01020304-0506-0708-090A-0B0C0D0E0F10")
# CoreFoundation.CFUUID interns values
self.assertIs(uuid1, uuid2)
def testGetBytes(self):
uuid = CoreFoundation.CFUUIDCreateWithBytes(
None, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
)
self.assertIsNot(uuid, None)
self.assertIsInstance(uuid, CoreFoundation.CFUUIDRef)
bytes_value = CoreFoundation.CFUUIDGetUUIDBytes(uuid)
self.assertIsInstance(bytes_value, CoreFoundation.CFUUIDBytes)
self.assertEqual(bytes_value.byte0, 1)
self.assertEqual(bytes_value.byte1, 2)
self.assertEqual(bytes_value.byte2, 3)
self.assertEqual(bytes_value.byte3, 4)
self.assertEqual(bytes_value.byte4, 5)
self.assertEqual(bytes_value.byte5, 6)
self.assertEqual(bytes_value.byte6, 7)
self.assertEqual(bytes_value.byte7, 8)
self.assertEqual(bytes_value.byte8, 9)
self.assertEqual(bytes_value.byte9, 10)
self.assertEqual(bytes_value.byte10, 11)
self.assertEqual(bytes_value.byte11, 12)
self.assertEqual(bytes_value.byte12, 13)
self.assertEqual(bytes_value.byte13, 14)
self.assertEqual(bytes_value.byte14, 15)
self.assertEqual(bytes_value.byte15, 16)
def testConstant(self):
# This is an interesting one, the result of
# CoreFoundation.CFUUIDGetConstantUUIDWithBytes should not be released.
uuid = CoreFoundation.CFUUIDGetConstantUUIDWithBytes(None, *range(16))
CoreFoundation.CFRetain(
CoreFoundation.CFUUIDGetConstantUUIDWithBytes
) # Ensure the value won't be released.
self.assertIsNot(uuid, None)
self.assertIsInstance(uuid, CoreFoundation.CFUUIDRef)
s = CoreFoundation.CFUUIDCreateString(None, uuid)
uuid = None
del uuid
uuid = CoreFoundation.CFUUIDGetConstantUUIDWithBytes(None, *range(16))
self.assertIsNot(uuid, None)
self.assertIsInstance(uuid, CoreFoundation.CFUUIDRef)
t = CoreFoundation.CFUUIDCreateString(None, uuid)
self.assertEqual(s, t)
def testCreateFromUUIDBytes(self):
bytes_value = CoreFoundation.CFUUIDBytes(*range(16, 32))
uuid = CoreFoundation.CFUUIDCreateFromUUIDBytes(None, bytes_value)
self.assertIsNot(uuid, None)
self.assertIsInstance(uuid, CoreFoundation.CFUUIDRef)
text = CoreFoundation.CFUUIDCreateString(None, uuid)
self.assertEqual(text, "10111213-1415-1617-1819-1A1B1C1D1E1F")
def testStructs(self):
o = CoreFoundation.CFUUIDBytes()
self.assertHasAttr(o, "byte0")
self.assertHasAttr(o, "byte1")
self.assertHasAttr(o, "byte2")
self.assertHasAttr(o, "byte3")
self.assertHasAttr(o, "byte4")
self.assertHasAttr(o, "byte5")
self.assertHasAttr(o, "byte6")
self.assertHasAttr(o, "byte7")
self.assertHasAttr(o, "byte8")
self.assertHasAttr(o, "byte9")
self.assertHasAttr(o, "byte10")
self.assertHasAttr(o, "byte11")
self.assertHasAttr(o, "byte12")
self.assertHasAttr(o, "byte13")
self.assertHasAttr(o, "byte14")
self.assertHasAttr(o, "byte15")
|
[
"ronaldoussoren@mac.com"
] |
ronaldoussoren@mac.com
|
d3778584e95ef333ce94c9c0141d55f17ae297a7
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2214/60586/309439.py
|
43887ed3a7a647d20cd7bf664444a594497e9b76
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 144
|
py
|
a=input()
b=input()
if a=='1+1i' and b=='1+1i':
    print("0+2i")
elif a=='0+1i' and b=='0+1i':
print("-1+0i")
else:
print("0+-2i")
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
30bc551d847eeb3753771764c28f595558bbc9a0
|
e3b5e20bcb560a3c37c09f728b9340b1715c1818
|
/venv/lib/python3.7/site-packages/plotly/validators/scattercarpet/_hovertextsrc.py
|
748253ef5dd9d965185ead830412b2629267562e
|
[
"MIT"
] |
permissive
|
180Studios/LoginApp
|
63bc50b1f91e7221c7581627ab166eeb01758f5c
|
66ff684a81b23d8f45eef2c56be19a2afd95ab29
|
refs/heads/master
| 2022-12-24T00:33:08.481826
| 2020-02-03T05:14:41
| 2020-02-03T05:14:41
| 144,414,562
| 0
| 1
|
MIT
| 2022-12-08T01:38:26
| 2018-08-11T19:57:44
|
Python
|
UTF-8
|
Python
| false
| false
| 498
|
py
|
import _plotly_utils.basevalidators
class HovertextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name='hovertextsrc',
parent_name='scattercarpet',
**kwargs
):
super(HovertextsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'none'),
role=kwargs.pop('role', 'info'),
**kwargs
)
|
[
"kylenahas@gmail.com"
] |
kylenahas@gmail.com
|
35d3332b9b2acae00406b27ed618a1477d42c45d
|
3474b315da3cc5cb3f7823f19a18b63a8da6a526
|
/scratch/KRAMS/src/apps/scratch/faezeh/working_area/inputexport.py
|
1edffac01025a9a4ef04fe864a2eaeb85fcefe02
|
[] |
no_license
|
h4ck3rm1k3/scratch
|
8df97462f696bc2be00f1e58232e1cd915f0fafd
|
0a114a41b0d1e9b2d68dbe7af7cf34db11512539
|
refs/heads/master
| 2021-01-21T15:31:38.718039
| 2013-09-19T10:48:24
| 2013-09-19T10:48:24
| 29,173,525
| 0
| 0
| null | 2015-01-13T04:58:57
| 2015-01-13T04:58:56
| null |
UTF-8
|
Python
| false
| false
| 4,034
|
py
|
#!/usr/bin/env python
"""
This demonstrates how to create a plot offscreen and save it to an image
file on disk.
"""
# Standard library imports
import os, sys
# Major library imports
from numpy import fabs, linspace, pi, sin
from scipy.special import jn
# Enthought library imports
from enthought.traits.api import false
# Chaco imports
from enthought.chaco.api import ArrayPlotData, Plot, PlotGraphicsContext
from enthought.chaco.example_support import COLOR_PALETTE
from enthought.traits.api import HasTraits
from enthought.traits.api import Float, HasTraits, Button
from enthought.traits.ui.menu import OKButton, CancelButton
from enthought.traits.ui.api import View, Item, InstanceEditor
import os
DPI = 72.0
# This is a bit of a hack, to work around the fact that line widths don't scale
# with the GraphicsContext's CTM.
dpi_scale = DPI / 72.0
def create_plot():
numpoints = 100
low = -5
high = 15.0
x = linspace(low, high, numpoints)
pd = ArrayPlotData(index=x)
p = Plot(pd, bgcolor="lightgray", padding=50, border_visible=True)
for i in range(10):
pd.set_data("y" + str(i), jn(i,x))
p.plot(("index", "y" + str(i)), color=tuple(COLOR_PALETTE[i]),
width = 2.0 * dpi_scale)
p.x_grid.visible = True
# p.x_grid.line_width *= dpi_scale
p.x_grid.line_width = InputParameter().width
p.y_grid.visible = True
# p.y_grid.line_width *= dpi_scale
p.y_grid.line_width = InputParameter().height
p.legend.visible = True
return p
def draw_plot(filename, size=(800,600)):
container = create_plot()
container.outer_bounds = list(size)
container.do_layout(force=True)
gc = PlotGraphicsContext(size, dpi=DPI)
gc.render_component(container)
gc.save(filename)
return
def draw_pdf(filename, size=(800,600), width=0.0, height=0.0):
from enthought.chaco.pdf_graphics_context import PdfPlotGraphicsContext
container = create_plot()
container.bounds = list(size)
container.do_layout(force=True)
width = InputParameter().width
height = InputParameter().height
gc = PdfPlotGraphicsContext(filename=filename, dest_box = (0.5, 0.5, width, height))
gc.render_component(container)
gc.save()
def get_directory(filename):
print 'Please enter a path in which to place generated plots.'
print 'Press <ENTER> to generate in the current directory.'
path = raw_input('Path: ').strip()
# /home/faeze/ as path
if len(path) > 0 and not os.path.exists(path):
print 'The given path does not exist.'
sys.exit()
if not os.path.isabs(path):
print 'Creating image: ' + os.path.join(os.getcwd(), path, filename)
else:
print 'Creating image: ' + os.path.join(path, filename)
return os.path.join(path, filename)
class InputParameter(HasTraits):
width = 3.0
height = 4.0
class InputParam(HasTraits):
height = Float()
width = Float()
export_param = Button()
def _export_param_fired(self):
        test_file = os.path.join('', 'Export_file')
        with open(test_file + '.rtf', 'w') as output_file:
            output_file.write(self.height.__str__() + '\n' + self.width.__str__())
# print 'width', self.width
# print 'height', self.height
view_traits = View( Item("height"),
Item("width"),
Item('export_param', label = 'Export to file', style='simple', show_label = False),
resizable = True,
buttons = [ OKButton, CancelButton ],
height = 0.2,
width = 0.2 )
if __name__ == "__main__":
draw_plot(get_directory('noninteractive.png'), size=(800, 600))
    # The following requires ReportLab to be installed:
draw_pdf(get_directory('noninteractive.pdf'), size=(400,300))
ip = InputParam()
ip.configure_traits()
# EOF
|
[
"Axel@Axel-Pc"
] |
Axel@Axel-Pc
|
10fbec86fd6c3e609e74bfe53d632b78788da28c
|
08c8e80dc009166a8d678fd36b34dc6ddbbeecc7
|
/TTRPlayer.py
|
05d919340b3b6ecf7d02b12c5726ee121dbdafb9
|
[] |
no_license
|
wqa/TicketToRide
|
12f6361f2b1a0461c645817c505d0ebf7a3b9ea8
|
dbf9ea161c5bbc456b3980a019b93dc1499ba83d
|
refs/heads/master
| 2020-03-07T21:06:42.089183
| 2018-04-02T07:52:57
| 2018-04-02T07:52:57
| 127,717,750
| 0
| 0
| null | 2018-04-02T07:00:15
| 2018-04-02T07:00:15
| null |
UTF-8
|
Python
| false
| false
| 2,626
|
py
|
import collections
class Player(object):
def __init__(self,
startingHand,
startingTickets,
playerBoard,
playerPosition,
numTrains
):
"""orderNumber: int
startingHand: list
startingTickets: list
playerBoard: PlayerBoard object from the TTRBoard module
playerPosition: int
"""
self.name = '' #ask for them to enter it on first turn
        #implemented as a collection to avoid O(n) hand.remove(x)
self.hand = collections.Counter(startingHand)
self.tickets = {x:False for x in startingTickets}
self.numTrains = numTrains
self.points = 0
self.playerPosition = playerPosition
        #custom PlayerBoard object representing this player's board state
self.playerBoard = playerBoard
def removeCardsFromHand(self, color, numColor):
"""removes one ore more cards from hand
assumes all cards are in hand, error if not
cards: list
"""
assert self.hand[color] >= numColor
self.hand[color] -= numColor
#add card to hand
def addCardToHand(self, card):
"""adds a single card to hand
assumes card is a valid choice
card: String
"""
if card != None:
self.hand[card] += 1
#add ticket to hand
def addTicket(self, ticket):
"""adds a single ticket to tickets
ticket: tuple(city1, city2, value)
"""
self.tickets[ticket] = False
def completeTicket(self, ticket):
"""updates the value in the tickets dict to True for key: ticket
ticket: tuple(city1, city2, value)
"""
assert ticket in self.tickets
        self.tickets[ticket] = True
def getHand(self):
return self.hand
def addPoints(self, numPoints):
self.points += numPoints
def subtractPoints(self, numPoints):
self.points -= numPoints
def getPoints(self):
return self.points
def getTickets(self):
return self.tickets
def getNumTrains(self):
return self.numTrains
def playNumTrains(self, numTrains):
assert numTrains <= self.numTrains
self.numTrains -= numTrains
def setPlayerName(self, name):
"""sets playerName to name
name: string
"""
self.name = name
def getName(self):
return self.name
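# Minimal usage sketch (hypothetical values; playerBoard is stubbed with None
# here, whereas the game proper passes a PlayerBoard from the TTRBoard module):
#   p = Player(['red', 'red', 'blue'], [('city1', 'city2', 5)], None, 0, 45)
#   p.addCardToHand('red')
#   p.removeCardsFromHand('red', 2)
#   p.getHand()['red']  # -> 1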
|
[
"codeprogress1@gmail.com"
] |
codeprogress1@gmail.com
|
4c5d1f202e7cb831b781f2fdf9e12481448d3c4d
|
7482a2601861b61f61ad082dbca521a94c36dc92
|
/image_captioning/config/defaults.py
|
3c6c9351b41efc3e54c2292e17e424a4652735c1
|
[] |
no_license
|
congve1/ImageCaptioning
|
0da1945ac27e406c388824f47e7e4545691ef0a1
|
ae2d4ec2dc45bc00ff12cde4f55197654100c309
|
refs/heads/master
| 2020-05-04T17:17:42.993462
| 2019-07-07T08:58:28
| 2019-07-07T08:58:28
| 179,305,660
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,738
|
py
|
import os
from yacs.config import CfgNode as CN
# ------------------------------------------------------------------------------
# Convention about Training / Test specific parameters
# ------------------------------------------------------------------------------
# Whenever an argument can be either used for training or for testing, the
# corresponding name will be post-fixed by a _TRAIN for a training parameter,
# or _TEST for a test-specific parameter.
# For example, the number of images during training will be
# IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be
# IMAGES_PER_BATCH_TEST
# ------------------------------------------------------------------------------
# Config definition
# ------------------------------------------------------------------------------
_C = CN()
_C.MODEL = CN()
_C.MODEL.DEVICE = "cuda"
# if the WEIGHT starts with a catalog://, like :R-50, the code will look for
# the path in paths_catalog. Else, it will use it as the specified absolute
# path
_C.MODEL.WEIGHT = ""
# ------------------------------------------------------------------------------
# INPUT
# ------------------------------------------------------------------------------
_C.INPUT = CN()
_C.INPUT.SIZE = 256
# ------------------------------------------------------------------------------
# VOCAB
# ------------------------------------------------------------------------------
_C.VOCAB = CN()
_C.VOCAB.WORD_COUNT_THRESHOLD = 5
# ------------------------------------------------------------------------------
# Dataset
# ------------------------------------------------------------------------------
_C.DATASET = CN()
_C.DATASET.SEQ_MAX_LEN = 20 # 50 in all coco captions
_C.DATASET.SEQ_PER_IMG = 5
_C.DATASET.TRAIN = ''
_C.DATASET.VAL = ''
_C.DATASET.TEST = ''
_C.DATASET.VOCAB_PATH = ''
# ------------------------------------------------------------------------------
# DataLoader
# ------------------------------------------------------------------------------
_C.DATALOADER = CN()
_C.DATALOADER.NUM_WORKERS = 0
# ------------------------------------------------------------------------------
# Encoder options
# ------------------------------------------------------------------------------
_C.MODEL.ENCODER = CN()
# The encoder conv body to use
# The string must match a function that is imported in modeling.model_builder
_C.MODEL.ENCODER.CONV_BODY = "R-101-C5"
# Add StopGrad at a specified stage so the bottom layers are frozen
_C.MODEL.ENCODER.FREEZE_CONV_BODY_AT = 0
_C.MODEL.ENCODER.ATT_SIZE = 14
# 2048 for C5; 1024 for C4 ### must be consistent with CONV_BODY
_C.MODEL.ENCODER.FEATURE_SIZE = 2048
# ------------------------------------------------------------------------------
# ResNe[X]t options (ResNets = {ResNet, ResNeXt})
# ------------------------------------------------------------------------------
_C.MODEL.RESNETS = CN()
# Number of groups to use: 1 ==> ResNet; > 1 ==> ResNeXt
_C.MODEL.RESNETS.NUM_GROUPS = 1
# Baseline width of each group
_C.MODEL.RESNETS.WIDTH_PER_GROUP = 64
# Place the stride 2 conv on the 1x1 filter
# Use True only for the original MSRA ResNet; Use False for C2 and Torch models
_C.MODEL.RESNETS.STRIDE_IN_1X1 = False
# Residual Transformation function
_C.MODEL.RESNETS.TRANS_FUNC = "BottleneckWithBatchNorm"
# ResNet's stem function (conv1 and pool1)
_C.MODEL.RESNETS.STEM_FUNC = "StemWithBatchNorm"
# Apply dilation in stage "res5"
_C.MODEL.RESNETS.RES5_DILATION = 1
_C.MODEL.RESNETS.BACKBONE_OUT_CHANNELS = 256
_C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256
_C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64
# ---------------------------------------------------------------------------- #
# FPN options
# ---------------------------------------------------------------------------- #
_C.MODEL.FPN = CN()
_C.MODEL.FPN.USE_GN = False
_C.MODEL.FPN.USE_RELU = False
# -------------------------------------------------------------------------------
# Group Norm
# -------------------------------------------------------------------------------
_C.MODEL.GROUP_NORM = CN()
# Number of dimensions per group in GroupNorm (-1 if using NUM_GROUPS)
_C.MODEL.GROUP_NORM.DIM_PER_GP = -1
# Number of groups in GroupNorm(-1 if using DIM_PER_GP)
_C.MODEL.GROUP_NORM.NUM_GROUPS = 32
# GroupNorm's small constant in the denominator
_C.MODEL.GROUP_NORM.EPS = 1e-5
# ------------------------------------------------------------------------------
# Decoder options
# ------------------------------------------------------------------------------
_C.MODEL.DECODER = CN()
_C.MODEL.DECODER.ARCH = "TopDown"
# word embedding size
_C.MODEL.DECODER.EMBEDDING_SIZE = 512
# num of hidden units of the rnn
_C.MODEL.DECODER.HIDDEN_SIZE = 512
# strength of dropout in the language model rnn.
_C.MODEL.DECODER.DROPOUT_PROB = 0.5
# the hidden size of the attention in MLP, only useful in show_attend_tell;
# 0 if not using hidden layer
_C.MODEL.DECODER.ATT_HIDDEN_SIZE = 512
_C.MODEL.DECODER.BEAM_SIZE = 3
# ------------------------------------------------------------------------------
# Solver
# ------------------------------------------------------------------------------
_C.SOLVER = CN()
_C.SOLVER.MAX_ITER = 40000
_C.SOLVER.OPTIMIZER = "SGD"
_C.SOLVER.SCHEDULER = "WarmupMultiStepLR"
_C.SOLVER.BASE_LR = 0.1
_C.SOLVER.BIAS_LR_FACTOR = 2
# after how many iterations to start self-critical training
# -1 for disable, 0 from the beginning
_C.SOLVER.SCST_AFTER = -1
# clip gradients at this norm
_C.SOLVER.GRAD_CLIP = 10.0
_C.SOLVER.MOMENTUM = 0.9
_C.SOLVER.WEIGHT_DECAY = 0.0005
_C.SOLVER.WEIGHT_DECAY_BIAS = 0
# Adam beta
_C.SOLVER.BETAS = (0.9, 0.999)
# SGDR settings
_C.SOLVER.T_MAX = 5000
_C.SOLVER.T_MULTI = 2
_C.SOLVER.ETA_MIN = 0.00001
# WarmupMultiStep Scheduler settings
_C.SOLVER.GAMMA = 0.1
_C.SOLVER.STEPS = (30000, )
_C.SOLVER.WARMUP_FACTOR = 1.0 / 3
_C.SOLVER.WARMUP_ITERS = 500
_C.SOLVER.WARMUP_METHOD = "linear"
# Step Scheduler settings
_C.SOLVER.STEP_SIZE = 1200
_C.SOLVER.CHECKPOINT_PERIOD = 2500
_C.SOLVER.LOG_PERIOD = 100
_C.SOLVER.VAL_PERIOD = 1000
# Number of images per batch
# This is global
_C.SOLVER.IMS_PER_BATCH = 16
_C.SOLVER.METRIC_LOGGER_NAME = 'model'
# ------------------------------------------------------------------------------
# Specific test options
# ------------------------------------------------------------------------------
_C.TEST = CN()
# Number of images per batch
# This is global
_C.TEST.IMS_PER_BATCH = 8
_C.TEST.BEAM_SIZE = 3
# ------------------------------------------------------------------------------
# Misc options
# ------------------------------------------------------------------------------
_C.OUTPUT_DIR = "save"
_C.PATHS_CATALOG = os.path.join(os.path.dirname(__file__), "paths_catalog_lmdb.py")
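# Typical consumption pattern (a sketch; the YAML file name is hypothetical,
# and clone() keeps the module-level _C defaults pristine):
#   cfg = _C.clone()
#   cfg.merge_from_file("experiment.yaml")
#   cfg.freeze()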
|
[
"congve1@live.com"
] |
congve1@live.com
|
8c636ef816876d5a9d8bccdf63b5af6f4356b911
|
252b3451ad9683166937152444fedec8b5da6647
|
/obsolete/py_deprecated/LookupTables.py
|
cd7936866ad410f7fce43018c4363adbe2b33474
|
[
"MIT"
] |
permissive
|
faithcomesbyhearing/dbp-etl
|
993c4c329d8f1950234f02f7fb048ec29b1883da
|
eb56863415d0d83f7f7928d0fcf927425c039f95
|
refs/heads/master
| 2023-08-08T15:11:37.815057
| 2023-07-10T16:10:21
| 2023-07-10T16:10:21
| 204,502,403
| 1
| 1
|
MIT
| 2023-07-24T23:39:37
| 2019-08-26T15:12:47
|
Python
|
UTF-8
|
Python
| false
| false
| 5,498
|
py
|
# LookupTables
class LookupTables:
def __init__(self):
# these are not being used yet?
self.otBooks=[ "GEN", "EXO", "LEV", "NUM", "DEU", "JOS", "JDG", "RUT",
"1SA", "2SA", "1KI", "2KI", "1CH", "2CH", "EZR", "NEH",
"EST", "JOB", "PSA", "PRO", "ECC", "SNG", "ISA", "JER",
"LAM", "EZK", "DAN", "HOS", "JOL", "AMO", "OBA", "JON",
"MIC", "NAM", "HAB", "ZEP", "HAG", "ZEC", "MAL"]
self.ntBooks=[ "MAT", "MRK", "LUK", "JHN", "ACT", "ROM", "1CO", "2CO",
"GAL", "EPH", "PHP", "COL", "1TH", "2TH", "1TI", "2TI",
"TIT", "PHM", "HEB", "JAS", "1PE", "2PE", "1JN", "2JN",
"3JN", "JUD", "REV"]
self.apBooks=[ "1ES", "1MA", "1MQ", "2BA", "2ES", "2MA", "2MQ", "3MA",
"3MQ", "4BA", "4MA", "5EZ", "6EZ", "BAR", "BEL", "DAG",
"ENO", "ESG", "EZA", "JDT", "JUB", "LAO", "LBA", "LJE",
"MAN", "ODA", "PS2", "PS3", "PSS", "REP", "S3Y", "SIR",
"SUS", "TOB", "WIS"]
def bookIdBySequence(self, seqCode):
seqDict = {
"B01": "MAT",
"B02": "MRK",
"B03": "LUK",
"B04": "JHN",
"B05": "ACT",
"B06": "ROM",
"B07": "1CO",
"B08": "2CO",
"B09": "GAL",
"B10": "EPH",
"B11": "PHP",
"B12": "COL",
"B13": "1TH",
"B14": "2TH",
"B15": "1TI",
"B16": "2TI",
"B17": "TIT",
"B18": "PHM",
"B19": "HEB",
"B20": "JAS",
"B21": "1PE",
"B22": "2PE",
"B23": "1JN",
"B24": "2JN",
"B25": "3JN",
"B26": "JUD",
"B27": "REV",
"A01": "GEN",
"A02": "EXO",
"A03": "LEV",
"A04": "NUM",
"A05": "DEU",
"A06": "JOS",
"A07": "JDG",
"A08": "RUT",
"A09": "1SA",
"A10": "2SA",
"A11": "1KI",
"A12": "2KI",
"A13": "1CH",
"A14": "2CH",
"A15": "EZR",
"A16": "NEH",
"A17": "EST",
"A18": "JOB",
"A19": "PSA",
"A20": "PRO",
"A21": "ECC",
"A22": "SNG",
"A23": "ISA",
"A24": "JER",
"A25": "LAM",
"A26": "EZK",
"A27": "DAN",
"A28": "HOS",
"A29": "JOL",
"A30": "AMO",
"A31": "OBA",
"A32": "JON",
"A33": "MIC",
"A34": "NAM",
"A35": "HAB",
"A36": "ZEP",
"A37": "HAG",
"A38": "ZEC",
"A39": "MAL"}
return seqDict.get(seqCode)
# This should replaced with a query to table books after more is added
def bookIdBy2Char(self, twoCharCode):
twoCharDict = {
# New Testament
"MT": "MAT",
"MK": "MRK",
"LK": "LUK",
"JN": "JHN",
"AC": "ACT",
"RM": "ROM",
"C1": "1CO",
"C2": "2CO",
"GL": "GAL",
"EP": "EPH",
"PP": "PHP",
"CL": "COL",
"H1": "1TH",
"H2": "2TH",
"T1": "1TI",
"T2": "2TI",
"TT": "TIT",
"PM": "PHM",
"HB": "HEB",
"JM": "JAS",
"P1": "1PE",
"P2": "2PE",
"J1": "1JN",
"J2": "2JN",
"J3": "3JN",
"JD": "JUD",
"RV": "REV",
# Old Testament
"GN": "GEN",
"EX": "EXO",
"LV": "LEV",
"NU": "NUM",
"DT": "DEU",
"JS": "JOS",
"JG": "JDG",
"RT": "RUT",
"S1": "1SA",
"S2": "2SA",
"K1": "1KI",
"K2": "2KI",
"R1": "1CH",
"R2": "2CH",
"ER": "EZR",
"NH": "NEH",
"ET": "EST",
"JB": "JOB",
"PS": "PSA",
"PR": "PRO",
"EC": "ECC",
"SS": "SNG",
"IS": "ISA",
"JR": "JER",
"LM": "LAM",
"EK": "EZK",
"DN": "DAN",
"HS": "HOS",
"JL": "JOL",
"AM": "AMO",
"OB": "OBA",
"JH": "JON",
"MC": "MIC",
"NM": "NAM",
"HK": "HAB",
"ZP": "ZEP",
"HG": "HAG",
"ZC": "ZEC",
"ML": "MAL",
# Apocrypha
"E1": "1ES", # 3 Esdras
"E2": "2ES", # 4 Esdras
"M1": "1MA", # 1 Maccabees
"M2": "2MA", # 2 Maccabees
"M3": "3MA", # 3 Maccabees
"M4": "4MA", # 4 Maccabees
"BR": "BAR", # First book of Baruch
"BL": "BEL", # Rest of Daniel
"DG": "DAG", # Daniel 14
"EG": "ESG", # Greek Esther
"JT": "JDT", # Judith
#"LJ": None, # Apocryphal something
#"PA": None, # Apocryphal something
#"PN": None, # Apocryphal something
"PX": "PS2", # Psalms 152
"SR": "SIR", # Sirach
"SN": "SUS", # Greek Daniel
"TB": "TOB", # Tobit
"WS": "WIS", # Wisdom of Solomon
# USFM Peripheral Book Codes
"FR": "FRT", # Front Matter
"IN": "INT", # Introduction
"BK": "BAK", # Back Matter
"CN": "CNC", # Concordance
"GS": "GLO", # Glossary
"TX": "TDX", # Topical Index
"OH": "OTH", # Other
"XA": "XXA", #
"XB": "XXB", #
"XC": "XXC", #
"XD": "XXD", #
"XE": "XXE", #
"XF": "XXF", #
"XG": "XXG" #
}
return twoCharDict.get(twoCharCode)
def scriptCode(self, script):
scriptDic = {
"Amharic":"Ethi",
"Arabic":"Arab",
"Armenian":"Armn",
"Bengali":"Beng",
"Bengali Script":"Beng",
"Berber":"Tfng",
"Burmese":"Mymr",
"Canadian Aboriginal Syllabic":"Cans",
"Canadian Aboriginal Syllabics":"Cans",
"Cherokee Sylabary":"Cher",
"Cyrillic":"Cyrl",
"Devanagari":"Deva",
"Devangari":"Deva",
"Ethiopic":"Ethi",
"Ethoiopic":"Ethi",
"Ethopic":"Ethi",
"Ge'ez":"Ethi",
"Greek":"Grek",
"Gujarati":"Gujr",
"Gurmukhi":"Guru",
"Han":"Hani",
"Hangul (Korean)":"Kore",
"Hebrew":"Hebr",
"Japanese":"Jpan",
"Kannada":"Knda",
"Khmer":"Khmr",
"Khmer Script":"Khmr",
"Lao":"Laoo",
"Latin":"Latn",
"Latin (Africa)":"Latn",
"Latin (African)":"Latn",
"Latin (Latin America)":"Latn",
"Latin (Latin American)":"Latn",
"Latin (PNG)":"Latn",
"Latin (SE Asia)":"Latn",
"Malayalam":"Mlym",
"NA":"Zyyy",
"Oriya":"Orya",
"Tamil":"Taml",
"Telugu":"Telu",
"Thai":"Thai",
"Tibetan":"Tibt"}
        return scriptDic[script]  # raising KeyError on an unknown script is intentional (fatal error)
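# Example lookups (illustrative; the values come straight from the tables above):
#   lt = LookupTables()
#   lt.bookIdBy2Char('MT')        # -> 'MAT'
#   lt.bookIdBySequence('A01')    # -> 'GEN'
#   lt.scriptCode('Latin (PNG)')  # -> 'Latn'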
|
[
"gary@shortsands.com"
] |
gary@shortsands.com
|
27b669ce53b5e339d24c950c5bbf673fec5fd017
|
1ca315ac6f10d91784c9d2e9e0c993d1a528c644
|
/locators/class_parse_locators.py
|
1168ff983b02f53c51788fee41cf24dd739271cf
|
[] |
no_license
|
osakoh/Scrapping-example
|
711df9e735f62dfdf5aad5891f8cda4cda678f44
|
6964f86f56a5797c8152f2e50048190d331ec6d4
|
refs/heads/main
| 2022-12-27T20:08:17.684832
| 2020-10-09T17:18:11
| 2020-10-09T17:18:11
| 302,692,216
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
class ParsedItemLocators:
"""
    Locates items in the HTML page.
    Keeping every CSS selector in one place makes it easy to see what the
    parser targets and to update a locator quickly when the page changes.
"""
NAME_LOCATOR = 'article.product_pod h3 a'
LINK_LOCATOR = 'article.product_pod h3 a'
PRICE_LOCATOR = 'article.product_pod div.product_price p.price_color'
RATING_LOCATOR = 'article.product_pod p.star-rating'
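# Usage sketch (assumes BeautifulSoup, which this module does not itself
# import, and a hypothetical page_html string):
#   from bs4 import BeautifulSoup
#   soup = BeautifulSoup(page_html, 'html.parser')
#   links = soup.select(ParsedItemLocators.LINK_LOCATOR)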
|
[
"macmive@gmail.com"
] |
macmive@gmail.com
|
718ce3d58ea9a8e84fe1c8a98f3bed47d617e276
|
743fe3fd926c4f23353e4d2801b007721f3dd1a1
|
/docstrings/bitmap.py
|
f9d253fbdea77806791284b8dfc21e3431bef2d8
|
[
"BSD-2-Clause-Views"
] |
permissive
|
mdboom/freetypy
|
357cc7570987bf07daa3abd348ed616ad085186a
|
01b72ad35a613f1366accf16318d078b1e0dfc83
|
refs/heads/master
| 2021-01-18T07:03:53.232617
| 2015-11-04T01:20:01
| 2015-11-04T01:20:01
| 45,196,480
| 1
| 1
| null | 2015-10-29T16:38:09
| 2015-10-29T16:38:09
| null |
UTF-8
|
Python
| false
| false
| 5,308
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Michael Droettboom All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# The views and conclusions contained in the software and
# documentation are those of the authors and should not be interpreted
# as representing official policies, either expressed or implied, of
# the FreeBSD Project.
from __future__ import print_function, unicode_literals, absolute_import
Bitmap__init__ = """
A structure used to describe a bitmap or pixmap to the raster.
`Bitmap` supports the Python buffer interface, so it is easy to
convert it to a Numpy array. For example::
>>> import numpy as np
>>> a = np.asarray(bitmap)
"""
Bitmap_buffer = """
Get the bitmap's contents as a buffer.
In most cases, the preferred method to get the data is to cast the
`Bitmap` object to a memoryview, since that will also have size and
type information.
"""
Bitmap_convert = """
Convert a `Bitmap` to 8 bits per pixel. Given a `Bitmap` with depth
1bpp, 2bpp, 4bpp, or 8bpp converts it to one with depth 8bpp, making
the number of used bytes per line (a.k.a. the ‘pitch’) a multiple of
`alignment`.
Parameters
----------
alignment : int, optional
The pitch of the bitmap is a multiple of this parameter. Common
values are 1, 2, or 4.
Returns
-------
target : Bitmap
The bitmap, converted to 8bpp.
"""
Bitmap_num_grays = """
The number of gray levels used in the bitmap. This field is only used
with `PIXEL_MODE.GRAY`.
"""
Bitmap_pitch = """
The number of bytes taken by one bitmap row.
Includes padding.
The pitch is positive when the bitmap has a ‘down’ flow, and negative
when it has an ‘up’ flow. In all cases, the pitch is an offset to add
to a bitmap pointer in order to go down one row.
Note that ‘padding’ means the alignment of a bitmap to a byte border,
and FreeType functions normally align to the smallest possible integer
value.
For the B/W rasterizer, `pitch` is always an even number.
To change the pitch of a bitmap (say, to make it a multiple of 4), use
`Bitmap.convert`. Alternatively, you might use callback functions to
directly render to the application's surface.
"""
Bitmap_pixel_mode = """
The `PIXEL_MODE`, i.e., how pixel bits are stored.
"""
Bitmap_rows = """
The number of bitmap rows.
"""
Bitmap_to_list = """
|freetypy| Convert the bitmap to a nested list.
"""
Bitmap_width = """
The number of pixels in bitmap row.
"""
PIXEL_MODE = """
Constants related to the pixel mode of bitmaps.
- `MONO`: A monochrome bitmap, using 1 bit per pixel. Note that pixels
are stored in most-significant order (MSB), which means that the
left-most pixel in a byte has value 128.
- `GRAY`: An 8-bit bitmap, generally used to represent anti-aliased
glyph images. Each pixel is stored in one byte. Note that the number
of ‘gray’ levels is stored in the ‘num_grays’ field of the Bitmap
structure (it generally is 256).
- `GRAY2`: A 2-bit per pixel bitmap, used to represent embedded
anti-aliased bitmaps in font files according to the OpenType
specification. We haven't found a single font using this format,
however.
- `GRAY4`: A 4-bit per pixel bitmap, representing embedded
anti-aliased bitmaps in font files according to the OpenType
specification. We haven't found a single font using this format,
however.
- `LCD`: An 8-bit bitmap, representing RGB or BGR decimated glyph
images used for display on LCD displays; the bitmap is three times
wider than the original glyph image. See also `RENDER_MODE.LCD`. On
many freetype builds, this functionality will be disabled due to
patent restrictions, in which case the resulting bitmap will be
grayscale.
- `LCD_V`: An 8-bit bitmap, representing RGB or BGR decimated glyph
images used for display on rotated LCD displays; the bitmap is three
times taller than the original glyph image. See also
`RENDER_MODE.LCD_V`. On many freetype builds, this functionality
will be disabled due to patent restrictions, in which case the
resulting bitmap will be grayscale.
"""
|
[
"mdboom@gmail.com"
] |
mdboom@gmail.com
|
8180d20b9bd00bbe47b490f34495c8f341ec30bc
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_narrators.py
|
7da72b67f09225deba5ce48d2f216e8a178bf663
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
py
|
from xai.brain.wordbase.nouns._narrator import _NARRATOR
#class header
class _NARRATORS(_NARRATOR, ):
def __init__(self,):
_NARRATOR.__init__(self)
self.name = "NARRATORS"
self.specie = 'nouns'
self.basic = "narrator"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
1e694dedbb1c710bec69da84e4e55be2f1a13405
|
d21dbab3f374eb42a10f9ec7c434c1ca6fb2bff7
|
/Python/03 String/04 Mutations.py
|
ba1b1e83ce4d06eb5589e674c69d305472bec8cd
|
[] |
no_license
|
almamuncsit/HackerRank
|
5360ad1d54aa01075dba5527f6ae695e4c6d9c7a
|
6599cde4c7541ebf27bacff8af02dc0c3eaaa678
|
refs/heads/master
| 2021-07-06T00:33:25.912754
| 2021-01-13T09:09:29
| 2021-01-13T09:09:29
| 222,364,072
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 270
|
py
|
def mutate_string(string, position, character):
str_list = list(string)
str_list[position] = character
return ''.join(str_list)
if __name__ == '__main__':
s = input()
i, c = input().split()
s_new = mutate_string(s, int(i), c)
print(s_new)
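# Example (HackerRank's sample case):
#   mutate_string("abracadabra", 5, 'k')  # -> 'abrackdabra'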
|
[
"msarkar.cse@gmail.com"
] |
msarkar.cse@gmail.com
|
8ef9e1347e94621ccc9ed1cd1d225d3447098d25
|
02e4920166051129d1ca28a0da80405a982f1cfe
|
/curso_py/ex014.py
|
ae83f034a8736245163401189b8c4d757af60e64
|
[] |
no_license
|
felipeonf/Exercises_Python
|
1ab40cea2466d6bb5459b5384a1dde8e1066b3b4
|
8eb2d17a35a6352fd5268a5fa43b834443171c70
|
refs/heads/main
| 2023-07-23T22:30:13.567469
| 2021-08-25T03:34:33
| 2021-08-25T03:34:33
| 397,062,295
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 208
|
py
|
import random
lista = []
i = 0
while i < 4:
    lista.append(input(f'Enter the name of student #{1+i}: '))
    i += 1
random.shuffle(lista)
print(f'The chosen presentation order was {lista}')
|
[
"noreply@github.com"
] |
felipeonf.noreply@github.com
|
133bb56e795f1304f909216fc94676e89bfc8e04
|
43eb31fb324240cf6f4150e310c5a7ec4087bbed
|
/online_inference/requests/make_request.py
|
324e23627b425313868aa3e32c1bd9f9e36c1ccb
|
[] |
no_license
|
made-ml-in-prod-2021/bulaevvi
|
67cb63c0573b71eb152d102b4091f79eb887bfd5
|
6c0de66bb24c248b9291c03ddeed95ed1e990c61
|
refs/heads/main
| 2023-06-01T17:24:32.476388
| 2021-06-22T06:17:21
| 2021-06-22T06:17:21
| 354,891,217
| 0
| 0
| null | 2021-06-22T06:17:21
| 2021-04-05T16:03:26
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 572
|
py
|
import pandas as pd
import requests
import time
ENDPOINT = "http://127.0.0.1:8000/predict"
REQUEST_FILE = "requests.csv"
NUM_REQUESTS = 100
if __name__ == "__main__":
data = pd.read_csv(REQUEST_FILE)
for i in range(NUM_REQUESTS):
request_data = data.iloc[i].to_dict()
request_data["id"] = i
response = requests.get(
ENDPOINT,
json=[request_data],
)
print(f'Request: {request_data}')
print(f'Response CODE: {response.status_code}')
print(f'Response BODY: {response.json()}')
|
[
"noreply@github.com"
] |
made-ml-in-prod-2021.noreply@github.com
|
50dda7b0b5ff6c2dd3b252bb5836dffa34c2fd4e
|
3f05ce6a332003595064d14b24b57fc36021da92
|
/matscholar_web/tests/test_util.py
|
a630bf3cc334db58f3314765428bfb6b5e431020
|
[
"MIT"
] |
permissive
|
materialsintelligence/matscholar-web
|
392f845dff515cf7f4f4684d5b105e4c40a40d88
|
95231228fc6da1a596653d774307b10916fb5847
|
refs/heads/master
| 2023-05-10T06:54:44.213445
| 2023-01-24T20:45:00
| 2023-01-24T20:45:00
| 151,479,404
| 9
| 13
|
MIT
| 2023-05-02T18:28:28
| 2018-10-03T20:57:22
|
CSS
|
UTF-8
|
Python
| false
| false
| 832
|
py
|
import json
import os
import unittest
import matscholar_web
from matscholar_web.util import load_static_data_file
"""
Tests for core utilities.
"""
class TestCoreUtils(unittest.TestCase):
def setUp(self) -> None:
rootdir = os.path.dirname(os.path.abspath(matscholar_web.__file__))
data_dir = os.path.join(rootdir, "assets/data/")
self.test_fname = "test_file.json"
self.test_fpath = os.path.join(data_dir, self.test_fname)
self.true_data = {"a": [1, 2, 3], "b": "something"}
def test_load_static_file(self):
with open(self.test_fpath, "w") as f:
json.dump(self.true_data, f)
loaded_data = load_static_data_file(self.test_fname)
self.assertEqual(self.true_data, loaded_data)
def tearDown(self) -> None:
os.remove(self.test_fpath)
|
[
"ardunn@lbl.gov"
] |
ardunn@lbl.gov
|
c7d3f584401e6ffd29d0dc4040517f4a44631100
|
e9538b7ad6d0ce0ccfbb8e10c458f9e0b73926f6
|
/plugins/module_utils/network/ftd/operation.py
|
72d3e157b3287863a06c9ac800e2fde6a814e605
|
[] |
no_license
|
ansible-collection-migration/misc.not_a_real_collection
|
b3ef8090c59de9ac30aca083c746ec3595d7f5f5
|
7ab1af924a3db4ada2f714b09bb392614344cb1e
|
refs/heads/master
| 2020-12-18T13:48:51.849567
| 2020-01-22T17:39:18
| 2020-01-22T17:39:18
| 235,400,821
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,619
|
py
|
# Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.ftd.configuration import ParamName, PATH_PARAMS_FOR_DEFAULT_OBJ
class FtdOperations:
"""
Utility class for common operation names
"""
GET_SYSTEM_INFO = 'getSystemInformation'
GET_MANAGEMENT_IP_LIST = 'getManagementIPList'
GET_DNS_SETTING_LIST = 'getDeviceDNSSettingsList'
GET_DNS_SERVER_GROUP = 'getDNSServerGroup'
def get_system_info(resource):
"""
Executes `getSystemInformation` operation and returns information about the system.
:param resource: a BaseConfigurationResource object to connect to the device
:return: a dictionary with system information about the device and its software
"""
path_params = {ParamName.PATH_PARAMS: PATH_PARAMS_FOR_DEFAULT_OBJ}
system_info = resource.execute_operation(FtdOperations.GET_SYSTEM_INFO, path_params)
return system_info
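# Usage sketch (illustrative; `resource` must be an already-connected
# BaseConfigurationResource, and the response schema comes from the device):
#   info = get_system_info(resource)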
|
[
"ansible_migration@example.com"
] |
ansible_migration@example.com
|
66c716da60208194ea298347d9a3cd37cde9cdbe
|
51e6015db62fd30ff9eaa724926e8373aedf796e
|
/custom_components/zhibot/chatbot.py
|
f071c55cac18fde4f0e9057457baed88c9e1df32
|
[] |
no_license
|
xxx2016/HAExtra
|
ed0cb3c0a5876c11894bde8a96fde31ca8a9e3a5
|
e71dc33f51455e2d91ab2d7eec39a931d06847d9
|
refs/heads/master
| 2023-02-24T04:41:38.779325
| 2021-01-25T13:04:12
| 2021-01-25T13:04:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,692
|
py
|
from homeassistant.components.http import HomeAssistantView
from homeassistant.util.json import load_json, save_json
# from homeassistant.components.http import KEY_HASS
# Logging
import logging
_LOGGER = logging.getLogger(__name__)
class chatbotView(HomeAssistantView):
"""View to handle Configuration requests."""
def __init__(self, hass, conf):
self.name = self.__class__.__name__.rstrip('View').lower()
self.url = '/' + self.name
self.requires_auth = False
self.hass = hass
self.password = conf.get('password')
        if self.password is None:  # Auth: config UI confirmation, instead of pre-shared password
self._configuring = None
self.conf = load_json(hass.config.path('.' + self.name))
if not self.conf:
self.conf = []
async def post(self, request):
try:
# request[KEY_REAL_IP]
# request.app[KEY_HASS]
data = await request.json()
_LOGGER.debug("REQUEST: %s", data)
            answer = await self.handle(data) if self.check(request, data) else "Access not authorized!"
        except Exception:
            import traceback
            _LOGGER.error(traceback.format_exc())
            answer = "Something went wrong!"
_LOGGER.debug("RESPONSE: %s", answer)
return self.json(self.response(answer))
def response(self, answer):
return None
async def handle(self, data):
return "未能处理"
def check(self, request, data):
if self.password is not None:
return self.password == request.query.get('password') or self.password == ''
return self.config(data)
def config(self, data):
configurator = self.hass.components.configurator
if self._configuring:
configurator.async_request_done(self._configuring)
def config_callback(fields):
configurator.request_done(self._configuring)
self._configuring = None
_LOGGER.debug(fields)
if fields.get('agree') == 'ok':
self.config_done(data)
save_json(self.hass.config.path('.' + self.name), self.conf)
self._configuring = configurator.async_request_config(
'智加加', config_callback,
description=self.config_desc(data),
            submit_caption='Done',
            fields=[{'id': 'agree', 'name': 'Enter "ok" to allow access'}],
)
return False
def config_done(self, data):
pass
def config_desc(self, data):
return "授权访问"
|
[
"Yonsm@qq.com"
] |
Yonsm@qq.com
|
46dbd48b6d5e6c8ffb047612a78d868f11973154
|
17ba39d104403a36ecdfe83da0d5424feb3fdf24
|
/accounts/serializers.py
|
5758bb9f9ed301c9eb631dfdfec009cdfca7f20d
|
[] |
no_license
|
bellomusodiq/obs
|
46bc3bfc316d224c732a8747649016ca2fdf9493
|
a207bf51c2e21c10996e53f01e56368b648c7e6e
|
refs/heads/master
| 2020-04-02T21:35:46.813250
| 2018-10-30T11:11:38
| 2018-10-30T11:11:38
| 151,956,354
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,275
|
py
|
from rest_framework import serializers
from rest_framework_jwt.settings import api_settings
from .models import User, CuponCode
from django.template.loader import render_to_string
from django.core.mail import EmailMessage
import string, random
class UserSerializer(serializers.ModelSerializer):
def gen_token(self):
choices = string.ascii_letters + string.digits + string.hexdigits
gen = ''
for i in range(10):
gen += random.choice(choices)
return gen
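    # Note: random.choice() is not cryptographically secure, and string.hexdigits
    # overlaps the other two sets, skewing the draw toward hex characters;
    # secrets.choice() over a deduplicated alphabet would be the safer pick for
    # unguessable referral codes.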
class Meta:
model = User
fields = (
'id', 'email', 'username', 'firstname', 'lastname',
'referral_allowance', 'read_allowance', 'comment_allowance', 'referral_code',
'is_admin', 'is_active', 'password', 'cupon_code'
)
read_only_fields = ('last_login','activation_token', 'is_active')
extra_kwargs = {
'password': {'write_only': True},
'referral_allowance': {'read_only': True},
'read_allowance': {'read_only': True},
'referral_code': {'read_only': True},
}
def validate_cupon_code(self, value):
if value in [code.code for code in CuponCode.objects.all()] or value == '':
return value
if(value in [user.referral_code for user in User.objects.filter(is_admin=False)]):
return value
raise serializers.ValidationError('Incorrect cupon code, input a correct cupon code or leave blank')
def create(self, validated_data, *args, **kwargs):
user = User(
username = validated_data['username'],
email = validated_data['email'],
firstname = validated_data['firstname'],
lastname = validated_data['lastname'],
cupon_code = validated_data['cupon_code'],
referral_code = self.gen_token()
)
user.set_password(validated_data['password'])
user.save()
if not user.is_admin:
cupon_code = validated_data['cupon_code']
if(cupon_code in [user.referral_code for user in User.objects.filter(is_admin=False).exclude(pk=user.pk)]):
user_obj = User.objects.get(referral_code=cupon_code)
user_obj.referral_allowance = float(user_obj.referral_allowance) + 200
user_obj.save()
if(cupon_code in [cupon_code.code for cupon_code in CuponCode.objects.all()]):
user.referral_allowance = float(user.referral_allowance) + 500
CuponCode.objects.get(code=cupon_code).delete()
user.save()
return user
class UserLoginSerializer(serializers.ModelSerializer):
token = serializers.CharField(allow_blank=True, read_only=True)
username = serializers.CharField()
class Meta:
fields = [
'username',
'password',
'email',
'token',
'id',
'is_active',
'is_admin',
]
model = User
extra_kwargs = {
"password": {"write_only": True},
"email": {"read_only": True},
"is_active": {"read_only": True},
"is_admin": {"read_only": True},
"id": {"read_only": True},
}
def validate(self, data):
username = data.get('username', None)
password = data['password']
if not username:
raise serializers.ValidationError('A username is required to login')
        user = User.objects.filter(username=username).first()
        if not user:
            raise serializers.ValidationError('The username is not valid')
        if not user.check_password(password):
            raise serializers.ValidationError('Incorrect credentials, please try again')
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
payload = jwt_payload_handler(user)
token = jwt_encode_handler(payload)
data['token'] = token
data['id'] = user.id
return data
class CuponCodeSerializer(serializers.ModelSerializer):
class Meta:
model = CuponCode
fields = ['id', 'code']
extra_kwargs = {
"code": {"read_only": True},
}
|
[
"bmayowa25@gmail.com"
] |
bmayowa25@gmail.com
|
067961039a165f93e34347f56a947b446b17133d
|
1117ae9a0bc4bbbe0e505e573af70a9629ec8c45
|
/App/models.py
|
865bdb8ca8a7b9edfe0f5c7e75a61cbabda2da10
|
[] |
no_license
|
Chukslord1/E-LIBRARY
|
003eadf124be91e40586f2f6661b5895a93d6a60
|
c16f6d7ab2efb2be136251298f28119c2023b19f
|
refs/heads/master
| 2023-01-10T17:43:28.293541
| 2020-11-17T20:27:52
| 2020-11-17T20:27:52
| 267,709,487
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,724
|
py
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Book(models.Model):
title=models.TextField()
isbn = models.IntegerField()
summary= models.TextField()
author = models.TextField()
position=models.CharField(max_length=100)
genre=models.CharField(max_length=100)
language=models.TextField()
total_copies=models.IntegerField()
available_copies=models.IntegerField()
pic=models.ImageField(blank=True, null=True)
review=models.IntegerField()
paginate_by = 2
def __str__(self):
return self.title
class Language(models.Model):
name = models.CharField(max_length=200,
help_text="Enter the book's natural language (e.g. English, French, Japanese etc.)")
code=models.CharField(max_length=200,
help_text="Enter the book's natural language code")
def __str__(self):
return self.name
class Genre(models.Model):
name = models.CharField(max_length=200, help_text="Enter a book genre (e.g. Science Fiction, French Poetry etc.)")
def __str__(self):
return self.name
class Series(models.Model):
name = models.CharField(max_length=200, help_text="Enter a book that is a Series")
def __str__(self):
return self.name
class Book_Allotment(models.Model):
book_title=models.TextField()
book_number=models.IntegerField()
member=models.CharField(max_length=100)
email=models.CharField(max_length=100)
issue_date=models.TextField(null=True,blank=True)
return_date=models.TextField(null=True,blank=True)
book_status=models.TextField(null=True,blank=True)
def __str__(self):
return self.book_title
class Member(models.Model):
full_name=models.TextField()
address=models.TextField()
email=models.CharField(max_length=100)
phone_number=models.IntegerField()
def __str__(self):
return self.full_name
class Publisher(models.Model):
full_name=models.TextField()
email=models.CharField(max_length=100)
def __str__(self):
return self.full_name
class Author(models.Model):
full_name=models.TextField()
email=models.CharField(max_length=100)
def __str__(self):
return self.full_name
class Liberian(models.Model):
user = models.OneToOneField(User, related_name="profile", on_delete=models.CASCADE)
username=models.CharField(max_length=100, null=True,blank=True)
name= models.CharField(max_length=100)
address= models.TextField()
phone_number=models.IntegerField()
class Settings(models.Model):
image=models.ImageField(null=True,blank=True)
name=models.TextField(null=True,blank=True)
|
[
"chukslord1@gmail.com"
] |
chukslord1@gmail.com
|
74e1f700ad462338166fff4c0f99dcfb6d303a54
|
0a5db329e6ca4690f6f5f84d34ed51c0f54273b4
|
/6 Extra 3/Example 1.py
|
42e838ef0565d84e028cb1912447b5a8a3e24c4a
|
[] |
no_license
|
wiput1999/Python101
|
b7778e8feacdf95039260ba4e7d149a1fea30304
|
5d0eac78417cc1139f884652c5a6c6995dfb9e22
|
refs/heads/master
| 2021-01-20T15:31:19.170451
| 2017-04-08T16:03:38
| 2017-04-08T16:03:38
| 90,780,461
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 213
|
py
|
num = []
for i in range(10):
num.append(int(input("Num : ")))
for j in range(0,len(num)):
for i in range(0,len(num)-1-j):
        if num[i] > num[i+1]:
            num[i], num[i+1] = num[i+1], num[i]
print(num)
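# Worked pass (assumed input [5, 1, 4]): the inner loop compares adjacent pairs
# and swaps, bubbling 5 to the end -> [1, 4, 5] after the first outer pass.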
|
[
"wiput.pootong@gmail.com"
] |
wiput.pootong@gmail.com
|
c96f702c3c4d089f96e3879dc22c4ec60f1ad720
|
29c58b3bec6ac0fcdb3070efc118600ee92004da
|
/test/test_connector_sync_event_dto.py
|
488ce7ea7b82b940e80e1efabe7d0b5b32f66c67
|
[
"MIT"
] |
permissive
|
mailslurp/mailslurp-client-python
|
a2b5a0545206714bd4462ae517f242852b52aaf9
|
5c9a7cfdd5ea8bf671928023e7263847353d92c4
|
refs/heads/master
| 2023-06-23T00:41:36.257212
| 2023-06-14T10:10:14
| 2023-06-14T10:10:14
| 204,662,133
| 8
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,387
|
py
|
# coding: utf-8
"""
MailSlurp API
MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://docs.mailslurp.com/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501
The version of the OpenAPI document: 6.5.2
Contact: contact@mailslurp.dev
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import mailslurp_client
from mailslurp_client.models.connector_sync_event_dto import ConnectorSyncEventDto # noqa: E501
from mailslurp_client.rest import ApiException
class TestConnectorSyncEventDto(unittest.TestCase):
"""ConnectorSyncEventDto unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test ConnectorSyncEventDto
include_option is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = mailslurp_client.models.connector_sync_event_dto.ConnectorSyncEventDto() # noqa: E501
if include_optional :
return ConnectorSyncEventDto(
id = '0',
connector_id = '0',
sync_status = 'SUCCESS',
sync_count = 56,
message = '0',
created_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f')
)
else :
return ConnectorSyncEventDto(
id = '0',
connector_id = '0',
sync_status = 'SUCCESS',
sync_count = 56,
created_at = datetime.datetime.strptime('2013-10-20 19:20:30.00', '%Y-%m-%d %H:%M:%S.%f'),
)
def testConnectorSyncEventDto(self):
"""Test ConnectorSyncEventDto"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
|
[
"contact@mailslurp.dev"
] |
contact@mailslurp.dev
|
b9c545c2775953db809aa333c5571186f061f2f1
|
6080bfbc95ef2e4103fbd9c75c6b30402fe08aa5
|
/helpers/ccsm/ccsm2icar.py
|
1db50ddfa270a90c8105cc3a3cc03832c34efb97
|
[
"MIT"
] |
permissive
|
d-reynolds/HICAR
|
0628f2a65922b61e7c68749ccc5b4328fe7c5dec
|
0ae97ec4556624bd5fe288420f0dde2f737bf1f8
|
refs/heads/master
| 2023-05-27T09:55:13.262316
| 2023-03-31T12:43:55
| 2023-03-31T12:43:55
| 284,660,559
| 1
| 1
|
MIT
| 2020-09-29T14:12:28
| 2020-08-03T09:40:03
|
Fortran
|
UTF-8
|
Python
| false
| false
| 1,103
|
py
|
#!/usr/bin/env python
import os,traceback,sys
import config
import io_routines
import output
import convert
def main(info):
for k in info.keys():
if k!="times" and k!="lat_data" and k!="lon_data":
print(k,info[k])
print(info.times[0],info.times[-1])
curtime=info.times[0]
curpos=0
while curtime<=info.end_date:
raw_data=io_routines.load_data(curtime,info)
processed_data=convert.ccsm2icar(raw_data)
output.write_file(curtime,info,processed_data)
curpos+=raw_data.atm.ntimes
curtime=info.times[curpos]
if __name__ == '__main__':
try:
info=config.parse()
config.update_info(info)
exit_code = main(info)
if exit_code is None:
exit_code = 0
sys.exit(exit_code)
except KeyboardInterrupt as e: # Ctrl-C
raise e
except SystemExit as e: # sys.exit()
raise e
except Exception as e:
print('ERROR, UNEXPECTED EXCEPTION')
print(str(e))
traceback.print_exc()
os._exit(1)
|
[
"gutmann@ucar.edu"
] |
gutmann@ucar.edu
|
21ffe4553a4099601f96bfe38dde4dcef4cce140
|
7a3114bedb5e866fc85fecca44432d1ce60e4262
|
/where/postprocessors/__init__.py
|
e701fda648f7bd0a1601571994da4ea81345f780
|
[
"MIT"
] |
permissive
|
kartverket/where
|
99f26e5d5f2f23a79921bad0fb60cb8a99d05e7f
|
0c8c5c68adca08f97e22cab1bce10e382a7fbf77
|
refs/heads/master
| 2023-08-31T03:26:23.222100
| 2023-08-30T08:27:07
| 2023-08-30T08:27:07
| 111,802,841
| 21
| 15
|
MIT
| 2019-02-01T15:42:36
| 2017-11-23T11:44:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,007
|
py
|
"""Framework for post-processing data
Description:
------------
Each postprocessor should be defined in a separate .py-file. The function inside the .py-file that should be called
needs to be decorated with the :func:`~midgard.dev.plugins.register` decorator as follows::
from midgard.dev import plugins
@plugins.register
def gnss_linear_combination(dset):
...
"""
# Midgard imports
from midgard.dev import plugins
# Where imports
from where.lib import config
from where.lib import log
def apply_postprocessors(config_key, dset):
"""Apply postprocessors for a given session
Args:
config_key (String): The configuration key listing which postprocessors to apply.
dset (Dataset): Dataset containing analysis data.
"""
prefix = dset.vars["pipeline"]
postprocessors = config.tech[config_key].list
log.info(f"Applying postprocessors")
return plugins.call_all(package_name=__name__, plugins=postprocessors, prefix=prefix, dset=dset)
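# Hedged usage sketch (the config key name is an assumption):
#
#   apply_postprocessors("postprocessors", dset)
#
# reads the plugin list from config.tech["postprocessors"] and runs each plugin
# on the dataset via plugins.call_all.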
|
[
"ask1982@yahoo.com"
] |
ask1982@yahoo.com
|
9e616c90dd516ff508549568cd468a52e1e61faf
|
731ebf286a169b5f4dae914bcb0970c2388ba875
|
/tigereye/helpers/tetime.py
|
1def5631fe0d37df838160507a265594d8ef3325
|
[] |
no_license
|
ljxproject/tigereye
|
f8e86287b03102b713b4179a9fa023f03cfd36ea
|
406024d88450b6dcbec7a337a79339ff8c97a3e3
|
refs/heads/master
| 2020-03-26T08:48:48.595467
| 2018-08-14T13:05:26
| 2018-08-14T13:05:26
| 144,721,251
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 190
|
py
|
from datetime import datetime
DEFAULT_DATETIME_FORMAT = '%Y%m%d%H%M%S'
SIMPLE_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
def now():
return datetime.now().strftime(DEFAULT_DATETIME_FORMAT)
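# Example (illustrative timestamp): now() -> '20180814130526', i.e.
# datetime.now() rendered with DEFAULT_DATETIME_FORMAT.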
|
[
"403496369@qq.com"
] |
403496369@qq.com
|
37ce0dc192e7a51cb2d91c3c8bbc9b94d5546a56
|
16ad791ae0fcf9b00fb3f3938e3e69fd86d91715
|
/solved/probs050-099/euler055.py
|
fd7e12947d84d15f28aec0927a223aa37fab9937
|
[] |
no_license
|
chrisgilmerproj/project_euler
|
9a6cf051ddc1882d803531cb02cc356a94d9bdf4
|
5a2c72ae40cfff32b79b35bb93db2b93a84afc25
|
refs/heads/master
| 2020-05-07T16:44:22.052645
| 2011-05-03T15:35:34
| 2011-05-03T15:35:34
| 1,447,959
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,772
|
py
|
# This is a problem from the Project Euler Website
# http://projecteuler.net/
#
# Euler Problem #055
#
# Problem: How many Lychrel numbers are there below ten-thousand?
#
# Hint: If we take 47, reverse and add, 47 + 74 = 121, which is palindromic.
#
# Not all numbers produce palindromes so quickly. For example,
#
# 349 + 943 = 1292
# 1292 + 2921 = 4213
# 4213 + 3124 = 7337
#
# That is, 349 took three iterations to arrive at a palindrome.
#
# Although no one has proved it yet, it is thought that some numbers,
# like 196, never produce a palindrome. A number that never forms a
# palindrome through the reverse and add process is called a Lychrel
# number. Due to the theoretical nature of these numbers, and for the
# purpose of this problem, we shall assume that a number is Lychrel
# until proven otherwise. In addition you are given that for every
# number below ten-thousand, it will either (i) become a palindrome
# in less than fifty iterations, or, (ii) no one, with all the
# computing power that exists, has managed so far to map it to a
# palindrome. In fact, 10677 is the first number to be shown to
# require over fifty iterations before producing a palindrome:
# 4668731596684224866951378664 (53 iterations, 28-digits).
#
# Surprisingly, there are palindromic numbers that are themselves
# Lychrel numbers; the first example is 4994.
#
# NOTE: Wording was modified slightly on 24 April 2007 to emphasise
# the theoretical nature of Lychrel numbers.
#
# Written by Chris Gilmer
# Solved: 12/08/2008
# Answer: 249
#
# Notes:
if __name__ == '__main__':
i = 1
limit = 10000
lychrel = []
while i < limit:
palindrome = False
count = 1
total = i
print i
        while not palindrome and count < 50:
new_i = total
reverse_i = list(str(new_i))
reverse_i.reverse()
reverse_i = int(str(''.join(reverse_i)))
total = new_i + reverse_i
print "\t%s + %s = %s" % (new_i, reverse_i, total)
str_total = list(str(total))
reverse_total = list(str(total))
reverse_total.reverse()
if str_total == reverse_total:
palindrome = True
count += 1
if count == 50:
lychrel.append(i)
print "\n\tLychrel number:", i
i += 1
print '\nThere are %s Lychrel numbers below %s' % (len(lychrel),limit)
print 'These numbers are:', lychrel
|
[
"chris.gilmer@gmail.com"
] |
chris.gilmer@gmail.com
|
501225b7b62991c2bb7a453bcb123b336846959d
|
8b9a418950a8c3ee42e4a4692a0f690c033ba401
|
/emulators/csp_vis_sender_02/app/__main__.py
|
e9dfdda4a5725883e6a15b577f3cddae1c04009f
|
[
"BSD-3-Clause"
] |
permissive
|
jan2nov/integration-prototype
|
da5b0f8b168365856dabb644bd1d2440ebced9e8
|
5b4db822b0d49ab45d10365d5c7aaa86954dc2e0
|
refs/heads/master
| 2020-03-20T14:54:23.937780
| 2018-06-04T12:09:21
| 2018-06-04T12:09:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,006
|
py
|
# -*- coding: utf-8 -*-
"""Module main to stream SPEAD visibility data."""
import sys
import argparse
import logging
import json
from .simulator import SimpleSimulator
def _init_log(level=logging.DEBUG):
"""Initialise the logging object.
Args:
level (int): Logging level.
Returns:
Logger: Python logging object.
"""
log = logging.getLogger(__file__)
log.setLevel(level)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(level)
formatter = logging.Formatter('%(asctime)s: %(message)s',
'%Y/%m/%d-%H:%M:%S')
ch.setFormatter(formatter)
log.addHandler(ch)
return log
def _parse_command_line():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
prog='csp_visibility_sender',
description='Send fake visibility data using the SPEAD protocol.')
parser.add_argument('config_file', type=argparse.FileType('r'),
help='JSON configuration file.')
parser.add_argument('-v', '--verbose', help='Enable verbose messages.',
action='store_true')
parser.add_argument('-p', '--print_settings', help='Print settings file.',
action='store_true')
return parser.parse_args()
def main(config, log):
"""Main script function"""
# Create simulation object, and start streaming SPEAD heaps
sim = SimpleSimulator(config, log)
sim.simulate_heaps()
if __name__ == '__main__':
# Parse command line arguments
args = _parse_command_line()
# Initialise logging.
_log = _init_log(level=logging.DEBUG if args.verbose else logging.INFO)
# Load configuration.
_log.info('Loading config: {}'.format(args.config_file.name))
_config = json.load(args.config_file)
if args.print_settings:
_log.debug('Settings:\n {}'.format(json.dumps(_config, indent=4,
sort_keys=True)))
main(_config, _log)
|
[
"ben.mort@gmail.com"
] |
ben.mort@gmail.com
|
219470a0ff5bb403514695edf64cf9ab42c04142
|
6791fd830e1e3bb1b3e31bac32c8c43debc6e45b
|
/hash_table/files.py
|
bb007467042726baa23b98afaff41ac9fa007b62
|
[] |
no_license
|
niccokunzmann/pydht
|
e3205eb4f93840531ef79019c7e47156aed44d29
|
89621647455657291dbb27f966a53ab10c6862f5
|
refs/heads/master
| 2020-05-30T17:48:43.608086
| 2013-10-09T20:34:28
| 2013-10-09T20:34:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,556
|
py
|
import io
import tempfile
import os
from .. import hashing
from ..errors import ContentAltered
class HashTableFileMixin:
"""A mixin for a hashtable that adds itself to the hashtable when closed"""
def __init__(self, hash_table, *args, **kw):
super().__init__(*args, **kw)
self._hash_table = hash_table
def add_to_hashtable(self):
self.seek(0)
self._hash_table._add_hash_table_file(self)
def close(self, *args, **kw):
self.add_to_hashtable()
super().close(*args, **kw)
def __enter__(self, *args, **kw):
if hasattr(super(), '__enter__'):
super().__enter__(*args, **kw)
return self
def __exit__(self, *args, **kw):
self.add_to_hashtable()
if hasattr(super(), '__exit__'):
return super().__exit__(*args, **kw)
class BytesIO(HashTableFileMixin, io.BytesIO):
"""A io.BytesIO for a hashtable that adds itself to the hashtable when closed"""
pass
class SpooledTemporaryFile(HashTableFileMixin, tempfile.SpooledTemporaryFile):
"""A tempfile.SpooledTemporaryFile for a hashtable that adds itself to the hashtable when closed"""
pass
class HashingFile:
"""One can read from this file and the hash is updated"""
default_chunk_size = 4096
is_hash = staticmethod(hashing.is_hash)
def __init__(self, file, length = None):
self._file = file
self._read = self._file.read
if hasattr(file, 'fileno'):
self.fileno = file.fileno
self._algorithm = hashing.algorithm()
self._length = self.get_length_of_file(file, length)
@property
def length(self):
"""=> length of the file or None"""
return self._length
def get_length_of_file(self, file, length = None):
if length is not None: return length
if hasattr(file, '__len__'):
return len(file)
if hasattr(file, 'fileno'):
try: return os.fstat(file.fileno()).st_size
except OSError: pass
if hasattr(file, 'seek') and hasattr(file, 'tell'):
start = file.tell()
file.seek(0, 2) # end of stream
try: return file.tell() - start
finally: file.seek(start)
if hasattr(file, 'getvalue'):
return len(file.getvalue())
def read(self, *args):
bytes = self._read(*args)
self._algorithm.update(bytes)
return bytes
@property
def hash(self):
return self._algorithm.hexdigest()
def __len__(self):
if self._length is None:
raise TypeError('length not supported for {}'.format(self._file))
return self._length
def __iter__(self):
# should be readline but is not required yet
data = self.read(self.default_chunk_size)
while data:
yield data
data = self.read(self.default_chunk_size)
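# Illustrative use (hypothetical data): wrap any readable file-like object to
# hash its content while streaming it.
#
#   import io
#   hf = HashingFile(io.BytesIO(b"hello"))
#   for chunk in hf:
#       pass              # consume the stream, updating the hash as we go
#   digest = hf.hash      # hex digest under hashing.algorithm()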
class HashCheckingFile(HashingFile):
def __init__(self, expected_hash, file, length = None):
assert self.is_hash(expected_hash)
super().__init__(file, length = length)
self.expected_hash = expected_hash
self._bytes_read = 0
@property
def bytes_read(self):
"""=> the number of bytes read from this file"""
return self._bytes_read
def is_valid(self):
"""=> whether the hash of the content matches"""
return self.hash == self.expected_hash and self.is_read_completed()
def is_read_completed(self):
"""=> whether something can be expected to be read from the file
if the file has a length"""
return self.bytes_read == self.length
def read(self, *args):
"""read from the file and at check for consistency when its end is reached"""
bytes = super().read(*args)
self._bytes_read += len(bytes)
if self.is_read_completed() and not self.is_valid():
return self.error_hash_does_not_match()
return bytes
def error_hash_does_not_match(self):
"""Throw an error that the content differs from the expected"""
raise ContentAltered("Expected the hash {} for the ressource {}"
" but got the hash {}".format(self.expected_hash,
self._file,
self.hash))
class NonCheckingBytesIO(io.BytesIO):
@staticmethod
def is_valid():
return True
__all__ = ['BytesIO', 'SpooledTemporaryFile', 'HashingFile', 'HashCheckingFile']
|
[
"niccokunzmann@googlemail.com"
] |
niccokunzmann@googlemail.com
|
d95f07794f027cd5d62ae5f538ba541367149f10
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_97/492.py
|
a1f42066fd7dc40e8cdcf88aee4718a892fe6447
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 657
|
py
|
def find_recycled(n, b):
ns = str(n)
reclist = []
for i in xrange(1, len(ns), 1):
nrec = ns[i:len(ns)] + ns[0:i]
if nrec[0] != "0":
            nrec = int(nrec)
if nrec <= b and nrec > n and (n, nrec) not in reclist:
reclist.append((n,nrec))
return reclist
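# Example: find_recycled(12, 2000000) -> [(12, 21)]; rotations that would gain
# a leading zero (e.g. '01' from 10) are rejected by the nrec[0] != "0" guard.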
inp = file("input.in")
T = int(inp.readline())
out = file("output.txt", "w")
d = []
for n in xrange(12, 2000000, 1):
d.extend(find_recycled(n, 2000000))
for i in xrange(T):
a, b = inp.readline().strip().split()
    a = int(a)
    b = int(b)
nrec = 0
for item in d:
if item[0] > b:
break
if item[0] >= a and item[1] <= b:
nrec += 1
out.write("Case #%d: %d\n" %(i + 1, nrec))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
b211f6d3238e69fee4c33b2e8d89b34fe17e5730
|
f56a16a03346eb2854eaeae0a13a92a222806551
|
/test/functional/interface_bitcoin_cli.py
|
74867f2d89fe0cdd48537eea6e2d53ac2dc0d1e0
|
[
"MIT"
] |
permissive
|
minblock/mishcoin
|
77b64c00043557cde6b49d4f58612b8ff670d8f6
|
65d47897b2413b83480d1f04eb2031f62b36a708
|
refs/heads/master
| 2021-05-22T23:56:07.613136
| 2020-04-05T03:14:27
| 2020-04-05T03:14:27
| 253,146,024
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,408
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mishcoin-cli"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_process_error, get_auth_cookie
class TestBitcoinCli(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
"""Main test logic"""
cli_response = self.nodes[0].cli("-version").send_cli()
assert("Mishcoin Core RPC client version" in cli_response)
self.log.info("Compare responses from getwalletinfo RPC and `mishcoin-cli getwalletinfo`")
if self.is_wallet_compiled():
cli_response = self.nodes[0].cli.getwalletinfo()
rpc_response = self.nodes[0].getwalletinfo()
assert_equal(cli_response, rpc_response)
self.log.info("Compare responses from getblockchaininfo RPC and `mishcoin-cli getblockchaininfo`")
cli_response = self.nodes[0].cli.getblockchaininfo()
rpc_response = self.nodes[0].getblockchaininfo()
assert_equal(cli_response, rpc_response)
user, password = get_auth_cookie(self.nodes[0].datadir)
self.log.info("Test -stdinrpcpass option")
assert_equal(0, self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input=password).getblockcount())
assert_raises_process_error(1, "Incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdinrpcpass', input="foo").echo)
self.log.info("Test -stdin and -stdinrpcpass")
assert_equal(["foo", "bar"], self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input=password + "\nfoo\nbar").echo())
assert_raises_process_error(1, "Incorrect rpcuser or rpcpassword", self.nodes[0].cli('-rpcuser=%s' % user, '-stdin', '-stdinrpcpass', input="foo").echo)
self.log.info("Test connecting to a non-existing server")
assert_raises_process_error(1, "Could not connect to the server", self.nodes[0].cli('-rpcport=1').echo)
self.log.info("Test connecting with non-existing RPC cookie file")
assert_raises_process_error(1, "Could not locate RPC credentials", self.nodes[0].cli('-rpccookiefile=does-not-exist', '-rpcpassword=').echo)
self.log.info("Make sure that -getinfo with arguments fails")
assert_raises_process_error(1, "-getinfo takes no arguments", self.nodes[0].cli('-getinfo').help)
self.log.info("Compare responses from `mishcoin-cli -getinfo` and the RPCs data is retrieved from.")
cli_get_info = self.nodes[0].cli('-getinfo').send_cli()
if self.is_wallet_compiled():
wallet_info = self.nodes[0].getwalletinfo()
network_info = self.nodes[0].getnetworkinfo()
blockchain_info = self.nodes[0].getblockchaininfo()
assert_equal(cli_get_info['version'], network_info['version'])
assert_equal(cli_get_info['protocolversion'], network_info['protocolversion'])
if self.is_wallet_compiled():
assert_equal(cli_get_info['walletversion'], wallet_info['walletversion'])
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['blocks'], blockchain_info['blocks'])
assert_equal(cli_get_info['timeoffset'], network_info['timeoffset'])
assert_equal(cli_get_info['connections'], network_info['connections'])
assert_equal(cli_get_info['proxy'], network_info['networks'][0]['proxy'])
assert_equal(cli_get_info['difficulty'], blockchain_info['difficulty'])
assert_equal(cli_get_info['testnet'], blockchain_info['chain'] == "test")
if self.is_wallet_compiled():
assert_equal(cli_get_info['balance'], wallet_info['balance'])
assert_equal(cli_get_info['keypoololdest'], wallet_info['keypoololdest'])
assert_equal(cli_get_info['keypoolsize'], wallet_info['keypoolsize'])
assert_equal(cli_get_info['paytxfee'], wallet_info['paytxfee'])
assert_equal(cli_get_info['relayfee'], network_info['relayfee'])
# unlocked_until is not tested because the wallet is not encrypted
if __name__ == '__main__':
TestBitcoinCli().main()
|
[
"POSTMASTER@provgn.com"
] |
POSTMASTER@provgn.com
|
655f1c6517e4c4109ba59bf8025f9fc0cb629994
|
41a0c25333100fd551e7a49ceec128c1cd80857f
|
/Scripts/doSuperResolution.py
|
7a61ed8b685cd6ce72250a81e478dc58cf0f5443
|
[] |
no_license
|
ANTsXNet/MRISuperResolution
|
7a6a6b8e3290d993f79a8d0bc9aa357fee755cb0
|
3568ad193e124d2000a39e89f11e50231443fff6
|
refs/heads/master
| 2021-08-16T10:16:41.781465
| 2020-09-22T15:37:53
| 2020-09-22T15:37:53
| 223,532,949
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,879
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et
import os
import sys
import time
import numpy as np
import keras
import ants
import antspynet
args = sys.argv
if len(args) != 3:
help_message = ("Usage: python doSuperResolution.py inputFile outputFile")
raise AttributeError(help_message)
else:
input_file_name = args[1]
output_file_name = args[2]
start_time_total = time.time()
print("Reading ", input_file_name)
start_time = time.time()
input_image = ants.image_read(input_file_name)
end_time = time.time()
elapsed_time = end_time - start_time
print(" (elapsed time: ", elapsed_time, " seconds)")
dimension = len(input_image.shape)
input_image_list = list()
if dimension == 4:
input_image_list = ants.ndimage_to_list(input_image)
elif dimension == 2:
raise ValueError("Model for 3-D or 4-D images only.")
elif dimension == 3:
input_image_list.append(input_image)
model = antspynet.create_deep_back_projection_network_model_3d(
(*input_image_list[0].shape, 1),
number_of_outputs=1, number_of_base_filters=64,
number_of_feature_filters=256, number_of_back_projection_stages=7,
convolution_kernel_size=(3, 3, 3), strides=(2, 2, 2),
number_of_loss_functions=1)
print( "Loading weights file" )
start_time = time.time()
weights_file_name = "./mriSuperResolutionWeights.h5"
if not os.path.exists(weights_file_name):
weights_file_name = antspynet.get_pretrained_network("mriSuperResolution", weights_file_name)
model.load_weights(weights_file_name)
end_time = time.time()
elapsed_time = end_time - start_time
print(" (elapsed time: ", elapsed_time, " seconds)")
number_of_image_volumes = len(input_image_list)
output_image_list = list()
for i in range(number_of_image_volumes):
print("Applying super resolution to image", i, "of", number_of_image_volumes)
start_time = time.time()
input_image = ants.iMath(input_image_list[i], "TruncateIntensity", 0.0001, 0.995)
output_sr = antspynet.apply_super_resolution_model_to_image(input_image, model, target_range=(127.5, -127.5))
input_image_resampled = ants.resample_image_to_target(input_image, output_sr)
output_image_list.append(antspynet.regression_match_image(output_sr, input_image_resampled, poly_order = 2))
end_time = time.time()
elapsed_time = end_time - start_time
print(" (elapsed time:", elapsed_time, "seconds)")
print("Writing output image.")
if number_of_image_volumes == 1:
ants.image_write( output_image_list[0], output_file_name)
else:
output_image = ants.list_to_ndimage(input_image, output_image_list)
ants.image_write(output_image, output_file_name)
end_time_total = time.time()
elapsed_time_total = end_time_total - start_time_total
print( "Total elapsed time: ", elapsed_time_total, "seconds" )
|
[
"ntustison@gmail.com"
] |
ntustison@gmail.com
|
ca829bb3b5c37e7e3f12c3fdecba9401acdbba5d
|
bccfab4d853f7417401a084be95de293e66ccd2a
|
/mySpider/auxiliary_files/Exhibition136_supporting.py
|
7999b97dede8993a5006d00cee82bfa1e3c14a46
|
[] |
no_license
|
CS1803-SE/The-First-Subsystem
|
a8af03ce04a9de72a6b78ece6411bac4c02ae170
|
4829ffd6a83133479c385d6afc3101339d279ed6
|
refs/heads/main
| 2023-05-06T02:32:08.751139
| 2021-05-24T06:09:37
| 2021-05-24T06:09:37
| 363,400,147
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# @Time : 2021/5/11 20:14
# @Author : 10711
# @File : Exhibition136_supporting.py
# @Software: PyCharm
class Exhibition136Supporting:
startUrl = ['http://www.changjiangcp.com/view/16351.html']
|
[
"1300978939@qq.com"
] |
1300978939@qq.com
|
e340886d15839bcf81591484b8f866d8ee964e49
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02806/s940830540.py
|
373fb938a67908fb9d236775b0b7b61aa4ef974e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 195
|
py
|
N = int(input())
st = [input().split() for i in range(N)]
X = input()
flg = 0
ans = 0
for s, t in st:
if flg:
ans += int(t)
else:
if s == X:
flg = 1
print(ans)
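# Example: N=3 with rows ('a', '1'), ('b', '2'), ('c', '3') and X = 'a' prints
# 5 -- the durations of every song after X are summed, X's own row excluded.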
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
c1b942cfb9f3e78d5166a2ba3efb2c10a7cea81b
|
ab5cdf8f2de94c327e4679da84f941b1f3c04db4
|
/kubernetes/test/test_version_api.py
|
2949fd9417eb62fa0c11cfb164bab7abe4314d78
|
[
"Apache-2.0"
] |
permissive
|
diannaowa/client-python
|
a4a92a125178db26004eaef5062f9b1b581b49a8
|
5e268fb0b6f21a535a14a7f968b84ed4486f6774
|
refs/heads/master
| 2020-12-02T22:06:03.687696
| 2017-06-30T21:42:50
| 2017-06-30T21:42:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 832
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.apis.version_api import VersionApi
class TestVersionApi(unittest.TestCase):
""" VersionApi unit test stubs """
def setUp(self):
self.api = kubernetes.client.apis.version_api.VersionApi()
def tearDown(self):
pass
def test_get_code(self):
"""
Test case for get_code
"""
pass
if __name__ == '__main__':
unittest.main()
|
[
"mehdy@google.com"
] |
mehdy@google.com
|
e125277049f9d9995016ddc244b396fee1ba6e28
|
40bc68b22e37e77ff7d7ed75f08b7195d16f9fde
|
/2019/day07/solutionsvm.py
|
4a2d5b272357149ee73475ed6667046cf732278d
|
[] |
no_license
|
fuglede/adventofcode
|
1dd61b3bfd8db0346c8cb6838da8da5adf3d5296
|
e3c85daf96889dd7aac04a0e741d1409f74e549d
|
refs/heads/master
| 2023-09-03T08:52:48.575960
| 2023-08-26T18:29:19
| 2023-08-26T18:29:19
| 159,918,186
| 59
| 14
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 676
|
py
|
from collections import deque
from itertools import cycle, permutations
from math import inf
from vm import VM, read_program
p07 = read_program(7)
# Part one
m = -inf
for perm in permutations(range(5)):
    signal = 0
    for phase in perm:
        signal = next(VM(p07, deque([phase, signal])))
    m = max(m, signal)
print(m)
# Part two
m = -inf
for perm in permutations(range(5, 10)):
vms = [VM(p07, deque([phase])) for phase in perm]
signal = 0
try:
for i in cycle(range(5)):
vms[i].inputs.append(signal)
signal = next(vms[i])
except StopIteration:
m = max(m, signal)
print(m)
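# Part two wires the five VMs into a feedback ring: each VM's output feeds the
# next VM's input queue until an amplifier halts (StopIteration), at which
# point the last signal produced is a candidate for the maximum.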
|
[
"github@fuglede.dk"
] |
github@fuglede.dk
|
d6c70c773646c56f5d50057fddd579b9c60a264a
|
213be849a50c84e9fc01aade5ff064a9aa7eb8c6
|
/nautobot_golden_config/__init__.py
|
133f8c98424367855de2c97353490af77f994942
|
[
"Apache-2.0"
] |
permissive
|
nniehoff/nautobot-plugin-golden-config
|
c8d62b381727c9ba76740e4dfa81835561738840
|
5c5f051d244b277dc9d1dbd6a11c9b236ee9a229
|
refs/heads/main
| 2023-08-03T22:11:46.669288
| 2021-09-24T14:56:28
| 2021-09-24T14:56:28
| 413,983,604
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 938
|
py
|
"""Plugin declaration for nautobot_golden_config."""
__version__ = "0.9.7"
from nautobot.extras.plugins import PluginConfig
class GoldenConfig(PluginConfig):
"""Plugin configuration for the nautobot_golden_config plugin."""
name = "nautobot_golden_config"
verbose_name = "Golden Configuration"
version = __version__
author = "Network to Code, LLC"
author_email = "opensource@networktocode.com"
description = "A plugin for managing Golden Configurations."
base_url = "golden-config"
required_settings = []
# min_version = "0"
# max_version = "100"
default_settings = {
"enable_backup": True,
"enable_golden": True,
"enable_compliance": True,
"enable_sotagg": True,
"per_feature_width": 13,
"per_feature_height": 4,
"per_feature_bar_width": 0.3,
}
caching_config = {}
config = GoldenConfig # pylint:disable=invalid-name
|
[
"ken@celenza.org"
] |
ken@celenza.org
|
2bca0b2d85d4f4a26bf43a98631bde8cfd883738
|
e2cd4f444b18adca671ae2ac8856594b22c6d2ae
|
/arc/migrations/0091_remove_story_details_change_date.py
|
1fd910194b0f36e1c5aaa93c79ea00542003a3f9
|
[] |
no_license
|
anshumanairy/Sprint-Management
|
36c54c03b66a0d02071a337e8217144a0b0c9578
|
0c4e8fe87ec4099253d894b7876f0b5b914a2652
|
refs/heads/master
| 2022-12-28T02:46:25.847713
| 2020-10-02T10:02:13
| 2020-10-02T10:02:13
| 195,804,893
| 0
| 0
| null | 2020-10-02T10:02:14
| 2019-07-08T12:11:27
|
CSS
|
UTF-8
|
Python
| false
| false
| 341
|
py
|
# Generated by Django 2.2 on 2019-08-21 10:47
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('arc', '0090_story_details_change_date'),
]
operations = [
migrations.RemoveField(
model_name='story_details',
name='change_date',
),
]
|
[
"anshuman.airy04@gmail.com"
] |
anshuman.airy04@gmail.com
|
95592d26dae1df0cb62329b8a85140998af39521
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-2/7c65ad11e2914bc9774abd37cdd1ac455f1c9433-<list_all>-fix.py
|
14a5a1ef5bf50d8cde4ae3f6831042b8b6ceec9b
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
def list_all(self):
self.log('List all items')
try:
response = self.storage_client.storage_accounts.list()
except Exception as exc:
self.fail('Error listing all items - {0}'.format(str(exc)))
return response
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
369727caa9b1f274cb3338d70531988c54568528
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_098/ch86_2020_06_21_19_53_15_222963.py
|
1fcf5f95c6e8c012df1e1136e94771e9acbee12f
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
import csv
with open('dados.csv','r') as csv_file, open('dados.tsv', 'w') as tsv_file:
csv_file,tsv_file = csv.reader(csv_file), csv.writer(tsv_file, delimiter='\t')
for linha in csv_file:
tsv_file.writerow(linha)
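# Example: a dados.csv row "a,b,c" is rewritten to dados.tsv as "a<TAB>b<TAB>c";
# csv.writer with delimiter='\t' takes care of any quoting along the way.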
|
[
"you@example.com"
] |
you@example.com
|
0a1229effa14b40210a1abe973f19d6d8e697ee6
|
384d31fe319844c171891f7453b73df84a77bdcc
|
/src/apps_base/order/constants.py
|
efbdbf9e0bf0fc6b7c9cad05af960452d6cb5749
|
[] |
no_license
|
danielhuamani/fanntop
|
0227a1b5337a45b3b91ab16c614e206f10efc891
|
1adb65f617f1e418cad75588fa60af909c1e690a
|
refs/heads/master
| 2021-03-22T00:46:51.355400
| 2018-10-06T05:46:07
| 2018-10-06T05:46:07
| 114,432,640
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 485
|
py
|
PENDIENTE = "PE"
RECHAZADO = "RC"
PAGADO = "PG"
CANCELADO = "CN"
DENEGADO = "DN"
PROCESO = 'PR_1'
PROCESO_2 = 'PR_2'
ORDER_VALIDATED = 'VAL'
ORDER_USED = 'USE'
REEMBOLSO = 'RE'
TYPE_STATUS = (
(PROCESO, "Pendiente"),
("RC", "Rechazado"),
("PG", "Pagado"),
# ("PE", "Pendiente"),
# (PROCESO_2, "Proceso Paso 2"),
("RE", "Reembolso"),
)
ALMACEN = 'AL'
TYPE_STATUS_SHIPPING = (
(ALMACEN, "En Almacén"),
("DS", "En Despacho"),
("EG", "Entregado"),
)
|
[
"danielhuamani15@gmail.com"
] |
danielhuamani15@gmail.com
|
cf39c48564e9b19e551240ce121a93cc7743fb4b
|
dbf4f74403dec9c5531118a858c7b208c43323d4
|
/airflow/dags/lib/common.py
|
1ce3c55bbcfb30bcba974a240da78b23c1a92130
|
[] |
no_license
|
LaoKpa/short_sale_volume
|
03208c6a5830b61a8e98ba3854b0ada45ee2a666
|
02c2fb9f91ca94845768554074a5a3018e87b0fe
|
refs/heads/master
| 2022-04-06T15:05:19.678439
| 2020-02-27T20:05:18
| 2020-02-27T20:05:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 613
|
py
|
from airflow.configuration import conf as airflow_config
import configparser
import json
import os
config = configparser.ConfigParser()
airflow_dir = os.path.split(airflow_config['core']['dags_folder'])[0]
config.read('{}/config.cfg'.format(airflow_dir))
CLUSTER_NAME = config['AWS']['CLUSTER_NAME']
VPC_ID = config['AWS']['VPC_ID']
SUBNET_ID = config['AWS']['SUBNET_ID']
if config['App']['STOCKS'] == '':
STOCKS = []
else:
STOCKS = json.loads(config.get('App', 'STOCKS').replace("'", '"'))
if config['App']['STOCK_LIMITS'] == '':
LIMIT = None
else:
LIMIT = int(config['App']['STOCK_LIMITS'])
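# Example (illustrative config value): STOCKS = ['AAPL', 'MSFT'] in config.cfg
# becomes the Python list ['AAPL', 'MSFT'] -- single quotes are swapped for
# double quotes so json.loads() will accept the string.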
|
[
"teguhwpurwanto@gmail.com"
] |
teguhwpurwanto@gmail.com
|
877b61c1bf6a0f9f65e65d4dddc3d75e1788ad23
|
3ff1c245d945acf82e48f388d2457204e202275f
|
/desafio/migrations/0022_atributos.py
|
d65eeb6862db78a6105419422e55ae646f1da42a
|
[] |
no_license
|
rauldosS/desafio_compiladores
|
075e7dcb3a167d20d71928727db6c1cb500e23af
|
da01adf41c47dafd50b1487bb4ad8d27c4f2d199
|
refs/heads/main
| 2023-01-03T09:13:18.990618
| 2020-10-29T01:25:59
| 2020-10-29T01:25:59
| 305,174,524
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 702
|
py
|
# Generated by Django 3.1.2 on 2020-10-28 23:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('desafio', '0021_auto_20201026_0732'),
]
operations = [
migrations.CreateModel(
name='Atributos',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('atributo', models.CharField(max_length=255)),
],
options={
'verbose_name': 'Atributo',
'verbose_name_plural': 'Atributos',
'ordering': ('id', 'atributo'),
},
),
]
|
[
"48498755+rauldosS@users.noreply.github.com"
] |
48498755+rauldosS@users.noreply.github.com
|
d01c0cfc4e6c223bd56c8cba997a671ee074cc0a
|
642b7138da231474154a83c2dc3b4a2a42eb441b
|
/array/sub_arr_with_least_avg.py
|
4adb849677697c4f94b4740e71555febb2a85ea6
|
[] |
no_license
|
somanshu/python-pr
|
15465ed7182413591c709f9978420f6a16c9db91
|
7bfee6fc2a8340ba3e343f991a1da5bdb4ae9cb2
|
refs/heads/master
| 2020-07-02T17:21:37.132495
| 2019-08-22T08:04:11
| 2019-08-22T08:04:11
| 201,602,731
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 690
|
py
|
# https://www.geeksforgeeks.org/find-subarray-least-average/
def leastAvg(arr, k):
    # summation[i] holds the prefix sum of arr[0:i]
    summation = [0, arr[0]]
    min_avg = float('inf')
    min_avg_last_index = None
    for i in range(2, len(arr) + 1):
        summation.append(summation[i-1] + arr[i-1])
    # slide a window of size k; the first full window ends at index k
    for i in range(k, len(arr) + 1):
        cur_sum = summation[i] - summation[i-k]
        avg_sum = cur_sum // k
        if avg_sum < min_avg:
            min_avg = avg_sum
            min_avg_last_index = i - 1
    return (min_avg, min_avg_last_index - k + 1, min_avg_last_index)
# arr = [3, 7, 90, 20, 10, 50, 40]  # alternative test input
arr = [3, 7, 5, 20, -10, 0, 12]
k = 2
res = leastAvg(arr, k)
print(res)
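# Worked example: arr = [3, 7, 5, 20, -10, 0, 12], k = 2 -> (-5, 4, 5):
# the size-2 window arr[4:6] = [-10, 0] has the least average (-10 // 2 = -5).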
|
[
"somanshu@logos.social"
] |
somanshu@logos.social
|
756c1be0b975f8ff483955c9a83fcd8608da7e75
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_136/3240.py
|
a6126108a389ce9f2e9429f3b4ddc959c34189ad
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 776
|
py
|
#!/usr/bin/env python
import sys
# import numpy
import operator
def main(filename):
f = open(filename, 'r')
T = int(f.readline())
for t in xrange(T):
result = solve(f)
print "Case #%i: %.7f" % (t+1, result)
def solve(f):
F0 = 2.0
C, F, X = map(float, f.readline().split())
best_time = X / F0
current_time = 0.0
current_rate = F0
while True:
current_time += C / current_rate
current_rate += F
new_completion_time = X / current_rate + current_time
if new_completion_time < best_time:
best_time = new_completion_time
else:
break
return best_time
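# Greedy idea: keep buying a farm while it shortens the projected finish time
# (elapsed + X / rate); the first purchase that fails to help ends the loop.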
if __name__ == "__main__":
sys.exit(main(sys.argv[1]))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
0afde19c13d759b12976085d7ffb89bde8ee1f5e
|
9aaa39f200ee6a14d7d432ef6a3ee9795163ebed
|
/Algorithm/Python/812. Largest Triangle Area.py
|
892e5661352025b255cbf0cabc57d72ec735f4c0
|
[] |
no_license
|
WuLC/LeetCode
|
47e1c351852d86c64595a083e7818ecde4131cb3
|
ee79d3437cf47b26a4bca0ec798dc54d7b623453
|
refs/heads/master
| 2023-07-07T18:29:29.110931
| 2023-07-02T04:31:00
| 2023-07-02T04:31:00
| 54,354,616
| 29
| 16
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,046
|
py
|
# -*- coding: utf-8 -*-
# Created on Mon Apr 09 2018 15:45:58
# Author: WuLC
# EMail: liangchaowu5@gmail.com
# get the area of triangle with Heron's formula
# reference https://en.wikipedia.org/wiki/Heron%27s_formula
class Solution(object):
def largestTriangleArea(self, points):
"""
:type points: List[List[int]]
:rtype: float
"""
n = len(points)
result = 0
for i in xrange(n):
for j in xrange(i+1, n):
for k in xrange(j+1, n):
result = max(result, self.area(points[i], points[j], points[k]))
return result
def area(self, p1, p2, p3):
a = ((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2) ** 0.5
b = ((p1[0] - p3[0])**2 + (p1[1] - p3[1])**2) ** 0.5
c = ((p2[0] - p3[0])**2 + (p2[1] - p3[1])**2) ** 0.5
        if a+b <= c or a+c <= b or b+c <= a:  # three points may not be able to construct a triangle
return 0
s = (a+b+c)/2.0
return (s*(s-a)*(s-b)*(s-c))**0.5
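# Heron sanity check (assumed points): the 3-4-5 right triangle
# [0, 0], [3, 0], [0, 4] gives s = 6 and sqrt(6*3*2*1) = 6.0.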
|
[
"liangchaowu5@gmail.com"
] |
liangchaowu5@gmail.com
|
f1196a410af03757d39757835dc4e5a8603ad26a
|
45de7d905486934629730945619f49281ad19359
|
/xlsxwriter/test/comparison/test_comment03.py
|
28e4c327e5958ca3a4b6bd46ac2f1bfae30638fe
|
[
"BSD-2-Clause"
] |
permissive
|
jmcnamara/XlsxWriter
|
599e1d225d698120ef931a776a9d93a6f60186ed
|
ab13807a1be68652ffc512ae6f5791d113b94ee1
|
refs/heads/main
| 2023-09-04T04:21:04.559742
| 2023-08-31T19:30:52
| 2023-08-31T19:30:52
| 7,433,211
| 3,251
| 712
|
BSD-2-Clause
| 2023-08-28T18:52:14
| 2013-01-04T01:07:06
|
Python
|
UTF-8
|
Python
| false
| false
| 961
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2023, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("comment03.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with comments."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write("A1", "Foo")
worksheet.write_comment("A1", "Some text")
worksheet.write_comment("XFD1048576", "Some text")
worksheet.set_comments_author("John")
workbook.close()
self.assertExcelEqual()
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
09dddad36a31c3941f9759d85f109af7ad424d73
|
28c0bcb13917a277cc6c8f0a34e3bb40e992d9d4
|
/koku/api/migrations/0010_auto_20200128_2138.py
|
7aceb81ec568991a0e9b1d2e34bcf43e8c3ff8f9
|
[
"Apache-2.0"
] |
permissive
|
luisfdez/koku
|
43a765f6ba96c2d3b2deda345573e1d97992e22f
|
2979f03fbdd1c20c3abc365a963a1282b426f321
|
refs/heads/main
| 2023-06-22T13:19:34.119984
| 2021-07-20T12:01:35
| 2021-07-20T12:01:35
| 387,807,027
| 0
| 1
|
Apache-2.0
| 2021-07-20T13:50:15
| 2021-07-20T13:50:14
| null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
# Generated by Django 2.2.8 on 2020-01-28 21:38
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("api", "0009_providerstatus_squashed_0042_auto_20200116_2048")]
operations = [
migrations.RunSQL(
"""
UPDATE public.api_provider
SET type = 'Azure'
WHERE type = 'AZURE'
;
UPDATE public.api_providerinfrastructuremap
SET infrastructure_type = 'Azure'
WHERE infrastructure_type = 'AZURE'
;
"""
)
]
|
[
"noreply@github.com"
] |
luisfdez.noreply@github.com
|
de6c66ecf43e841a117ca0be3fd1b576c402f4e8
|
51e7336e8bb447187cbe6ede2910f40700316dc1
|
/simics/monitorCore/diddler.py
|
9bb67e16e8360d641a011f184d6c1538ec82b788
|
[] |
no_license
|
hacker-steroids/RESim
|
69bac74a1b119c54d03b9ea0fda7a85cc45ea854
|
94498c699575f5078de415fac8c517d520cb2f94
|
refs/heads/master
| 2020-05-30T12:33:53.799610
| 2019-06-01T00:51:20
| 2019-06-01T00:51:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,721
|
py
|
#!/usr/bin/env python
import os
import re
from simics import *
def nextLine(fh):
retval = None
while retval is None:
line = fh.readline()
if line is None or len(line) == 0:
break
if line.startswith('#'):
continue
retval = line.strip('\n')
return retval
class Diddler():
class Fiddle():
def __init__(self, match, was, becomes, cmds=[]):
self.match = match
self.was = was
self.becomes = becomes
self.cmds = cmds
def __init__(self, path, mem_utils, cell_name, lgr):
self.kind = None
self.fiddles = []
self.mem_utils = mem_utils
self.lgr = lgr
self.stop_hap = None
self.cell_name = cell_name
self.path = path
self.operation = None
if os.path.isfile(path):
with open(path) as fh:
done = False
kind_line = nextLine(fh)
parts = kind_line.split()
self.kind = parts[0]
if len(parts) > 1:
self.operation = parts[1]
else:
self.lgr.error('Diddle command missing operation %s' % kind_line)
return
self.lgr.debug('Diddle of kind %s cell is %s' % (self.kind, self.cell_name))
if self.kind == 'full_replace':
match = nextLine(fh)
becomes=''
while not done:
line = fh.readline()
if line is None or len(line)==0:
done = True
break
if len(becomes)==0:
becomes=line
else:
becomes=becomes+line
self.fiddles.append(self.Fiddle(match, None, becomes))
elif self.kind == 'match_cmd':
match = nextLine(fh)
was = nextLine(fh)
cmds=[]
while not done:
line = nextLine(fh)
if line is None or len(line)==0:
done = True
break
cmds.append(line)
self.fiddles.append(self.Fiddle(match, was, None, cmds=cmds))
elif self.kind == 'sub_replace':
while not done:
match = nextLine(fh)
if match is None:
done = True
break
was = nextLine(fh)
becomes = nextLine(fh)
self.fiddles.append(self.Fiddle(match, was, becomes))
else:
print('Unknown diddler kind: %s' % self.kind)
return
self.lgr.debug('Diddler loaded %d fiddles of kind %s' % (len(self.fiddles), self.kind))
else:
self.lgr.debug('Diddler, no file at %s' % path)
def subReplace(self, cpu, s, addr):
rm_this = None
for fiddle in self.fiddles:
#self.lgr.debug('Diddle checkString %s to %s' % (fiddle.match, s))
if re.search(fiddle.match, s, re.M|re.I) is not None:
if re.search(fiddle.was, s, re.M|re.I) is not None:
#self.lgr.debug('Diddle replace %s with %s in \n%s' % (fiddle.was, fiddle.becomes, s))
new_string = re.sub(fiddle.was, fiddle.becomes, s)
self.mem_utils.writeString(cpu, addr, new_string)
else:
#self.lgr.debug('Diddle found match %s but not string %s in\n%s' % (fiddle.match, fiddle.was, s))
pass
rm_this = fiddle
break
return rm_this
def fullReplace(self, cpu, s, addr):
rm_this = None
fiddle = self.fiddles[0]
if fiddle.match in s:
count = len(fiddle.becomes)
self.mem_utils.writeString(cpu, addr, fiddle.becomes)
esp = self.mem_utils.getRegValue(cpu, 'esp')
count_addr = esp + 3*self.mem_utils.WORD_SIZE
self.mem_utils.writeWord(cpu, count_addr, count)
#cpu.iface.int_register.write(reg_num, count)
self.lgr.debug('diddle fullReplace %s in %s wrote %d bytes' % (fiddle.match, s, count))
rm_this = fiddle
#SIM_break_simulation('deeedee')
return rm_this
def stopAlone(self, fiddle):
self.stop_hap = SIM_hap_add_callback("Core_Simulation_Stopped", self.stopHap, fiddle)
SIM_break_simulation('matchCmd')
def matchCmd(self, s):
''' The match lets us stop looking regardless of whether or not the values are
bad. The "was" tells us a bad value, i.e., reason to run commands '''
rm_this = None
fiddle = self.fiddles[0]
#self.lgr.debug('look for match of %s in %s' % (fiddle.match, s))
if re.search(fiddle.match, s, re.M|re.I) is not None:
#self.lgr.debug('found match of %s in %s' % (fiddle.match, s))
rm_this = fiddle
if re.search(fiddle.was, s, re.M|re.I) is not None:
SIM_run_alone(self.stopAlone, fiddle)
return rm_this
def checkString(self, cpu, addr, count):
retval = False
byte_string, byte_array = self.mem_utils.getBytes(cpu, count, addr)
s = ''.join(map(chr,byte_array))
if self.kind == 'sub_replace':
rm_this = self.subReplace(cpu, s, addr)
elif self.kind == 'full_replace':
rm_this = self.fullReplace(cpu, s, addr)
elif self.kind == 'match_cmd':
rm_this = self.matchCmd(s)
else:
print('Unknown kind %s' % self.kind)
return
if rm_this is not None:
self.lgr.debug('Diddler checkString found match cell %s path %s' % (self.cell_name, self.path))
self.fiddles.remove(rm_this)
if len(self.fiddles) == 0:
self.lgr.debug('Diddler checkString removed last fiddle')
retval = True
return retval
def stopHap(self, fiddle, one, exception, error_string):
SIM_hap_delete_callback_id("Core_Simulation_Stopped", self.stop_hap)
self.lgr.debug('Diddler stop hap')
for cmd in fiddle.cmds:
SIM_run_command(cmd)
def getOperation(self):
return self.operation
def getPath(self):
return self.path
if __name__ == '__main__':
print('begin')
    # NOTE: stale smoke test -- Diddler.__init__ also expects mem_utils, cell_name and lgr
    d = Diddler('dog.diddle')
|
[
"mfthomps@nps.edu"
] |
mfthomps@nps.edu
|
baebd8438e36c15261b39d8240930bbf3b21cfac
|
42fdf741bf64ea2e63d1546bb08356286f994505
|
/macrocab_ex2/rasp30_vmm_frame6.py
|
5ccb702b8fe5f8f28ed7cfe61ee3204261d47609
|
[] |
no_license
|
skim819/RASP_Workspace_sihwan
|
7e3cd403dc3965b8306ec203007490e3ea911e3b
|
0799e146586595577c8efa05c647b8cb92b962f4
|
refs/heads/master
| 2020-12-24T05:22:25.775823
| 2017-04-01T22:15:18
| 2017-04-01T22:15:18
| 41,511,563
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
'cab_vmm.O[0:5]' ,[range( 29, 23, -1), 21]] ## o/ps connectn to i/ps?? ummmmm !!! ---we need this
self.li = smDictFromList(li_sm)
li0b = recStrExpand(li_sm_0b)
li0b.reverse()
self.li0 = recStrExpand(li_sm_0a) + li0b
self.li1 = recStrExpand(li_sm_1)
#pdb.set_trace()
#CAB Devices ## order is very important here
|
[
"ubuntu@ubuntu-VirtualBox.(none)"
] |
ubuntu@ubuntu-VirtualBox.(none)
|
ded8ee76872f157d15e6d9423f30b3068ac198ae
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02392/s638672279.py
|
d0cf75d9c7215e1b6ef0047f7a19216eae852dd7
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
import fileinput
for line in fileinput.input():
tokens = list(map(int, line.strip().split()))
a, b, c = tokens[0], tokens[1], tokens[2]
if a < b and b < c:
print("Yes")
else:
print("No")
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
ceb816aa78646111fde73dd941d40a313982aebe
|
95100d3a58122a81946eac46618b9e59bef1ba22
|
/Bin/autoTestClass.py
|
82c5b1f2fd5691c930ede521b36ef16f95f2afad
|
[] |
no_license
|
ayiya-hui/automation
|
6fc65bf7168a2ca663d17ead66ad83adffb61cb4
|
d100795db2275994a40199b8935296ae30a9eb0e
|
refs/heads/master
| 2022-12-31T08:32:53.558207
| 2020-05-25T09:17:53
| 2020-05-25T09:17:53
| 135,518,107
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,566
|
py
|
import logging
class Config:
def __init__(self):
self.dataCollector=''
self.appServer=''
self.user=''
self.password=''
self.testModule=''
self.testTask=''
self.testSuites=''
self.excludeSuites=''
self.option=''
self.sleep=''
self.version=''
class TestCategory:
def __init__(self):
self.suites=''
class TestSuite:
def __init__(self):
self.name=''
self.method=''
self.setupTasks=[]
self.testcases=[]
self.fileName=''
class configImportSuite(TestSuite):
def __init__(self):
TestSuite.__init__(self)
class RBACSuite(TestSuite):
def __init__(self):
TestSuite.__init__(self)
class adminRole:
def __init__(self):
self.userName=''
self.org=''
self.password=''
self.scope=''
class setupTask:
def __init__(self):
self.setupName=''
self.setupValue=[]
class verifyTask:
def __init__(self):
self.type=''
class eventTypeQuery:
def __init__(self):
self.name=''
class reportQuery:
def __init__(self):
self.id=''
self.eventType=''
self.key=''
class rbacEventQuery:
def __init__(self):
self.name=''
self.condition=''
class readEventType:
def __init__(self):
self.name=''
class createDevice:
def __init__(self):
self.deviceList=[]
class device:
def __init__(self):
self.name=''
self.type=''
self.ip=''
self.custId=''
class sentEvent:
def __init__(self):
self.eventList=[]
class event:
def __init__(self):
self.eventType=''
self.reporter=''
class sentIncident:
def __init__(self):
self.incidentList=[]
class incident:
def __init__(self):
self.incidentType=''
self.reporter=''
class eventExportSuite(TestSuite):
def __init__(self):
TestSuite.__init__(self)
class eventParsingSuite(TestSuite):
def __init__(self):
TestSuite.__init__(self)
def getKeyMap(self):
eventKey=[]
reporterKey=[]
for case in self.testcases:
event=case.eventType.strip()
if event not in eventKey:
eventKey.append(event)
reporter=case.reporter
if reporter not in reporterKey:
reporterKey.append(reporter)
eventStr='","'.join(eventKey)
reporterStr=','.join(reporterKey)
keyMap={}
keyMap['eventType']='"'+eventStr+'"'
keyMap['reporter']=reporterStr
return keyMap
class eventTypeSuite(TestSuite):
def __init__(self):
TestSuite.__init__(self)
class logDiscoverySuite(TestSuite):
def __init__(self):
TestSuite.__init__(self)
class incidentSuite(TestSuite):
def __init__(self):
TestSuite.__init__(self)
def getKeyMap(self):
eventKey=[]
reporterKey=[]
for case in self.testcases:
event=case.eventType.strip()
if event not in eventKey:
eventKey.append(event)
reporter=case.reporter
if reporter not in reporterKey:
reporterKey.append(reporter)
eventStr='","'.join(eventKey)
reporterStr='","'.join(reporterKey)
keyMap={}
keyMap['eventType']='"'+eventStr+'"'
keyMap['reporter']='"'+reporterStr+'"'
return keyMap
class incidentTimeBasedSuite(incidentSuite):
def __init__(self):
incidentSuite.__init__(self)
self.sendEvent=''
class incidentPatternBasedSuite(incidentTimeBasedSuite):
def __init__(self):
incidentTimeBasedSuite.__init__(self)
class linuxFileMonitorSuite(TestSuite):
def __init__(self):
TestSuite.__init__(self)
self.linuxHost=''
self.linuxUsers=[]
self.monPath=''
self.monConfig=''
class linuxUser:
def __init__(self):
self.name=''
self.password=''
class reportSuite(TestSuite):
def __init__(self):
TestSuite.__init__(self)
class TestCase:
def __init__(self):
self.name=''
self.reporter=''
class configImportCase(TestCase):
def __init__(self):
TestCase.__init__(self)
class RBACCase(TestCase):
def __init__(self):
TestCase.__init__(self)
self.verifyName=''
self.eventType=''
self.desc=''
self.roleName=''
self.verifyTasks=[]
class eventExportCase(TestCase):
def __init__(self):
TestCase.__init__(self)
self.deviceName=''
self.timeZone=''
self.option=''
self.startTime=''
self.endTime=''
self.custName=''
class eventParsingCase(TestCase):
def __init__(self):
TestCase.__init__(self)
self.eventType=''
self.parseEvent=''
self.key=''
self.parameters=''
class eventTypeCase(TestCase):
def __init__(self):
TestCase.__init__(self)
self.verifyTasks=[]
class logDiscoveryCase(TestCase):
def __init__(self):
TestCase.__init__(self)
self.discoverEvent=''
self.parameters=''
class incidentCase(TestCase):
def __init__(self):
TestCase.__init__(self)
self.eventType=''
self.createDevice=''
self.deviceType=''
self.deviceName=''
self.custId=''
self.repeatCount=''
self.repeatInterval=''
self.domainController=''
self.events=[]
self.parameters=''
class incidentEvent:
def __init__(self):
self.incidentMsg=''
class incidentTimeBasedCase(incidentCase):
def __init__(self):
incidentCase.__init__(self)
self.sendCount=''
self.sendInterval=''
self.clearInterval=''
self.clearWait=''
class incidentPatternBasedCase(incidentTimeBasedCase):
def __init__(self):
incidentTimeBasedCase.__init__(self)
self.clearEvent=''
self.clearCount=''
class reportCase(TestCase):
def __init__(self):
TestCase.__init__(self)
self.verifyTasks=[]
class linuxFileMonitorCase(TestCase):
def __init__(self):
TestCase.__init__(self)
self.resultOption=''
self.parameters=''
self.tasks=[]
class task:
def __init__(self):
self.taskName=''
self.taskType=''
self.targetPath=''
self.target=''
self.recurse=''
self.excuteUser=''
class RbacProfile:
def __init__(self):
self.name=''
self.description=''
self.config=''
self.eventFilter=''
class eventFilter:
def __init__(self):
self.name=''
self.singleConstraint=''
self.groupConstraint=''
self.groupBy=''
self.index=''
self.singleConditions=[]
self.groupConditions=[]
class domain:
def __init__(self):
self.name=''
self.domainId=''
self.companyName=''
self.description=''
self.primaryContactUser=''
self.secondaryContactUser=''
self.initialized=True
self.lastDataDistributedTime=''
self.timeZoneOffset=''
self.logoURL=''
self.encKey=''
self.disabled=False
self.custKey=''
self.includeRange=''
self.excludeRange=''
self.address=''
self.phone=''
self.collectors=[]
|
[
"hhuang@fortinet.com"
] |
hhuang@fortinet.com
|
8a08595a18180fdd63eb5e412db51e021f22bf79
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03050/s149029376.py
|
4af3d320d49d90f7eaaaa5bb9349d14574c742f6
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
def main():
def trial_division(n):
divs = []
for i in range(1, int(n**0.5)+1):
if n % i == 0:
divs.append(i)
if i != n//i:
divs.append(n//i)
return divs
N = int(input())
divs = trial_division(N)
ans = 0
for d in divs:
if d != 1 and N//(d-1) == N % (d-1):
ans += (d-1)
print(ans)
if __name__ == '__main__':
main()
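# Why only divisors need checking: if N // m == N % m == k, then
# N = m*k + k = k*(m+1), so m+1 must divide N; hence every candidate m
# is d-1 for some divisor d of N, and the loop above verifies each one.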
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
965b64261b972827ee492000da2e36bc4999cadf
|
2776195dc0863f5e43c5394767f1f950ce7672bb
|
/util/command_console_tui.py
|
646711bad18fe43b632ea9eb243f651dc9bf2df2
|
[
"MIT"
] |
permissive
|
sharkbound/PythonTwitchBotFramework
|
a5e6f55c89a0639cb8e3dd16b99bb6388ee5f5f8
|
3d9aff994d531272d53b869c3dac6602b04a9d70
|
refs/heads/master
| 2023-09-04T06:34:44.456338
| 2023-08-16T21:32:58
| 2023-08-16T21:32:58
| 134,095,615
| 111
| 47
|
MIT
| 2023-09-14T20:40:04
| 2018-05-19T20:24:24
|
Python
|
UTF-8
|
Python
| false
| false
| 6,689
|
py
|
#!/usr/bin/env python3
import asyncio
import json
import click
import websockets
from urwid import AsyncioEventLoop, Edit, ExitMainLoop, Filler, Frame, MainLoop, Text, connect_signal
COMMAND_READ, PASSWORD_READ = 0, 1
SEND_PASSWORD = 'send_password'
BAD_PASSWORD = 'bad_password'
DISCONNECTING = 'disconnecting'
LIST_CHANNELS = 'list_channels'
BAD_DATA = 'bad_data'
AUTHENTICATION_SUCCESSFUL = 'authentication_successful'
SEND_PRIVMSG = 'send_privmsg'
CHANNEL_NOT_FOUND = 'channel_not_found'
SUCCESS = 'success'
RUN_COMMAND = 'run_command'
loop = asyncio.get_event_loop()
@click.command()
@click.option('--host', prompt='Command server host', default='localhost')
@click.option('--port', prompt='Command server port', default='1337')
def run(host, port):
"""
Start a websocket client and a terminal UI to interact with it.
"""
# connection state
channels = []
bound_channel = None
ws = None
# UI state
lines = ['example text\n']
output = Text(lines)
input_field = Edit('>> ')
input_state = COMMAND_READ
widget = Frame(Filler(output, 'top'), footer=input_field)
widget.focus_position = 'footer'
# event wiring
event_loop = AsyncioEventLoop(loop=loop)
input_cb = None
def write(msg):
"""
Show an additional line of text.
"""
lines.append(msg + '\n')
output.set_text(lines)
def prompt_for_password(msg):
"""
Change prompt to password prompt. Return a future for the typed password.
"""
nonlocal input_cb, input_state
input_cb = loop.create_future()
input_state = PASSWORD_READ
input_cb.add_done_callback(_password_done)
input_field.set_mask('*')
input_field.set_caption(msg)
return input_cb
def _password_done(_):
nonlocal input_state
input_field.set_mask(None)
input_state = COMMAND_READ
def accept_input(key):
"""
Process typed lines of text. Dispatches to password prompt or command prompt
as needed.
"""
if key == 'enter':
if input_state == PASSWORD_READ:
input_cb.set_result(input_field.edit_text)
elif input_state == COMMAND_READ:
cmd_dispatch(input_field.edit_text)
input_field.set_edit_text('')
def update_channels(new_channels):
"""
Receive channel data.
"""
nonlocal channels, bound_channel
channels = new_channels
if len(channels) == 1:
bound_channel = channels[0]
write(f'bound console to channel "{bound_channel}"')
else:
write(f'bot is in these channels: {", ".join(channels)}')
async def ws_dispatch():
"""
Handle websocket messages.
"""
nonlocal ws
ws = await websockets.connect(f'ws://{host}:{port}')
while True:
try:
msg = json.loads(await ws.recv())
if msg['type'] == SEND_PASSWORD:
loop.create_task(ws.send(await prompt_for_password("Server password:")))
elif msg['type'] == DISCONNECTING:
write('server terminated connection...')
ws = None
elif msg['type'] == BAD_PASSWORD:
write('authentication failed... password did not match!')
elif msg['type'] == LIST_CHANNELS:
update_channels(msg['data']['channels'])
elif msg['type'] == AUTHENTICATION_SUCCESSFUL:
write('logged into command server!')
except Exception as e:
write(f'Error: {e}')
raise
def print_help():
write('/channel <channel> : binds this console to a bot-joined channel (needed for /chat)')
write('/chat <msg> : sends the chat message to the channel bound to this console')
write('/sendcmd <commands> [args...]: tells the bot run a command')
write('/quit: exit console')
write('/help to see this message again')
def cmd_dispatch(command):
write(f"dispatching {repr(command)}")
nonlocal bound_channel
if not ws:
write('Not connected')
return
parts = command.split()
if not parts:
print_help()
command_part = parts[0].lower()
if command_part[0] == '/':
command_part = command_part[1:]
args = parts[1:]
if command_part == 'help':
print_help()
elif command_part == 'sendcmd':
if not bound_channel:
write('there is not a bound channel! use `/channel <channel>` to bind one!')
elif not args:
write('you must provide a command to run to /sendcmd, ex: /sendcmd help')
else:
loop.create_task(ws.send(
json.dumps(
{
'type': RUN_COMMAND,
'channel': bound_channel,
'command': args[0],
'args': args[1:],
'silent': True,
}
)
))
elif command_part == 'chat':
if not bound_channel:
write('there is not a bound channel! use `/channel <channel>` to bind one!')
else:
loop.create_task(ws.send(
json.dumps(
{
'type': SEND_PRIVMSG,
'channel': bound_channel,
'message': ' '.join(args),
}
)
))
elif command_part == 'channel':
if not channels:
write('the bot is not currently in any channels, please have the bot join at least one than relaunch this console')
elif not args:
write(f'the bot is currently in these channels: {", ".join(channels)}\ndo `/channel <channel>` to bind this channel to one')
elif args[0] not in channels:
write(f'the bot is not currently in "{args[0]}"')
else:
bound_channel = args[0]
elif command_part == 'quit':
raise ExitMainLoop()
else:
write(f"Unrecognized command {repr(command_part)}")
event_loop.alarm(0, lambda: loop.create_task(ws_dispatch()))
mainloop = MainLoop(widget, event_loop=event_loop, unhandled_input=accept_input)
mainloop.run()
if __name__ == '__main__':
run()
|
[
"ashort@mozilla.com"
] |
ashort@mozilla.com
|
38507d07b45390ec1f2ae7abcb4b09bafc861be6
|
bea3febeda4c0688dfbb2db584ab4f7d710040e0
|
/django/instad/insta/settings.py
|
6010fe845419449c188ed9aa6dcdd3369ae86c0d
|
[] |
no_license
|
airpong/TIL-c9
|
c471ac73e23716cf677ba590dd6099e584c42883
|
069cc53820a09cd9787765ad41ba7e792dc342b5
|
refs/heads/master
| 2022-12-12T22:26:23.147651
| 2019-06-27T08:24:44
| 2019-06-27T08:24:44
| 166,777,129
| 0
| 0
| null | 2022-11-22T03:46:57
| 2019-01-21T08:34:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,302
|
py
|
"""
Django settings for insta project.
Generated by 'django-admin startproject' using Django 2.1.8.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'x0@q$j%oeql+7&2jpw@4r0^v^7(&%ov5*9#)@1a!qo(c4!y%wr'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['playground-airpong.c9users.io']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap4',
'imagekit',
'accounts',
'posts',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'insta.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR,'insta','templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'insta.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# Media
MEDIA_URL = '/mediaimage/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media')
|
[
"giponge@gmail.com"
] |
giponge@gmail.com
|
e62b8f2033d88c7c11b3b8e799e603b19e5974b7
|
4bfbcb96dcfe05fee32d222cb7a274099db055bd
|
/bilinear_LSTM_hingeloss/utils.py
|
ece2b6692b2ebe9ab87dc7f335d8cdfc537b3af4
|
[] |
no_license
|
naushadzaman/ACL_CKBC
|
5c5c8b0669e059f9f08090b9500dff84af94d2e6
|
655f3aaf28ff5040f50e72fb8118934766306969
|
refs/heads/master
| 2020-07-17T07:30:07.943944
| 2018-11-19T01:27:13
| 2018-11-19T01:27:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,095
|
py
|
from scipy.io import loadmat
import numpy as np
import math
from random import shuffle
from random import choice
from random import randint
from theano import tensor as T
def lookup(We,words,w):
if w in words:
return We[words[w],:]
else:
#print 'find UUUNKKK words',w
return We[words['UUUNKKK'],:]
def lookupIDX(We,words,w):
if w in words:
return words[w]
else:
#print 'find UUUNKKK words',w
return words['UUUNKKK']
def lookupRelIDX(We,words,w):
w = w.lower()
if w in words:
return words[w]
else:
#print 'find UUUNKKK words',w
return words['UUUNKKK']
def lookup_with_unk(We,words,w):
if w in words:
return We[words[w],:],False
else:
#print 'find Unknown Words in WordSim Task',w
return We[words['UUUNKKK'],:],True
def lookupwordID(We,words,w):
#w = w.strip()
result = []
array = w.split(' ')
for i in range(len(array)):
if(array[i] in words):
result.append(words[array[i]])
else:
#print "Find Unknown Words ",w
result.append(words['UUUNKKK'])
return result
def getData(f):
data = open(f,'r')
lines = data.readlines()
examples = []
for i in lines:
i=i.strip()
if(len(i) > 0):
i=i.split('\t')
e = (i[0], i[1], i[2], float(i[3]))
examples.append(e)
shuffle(examples)
return examples
def getWordmap(textfile):
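# Parses a text embedding file where each line is "token v1 v2 ... vn";
# returns (word -> row index dict, embedding matrix).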
words={}
We = []
f = open(textfile,'r')
lines = f.readlines()
for (n,i) in enumerate(lines):
i=i.split()
j = 1
v = []
while j < len(i):
v.append(float(i[j]))
j += 1
words[i[0]]=n
We.append(v)
return (words, np.matrix(We))
def getRelation(relationfile):
rel = {}
f = open(relationfile,'r')
lines = f.readlines()
for (n,i) in enumerate(lines):
i = i.strip()
rel[i] = n
return rel
#modified
def getPairMax(label,vec_r,vec,idx,d,We,words,rel,Rel,wi,wj,Weight,Offset,activation):
min = -5000
best = None
for i in range(len(d)):
if i == idx:
continue
(r,w1,w2,l) = d[i]
v1 = getVec(We,words,w1)
#v2 = getVec(We,words,w2)
if(activation.lower()=='relu'):
gv1 = Relu(np.dot(Weight,v1)+Offset[0])
gv2 = Relu(np.dot(Weight,vec)+Offset[0])
if(activation.lower()=='tanh'):
gv1 = np.tanh(np.dot(Weight,v1)+Offset[0])
gv2= np.tanh(np.dot(Weight,vec)+Offset[0])
if(activation.lower()=='sigmoid'):
gv1 = Sigmoid(np.dot(Weight,v1)+Offset[0])
gv2= Sigmoid(np.dot(Weight,vec)+Offset[0])
temp1 = np.dot(gv1, vec_r)
np1 = np.inner(temp1,gv2)
if(np1 > min and not(wi == w1) and not(wj==w1)):
min = np1
best = w1
return best
def getPairRand(label,vec,idx,d,We,words,wi,wj):
wpick = None
while(wpick == None or wpick == wi or wpick == wj):
ww = choice(d)
ridx = randint(0,1)
wpick = ww[ridx]
#print wpick
return wpick
def getPairMix(label,vec,idx,d,We,words,wi,wj):
r1 = randint(0,1)
if r1 == 1:
return getPairMax(label,vec,idx,d,We,words,wi,wj,Weight,Offset,activation)
else:
return getPairRand(label,vec,idx,d,We,words,wi,wj)
def getVec(We,words,t):
t = t.strip()
array = t.split(' ')
if array[0] in words:
vec = We[words[array[0]],:]
else:
#print 'find UUUNKKK words',array[0].lower()
vec = We[words['UUUNKKK'],:]
for i in range(len(array)-1):
#print array[i+1]
if array[i+1] in words:
vec = vec + We[words[array[i+1]],:]
else:
#print 'can not find corresponding vector:',array[i+1].lower()
vec = vec + We[words['UUUNKKK'],:]
vec = vec/len(array)
return vec
def getPairs(d, words, We, rel, Rel, type, size,Weight,Offset,activation):
pairs = []
for i in range(len(d)):
(r, t1, t2, s) = d[i]
v1 = getVec(We,words,t1)
v2 = getVec(We,words,t2)
v_r = Rel[rel[r.lower()]*size:rel[r.lower()]*size+size,:]
p1 = None
#p2 = None
if type == "MAX":
#print w1
#only change the first term
p1 = getPairMax(s,v_r,v2,i,d,We,words,rel,Rel,t1,t2,Weight,Offset,activation)
if type == "RAND":
#print w1
p1 = getPairRand(s,v1,i,d,We,words,t1,t2)
if type == "MIX":
#print w1
p1 = getPairMix(s,v1,i,d,We,words,t1,t2)
pairs.append(p1)
# 'getPairs'+str(len(pairs))
#print pairs
return pairs
def getPairsBatch(d, words, We, rel, Rel, batchsize, type, size,Weight,Offset,activation):
idx = 0
pairs = []
while idx < len(d):
batch = d[idx: idx + batchsize if idx + batchsize < len(d) else len(d)]
if(len(batch) <= 2):
print "batch too small."
continue #just move on because pairing could go faulty
p = getPairs(batch,words,We,rel,Rel,type,size,Weight,Offset,activation)
pairs.extend(p)
idx += batchsize
#print 'getPairsBatch'+str(len(pairs))
return pairs
def convertToIndex(e,words, We, rel, Rel):
if str(e).find(',') != -1:
(r,p1,p2,s) = e
new_e = (lookupRelIDX(Rel, rel, r),lookupwordID(We, words, p1), lookupwordID(We, words, p2), float(s))
#print new_e
return new_e
else:
p1 = e
new_e = (lookupwordID(We, words, p1))
#print new_e
return new_e
def ReluT(x):
return T.switch(x<0, 0 ,x)
def Relu(x):
result = np.zeros(x.shape)
#print x.shape
for i in xrange(result.shape[0]):
if(x[i]>0):
result[i]=x[i]
return result
def Sigmoid(x):
result = np.zeros(x.shape)
for i in xrange(result.shape[0]):
for j in xrange(result.shape[1]):
result[i][j] = 1 / (1 + math.exp(-x[i][j]))
return result
|
[
"abbeyli92@gmail.com"
] |
abbeyli92@gmail.com
|
7c447cdc98629e0992225a79f10d08e2ae28ed04
|
650aed41de2191565dce812a3c4d2b049928f5a4
|
/tornado_overview/chapter01/blockio_test.py
|
1c48fd520ee956a849f583d2d50952c3f9107b0f
|
[
"Apache-2.0"
] |
permissive
|
mtianyan/TornadoForum
|
a41dfc57f1a9ca60a0991dcaa4374cd4a8b6ba93
|
5698dd5cc0e399d3d0ec53e159b8e1f1cddfbe71
|
refs/heads/master
| 2022-04-23T09:48:25.933781
| 2020-04-20T17:06:23
| 2020-04-20T17:06:23
| 168,485,700
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 651
|
py
|
# blocking I/O
import socket
import requests
html = requests.get("http://www.baidu.com").text
# 1. establish the TCP connection (three-way handshake),
# 2. wait for the server response
print(html)
print("*" * 30)
# how to fetch the html directly through a socket
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = "www.baidu.com"
client.connect((host, 80)) # blocking io: the CPU sits idle while we wait
client.send("GET {} HTTP/1.1\r\nHost:{}\r\nConnection:close\r\n\r\n".format("/", host).encode("utf8"))
data = b""
while 1:
d = client.recv(1024) # blocks until data arrives
if d:
data += d
else:
break
data = data.decode("utf8")
print(data)
|
[
"1147727180@qq.com"
] |
1147727180@qq.com
|
e1bd696eaab1b5eebddfedbc850748664f84c256
|
bb983b38f9be7b6fd4ab1a651484db37c1aeff39
|
/0705/test2_physical_properties.py
|
f6a28c9ca6581f46cb5dcee2ac17325035455cc2
|
[] |
no_license
|
nakanishi-akitaka/python2018_backup
|
c214df78372cca993d69f8001010ec2f6dcaf1be
|
45766d3c3777de2a91b3e2cf50c6bfedca8627da
|
refs/heads/master
| 2023-02-18T08:04:28.625532
| 2022-06-07T01:02:53
| 2022-06-07T01:02:53
| 201,399,236
| 5
| 30
| null | 2023-02-10T21:06:51
| 2019-08-09T05:48:22
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 583
|
py
|
# -*- coding: utf-8 -*-
"""
Check physical properties of materials
Created on Thu Jul 5 14:04:59 2018
@author: Akitaka
"""
from mendeleev import element
for i in range(1,100):
x=element(i)
# print(x.symbol)
# print(x,x.electron_affinity,"electron affinity")
if(x.electron_affinity==None):
print(x,x.electron_affinity,"electron affinity")
elif(x.electron_affinity<0.0):
print(x,x.electron_affinity,"electron affinity")
# if(x.thermal_conductivity==None):
# print(x,x.thermal_conductivity,"thermal conductivity")
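# Elements without a stable anion (the noble gases, for example) are the ones
# expected to be flagged above, since their electron affinity is reported as
# None or as a negative value.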
|
[
"noreply@github.com"
] |
nakanishi-akitaka.noreply@github.com
|
4b8beff234eb9196456cb171893224665acf0ae0
|
b580fd482147e54b1ca4f58b647fab016efa3855
|
/host_im/mount/malware-classification-master/samples/not/sample_good386.py
|
12e873b5df7e5bce9581700bed1365123b6204c4
|
[] |
no_license
|
Barnsa/Dissertation
|
1079c8d8d2c660253543452d4c32799b6081cfc5
|
b7df70abb3f38dfd446795a0a40cf5426e27130e
|
refs/heads/master
| 2022-05-28T12:35:28.406674
| 2020-05-05T08:37:16
| 2020-05-05T08:37:16
| 138,386,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
import re
import datetime
import math
import array
import random
import readline
import textwrap
import stringprep
import difflib
nterms = 719
n1, n2 = 0, 1
if nterms <= 0:
print("Please provide a positive integer.")
elif nterms == 1:
print("Fibonacci sequence upto", nterms, ":")
print(n1)
else:
print("Fibonacci sequence:")
count = 0
while count < nterms:
print(n1)
nth = n1 + n2
n1 = n2
n2 = nth
count += 1
|
[
"barnsa@uni.coventry.ac.uk"
] |
barnsa@uni.coventry.ac.uk
|
8e632b71a2abf023a97cded3ffed0e7a87717c64
|
5138b8077a944e655570c3d15389ccaac0dafceb
|
/scripts/fileserver.py
|
c20b8737a7e1bddee6c8ef5a3c1e1060c3c2b821
|
[] |
no_license
|
cms-btv-pog/CTagTraining
|
9740abaf4a5a05500782695723cace90f6d8882e
|
affb2dc09a3bb812d59302990f59cbfaa06370f4
|
refs/heads/master
| 2021-01-18T22:29:05.148151
| 2016-04-07T07:12:29
| 2016-04-07T07:12:29
| 42,971,506
| 2
| 4
| null | 2016-01-25T10:15:07
| 2015-09-23T01:42:03
|
Python
|
UTF-8
|
Python
| false
| false
| 998
|
py
|
'''
Workaround to allow xrd access to root files, given that the ROOT version shipped with anaconda does not
provide the functionality. Files are transferred on demand and deleted when not needed any longer.
'''
import subprocess
import os
import uuid
class PoolFile(object):
def __init__(self, path, delete_on_exit=True):
self.path = path
self.del_once_done = delete_on_exit
def __del__(self):
if self.del_once_done:
print 'deleting %s' % self.path
os.remove(self.path)
def serve(path):
if path.startswith('root://'):
fname = '%s.root' % uuid.uuid1()
print '%s --> %s' % (path, fname)
proc = subprocess.Popen(['xrdcp', path, fname], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
exitcode = proc.wait()
if exitcode != 0:
_, stderr = proc.communicate()
raise RuntimeError('Problem copying file %s, Error: %s' % (path, stderr))
return PoolFile(fname)
else:
return PoolFile(path, False)
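# Minimal usage sketch (the xrootd URL below is hypothetical; xrdcp must be on PATH):
# pooled = serve('root://some.host//store/file.root') # copied to a uuid-named local file
# local = serve('/data/file.root') # local paths pass through and are never deleted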
|
[
"mverzett@cern.ch"
] |
mverzett@cern.ch
|
39c8bc0e2a0434d7c3f69aa93bb3a118e6a627a0
|
fa5e890e95f35744a42ae231c6678b8295502c12
|
/lectures/migrations/0001_initial.py
|
5b9fa25a457967d2fff42b78c8734607ad809eae
|
[] |
no_license
|
JeeHyungPark/first_MVP
|
4518ae01114686e9ad9fde45112c2eef438e1054
|
c4a673a69772260d1ebdb16f73b242c4f90da674
|
refs/heads/master
| 2023-01-05T19:31:35.018377
| 2020-09-02T07:38:34
| 2020-09-02T07:38:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,967
|
py
|
# Generated by Django 3.0.9 on 2020-09-01 16:13
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Lecture',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, unique=True, verbose_name='제목')),
('video', models.URLField(unique=True, verbose_name='강의출처')),
('description', models.TextField(blank=True, verbose_name='강의설명')),
('lecturer', models.CharField(blank=True, max_length=50, verbose_name='강사')),
('main_category', models.CharField(blank=True, choices=[('코딩', '코딩'), ('미술', '미술'), ('디자인/편집', '디자인/편집')], max_length=16, verbose_name='대분류')),
('sub_category', models.CharField(blank=True, choices=[('Python', 'Python'), ('HTML/CSS', 'HTML/CSS'), ('Javascript', 'Javascript'), ('C', 'C'), ('Java', 'Java'), ('Git', 'Git'), ('연필', '연필'), ('디지털드로잉', '디지털드로잉'), ('색연필', '색연필'), ('수채화', '수채화'), ('펜', '펜'), ('캘리그래피', '캘리그래피'), ('아크릴', '아크릴'), ('Premiere Pro', 'Premiere Pro'), ('Photoshop', 'Photoshop'), ('After Effect', 'After Effect'), ('InDesign', 'InDesign'), ('Illustrator', 'Illustrator'), ('Sketch', 'Sketch')], max_length=18, verbose_name='중분류')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'verbose_name': '강의',
'verbose_name_plural': '강의',
'ordering': ['title'],
},
),
]
|
[
"cdkrcd8@gmail.com"
] |
cdkrcd8@gmail.com
|
fa338f2fa35a152beb28b1af654dc0bd2c3f620e
|
e1efc8e0b0e4629dea61504fbc816c0527691bd9
|
/6.redis/redis12-线程模型.py
|
ed2756dce5974767a3cd606fad0585653fcbcf93
|
[] |
no_license
|
xiongmengmeng/xmind-technology
|
2bb67a0bf92cfd660cac01f8ab3a2454423ccba5
|
e2fdb6987ef805a65f0a4feb52d84383853f4b77
|
refs/heads/main
| 2023-07-31T07:10:29.868120
| 2021-09-11T08:18:17
| 2021-09-11T08:18:17
| 307,636,242
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,214
|
py
|
import os,sys
parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0,parentdir)
import xmind
from xmind.core.markerref import MarkerId
xmind_name="redis"
w = xmind.load(os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
s2=w.createSheet()
s2.setTitle("线程模型")
r2=s2.getRootTopic()
r2.setTitle("线程模型")
content={
'线程模型':[
'基于Reactor模式开发',
'文件事件处理器'
],
'Redis称单线程模型':[
'文件事件分派器队列的消费是单线程的'
],
'文件事件处理器4部分组成':[
{'多个套接字':[
'会并发产生不同的操作,每个操作对应不同文件事件',
{'文件事件':[
'对socket操作的抽象',
'当一个socket准备好执行连接accept、read、write、close操作时,会产生一个文件事件'
]}
]},
{'IO多路复用程序':[
'监听多个socket,将socket产生的事件放入队列',
'通过队列以有序、同步且每次一个socket的方式向文件事件分派器传送socket',
'当上一个socket产生的事件被对应事件处理器执行完后,I/O多路复用程序才会向文件事件分派器传送下个socket'
]},
{'文件事件分派器':[
'接收I/O多路复用程序传来的socket',
'根据socket产生的事件类型,调用相应的事件处理器'
]},
{'事件处理器':[
'连接应答处理器',
'命令请求处理器',
'命令回复处理器'
]}
],
'客户端和Redis服务器通信过程':[
'1.客户端向服务器发起【连接请求】,socket产生一个AE_READABLE事件',
'2.AE_READABLE事件映射到【连接应答处理器】',
'3.客户端向服务器发起【命令请求】(不管读还是写请求),socket产生一个AE_READABLE事件',
'4.AE_READABLE事件映射到【命令请求处理器】',
'5.服务器向客户端发起【命令响应】',
'6.AE_WRITABLE事件映射到【命令回复处理器】'
]
}
# build the xmind
xmind.build(content,r2)
# save the xmind
xmind.save(w,os.path.dirname(os.path.abspath(__file__))+"\\"+xmind_name+".xmind")
|
[
"xiongmengmeng@qipeipu.com"
] |
xiongmengmeng@qipeipu.com
|
efd6dd85796d1d65f530aaa37a650624b1f19999
|
cc8905a957e9e0fa211e5e14e6cda6957727c5dc
|
/ipwxlearn/tests/glue/test_updates.py
|
91d6c4e7364f03c7f83768bba473cdaea04aab4c
|
[] |
no_license
|
korepwx/ipwxlearn
|
630ae276e1a8b95e68d466debdaf4f51c5c6d634
|
afbfe8ee1af114a8bf6aac73aee36c4d0930b8fc
|
refs/heads/master
| 2021-01-14T10:15:05.883476
| 2016-07-15T01:59:23
| 2016-07-15T01:59:23
| 57,875,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,116
|
py
|
# -*- coding: utf-8 -*-
import unittest
import numpy as np
from ipwxlearn import glue
from ipwxlearn.glue import G
class UpdatesTestCase(unittest.TestCase):
def _do_test_update(self, optimizer, n_dim=256, *args, **kwargs):
graph = G.Graph()
with graph.as_default():
# okay, compose the quadratic function.
x = G.make_variable('x', shape=[n_dim], init=G.init.Uniform([-1, 1]), dtype=glue.config.floatX)
# finally, create the training function.
loss = G.op.dot(x, x)
train_fn = G.make_function(updates=optimizer(loss, [x], *args, **kwargs), outputs=loss)
with G.Session(graph):
best_x = G.get_variable_values(x)
best_loss = np.dot(best_x, best_x)
self.assertGreater(np.mean((best_x - np.zeros_like(best_x)) ** 2), 1e-2)
for i in range(700):
train_loss = train_fn()
if train_loss < best_loss:
best_x = G.get_variable_values(x)
best_loss = train_loss
self.assertLess(np.mean((best_x - np.zeros_like(best_x)) ** 2), 1e-7)
def test_sgd(self):
"""Test training with SGD."""
self._do_test_update(G.updates.sgd, learning_rate=0.01)
def test_momentum(self):
"""Test training with momentum."""
self._do_test_update(G.updates.momentum, learning_rate=0.001)
@unittest.skipIf(glue.config.backend == 'tensorflow', 'TensorFlow has not supported Nesterov momentum yet.')
def test_nesterov_momentum(self):
"""Test training with nesterov momentum."""
self._do_test_update(G.updates.nesterov_momentum, learning_rate=0.001)
def test_adagrad(self):
"""Test training with AdaGrad."""
self._do_test_update(G.updates.adagrad, learning_rate=1.0)
def test_rmsprop(self):
"""Test training with RMSProp."""
self._do_test_update(G.updates.rmsprop, learning_rate=10.0, rho=0.999)
def test_adam(self):
"""Test training with Adam."""
self._do_test_update(G.updates.adam, learning_rate=0.01)
|
[
"public@korepwx.com"
] |
public@korepwx.com
|
05a0b55aa941375cb364396a2d5cb1c4b6bd978a
|
1382e88bc948a1f6b506018521827a1fafb9c2df
|
/modules/dictionary/dictionary.py
|
39305b1b1169a9c819196605b33ef8f97574931d
|
[] |
no_license
|
nano13/tambi
|
73c405d333b91dc478d7cd274e3f8516fde15bd5
|
9475110ddd9ebb153de4bc8c734ce95c11d63186
|
refs/heads/master
| 2021-01-18T12:52:22.386453
| 2019-01-19T13:52:50
| 2019-01-19T13:52:50
| 100,367,577
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,458
|
py
|
# -*- coding: utf_8 -*-
from interpreter.exceptions import CommandNotInThisModule
from interpreter.structs import Result
import sqlite3
class Dictionary(object):
def __init__(self):
pass
def initDbConnection(self):
self.connection = sqlite3.connect("./modules/vocable/vocables.db")
self.cursor = self.connection.cursor()
def getCommands(self):
return {
"dictionary.commands" : self.commands,
"dictionary.hebrew" : self.hebrew,
"dictionary.greek" : self.greek,
"dictionary.aramaic" : self.aramaic,
"dictionary.akkadian" : self.akkadian,
}
def interpreter(self, command, args, queue):
print("args:", args)
commands = self.getCommands()
return commands.get(command, self.commandNotFound)(command, args)
def commandNotFound(self, c, a):
raise CommandNotInThisModule("command not found in module dictionary")
def commands(self, c, a):
dic = self.getCommands()
commands = sorted(dic.items())
all_commands = []
for key in commands:
line = str(key).split(",")[0]
all_commands.append(str(line[2:-1]))
result_object = Result()
result_object.category = "list"
result_object.payload = all_commands
return result_object
def hebrew(self, c, args):
return self.dictionaryHelper(args, 'hebrew')
def greek(self, c, args):
return self.dictionaryHelper(args, 'greek')
def aramaic(self, c, args):
return self.dictionaryHelper(args, 'aramaic')
def akkadian(self, c, args):
return self.dictionaryHelper(args, 'akkadian')
def dictionaryHelper(self, args, language):
result_object = Result()
query = """
SELECT display, gloss
FROM {0}
WHERE display LIKE ? OR gloss LIKE ?
""".format(language)
try:
param = '%'+str(args[0])+'%'
except IndexError:
result_object.error = 'invalid parameter'
else:
self.initDbConnection()
self.cursor.execute(query, [param, param])
result_object.payload = self.cursor.fetchall()
result_object.category = "itemized"
result_object.name = "dictionary result"
return result_object
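# Minimal usage sketch (assumes ./modules/vocable/vocables.db exists with the language tables):
# d = Dictionary()
# result = d.interpreter('dictionary.greek', ['logos'], None) # the queue argument is unused here
# print(result.payload)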
|
[
"nano13@gmx.net"
] |
nano13@gmx.net
|
c626b14c3515006e2869ad2a09ddea3d53f9a59a
|
9468a03f04f91bbb76338253ccb53b885b65698a
|
/beam_models/EMSS/with_elevation/SKADCBeamPatterns/2019_08_06_SKA_Ku/interpolated/interpolate_beam_Ku.py
|
035dcf0d39393dc2b17e0baaeeffdfca603503e6
|
[
"BSD-3-Clause"
] |
permissive
|
ska-telescope/sim-mid-pointing
|
53a9cd1cb1e66584a72b4f50e51b3e15942d1de7
|
0f11d37e6fac231d7f20e4a7e20ee76e7d2d560f
|
refs/heads/master
| 2020-05-20T06:47:51.107561
| 2020-01-23T13:50:22
| 2020-01-23T13:50:22
| 185,428,675
| 0
| 1
|
BSD-3-Clause
| 2019-10-15T16:21:26
| 2019-05-07T15:22:02
|
Python
|
UTF-8
|
Python
| false
| false
| 3,440
|
py
|
import logging
import sys
import numpy
from processing_library.image.operations import create_empty_image_like
from rascil.processing_components.image.operations import export_image_to_fits, import_image_from_fits
import matplotlib.pyplot as plt
log = logging.getLogger()
log.setLevel(logging.INFO)
log.addHandler(logging.StreamHandler(sys.stdout))
mpl_logger = logging.getLogger("matplotlib")
mpl_logger.setLevel(logging.WARNING)
import pprint
pp = pprint.PrettyPrinter()
from scipy import interpolate
# x = np.arange(0, 10)
# y = np.exp(-x/3.0)
# f = interpolate.interp1d(x, y)
#
# xnew = np.arange(0,9, 0.1)
# ynew = f(xnew) # use interpolation function returned by `interp1d`
# plt.plot(x, y, 'o', xnew, ynew, '-')
# plt.show()
elevations_in = numpy.array([15, 45, 90], dtype='float')
elevations_out = numpy.array([15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90], dtype='float')
elevations_out = numpy.arange(15.0, 90, 1.0)
default = 1
nchan = 1
npol = 4
ny = 1024
nx = 1024
array_in = numpy.zeros([nchan, npol, ny, ny, len(elevations_in)])
array_out = numpy.zeros([nchan, npol, ny, ny, len(elevations_out)])
im_in = "../Ku_{el:d}_11700_{type}.fits"
im_out = "Ku_{el:d}_11700_{type}_interpolated.fits"
im_diff_out = "Ku_{el:d}_11700_{type}_interpolated_difference.fits"
im_template = None
for type in ['real', 'imag']:
for iel, el in enumerate(elevations_in):
print("Reading elevation %s part elevation %.0f" % (type, el))
im_in_file = im_in.format(el=int(el), type=type)
im = import_image_from_fits(im_in_file)
array_in[..., iel] = im.data
if im_template is None:
im_template = create_empty_image_like(im)
f = interpolate.interp1d(elevations_in, array_in, axis=4, kind='quadratic')
array_out = f(elevations_out)
rms_vp = []
max_vp = []
min_vp = []
rms_diff = []
max_diff = []
min_diff = []
for iel, el in enumerate(elevations_out):
print("Writing elevation %s part %.0f" % (type, el))
im_template.data = array_out[..., iel]
im_out_file = im_out.format(el=int(el), type=type)
export_image_to_fits(im_template, im_out_file)
rms_vp.append(numpy.std(im_template.data[0,0:1,...]))
max_vp.append(numpy.max(im_template.data[0,0:1,...]))
min_vp.append(numpy.min(im_template.data[0,0:1,...]))
im_template.data -= array_in[..., default]
im_diff_out_file = im_diff_out.format(el=int(el), type=type)
export_image_to_fits(im_template, im_diff_out_file)
rms_diff.append(numpy.std(im_template.data[0,0:1,...]))
max_diff.append(numpy.max(im_template.data[0,0:1,...]))
min_diff.append(numpy.min(im_template.data[0,0:1,...]))
plt.clf()
plt.plot(elevations_out, rms_vp, '-', color='r', label='VP rms')
if type == 'imag':
plt.plot(elevations_out, max_vp, '.', color='g', label='VP max')
plt.plot(elevations_out, min_vp, '-', color='b', label='VP min')
plt.plot(elevations_out, rms_diff, '.', color='r', label='VP diff rms')
plt.plot(elevations_out, max_diff, '.', color='g', label='VP diff max')
plt.plot(elevations_out, min_diff, '.', color='b', label='VP diff min')
plt.xlabel('Elevation')
plt.ylabel('Value')
plt.title('Statistics in %s part of 11700MHz voltage pattern' % type)
plt.legend()
plt.savefig('%s_vp_statistics.png' % type)
plt.show(block=False)
|
[
"realtimcornwell@gmail.com"
] |
realtimcornwell@gmail.com
|
6d5689b96edd16de7af3d2cdb8ee31be61120d55
|
dcbb531eada723b717cf7243fbeac6d3738007b4
|
/chapter3/BX-CSV-Dump/users.py
|
ba7426d264ec460afc5d144cd1afc3500153ad3b
|
[] |
no_license
|
yangtao0304/recommendation-system
|
14a023a57d38a2450d44467bb85c441bd067e8f9
|
995b93ed0fd146d5bb6d837055b8e150a8b145c7
|
refs/heads/master
| 2020-09-12T05:56:00.173486
| 2020-03-10T01:24:28
| 2020-03-10T01:24:28
| 222,332,946
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
import pandas as pd
file_path = 'BX-Users.csv'
users = pd.read_table(file_path, sep=';', header=0, encoding='ISO-8859-1')
print('First 5 rows:\n{}\n'.format(users.head()))
print('Total row count:\n{}\n'.format(users.count()))
print('Age range: <{},{}>'.format(users['Age'].min(), users['Age'].max()))
'''
Total row count:
User-ID 278858
Location 278858
Age 168096
Age range: <0.0,244.0>
'''
# pandas reads NULL in the Age column as NaN
# the maximum and minimum ages are clearly implausible
# possible fixes: 1. random values within a realistic range; 2. fill with the mean
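# A minimal sketch of option 2 (mean fill); the 0-110 bounds are an assumption:
# valid = users['Age'].between(0, 110)
# users.loc[~valid, 'Age'] = None # treat out-of-range ages as missing
# users['Age'] = users['Age'].fillna(users['Age'].mean())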
|
[
"im.yangtao0304@gmail.com"
] |
im.yangtao0304@gmail.com
|
06017e09936000545346137f186f35e3dd4590ef
|
a1aba83b90285def84cc425c0b089dd632a01a51
|
/py千峰/day13线程与协程/xiecheng03.py
|
8ba4d3827ffd07f072a48671005c6c1fcbd1b612
|
[] |
no_license
|
15929134544/wangwang
|
8ada14acb505576f07f01e37c936500ee95573a0
|
47f9abbf46f8d3cbc0698cb64c043735b06940d4
|
refs/heads/master
| 2023-05-11T19:59:54.462454
| 2021-05-25T15:19:43
| 2021-05-25T15:19:43
| 328,119,916
| 1
| 1
| null | 2021-05-11T16:13:18
| 2021-01-09T09:33:29
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,285
|
py
|
"""
greenlet already implements coroutines, but switching is manual, which is tedious.
Don't worry: Python has a module more powerful than greenlet that switches
tasks automatically: gevent. When a greenlet hits an IO operation (input/output,
e.g. network or file access), such as a network request, it automatically
switches to another greenlet, and switches back at a suitable moment once the
IO completes. Because IO is slow and often leaves the program waiting, gevent's
automatic switching guarantees some greenlet is always running instead of waiting on IO.
"""
import time
import gevent as gevent
from greenlet import greenlet
from gevent import monkey
monkey.patch_all() # apply the monkey patch (makes blocking calls like time.sleep cooperative)
def a(): # task A
for i in range(5):
print('A' + str(i))
# gb.switch() # manual switch (greenlet style)
time.sleep(0.1)
def b(): # task B
for i in range(5):
print('B' + str(i))
# gc.switch()
time.sleep(0.1)
def c(): # task C
for i in range(5):
print('C' + str(i))
# ga.switch()
time.sleep(0.1)
if __name__ == '__main__':
g1 = gevent.spawn(a)
g2 = gevent.spawn(b)
g3 = gevent.spawn(c)
g1.join()
g2.join()
g3.join()
print('---------------')
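# Because monkey.patch_all() makes time.sleep cooperative, the three greenlets
# interleave (roughly A0 B0 C0 A1 B1 C1 ...) instead of running one after another.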
|
[
"you@example.com"
] |
you@example.com
|
64f802ee3da662f7515a4b931b1bd80bc895e282
|
e2992e19ebc728387125a70c72a702a076de7a12
|
/Python/01_My_Programs_Hv/05_List/102_C5_E3.py
|
20429dcf098b179f726d90ec28f04fadd4ca8fe1
|
[] |
no_license
|
harsh1915/Machine_Learning
|
c9c32ed07df3b2648f7796f004ebb38726f13ae4
|
c68a973cfbc6c60eeb94e253c6f2ce34baa3686e
|
refs/heads/main
| 2023-08-27T15:01:16.430869
| 2021-11-15T07:53:36
| 2021-11-15T07:53:36
| 377,694,941
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
ls= ["abc", "def", "ghi"]
print(ls[0][::-1])
def list_reverse(ls):
ls1= []
for i in ls:
ls1.append(i[::-1])
return ls1
print(list_reverse(ls))
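# Expected output: cba, then ['cba', 'fed', 'ihg']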
|
[
"“hdjethva6@gmail.com”"
] |
“hdjethva6@gmail.com”
|
a18d86d09a8f17900f98f2b1c6064003b6ee5ec0
|
50e10e8f304d32329ba88aa3fa8f8250c0a6a84d
|
/standard/girc.py
|
594043511c56131f646724eb2d265123d12a8728
|
[
"Apache-2.0"
] |
permissive
|
candeira/duxlot
|
0a1b4468e1d93f3db90219ea21d45a8e494aaabb
|
69f4234e14ac8ad1ef53a0d663a7240d6e321e46
|
refs/heads/master
| 2021-01-20T04:26:10.588945
| 2012-09-13T17:00:18
| 2012-09-13T17:00:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,750
|
py
|
# Copyright 2012, Sean B. Palmer
# Code at http://inamidst.com/duxlot/
# Apache License 2.0
# @@ this can't be named irc.py
import duxlot
# Save PEP 3122!
if "." in __name__:
from . import api
else:
import api
command = duxlot.command
# @@ ask, not tell yourself
# IRC
@command
def ask(env):
"Ask another user an enquiry"
if not env.arg:
return env.reply(ask.__doc__)
env.verb = "ask"
to(env)
# IRC
@command
def parsed_message(env):
"Show parsed input message"
env.reply(repr(env.message))
# IRC
@command
def schedule(env):
"Schedule an event"
# @@ database.timezones
if not env.arg:
return env.reply(schedule.__doc__)
t, text = env.arg.split(" ", 1)
t = float(t)
env.schedule((t, env.sender, env.nick, text))
env.reply("Scheduled")
# @@ test to make sure the right time is given!
# IRC
@command
def seen(env):
"Find out whether somebody has been around recently"
if not env.arg:
return env.say(seen.__doc__)
if env.arg == env.options["nick"]:
return env.reply("I'm right here")
# env.database.seen.get.verb.verb.verb
result = env.database.cache.seen.get(env.arg)
if not result:
env.say("Haven't seen %s" % env.arg)
else:
unixtime, place = result
offset, abbreviation = zone_from_nick(env, env.nick)
dt = api.clock.format_datetime(
format="%Y-%m-%d %H:%M:%S $TZ",
offset=offset,
tz=abbreviation,
unixtime=unixtime
)
env.say("On %s at %s" % (place, dt))
# IRC
# @@ a check that commands are covered here
@command
def stats(env):
"Display information about the most used commands"
usage = env.database.cache.usage
usage = sorted(((b, a) for (a, b) in usage.items()), reverse=True)
usage = list(usage)[:10]
usage = ["%s (%s)" % (b, a) for (a, b) in usage]
env.reply("Top used commands: " + ", ".join(usage))
# IRC
@command
def tell(env):
"Tell another user a message"
# Inspired by Monty, by Paul Mutton
# http://www.jibble.org/
if not env.arg:
return env.reply(tell.__doc__)
env.verb = "tell"
to(env)
# IRC
@command
def timezone(env):
"Set the user's timezone to an IANA Time Zone Database value"
tz = env.database.cache.timezones.get(env.nick, None)
if not env.arg:
if tz:
return env.reply("Your timezone is currently set to %s" % tz)
else:
return env.reply("You do not currently have a timezone set")
if env.arg in {"None", "-", "delete", "remove", "unset"}:
if tz is None:
return env.reply("You do not current have a timezone set")
with env.database.context("timezones") as timezones:
del timezones[env.nick]
return env.reply("Your timezone has been un-set")
if env.arg in {"geo", "guess"}:
zonename = api.geo.timezone_info(
address=env.message["prefix"]["host"]
).zone
else:
zonename = env.arg
import os.path
zoneinfo = env.options["zoneinfo"]
zonefile = os.path.join(zoneinfo, zonename)
try: opt = api.clock.zoneinfo_offset(filename=zonefile)
except Exception:
env.reply("Unrecognised zone. Try using one of the TZ fields here:")
env.reply("http://en.wikipedia.org/wiki/List_of_tz_database_time_zones")
else:
tz = round(opt.offset, 2)
with env.database.context("timezones") as timezones:
timezones[env.nick] = zonename
# message = "Set your zone to %s, which is currently %s (%s)"
message = "Set your TZ to %s; currently %s (UTC %s)"
hours = round(tz / 3600, 3)
hours = "+" + str(hours) if (hours >=0) else str(hours)
hours = hours.rstrip("0").rstrip(".")
env.reply(message % (zonename, opt.abbreviation, hours))
# @@ check nickname sanity
# IRC
@command
def to(env):
"Send a message to another user"
if not env.arg:
return env.reply(to.__doc__)
import time
# could be partly moved to api?
recipient, message = env.arg.split(" ", 1)
# check syntax of env.nick!
# "self!" syntax to force a message to self
if env.nick == recipient:
return env.reply("You can tell yourself that")
if env.options["nick"] == recipient:
return env.reply("Understood")
if not hasattr(env, "verb"):
env.verb = None
# @@ check nick format
item = (int(time.time()), env.nick, env.verb, recipient, message)
with env.database.context("messages") as messages:
messages.setdefault(recipient, [])
messages[recipient].append(item)
env.reply("Will pass your message to %s" % recipient)
|
[
"sean@miscoranda.com"
] |
sean@miscoranda.com
|
5262ad751574f1650ce9fde9ee1b73565b930cb2
|
d7379fa682e25d1d40b93b61dfe7c1fc2a64e0ff
|
/test/test_variables.py
|
fb481be5d642768a394481a1a887f86acd895855
|
[
"Apache-2.0"
] |
permissive
|
renuacpro/unit
|
f7b00cfc059b1ff9298824ead28b1ac404b86ff0
|
22c88f0253d57756ad541326df09d1398a871708
|
refs/heads/master
| 2022-12-10T08:27:15.371966
| 2020-09-07T12:21:14
| 2020-09-07T12:21:14
| 293,599,216
| 2
| 0
| null | 2020-09-07T18:08:47
| 2020-09-07T18:08:47
| null |
UTF-8
|
Python
| false
| false
| 3,888
|
py
|
from unit.applications.proto import TestApplicationProto
class TestVariables(TestApplicationProto):
prerequisites = {}
def setUp(self):
super().setUp()
self.assertIn(
'success',
self.conf(
{
"listeners": {"*:7080": {"pass": "routes/$method"}},
"routes": {
"GET": [{"action": {"return": 201}}],
"POST": [{"action": {"return": 202}}],
"3": [{"action": {"return": 203}}],
"4*": [{"action": {"return": 204}}],
"blahGET}": [{"action": {"return": 205}}],
"5GET": [{"action": {"return": 206}}],
"GETGET": [{"action": {"return": 207}}],
"localhost": [{"action": {"return": 208}}],
},
},
),
'configure routes',
)
def conf_routes(self, routes):
self.assertIn('success', self.conf(routes, 'listeners/*:7080/pass'))
def test_variables_method(self):
self.assertEqual(self.get()['status'], 201, 'method GET')
self.assertEqual(self.post()['status'], 202, 'method POST')
def test_variables_uri(self):
self.conf_routes("\"routes$uri\"")
self.assertEqual(self.get(url='/3')['status'], 203, 'uri')
self.assertEqual(self.get(url='/4*')['status'], 204, 'uri 2')
self.assertEqual(self.get(url='/4%2A')['status'], 204, 'uri 3')
def test_variables_host(self):
self.conf_routes("\"routes/$host\"")
def check_host(host, status=208):
self.assertEqual(
self.get(headers={'Host': host, 'Connection': 'close'})[
'status'
],
status,
)
check_host('localhost')
check_host('localhost.')
check_host('localhost:7080')
check_host('.localhost', 404)
check_host('www.localhost', 404)
check_host('localhost1', 404)
def test_variables_many(self):
self.conf_routes("\"routes$uri$method\"")
self.assertEqual(self.get(url='/5')['status'], 206, 'many')
self.conf_routes("\"routes${uri}${method}\"")
self.assertEqual(self.get(url='/5')['status'], 206, 'many 2')
self.conf_routes("\"routes${uri}$method\"")
self.assertEqual(self.get(url='/5')['status'], 206, 'many 3')
self.conf_routes("\"routes/$method$method\"")
self.assertEqual(self.get()['status'], 207, 'many 4')
self.conf_routes("\"routes/$method$uri\"")
self.assertEqual(self.get()['status'], 404, 'no route')
self.assertEqual(self.get(url='/blah')['status'], 404, 'no route 2')
def test_variables_replace(self):
self.assertEqual(self.get()['status'], 201)
self.conf_routes("\"routes$uri\"")
self.assertEqual(self.get(url='/3')['status'], 203)
self.conf_routes("\"routes/${method}\"")
self.assertEqual(self.post()['status'], 202)
self.conf_routes("\"routes${uri}\"")
self.assertEqual(self.get(url='/4*')['status'], 204)
self.conf_routes("\"routes/blah$method}\"")
self.assertEqual(self.get()['status'], 205)
def test_variables_invalid(self):
def check_variables(routes):
self.assertIn(
'error',
self.conf(routes, 'listeners/*:7080/pass'),
'invalid variables',
)
check_variables("\"routes$\"")
check_variables("\"routes${\"")
check_variables("\"routes${}\"")
check_variables("\"routes$ur\"")
check_variables("\"routes$uriblah\"")
check_variables("\"routes${uri\"")
check_variables("\"routes${{uri}\"")
if __name__ == '__main__':
TestVariables.main()
|
[
"zelenkov@nginx.com"
] |
zelenkov@nginx.com
|
7e96ded78edf879fd044bae181c6553700ee19a1
|
3db9ef78b62b01bf79dff6671b02c24192cd4648
|
/13/8.py
|
b0c91ec8d0d5b114b03beb2ee22681599281cb1e
|
[] |
no_license
|
rheehot/python-for-coding-test
|
401f5655af1a8cf20bc86edb1635bdc4a9e88e52
|
be95a0d0b3191bb21eab1075953fa472f4102351
|
refs/heads/master
| 2022-11-11T19:35:56.680749
| 2020-06-24T02:19:48
| 2020-06-24T02:19:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,759
|
py
|
from collections import deque
def get_next_pos(pos, board):
next_pos = [] # result to return (the reachable positions)
pos = list(pos) # current position
pos1_x, pos1_y, pos2_x, pos2_y = pos[0][0], pos[0][1], pos[1][0], pos[1][1]
# handle moves in the four directions (up, down, left, right)
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
for i in range(4):
pos1_next_x, pos1_next_y, pos2_next_x, pos2_next_y = pos1_x + dx[i], pos1_y + dy[i], pos2_x + dx[i], pos2_y + dy[i]
# if both target cells are empty
if board[pos1_next_x][pos1_next_y] == 0 and board[pos2_next_x][pos2_next_y] == 0:
next_pos.append({(pos1_next_x, pos1_next_y), (pos2_next_x, pos2_next_y)})
# if the robot currently lies horizontally
if pos1_x == pos2_x:
for i in [-1, 1]: # rotate upward or downward
if board[pos1_x + i][pos1_y] == 0 and board[pos2_x + i][pos2_y] == 0: # if the two cells above (or below) are both empty
next_pos.append({(pos1_x, pos1_y), (pos1_x + i, pos1_y)})
next_pos.append({(pos2_x, pos2_y), (pos2_x + i, pos2_y)})
# if the robot currently lies vertically
elif pos1_y == pos2_y:
for i in [-1, 1]: # rotate left or right
if board[pos1_x][pos1_y + i] == 0 and board[pos2_x][pos2_y + i] == 0: # if the two cells to the left (or right) are both empty
next_pos.append({(pos1_x, pos1_y), (pos1_x, pos1_y + i)})
next_pos.append({(pos2_x, pos2_y), (pos2_x, pos2_y + i)})
# return the positions reachable from the current position
return next_pos
def solution(board):
# pad the outside of the map with walls
n = len(board)
new_board = [[1] * (n + 2) for _ in range(n + 2)]
for i in range(n):
for j in range(n):
new_board[i + 1][j + 1] = board[i][j]
# run breadth-first search (BFS)
q = deque()
visited = []
pos = {(1, 1), (1, 2)} # starting position
q.append((pos, 0)) # push to the queue,
visited.append(pos) # then mark as visited
# repeat until the queue is empty
while q:
pos, cost = q.popleft()
# if the robot has reached (n, n), this is the shortest distance, so return it
if (n, n) in pos:
return cost
# examine the positions reachable from the current one
for next_pos in get_next_pos(pos, new_board):
# if a position has not been visited yet, enqueue it and mark it visited
if next_pos not in visited:
q.append((next_pos, cost + 1))
visited.append(next_pos)
return 0
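# Usage sketch: solution(board) takes an n x n grid of 0 (empty) / 1 (wall); the robot
# starts covering cells (0, 0) and (0, 1), and the return value is the minimal number
# of moves/rotations for either half of the robot to reach (n-1, n-1).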
|
[
"noreply@github.com"
] |
rheehot.noreply@github.com
|
43fceb1cbee1e30cbb8565be49c40ba5a3866b44
|
6d9fbe6e6a2abfd8455e92f6dba67a5f02d87f41
|
/lib/phonenumbers/shortdata/region_TR.py
|
4840c1a7757ec99155ac8ae581af9b61d4516426
|
[] |
no_license
|
JamesBrace/InfluenceUWebLaunch
|
549d0b48ff3259b139cb891a19cb8b5382ffe2c8
|
332d25940e4b1b45a7a2a8200f77c8413543b199
|
refs/heads/master
| 2021-09-04T04:08:47.594900
| 2018-01-15T16:49:29
| 2018-01-15T16:49:29
| 80,778,825
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 816
|
py
|
"""Auto-generated file, do not edit by hand. TR metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_TR = PhoneMetadata(id='TR', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d{2}', possible_number_pattern='\\d{3}', possible_length=(3,)),
toll_free=PhoneNumberDesc(),
premium_rate=PhoneNumberDesc(),
emergency=PhoneNumberDesc(national_number_pattern='1(?:1[02]|55)', possible_number_pattern='\\d{3}', example_number='112', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='1(?:1[02]|55)', possible_number_pattern='\\d{3}', example_number='112', possible_length=(3,)),
standard_rate=PhoneNumberDesc(),
carrier_specific=PhoneNumberDesc(),
short_data=True)
|
[
"james.brace@mail.mcgill.ca"
] |
james.brace@mail.mcgill.ca
|
9026e69e8f119456f9e40a29da8f7c7d3ef7372b
|
971e0efcc68b8f7cfb1040c38008426f7bcf9d2e
|
/tests/model_control/detailed/transf_Integration/model_control_one_enabled_Integration_PolyTrend_BestCycle_MLP.py
|
5274eb0449dc0222445102838746fbe7b7badd4e
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
antoinecarme/pyaf
|
a105d172c2e7544f8d580d75f28b751351dd83b6
|
b12db77cb3fa9292e774b2b33db8ce732647c35e
|
refs/heads/master
| 2023-09-01T09:30:59.967219
| 2023-07-28T20:15:53
| 2023-07-28T20:15:53
| 70,790,978
| 457
| 77
|
BSD-3-Clause
| 2023-03-08T21:45:40
| 2016-10-13T09:30:30
|
Python
|
UTF-8
|
Python
| false
| false
| 154
|
py
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Integration'] , ['PolyTrend'] , ['BestCycle'] , ['MLP'] );
|
[
"antoine.carme@laposte.net"
] |
antoine.carme@laposte.net
|
36fa9338504116911b5efc2f47a261d074edb8a3
|
3cd4902b67de144d8e6f36335e125d0548d8cf97
|
/submissions/runs/RUN10_vc_extended_model_img_unsorted.py
|
129ab86ee4ee8a26ac6546af8dd14261d13a222a
|
[
"MIT"
] |
permissive
|
stefantaubert/imageclef-lifelog-2019
|
5d201c2a28f15f608b9b58b94ab2ecddb5201205
|
ad49dc79db98a163c5bc282fb179c0f7730546b3
|
refs/heads/master
| 2022-10-06T12:42:30.011610
| 2022-08-29T13:35:09
| 2022-08-29T13:35:09
| 196,553,184
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,215
|
py
|
from src.models.pooling.Model_opts import *
from src.data.RawPlaces365Data import name_raw_places
from src.data.IndoorOutdoorData import name_io
from src.data.CocoYoloData import name_yolo
from src.data.CocoDetectronData import name_detectron
from src.data.CocoDefaultData import name_coco_default
from src.data.OpenImagesData import name_oi
from src.data.ImageNetData import name_imagenet
from src.data.SUNattributesData import name_sun
from submissions.runs.run_base import run_on_dev
from submissions.runs.run_base import run_on_test
opts = {
opt_model: {
opt_use_seg: False,
opt_subm_imgs_per_day: 0,
opt_subm_imgs_per_day_only_on_recall: False,
opt_comp_method: comp_method_datamax,
opt_comp_use_weights: True,
opt_query_src: query_src_title,
opt_use_tokenclustering: False,
opt_optimize_labels: True,
},
opt_data: {
name_coco_default: {
opt_weight: 1,
opt_threshold: 0.9,
opt_use_idf: False,
opt_idf_boosting_threshold: 0,
opt_intensify_factor_m: 1,
opt_intensify_factor_p: 1,
opt_ceiling: False,
},
name_detectron: {
opt_weight: 1,
opt_threshold: 0.95,
opt_use_idf: False,
opt_idf_boosting_threshold: 0,
opt_intensify_factor_m: 1,
opt_intensify_factor_p: 1,
opt_ceiling: False,
},
name_yolo: {
opt_weight: 1,
opt_threshold: 0.9,
opt_use_idf: False,
opt_idf_boosting_threshold: 0,
opt_intensify_factor_m: 1,
opt_intensify_factor_p: 1,
opt_ceiling: False,
},
name_imagenet: {
opt_weight: 1,
opt_threshold: 0.99,
opt_use_idf: False,
opt_idf_boosting_threshold: 0,
opt_intensify_factor_m: 1,
opt_intensify_factor_p: 1,
opt_ceiling: False,
},
name_oi: {
opt_weight: 1,
opt_threshold: 0,
opt_use_idf: True,
opt_idf_boosting_threshold: 0.5,
opt_intensify_factor_m: 2,
opt_intensify_factor_p: 2,
opt_ceiling: True,
},
name_raw_places: {
opt_weight: 1,
opt_threshold: 0,
opt_use_idf: False,
opt_idf_boosting_threshold: 0,
opt_intensify_factor_m: 1,
opt_intensify_factor_p: 1,
opt_ceiling: True,
},
name_io: {
opt_weight: 1,
opt_threshold: 0,
opt_use_idf: False,
opt_idf_boosting_threshold: 0,
opt_intensify_factor_m: 3,
opt_intensify_factor_p: 3,
opt_ceiling: False,
},
name_sun: {
opt_weight: 1,
opt_threshold: 0,
opt_use_idf: False,
opt_idf_boosting_threshold: 0,
opt_intensify_factor_m: 1,
opt_intensify_factor_p: 1,
opt_ceiling: False,
},
},
}
if __name__ == "__main__":
run_on_dev(opts)
run_on_test(opts)
|
[
"stefan.taubert@posteo.de"
] |
stefan.taubert@posteo.de
|
99fbbf8071ba11b6ce828063c78654215208e339
|
bede13ba6e7f8c2750815df29bb2217228e91ca5
|
/medical_lab_management/__manifest__.py
|
01ea6d84e8879c00ab859c47d9b8fa1631145e57
|
[] |
no_license
|
CybroOdoo/CybroAddons
|
f44c1c43df1aad348409924603e538aa3abc7319
|
4b1bcb8f17aad44fe9c80a8180eb0128e6bb2c14
|
refs/heads/16.0
| 2023-09-01T17:52:04.418982
| 2023-09-01T11:43:47
| 2023-09-01T11:43:47
| 47,947,919
| 209
| 561
| null | 2023-09-14T01:47:59
| 2015-12-14T02:38:57
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,048
|
py
|
# -*- coding: utf-8 -*-
#############################################################################
#
# Cybrosys Technologies Pvt. Ltd.
#
# Copyright (C) 2021-TODAY Cybrosys Technologies(<https://www.cybrosys.com>).
#
# You can modify it under the terms of the GNU AFFERO
# GENERAL PUBLIC LICENSE (AGPL v3), Version 3.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE (AGPL v3) for more details.
#
# You should have received a copy of the GNU AFFERO GENERAL PUBLIC LICENSE
# (AGPL v3) along with this program.
# If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
{
'name': "Medical Lab Management",
'version': '16.0.1.1.0',
'summary': """Manage Medical Lab Operations.""",
    'description': """Manage Medical Lab General Operations in Odoo 16.""",
'author': "Cybrosys Techno Solutions",
'maintainer': 'Cybrosys Techno Solutions',
'company': "Cybrosys Techno Solutions",
'website': "https://www.cybrosys.com",
'category': 'Industries',
'depends': ['base', 'mail', 'account', 'contacts'],
'data': [
'security/lab_users.xml',
'security/ir.model.access.csv',
'views/res_partner.xml',
'views/lab_patient_view.xml',
'views/test_unit_view.xml',
'views/lab_test_type.xml',
'views/lab_test_content_type.xml',
'views/physician_specialty.xml',
'views/physician_details.xml',
'views/lab_request.xml',
'views/lab_appointment.xml',
'views/account_invoice.xml',
'report/report.xml',
'report/lab_test_report.xml',
'report/lab_patient_card.xml',
],
'images': ['static/description/banner.png'],
'license': 'AGPL-3',
'installable': True,
'auto_install': False,
'application': True,
}
|
[
"ajmal@cybrosys.in"
] |
ajmal@cybrosys.in
|
2d48271b9fc70a4e9d62124e31981289ac41c030
|
cfb373af248f1f24124194913a52d395e6b826e7
|
/recruitment_plus/config/docs.py
|
e2d3da882af7144d3fec38727c269c5516b501da
|
[
"MIT"
] |
permissive
|
leaftechnology/recruitment-plus
|
616da8e1b9fc405d431e3e20559f55c2b5e78981
|
505478a9d4299b18089dba41a86d7ab3b4907289
|
refs/heads/master
| 2023-04-02T13:50:52.135805
| 2021-04-12T13:29:24
| 2021-04-12T13:29:24
| 328,859,542
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 340
|
py
|
"""
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/recruitment_plus"
# docs_base_url = "https://[org_name].github.io/recruitment_plus"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
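    """Set context values for the app's auto-generated docs pages (frappe docs hook)."""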
context.brand_html = "Recruitment Plus"
|
[
"jangeles@bai.ph"
] |
jangeles@bai.ph
|
8351589ff5cf619e24e9651f2c6e06360a29a3d5
|
0580861bd8b993ac92faec0ed88a339975d702c0
|
/reagent/model_managers/discrete_dqn_base.py
|
ea825859334f6a14b3a64a0e0ef59b203444de62
|
[
"BSD-3-Clause"
] |
permissive
|
Sandy4321/ReAgent
|
346094ae4c98121de5c54d504186f583de21daf0
|
0a387c1aeb922d242c705338fae9379becc82814
|
refs/heads/master
| 2023-07-17T01:27:17.762206
| 2021-08-19T03:15:15
| 2021-08-19T03:17:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,205
|
py
|
#!/usr/bin/env python3
import abc
import logging
from typing import Dict, List, Optional, Tuple
from reagent.core import types as rlt
from reagent.core.dataclasses import dataclass, field
from reagent.core.parameters import (
EvaluationParameters,
NormalizationData,
NormalizationKey,
RLParameters,
)
from reagent.data.data_fetcher import DataFetcher
from reagent.data.manual_data_module import ManualDataModule
from reagent.data.reagent_data_module import ReAgentDataModule
from reagent.gym.policies.policy import Policy
from reagent.gym.policies.predictor_policies import create_predictor_policy_from_model
from reagent.gym.policies.samplers.discrete_sampler import (
GreedyActionSampler,
)
from reagent.gym.policies.scorers.discrete_scorer import discrete_dqn_scorer
from reagent.model_managers.model_manager import ModelManager
from reagent.models.model_feature_config_provider import RawModelFeatureConfigProvider
from reagent.preprocessing.batch_preprocessor import (
BatchPreprocessor,
DiscreteDqnBatchPreprocessor,
)
from reagent.preprocessing.preprocessor import Preprocessor
from reagent.preprocessing.types import InputColumn
from reagent.reporting.discrete_dqn_reporter import DiscreteDQNReporter
from reagent.training import ReAgentLightningModule
from reagent.workflow.identify_types_flow import identify_normalization_parameters
from reagent.workflow.types import (
Dataset,
ModelFeatureConfigProvider__Union,
PreprocessingOptions,
ReaderOptions,
ResourceOptions,
RewardOptions,
TableSpec,
)
logger = logging.getLogger(__name__)
@dataclass
class DiscreteDQNBase(ModelManager):
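    """Shared base for discrete-action DQN model managers: wires up policy
    creation, state-feature preprocessing options, the data module and the
    reporter (docstring added editorially; behavior unchanged).
    """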
target_action_distribution: Optional[List[float]] = None
state_feature_config_provider: ModelFeatureConfigProvider__Union = field(
# pyre-fixme[28]: Unexpected keyword argument `raw`.
default_factory=lambda: ModelFeatureConfigProvider__Union(
raw=RawModelFeatureConfigProvider(float_feature_infos=[])
)
)
preprocessing_options: Optional[PreprocessingOptions] = None
reader_options: Optional[ReaderOptions] = None
eval_parameters: EvaluationParameters = field(default_factory=EvaluationParameters)
def __post_init_post_parse__(self):
super().__post_init_post_parse__()
@property
@abc.abstractmethod
def rl_parameters(self) -> RLParameters:
pass
def create_policy(
self,
trainer_module: ReAgentLightningModule,
serving: bool = False,
normalization_data_map: Optional[Dict[str, NormalizationData]] = None,
) -> Policy:
"""Create an online DiscreteDQN Policy from env."""
if serving:
assert normalization_data_map
return create_predictor_policy_from_model(
self.build_serving_module(trainer_module, normalization_data_map),
rl_parameters=self.rl_parameters,
)
else:
sampler = GreedyActionSampler()
# pyre-fixme[6]: Expected `ModelBase` for 1st param but got
# `Union[torch.Tensor, torch.nn.Module]`.
scorer = discrete_dqn_scorer(trainer_module.q_network)
return Policy(scorer=scorer, sampler=sampler)
@property
def state_feature_config(self) -> rlt.ModelFeatureConfig:
return self.state_feature_config_provider.value.get_model_feature_config()
def get_state_preprocessing_options(self) -> PreprocessingOptions:
state_preprocessing_options = (
self.preprocessing_options or PreprocessingOptions()
)
state_features = [
ffi.feature_id for ffi in self.state_feature_config.float_feature_infos
]
logger.info(f"state allowedlist_features: {state_features}")
state_preprocessing_options = state_preprocessing_options._replace(
allowedlist_features=state_features
)
return state_preprocessing_options
@property
def multi_steps(self) -> Optional[int]:
return self.rl_parameters.multi_steps
def get_data_module(
self,
*,
input_table_spec: Optional[TableSpec] = None,
reward_options: Optional[RewardOptions] = None,
reader_options: Optional[ReaderOptions] = None,
setup_data: Optional[Dict[str, bytes]] = None,
saved_setup_data: Optional[Dict[str, bytes]] = None,
resource_options: Optional[ResourceOptions] = None,
) -> Optional[ReAgentDataModule]:
return DiscreteDqnDataModule(
input_table_spec=input_table_spec,
reward_options=reward_options,
setup_data=setup_data,
saved_setup_data=saved_setup_data,
reader_options=reader_options,
resource_options=resource_options,
model_manager=self,
)
def get_reporter(self):
return DiscreteDQNReporter(
self.trainer_param.actions,
target_action_distribution=self.target_action_distribution,
)
class DiscreteDqnDataModule(ManualDataModule):
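    """Manual data module for discrete DQN: identifies state normalization
    parameters, queries training data, and builds the batch preprocessor
    (docstring added editorially; behavior unchanged).
    """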
@property
def should_generate_eval_dataset(self) -> bool:
return self.model_manager.eval_parameters.calc_cpe_in_training
def run_feature_identification(
self, input_table_spec: TableSpec
) -> Dict[str, NormalizationData]:
preprocessing_options = (
self.model_manager.preprocessing_options or PreprocessingOptions()
)
state_features = [
ffi.feature_id
for ffi in self.model_manager.state_feature_config.float_feature_infos
]
logger.info(f"Overriding allowedlist_features: {state_features}")
preprocessing_options = preprocessing_options._replace(
allowedlist_features=state_features
)
return {
NormalizationKey.STATE: NormalizationData(
dense_normalization_parameters=identify_normalization_parameters(
input_table_spec, InputColumn.STATE_FEATURES, preprocessing_options
)
)
}
def query_data(
self,
input_table_spec: TableSpec,
sample_range: Optional[Tuple[float, float]],
reward_options: RewardOptions,
data_fetcher: DataFetcher,
) -> Dataset:
return data_fetcher.query_data(
input_table_spec=input_table_spec,
discrete_action=True,
actions=self.model_manager.action_names,
include_possible_actions=True,
sample_range=sample_range,
custom_reward_expression=reward_options.custom_reward_expression,
multi_steps=self.model_manager.multi_steps,
gamma=self.model_manager.rl_parameters.gamma,
)
def build_batch_preprocessor(self) -> BatchPreprocessor:
state_preprocessor = Preprocessor(
self.state_normalization_data.dense_normalization_parameters,
use_gpu=self.resource_options.use_gpu,
)
return DiscreteDqnBatchPreprocessor(
num_actions=len(self.model_manager.action_names),
state_preprocessor=state_preprocessor,
use_gpu=self.resource_options.use_gpu,
)
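# Usage sketch (editor's note, not part of the original file): a concrete
# subclass supplies `rl_parameters` and a trainer module; `create_policy`
# then returns either a serving policy wrapping the exported serving module
# or a greedy training-time policy over `trainer_module.q_network` scores.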
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|