blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1b45af41c9bb4a3ddf55e8aac6b235e7d8843cac
|
f5d2a1459c81eb23a745bd63f41ef980c41ea0a4
|
/ZG-PhaseFour/code/controller/diffcontroller.py
|
22365e714fd1d2fa625911ff9f88462b4fcaa379
|
[] |
no_license
|
ErBingBing/django-tonado-crawler
|
6800bb0269e99e2454fb0a9079175ffe9d4d0a0b
|
db31b4cdf7ecc509f1a87aa325621943df825e98
|
refs/heads/master
| 2021-08-22T11:30:08.419583
| 2017-11-30T04:04:40
| 2017-11-30T04:04:40
| 112,562,722
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,508
|
py
|
# -*- coding: utf-8 -*-
###################################################################################################
# @file: diffcontroller.py
# @author: Sun Xinghua
# @date: 2016/11/21 0:15
# @version: Ver0.0.0.100
# @note:
###################################################################################################
from configuration import constant
from configuration.environment.configure import SpiderConfigure
from dao.spiderdao import SpiderDao
from log.spiderlog import Logger
################################################################################################################
# @class:DiffController
# @author:Sun Xinghua
# @date:2016/11/21 9:44
# @note:
################################################################################################################
from utility import const
from utility.fileutil import FileUtility
from utility.timeutility import TimeUtility
class DiffController:
    """Compares the current crawl's URL statistics against the previously
    stored values and writes per-query delta records to a diff file.

    NOTE(review): ``URLStorage``, ``StatisticsManager`` and ``URLCommentInfo``
    are referenced below but not imported in this file — presumably provided
    by a star-import or injected globals elsewhere in the project; verify.
    """
    # Diff output file name pattern: <suffix>_<timestamp>_diff.txt
    DIFF_FILE_NAME_FORMAT = '{suffix}_{ts}_diff.txt'

    ###################################################################################################
    # @functions:__init__
    # @param: none
    # @return:none
    # @note: initialize internal member variables (DB handle and diff-file path)
    ###################################################################################################
    def __init__(self):
        # DAO holding the previous crawl's per-URL statistics
        self.database = SpiderDao()
        suffix = SpiderConfigure.getconfig(const.SPIDER_STORAGE_DOMAIN,
                                           const.SPIDER_OUTPUT_FILENAME_SUFFIX)
        ts = TimeUtility.getcurrentdate(TimeUtility.TIMESTAMP_FORMAT)
        # Diff file lives under <output-path>/<current-date>/
        self.difffile = '{path}/{dt}/{file}'.format(
            path=SpiderConfigure.getinstance().getconfig(const.SPIDER_STORAGE_DOMAIN, const.SPIDER_OUTPUT_PATH),
            dt=TimeUtility.getcurrentdate(),
            file=DiffController.DIFF_FILE_NAME_FORMAT.format(suffix=suffix, ts=ts))

    ###################################################################################################
    # @functions:printdetail
    # @param: none
    # @return:none
    # @note: log the diff information and write one DiffInfomation record per query
    ###################################################################################################
    def show(self):
        # ins -> DiffInfomation accumulator, one per query/channel instance
        diffinfolist = {}
        # Previous crawl's serialized stats, keyed by URL
        predict = self.database.getall()
        instances = URLStorage.getinstances()
        Logger.getlogging().info(
            '##############################################################################################')
        Logger.getlogging().info('%8s|%8s|%8s|%8s|%8s|%8s|%8s|%20s|%16s' %
                                 ('key',
                                  'flag',
                                  'cmtnum',
                                  'clicknum',
                                  'votenum',
                                  'fansnum',
                                  'realnum',
                                  'pubtime',
                                  'timestamp'))
        for ins in instances.keys():
            diffinfolist[ins] = DiffInfomation()
            # Non-S1 instances are S2 queries; record the query string itself
            if ins != constant.SPIDER_CHANNEL_S1:
                diffinfolist[ins].channel = constant.SPIDER_CHANNEL_S2
                diffinfolist[ins].query = ins
            for key in instances[ins].urlinfodict:
                # Global success/failure counters for comment retrieval
                if instances[ins].urlinfodict[key].realnum > 0:
                    StatisticsManager.updategotcomments(1)
                elif instances[ins].urlinfodict[key].cmtnum > 0:
                    StatisticsManager.updatefailgotcomment(1)
                if predict and key in predict:
                    # URL was seen before: diff current values against stored ones
                    info = URLCommentInfo.fromstring(predict[key])
                    if not instances[ins].urlinfodict[key].isequal(info):
                        # '-' line = previous values, '+' line = current values
                        self.printinfo(ins, info, '-')
                        self.printinfo(ins, instances[ins].urlinfodict[key], '+')
                        # cmtnum takes precedence over realnum when positive
                        if instances[ins].urlinfodict[key].cmtnum > 0:
                            diffinfolist[ins].deltacmt += self.diff(instances[ins].urlinfodict[key].cmtnum, info.cmtnum)
                        else:
                            diffinfolist[ins].deltacmt += self.diff(instances[ins].urlinfodict[key].realnum,
                                                                    info.realnum)
                        diffinfolist[ins].deltaclick += self.diff(instances[ins].urlinfodict[key].clicknum,
                                                                  info.clicknum)
                        diffinfolist[ins].deltavote += self.diff(instances[ins].urlinfodict[key].votenum, info.votenum)
                        diffinfolist[ins].deltafans += self.diff(instances[ins].urlinfodict[key].fansnum, info.fansnum)
                else:
                    # New URL: its whole (non-negative) counts are the delta
                    self.printinfo(ins, instances[ins].urlinfodict[key], '+')
                    if instances[ins].urlinfodict[key].cmtnum > 0:
                        diffinfolist[ins].deltacmt += instances[ins].urlinfodict[key].cmtnum
                    else:
                        diffinfolist[ins].deltacmt += max(0, instances[ins].urlinfodict[key].realnum)
                    diffinfolist[ins].deltaclick += max(0, instances[ins].urlinfodict[key].clicknum)
                    diffinfolist[ins].deltavote += max(0, instances[ins].urlinfodict[key].votenum)
                    diffinfolist[ins].deltafans += max(0, instances[ins].urlinfodict[key].fansnum)
        Logger.getlogging().info(
            '##############################################################################################')
        # Rewrite the diff file from scratch each run
        if FileUtility.exists(self.difffile):
            FileUtility.remove(self.difffile)
        for key in diffinfolist.keys():
            Logger.getlogging().info(diffinfolist[key].tostring())
            FileUtility.writeline(self.difffile, diffinfolist[key].tostring())

    ###################################################################################################
    # @functions:printinfo
    # @param: key   instance/query key
    # @param: info  URL statistics record
    # @param: flag  '-' for the previous value, '+' for the current value
    # @return:none
    # @note: log one formatted URL-statistics row
    ###################################################################################################
    def printinfo(self, key, info, flag):
        Logger.getlogging().info('%8s|%8s|%8s|%8s|%8s|%8s|%8s|%20s|%16s' %
                                 (key,
                                  flag,
                                  str(info.cmtnum if info.cmtnum > 0 else info.realnum),
                                  str(info.clicknum),
                                  str(info.votenum),
                                  str(info.fansnum),
                                  str(info.realnum),
                                  str(info.pubtime),
                                  str(info.timestamp)))

    def diff(self, x, y):
        # Non-negative increase from y to x; negative inputs count as 0
        delta = max(0, x) - max(0, y)
        return max(0, delta)
class DiffInfomation:
    """Per-query accumulator of statistic deltas, serialized tab-separated."""

    # Record layout: channel, query, then the four delta counters
    STRING_FORMAT = '{channel}\t{query}\t{cmtnum}\t{clicknum}\t{votenum}\t{fansnum}'

    def __init__(self):
        # Default to the S1 channel; DiffController switches S2 queries over
        self.channel = constant.SPIDER_CHANNEL_S1
        self.query = ''
        self.deltacmt = 0
        self.deltaclick = 0
        self.deltavote = 0
        self.deltafans = 0

    def tostring(self):
        """Render this record as a single tab-separated line."""
        fields = {
            'channel': self.channel,
            'query': self.query,
            'cmtnum': self.deltacmt,
            'clicknum': self.deltaclick,
            'votenum': self.deltavote,
            'fansnum': self.deltafans,
        }
        return DiffInfomation.STRING_FORMAT.format(**fields)
|
[
"913702626@qq.com"
] |
913702626@qq.com
|
8a1cc7180086b8e03033515e70b945d413b517ef
|
7fe5f16fe49e71926c1dfc3a3b41e28741176f06
|
/example.py
|
4608fef3c68d41da1e74e4d68aeba516f6aac7ee
|
[] |
no_license
|
codesharedot/augur-price
|
5b7b315fed28a042bb32e0bf5059e96a263bf6f5
|
2fb9e29ba3eab108e09b5d95c5f390bedfd89530
|
refs/heads/master
| 2020-07-27T04:09:20.915412
| 2020-03-05T17:50:04
| 2020-03-05T17:50:04
| 208,862,862
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 601
|
py
|
import requests
import json
from forex_python.converter import CurrencyRates
import os

# Current USD -> EUR conversion rate for the quoted prices
c = CurrencyRates()
rate = c.get_rate('USD', 'EUR')
print(rate)

# CoinMarketCap v1 ticker endpoint for Augur
augur_api_url = 'https://api.coinmarketcap.com/v1/ticker/augur/'
response = requests.get(augur_api_url)
response_json = response.json()
print(response_json)

# Fix: iterate the already-parsed payload instead of calling
# response.json() a second time (redundant re-parse of the body).
for coin in response_json:
    price = coin.get("price_usd", "U$S Price not provided")
    # Round to 2 decimals for display
    coin_price = float(("{0:.2f}").format(float(price)))
    print("$ " + str(coin_price))
    coin_price_eur = float(("{0:.2f}").format(float(price)*rate))
    print("€ " + str(coin_price_eur))
|
[
"codeto@sent.com"
] |
codeto@sent.com
|
bc290340823ce97833d91f4123951f04075608e3
|
a84e1a1aac96612b32ba5adcc49a4005c0c5129e
|
/tensorflow_probability/python/experimental/mcmc/__init__.py
|
9bebbe5296b9f126968b664c8cafa86a5e6c0a37
|
[
"Apache-2.0"
] |
permissive
|
jedisom/probability
|
4fc31473d691d242a3e88c179ae3a9c555a29bb6
|
6791e7ce1c2b0a9057a19a8ea697aeaf796d4da7
|
refs/heads/master
| 2022-04-23T00:21:46.097126
| 2020-04-22T20:03:04
| 2020-04-22T20:04:59
| 258,031,151
| 1
| 0
|
Apache-2.0
| 2020-04-22T22:08:57
| 2020-04-22T22:08:56
| null |
UTF-8
|
Python
| false
| false
| 3,073
|
py
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""TensorFlow Probability experimental NUTS package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_probability.python.experimental.mcmc.elliptical_slice_sampler import EllipticalSliceSampler
from tensorflow_probability.python.experimental.mcmc.nuts import NoUTurnSampler
from tensorflow_probability.python.experimental.mcmc.particle_filter import ess_below_threshold
from tensorflow_probability.python.experimental.mcmc.particle_filter import infer_trajectories
from tensorflow_probability.python.experimental.mcmc.particle_filter import particle_filter
from tensorflow_probability.python.experimental.mcmc.particle_filter import reconstruct_trajectories
from tensorflow_probability.python.experimental.mcmc.particle_filter import resample_deterministic_minimum_error
from tensorflow_probability.python.experimental.mcmc.particle_filter import resample_independent
from tensorflow_probability.python.experimental.mcmc.particle_filter import resample_minimum_variance
from tensorflow_probability.python.experimental.mcmc.sample_sequential_monte_carlo import default_make_hmc_kernel_fn
from tensorflow_probability.python.experimental.mcmc.sample_sequential_monte_carlo import gen_make_hmc_kernel_fn
from tensorflow_probability.python.experimental.mcmc.sample_sequential_monte_carlo import gen_make_transform_hmc_kernel_fn
from tensorflow_probability.python.experimental.mcmc.sample_sequential_monte_carlo import make_rwmh_kernel_fn
from tensorflow_probability.python.experimental.mcmc.sample_sequential_monte_carlo import sample_sequential_monte_carlo
from tensorflow_probability.python.experimental.mcmc.sample_sequential_monte_carlo import simple_heuristic_tuning
from tensorflow.python.util.all_util import remove_undocumented # pylint: disable=g-direct-tensorflow-import
# Public API of this module: every name not listed here is stripped from the
# module namespace by remove_undocumented() below.
_allowed_symbols = [
    'EllipticalSliceSampler',
    'NoUTurnSampler',
    'ess_below_threshold',
    'infer_trajectories',
    'default_make_hmc_kernel_fn',
    'gen_make_hmc_kernel_fn',
    'gen_make_transform_hmc_kernel_fn',
    'make_rwmh_kernel_fn',
    'particle_filter',
    'sample_sequential_monte_carlo',
    'simple_heuristic_tuning',
    'reconstruct_trajectories',
    'resample_independent',
    'resample_minimum_variance',
    'resample_deterministic_minimum_error',
]

remove_undocumented(__name__, _allowed_symbols)
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
492e20fa5f9a33cc62fcd94e23aae05134077702
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/mixedreality/list_object_anchors_account_keys.py
|
477a8c32f1615fa9c02298fcbe21bb5a88e16df1
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 2,818
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'ListObjectAnchorsAccountKeysResult',
'AwaitableListObjectAnchorsAccountKeysResult',
'list_object_anchors_account_keys',
]
# NOTE: generated by the Pulumi SDK Generator — comments only; do not hand-edit logic.
@pulumi.output_type
class ListObjectAnchorsAccountKeysResult:
    """
    Developer Keys of account
    """
    def __init__(__self__, primary_key=None, secondary_key=None):
        # Validate then stash each value via pulumi.set so the
        # @pulumi.output_type machinery can expose the camelCase wire names.
        if primary_key and not isinstance(primary_key, str):
            raise TypeError("Expected argument 'primary_key' to be a str")
        pulumi.set(__self__, "primary_key", primary_key)
        if secondary_key and not isinstance(secondary_key, str):
            raise TypeError("Expected argument 'secondary_key' to be a str")
        pulumi.set(__self__, "secondary_key", secondary_key)

    @property
    @pulumi.getter(name="primaryKey")
    def primary_key(self) -> str:
        """
        value of primary key.
        """
        return pulumi.get(self, "primary_key")

    @property
    @pulumi.getter(name="secondaryKey")
    def secondary_key(self) -> str:
        """
        value of secondary key.
        """
        return pulumi.get(self, "secondary_key")
class AwaitableListObjectAnchorsAccountKeysResult(ListObjectAnchorsAccountKeysResult):
    """Awaitable wrapper so the invoke result can be used with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead `if False: yield` makes this a generator function without
        # ever yielding, which is what __await__ must return.
        if False:
            yield self
        return ListObjectAnchorsAccountKeysResult(
            primary_key=self.primary_key,
            secondary_key=self.secondary_key)
def list_object_anchors_account_keys(account_name: Optional[str] = None,
                                     resource_group_name: Optional[str] = None,
                                     opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListObjectAnchorsAccountKeysResult:
    """
    Developer Keys of account
    API Version: 2021-03-01-preview.


    :param str account_name: Name of an Mixed Reality Account.
    :param str resource_group_name: Name of an Azure resource group.
    """
    # Build the invoke arguments using the provider's camelCase wire names.
    __args__ = dict()
    __args__['accountName'] = account_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's pinned provider version.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:mixedreality:listObjectAnchorsAccountKeys', __args__, opts=opts, typ=ListObjectAnchorsAccountKeysResult).value

    return AwaitableListObjectAnchorsAccountKeysResult(
        primary_key=__ret__.primary_key,
        secondary_key=__ret__.secondary_key)
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
55579935208423de76144450d6a979bb0a66cb9c
|
f856c993a34fa2fbb228369dd267909445fa69b5
|
/vel/augmentations/to_tensor.py
|
d97ac0fb52984932b782019a024aec8100f5995f
|
[
"MIT"
] |
permissive
|
cclauss/vel
|
06fabeb75925ac2509162f12ac82fff3b8291720
|
78a6a20af80ff613898d2983c83fdb223634aaad
|
refs/heads/master
| 2020-04-01T03:46:50.339279
| 2018-10-09T05:36:21
| 2018-10-09T05:36:21
| 152,836,186
| 0
| 0
|
MIT
| 2018-10-13T04:48:44
| 2018-10-13T04:48:44
| null |
UTF-8
|
Python
| false
| false
| 436
|
py
|
import numpy as np
import torchvision.transforms.functional as F
import vel.api.data as data
class ToTensor(data.Augmentation):
    """Vel augmentation converting input data into a PyTorch tensor."""

    def __init__(self, mode='x', tags=None):
        super().__init__(mode, tags)

    def __call__(self, datum):
        # torchvision handles channel reordering and scaling
        return F.to_tensor(datum)

    def denormalize(self, datum):
        """Undo the conversion: move channels from first back to last axis."""
        return datum.numpy().transpose(1, 2, 0)
def create(mode='x', tags=None):
    """Vel factory function: build a ToTensor augmentation."""
    return ToTensor(mode=mode, tags=tags)
|
[
"jerry@millionintegrals.com"
] |
jerry@millionintegrals.com
|
41cc274eb12a46f98a11e97f115641445f2a7322
|
d0bdf444c71b724ecfd59b5bc6850962c56494cb
|
/labs/03-apply_vis/tests/q1_3.py
|
f28f5962b8a4bbc4dfe837cb9f86d0772094554c
|
[] |
no_license
|
ucsd-ets/dsc10-su20-public
|
10e3d0ff452b337f222baee330fe60d1465b0071
|
38787e6cc3e6210b4cc8a46350e5120845971c9f
|
refs/heads/master
| 2022-12-13T23:28:20.512649
| 2020-09-03T19:28:06
| 2020-09-03T19:28:06
| 275,905,339
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 957
|
py
|
test = {
'hidden': False,
'name': '1.3',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> # Your answer should be a number
>>> type(mark_hurd_pay) != str
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # Don't forget to give your answer in dollars, not millions of
>>> # Dollars!
>>> mark_hurd_pay != 5325
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # Don't forget to give your answer in dollars, not millions of
>>> # Dollars!
>>> mark_hurd_pay == 53250000
True
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
|
[
"eldridgejm@gmail.com"
] |
eldridgejm@gmail.com
|
c12deb8dc47fab1d4779dededfad990fb6c4aaec
|
85c82274a3888fa61795bb0600ab96eaf7665b6a
|
/UTS/D_letterTArray.py
|
16e3541a8e3a1482da2c3cb3821ac21e8b71dafd
|
[] |
no_license
|
refeed/StrukturDataA
|
8e5a214569f41b19c05842d003ede5941800482a
|
4d3b77bbd28158f1f1e64a49b8e90da731859407
|
refs/heads/master
| 2023-06-03T08:22:12.442536
| 2021-07-01T03:24:29
| 2021-07-01T03:24:29
| 360,478,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,324
|
py
|
'''
Letter T Array
Batas Run-time: 1 detik / test-case
Batas Memori: 32 MB
DESKRIPSI SOAL
Bayangkan sebuah tabung seperti gambar di atas. Tabung dengan tiga cabang dan
salah satunya menghadap ke atas. Kali ini kita akan bermain dengan sebuah array
yang bentuknya seperti di atas. Proses “Push” data baru seakan menjatuhkan bola
melalui cabang yang menghadap ke atas. Ketika banyak bola di bagian bawah adalah
genap, maka bola baru akan jatuh tepat di tengah, sedangkan jika banyak bola di
bagian bawah adalah ganjil maka bola jatuh akan berada di tepat sebelah kiri
bola paling tengah. (Contoh dapat dilihat bagian akhir soal)
PETUNJUK MASUKAN
Baris pertama adalah bilangan bulat N, banyak data yang akan di-”Push”. N buah
data selanjutnya adalah bilangan bulat yang akan di-”Push” pada array tersebut
secara terurut.
PETUNJUK KELUARAN
Outputkan dari kiri ke kanan data yang ditampilkan pada bagian bawah array
setelah semua data masuk
CONTOH MASUKAN 1
5
1 2 3 4 5
CONTOH KELUARAN 1
2 4 5 3 1
CONTOH MASUKAN 2
4
4 1 3 2
CONTOH KELUARAN 2
1 2 3 4
KETERANGAN
'''
num_of_data = int(input())
data_list = list(map(int, input().split()))
data_in_letter_t = []
for data in data_list:
data_in_letter_t.insert((len(data_in_letter_t) // 2), data)
print(' '.join(list(map(str, data_in_letter_t))) + ' ')
|
[
"rafidteam@gmail.com"
] |
rafidteam@gmail.com
|
39870bafb24d8c96b9d084eed585673395b338de
|
e61717bebf8f7d3790b0e98d868ea4ce33f9cc59
|
/TSIS10_upd/inserting many data.py
|
56017b448f12644780c3f8749161a0b6f3557868
|
[] |
no_license
|
KanagatS/PP2
|
81672264b9720af8b15408c9d8228eb6da25378e
|
b53f5164d6fb753392870607d0506c5a3daaef88
|
refs/heads/master
| 2023-04-20T10:29:53.298342
| 2021-05-21T18:24:55
| 2021-05-21T18:24:55
| 334,276,558
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
import psycopg2

# Local development connection.
# NOTE(review): empty password and non-default port 6666 look environment
# specific — confirm before reuse; credentials should not be hard-coded.
con = psycopg2.connect(
    host='localhost',
    database='tsis',
    user='postgres',
    port=6666,
    password=''
)
cur = con.cursor()
# ===============================================
# Parameterized INSERT; executemany runs it once per tuple in the sequence.
sql = """INSERT INTO student(name) VALUES(%s);"""
cur.executemany(sql, [('is',), ('KBTU',), ('student',)])
# Persist all three rows in one transaction.
con.commit()
# ===============================================
cur.close()
con.close()
|
[
"k_sapiya@kbtu.kz"
] |
k_sapiya@kbtu.kz
|
08de08127f62aa59ec24287edeb7a29787f3ee2f
|
a46d135ba8fd7bd40f0b7d7a96c72be446025719
|
/packages/python/plotly/plotly/validators/mesh3d/colorbar/title/_side.py
|
95426ff6b73e9ae7aeac6fa6b0ff209b476d779f
|
[
"MIT"
] |
permissive
|
hugovk/plotly.py
|
5e763fe96f225d964c4fcd1dea79dbefa50b4692
|
cfad7862594b35965c0e000813bd7805e8494a5b
|
refs/heads/master
| 2022-05-10T12:17:38.797994
| 2021-12-21T03:49:19
| 2021-12-21T03:49:19
| 234,146,634
| 0
| 0
|
MIT
| 2020-01-15T18:33:43
| 2020-01-15T18:33:41
| null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
import _plotly_utils.basevalidators
class SideValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the enumerated ``mesh3d.colorbar.title.side`` property."""
    # NOTE: plotly codegen-style class — callers may override edit_type/values
    # through kwargs; kwargs.pop supplies the schema defaults.
    def __init__(
        self, plotly_name="side", parent_name="mesh3d.colorbar.title", **kwargs
    ):
        super(SideValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
            values=kwargs.pop("values", ["right", "top", "bottom"]),
            **kwargs
        )
|
[
"noreply@github.com"
] |
hugovk.noreply@github.com
|
a057006ec8e593fa858cdfaccd187d99b327366a
|
2be63b91334873f3044a0306344cc907828837b3
|
/deluxhotel/blog/admin.py
|
d61c286a38247c7ff27eb520836214b22d8382fb
|
[] |
no_license
|
DmitriiGrekov/delux_hotel
|
ffcb34c99d5740e8591f5eb7a15ea5e72cd0f5be
|
0ac14d018166752827f486ba9d3e9553f0b52b67
|
refs/heads/master
| 2023-07-03T02:46:41.355875
| 2021-08-05T16:21:22
| 2021-08-05T16:21:22
| 393,068,592
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 988
|
py
|
from django.contrib import admin
from .models import TagsModel, BlogPostModel, CommentModel
@admin.register(TagsModel)
class TagsAdmin(admin.ModelAdmin):
    """Admin page for blog tags; the slug is auto-filled from the name."""
    prepopulated_fields = {'slug': ('name',)}
    list_display = ('name', 'slug')
@admin.register(BlogPostModel)
class BlogAdmin(admin.ModelAdmin):
    """Admin page for blog posts."""
    list_display = ('title', 'author', 'publish_date', 'active')
    list_display_links = ('title', 'author')
    search_fields = ('title', 'author', 'text',)
    list_filter = ('author', 'publish_date', 'tags')
    # Slug is derived from both title and author
    prepopulated_fields = {'slug': ('title', 'author',)}
@admin.register(CommentModel)
class CommentAdmin(admin.ModelAdmin):
    """Admin page for blog comments."""
    list_display = ('name', 'email', 'date_publish')
    list_display_links = ('name', 'email')
    search_fields = ('name', 'email')
    list_filter = ('date_publish', 'post')
|
[
"grekovdima7@gmail.com"
] |
grekovdima7@gmail.com
|
e67ff95685ab64f98a147c59594b3b7a7c4791ce
|
f59c06566e729380b032f050f852621f425553ac
|
/plugins/maze.py
|
83aeeebf2601c7cabe7ca38404b7a26d1aa3638a
|
[] |
no_license
|
JonnoFTW/TonsleyLEDManager
|
c23e27cf7e9f61f97d2c42e3331bceae3fe66231
|
681771584f2b105a2b190641be2d2d1d9d785be1
|
refs/heads/master
| 2021-06-07T18:24:54.113308
| 2021-05-02T09:43:19
| 2021-05-02T09:43:19
| 55,032,673
| 4
| 7
| null | 2017-09-04T04:13:26
| 2016-03-30T04:33:25
|
Python
|
UTF-8
|
Python
| false
| false
| 6,100
|
py
|
class Runner:
    """Maze generation + solving animation plugin for the LED board.

    NOTE(review): Python 2 code (`print` statement in solve(), generator
    ``.next()`` in run()); porting to Python 3 would change what the plugin
    host must run — left as-is.
    """
    # RGB palette used when rendering cell states
    blue = [0, 0, 255]
    white = [255, 255, 255]
    black = [0, 0, 0]
    green = [0, 255, 0]
    red = [255, 0, 0]

    def __init__(self, board_dimensions):
        # board_dimensions is treated as (height, width) below
        self.dims = board_dimensions
        import numpy as np
        self.np = np
        # NOTE(review): threshold=np.nan is rejected by modern numpy;
        # worked on the numpy versions this plugin originally targeted.
        np.set_printoptions(threshold=np.nan)
        self.width = board_dimensions[1]
        self.height = board_dimensions[0]
        self.reset()

    # Cell state legend:
    # blue for the runner position
    # white for path
    # red for frontier
    # black for walls
    def reset(self):
        """Re-initialise the grid and restart maze generation."""
        self.maze = self.np.zeros((self.height, self.width), dtype=self.np.uint8)
        # Lay a lattice of walls on every even row and column
        for x in range(self.maze.shape[0]):
            if x % 2 == 0:
                self.maze[x].fill(1)
        for y in range(self.maze.shape[1]):
            if y % 2 == 0:
                self.maze[:, y].fill(1)
        self.generated = False
        # Random growth start cell; both need to be odd numbers
        self.C = [(self.np.random.choice(range(3, self.height-3, 2)),
                   self.np.random.choice(range(3, self.width-3, 2)), 'W')]
        t = self.C[0]
        # Carve the start cell and its four direct neighbours
        self.maze[t[0], t[1]] = 0
        self.maze[t[0]-1, t[1]] = 0
        self.maze[t[0]+1, t[1]] = 0
        self.maze[t[0], t[1]+1] = 0
        self.maze[t[0], t[1]-1] = 0
        self.maze_generator = self.step()
        # Top and bottom edges stay solid walls
        self.maze[0].fill(1)
        self.maze[-1].fill(1)

    def render_maze(self):
        """Map cell states to an RGB frame (height x width x 3 uint8)."""
        out = self.np.empty((self.height, self.width, 3), dtype=self.np.uint8)
        for x, row in enumerate(self.maze):
            for y, cell in enumerate(row):
                if cell <= 0 or cell == 4:
                    out[x, y] = self.white
                elif cell == 1:
                    out[x, y] = self.black
                elif cell == 2:
                    out[x, y] = self.red
                elif cell == 3 or cell == -2:
                    out[x, y] = self.green
                elif cell == 5:
                    out[x, y] = self.blue
        return out

    def step(self):
        """Generator: grow the maze one cell per yielded frame."""
        while self.C:
            # Pick a random frontier cell to extend from
            target = self.C[self.np.random.randint(0, len(self.C))]
            n = self.neighbours(target[0], target[1])
            self.np.random.shuffle(n)
            if not n:
                # Dead end: mark the cell (and the wall it entered through) finished (4)
                self.maze[target[0], target[1]] = 4
                if target[2] == 'S':
                    self.maze[target[0], target[1]-1] = 4
                elif target[2] == 'N':
                    self.maze[target[0], target[1]+1] = 4
                elif target[2] == 'E':
                    self.maze[target[0]-1, target[1]] = 4
                elif target[2] == 'W':
                    self.maze[target[0]+1, target[1]] = 4
                self.C.remove(target)
            else:
                # mark visited cells as 2, including the wall cell crossed
                new_cell = n.pop()
                self.maze[new_cell[0], new_cell[1]] = 2
                if new_cell[2] == 'S':
                    self.maze[new_cell[0], new_cell[1]-1] = 2
                elif new_cell[2] == 'N':
                    self.maze[new_cell[0], new_cell[1]+1] = 2
                elif new_cell[2] == 'E':
                    self.maze[new_cell[0]-1, new_cell[1]] = 2
                elif new_cell[2] == 'W':
                    self.maze[new_cell[0]+1, new_cell[1]] = 2
                self.C.append(new_cell)
            yield self.render_maze()

    def neighbours(self, x, y, v=2):
        """Open neighbour cells at distance v, tagged with the move direction."""
        return [(nx, ny, d) for nx, ny, d in [(x, y+v, 'S'), (x, y-v, 'N'), (x+v, y, 'E'), (x-v, y, 'W')]
                if 1 <= nx < self.maze.shape[0] and 0 <= ny < self.maze.shape[1] and self.maze[nx, ny] <= 0]

    def solve(self):
        """Generator: one depth-first solving move per yield."""
        # run the next step in maze
        # update runner position
        # get the random neighbours and move into one of them
        while self.stack:
            # get the neighbours of the current cell
            x, y, d = self.runner
            self.maze[x, y] = 5
            n = self.neighbours(x, y, 1)
            # Reaching the second-to-last row counts as solved
            if x >= self.height - 2:
                print "Solved"
                break
            if not n:
                # Dead end: backtrack to the most recent frontier cell
                self.runner = self.stack.pop()
                self.maze[self.runner[0], self.runner[1]] = 2
                yield
            else:
                self.stack.extend(n)
                new_cell = n[0]
                self.runner = new_cell
                self.maze[new_cell[0], new_cell[1]] = 0
                yield

    def run(self):
        """Return the next RGB frame; called once per display tick."""
        if not self.generated:
            # do the next step in the maze generator
            try:
                return self.maze_generator.next()
            except StopIteration:
                # Generation done: clear all non-wall markers back to path (0)
                self.generated = True
                for x in range(self.maze.shape[0]):
                    for y in range(self.maze.shape[1]):
                        if self.maze[x, y] != 1:
                            self.maze[x, y] = 0
                starts = list(self.np.where(self.maze[1] == 0)[0])  # first white cell in the first column
                self.runner = [0, starts.pop(), 'E']
                self.maze_solver = self.solve()
                self.stack = [self.runner]
                return self.render_maze()
        else:
            try:
                self.maze_solver.next()
            except StopIteration:
                # we hit the end of the maze or it's unsolvable!
                self.reset()
            return self.render_maze()
if __name__ == "__main__":
    # Stand-alone preview: render the 165x17 LED board via pygame at 8x scale.
    import pygame, sys
    # NOTE(review): fpsClock is created but .tick(FPS) is never called,
    # so this preview loop runs unthrottled — confirm intended.
    FPS = 60
    fpsClock = pygame.time.Clock()
    rows = 17
    cols = 165
    board_dimensions = (cols, rows)
    disp_size = (cols * 8, rows * 8)
    pygame.init()
    size = width, height = board_dimensions
    screen = pygame.display.set_mode(disp_size)
    runner = Runner(board_dimensions)
    while True:
        for e in pygame.event.get():
            if e.type == pygame.QUIT:
                sys.exit()
        screen.fill((0, 0, 0))
        # draw the pixels
        pixels = runner.run()
        temp_surface = pygame.Surface(board_dimensions)
        pygame.surfarray.blit_array(temp_surface, pixels)
        # Scale the board-sized surface up to the display window
        pygame.transform.scale(temp_surface, disp_size, screen)
        pygame.display.flip()
|
[
"jonmac1@gmail.com"
] |
jonmac1@gmail.com
|
4280ff24cdcb735005428f197ee64f440e0f77ac
|
3a09048cb841d91ee39ef054f35b8572f3c166fb
|
/OnlineJudge/ojproblem/apps.py
|
1b720ddb973a9fee0b68995e95e12486f9580439
|
[] |
no_license
|
lyyyuna/LihuLabOJ
|
91eddf27a16dca5488d5406e0224cf84544254b9
|
e1e8e5ae9da629a201f734a33d264bcb6ae2f420
|
refs/heads/master
| 2022-12-14T02:53:24.786670
| 2019-08-29T03:07:22
| 2019-08-29T03:07:22
| 89,581,070
| 1
| 2
| null | 2022-12-08T08:32:24
| 2017-04-27T09:34:55
|
Python
|
UTF-8
|
Python
| false
| false
| 158
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class OjproblemConfig(AppConfig):
    """Django application configuration for the ``ojproblem`` app."""
    # Label under which Django's app registry knows this app
    name = 'ojproblem'
|
[
"lyyyuna@gmail.com"
] |
lyyyuna@gmail.com
|
bae085a67b4f224655e429058f60fbc44a5a185e
|
81407be1385564308db7193634a2bb050b4f822e
|
/the-python-standard-library-by-example/argparse/argparse_fromfile_prefix_chars.py
|
0d40b273f431ba758c22fcbbd05759f0f70e9057
|
[
"MIT"
] |
permissive
|
gottaegbert/penter
|
6db4f7d82c143af1209b4259ba32145aba7d6bd3
|
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
|
refs/heads/master
| 2022-12-30T14:51:45.132819
| 2020-10-09T05:33:23
| 2020-10-09T05:33:23
| 305,266,398
| 0
| 0
|
MIT
| 2020-10-19T04:56:02
| 2020-10-19T04:53:05
| null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
import argparse

# Demo of fromfile_prefix_chars: an argument starting with '@' names a file
# whose contents are read as extra command-line arguments (one per line).
parser = argparse.ArgumentParser(description='Short sample app',
                                 fromfile_prefix_chars='@',
                                 )
parser.add_argument('-a', action="store_true", default=False)
parser.add_argument('-b', action="store", dest="b")
parser.add_argument('-c', action="store", dest="c", type=int)
# Expects argparse_fromfile_prefix_chars.txt in the working directory.
print(parser.parse_args(['@argparse_fromfile_prefix_chars.txt']))
|
[
"350840291@qq.com"
] |
350840291@qq.com
|
fa07e854a21f6965ab962f6b3f56dc7d7a79a9ad
|
e5453b6a4b84a32ccca7281d438b7a7fa1853f58
|
/src/ibmc/checks/huawei_ibmc_memory_check.py
|
e9a2b6fbe8b988c64e006753256d5d2b4991b3ab
|
[
"MIT"
] |
permissive
|
Huawei/Server_Management_Plugin_Check_MK
|
88445d9da581c347c5e82cf590453c4cb2c3d53c
|
88398c7c8affe0b2064f418de931d69e36afde67
|
refs/heads/master
| 2021-05-11T11:40:55.302518
| 2021-01-27T09:53:17
| 2021-01-27T09:53:17
| 117,641,709
| 1
| 4
| null | 2018-01-31T05:38:01
| 2018-01-16T06:30:39
| null |
UTF-8
|
Python
| false
| false
| 2,009
|
py
|
#!/usr/bin/python
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# Raw SNMP health state -> check status code
# (per _health_str below: 0=OK, 1=WARNING, 2=CRITICAL, 3=ABSENCE)
_health_map = {"1": 0, "2": 1, "3": 1, "4": 2, "5": 3, "6": 3}
# Status code -> label used in the service output.
# NOTE(review): "UNKOWN" typo kept verbatim — emitted text may be matched downstream.
_health_str = {0: "OK", 1: "WARNING", 2: "CRITICAL", 3: "ABSENCE", 4: "UNKOWN"}


def inventory_hw_memory_health(info):
    """Discovery function: every host gets one 'MEMORY status' service."""
    service = ('MEMORY status', None)
    return [service]


def check_hw_memory_health(item, params, info):
    """Evaluate overall and per-module memory health from the SNMP rows.

    ``info[0][0]`` carries the overall state value(s); ``info[1]`` carries
    (state, index) pairs for individual memory modules. Returns a
    ``(status, summary)`` tuple, or a bare message when no data was queried.
    """
    overall = 3
    details = ''
    try:
        # Last value wins; in practice this row holds a single overall state.
        for state in info[0][0]:
            overall = _health_map.get(state)
        for state, index in info[1]:
            module_status = _health_map.get(state)
            # Skip unmapped raw states and absent modules (3).
            if module_status is None or module_status == 3:
                continue
            details += " %s health status is %s;" % (str(index), _health_str.get(module_status))
        return overall, "healthy status is %s, %s" % (_health_str.get(overall), details)
    except IndexError:
        # The SNMP walk produced no rows at all.
        return "healthy status is not queried."
# Check registration. `check_info` and `scan` are supplied by the check_mk
# runtime and the included helper file, not defined in this module.
check_info["huawei_ibmc_memory_check"] = {
    "inventory_function": inventory_hw_memory_health,
    "check_function": check_hw_memory_health,
    "service_description": "%s",
    "includes": ["huawei_ibmc_util_.include"],
    # Two fetches from the iBMC enterprise subtree: presumably the overall
    # memory health value and the per-module (state, index) columns —
    # verify against the Huawei iBMC MIB.
    "snmp_info": [
        (".1.3.6.1.4.1.2011.2.235.1.1.16", ["1.0", ]),
        (".1.3.6.1.4.1.2011.2.235.1.1.16", ["50.1.6", "50.1.10"])
    ],
    "snmp_scan_function": scan,
}
|
[
"31431891+serverplugin@users.noreply.github.com"
] |
31431891+serverplugin@users.noreply.github.com
|
6f0d1ed0816ccbc48e4a42bfff7f7583a50f9a16
|
781f408fd9dc9fd111d5ac47009ab580636625e5
|
/examples/test_get_pdf_text.py
|
32573412e9a3e0199172d9ba0bd2f4394ab87c0d
|
[
"MIT"
] |
permissive
|
doiteachday/SeleniumBase
|
fb003257b63e157b734d2b34a9c5794d74748322
|
8ded5fac84b85f1d4f43384d0836dbf4a1fc390e
|
refs/heads/master
| 2023-04-10T10:13:50.372864
| 2021-05-04T02:51:43
| 2021-05-04T02:51:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
from seleniumbase import BaseCase
class PdfTests(BaseCase):
    def test_get_pdf_text(self):
        """Download a sample PDF and print the text of its first page."""
        pdf_url = ("https://nostarch.com/download/"
                   "Automate_the_Boring_Stuff_sample_ch17.pdf")
        extracted = self.get_pdf_text(pdf_url, page=1)
        print("\n" + extracted)
|
[
"mdmintz@gmail.com"
] |
mdmintz@gmail.com
|
2594bcbf34b79c8031b60bfcbb34bbb0796cf491
|
0175bdc4c896e8019b2c5f7442097cf6b9c1d14a
|
/pylibs/BasePage.py
|
59ed9e3979940a13702c32503f2b2f7648643462
|
[] |
no_license
|
GGGYB/shiiia
|
323ecee869dcd66510baf0ea7bc30b29c2bfb5ad
|
9760f170cbbec37cc340c3b020f36cdd9855e7cd
|
refs/heads/master
| 2023-05-02T07:58:37.023266
| 2021-05-31T09:41:07
| 2021-05-31T09:41:07
| 334,103,151
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,408
|
py
|
# -*- coding: utf-8 -*-
# Author: sharon
# Datetime: 2021/1/29 14:32
# File: $ {NAME}
from pylibs.MyDriver import Driver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
import time
class BasePage():
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
def get_element(self,locator):
WebDriverWait(driver=Driver.wd,timeout=10,poll_frequency=0.5).until(
EC.visibility_of_element_located(locator)
)
return Driver.wd.find_element(*locator)
def get_elements(self,locator):
WebDriverWait(driver=Driver.wd,timeout=10,poll_frequency=0.5).until(
EC.visibility_of_element_located(locator)
)
return Driver.wd.find_elements(*locator)
def get_element_text(self,locators):
eleText = []
for ele in self.get_elements(locators):
eleText.append(ele.text)
print(eleText)
return eleText
def scroll_to_window(self,step,scrollSize):
for i in range(step):
Driver.wd.execute_script(f'window.scrollBy(0,{scrollSize})')
def to_page(self,url):
Driver.wd.get(url)
# 可以修改scrollTop的值来定位右侧滚动条的位置,0是最最顶部,10000是最底部。
def scroll_to_extreme(self,num):
js = f"var q=document.documentElement.scrollTop={num}"
Driver.wd.execute_script(js)
|
[
"nlnongling@163.com"
] |
nlnongling@163.com
|
bc860517d0de7a0508431b8414cb45c85ec7b3e7
|
979cf7d5e2136e7e701df27da29622f9196f219e
|
/Files/views.py
|
6a8462440f919b5546e343a386846815375f1e1c
|
[] |
no_license
|
RafayelGardishyan/DjangoTeamwork
|
e68c33844680c6a4e345fe8dfc2d3b4b49ccf2ef
|
6b030b161b67976445b292f0d5f7366a5eb48560
|
refs/heads/master
| 2021-09-16T07:03:51.280141
| 2018-01-06T20:50:11
| 2018-01-06T20:50:11
| 114,727,815
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,901
|
py
|
import random
from django.http import HttpResponse
from django.shortcuts import redirect
from django.template import loader
from Start.models import Admin
from .forms import FileForm
from .models import File
from webhooks import Webhook
# Create your views here.
values = {
'securitykey': "",
'whurl': "https://discordapp.com/api/webhooks/399280451258417162/ex_ix9eIhkltscgcS3AyiDt4iVqBpowzAg4LZIFsbuwcJ01jUMkM8Jp78B5YWX6zPoLM",
}
def index(request):
if request.session.get('logged_in'):
files = File.objects.order_by('added_on')
template = loader.get_template('files/index.html')
context = {
'files': files,
}
return HttpResponse(template.render(context, request))
else:
return redirect('/')
def delete(request, slug):
if request.session.get('logged_in'):
file = File.objects.get(slug=slug)
filename = file.name
user = Admin.objects.get(id=1)
if request.GET:
if request.GET['ak'] == values['securitykey']:
file.deletefile()
file.delete()
template = loader.get_template('error.html')
context = {
'message': 'Successfully deleted file ' + filename,
'link': {
'text': 'Return to Files home',
'url': '/files'
}
}
embed = Webhook(values['whurl'], color=123123)
embed.set_author(name='Codeniacs Website',
icon='https://codename-codeniacs.herokuapp.com/static/favicon.png')
embed.set_desc('Deleted File')
embed.add_field(name='Name', value=filename)
embed.set_thumbnail('https://codename-codeniacs.herokuapp.com/static/favicon.png')
embed.set_footer(text='This message was automatically sent form Codeniacs Website',
icon='https://codename-codeniacs.herokuapp.com/static/favicon.png', ts=True)
embed.post()
return HttpResponse(template.render(context, request))
else:
template = loader.get_template('error.html')
context = {
'message': 'Wrong Admin Key',
'link': {
'text': 'Return to Files home',
'url': '/files'
}
}
return HttpResponse(template.render(context, request))
else:
securitykey = ""
for i in range(6):
securitykey += str(random.randint(0, 9))
print(securitykey)
user.sendemail('Delete File', 'Your Security Key is ' + str(securitykey))
values['securitykey'] = securitykey
template = loader.get_template('files/delete.html')
context = {}
return HttpResponse(template.render(context, request))
else:
return redirect('/')
def add(request):
# if this is a POST request we need to process the form data
if request.session.get('logged_in'):
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = FileForm(request.POST, files=request.FILES)
# check whether it's valid:
if form.is_bound:
if form.is_valid():
form.save()
template = loader.get_template('error.html')
context = {
'message': 'Added File',
'link': {
'text': 'Return to Files home',
'url': '/files',
},
'slink': {
'text': 'Add an other File',
'url': '/files/add'
},
}
embed = Webhook(values['whurl'], color=123123)
embed.set_author(name='Codeniacs Website',
icon='https://codename-codeniacs.herokuapp.com/static/favicon.png')
embed.set_desc('Added File')
embed.add_field(name='Name', value=form.cleaned_data['file'])
embed.set_thumbnail('https://codename-codeniacs.herokuapp.com/static/favicon.png')
embed.set_footer(text='This message was automatically sent form Codeniacs Website',
icon='https://codename-codeniacs.herokuapp.com/static/favicon.png', ts=True)
embed.post()
return HttpResponse(template.render(context, request))
else:
template = loader.get_template('error.html')
context = {
'message': 'Form is not valid',
'link': {
'text': 'Return to Files home',
'url': '/files'
}
}
return HttpResponse(template.render(context, request))
else:
template = loader.get_template('error.html')
context = {
'message': 'Form is not bound',
'link': {
'text': 'Return to Files home',
'url': '/files'
}
}
return HttpResponse(template.render(context, request))
# if a GET (or any other method) we'll create a blank form
else:
form = FileForm()
template = loader.get_template('files/add.html')
context = {'form': form}
return HttpResponse(template.render(context, request))
else:
return redirect('/')
|
[
"rgardishyan@gmail.com"
] |
rgardishyan@gmail.com
|
9531a59085c598825838be55b85bd85e79853aaa
|
327e3c96db66c055d47be868ef5346ae3515b752
|
/SpiralMatrix.py
|
589762d3c9b41840dab60d26be27ea76aec14b69
|
[] |
no_license
|
dabay/LeetCodePython
|
790a17893c46aa3a003ef95026471c21d869570d
|
fdac2086bc793584e05445f5d9afa74fee6fcb33
|
refs/heads/master
| 2021-03-12T23:34:04.496651
| 2017-08-24T15:55:02
| 2017-08-24T15:55:02
| 27,840,086
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,611
|
py
|
# -*- coding: utf8 -*-
'''
Given a matrix of m x n elements (m rows, n columns), return all elements of the matrix in spiral order.
For example,
Given the following matrix:
[
[ 1, 2, 3 ],
[ 4, 5, 6 ],
[ 7, 8, 9 ]
]
You should return [1,2,3,6,9,8,7,4,5].
'''
class Solution:
# @param strs, a list of strings
# @return a list of strings
def spiralOrder(self, matrix):
def spiral_order(result, matrix, start_x, start_y, m, n):
if n==0 or m==0:
return
if n == 1:
for i in xrange(m):
result.append(matrix[start_x+i][start_y])
return
if m == 1:
for i in xrange(n):
result.append(matrix[start_x][start_y+i])
return
for i in xrange(start_y, start_y+n):
result.append(matrix[start_x][i])
for i in xrange(start_x+1, start_x+m):
result.append(matrix[i][start_y+n-1])
for i in xrange(start_y+n-1-1, start_y-1, -1):
result.append(matrix[start_x+m-1][i])
for i in xrange(start_x+m-1-1, start_x, -1):
result.append(matrix[i][start_y])
return spiral_order(result, matrix, start_x + 1, start_y + 1, m-2, n-2)
if len(matrix) == 0:
return []
result = []
spiral_order(result, matrix, 0, 0, len(matrix), len(matrix[0]))
return result
if __name__ == "__main__":
s = Solution()
input = [[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]]
print s.spiralOrder(input)
|
[
"dabay.wong@gmail.com"
] |
dabay.wong@gmail.com
|
4b47d4fae81b2e9fe90c6198f017118e6e06407e
|
0b1c6a559c8f8f38ec0a9b62c5fdec786488c77e
|
/appspot/time_clock/migrations/0003_auto_20171005_1604.py
|
7f9c3ef6d7a0795f1365aaad23df686301d777d4
|
[] |
no_license
|
smartworld1000/django_appspot
|
9372b1edeb3e9d2507ca49463d34b0cf22e652ed
|
d801d910ff52b83a45f3bf68334bb06a91b81221
|
refs/heads/master
| 2021-05-14T03:39:07.613510
| 2017-11-05T07:42:59
| 2017-11-05T07:42:59
| 116,621,632
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 780
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-10-05 16:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('time_clock', '0002_auto_20171005_1551'),
]
operations = [
migrations.AlterField(
model_name='time_clock',
name='timein',
field=models.DateTimeField(help_text=''),
),
migrations.AlterField(
model_name='time_clock',
name='timeout',
field=models.DateTimeField(help_text=''),
),
migrations.AlterField(
model_name='time_clock',
name='workdate',
field=models.DateField(help_text=''),
),
]
|
[
"itokonoha8972@outlook.com"
] |
itokonoha8972@outlook.com
|
0b3008da0bf7f113d48b9ab99344fb70cf022591
|
90f729624737cc9700464532a0c67bcbfe718bde
|
/lino_xl/lib/cv/mixins.py
|
2cf6c081a9ea76a806bd42afe738a54f12383a91
|
[
"AGPL-3.0-only"
] |
permissive
|
lino-framework/xl
|
46ba6dac6e36bb8e700ad07992961097bb04952f
|
642b2eba63e272e56743da2d7629be3f32f670aa
|
refs/heads/master
| 2021-05-22T09:59:22.244649
| 2021-04-12T23:45:06
| 2021-04-12T23:45:06
| 52,145,415
| 1
| 5
|
BSD-2-Clause
| 2021-03-17T11:20:34
| 2016-02-20T09:08:36
|
Python
|
UTF-8
|
Python
| false
| false
| 6,328
|
py
|
# -*- coding: UTF-8 -*-
# Copyright 2013-2020 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from django.conf import settings
from django.db import models
from django.utils.translation import gettext
from lino.api import dd, rt, _
from etgen.html import E, join_elems, forcetext
from lino.mixins.periods import DateRange
NONE = _("Not specified")
class BiographyOwner(dd.Model):
class Meta:
abstract = True
_cef_levels = None
_mother_tongues = None
def load_language_knowledge(self):
if self._mother_tongues is not None:
return
LanguageKnowledge = rt.models.cv.LanguageKnowledge
self._cef_levels = dict()
self._mother_tongues = []
qs = LanguageKnowledge.objects.filter(person=self)
# if dd.plugins.cv.with_language_history:
# qs = qs.order_by('-entry_date', 'id')
# else:
# qs = qs.order_by('id')
for lk in qs:
if lk.native:
self._mother_tongues.append(lk.language)
# if lk.language.iso2 in ("de", "fr", "en"):
if lk.cef_level is not None:
if not lk.language.iso2 in self._cef_levels:
lkinfo = str(lk.cef_level.value)
if lk.has_certificate:
lkinfo += " ({})".format(_("Certificate"))
self._cef_levels[lk.language.iso2] = lkinfo
@dd.htmlbox(_("Language knowledge"))
def language_knowledge(self, ar):
return self.get_language_knowledge()
def get_language_knowledge(self, *buttons):
self.load_language_knowledge()
lst = []
for lng in settings.SITE.languages:
lst.append("{}: {}".format(
lng.name, self._cef_levels.get(lng.django_code, NONE)))
# if cl is None:
# lst.append("{}: {}".format(lng.name, ))
# else:
# lst.append("{}: {}".format(lng.name, cl))
if len(self._mother_tongues):
lst.append("{}: {}".format(
_("Mother tongues"), self.mother_tongues))
lst += buttons
lst = join_elems(lst, E.br)
return E.p(*lst)
@dd.displayfield(_("Mother tongues"))
def mother_tongues(self, ar):
self.load_language_knowledge()
return ' '.join([str(lng) for lng in self._mother_tongues])
# @dd.displayfield(_("CEF level (de)"))
@dd.displayfield()
def cef_level_de(self, ar):
self.load_language_knowledge()
return self._cef_levels.get('de', NONE)
# @dd.displayfield(_("CEF level (fr)"))
@dd.displayfield()
def cef_level_fr(self, ar):
self.load_language_knowledge()
return self._cef_levels.get('fr', NONE)
# @dd.displayfield(_("CEF level (en)"))
@dd.displayfield()
def cef_level_en(self, ar):
self.load_language_knowledge()
return self._cef_levels.get('en', NONE)
class EducationEntryStates(dd.ChoiceList):
verbose_name = _("State")
add = EducationEntryStates.add_item
add('0', _("Success"), 'success')
add('1', _("Failure"), 'failure')
add('2', _("Ongoing"), 'ongoing')
class HowWell(dd.ChoiceList):
verbose_name = _("How well?")
add = HowWell.add_item
add('0', _("not at all"))
add('1', _("a bit"))
add('2', _("moderate"), "default")
add('3', _("quite well"))
add('4', _("very well"))
class CefLevel(dd.ChoiceList):
verbose_name = _("CEF level")
verbose_name_plural = _("CEF levels")
# show_values = True
#~ @classmethod
#~ def display_text(cls,bc):
#~ def fn(bc):
#~ return u"%s (%s)" % (bc.value,unicode(bc))
#~ return lazy(fn,unicode)(bc)
add = CefLevel.add_item
add('A0')
add('A1')
add('A1+')
add('A2')
add('A2+')
add('B1')
add('B2')
add('B2+')
add('C1')
add('C2')
add('C2+')
# add('A0', _("basic language skills"))
# add('A1', _("basic language skills"))
# add('A1+', _("basic language skills"))
# add('A2', _("basic language skills"))
# add('A2+', _("basic language skills"))
# add('B1', _("independent use of language"))
# add('B2', _("independent use of language"))
# add('B2+', _("independent use of language"))
# add('C1', _("proficient use of language"))
# add('C2', _("proficient use of language"))
# add('C2+', _("proficient use of language"))
class SectorFunction(dd.Model):
class Meta:
abstract = True
sector = dd.ForeignKey("cv.Sector", blank=True, null=True)
function = dd.ForeignKey("cv.Function", blank=True, null=True)
@dd.chooser()
def function_choices(cls, sector):
if sector is None:
return rt.models.cv.Function.objects.all()
return sector.function_set.all()
class PersonHistoryEntry(DateRange):
class Meta:
abstract = True
person = dd.ForeignKey(dd.plugins.cv.person_model)
duration_text = models.CharField(
_("Duration"), max_length=200, blank=True)
class HistoryByPerson(dd.Table):
master_key = 'person'
order_by = ["start_date"]
auto_fit_column_widths = True
@classmethod
def create_instance(self, req, **kw):
obj = super(HistoryByPerson, self).create_instance(req, **kw)
if obj.person_id is not None:
previous_exps = self.model.objects.filter(
person=obj.person).order_by('start_date')
if previous_exps.count() > 0:
exp = previous_exps[previous_exps.count() - 1]
if exp.end_date:
obj.start_date = exp.end_date
else:
obj.start_date = exp.start_date
return obj
@classmethod
def get_table_summary(cls, mi, ar):
if mi is None:
return
items = []
ar = ar.spawn(cls, master_instance=mi, is_on_main_actor=False)
for obj in ar:
chunks = []
for e in cls.get_handle().get_columns():
if e.hidden:
continue
v = e.field._lino_atomizer.full_value_from_object(obj, ar)
if v:
if len(chunks) > 0:
chunks.append(", ")
chunks += [e.get_label(), ": ", E.b(e.format_value(ar, v))]
items.append(E.li(*forcetext(chunks)))
return E.ul(*items)
|
[
"luc.saffre@gmail.com"
] |
luc.saffre@gmail.com
|
13097b1d3f56a2e6dabdbab7527c0f64a21c2ad4
|
4732684be0b1a45c2aebe45d22558a9e1bd7f377
|
/src/main.py
|
8ba296d023c3861d1fe711862ef41a6e31bdf7b5
|
[] |
no_license
|
Griffinem/Trade-Up-EV
|
a7e0175d333daa04d94268e9342ade2084440084
|
b9b8b5954517432f9e2d57b45e7ee658008eca6c
|
refs/heads/master
| 2022-08-28T09:22:10.180323
| 2022-08-18T14:26:44
| 2022-08-18T14:26:44
| 247,586,523
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,474
|
py
|
#from api_utils import *
import json
import requests
import time
item_price_data_url = 'http://csgobackpack.net/api/GetItemsList/v2/'
weapon_data_file_path = '..\scraping\weapon_data_file.json'
ev_output_file_path = 'ev_data_file.json'
float_cutoffs = {'Factory New': [0.0, 0.07], 'Minimal Wear': [0.07, 0.15], 'Field-Tested': [0.15, 0.38], 'Well-Worn': [0.38, 0.45], 'Battle-Scarred': [0.45, 1.0]}
wear_int_dict = {0: 'Factory New', 1: 'Minimal Wear', 2: 'Field-Tested', 3: 'Well-Worn', 4: 'Battle-Scarred'}
grade_int_dict = {0: 'consumer', 1: 'industrial', 2: 'milspec', 3: 'restricted', 4: 'classified', 5: 'covert'}
metadata = {}
ev_dict = {}
def get_item_best_wear(wear_min):
if wear_min < float_cutoffs['Factory New'][1]:
return 0
elif wear_min < float_cutoffs['Minimal Wear'][1]:
return 1
elif wear_min < float_cutoffs['Field-Tested'][1]:
return 2
elif wear_min < float_cutoffs['Well-Worn'][1]:
return 3
else:
return 4
def get_item_worst_wear(wear_max):
if wear_max >= float_cutoffs['Battle-Scarred'][0]:
return 4
elif wear_max >= float_cutoffs['Well-Worn'][0]:
return 3
elif wear_max >= float_cutoffs['Field-Tested'][0]:
return 2
elif wear_max >= float_cutoffs['Minimal Wear'][0]:
return 1
else:
return 0
def get_tradeup_ev(coll, grade):
for (i, weapon_i) in enumerate(coll[ grade_int_dict[grade] ]):
# Get best wear and worst wear as int
item_best_wear, item_worst_wear = get_item_best_wear(weapon_i['wear_min']), get_item_worst_wear(weapon_i['wear_max'])
''' The tertiary loop will iterate over each weapon wear '''
for wear_val in range(item_best_wear, item_worst_wear+1):
break_val = False
# Get the tradeup cost
weapon_key_str = weapon_i['name'] + ' (' + wear_int_dict[wear_val] + ')'
try:
tradeup_cost = price_data[weapon_key_str]['price'][ metadata['time'] ][ metadata['metric'] ] * 10
except KeyError:
#print('Error getting {0}. Breaking...'.format(weapon_key_str))
break_val = True
break
#print('Trading up {}'.format(weapon_key_str))
# Get tradeup float avg
tradeup_float_avg = 0.0
if metadata['float'] == 'median':
# Special cases
if wear_val == item_best_wear:
tradeup_float_avg = (weapon_i['wear_min'] + float_cutoffs[ wear_int_dict[wear_val] ][1]) / 2.0
elif wear_val == item_worst_wear:
tradeup_float_avg = (float_cutoffs[ wear_int_dict[wear_val] ][0] + weapon_i['wear_max']) / 2.0
#Default
else:
tradeup_float_avg = (float_cutoffs[ wear_int_dict[wear_val] ][0] + float_cutoffs[ wear_int_dict[wear_val] ][1]) / 2.0
elif metadata['float'] == 'min':
# Special cases
if wear_val == item_best_wear:
tradeup_float_avg = weapon_i['wear_min']
# Default
else:
tradeup_float_avg = float_cutoffs[ wear_int_dict[wear_val] ][0]
elif metadata['float'] == 'max':
# Special cases
if wear_val == item_worst_wear:
tradeup_float_avg = weapon_i['wear_max']
# Default
else:
tradeup_float_avg = float_cutoffs[ wear_int_dict[wear_val] ][1]
''' The quat...iary loop will iterate over each weapon in the next-highest weapon group to get the EV'''
ev = 0
tradeup_gross_list = []
all_profit = True
for (j, weapon_tu_j) in enumerate(coll[ grade_int_dict[grade+1] ]):
# Calculation:
# Resulting Float = (Avg(Tradeup Float) * [Result_Max - Result_Min]) + Result_Min
j_float = (tradeup_float_avg * (weapon_tu_j['wear_max'] - weapon_tu_j['wear_min'])) + weapon_tu_j['wear_min']
j_wear = 0
if j_float < 0.07:
j_wear = 0
elif j_float < 0.15:
j_wear = 1
elif j_float < 0.38:
j_wear = 2
elif j_float < 0.45:
j_wear = 3
else:
j_wear = 4
j_weapon_key_str = weapon_tu_j['name'] + ' (' + wear_int_dict[j_wear] + ')'
try:
tradeup_net = price_data[j_weapon_key_str]['price'][ metadata['time'] ][ metadata['metric'] ]
except KeyError:
#print('Error getting {0}. Breaking...'.format(j_weapon_key_str))
break_val = True
break
# Rough gross value - steam fees
# TODO: Modify this to work with bitskins/other site prices
tradeup_gross = tradeup_net * 0.87
# For checking variance
tradeup_gross_list.append(tradeup_gross)
# For checking all profit
profit = tradeup_gross - tradeup_cost
if profit < 0:
all_profit = False
#print('1/{0} chance for {1}'.format(len(coll[ grade_int_dict[grade+1] ]), j_weapon_key_str))
ev += ( (profit) / len(coll[ grade_int_dict[grade+1] ]) )
if break_val != True:
#print('Trade up 10x {0} at {1} float values results in Expected Value of ${2:.4f}'.format(weapon_key_str, metadata['float'], ev))
ev_dict[weapon_key_str] = [ev, tradeup_cost, tradeup_gross_list, all_profit]
if __name__ == '__main__':
ev_output_file_path = str(input('Enter output file path ("ev_output_file_.json"): '))
''' Gather metadata for query params '''
md_time = str(input('Enter price search time [24_hours, 7_days, 30_days, all_time]: '))
while md_time not in ['24_hours', '7_days', '30_days', 'all_time']:
md_time = str(input('Please enter one of the following price search times [24_hours, 7_days, 30_days, all_time]: '))
metadata['time'] = md_time
md_metric = str(input('Enter price metric [median, average, lowest_price, highest_price]: '))
while md_metric not in ['median', 'average', 'lowest_price', 'highest_price']:
md_metric = str(input('Please enter one of the following price metrics [median, average, lowest_price, highest_price]: '))
metadata['metric'] = md_metric
#md_sold_min = int(input('Enter minimum sold (holds for all items individually in the calculation): '))
#while type(md_sold_min) != 'int':
# md_sold_min = input('Please enter an integer value: ')
#metadata['sold_min'] = int(md_sold_min)
md_float = str(input('Enter float [min, median, max]: '))
while md_float not in ['min', 'median', 'max']:
md_float = str(input('Float must be in [min, median, max]: '))
metadata['float'] = md_float
''' Generate price data from csgobackpack API '''
start_a = time.time()
response = requests.get(item_price_data_url).json()
timestamp = response['timestamp']
price_data = response['items_list']
# Get items data from scraper (use utf8 for the chinese m4 I think)
with open(weapon_data_file_path, 'r', encoding='utf8') as weapon_data_file:
weapon_data = json.load(weapon_data_file)
elapsed_a = time.time() - start_a
print('Load finished in {0} seconds'.format(elapsed_a))
''' The main loop will iterate over individual case/collection '''
start_b = time.time()
for key in weapon_data.keys():
coll = weapon_data[key]
''' The secondary loop will iterate over rarity '''
## Consumer Grade
if len(coll['industrial']) > 0:
get_tradeup_ev(coll, 0)
## Industrial Grade
if len(coll['milspec']) > 0:
get_tradeup_ev(coll, 1)
## Mil-Spec Grade
if len(coll['restricted']) > 0:
get_tradeup_ev(coll, 2)
## Restricted Grade
if len(coll['classified']) > 0:
get_tradeup_ev(coll, 3)
## Classified Grade
if len(coll['covert']) > 0:
get_tradeup_ev(coll, 4)
elapsed_b = time.time() - start_b
ev_dict_sorted = {k: v for k, v in sorted(ev_dict.items(), key=lambda item: item[1], reverse=True)}
with open(ev_output_file_path, 'w', encoding='utf8') as ev_output_file:
json.dump(ev_dict_sorted, ev_output_file)
print('EV check finished in {0} seconds'.format(elapsed_b))
|
[
"unconfigured@null.spigotmc.org"
] |
unconfigured@null.spigotmc.org
|
467540c5dee5db0e3e5e016eb7da46ba682879e5
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_muzzle.py
|
b303154fa083e4f00aab3ba2e61101f50ea18ee8
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
#calss header
class _MUZZLE():
def __init__(self,):
self.name = "MUZZLE"
self.definitions = [u'the mouth and nose of an animal, especially a dog, or a covering put over this in order to prevent the animal from biting', u'the end of a gun barrel, where the bullets come out']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
343d00dfa90099304af3a226951dacbb1f31c590
|
2f0c30fda27d1167f5a4850bdf9b5040815a162e
|
/bin/ext_service/reset_habitica_timestamps.py
|
61527988d6e4ef8bfd71c972e4eb8b140849ae22
|
[
"BSD-3-Clause"
] |
permissive
|
ankur-gos/e-mission-server
|
1117e8154174a953c7df47a1f1aa15c29a2a1819
|
64b098540e331ef2bb41bd9fe7a165ff53cc7a87
|
refs/heads/master
| 2021-01-01T18:10:26.314393
| 2017-07-26T06:03:56
| 2017-07-26T06:03:56
| 98,269,025
| 0
| 0
| null | 2017-07-25T05:48:37
| 2017-07-25T05:48:37
| null |
UTF-8
|
Python
| false
| false
| 3,059
|
py
|
"""
Script to launch the pipeline reset code.
Options documented in
https://github.com/e-mission/e-mission-server/issues/333#issuecomment-312464984
"""
import logging
import argparse
import uuid
import arrow
import copy
import pymongo
import emission.net.ext_service.habitica.executor as enehe
import emission.core.get_database as edb
def _get_user_list(args):
if args.all:
return _find_all_users()
elif args.platform:
return _find_platform_users(args.platform)
elif args.email_list:
return _email_2_user_list(args.email_list)
else:
assert args.user_list is not None
return [uuid.UUID(u) for u in args.user_list]
def _find_platform_users(platform):
return edb.get_timeseries_db().find({'metadata.platform': platform}).distinct(
'user_id')
def _find_all_users():
return edb.get_timeseries_db().find().distinct('user_id')
def _email_2_user_list(email_list):
return [ecwu.User.fromEmail(e) for e in email_list]
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description="Reset the habitica pipeline. Does NOT delete points, so to avoid double counting, use only in situations where the original run would not have given any points")
# Options corresponding to
# https://github.com/e-mission/e-mission-server/issues/333#issuecomment-312464984
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-a", "--all", action="store_true", default=False,
help="reset the pipeline for all users")
group.add_argument("-p", "--platform", choices = ['android', 'ios'],
help="reset the pipeline for all on the specified platform")
group.add_argument("-u", "--user_list", nargs='+',
help="user ids to reset the pipeline for")
group.add_argument("-e", "--email_list", nargs='+',
help="email addresses to reset the pipeline for")
parser.add_argument("date",
help="date to reset the pipeline to. Format 'YYYY-mm-dd' e.g. 2016-02-17. Interpreted in UTC, so 2016-02-17 will reset the pipeline to 2016-02-16T16:00:00-08:00 in the pacific time zone")
parser.add_argument("-n", "--dry_run", action="store_true", default=False,
help="do everything except actually perform the operations")
args = parser.parse_args()
print args
print "Resetting timestamps to %s" % args.date
print "WARNING! Any points awarded after that date will be double counted!"
# Handle the first row in the table
day_dt = arrow.get(args.date, "YYYY-MM-DD")
logging.debug("day_dt is %s" % day_dt)
day_ts = day_dt.timestamp
logging.debug("day_ts is %s" % day_ts)
user_list = _get_user_list(args)
logging.info("received list with %s users" % user_list)
logging.info("first few entries are %s" % user_list[0:5])
for user_id in user_list:
logging.info("resetting user %s to ts %s" % (user_id, day_ts))
enehe.reset_all_tasks_to_ts(user_id, day_ts, args.dry_run)
|
[
"shankari@eecs.berkeley.edu"
] |
shankari@eecs.berkeley.edu
|
0ea68ccacf4032b775a574b37eb328f4f7cf5840
|
92f6e90d9b13930abde894ef6bdb521e1ae2b7be
|
/Incomplete/painting_wall.py
|
a4ee9ea29433dc55ca21fad11b0f75f1f18353bc
|
[
"MIT"
] |
permissive
|
nptit/Check_iO
|
f32b68b66c7dbd47e1490aa8db0e3f4bf29716e5
|
9107241291e6f6e397c3756497e74eece782f1e4
|
refs/heads/master
| 2021-01-25T06:55:09.459265
| 2016-03-23T06:50:12
| 2016-03-23T06:50:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,006
|
py
|
def checkio(required, operations):
total = set()
hmm = list()
highest = 0
lowest = 0
missing = list()
for index, op in enumerate(operations, start=1):
start, stop = op
if not hmm: # no need to check multiple tuples for first
if (stop + 1) - start >= required:
return index
hmm.append(op)
lowest = start
highest = stop + 1
# continue # do i need this? skip because of else??
else: # multiple tuples
if start < lowest:
lowest = start
if stop > highest:
highest = stop
for pair in hmm:
lo, hi = pair
if start > hi:
missing.append((hi+1, start-1))
print(index, missing)
# # print(list(range(start, stop+1)))
# # print(set(range(start, stop+1)))
# total = total.union(set(range(start, stop+1)))
# # print(total)
# if len(total) >= required:
# return index
# # print()
return -1
# print(checkio(5, [[1, 5], [11, 15], [2, 14], [21, 25]])) # == 1
print(checkio(6, [[1, 5], [11, 15], [2, 14], [21, 25]])) # == 2
print(checkio(11, [[1, 5], [11, 15], [2, 14], [21, 25]])) # == 3
# print(checkio(16, [[1, 5], [11, 15], [2, 14], [21, 25]])) # == 4
# print(checkio(21, [[1, 5], [11, 15], [2, 14], [21, 25]])) # == -1
# print(checkio(1000000011,[[1, 1000000000],[11, 1000000010]])) # == -1
# if __name__ == '__main__':
# assert checkio(5, [[1, 5], [11, 15], [2, 14], [21, 25]]) == 1, "1st"
# assert checkio(6, [[1, 5], [11, 15], [2, 14], [21, 25]]) == 2, "2nd"
# assert checkio(11, [[1, 5], [11, 15], [2, 14], [21, 25]]) == 3, "3rd"
# assert checkio(16, [[1, 5], [11, 15], [2, 14], [21, 25]]) == 4, "4th"
# assert checkio(21, [[1, 5], [11, 15], [2, 14], [21, 25]]) == -1, "not enough"
# assert checkio(1000000011, [[1, 1000000000], [11, 1000000010]]) == -1, "large"
|
[
"the-zebulan@users.noreply.github.com"
] |
the-zebulan@users.noreply.github.com
|
3821efe47b843b6c0e67ea56bd904c71cae7edbe
|
3307766701d680af6d12a726a2d98df2cb1830e5
|
/jams/gcj/2017/1C/C/C.py
|
52f7e3992946510e28a1d7dbac618bac1426e0bb
|
[] |
no_license
|
dpaneda/code
|
c1a54037a275fa7044eb5c2d6079f052dd968615
|
7da1ede33a6a7cd19cbd0db517d91e7cccfbbfff
|
refs/heads/master
| 2023-01-07T18:41:00.816363
| 2022-12-30T09:24:22
| 2022-12-30T09:24:22
| 1,583,913
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,126
|
py
|
#!/usr/bin/python2
import sys
def equals(P):
for i in xrange(0, len(P)):
if (P[0] - P[i]) > 0.00000001:
return False
return True
def diff(P):
a = P[0]
n = 1
for i in xrange(1, len(P)):
if P[i] == a:
n += 1
else:
return n, P[i] - a
def solve():
N, K = map(int, raw_input().split())
U = float(raw_input())
P = map(float, raw_input().split())
P.sort()
while U > 0:
if N == 1:
P[0] += U
break
if equals(P):
u = U / len(P)
for i in xrange(0, len(P)):
P[i] += u
break
n, u = diff(P)
if (u * n) < U:
for i in xrange(0, n):
P[0] += u
U -= u * n
P.sort()
print P
else:
for i in xrange(0, n):
P[i] += U / n
break
p = 1
for i in xrange(0, len(P)):
p *= P[i]
return str(p)
num = int(sys.stdin.readline())
for case in range(1, num + 1):
print("Case #{0}: {1}".format(case, solve()))
|
[
"dpaneda@gmail.com"
] |
dpaneda@gmail.com
|
7985ceb35a1900004f926901a654243dccd6e223
|
e85f4714cf2b590d21582ebd567208da1b9132fc
|
/tests/test_pakit_tests.py
|
a24369e54dcc6a0174d05d577836e2b3b1380841
|
[
"BSD-3-Clause"
] |
permissive
|
pakit/pakit_tests
|
1fcc6c6974a297d1931b704a93d4580ed1eecd90
|
078203f31d56b9701781008bc90668a5a5b292ba
|
refs/heads/master
| 2020-04-15T15:58:09.280612
| 2016-01-02T04:02:07
| 2016-01-02T04:02:07
| 42,521,090
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,612
|
py
|
"""
Test pakit_tests
"""
from __future__ import absolute_import, print_function
import os
import tempfile
import mock
import pytest
from pakit_tests import (
create_args_parser, extract_repo_names, extract_repo_block, main,
scan_recipes, format_lines, write_file, TEMPLATE
)
import tests.common as tc
def test_parse_recipes_root():
root = os.path.abspath('recipes')
args = create_args_parser().parse_args([root])
assert args.recipes_root == root
assert args.output == os.path.join('tests', 'test_recipes.py')
def test_parse_output():
root = os.path.abspath('recipes')
argv = '{0} {1}'.format(root, 'test_recs.py').split()
args = create_args_parser().parse_args(argv)
assert args.recipes_root == root
assert args.output == 'test_recs.py'
def test_extract_repo_names():
text = """self.repos = {
'stable': Git(self.src, tag='0.31.0'),
'unstable': Git(self.src),
}"""
assert extract_repo_names(text) == ['stable', 'unstable']
def test_extract_repo_block():
text = """class Ag(Recipe):
\"\"\"
Grep like tool optimized for speed
\"\"\"
def __init__(self):
super(Ag, self).__init__()
self.src = 'https://github.com/ggreer/the_silver_searcher.git'
self.homepage = self.src
self.repos = {
"stable": Git(self.src, tag='0.31.0'),
'unstable': Git(self.src),
}
def build(self):
self.cmd('./build.sh --prefix {prefix}')
self.cmd('make install')"""
expect = """self.repos = {
"stable": Git(self.src, tag='0.31.0'),
'unstable': Git(self.src),
}"""
assert extract_repo_block(text) == expect
def test_scan_recipes():
data = scan_recipes(tc.RECIPES)
assert 'ag' in data
assert sorted(data['ag']) == ['stable', 'unstable']
def test_format_lines():
data = {
'ag': ['stable', 'unstable'],
'ack': ['stable'],
}
lines = format_lines(data)
expect = """\nclass Test_ack(RecipeTest):
def test_stable(self):
assert subprocess.call(self.args, cwd=self.temp_d,
env=self.new_env) == 0
\nclass Test_ag(RecipeTest):
def test_stable(self):
assert subprocess.call(self.args, cwd=self.temp_d,
env=self.new_env) == 0
def test_unstable(self):
assert subprocess.call(self.args, cwd=self.temp_d,
env=self.new_env) == 0"""
assert '\n'.join(lines) == expect
def test_write_file():
try:
test_file = tempfile.NamedTemporaryFile()
write_file(tc.RECIPES, test_file.name)
with open(test_file.name, 'r') as fin:
assert TEMPLATE.replace('ROOT_RECS', tc.RECIPES) in fin.read()
finally:
test_file.close()
@mock.patch('pakit.main.argparse._sys')
def test_main_args_none(mock_sys):
with pytest.raises(AttributeError):
main(['pakit_tests'])
mock_sys.exit.assert_called_with(2)
@mock.patch('pakit_tests.write_file')
def test_main_output_absolutel(mock_write, mock_print):
main(['pakit_tests', '.', '/dev/null'])
mock_print.assert_any_call('Scanning recipes under: ' + os.getcwd())
mock_print.assert_any_call('Writing tests to: /dev/null')
@mock.patch('pakit_tests.write_file')
def test_main_output_relative(mock_write, mock_print):
main(['pakit_tests', '/tmp'])
mock_print.assert_any_call('Scanning recipes under: /tmp')
mock_print.assert_any_call('Writing tests to: /tmp/tests/test_recipes.py')
mock_write.assert_any_call('/tmp', '/tmp/tests/test_recipes.py')
|
[
"unknown3000@gmail.com"
] |
unknown3000@gmail.com
|
da4aa3bdc9eddca782b1e0a4f1eca9a1d8028af1
|
2321ebc9c76e2eb95a05976e3681ed7f4e24d361
|
/pandas-for-finance/10/05.py
|
68641569fe58f3b6a01bc5b01c572044cc7080ca
|
[] |
no_license
|
sharebook-kr/books
|
71428bfec46759a8da81d70bfe28fa67e4244aee
|
7537053c559ca055bf54ab940bf4078217c288a1
|
refs/heads/master
| 2020-04-22T19:08:42.294339
| 2019-08-17T12:06:42
| 2019-08-17T12:06:42
| 170,598,895
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 795
|
py
|
import requests
from bs4 import BeautifulSoup
import time
import telepot
from telepot.loop import MessageLoop
def get_dividend_earning_rate(code):
try:
url = "http://finance.naver.com/item/main.nhn?code=" + code
html = requests.get(url).text
soup = BeautifulSoup(html, "html5lib")
tag = soup.select("#_dvr")
return tag[0].text
except:
return 0
token = "398259524:AAHMXMTVrXDfNd-E9tAsA1eRp-u4LopefLI"
bot = telepot.Bot(token)
def handle(msg):
content_type, chat_type, chat_id = telepot.glance(msg)
code = msg['text']
dvr = get_dividend_earning_rate(code)
text = "배당 수익률은 {} 입니다.".format(dvr)
bot.sendMessage(chat_id, text)
MessageLoop(bot, handle).run_as_thread()
while True:
time.sleep(10)
|
[
"brayden.jo@outlook.com"
] |
brayden.jo@outlook.com
|
c62de43f47a28b30ee881c1391e0c50a8a2b2ebf
|
b5f9f93a415a5cc0117a580c5da12804e68c141d
|
/scripts/motions/initr0.py
|
65093d4646203aa136da56e262759377b990ad57
|
[] |
no_license
|
akihikoy/lfd_trick
|
71f89d80abc27ffc6fbd5bc609322918a4f8264e
|
b7bf0189db7bcef07772db17de29302d6e8ba2bf
|
refs/heads/master
| 2021-01-10T14:22:53.341666
| 2016-03-29T18:16:15
| 2016-03-29T18:16:15
| 50,623,958
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 631
|
py
|
#!/usr/bin/python
from core_tool import *
def Help():
return '''Move left arm/gripper to init posture.
Usage: init0'''
def Run(t,*args):
if t.robot.Is('PR2'):
angles= [-1.5758421026969418, 1.2968352230407523, -1.6520923310211921, -2.095963566248973, 10.512690320637843, -1.469029183486648, 2.37512293699]
elif t.robot.Is('Baxter'):
angles= [0.6772525170776368, -0.8617137066101075, -0.1092961310119629, 2.4812139215698243, -0.7577865083496095, -1.4657186411499024, -0.12732040524902344]
angles[0]-= 0.6
t.robot.OpenGripper(arm=RIGHT, blocking=False)
t.robot.MoveToQ(angles,dt=4.0, arm=RIGHT,blocking=False)
|
[
"info@akihikoy.net"
] |
info@akihikoy.net
|
0cd6a4e11eea792cd0918edb44bb11e6d8b29ecd
|
3c6b36eb1f4f9760c52903f6d0ec4a501f948c90
|
/osp/corpus/models/__init__.py
|
d1c292de71e9477c50603b545d6d90ec443aee8b
|
[
"Apache-2.0"
] |
permissive
|
davidmcclure/open-syllabus-project
|
38444249af845013e3f281a7a713dca83159c56e
|
078cfd4c5a257fbfb0901d43bfbc6350824eed4e
|
refs/heads/master
| 2021-06-30T21:47:07.636558
| 2021-06-27T15:15:35
| 2021-06-27T15:15:35
| 50,152,020
| 220
| 14
|
Apache-2.0
| 2021-06-27T15:11:15
| 2016-01-22T02:29:57
|
Python
|
UTF-8
|
Python
| false
| false
| 162
|
py
|
from .document import Document
from .document_format import Document_Format
from .document_text import Document_Text
from .document_index import Document_Index
|
[
"davidwilliammcclure@gmail.com"
] |
davidwilliammcclure@gmail.com
|
a578ce80b077a6b303027caee95e8d5938e4b2a1
|
1ebe2b9d9d1f67e34cbe21c49f8710b2a1b9eeae
|
/tests/test_AppObj_getSinglePassword.py
|
b9993a19aa52502869d1ec20e6142b69d38a25a2
|
[
"MIT"
] |
permissive
|
rmetcalf9/PasswordManPro_CLI
|
93ee0daff3bfd1c445bbb364df1a59711ec6344b
|
207a624a51ac2848c48aeac3282152315b5146df
|
refs/heads/master
| 2021-06-02T00:29:40.353520
| 2020-03-30T10:27:52
| 2020-03-30T10:27:52
| 135,285,541
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 896
|
py
|
from TestHelperSuperClass import testHelperSuperClass
import passwordmanpro_cli
from unittest.mock import patch
import samplePayloadsAndEnvs
class test_AppObj(testHelperSuperClass):
@patch('passwordmanpro_cli.AppObjClass._callGet')
def test_getSinglePassword(self, getResoursesResponse):
getResoursesResponse.side_effect = [
{ 'responseCode': 200, 'response': samplePayloadsAndEnvs.resourseResponseRAW},
{ 'responseCode': 200, 'response': samplePayloadsAndEnvs.accountsResponseRAW},
{ 'responseCode': 200, 'response': samplePayloadsAndEnvs.passwordResponseRAW}
]
fetchedPassword = passwordmanpro_cli.getSinglePassword(
resourseName="soadevteamserver-konga",
accountName="kongaadmin",
skipSSLChecks=False,
env=samplePayloadsAndEnvs.env
)
self.assertEqual(fetchedPassword, 'dummyPasswordForTest', msg='Incorrect password output')
|
[
"rmetcalf9@googlemail.com"
] |
rmetcalf9@googlemail.com
|
57c735539919e5edbbcb4ff8c16418d9f6376188
|
68bad4b3d92872bb5b77b4ee503e588d20511a27
|
/python/scripts_inhibition/old_script/simulate_inhibition_ZZZ151_slow.py
|
ab2d7fa5209a6129519eb6a8b0d03dbf06e4c97c
|
[] |
no_license
|
mickelindahl/bgmodel
|
647be626a7311a8f08f3dfc897c6dd4466fc0a92
|
78e6f2b73bbcbecd0dba25caf99f835313c914ee
|
refs/heads/master
| 2023-08-29T13:57:04.122115
| 2022-02-11T14:28:23
| 2022-02-11T14:28:23
| 17,148,386
| 7
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,492
|
py
|
'''
Created on Aug 12, 2013
@author: lindahlm
'''
from core.network.manager import Builder_striatum as Builder
from core.parallel_excecution import loop
from core.network import default_params
from scripts_inhibition.base_simulate import (get_path_logs,
get_args_list_inhibition,
get_kwargs_list_indv_nets,
par_process_and_thread,
pert_set_data_path_to_milner_on_supermicro,
pert_add_inhibition)
import scripts_inhibition.base_inhibition_striatum as module
import oscillation_perturbations151_slow as op
import pprint
pp=pprint.pprint
FILE_NAME=__file__.split('/')[-1][0:-3]
FROM_DISK_0=0
LOAD_MILNER_ON_SUPERMICRO=False
NUM_NETS=1
NUM_RUNS=len(op.get()) #A run for each perturbation
num_sim=NUM_NETS*NUM_RUNS
kwargs={
'Builder':Builder,
'cores_milner':40*1,
'cores_superm':4,
'file_name':FILE_NAME,
'from_disk':0,
'debug':False,
'do_runs':range(NUM_RUNS), #A run for each perturbation
'do_obj':False,
'i0':FROM_DISK_0,
'job_name':'inh_YYY',
'l_hours': ['00','00','00'],
'l_minutes':['15','10','5'],
'l_seconds':['00','00','00'],
'lower':1,
'local_threads_milner':20,
'local_threads_superm':1,
'module':module,
'nets':['Net_{}'.format(i) for i in range(NUM_NETS)],
'resolution':5,
'repetitions':1,
'path_code':default_params.HOME_CODE,
'path_results':get_path_logs(LOAD_MILNER_ON_SUPERMICRO,
FILE_NAME),
'perturbation_list':op.get(),
'size':3000,
'upper':3}
d_process_and_thread=par_process_and_thread(**kwargs)
pp(d_process_and_thread)
kwargs.update(d_process_and_thread)
p_list = pert_add_inhibition(**kwargs)
p_list = pert_set_data_path_to_milner_on_supermicro(p_list,
LOAD_MILNER_ON_SUPERMICRO)
for i, p in enumerate(p_list): print i, p
a_list=get_args_list_inhibition(p_list, **kwargs)
k_list=get_kwargs_list_indv_nets(len(p_list), kwargs)
for obj in a_list:
print obj.kwargs['setup'].nets_to_run
# for i, a in enumerate(args_list):
# print i, a
loop(min(num_sim, 10),[num_sim, num_sim, NUM_RUNS], a_list, k_list )
# loop(args_list, path, 1)
|
[
"mickelindahl@gmail.com"
] |
mickelindahl@gmail.com
|
1175d28772eb9d5b231c3206392fb90d67127bab
|
b8a803694c283a5acd13ab6760a36710884ab24f
|
/llvm/mc/__init__.py
|
69dd12f877e6415b53f60c7690e36b2f9d76a64c
|
[
"NCSA",
"BSD-3-Clause"
] |
permissive
|
llvmpy/llvmpy
|
8a4c31e731364ead802231b97e058b8f8c444f96
|
13130fe35f1fb03a7051ad46c36146002391a6fa
|
refs/heads/master
| 2016-09-05T16:48:54.694686
| 2015-04-28T16:21:34
| 2015-04-28T16:21:34
| 3,375,197
| 155
| 13
| null | 2015-05-27T18:36:45
| 2012-02-07T07:09:59
|
HTML
|
UTF-8
|
Python
| false
| false
| 6,676
|
py
|
import sys
import llvm
if llvm.version < (3, 4):
raise Exception("mc is not supported for llvm version less than 3.4")
from io import BytesIO
import contextlib
from llvmpy import api, extra
from llvmpy.api.llvm import MCDisassembler
class Operand(object):
def __init__(self, mcoperand, target_machine):
'''
@mcoperand: an MCOperand object
@target_machine: an llvm.target.TargetMachine object
'''
self.op = mcoperand
if not self.op:
raise llvm.LLVMException("null MCOperand argument")
self.tm = target_machine
def __str__(self):
s = "invalid"
if self.is_reg():
s = "reg(%s)" % (self.reg_name())
elif self.is_imm():
s = "imm(0x%02x)" % (self.op.getImm())
elif self.is_fp_imm():
s = "imm(%r)" % (self.op.getFPImm())
elif self.is_expr():
s = "expr(%r)" % (self.op.getExpr().getKind())
elif self.is_inst():
s = repr(Instr(self.op.getInst()))
return s
def __repr__(self):
return str(self)
def reg_name(self):
if self.is_reg():
s = self.tm.reg_info.getName(self.op.getReg())
if s.strip() == "":
return "?"
else:
return s
else:
return ""
def is_reg(self):
return self.op.isReg()
def is_imm(self):
return self.op.isImm()
def is_fp_imm(self):
return self.op.isFPImm()
def is_expr(self):
return self.op.isExpr()
def is_inst(self):
return self.op.isInst()
def get_imm(self):
if self.is_imm():
return self.op.getImm()
else:
return None
def get_fp_imm(self):
if self.is_fp_imm():
return self.op.getFPImm()
else:
return None
def get_inst(self):
if self.is_inst():
return Instr(self.op.getInst())
else:
return None
class Instr(object):
def __init__(self, mcinst, target_machine):
'''
@mcinst: an MCInst object
@target_machine: an llvm.target.TargetMachine object
'''
self.mcinst = mcinst
if not self.mcinst:
raise llvm.LLVMException("null MCInst argument")
self.tm = target_machine
def __str__(self):
os = extra.make_raw_ostream_for_printing()
self.tm.inst_printer.printInst(self.mcinst, os, "")
return str(os.str())
def __repr__(self):
return str(self)
def __len__(self):
''' the number of operands '''
return int(self.mcinst.size())
def operands(self):
amt = self.mcinst.getNumOperands()
if amt < 1:
return []
l = []
for i in range(0, amt):
l.append(Operand(self.mcinst.getOperand(i), self.tm))
return l
@property
def instr_desc(self):
return self.tm.instr_info.get(self.opcode)
@property
def flags(self):
return self.instr_desc.getFlags()
@property
def ts_flags(self):
return self.instr_desc.TSFlags
@property
def opcode(self):
return self.mcinst.getOpcode()
def is_branch(self):
return self.instr_desc.isBranch()
def is_cond_branch(self):
return self.instr_desc.isConditionalBranch()
def is_uncond_branch(self):
return self.instr_desc.isUnconditionalBranch()
def is_indirect_branch(self):
return self.instr_desc.isIndirectBranch()
def is_call(self):
return self.instr_desc.isCall()
def is_return(self):
return self.instr_desc.isReturn()
def is_terminator(self):
return self.instr_desc.isTerminator()
def is_barrier(self):
return self.instr_desc.isBarrier()
class BadInstr(Instr):
pass
class Disassembler(object):
def __init__(self, target_machine):
self.tm = target_machine
@property
def mdasm(self):
return self.tm.disassembler
@property
def mai(self):
return self.tm.asm_info
def instr(self, mcinst):
return Instr(mcinst, self.tm)
def bad_instr(self, mcinst):
return BadInstr(mcinst, self.tm)
def decode(self, bs, base_addr, align=None):
'''
decodes the bytes in @bs into instructions and yields
each instruction as it is decoded. @base_addr is the base address
where the instruction bytes are from (not an offset into
@bs). yields instructions in the form of (addr, data, inst) where
addr is an integer, data is a tuple of integers and inst is an instance of
llvm.mc.Instr. @align specifies the byte alignment of instructions and
is only used if an un-decodable instruction is encountered, in which
case the disassembler will skip the following bytes until the next
aligned address. if @align is unspecified, the default alignment
for the architecture will be used, however this may not be ideal
for disassembly. for example, the default alignment for ARM is 1, but you
probably want it to be 4 for the purposes of disassembling ARM
instructions.
'''
if isinstance(bs, str) and sys.version_info.major >= 3:
bs = bytes(map(lambda c: ord(c), bs))
elif not isinstance(bs, bytes):
raise TypeError("expected bs to be either 'str' or 'bytes' but got %s" % type(bs))
code = api.llvm.StringRefMemoryObject.new(bs, base_addr)
idx = 0
if not isinstance(align, int) or align < 1:
align = self.mai.getMinInstAlignment()
while(idx < code.getExtent()):
inst = api.llvm.MCInst.new()
addr = code.getBase() + idx
status, size = self.mdasm.getInstruction(inst, code, addr)
if size < 1:
size = (align - (idx % align))
amt_left = code.getExtent() - idx
if amt_left >= size:
data = code.readBytes(addr, size)
elif amt_left < 1:
break
else:
data = code.readBytes(addr, amt_left)
if sys.version_info.major < 3:
data = tuple(map(lambda b: ord(b), data))
else:
data = tuple(data)
if status == MCDisassembler.DecodeStatus.Fail:
yield (addr, data, None)
elif status == MCDisassembler.DecodeStatus.SoftFail:
yield (addr, data, self.bad_instr(inst))
else:
yield (addr, data, self.instr(inst))
idx += size
|
[
"michael.lam.sk@gmail.com"
] |
michael.lam.sk@gmail.com
|
cd768bdf9259efd8ae6f1c74de49916277ef7c0b
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startCirq1222.py
|
b2897a1f78eb02d31ad0854ee13aa149499f7d5a
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,794
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=5
# total number=49
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=3
c.append(cirq.H.on(input_qubit[1])) # number=4
c.append(cirq.H.on(input_qubit[1])) # number=26
c.append(cirq.CZ.on(input_qubit[4],input_qubit[1])) # number=27
c.append(cirq.H.on(input_qubit[1])) # number=28
c.append(cirq.H.on(input_qubit[2])) # number=5
c.append(cirq.H.on(input_qubit[3])) # number=6
c.append(cirq.H.on(input_qubit[4])) # number=21
c.append(cirq.H.on(input_qubit[1])) # number=34
c.append(cirq.CZ.on(input_qubit[4],input_qubit[1])) # number=35
c.append(cirq.rx(0.8011061266653969).on(input_qubit[2])) # number=37
c.append(cirq.H.on(input_qubit[1])) # number=36
for i in range(2):
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[0])) # number=17
c.append(cirq.H.on(input_qubit[1])) # number=18
c.append(cirq.H.on(input_qubit[2])) # number=19
c.append(cirq.H.on(input_qubit[3])) # number=20
c.append(cirq.H.on(input_qubit[0])) # number=46
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=47
c.append(cirq.H.on(input_qubit[0])) # number=48
c.append(cirq.X.on(input_qubit[0])) # number=39
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=40
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=42
c.append(cirq.X.on(input_qubit[1])) # number=43
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=44
c.append(cirq.X.on(input_qubit[2])) # number=11
c.append(cirq.Y.on(input_qubit[1])) # number=45
c.append(cirq.X.on(input_qubit[3])) # number=12
c.append(cirq.H.on(input_qubit[2])) # number=41
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=22
c.append(cirq.X.on(input_qubit[0])) # number=23
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=24
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=30
c.append(cirq.X.on(input_qubit[1])) # number=31
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=32
c.append(cirq.X.on(input_qubit[2])) # number=15
c.append(cirq.H.on(input_qubit[4])) # number=29
c.append(cirq.X.on(input_qubit[3])) # number=16
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 5
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq1222.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
fb8e9457ad5e04fd8f1f282ecd96716532bbf285
|
dbfdbe3c1d5e3ad38625d8c971fe8dd45c8c3885
|
/device_agent/snmp/libs/pysmi-0.3.1/pysmi/reader/zipreader.py
|
d9f6c4aeb941e5044dd6806f94dd71c09fbca20c
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
fyfdoc/IntegrateTest
|
a58f6d0ea7cff5f67d79d7e042c0bb39c6b8bbbb
|
0d8374406c10c313d6627699879215841e0ebdb6
|
refs/heads/master
| 2022-12-03T02:32:37.388556
| 2019-01-25T02:36:42
| 2019-01-25T02:36:42
| 167,468,256
| 0
| 1
| null | 2022-11-29T20:58:41
| 2019-01-25T01:59:28
|
Python
|
UTF-8
|
Python
| false
| false
| 5,627
|
py
|
#
# This file is part of pysmi software.
#
# Copyright (c) 2015-2018, Ilya Etingof <etingof@gmail.com>
# License: http://snmplabs.com/pysmi/license.html
#
import os
import sys
import time
import datetime
import zipfile
from pysmi.reader.base import AbstractReader
from pysmi.mibinfo import MibInfo
from pysmi.compat import decode
from pysmi import debug
from pysmi import error
class FileLike(object):
"""Stripped down, binary file mock to work with ZipFile"""
def __init__(self, buf, name):
self.name = name
self.buf = buf
self.null = buf[:0]
self.len = len(buf)
self.buflist = []
self.pos = 0
self.closed = False
self.softspace = 0
def close(self):
if not self.closed:
self.closed = True
self.buf = self.null
self.pos = 0
def seek(self, pos, mode = 0):
if self.buflist:
self.buf += self.null.join(self.buflist)
self.buflist = []
if mode == 1:
pos += self.pos
elif mode == 2:
pos += self.len
self.pos = max(0, pos)
def tell(self):
return self.pos
def read(self, n=-1):
if self.buflist:
self.buf += self.null.join(self.buflist)
self.buflist = []
if n < 0:
newpos = self.len
else:
newpos = min(self.pos + n, self.len)
r = self.buf[self.pos:newpos]
self.pos = newpos
return r
class ZipReader(AbstractReader):
"""Fetch ASN.1 MIB text by name from a ZIP archive.
*ZipReader* class instance tries to locate ASN.1 MIB files
by name, fetch and return their contents to caller.
"""
useIndexFile = False
def __init__(self, path, ignoreErrors=True):
"""Create an instance of *ZipReader* serving a ZIP archive.
Args:
path (str): path to ZIP archive containing MIB files
Keyword Args:
ignoreErrors (bool): ignore ZIP archive access errors
"""
self._name = path
self._members = {}
self._pendingError = None
try:
self._members = self._readZipDirectory(fileObj=open(path, 'rb'))
except Exception:
debug.logger & debug.flagReader and debug.logger(
'ZIP file %s open failure: %s' % (self._name, sys.exc_info()[1]))
if not ignoreErrors:
self._pendingError = error.PySmiError('file %s access error: %s' % (self._name, sys.exc_info()[1]))
def _readZipDirectory(self, fileObj):
archive = zipfile.ZipFile(fileObj)
if isinstance(fileObj, FileLike):
fileObj = None
members = {}
for member in archive.infolist():
filename = os.path.basename(member.filename)
if not filename:
continue
if (member.filename.endswith('.zip') or
member.filename.endswith('.ZIP')):
innerZipBlob = archive.read(member.filename)
innerMembers = self._readZipDirectory(FileLike(innerZipBlob, member.filename))
for innerFilename, ref in innerMembers.items():
while innerFilename in members:
innerFilename += '+'
members[innerFilename] = [[fileObj, member.filename, None]]
members[innerFilename].extend(ref)
else:
mtime = time.mktime(datetime.datetime(*member.date_time[:6]).timetuple())
members[filename] = [[fileObj, member.filename, mtime]]
return members
def _readZipFile(self, refs):
for fileObj, filename, mtime in refs:
if not fileObj:
fileObj = FileLike(dataObj, name=self._name)
archive = zipfile.ZipFile(fileObj)
try:
dataObj = archive.read(filename)
except Exception:
debug.logger & debug.flagReader and debug.logger('ZIP read component %s read error: %s' % (fileObj.name, sys.exc_info()[1]))
return '', 0
return dataObj, mtime
def __str__(self):
return '%s{"%s"}' % (self.__class__.__name__, self._name)
def getData(self, mibname, zipBlob=None):
debug.logger & debug.flagReader and debug.logger('looking for MIB %s at %s' % (mibname, self._name))
if self._pendingError:
raise self._pendingError
if not self._members:
raise error.PySmiReaderFileNotFoundError('source MIB %s not found' % mibname, reader=self)
for mibalias, mibfile in self.getMibVariants(mibname):
debug.logger & debug.flagReader and debug.logger('trying MIB %s' % mibfile)
try:
refs = self._members[mibfile]
except KeyError:
continue
mibData, mtime = self._readZipFile(refs)
if not mibData:
continue
debug.logger & debug.flagReader and debug.logger(
'source MIB %s, mtime %s, read from %s/%s' % (mibfile, time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(mtime)), self._name, mibfile)
)
if len(mibData) == self.maxMibSize:
raise IOError('MIB %s/%s too large' % (self._name, mibfile))
return MibInfo(path='zip://%s/%s' % (self._name, mibfile),
file=mibfile, name=mibalias, mtime=mtime), decode(mibData)
raise error.PySmiReaderFileNotFoundError('source MIB %s not found' % mibname, reader=self)
|
[
"fengyanfeng@datangmobile.cn"
] |
fengyanfeng@datangmobile.cn
|
46841f47f1f695cf591b225b1aa16e65ae0935ef
|
5dd190725aaaeb7287d935b3c99c20480b208816
|
/object_detection/utils/np_box_list_test.py
|
0cf2ef4d21dd8fea0b5d78c45776b8866d1f7cdc
|
[
"MIT"
] |
permissive
|
DemonDamon/mask-detection-based-on-tf2odapi
|
32d947164fb54395b9e45368c0d4bcf3a6ea1c28
|
192ae544169c1230c21141c033800aa1bd94e9b6
|
refs/heads/main
| 2023-05-13T05:05:44.534885
| 2021-06-08T05:56:09
| 2021-06-08T05:56:09
| 369,463,131
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,436
|
py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for object_detection.utils.np_box_list_test."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from object_detection.utils import np_box_list
class BoxListTest(tf.test.TestCase):
def test_invalid_box_data(self):
with self.assertRaises(ValueError):
np_box_list.BoxList([0, 0, 1, 1])
with self.assertRaises(ValueError):
np_box_list.BoxList(np.array([[0, 0, 1, 1]], dtype=int))
with self.assertRaises(ValueError):
np_box_list.BoxList(np.array([0, 1, 1, 3, 4], dtype=float))
with self.assertRaises(ValueError):
np_box_list.BoxList(np.array([[0, 1, 1, 3], [3, 1, 1, 5]], dtype=float))
def test_has_field_with_existed_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
boxlist = np_box_list.BoxList(boxes)
self.assertTrue(boxlist.has_field('boxes'))
def test_has_field_with_nonexisted_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
boxlist = np_box_list.BoxList(boxes)
self.assertFalse(boxlist.has_field('scores'))
def test_get_field_with_existed_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
boxlist = np_box_list.BoxList(boxes)
self.assertTrue(np.allclose(boxlist.get_field('boxes'), boxes))
def test_get_field_with_nonexited_field(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
boxlist = np_box_list.BoxList(boxes)
with self.assertRaises(ValueError):
boxlist.get_field('scores')
class AddExtraFieldTest(tf.test.TestCase):
def setUp(self):
boxes = np.array([[3.0, 4.0, 6.0, 8.0], [14.0, 14.0, 15.0, 15.0],
[0.0, 0.0, 20.0, 20.0]],
dtype=float)
self.boxlist = np_box_list.BoxList(boxes)
def test_add_already_existed_field(self):
with self.assertRaises(ValueError):
self.boxlist.add_field('boxes', np.array([[0, 0, 0, 1, 0]], dtype=float))
def test_add_invalid_field_data(self):
with self.assertRaises(ValueError):
self.boxlist.add_field('scores', np.array([0.5, 0.7], dtype=float))
with self.assertRaises(ValueError):
self.boxlist.add_field('scores',
np.array([0.5, 0.7, 0.9, 0.1], dtype=float))
def test_add_single_dimensional_field_data(self):
boxlist = self.boxlist
scores = np.array([0.5, 0.7, 0.9], dtype=float)
boxlist.add_field('scores', scores)
self.assertTrue(np.allclose(scores, self.boxlist.get_field('scores')))
def test_add_multi_dimensional_field_data(self):
boxlist = self.boxlist
labels = np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]],
dtype=int)
boxlist.add_field('labels', labels)
self.assertTrue(np.allclose(labels, self.boxlist.get_field('labels')))
def test_get_extra_fields(self):
boxlist = self.boxlist
self.assertItemsEqual(boxlist.get_extra_fields(), [])
scores = np.array([0.5, 0.7, 0.9], dtype=float)
boxlist.add_field('scores', scores)
self.assertItemsEqual(boxlist.get_extra_fields(), ['scores'])
labels = np.array([[0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 0, 1]],
dtype=int)
boxlist.add_field('labels', labels)
self.assertItemsEqual(boxlist.get_extra_fields(), ['scores', 'labels'])
def test_get_coordinates(self):
y_min, x_min, y_max, x_max = self.boxlist.get_coordinates()
expected_y_min = np.array([3.0, 14.0, 0.0], dtype=float)
expected_x_min = np.array([4.0, 14.0, 0.0], dtype=float)
expected_y_max = np.array([6.0, 15.0, 20.0], dtype=float)
expected_x_max = np.array([8.0, 15.0, 20.0], dtype=float)
self.assertTrue(np.allclose(y_min, expected_y_min))
self.assertTrue(np.allclose(x_min, expected_x_min))
self.assertTrue(np.allclose(y_max, expected_y_max))
self.assertTrue(np.allclose(x_max, expected_x_max))
def test_num_boxes(self):
boxes = np.array([[0., 0., 100., 100.], [10., 30., 50., 70.]], dtype=float)
boxlist = np_box_list.BoxList(boxes)
expected_num_boxes = 2
self.assertEquals(boxlist.num_boxes(), expected_num_boxes)
if __name__ == '__main__':
tf.test.main()
|
[
"noreply@github.com"
] |
DemonDamon.noreply@github.com
|
5e82d5c5a82104ee6f3ba514fcce0106579c026f
|
715a11d7b8f15694a5cc4b47ac0e3a3cfc4ffedc
|
/peakelem.py
|
5d99b8c5e4760ff7fad5f9cbebcb6e3ce1a46279
|
[] |
no_license
|
mohanrajanr/CodePrep
|
5cd538d16598f6a0d2486357d3cc6e0fa1626e4e
|
2e23a5f996139b887bf723f58b23368cf8121cd4
|
refs/heads/main
| 2023-04-23T04:10:06.111120
| 2021-05-11T06:47:51
| 2021-05-11T06:47:51
| 366,283,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 337
|
py
|
from typing import List
def findPeakElement(nums: List[int]) -> int:
l = 0
r = len(nums) -1
while l < r:
mid = l + (r - l)//2
if nums[mid] < nums[mid + 1]:
l = mid + 1
else:
r = mid
return l
print(findPeakElement([1,2,3,1]))
print(findPeakElement([1,2,1,3,5,6,4]))
|
[
"mohanrajan1996@gmail.com"
] |
mohanrajan1996@gmail.com
|
39cbc94ee7bdfab87c35956c0e4db581e7be8f01
|
f0932f59d37adfbba9307ee31e6f78ce3c256c4a
|
/scripts/pick_primers.py
|
4970130ecdc69ece8f850de75796334dbcf07178
|
[] |
no_license
|
kalekundert/ligrna
|
3785a1e5fb8ed6d07839a5314029f3fc882d4471
|
843963973c34c4976f5adfbd4d03f5f1d0344423
|
refs/heads/master
| 2020-04-12T12:52:32.828100
| 2020-02-22T00:59:57
| 2020-02-22T00:59:57
| 162,505,099
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,178
|
py
|
#!/usr/bin/env python3
"""\
Automatically design primers that can be used to construct the given sgRNA
design by overlap extension PCR. There are a number of parameters controlling
how ideal the primers have to be, and you can play with them to get more or
fewer results.
I ended up not using this script in favor of ordering my designs as gBlocks
gene fragments from IDT. The PCR assembly takes as long as it takes IDT to
deliver gBlocks, and the gBlocks are much higher purity. The gBlocks are also
not that much more expensive at $90 per design. Most of the primers are ~$30,
and then you have to add reagents and my time.
Usage:
pick_primers.py <name> [options]
Options:
--max-num-primers NUM
--min-primer-len LEN [default: 40]
--max-primer-len LEN [default: 50]
--min-overlap-len LEN [default: 18]
--max-overlap-len LEN [default: 22]
--min-overlap-tm CELSIUS [default: 52.0]
--max-overlap-tm CELSIUS [default: 58.0]
--max-tm-diff DELTA-CELSIUS [default: 2.0]
--max-gc-content PERCENT [default: 0.6]
--min-gc-content PERCENT [default: 0.3]
-c, --color WHEN [default: auto]
-q, --header-only
"""
import sys, docopt
import pcr_helper, sgrna_sensor

args = docopt.docopt(__doc__)

# Echo the exact command line so it is captured alongside the results.
print('$ ' + ' '.join(sys.argv))
print()

design = sgrna_sensor.from_name(args['<name>'])
assembler = pcr_helper.PcrAssembly()

# --max-num-primers has no default in the usage string; treat absence as 0.
assembler.max_num_primers = int(args['--max-num-primers'] or 0)

# The remaining knobs map 1:1 onto command-line options: the length limits
# are ints, the melting-temperature/GC limits are floats.
for attr in ('min_primer_len', 'max_primer_len',
             'min_overlap_len', 'max_overlap_len'):
    setattr(assembler, attr, int(args['--' + attr.replace('_', '-')]))
for attr in ('min_overlap_tm', 'max_overlap_tm', 'max_tm_diff',
             'max_gc_content', 'min_gc_content'):
    setattr(assembler, attr, float(args['--' + attr.replace('_', '-')]))
assembler.use_color = args['--color']

assembler.find_primers(design)
assembler.print_primers(args['--header-only'])
|
[
"kale@thekunderts.net"
] |
kale@thekunderts.net
|
a0feaf8c56a52a21c80539eab8e8ed88e51eac94
|
781e2692049e87a4256320c76e82a19be257a05d
|
/intervention/results/control_111904_1447993241_112_7.5.py
|
845005cad1ed0885c10843c1b9b9886bcb1ed4e3
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
def num_common_letters(goal_word, guess):
    """Return the number of distinct characters shared by *goal_word* and *guess*.

    The original looped over ``guess`` and rebuilt ``list(goal_word)`` on every
    iteration (O(n*m) with a needless copy per step); the count of distinct
    shared characters is simply the size of the set intersection, which is
    O(n + m) and order-independent like the original.
    """
    return len(set(goal_word) & set(guess))
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
0b9a88c391878412fc429f3a75e2414c760997cf
|
620323fc090cebaf7aca456ff3f7fbbe1e210394
|
/flask__webservers/get_size_upload_file_with_progress/main.py
|
35ae0b78ba57d5a84ea62acf77d081e6e19a12bf
|
[
"CC-BY-4.0"
] |
permissive
|
gil9red/SimplePyScripts
|
bd2733372728bf9b9f00570e90316fa12116516b
|
773c2c9724edd8827a1dbd91694d780e03fcb05a
|
refs/heads/master
| 2023-08-31T04:26:09.120173
| 2023-08-30T17:22:59
| 2023-08-30T17:22:59
| 22,650,442
| 157
| 46
| null | 2023-09-08T17:51:33
| 2014-08-05T16:19:52
|
Python
|
UTF-8
|
Python
| false
| false
| 4,527
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "ipetrash"
import logging
from flask import Flask, request, redirect, render_template_string, jsonify
# pip install humanize
from humanize import naturalsize as sizeof_fmt
app = Flask(__name__)
logging.basicConfig(level=logging.DEBUG)
@app.route("/")
def index():
    """Serve the upload form with AJAX progress reporting.

    The embedded jQuery submits the form to /get_file_size via XHR (so the
    file is never navigated to), drives the <progress> bar from upload
    progress events, and displays the JSON-returned size on success.
    """
    return render_template_string(
        """\
<html>
    <head>
        <meta content='text/html; charset=UTF-8' http-equiv='Content-Type'/>
        <title>get_size_upload_file</title>
        <script type="text/javascript" src="{{ url_for('static', filename='js/jquery-3.1.1.min.js') }}"></script>
    </head>
    <body>
        <br>
        <form class="form__upload_file" action="/get_file_size" method="post" enctype="multipart/form-data">
            <p>Узнайте размер файла:</p>
            <p><input type="file" name="file"></p>
            <p><input type="submit"></p>
        </form>
        <div class="block progress" style="display: none">
            <p>Пожалуйста, подождите, файл загружаются.</p>
            <progress class="progress upload" max="100" value="0"></progress>
        </div>
        <br><br>
        <div class="info size" style="display: none">
            <div class="show_size" style="display: inline-block;"></div><div style="display: inline-block;"> Bytes</div>
            <div class="show_size_human"></div>
        </div>
        <script>
            $(document).ready(function() {
                function progress(e) {
                    if(e.lengthComputable) {
                        var max = e.total;
                        var current = e.loaded;
                        var percentage = (current * 100) / max;
                        console.log(percentage);
                        $('.progress.upload').val(percentage);
                    }
                }
                $(".form__upload_file").submit(function() {
                    $('.block.progress').show();
                    $('.info.size').hide();
                    var thisForm = this;
                    var url = $(this).attr("action");
                    var method = $(this).attr("method");
                    if (method === undefined) {
                        method = "get";
                    }
                    // var data = $(this).serialize();
                    //
                    // For send file object:
                    var input = $(".form__upload_file > input[type=file]");
                    var data = new FormData(thisForm);
                    $.ajax({
                        url: url,
                        method: method, // HTTP метод, по умолчанию GET
                        data: data,
                        dataType: "json", // тип данных загружаемых с сервера
                        // Без этих опций неудастся передать файл
                        processData: false,
                        contentType: false,
                        xhr: function() {
                            var myXhr = $.ajaxSettings.xhr();
                            if (myXhr.upload) {
                                myXhr.upload.addEventListener('progress', progress, false);
                            }
                            return myXhr;
                        },
                        cache:false,
                        success: function(data) {
                            console.log(data);
                            console.log(JSON.stringify(data));
                            $('.info.size > .show_size').text(data.length);
                            $('.info.size > .show_size_human').text(data.length_human);
                            $('.block.progress').hide();
                            $('.info.size').show();
                        },
                    });
                    return false;
                });
            });
        </script>
    </body>
</html>
    """
    )
@app.route("/get_file_size", methods=["POST"])
def get_file_size():
    """Return the size of the uploaded 'file' part as JSON.

    Responds with ``{"length": <bytes>, "length_human": <humanized size>}``;
    redirects to the index page when the request carries no file part.
    """
    print(request.files)

    # check if the post request has the file part
    if "file" not in request.files:
        return redirect("/")

    length = 0
    file = request.files["file"]
    if file:
        # Measure by seeking instead of reading the entire upload into
        # memory (the old code held the whole payload just to len() it).
        # Werkzeug spools uploads to a seekable stream.
        file.stream.seek(0, 2)  # 2 == io.SEEK_END
        length = file.stream.tell()

    return jsonify({"length": length, "length_human": sizeof_fmt(length)})
if __name__ == "__main__":
    # Enables the interactive debugger and reloader -- development only.
    app.debug = True

    # Localhost
    # port=0 -- random free port
    # app.run(port=0)
    app.run(port=5000)

    # # Public IP
    # app.run(host='0.0.0.0')
|
[
"ilya.petrash@inbox.ru"
] |
ilya.petrash@inbox.ru
|
d9febf29c060feccd2c30acdf80550e51cdf5573
|
6a32cba18ed153b7a7611804223c74dbc923c761
|
/5.py
|
aea6c3c163bf391016e8706b78860f5c4fd2bb65
|
[] |
no_license
|
Mrhairui/leetcode
|
8cfea60868c37f2a7d0675c4ee1f6d431c75dd37
|
a0884db8fe70e63707cc0fa06c6367e42857e4de
|
refs/heads/master
| 2022-03-29T16:18:42.827912
| 2019-11-13T14:48:36
| 2019-11-13T14:48:36
| 197,877,877
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 973
|
py
|
class Solution:
    def longestPalindrome(self, s: str) -> str:
        """Return the longest palindromic substring of *s*.

        O(n^2) dynamic programming: is_pal[i][j] records whether s[i..j]
        is a palindrome; ties keep the first maximal substring found.
        """
        n = len(s)
        if n <= 1:
            return s
        best = s[0]
        best_len = 1
        is_pal = [[False] * n for _ in range(n)]
        for right in range(1, n):
            for left in range(right):
                # s[left..right] is a palindrome when its ends match and the
                # interior is at most one char or itself a palindrome.
                if s[left] == s[right] and (right - left <= 2 or is_pal[left + 1][right - 1]):
                    is_pal[left][right] = True
                    span = right - left + 1
                    if span > best_len:
                        best_len = span
                        best = s[left:right + 1]
        return best

solution = Solution()
m = solution.longestPalindrome('fdfgabcbad')
print(m)
|
[
"chenwentao_622@163.com"
] |
chenwentao_622@163.com
|
99a157c10331ba134de0da50bb8c9272687b2a54
|
c6eee54ffef7f99f2825cc332a649d9a6e9e181d
|
/matrixscreener/imagej.py
|
944d1b559197f620ac9c63be68652944f0c5ee99
|
[
"MIT"
] |
permissive
|
imcf/matrixscreener
|
5d3fa6040e62c14ff504dfcbb3818e405d9e9254
|
727711654269d93e528ae9a604ce5ac5b24fa816
|
refs/heads/master
| 2021-01-17T04:25:14.575654
| 2015-02-08T15:23:39
| 2015-02-08T16:11:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,754
|
py
|
# encoding: utf-8
"""
Stitch images with ImageJ.
* ``matrixscreener.imagej._bin`` should be set if you are on Windows or Linux.
"""
import pydebug, subprocess, os, fijibin
from tempfile import NamedTemporaryFile
# debug with DEBUG=matrixscreener python script.py
debug = pydebug.debug('matrixscreener')
_bin = fijibin.BIN
def stitch_macro(folder, filenames, x_size, y_size, output_filename,
                 x_start=0, y_start=0, overlap=10):
    """Build an ImageJ Grid/Collection stitching macro (IJM source).

    Mirrors the Fiji plugin's parameters
    (http://fiji.sc/Image_Stitching#Grid.2FCollection_Stitching):
    filename-defined positions, computed overlap, subpixel accuracy,
    linear-blending fusion, the plugin's default thresholds, and the
    RAM-hungry/fast computation mode. The fused image is saved as PNG.

    Parameters
    ----------
    folder : string
        Directory containing the tile images (or per-tile folders).
    filenames : string
        Tile filename pattern, e.g. ``field-X{xx}-Y{yy}/image-X{xx}-Y{yy}.ome.tif``.
    x_size, y_size : int
        Grid dimensions, number of tiles per axis.
    output_filename : string
        Path of the fused output image; should end in ``.png``.
    x_start, y_start : int
        First grid index on each axis.
    overlap : number
        Assumed tile overlap in percent (the plugin refines it, but a good
        guess cuts computation time).

    Returns
    -------
    string
        IJM macro source.
    """
    parts = [
        'run("Grid/Collection stitching",',
        '"type=[Filename defined position]',
        'order=[Defined by filename ]',
        'grid_size_x={}'.format(x_size),
        'grid_size_y={}'.format(y_size),
        'tile_overlap={}'.format(overlap),
        'first_file_index_x={}'.format(x_start),
        'first_file_index_y={}'.format(y_start),
        'directory=[{}]'.format(folder),
        'file_names={}'.format(filenames),
        'output_textfile_name=TileConfiguration.txt',
        'fusion_method=[Linear Blending]',
        'regression_threshold=0.30',
        'max/avg_displacement_threshold=2.50',
        'absolute_displacement_threshold=3.50',
        'compute_overlap',
        'subpixel_accuracy',
        'computation_parameters=[Save computation time (but use more RAM)]',
        # 'Fuse and display' (it was 'Fused and display' in older plugin
        # builds) keeps the fused image in a window we can save ourselves,
        # which lets us choose the output filename.
        'image_output=[Fuse and display]");',
        # save the fused window to png
        'selectWindow("Fused");',
        'run("PNG ...", "save={}'.format(output_filename),
        'imageiosaveas.codecname=png',
        'imageiosaveas.filename={}");'.format(output_filename),
        'close();',
    ]
    return ' '.join(parts)
def run_imagej(macro):
    """
    Runs ImageJ with the supplied macro. Output of ImageJ can be viewed by
    running python script with environment variable DEBUG=matrixscreener.

    Parameters
    ----------
    macro : string
        IJM-macro to run.

    Returns
    -------
    int
        ImageJ exit code.
    """
    # avoid verbose output of ImageJ when DEBUG environment variable set
    env = os.environ.copy()
    debugging = False
    if 'DEBUG' in env:
        if env['DEBUG'] == 'matrixscreener' or env['DEBUG'] == '*':
            debugging = True
        # DEBUG is stripped from the child environment regardless of its
        # value, so ImageJ itself never sees it.
        del env['DEBUG']
    # NOTE(review): re-opening a NamedTemporaryFile by name while it is open
    # does not work on Windows -- this path presumably targets Linux/macOS
    # (matching the _bin note in the module docstring); confirm.
    with NamedTemporaryFile(mode='w', suffix='.ijm') as m:
        m.write(macro)
        m.flush() # make sure macro is written before running ImageJ
        cmd = [_bin, '--headless', '-macro', m.name]
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE, env=env)
        out, err = proc.communicate()
    # Mirror the child's output to the debug channel, line by line.
    for line in out.decode('latin1', errors='ignore').splitlines():
        debug('stdout:' + line)
    for line in err.decode('latin1', errors='ignore').splitlines():
        debug('stderr:' + line)
    if proc.returncode != 0 and not debugging:
        print('matrixscreener ERROR: ImageJ exited with code {}.'.format(proc.returncode))
        print('matrixscreener Try running script with `DEBUG=matrixscreener python script.py`')
    return proc.returncode
|
[
"arve.seljebu@gmail.com"
] |
arve.seljebu@gmail.com
|
1eabae2d19ec646f5caa21ad2542291a6db58275
|
5c842a91854b0061bdec96a36e30860fb1e1321e
|
/Chapter3_MCMC/github_pull.py
|
dff8070e7791d77ff97664dc4e3c8ebedbf3adc3
|
[
"MIT"
] |
permissive
|
bzillins/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers
|
fe2be77b9859566e2a923c8022a85925aa190d1d
|
c08a6344b8d0e39fcdb9702913b46e1b4e33fb9a
|
refs/heads/master
| 2020-12-30T22:09:05.936082
| 2013-02-27T04:21:05
| 2013-02-27T04:21:05
| 19,747,037
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,348
|
py
|
#github data scrapper
"""
variables of interest:
indp. variables
- language, given as a binary variable. Need 4 positions for 5 langagues
- #number of days created ago, 1 position
- has wiki? Boolean, 1 position
- followers, 1 position
- following, 1 position
- constant
dep. variables
-stars/watchers
-forks
"""
from requests import get
from json import loads
import datetime
import numpy as np
MAX = 8000000
today = datetime.datetime.today()
randint = np.random.randint
N = 120 #sample size.
auth = ("username", "password" )
language_mappings = {"Python": 0, "JavaScript": 1, "Ruby": 2, "Java":3, "Shell":4, "PHP":5}
#define data matrix:
X = np.zeros( (N , 12), dtype = int )
for i in xrange(N):
is_fork = True
is_valid_language = False
while is_fork == True or is_valid_language == False:
is_fork = True
is_valid_language = False
params = {"since":randint(0, MAX ) }
r = get("https://api.github.com/repositories", params = params, auth=auth )
results = loads( r.text )[0]
#im only interested in the first one, and if it is not a fork.
is_fork = results["fork"]
r = get( results["url"], auth = auth)
#check the language
repo_results = loads( r.text )
try:
language_mappings[ repo_results["language" ] ]
is_valid_language = True
except:
pass
#languages
X[ i, language_mappings[ repo_results["language" ] ] ] = 1
#delta time
X[ i, 6] = ( today - datetime.datetime.strptime( repo_results["created_at"][:10], "%Y-%m-%d" ) ).days
#haswiki
X[i, 7] = repo_results["has_wiki"]
#get user information
r = get( results["owner"]["url"] , auth = auth)
user_results = loads( r.text )
X[i, 8] = user_results["following"]
X[i, 9] = user_results["followers"]
#get dep. data
X[i, 10] = repo_results["watchers_count"]
X[i, 11] = repo_results["forks_count"]
print
print " -------------- "
print i, ": ", results["full_name"], repo_results["language" ], repo_results["watchers_count"], repo_results["forks_count"]
print " -------------- "
print
np.savetxt("data/github_data.csv", X, delimiter=",", fmt="%d" )
|
[
"cam.davidson.pilon@gmail.com"
] |
cam.davidson.pilon@gmail.com
|
cca8d7bb1e4c795995e6db7675bd3f7bfad39018
|
e9d139f5108ca115d6254763438dd8855fc4454d
|
/view/__init__.py
|
fc9912fe0a91e8e2066ec8e866c29d702fb6cd05
|
[] |
no_license
|
Letractively/simulation-modeling
|
119d1376a75ff825903a0dd4bbbbc161e1d19e05
|
aca18bf1f50b1083bbc9cbd97b87d3df1c71000b
|
refs/heads/master
| 2016-08-12T18:44:07.605687
| 2011-12-14T11:04:29
| 2011-12-14T11:04:29
| 45,956,671
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,531
|
py
|
# -*- coding: utf-8 -*-
# Импорт и настройка Jinja2
from jinja2 import Environment, PackageLoader
templates = Environment(loader=PackageLoader('view', 'templates'), extensions = ['jinja2.ext.with_'])
from GChartWrapper import Pie, Line, LineXY
from settings import app
def index(names):
    """Render the front page listing the available models."""
    return templates.get_template('index.html').render(app=app, models=names)
def help(name, title):
    """Render the help page for *name* with the given *title*.

    NOTE: intentionally shadows the builtin ``help`` at module level.
    """
    page = templates.get_template('help/%s.html' % name)
    return page.render(app=app, name=name, title=title)
def mss(*a, **kw):
    """Mass-service (queueing) model page: builds Google Chart diagrams
    from the computed output, then delegates rendering to model()."""
    if 'output' in kw and kw['output']['requests']['total']:
        # Request-balance pie chart.
        requests = kw['output']['requests']
        # Raw data: denied requests by reason, then the accepted ones.
        data = []
        for key in ('timeout', 'sizeout', 'shutdown'):
            data.append(round(requests['denied'][key], 2))
        data.append(round(requests['accepted'], 2))
        # Slice colors (hex, no '#').
        colors = ('FBFF53', '1560BD', 'FF4D6F', '008090')
        # Percentage labels.
        labels = [str(round(value / requests['total'] * 100, 2)) + '%' for value in data]
        # Chart size in pixels.
        sizes = [250, 160]
        kw['output']['requests_diagram'] = Pie(data).color(*colors).label(*labels).size(*sizes).scale(0, sum(data)).img(alt=u'Количество заявок')
        if kw['output']['balance']['accepted']:
            # Revenue-distribution pie chart.
            balance = kw['output']['balance']
            data = [balance['income'], balance['repairs'], balance['costs']]
            colors = ['8F007E', '000000', '999999']
            labels = [str(round(value / balance['accepted'] * 100, 2)) + '%' for value in data]
            sizes[1] = 80
            kw['output']['income_diagram'] = Pie(data).color(*colors).label(*labels).size(*sizes).scale(0, sum(data)).img(alt=u'Распределение выручки')
        # Workload (queue length distribution) line chart.
        data = kw['output']['workload']
        sizes = [560, 180]
        # NOTE(review): Line/LineXY charts pass ``sizes`` as one argument
        # while Pie uses ``*sizes`` -- presumably GChartWrapper accepts both;
        # confirm against the library.
        diagram = Line(data).scale(0, round(max(data), 3)).size(sizes).color('1560BD')
        # Vertical markers: channel count boundary and beyond.
        diagram['chm'] = 'B,008090,0,0,0|B,FF4D6F,0,%s:,0' % (kw['form'].data['channels_count'])
        diagram.axes.type('xxy')
        diagram.axes.label(1, '', 'Количество заявок в системе', '')
        y_labels_count = 4
        diagram.axes.label(2, *(str(round(max(data) / (y_labels_count - 1) * n, 2)) + '%' for n in range(y_labels_count)))
        diagram.axes.range(0, 0, len(data) - 1)
        kw['output']['workload_diagram'] = diagram.img(alt=u'Вероятностное распределение количества заявок в системе', style='margin-top: 18px')
    return model(*a, **kw)
def warehouse(*args, **kwargs):
    """Warehouse model page: builds balance and history diagrams from the
    computed output, then delegates rendering to model()."""
    if not kwargs.get('output', False):
        return model(*args, **kwargs)
    # Balance pie chart (only when there were sales and a profit).
    if kwargs['output']['balance']['sales'] > 0 and kwargs['output']['balance']['profit'] > 0:
        balance = kwargs['output']['balance']
        data = [round(balance[field], 2) for field in ('supplies', 'storage', 'fines', 'profit')]
        colors = ('FBFF53', '1560BD', 'FF4D6F', '008090')
        labels = [str(round(value / balance['sales'] * 100, 2)) + '%' for value in data]
        sizes = [250, 160]
        kwargs['output']['balance_diagram'] = Pie(data).color(*colors).label(*labels).size(*sizes).scale(0, balance['sales']).img(alt=u'Диаграмма баланса')
    history = kwargs['output']['history']
    if history:
        # Stock-level history chart.
        # Split (time, level) pairs into abscissas and ordinates.
        x, y = zip(*history)
        sizes = [560, 180]
        # NOTE(review): ``size(sizes)`` (no star) here vs ``size(*sizes)``
        # for Pie above -- presumably GChartWrapper accepts both; confirm.
        diagram = LineXY([x, y]).size(sizes).scale(0, max(x), 0, max(y)).color('1560BD')
        diagram['chm'] = 'B,1560BD,0,:,0'
        diagram.axes.type('xxy')
        diagram.axes.range(0, 0, max(x))
        diagram.axes.range(2, 0, max(y))
        diagram.axes.label(1, '', 'Время, ч', '')
        kwargs['output']['history_diagram'] = diagram.img()
    return model(*args, **kwargs)
def model(name, title, form=None, output=None, query=''):
    """Render a generic model page.

    :param name: template/model name (renders ``<name>.html``).
    :param title: page title passed to the template.
    :param form: the model's input form, if any.
    :param output: computed model results merged into the template context;
        ``None``/empty means the model has not been run yet.
    :param query: query string used to build the URL-shortening link.
    """
    # Avoid the shared mutable-default-argument pitfall of ``output={}``
    # (the old default); behavior is unchanged since {} and None are both
    # falsy and never mutated here.
    output = output or {}
    template = templates.get_template(name + '.html')
    # Link for the URL shortener (None hides it); the local was renamed so
    # it no longer shadows the module-level shorten() view.
    shorten_url = '/url/%s?%s' % (name, query) if query else None
    return template.render(app=app, name=name, title=title, form=form,
                           shorten=shorten_url, output=bool(output), **output)
def shorten(url):
    """Render the URL-shortening result page."""
    return templates.get_template('shorten.html').render(url=url)

def notfound():
    """Render the 404 page."""
    return templates.get_template('notfound.html').render(
        app=app, title=u'Страница не найдена')

def internal_error():
    """Render the 500 page."""
    return templates.get_template('internal_error.html').render(
        app=app, title=u'Внутренняя ошибка')
|
[
"altaisoft@gmail.com"
] |
altaisoft@gmail.com
|
a0d3c2bd42f70d32b9c784462fa44007a8f0adf7
|
10dcfd809f342fd822b6df198f4045f92b157124
|
/bin/po2ini
|
1a43bcf93206ef9589e290b07fe2adcdcf2ee58f
|
[] |
no_license
|
mozilla/affiliates-lib
|
c731c910c8d9fe04e211541e54f304a127a0b829
|
1000f98d9df217ed66a0ecd07e1e0a1d822a712a
|
refs/heads/master
| 2023-07-03T17:18:49.841809
| 2016-02-01T10:49:47
| 2016-02-01T10:49:47
| 2,291,186
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 989
|
#!/Users/fred/.virtualenvs/playdoh/bin/python2.6
#
# Copyright 2007 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""simple script to convert a gettext .po localization file to a .ini file"""
from translate.convert import po2ini

# Thin CLI shim: delegate straight to the translate-toolkit converter.
if __name__ == '__main__':
    po2ini.main()
|
[
"fwenzel@mozilla.com"
] |
fwenzel@mozilla.com
|
|
313c33cbac5a657dac8e135d547bd7a34207608b
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/pg_1532+522/sdB_pg_1532+522_lc.py
|
8187be807799c8b0ee1f61b7c1dc1a03cbbd8db0
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 345
|
py
|
from gPhoton.gAperture import gAperture

def main():
    """Generate the NUV gPhoton light curve for sdB PG 1532+522."""
    params = {
        "band": "NUV",
        "skypos": [233.374333, 52.113889],
        "stepsz": 30.,
        "csvfile": "/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_pg_1532+522/sdB_pg_1532+522_lc.csv",
        "maxgap": 1000.,
        "overwrite": True,
        "radius": 0.00555556,
        "annulus": [0.005972227, 0.0103888972],
        "verbose": 3,
    }
    gAperture(**params)

if __name__ == "__main__":
    main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
c977864e64c62b6e7d106375290362fa31ff27ed
|
da55b6cb2b7356f589c56bb647e49f79aedfc4f1
|
/python-fabric/interface.test/example2_app/app/DemoComponent.py
|
b1b8f58cdd0340fe06bec5703129b2ed628bc745
|
[] |
no_license
|
eiselesr/sandbox
|
2d0cbb16e2601dfda9d9e5ea5a5e96a053fdf72f
|
0b3fc878a613600c6071019c820ba79e2d2a9a2d
|
refs/heads/master
| 2023-09-01T00:40:37.222840
| 2023-08-24T19:20:27
| 2023-08-24T19:20:27
| 151,609,265
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 467
|
py
|
# riaps:keep_import:begin
from riaps.run.comp import Component
# riaps:keep_import:end

# RIAPS component that logs whatever object arrives on its `tick` timer port.
class DemoComponent(Component):
    # riaps:keep_constr:begin
    def __init__(self):
        super().__init__()
    # riaps:keep_constr:end

    # riaps:keep_tick:begin
    def on_tick(self):
        # Receive the payload published on the tick port -- presumably the
        # firing timestamp; confirm against the RIAPS model file.
        now = self.tick.recv_pyobj()
        self.logger.info(f"DemoComponent | on_tick | now: {now}")
    # riaps:keep_tick:end

    # riaps:keep_impl:begin
    # riaps:keep_impl:end
|
[
"eiselesr@gmail.com"
] |
eiselesr@gmail.com
|
a4ddf2709fce33cd5aab0418f9028ac97efa25d6
|
fab39aa4d1317bb43bc11ce39a3bb53295ad92da
|
/tools/clip_dot.py
|
e0123db1d5a9fae276ffc01267f7a1be6649f5f5
|
[
"Apache-2.0"
] |
permissive
|
dupeljan/nncf
|
8cdce27f25f01ce8e611f15e1dc3036fb8548d6e
|
0abfd7103ca212888a946ba4d0fbdb9d436fdaff
|
refs/heads/develop
| 2023-06-22T00:10:46.611884
| 2021-07-22T10:32:11
| 2021-07-22T10:32:11
| 388,719,455
| 0
| 0
|
Apache-2.0
| 2021-07-23T07:46:15
| 2021-07-23T07:43:43
| null |
UTF-8
|
Python
| false
| false
| 2,144
|
py
|
#pylint:skip-file
import sys
from argparse import ArgumentParser
import networkx as nx
def main(argv):
    """Copy the BFS-reachable subgraph of a .dot file between two node IDs.

    Nodes are expected to be keyed like "<numeric id> <rest>"; the walk
    starts at the node whose id equals --start_id and stops once an id
    >= --finish_id is reached.
    """
    parser = ArgumentParser()
    parser.add_argument('-i', '--input_file', help='Input .dot file',
                        required=True)
    parser.add_argument('-s', '--start_id', help='Start ID (inclusive)',
                        required=True)
    parser.add_argument('-f', '--finish_id', help='Finish ID (inclusive)', required=True)
    parser.add_argument('-o', '--output_file', help='Output .dot file', required=True)
    args = parser.parse_args(args=argv)
    graph = nx.DiGraph(nx.drawing.nx_pydot.read_dot(args.input_file))
    new_graph = nx.DiGraph()
    # Find the start node: the first node (in lexicographical topological
    # order) whose leading numeric token equals --start_id.
    start_key = None
    for node_key in nx.lexicographical_topological_sort(graph):
        id_portion = node_key.split()[0]
        has_id = id_portion.isdigit()
        if has_id:
            curr_id = int(id_portion)
            if curr_id == int(args.start_id):
                start_key = node_key
                break
    if start_key is None:
        raise RuntimeError("Could not find the node with ID {} to start from!".format(args.start_id))
    # Edge-BFS outward from the start (edge direction ignored), copying
    # nodes and edges until the finish id is hit.
    for edge in nx.edge_bfs(graph, start_key, orientation='ignore'):
        from_key, to_key, _ = edge
        id_portion = from_key.split()[0]
        has_id = id_portion.isdigit()
        # end_key is only consumed by the commented-out reverse pass below.
        end_key = from_key
        if has_id:
            curr_id = int(id_portion)
            # NOTE(review): ``>=`` stops BEFORE copying edges out of the
            # finish node, so --finish_id behaves exclusively despite the
            # "(inclusive)" help text -- confirm intent.
            if curr_id >= int(args.finish_id):
                break
        node_data = graph.nodes[from_key]
        new_graph.add_node(from_key, **node_data)
        edge_data = graph.edges[from_key, to_key]
        new_graph.add_edge(from_key, to_key, **edge_data)
    # for edge in nx.edge_bfs(graph, end_key, reverse=True):
    #     from_key, to_key = edge
    #     if from_key == start_key:
    #         break
    #     node_data = graph.nodes[from_key]
    #     new_graph.add_node(from_key, **node_data)
    #     edge_data = graph.edges[from_key, to_key]
    #     new_graph.add_edge(from_key, to_key, **edge_data)
    nx.drawing.nx_pydot.write_dot(new_graph, args.output_file)

if __name__ == '__main__':
    main(sys.argv[1:])
|
[
"noreply@github.com"
] |
dupeljan.noreply@github.com
|
997cfed7e90d02a247f84a51c5934988fa32ac75
|
d3cfa86c22ab3bd6f2dcbbd72f9cf5f6bf574cd1
|
/gridappsd/utils.py
|
b455a1f97896802d08297150dfac9078f62c104b
|
[] |
no_license
|
ariwoolar/gridappsd-python
|
30fbc9a36b0d7be43f6fb0ff8b0f39d76fa6f4ed
|
1e8ddc88de7fc9cabb17b1ab34f2756e8c37127c
|
refs/heads/master
| 2023-02-02T20:09:46.491618
| 2020-09-10T20:09:34
| 2020-09-10T20:09:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,691
|
py
|
import datetime, time
from dateutil import parser
import os
try: # python2.7
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
# Connection settings for the GridAPPS-D message bus, overridable via env.
__GRIDAPPSD_URI__ = os.environ.get("GRIDAPPSD_URI", "localhost:61613")
__GRIDAPPSD_USER__ = os.environ.get("GRIDAPPSD_USER", "system")
__GRIDAPPSD_PASS__ = os.environ.get("GRIDAPPSD_PASS", "manager")
# (removed a stray no-op expression statement that just evaluated
# __GRIDAPPSD_URI__ and discarded the result)
# Normalize the URI so urlparse() sees a scheme and can extract host/port.
if not __GRIDAPPSD_URI__.startswith("tcp://"):
    __GRIDAPPSD_URI__ = "tcp://" + __GRIDAPPSD_URI__
__GRIDAPPSD_URI_PARSED__ = urlparse(__GRIDAPPSD_URI__)
def datetime_to_epoche(dt):
    """Convert a naive datetime to milliseconds since the Unix epoch.

    ``time.mktime`` interprets the struct_time in the machine's local
    timezone, so *dt* is treated as local wall-clock time.
    """
    seconds = time.mktime(dt.timetuple())
    return int(seconds * 1000)
def datestr_to_epoche(dt_str):
    """Parse *dt_str* with dateutil and convert it to epoch milliseconds."""
    return datetime_to_epoche(parser.parse(dt_str))
def epoche_to_datetime(epoche):
    """Convert epoch *seconds* to a naive local datetime.

    NOTE(review): this takes seconds while datetime_to_epoche returns
    milliseconds -- callers must divide by 1000 for a round trip; confirm.
    """
    moment = datetime.datetime.fromtimestamp(epoche)
    return moment
def utc_timestamp():
    """Return the current UTC time as milliseconds since the Unix epoch.

    BUG FIX: the old code ran ``utcnow()`` through ``datetime_to_epoche``,
    whose ``time.mktime`` interprets the struct_time as *local* time --
    skewing the result by the machine's UTC offset. ``calendar.timegm`` is
    the UTC-correct inverse. Second resolution (*1000) is preserved.
    """
    import calendar  # stdlib; local import keeps the module surface unchanged
    now = datetime.datetime.utcnow()
    return int(calendar.timegm(now.timetuple()) * 1000)
def validate_gridappsd_uri():
    """Return a list of problems found with the configured GridAPPS-D URI.

    An empty list means the URI parses into a usable host and port.
    """
    uri = __GRIDAPPSD_URI__
    if not uri.startswith("tcp://"):
        uri = "tcp://" + uri
    parsed = urlparse(uri)

    problems = []
    if not parsed.port:
        problems.append("Invalid port specified in URI modify environment GRIDAPPSD_URI")
    if not parsed.hostname:
        problems.append("Invalid hostname not specified!")
    return problems
def get_gridappsd_address():
    """
    Returns the address in such a way that the response will be
    able to be passed directly to a socket and/or the stomp library.

    :return: tuple(address, port)
    """
    return (__GRIDAPPSD_URI_PARSED__.hostname,
            __GRIDAPPSD_URI_PARSED__.port)

def get_gridappsd_user():
    # Message-bus username (env GRIDAPPSD_USER, default "system").
    return __GRIDAPPSD_USER__

def get_gridappsd_pass():
    # Message-bus password (env GRIDAPPSD_PASS, default "manager").
    return __GRIDAPPSD_PASS__
def get_gridappsd_application_id():
    """ Retrieve the application_id from the environment.

    In order to use this function an environmental variable
    `GRIDAPPSD_APPLICATION_ID` must have been set. For docker containers
    this is done in the `gridappsd.app_registration` callback when the
    application is started. If the environmental variable is not set an
    AttributeError will be raised.
    """
    app_id = os.environ.get("GRIDAPPSD_APPLICATION_ID")
    if app_id:
        return app_id
    # Unset OR empty both count as "not set", matching the original check.
    raise AttributeError("environmental variable for GRIDAPPSD_APPLICATION_ID is not set")
def get_gridappsd_simulation_id():
    """ Retrieve simulation_id from environment.

    Returns None when the GRIDAPPSD_SIMULATION_ID environmental variable
    is not set.
    """
    return os.environ.get("GRIDAPPSD_SIMULATION_ID")
|
[
"craig.allwardt@pnnl.gov"
] |
craig.allwardt@pnnl.gov
|
73f77c0541a623faed3445fd42c78e95a3f43de3
|
b7a98a761007cf8b913b75152a278c1693f2d00d
|
/code/ortools/simple.py
|
a22169600415ebac07b2a7f71ee7fba972372f13
|
[] |
no_license
|
VuHoangvn/descrete_optimize
|
be18fcfc230a9d1133ec94a49b1fc5cfcb50b449
|
7a17d472af7d54624860aeff590db423a7e47d59
|
refs/heads/master
| 2022-10-14T00:22:50.368095
| 2020-06-05T09:48:31
| 2020-06-05T09:48:31
| 261,913,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 829
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from ortools.sat.python import cp_model
def SimpleSatProgram():
    """Minimal CP-SAT example to showcase calling the solver.

    Builds three 0..2 integer variables with the single constraint x != y,
    solves, and prints one satisfying assignment.
    """
    # Creates the model.
    model = cp_model.CpModel()

    # Creates the variables.
    num_vals = 3
    x = model.NewIntVar(0, num_vals - 1, 'x')
    y = model.NewIntVar(0, num_vals - 1, 'y')
    z = model.NewIntVar(0, num_vals - 1, 'z')

    # Creates the constraints.
    model.Add(x != y)

    # Creates a solver and solves the model.
    solver = cp_model.CpSolver()
    status = solver.Solve(model)

    # BUG FIX: CP-SAT reports OPTIMAL (not FEASIBLE) once it fully explores
    # a pure satisfiability model, so checking FEASIBLE alone usually prints
    # nothing. Accept both statuses, as the current official examples do.
    if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
        print('x = %i' % solver.Value(x))
        print('y = %i' % solver.Value(y))
        print('z = %i' % solver.Value(z))

SimpleSatProgram()
|
[
"20161728@student.hust.edu.vn"
] |
20161728@student.hust.edu.vn
|
0657eee5800ac668b250193a08ebbb5b92cfbdb1
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/edifact/D09B/SANCRTD09BUN.py
|
b94d0510dd191e38d7227ae909cae41f4e766c11
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 4,024
|
py
|
#Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD09BUN import recorddefs
# Segment table for the UN/EDIFACT D09B SANCRT message, consumed by the
# bots edi translator: ID = segment tag, MIN/MAX = repetition bounds,
# LEVEL = a nested segment group.
structure = [
    {ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
        # Header section: identification, dates, statuses, references.
        {ID: 'BGM', MIN: 1, MAX: 1},
        {ID: 'DTM', MIN: 0, MAX: 99},
        {ID: 'STS', MIN: 0, MAX: 99},
        {ID: 'LOC', MIN: 0, MAX: 99},
        {ID: 'RFF', MIN: 0, MAX: 99},
        {ID: 'FTX', MIN: 0, MAX: 99},
        {ID: 'MEA', MIN: 0, MAX: 99},
        {ID: 'MOA', MIN: 0, MAX: 99},
        {ID: 'GEI', MIN: 0, MAX: 99},
        {ID: 'CST', MIN: 0, MAX: 1},
        {ID: 'DOC', MIN: 0, MAX: 99, LEVEL: [
            {ID: 'DTM', MIN: 0, MAX: 9},
            {ID: 'LOC', MIN: 0, MAX: 9},
        ]},
        {ID: 'PNA', MIN: 0, MAX: 99, LEVEL: [
            {ID: 'ADR', MIN: 0, MAX: 9},
            {ID: 'CTA', MIN: 0, MAX: 9, LEVEL: [
                {ID: 'COM', MIN: 0, MAX: 9},
            ]},
        ]},
        {ID: 'TDT', MIN: 0, MAX: 99, LEVEL: [
            {ID: 'DTM', MIN: 0, MAX: 9},
            {ID: 'LOC', MIN: 0, MAX: 9},
            {ID: 'RFF', MIN: 0, MAX: 9},
        ]},
        {ID: 'PAC', MIN: 0, MAX: 99, LEVEL: [
            {ID: 'PCI', MIN: 0, MAX: 9},
            {ID: 'MEA', MIN: 0, MAX: 9},
        ]},
        {ID: 'EQD', MIN: 0, MAX: 99, LEVEL: [
            {ID: 'TMP', MIN: 0, MAX: 9},
            {ID: 'SEL', MIN: 0, MAX: 99, LEVEL: [
                {ID: 'DTM', MIN: 0, MAX: 9},
                {ID: 'LOC', MIN: 0, MAX: 9},
            ]},
        ]},
        {ID: 'PRC', MIN: 0, MAX: 99, LEVEL: [
            {ID: 'IMD', MIN: 0, MAX: 9},
            {ID: 'MEA', MIN: 0, MAX: 9},
            {ID: 'DTM', MIN: 0, MAX: 9},
            {ID: 'LOC', MIN: 0, MAX: 9},
            {ID: 'DOC', MIN: 0, MAX: 9},
            {ID: 'RFF', MIN: 0, MAX: 9},
            {ID: 'TMP', MIN: 0, MAX: 9},
            {ID: 'GEI', MIN: 0, MAX: 99},
            {ID: 'FTX', MIN: 0, MAX: 99},
            {ID: 'PNA', MIN: 0, MAX: 99, LEVEL: [
                {ID: 'ADR', MIN: 0, MAX: 9},
                {ID: 'CTA', MIN: 0, MAX: 9, LEVEL: [
                    {ID: 'COM', MIN: 0, MAX: 9},
                ]},
            ]},
        ]},
        # Line-item group (LIN): per-item detail mirroring the header groups.
        {ID: 'LIN', MIN: 0, MAX: 9999, LEVEL: [
            {ID: 'CST', MIN: 0, MAX: 9},
            {ID: 'MEA', MIN: 0, MAX: 9},
            {ID: 'PIA', MIN: 0, MAX: 9},
            {ID: 'IMD', MIN: 0, MAX: 9},
            {ID: 'GIN', MIN: 0, MAX: 9999},
            {ID: 'RFF', MIN: 0, MAX: 9},
            {ID: 'ATT', MIN: 0, MAX: 9},
            {ID: 'DTM', MIN: 0, MAX: 9},
            {ID: 'LOC', MIN: 0, MAX: 9},
            {ID: 'FTX', MIN: 0, MAX: 9},
            {ID: 'QTY', MIN: 0, MAX: 9},
            {ID: 'MOA', MIN: 0, MAX: 9},
            {ID: 'DOC', MIN: 0, MAX: 99, LEVEL: [
                {ID: 'DTM', MIN: 0, MAX: 9},
                {ID: 'LOC', MIN: 0, MAX: 9},
            ]},
            {ID: 'PNA', MIN: 0, MAX: 99, LEVEL: [
                {ID: 'ADR', MIN: 0, MAX: 9},
                {ID: 'CTA', MIN: 0, MAX: 9, LEVEL: [
                    {ID: 'COM', MIN: 0, MAX: 9},
                ]},
            ]},
            {ID: 'PAC', MIN: 0, MAX: 99, LEVEL: [
                {ID: 'PCI', MIN: 0, MAX: 9},
                {ID: 'MEA', MIN: 0, MAX: 9},
            ]},
            {ID: 'EQD', MIN: 0, MAX: 99, LEVEL: [
                {ID: 'TMP', MIN: 0, MAX: 9},
                {ID: 'SEL', MIN: 0, MAX: 99, LEVEL: [
                    {ID: 'DTM', MIN: 0, MAX: 9},
                    {ID: 'LOC', MIN: 0, MAX: 9},
                ]},
            ]},
            {ID: 'PRC', MIN: 0, MAX: 99, LEVEL: [
                {ID: 'IMD', MIN: 0, MAX: 9},
                {ID: 'MEA', MIN: 0, MAX: 9},
                {ID: 'DTM', MIN: 0, MAX: 9},
                {ID: 'LOC', MIN: 0, MAX: 9},
                {ID: 'DOC', MIN: 0, MAX: 9},
                {ID: 'RFF', MIN: 0, MAX: 9},
                {ID: 'TMP', MIN: 0, MAX: 9},
                {ID: 'GEI', MIN: 0, MAX: 99},
                {ID: 'FTX', MIN: 0, MAX: 99},
                {ID: 'PNA', MIN: 0, MAX: 99, LEVEL: [
                    {ID: 'ADR', MIN: 0, MAX: 9},
                    {ID: 'CTA', MIN: 0, MAX: 9, LEVEL: [
                        {ID: 'COM', MIN: 0, MAX: 9},
                    ]},
                ]},
            ]},
        ]},
        # Summary section: totals and authentication.
        {ID: 'CNT', MIN: 0, MAX: 9},
        {ID: 'AUT', MIN: 0, MAX: 99, LEVEL: [
            {ID: 'DTM', MIN: 0, MAX: 9},
        ]},
        {ID: 'UNT', MIN: 1, MAX: 1},
    ]},
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
d1c16ded968ad2ed9bdec10b3979d2cc493cc4d1
|
e835059c8084d3bd92d8f24a748136617cf8a2a3
|
/8/radd_test.py
|
87352539fc8175af3eeab0c1c908d923ecdf9d3a
|
[] |
no_license
|
Carlzkh/CrazyPythonNotes
|
7141b062722e2b8354ce3566f0b8d086bbfad9b1
|
04a8648ac2150e07520252d882c5cbc81cc3b9f9
|
refs/heads/master
| 2021-07-11T22:57:06.610728
| 2021-06-29T07:37:16
| 2021-06-29T07:37:16
| 247,253,027
| 0
| 0
| null | 2021-01-05T08:48:45
| 2020-03-14T10:08:34
|
Python
|
UTF-8
|
Python
| false
| false
| 2,076
|
py
|
"""
object.__radd__(self, other):当y提供该方法时,可执行 x + y
object.__rsub__(self, other):当y提供该方法时,可执行 x - y
object.__rmul__(self, other):当y提供该方法时,可执行 x * y
object.__rmatmul__(self, other):当y提供该方法时,可执行 x @ y
object.__rtruediv__(self, other):当y提供该方法时,可执行 x / y
object.__rfloordiv__(self, other):当y提供该方法时,可执行 x // y
object.__rmod__(self, other):当y提供该方法时,可执行 x % y
object.__rdivmod__(self, other):当y提供该方法时,可执行 divmod(x, y)
object.__rpow__(self, other[, modulo]):当y提供该方法时,可执行 x ** y
object.__rlshift__(self, other):当y提供该方法时,可执行 x << y
object.__rrshift__(self, other):当y提供该方法时,可执行 x >> y
object.__rand__(self, other):当y提供该方法时,可执行 x & y
object.__rxor__(self, other):当y提供该方法时,可执行 x ^ y
object.__ror__(self, other):当y提供该方法时,可执行 x | y
简单来说,定义类提供了上面列出的 __rxxx__() 方法,那么该自定义类的对象就可以出现在对应运算符的右边
"""
class Rectangle:
def __init__(self, width, height):
self.width = width
self.height = height
# 定义 setSize ()函数
def set_size(self, size):
self.width, self.height = size
# 定义 getSize()函数
def get_size(self):
return self.width, self.height
# 使用 property 定义属性
size = property(get_size, set_size)
# 定义__add__方法,该对象可执行“+”运算
def __radd__(self, other):
if not (isinstance(other, int) or isinstance(other, float)):
raise TypeError('+运算要求目标是数值')
return Rectangle(self.width + other, self.height + other)
def __repr__(self):
return 'Rectangle(width=%g, height=%g)' % (self.width, self.height)
r1 = Rectangle(4, 5)
r = 4 + r1
print(r) # Rectangle(width=7 , height=9)
|
[
"291747808@qq.com"
] |
291747808@qq.com
|
efa0e5ddb6154a7a27511647626b98da91f3bf51
|
1b57d1ce3baa5484cb517e916e2b3c7b66196672
|
/tests/test_config.py
|
fe63aa3f70ced5c38ca198ca7e118ae12d80d2b3
|
[
"MIT"
] |
permissive
|
cfhamlet/os-config
|
84e3b03456c554408148b448ee934a74a50b0bb0
|
5a875ac07972ef9e4d1d6887ea48c974363f2438
|
refs/heads/master
| 2020-04-09T07:43:36.575488
| 2019-01-08T04:43:20
| 2019-01-08T04:43:20
| 160,168,165
| 3
| 0
|
MIT
| 2019-01-08T04:43:21
| 2018-12-03T09:47:25
|
Python
|
UTF-8
|
Python
| false
| false
| 4,513
|
py
|
import pytest
from os_config import Config
def test_create():
with pytest.raises(TypeError):
c = Config()
c = Config.create(a=1, b=2)
assert c.a == 1
assert c.b == 2
def test_create_from_dict():
d = {'a': 3, 'b': 4}
c = Config.from_dict(d)
assert c.a == 3
assert c.b == 4
def test_simple_recursion():
c = Config.create()
with pytest.raises(AttributeError):
c.c = c
def test_tuple_recursion():
c = Config.create()
with pytest.raises(AttributeError):
c.c = (c, )
def test_deep_recursion():
a = Config.create()
b = Config.create()
c = Config.create()
b.c = c
c.a = a
with pytest.raises(AttributeError):
a.b = b
def test_invalid_attribute_name():
c = Config.create()
for k in ['1', '_a', '*']:
with pytest.raises(AttributeError):
setattr(c, k, None)
def test_valid_type():
c = Config.create()
for v in [1, False, (1, 2), None, 1.1, Config.create()]:
setattr(c, 'test_key', v)
assert getattr(c, 'test_key') == v
def test_invalid_type():
class TestClass(object):
pass
def test_method():
pass
c = Config.create()
for v in [TestClass, TestClass(), test_method, ]:
with pytest.raises(AttributeError):
c.c = v
def test_update_from_config_01():
c = Config.create(a=1, b=2)
d = Config.create()
Config.update(d, c)
assert d.a == 1
assert d.b == 2
def test_update_from_config_02():
c = Config.create(a=1, b=2)
d = Config.create()
d.a = 2
Config.update(d, c)
assert d.a == 1
def test_udpate_from_config_recursive_01():
c = Config.create()
d = Config.create()
d.m = c
with pytest.raises(AttributeError):
Config.update(c, d)
def test_udpate_from_config_recursive_02():
c = Config.create()
d = Config.create()
d.m = (c,)
with pytest.raises(AttributeError):
Config.update(c, d)
def test_udpate_from_config_recursive_03():
c = Config.create()
d = Config.create()
e = Config.create()
e.m = c
d.m = (e,)
with pytest.raises(AttributeError):
Config.update(c, d)
def test_update_from_dict_01():
c = Config.create()
Config.update(c, {'a': 1, 'b': 2})
assert c.a == 1
assert c.b == 2
def test_update_from_dict_02():
c = Config.create()
d = {'a': {'b': 1}}
Config.update(c, d)
assert c.a.b == 1
def test_update_from_dict_03():
c = Config.create()
b = Config.create(b=1)
d = {'a': b}
Config.update(c, d)
assert c.a.b == 1
def test_update_from_dict_04():
c = Config.create(a=1)
assert c.a == 1
b = Config.create(b=1)
d = {'a': b}
Config.update(c, d)
assert c.a.b == 1
def test_update_from_dict_05():
c = Config.create()
b = Config.create(b=1)
d = {'a': b}
Config.update(c, d)
d = {'a': 1}
Config.update(c, d)
assert c.a == 1
def test_create_from_json_01():
d = {'a': 1}
import json
j = json.dumps(d)
c = Config.from_json(j)
assert c.a == 1
def test_dump_to_json_01():
c = Config.create(a=1)
j = Config.to_json(c)
import json
d = json.loads(j)
assert d['a'] == 1
def test_tuple_with_list():
d = {'a': (1, 2, [1, 2, 3])}
c = Config.from_dict(d)
assert c.a == (1, 2, (1, 2, 3))
def test_tuple_with_dict():
d = {'a': (1, {'b': 2}, [3, 4, 5])}
c = Config.from_dict(d)
assert c.a[1].b == 2
def test_create_from_object():
class A(object):
a = 1
c = Config.from_object(A)
assert c.a == 1
def test_sub_config():
c = Config.create()
a = Config.create()
c.a = a
c.b = a
c.a = 1
with pytest.raises(AttributeError):
a.c = c
def test_pop():
c = Config.create(a=1)
Config.pop(c, 'a')
assert len(c) == 0
def test_get():
c = Config.create(a=1)
assert Config.get(c, 'a') == 1
assert Config.get(c, 'b') is None
assert Config.get(c, 'c', 2) == 2
def test_from_pyfile(tmpdir):
txt = r'''
a = 1
b = [1,2,3]
'''
f = tmpdir.join('test_config.py')
f.write(txt)
c = Config.from_pyfile(f.strpath)
assert c.a == 1
assert c.b == (1, 2, 3)
def test_to_json():
import json
c = Config.create(a=1)
d = json.loads(Config.to_json(c))
d['a'] == 1
def test_to_dict():
d = {'a': 1, 'b': (1, 2, 3), 'c': {'e': (4, 5, 6)}}
c = Config.from_dict(d)
dd = Config.to_dict(c)
assert d == dd
|
[
"cfhamlet@gmail.com"
] |
cfhamlet@gmail.com
|
ccfe4b8ffb05eaba3eeba44922978edf51809bda
|
3aab11d445011f4a0de1376886dd3899aba44e68
|
/opps/db/conf.py
|
eedef8cb2db0ef10333332563c15c6b0d5c6b5ca
|
[
"MIT"
] |
permissive
|
opps/opps
|
4ba6a08ac5aa31be48c245b2e8f9d9a714a5e473
|
5552924fa34ea40d24febeac5046bd59f62e0e4f
|
refs/heads/master
| 2023-08-24T21:09:23.489540
| 2023-05-22T20:07:33
| 2023-05-22T20:07:33
| 7,712,379
| 166
| 76
|
MIT
| 2022-01-06T22:53:23
| 2013-01-20T03:56:15
|
Python
|
UTF-8
|
Python
| false
| false
| 572
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf import settings
from appconf import AppConf
class OppsDataBaseConf(AppConf):
HOST = getattr(settings, 'OPPS_DB_HOSR', None)
USER = getattr(settings, 'OPPS_DB_USER', None)
PASSWORD = getattr(settings, 'OPPS_DB_PASSWORD', None)
PORT = getattr(settings, 'OPPS_DB_PORT', None)
NAME = getattr(settings, 'OPPS_DB_NAME', 'opps_db')
ENGINE = getattr(settings, 'OPPS_DB_ENGINE', 'opps.db.')
OPTION = getattr(settings, 'OPPS_BD_OPTION', None)
class Meta:
prefix = 'opps_db'
|
[
"thiagoavelinoster@gmail.com"
] |
thiagoavelinoster@gmail.com
|
c618f6eb814a4c418adb0624e5c2dc54c47b4cc3
|
542c040b9b2150d789096f031dcf7a4362b034fe
|
/training/migrations/0003_auto_20210501_1721.py
|
bb92ba559a946476499ee525d0edc0012c29ba25
|
[
"Unlicense"
] |
permissive
|
rafimuhammad01/mtf-hackathon
|
3115412f673774cc3991bd2a67854bfa645966d1
|
83ab410239a93ff04e57d7ceb2d1d292ba365866
|
refs/heads/main
| 2023-04-12T15:28:41.506450
| 2021-05-22T11:27:05
| 2021-05-22T11:27:05
| 360,573,233
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 765
|
py
|
# Generated by Django 3.2 on 2021-05-01 10:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('training', '0002_auto_20210501_1607'),
]
operations = [
migrations.AddField(
model_name='training',
name='linkUrl',
field=models.URLField(blank=True),
),
migrations.AddField(
model_name='training',
name='location',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AddField(
model_name='training',
name='method',
field=models.IntegerField(blank=True, choices=[(0, 'Online'), (0, 'Offline')], null=True),
),
]
|
[
"rafi10muhammad@gmail.com"
] |
rafi10muhammad@gmail.com
|
abeb6300b115c2987444589968634de355eca323
|
16819f2d2a924a8df8c24754241f94247d202141
|
/backend/advyon_24955/wsgi.py
|
02ac84499c7199934b67c6b4cab2b07b0c264618
|
[] |
no_license
|
crowdbotics-apps/advyon-24955
|
922dea646dc87c5bd786ff01aa2a8ed4d19636a5
|
a153d936b8cba32003c66bccf95d617c1a63d869
|
refs/heads/master
| 2023-03-16T07:40:45.604601
| 2021-03-10T20:47:22
| 2021-03-10T20:47:22
| 346,487,669
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 401
|
py
|
"""
WSGI config for advyon_24955 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'advyon_24955.settings')
application = get_wsgi_application()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
7ec138bc879d91a44e191223ca877c75087340d8
|
232fc2c14942d3e7e28877b502841e6f88696c1a
|
/dizoo/pomdp/entry/pomdp_ppo_default_config.py
|
8b4bbde7287f6f31890d1be86bf31692489890e4
|
[
"Apache-2.0"
] |
permissive
|
shengxuesun/DI-engine
|
ebf84221b115b38b4b3fdf3079c66fe81d42d0f7
|
eb483fa6e46602d58c8e7d2ca1e566adca28e703
|
refs/heads/main
| 2023-06-14T23:27:06.606334
| 2021-07-12T12:36:18
| 2021-07-12T12:36:18
| 385,454,483
| 1
| 0
|
Apache-2.0
| 2021-07-13T02:56:27
| 2021-07-13T02:56:27
| null |
UTF-8
|
Python
| false
| false
| 2,413
|
py
|
from ding.entry import serial_pipeline
from easydict import EasyDict
pong_ppo_config = dict(
env=dict(
collector_env_num=16,
evaluator_env_num=4,
n_evaluator_episode=8,
stop_value=20,
env_id='Pong-ramNoFrameskip-v4',
frame_stack=4,
warp_frame=False,
use_ram=True,
pomdp=dict(noise_scale=0.01, zero_p=0.2, reward_noise=0.01, duplicate_p=0.2),
manager=dict(
shared_memory=False,
)
),
policy=dict(
cuda=True,
on_policy=False,
# (bool) whether use on-policy training pipeline(behaviour policy and training policy are the same)
model=dict(
obs_shape=[512, ],
action_shape=6,
encoder_hidden_size_list=[512, 512, 256],
actor_head_hidden_size=256,
actor_head_layer_num=2,
critic_head_hidden_size=256,
critic_head_layer_num=2,
),
learn=dict(
update_per_collect=16,
batch_size=128,
# (bool) Whether to normalize advantage. Default to False.
normalize_advantage=False,
learning_rate=0.0001,
weight_decay=0,
# (float) loss weight of the value network, the weight of policy network is set to 1
value_weight=0.5,
# (float) loss weight of the entropy regularization, the weight of policy network is set to 1
entropy_weight=0.03,
clip_ratio=0.1,
),
collect=dict(
# (int) collect n_sample data, train model n_iteration times
n_sample=1024,
# (float) the trade-off factor lambda to balance 1step td and mc
gae_lambda=0.97,
discount_factor=0.99,
),
eval=dict(evaluator=dict(eval_freq=200, )),
other=dict(
replay_buffer=dict(
replay_buffer_size=100000,
max_use=3,
min_sample_ratio=1,
),
),
),
)
main_config = EasyDict(pong_ppo_config)
pong_ppo_create_config = dict(
env=dict(
type='pomdp',
import_names=['app_zoo.pomdp.envs.atari_env'],
),
env_manager=dict(type='subprocess'),
policy=dict(type='ppo'),
)
create_config = EasyDict(pong_ppo_create_config)
if __name__ == '__main__':
serial_pipeline((main_config, create_config), seed=0)
|
[
"niuyazhe@sensetime.com"
] |
niuyazhe@sensetime.com
|
e257465739f45cb8c41e0db62fca6e5c5961ffa1
|
adbf09a31415e6cf692ff349bd908ea25ded42a8
|
/revision/imports/at_decorators.py
|
2aabeeb6b8c5d049d7838859f8ed33c4e323cd5f
|
[] |
no_license
|
cmulliss/gui_python
|
53a569f301cc82b58880c3c0b2b415fad1ecc3f8
|
6c83d8c2e834464b99024ffd8cf46ac4e734e7a4
|
refs/heads/main
| 2023-08-12T22:33:01.596005
| 2021-10-11T12:35:41
| 2021-10-11T12:35:41
| 408,176,101
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
import functools
user = {"username": "jose", "access_level": "guest"}
def make_secure(func):
@functools.wraps(func)
def secure_function():
if user["access_level"] == "admin":
return func()
else:
return f"No admin permissions for {user['username']}."
return secure_function
# @make_secure on top of a fn definition, that will prevent the fn from being created as is, and instead it will create it and pass it through the decorator in one go.
@make_secure
def get_admin_password():
return "1234"
# user = {"username": "bob", "access_level": "admin"}
print(get_admin_password())
|
[
"cmulliss@gmail.com"
] |
cmulliss@gmail.com
|
8ecbea64502b0476e4dd3ec28e53802d98d8344c
|
5945903ff7b3c0be799d8b228aa96309e8d6b68a
|
/LeetCode_Offer_II_004.py
|
3d9937377035c818327328a19057cccb76588d80
|
[] |
no_license
|
freesan44/LeetCode
|
44fd01fa37e2d7e729ae947da2350b1649c163ae
|
2ed9f1955c527d43fe1a02e5bebf5a6f981ef388
|
refs/heads/master
| 2021-12-07T20:07:02.308097
| 2021-11-01T23:58:11
| 2021-11-01T23:58:11
| 245,178,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
class Solution:
def singleNumber(self, nums: List[int]) -> int:
from collections import Counter
counter = Counter(nums).most_common()
x,y = counter[-1]
# print(x,y)
return x
if __name__ == '__main__':
# nums = [2,2,3,2]
nums = [0, 1, 0, 1, 0, 1, 100]
ret = Solution().singleNumber(nums)
print(ret)
|
[
"freesan44@163.com"
] |
freesan44@163.com
|
cf6a17b5b27c54829138ed5b6d13c654a57a13d9
|
03869888260ab1b28c5912f6019a44b1fcbb6c19
|
/acrylamid/templates/jinja2.py
|
95c8c1f7f305e42f346aaed4e42d7d357029850f
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
kenkeiras/acrylamid
|
515971b5385a554f310683d993eb34e38611d50a
|
792298eb32daa0e703afdb2894ee121dc3861d43
|
refs/heads/master
| 2020-03-27T12:33:54.176728
| 2013-03-26T11:35:09
| 2013-03-26T11:35:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,374
|
py
|
# -*- encoding: utf-8 -*-
#
# Copyright 2012 Martin Zimmermann <info@posativ.org>. All rights reserved.
# License: BSD Style, 2 clauses -- see LICENSE.
from __future__ import absolute_import
from io import StringIO
from os.path import exists, getmtime
from jinja2 import Environment as J2Environemnt, FileSystemBytecodeCache
from jinja2 import FileSystemLoader, meta
from acrylamid.templates import AbstractEnvironment, AbstractTemplate
class ExtendedFileSystemLoader(FileSystemLoader):
"""A custom :class:`jinja2.FileSystemLoader` to work with Acrylamid's
caching requirements. Jinja2 does track template changes using the
modification timestamp of the compiled but we need template dependencies
as well as consistent modified values over the whole compilation
process."""
# remember already resolved templates
resolved = {}
# used templates
used = set(['macros.html', ])
def load(self, environment, name, globals=None):
"""patched `load` to add a modified attribute providing information
whether the template or its parents have changed."""
def resolve(parent):
"""We check whether any dependency (extend-block) has changed and
update the bucket -- recursively. Returns True if the template
itself or any parent template has changed. Otherwise False."""
self.used.add(parent)
if parent in self.resolved:
return self.resolved[parent]
source, filename, uptodate = self.get_source(environment, parent)
bucket = bcc.get_bucket(environment, parent, filename, source)
p = bcc._get_cache_filename(bucket)
modified = getmtime(filename) > getmtime(p) if exists(p) else True
if modified:
# updating cached template if timestamp has changed
code = environment.compile(source, parent, filename)
bucket.code = code
bcc.set_bucket(bucket)
self.resolved[parent] = True
return True
ast = environment.parse(source)
for name in meta.find_referenced_templates(ast):
rv = resolve(name)
if rv:
# XXX double-return to break this recursion?
return True
if globals is None:
globals = {}
source, filename, uptodate = self.get_source(environment, name)
bcc = environment.bytecode_cache
bucket = bcc.get_bucket(environment, name, filename, source)
modified = bool(resolve(name))
code = bucket.code
if code is None:
code = environment.compile(source, name, filename)
tt = environment.template_class.from_code(environment, code,
globals, uptodate)
tt.modified = modified
return tt
class Environment(AbstractEnvironment):
def init(self, layoutdir, cachedir):
self.jinja2 = J2Environemnt(loader=ExtendedFileSystemLoader(layoutdir),
bytecode_cache=FileSystemBytecodeCache(cachedir))
# jinja2 is stupid and can't import any module during runtime
import time, datetime, urllib
for module in (time, datetime, urllib):
self.jinja2.globals[module.__name__] = module
for name in dir(module):
if name.startswith('_'):
continue
obj = getattr(module, name)
if hasattr(obj, '__class__') and callable(obj):
self.jinja2.filters[module.__name__ + '.' + name] = obj
def register(self, name, func):
self.jinja2.filters[name] = func
def fromfile(self, path):
return Template(self.jinja2.get_template(path))
def extend(self, path):
self.jinja2.loader.searchpath.append(path)
@property
def templates(self):
return self.jinja2.loader.used
@property
def extension(self):
return ['.html', '.j2']
class Template(AbstractTemplate):
def __init__(self, template):
self.template = template
def render(self, **kw):
buf = StringIO()
self.template.stream(**kw).dump(buf)
return buf
@property
def modified(self):
return self.template.modified
|
[
"info@posativ.org"
] |
info@posativ.org
|
48a2ca87abfdda05840e297a3a45819b20ef60d0
|
c3a84a07539c33040376f2c1e140b1a1041f719e
|
/wagtail-stubs/contrib/postgres_search/models.pyi
|
c9990209da1b7ca59c1ff89fcdab8dbd624dd3d4
|
[] |
no_license
|
tm-kn/tmp-wagtail-stubs
|
cc1a4434b7142cb91bf42efb7daad006c4a7dbf4
|
23ac96406610b87b2e7751bc18f0ccd27f17eb44
|
refs/heads/master
| 2023-01-20T14:41:33.962460
| 2020-11-30T23:15:38
| 2020-11-30T23:15:38
| 317,332,280
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 943
|
pyi
|
from .utils import get_descendants_content_types_pks as get_descendants_content_types_pks
from django.contrib.contenttypes.fields import GenericRelation
from django.db import models
from typing import Any
from wagtail.search.index import class_is_indexed as class_is_indexed
class TextIDGenericRelation(GenericRelation):
auto_created: bool = ...
def get_content_type_lookup(self, alias: Any, remote_alias: Any): ...
def get_object_id_lookup(self, alias: Any, remote_alias: Any): ...
def get_extra_restriction(self, where_class: Any, alias: Any, remote_alias: Any): ...
def resolve_related_fields(self): ...
class IndexEntry(models.Model):
content_type: Any = ...
object_id: Any = ...
content_object: Any = ...
autocomplete: Any = ...
title: Any = ...
title_norm: Any = ...
body: Any = ...
@property
def model(self): ...
@classmethod
def add_generic_relations(cls) -> None: ...
|
[
"hi@tmkn.org"
] |
hi@tmkn.org
|
df1c8a26e0becbc5ddede3a5087948f61aaf7f2e
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/crossmarketetf_bak/crossmarket_creation_HA/YW_CETFSS_SHSG_020.py
|
5d28365fc63ea90b37a0dd3fec3d52f5c0307129
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,323
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test")
from crossmarketetf.cetfservice.cetf_main_service import *
from crossmarketetf.cetfservice.cetf_get_components_asset import *
from crossmarketetf.cetfservice.cetf_utils import *
from mysql.QueryOrderErrorMsg import queryOrderErrorMsg
from service.mainService import *
from mysql.getUpOrDownPrice import getUpPrice
class YW_CETFSS_SHSG_020(xtp_test_case):
def test_YW_CETFSS_SHSG_020(self):
# -----------ETF申购-------------
title = '上海ETF申购--可现金替代:T-1日无成分股&资金不足&计算现金比例=最大现金比例→T日申购ETF'
# 定义当前测试用例的期待值
# 期望状态:初始、未成交、全成、废单、撤废、内部撤单
# xtp_ID和cancel_xtpID默认为0,不需要变动
case_goal = {
'期望状态': '废单',
'errorID': 11010120,
'errorMSG': queryOrderErrorMsg(11010120),
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
unit_info = {
'ticker': '550320', # etf代码
'etf_unit': 1.0, # etf申购单位数
'etf_unit_sell': 1.0, # etf卖出单位数
'component_unit_sell': 1.0 # 成分股卖出单位数
}
# -----------查询ETF申购前成分股持仓-------------
component_stk_info = cetf_get_all_component_stk(Api,unit_info['ticker'])
# 查询etf最小申赎数量
unit_number = query_creation_redem_unit(unit_info['ticker'])
# etf申购数量
quantity = int(unit_info['etf_unit'] * unit_number)
# 定义委托参数信息------------------------------------------
wt_reqs = {
'business_type':
Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_ETF'],
'order_client_id':
2,
'market':
Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker':
unit_info['ticker'],
'side':
Api.const.XTP_SIDE_TYPE['XTP_SIDE_PURCHASE'],
'price_type':
Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
'quantity':
quantity
}
g_func.cetf_parm_init(case_goal['期望状态'])
rs1 = cetf_service_test(Api, case_goal, wt_reqs,component_stk_info)
etf_creation_log(case_goal, rs1)
self.assertEqual(rs1['用例测试结果'], True)
# --------二级市场,卖出etf-----------
case_goal['期望状态'] = '废单'
case_goal['errorID'] = 11010121
case_goal['errorMSG'] = queryOrderErrorMsg(11010121)
# 二级市场卖出的etf数量
quantity = int(unit_info['etf_unit_sell'] * unit_number)
quantity_list = split_etf_quantity(quantity)
# 查询涨停价
limitup_px = getUpPrice(unit_info['ticker'])
rs2 = {}
for etf_quantity in quantity_list:
wt_reqs_etf = {
'business_type':
Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':
2,
'market':
Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker':
unit_info['ticker'],
'side':
Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type':
Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
'price':
limitup_px,
'quantity':
etf_quantity
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
rs2 = serviceTest(Api, case_goal, wt_reqs_etf)
if rs2['用例测试结果'] is False:
etf_sell_log(case_goal, rs2)
self.assertEqual(rs2['用例测试结果'], True)
return
etf_sell_log(case_goal, rs2)
# ------------二级市场卖出成份股-----------
case_goal['期望状态'] = '废单'
case_goal['errorID'] = 11010121
case_goal['errorMSG'] = queryOrderErrorMsg(11010121)
# 查询etf成分股代码和数量
etf_components = query_cetf_component_share(unit_info['ticker'])
# 如果卖出单位大于100,表示卖出数量;小于100,表示卖出份数
rs3 = {}
for stk_code in etf_components:
# 申购用例1-43会有上海和深圳的成分股各一支,深圳成分股为'008000',只卖上海的
if stk_code != '008000':
components_share = etf_components[stk_code]
quantity = (int(unit_info['component_unit_sell'])
if unit_info['component_unit_sell'] >= 100
else int(components_share * unit_info['component_unit_sell']))
limitup_px = getUpPrice(stk_code)
wt_reqs = {
'business_type':
Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':
2,
'market':
Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker':
stk_code,
'side':
Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type':
Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
'price':
limitup_px,
'quantity':
quantity
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
rs3 = serviceTest(Api, case_goal, wt_reqs)
if rs3['用例测试结果'] is False:
etf_components_sell_log(case_goal, rs3)
self.assertEqual(rs3['用例测试结果'], True)
etf_components_sell_log(case_goal, rs3)
self.assertEqual(rs3['用例测试结果'], True)
if __name__ == '__main__':
unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
09920af61b71677948e7bed9fa77cabae508c78f
|
87e62af4768c0f594e675551f4c7c1c81ce7f7d9
|
/lawyer/spiders/legislation/caipan_wenshu_spider.py
|
09b385d1e6eb1bcee041a82c399db8cb43aa3e79
|
[] |
no_license
|
dulei001/Spider
|
78d12adbef1d865da6978704fe146cc21a8d2d3e
|
628d468501c6502763ce453a58a09813b2a46b8c
|
refs/heads/master
| 2021-01-18T17:27:11.771434
| 2019-05-14T02:14:33
| 2019-05-14T02:14:33
| 86,802,130
| 12
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,078
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import scrapy
import re
#聚法网裁判文书
class CaipanInfoSpider(scrapy.spiders.Spider):
name = "jufa"
allowed_domains = ['www.jufaanli.com']
start_urls = [
"http://www.jufaanli.com/search2?TypeKey=1%253A%25E6%25A1%2588"
]
pagesize = 20
# 爬虫入口爬去所有省下的地址
def parse(self, response):
pageurl = 'http://www.jufaanli.com/home/search/searchJson'
totalPage = (26694941-1) / (self.pagesize+1)
#totalPage = 2
for page in range(1,totalPage):
yield scrapy.FormRequest(url=pageurl,
method="POST",
headers={'X-Requested-With': 'XMLHttpRequest'},
dont_filter=True,
callback=self.parseAjaxList,
errback=self.handle_error,
formdata={"page":str(page),"searchNum":str(self.pagesize)})
#列表
def parseAjaxList(self,response):
data = json.loads(response.body_as_unicode())
detailUrl='http://www.jufaanli.com/detail/'
for item in data['info']["searchList"]['list']:
yield scrapy.Request(url=detailUrl+ item['uuid'],
method="GET",
dont_filter=True,
callback=self.parseAjaxDetail,
errback=self.handle_error,
)
#详细
def parseAjaxDetail(self,response):
item={}
#标题
item['title']= ''.join(response.css('.text-center.text-black::text').re(u'[^指导案例\d号:]'))
#法院
item['fanyuan']=''.join(response.xpath(u'//div[@class="col-lg-4 title-area"]/span[text()="审理法院"]/parent::div/following-sibling::div/a/text()').extract())
#案号
item['anhao'] = ''.join(response.xpath(u'//div[@class="col-lg-4 title-area"]/span[text()="案号"]/parent::div/following-sibling::div/span/text()').extract())
#案由
item['anyou'] = ''.join(response.xpath(u'//div[@class="col-lg-4 title-area"]/span[text()="案由"]/parent::div/following-sibling::div/a/text()').extract())
#案件类型
item['type'] = ','.join(response.xpath(u'//div[@class="col-lg-4 title-area"]/span[text()="案件类型"]/parent::div/following-sibling::div/span/text()').extract()).rstrip(',')
#审判日期
item['stime'] = ''.join(response.xpath(u'//div[@class="col-lg-4 title-area"]/span[text()="审判日期"]/parent::div/following-sibling::div/span/text()').extract())
#审理程序
item['slcx'] = ''.join(response.xpath(u'//div[@class="col-lg-4 title-area"]/span[text()="审理程序"]/parent::div/following-sibling::div/span/text()').extract())
#关键词
item['keywords'] = ','.join(response.xpath(u'//div[@class="col-lg-4 title-area"]/span[text()="关键词"]/parent::div/following-sibling::div').css('.info-item.info-item-gray a::text').extract()).rstrip(',')
#律师and律所
lvshidic=[];
for i in response.xpath(u'//div[@class="col-lg-4 title-area"]/span[text()="律师"]/parent::div/following-sibling::div').css('.legislation-info'):
lvshidic.append({"lvsuo":''.join(i.css('a::text').extract()),'names':''.join(i.css('span::text').extract())})
item['fagui']=response.css('.eachCiteLaw a::text').extract()
#律所律师
item['lvsuolvshi'] =lvshidic
#内容
item['content'] = re.sub('(class|name|id)="[^"]*?"','', ''.join(response.xpath('//*[@id="caseText"]').extract()))
item['collection'] = 'caipan'
item['source'] = '聚法网'
item["url"] = response.url
#item["html"] = response.text
return item
def handle_error(self, result, *args, **kw):
self.logger.error("error url is :%s" % result.request.url)
|
[
"280680441@qq.com"
] |
280680441@qq.com
|
fa41413edc689db57f3afe37347b2bb07b49f3a1
|
b9e4bf5c00ac0d6c1a6e6038e8dc18041819ff99
|
/Python3/0716_Max_Stack.py
|
45a35d6d09c45a3a9630134e1a1c123ef683ea60
|
[] |
no_license
|
kiranani/playground
|
98fdb70a3ca651436cc1eede0d2ba1b1ea9aba1d
|
12f62a218e827e6be2578b206dee9ce256da8d3d
|
refs/heads/master
| 2021-06-03T12:43:29.388589
| 2020-06-12T15:43:45
| 2020-06-12T15:43:45
| 149,614,802
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 884
|
py
|
class MaxStack:
def __init__(self):
"""
initialize your data structure here.
"""
self.l = [(float("inf"), -float("inf"))]
def push(self, x: int) -> None:
self.l.append((x, max(x, self.l[-1][1])))
def pop(self) -> int:
return self.l.pop()[0]
def top(self) -> int:
return self.l[-1][0]
def peekMax(self) -> int:
return self.l[-1][1]
def popMax(self) -> int:
mx, s = self.peekMax(), []
while self.l[-1][0] != mx:
s.append(self.l.pop()[0])
self.l.pop()
while s:
self.push(s.pop())
return mx
# Your MaxStack object will be instantiated and called as such:
# obj = MaxStack()
# obj.push(x)
# param_2 = obj.pop()
# param_3 = obj.top()
# param_4 = obj.peekMax()
# param_5 = obj.popMax()
|
[
"noreply@github.com"
] |
kiranani.noreply@github.com
|
4386d9bd629660635a27a3b219d035be73a9ac41
|
79babd1502ea1bd701ce021cfa75dc25ca35a700
|
/python/rootTools/Utils.py
|
a4e4d3433c4ff2b0999a0502997f62013d960800
|
[] |
no_license
|
RazorCMS/RazorAnalyzer
|
99b89b33d2ec2be1d42e3705569d49cd3346d40a
|
2e2adff5ba5d2306c9f0b40c2a5297782fae3158
|
refs/heads/master
| 2021-01-23T21:18:53.534772
| 2019-03-22T14:21:41
| 2019-03-22T14:21:41
| 24,916,087
| 1
| 9
| null | 2017-09-11T15:51:28
| 2014-10-07T23:27:09
|
C++
|
UTF-8
|
Python
| false
| false
| 3,724
|
py
|
#$Revision:$
#the following is from http://stackoverflow.com/questions/1006289/how-to-find-out-the-number-of-cpus-in-python
def determineNumberOfCPUs():
""" Number of virtual or physical CPUs on this system, i.e.
user/real as output by time(1) when called with an optimally scaling userspace-only program"""
import os,re,subprocess
# Python 2.6+
try:
import multiprocessing
return multiprocessing.cpu_count()
except (ImportError,NotImplementedError):
pass
# POSIX
try:
res = int(os.sysconf('SC_NPROCESSORS_ONLN'))
if res > 0:
return res
except (AttributeError,ValueError):
pass
# Windows
try:
res = int(os.environ['NUMBER_OF_PROCESSORS'])
if res > 0:
return res
except (KeyError, ValueError):
pass
# jython
try:
from java.lang import Runtime
runtime = Runtime.getRuntime()
res = runtime.availableProcessors()
if res > 0:
return res
except ImportError:
pass
# BSD
try:
sysctl = subprocess.Popen(['sysctl', '-n', 'hw.ncpu'], stdout=subprocess.PIPE)
scStdout = sysctl.communicate()[0]
res = int(scStdout)
if res > 0:
return res
except (OSError, ValueError):
pass
# Linux
try:
res = open('/proc/cpuinfo').read().count('processor\t:')
if res > 0:
return res
except IOError:
pass
# Solaris
try:
pseudoDevices = os.listdir('/devices/pseudo/')
expr = re.compile('^cpuid@[0-9]+$')
res = 0
for pd in pseudoDevices:
if expr.match(pd) != None:
res += 1
if res > 0:
return res
except OSError:
pass
# Other UNIXes (heuristic)
try:
try:
dmesg = open('/var/run/dmesg.boot').read()
except IOError:
dmesgProcess = subprocess.Popen(['dmesg'], stdout=subprocess.PIPE)
dmesg = dmesgProcess.communicate()[0]
res = 0
while '\ncpu' + str(res) + ':' in dmesg:
res += 1
if res > 0:
return res
except OSError:
pass
raise Exception('Can not determine number of CPUs on this system')
def importToWS(workspace, *args):
"""Utility function to call the RooWorkspace::import methods"""
return getattr(workspace,'import')(*args)
#------------------------------------------------------------------------------
# File: Color.py
# Description: colors
# Created: 22 Sep 2010 Harrison B. Prosper
#------------------------------------------------------------------------------
RED ="\x1b[0;31;48m"
GREEN ="\x1b[0;32;48m"
YELLOW ="\x1b[0;33;48m"
BLUE ="\x1b[0;34;48m"
MAGENTA="\x1b[0;35;48m"
CYAN ="\x1b[0;36;48m"
BOLDRED ="\x1b[1;31;48m"
BOLDGREEN ="\x1b[1;32;48m"
BOLDYELLOW ="\x1b[1;33;48m"
BOLDBLUE ="\x1b[1;34;48m"
BOLDMAGENTA="\x1b[1;35;48m"
BOLDCYAN ="\x1b[1;36;48m"
RESETCOLOR ="\x1b[0m" # reset to default foreground color
#------------------------------------------------------------------------------
def nameonly(s):
import os
return os.path.splitext(os.path.basename(s))[0]
def scream(message):
from random import randint
i = randint(0,4)
random_phrases = {0: 'Twas brillig and the slithy tothes',
1: 'Let all the evil that lurks in the mud hatch out',
2: 'Alas poor CMS I new them well!',
3: 'Lies, damned lies, and statistics',
4: 'Speak severely to your little boy and beat him '\
'when he sneezes'}
print "\n** %s\n** %s%s%s\n" % (random_phrases[i], BLUE, message,
RESETCOLOR)
sys.exit(0)
|
[
"jduarte@caltech.edu"
] |
jduarte@caltech.edu
|
a2aa0983b8a49972d004006dd2709b75fd1ab70d
|
7c15f211adc9e9eb9f66ccdd570c9f38dff7ea8d
|
/packages/autorest.python/test/azure/version-tolerant/Expected/AcceptanceTests/SubscriptionIdApiVersionVersionTolerant/subscriptionidapiversionversiontolerant/operations/__init__.py
|
c58de146570db0acc07d6828002cccd80c37cc94
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
Azure/autorest.python
|
cc4bfbf91ae11535731cad37cedd6b733edf1ebd
|
a00d7aaa3753ef05cb5a0d38c664a90869478d44
|
refs/heads/main
| 2023-09-03T06:58:44.246200
| 2023-08-31T20:11:51
| 2023-08-31T20:11:51
| 100,315,955
| 47
| 40
|
MIT
| 2023-09-14T21:00:21
| 2017-08-14T22:58:33
|
Python
|
UTF-8
|
Python
| false
| false
| 771
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
# Re-export the generated operations class and apply any handwritten patches.
from ._operations import GroupOperations
from ._patch import __all__ as _patch_all
from ._patch import *  # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk

# Public API of this package; extended below with names exported by _patch.
__all__ = [
    "GroupOperations",
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
67e504ae45a28ad46c3633d6603215e04e77ce66
|
1cb0cc435061b6a0156b37813343ae46b1f7346e
|
/1_learn_step/try_second/glorot_normal-RMSprop-16.py
|
eb6e65fdeee43652b530579008e195c2f09d190b
|
[] |
no_license
|
youthliuxi/keras
|
6370a9de11e152d8ba96e68e9ff02337203b7e66
|
60a367442f74313d0bd9af01f76068d56e23bec0
|
refs/heads/master
| 2020-04-30T19:54:16.628943
| 2019-08-21T09:47:13
| 2019-08-21T09:47:13
| 177,051,874
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,677
|
py
|
# -*- coding:utf-8 -*-
# Train a small CNN on MNIST and log per-epoch metrics for a
# glorot_normal / RMSprop / batch-size-16 configuration.
from keras import backend as K
K.set_image_dim_ordering('th')  # channels-first ordering: (channels, rows, cols)
import numpy as np
np.random.seed(123)  # fix the seed so runs are reproducible
from keras.layers import *
from keras.models import Sequential
from keras.utils import np_utils
from keras.datasets import mnist
# (X_train, y_train), (X_test, y_test) = mnist.load_data()
# Load MNIST from a local archive instead of downloading it.
path = "./mnist.npz"
f = np.load(path)
X_train, y_train = f['x_train'], f['y_train']
X_test, y_test = f['x_test'], f['y_test']
X_train = X_train.reshape(X_train.shape[0], 1, 28, 28)
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28)
import pylab
from matplotlib import pyplot as plt
# NOTE(review): the two reshapes below repeat the ones above -- redundant but harmless.
X_train = X_train.reshape(X_train.shape[0], 1, 28, 28)
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28)
# Scale pixel values to [0, 1].
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# One-hot encode the 10 digit classes.
Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)
# Two 3x3 conv layers, max-pool, dropout, then a dense classification head.
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', init='glorot_normal', input_shape=(1, 28, 28)))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='RMSprop', metrics=['accuracy'])
# Train 100 epochs at batch size 16, validating on the test split each epoch.
hist = model.fit(X_train, Y_train, batch_size=16, nb_epoch=100, verbose=1, validation_data=(X_test, Y_test))
# Persist the per-epoch metrics for later comparison of init/optimizer choices.
log_file_name = "try_second/txt/glorot_normal-RMSprop-16.txt"
with open(log_file_name, 'w') as f:
    f.write(str(hist.history))
# score = model.evaluate(X_test, Y_test, verbose=0, batch_size=16)
# print(score[0])
# print(score[1])
|
[
"lx_einstein@sina.com"
] |
lx_einstein@sina.com
|
5257553d3c163205f228efbd85550aedd5fa8e8e
|
6adf334dd2a074686447e15898ed3fff793aab48
|
/03_Fast_and_Slow_Pointers/08_circular_array_loop_exists.py
|
a679195ddcea8fbbc7f8af0248a3d958cd5cb1c4
|
[] |
no_license
|
satyapatibandla/Patterns-for-Coding-Interviews
|
29ac1a15d5505293b83a8fb4acf12080851fe8d6
|
b3eb2ac82fd640ecbdf3654a91a57a013be1806f
|
refs/heads/main
| 2023-05-07T07:56:01.824272
| 2021-06-01T04:02:50
| 2021-06-01T04:02:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,136
|
py
|
# Time O(N) | Space O(1)
def circular_array_loop_exists(nums):
    """Return True if nums has a cycle of length > 1 moving in one direction.

    Uses fast/slow pointers per start index; explored dead chains are zeroed
    in place so later starts skip them (the input list is mutated).
    """
    for start in range(len(nums)):
        if nums[start] == 0:
            continue  # chain already explored and ruled out
        forward = nums[start] > 0
        slow = fast = start
        while True:
            slow = get_next_idx(nums, slow, forward)
            fast = get_next_idx(nums, get_next_idx(nums, fast, forward), forward)
            if -1 in (slow, fast):
                break  # direction flipped or self-loop: no valid cycle here
            if slow == fast:
                return True
        # Mark the whole explored chain as dead.
        cursor = start
        while get_next_idx(nums, cursor, forward) != -1:
            nxt = get_next_idx(nums, cursor, forward)
            nums[cursor] = 0
            cursor = nxt
    return False


def get_next_idx(nums, idx, direction):
    """Advance one step from idx; -1 when direction flips or step is a self-loop."""
    if idx == -1 or (nums[idx] > 0) != direction:
        return -1
    step = (idx + nums[idx]) % len(nums)
    return -1 if step == idx else step


def main():
    for arr in ([1, 2, -1, 2, 2], [2, 2, -1, 2], [2, 1, -1, -2]):
        print(circular_array_loop_exists(arr))


if __name__ == '__main__':
    main()
|
[
"shash873@gmail.com"
] |
shash873@gmail.com
|
8b8498e424f3ba6a5662bd3a5d6401e4d2ca6e12
|
81adc22ee20698506397135b916903936837db3b
|
/examples/cuda-c++/vector_add.py
|
e5a70e49b2240c3b90135d9c9c037d7034622217
|
[
"Apache-2.0"
] |
permissive
|
KernelTuner/kernel_tuner
|
6c25ca551795cc49a7754f2957de4e59aa98578c
|
b3ff4cdecb12655009b356e3b1840e25b1dd1421
|
refs/heads/master
| 2023-08-10T00:34:53.984541
| 2023-06-01T16:31:47
| 2023-06-01T16:31:47
| 54,894,320
| 59
| 6
|
Apache-2.0
| 2023-09-08T19:28:24
| 2016-03-28T13:32:17
|
Python
|
UTF-8
|
Python
| false
| false
| 904
|
py
|
#!/usr/bin/env python
"""This is the minimal example from the README converted to C++11"""
import json
import numpy
from kernel_tuner import tune_kernel
def tune():
    """Auto-tune the templated vector_add CUDA kernel and dump results to JSON."""
    kernel_string = """
    template<typename T>
    __global__ void vector_add(T *c, T *a, T *b, int n) {
        auto i = blockIdx.x * block_size_x + threadIdx.x;
        if (i<n) {
            c[i] = a[i] + b[i];
        }
    }
    """

    size = 10000000
    # Random single-precision inputs and a zeroed output buffer.
    a = numpy.random.randn(size).astype(numpy.float32)
    b = numpy.random.randn(size).astype(numpy.float32)
    c = numpy.zeros_like(b)
    n = numpy.int32(size)
    args = [c, a, b, n]

    # Candidate thread-block widths: 128, 192, ..., 1024.
    tune_params = {"block_size_x": [128 + 64 * i for i in range(15)]}

    result, env = tune_kernel("vector_add<float>", kernel_string, size, args, tune_params)

    with open("vector_add.json", 'w') as fp:
        json.dump(result, fp)

    return result


if __name__ == "__main__":
    tune()
|
[
"b.vanwerkhoven@esciencecenter.nl"
] |
b.vanwerkhoven@esciencecenter.nl
|
6496385a65adfdf5d6dd2990cf9ca6dc390ce0a4
|
e3ec5f1898ae491fa0afcdcc154fb306fd694f83
|
/src/components/opPicker/onFilterTextEvent.py
|
4e4fb7d55e5df26b7e2595dd237d87b347bae0e8
|
[
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
phoebezhung/raytk
|
42397559a76a9ba39308ac03344b4446f64ea04d
|
b91483ce88b2956d7b23717b11e223d332ca8395
|
refs/heads/master
| 2023-08-27T05:20:38.062360
| 2021-10-21T04:33:18
| 2021-10-21T04:33:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
# noinspection PyUnreachableCode
if False:
    # Editor-only block: gives IDEs/type checkers the TouchDesigner stubs
    # without ever executing at runtime.
    # noinspection PyUnresolvedReferences
    from _stubs import *
    from .opPicker import OpPicker

    ext.opPicker = OpPicker(COMP())


def onValueChange(panelValue: 'PanelValue', prev):
    # Forward the panel's new text value to the picker's filter.
    ext.opPicker.setFilterText(panelValue.val)
|
[
"tekt@immerse.studio"
] |
tekt@immerse.studio
|
5d7ecd12aac912be773a379df1d6f109317b84c0
|
de392462a549be77e5b3372fbd9ea6d7556f0282
|
/operations_9001/migrations/0011_auto_20200806_1350.py
|
5305a4a2d7c547e06c938388a8dbfece7f2c931d
|
[] |
no_license
|
amutebe/AMMS_General
|
2830770b276e995eca97e37f50a7c51f482b2405
|
57b9b85ea2bdd272b44c59f222da8202d3173382
|
refs/heads/main
| 2023-07-17T02:06:36.862081
| 2021-08-28T19:07:17
| 2021-08-28T19:07:17
| 400,064,408
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,147
|
py
|
# Generated by Django 3.0.2 on 2020-08-06 10:50
# NOTE(review): the defaults below embed IDs stamped at makemigrations time
# (e.g. 'TEGA-M-06082020125'); that is normal for callable-free defaults.

from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated: refreshes the char primary-key defaults of several models."""

    dependencies = [
        ('operations_9001', '0010_auto_20200806_1252'),
    ]

    operations = [
        migrations.AlterField(
            model_name='maintenance',
            name='maintenance_number',
            field=models.CharField(default='TEGA-M-06082020125', max_length=200, primary_key=True, serialize=False, verbose_name='Maintenance no.:'),
        ),
        migrations.AlterField(
            model_name='mod9001_calibration',
            name='calibration_number',
            field=models.CharField(default='TEGA-C-06082020249', max_length=200, primary_key=True, serialize=False, verbose_name='Calibration no.:'),
        ),
        migrations.AlterField(
            model_name='mod9001_document_manager',
            name='document_number',
            field=models.CharField(default='TEGA-Q-06082020119', max_length=200, primary_key=True, serialize=False, verbose_name='Document no.:'),
        ),
        migrations.AlterField(
            model_name='mod9001_processtable',
            name='process_number',
            field=models.CharField(default='Comp-Pr-06082020218', max_length=200, primary_key=True, serialize=False, verbose_name='Process ID:'),
        ),
        migrations.AlterField(
            model_name='mod9001_qmsplanner',
            name='planner_number',
            field=models.CharField(default='Comp-QP-06082020151', max_length=200, primary_key=True, serialize=False, verbose_name='Planner no.:'),
        ),
        migrations.AlterField(
            model_name='mod9001_trainingplanner',
            name='plan_number',
            field=models.CharField(default='Comp-TP-06082020133', max_length=200, primary_key=True, serialize=False, verbose_name='Plan no.:'),
        ),
        migrations.AlterField(
            model_name='mod9001_trainingregister',
            name='training_number',
            field=models.CharField(default='Comp-TR-06082020131', max_length=200, primary_key=True, serialize=False, verbose_name='Training no.:'),
        ),
    ]
|
[
"mutebe2@gmail.com"
] |
mutebe2@gmail.com
|
01f324c6bcbb1c9a274932a8fafb8cbc266973f2
|
e384f5467d8bcfd70845997bcbd68d950e874a61
|
/example/python/mesh/mesh_007_cube_color_per_triangle_Tex1D/cube.py
|
f445866308c763ea6d3dbaf04fc7e72b93a6e0b5
|
[] |
no_license
|
Rabbid76/graphics-snippets
|
ee642f1ed9ceafc6d320e467d3a084d2446d22c2
|
fa187afeabb9630bc1d988304fb5787e95a91385
|
refs/heads/master
| 2023-08-04T04:32:06.884318
| 2023-07-21T09:15:43
| 2023-07-21T09:15:43
| 109,126,544
| 177
| 12
| null | 2023-04-11T20:05:52
| 2017-11-01T12:05:56
|
C++
|
UTF-8
|
Python
| false
| false
| 3,947
|
py
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir, os.pardir))
os.chdir(os.path.dirname(os.path.abspath(__file__)))
import numpy
# PyOpenGL import
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
# MyLibOGL import
from MyLibOGL.math import mat
from MyLibOGL.math import cam
from MyLibOGL.ogl import shader
from MyLibOGL.ogl import vertex
from MyLibOGL.ogl import uniform
from MyLibOGL.glut import window
class MyWindow(window.CameraWindow):
    """GLUT camera window that renders the cube with the Blinn-Phong program."""

    def __init__(self, cx, cy, multisample=True):
        super().__init__(cx, cy, multisample)

    def _InitCamera_(self):
        # Start the camera 3 units back along -y; field of view left at default.
        camera = super()._InitCamera_()
        #camera.fov_y = 120
        camera.pos = (0, -3, 0)
        return camera

    # draw event
    def OnDraw(self):
        # set up projection matrix
        prjMat = self.Perspective()
        # set up view matrix
        viewMat = self.LookAt()
        # set up attributes and shader program
        glEnable(GL_DEPTH_TEST)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        progDraw.Use()
        modelMat = mat.IdentityMat44()
        modelMat = self.AutoModelMatrix()  # animated model transform from the base class
        #modelMat = mat.RotateX( modelMat, self.CalcAng( 13.0 ) )
        #modelMat = mat.RotateY( modelMat, self.CalcAng( 17.0 ) )
        # Upload matrices and Blinn-Phong lighting parameters as uniforms.
        progDraw.SetUniforms( {
            b"u_projectionMat44" : self.Perspective(),
            b"u_viewMat44" : self.LookAt(),
            b"u_modelMat44" : modelMat,
            b"u_lightDir" : [-1.0, -0.5, -2.0],
            b"u_ambient" : 0.2,
            b"u_diffuse" : 0.8,
            b"u_specular" : 0.8,
            b"u_shininess" : 10.0 } )
        # draw object
        cubeVAO.Draw()
def AddToBuffer( buffer, data, count=1 ):
    """Append the elements of *data* to *buffer*, repeated *count* times."""
    for _ in range(count):
        buffer.extend(data)
# create window
wnd = MyWindow( 800, 600, True )

# define cube vertex array opject
# 8 corner positions of a cube with side length 2 centred at the origin.
cubePts = [
    (-1.0, -1.0, 1.0), ( 1.0, -1.0, 1.0), ( 1.0, 1.0, 1.0), (-1.0, 1.0, 1.0),
    (-1.0, -1.0, -1.0), ( 1.0, -1.0, -1.0), ( 1.0, 1.0, -1.0), (-1.0, 1.0, -1.0) ]
# One RGBA colour per face (6 faces).
cubeCol = [ [1.0, 0.0, 0.0, 1.0], [1.0, 0.5, 0.0, 1.0], [1.0, 0.0, 1.0, 1.0], [1.0, 1.0, 0.0, 1.0], [0.0, 1.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0] ]
# Corner indices of the 6 faces, 4 corners each (24 entries).
cubeHlpInx = [ 0, 1, 2, 3, 1, 5, 6, 2, 5, 4, 7, 6, 4, 0, 3, 7, 3, 2, 6, 7, 1, 0, 4, 5 ]
cubePosData = []
for inx in cubeHlpInx: AddToBuffer( cubePosData, cubePts[inx] )
# Face "normals": the (unnormalised) sum of each face's 4 corner positions,
# repeated once per corner of the face.
cubeNVData = []
for inx_nv in range(len(cubeHlpInx) // 4):
    nv = [0.0, 0.0, 0.0]
    for inx_p in range(4):
        for inx_s in range(0, 3): nv[inx_s] += cubePts[ cubeHlpInx[inx_nv*4 + inx_p] ][inx_s]
    AddToBuffer( cubeNVData, nv, 4 )
# Per-face colour table for the 1D texture: each face's RGBA written twice.
cubeColFaceData = []
for inx_col in range(6):
    for inx_c in range(0, 4): cubeColFaceData.append( cubeCol[inx_col][inx_c] )
    for inx_c in range(0, 4): cubeColFaceData.append( cubeCol[inx_col][inx_c] )
# Two triangles per face.
cubeIndices = []
for inx in range(6):
    for inx_s in [0, 1, 2, 0, 2, 3]: cubeIndices.append( inx * 4 + inx_s )
cubeVAO = vertex.VAObject( [ (3, cubePosData), (3, cubeNVData) ], cubeIndices )

# 1D texture
# Upload the per-face colours as a 1D RGBA float texture on texture unit 1,
# with nearest filtering so each texel maps to exactly one face colour.
color_texture_unit = 1
glActiveTexture( GL_TEXTURE0+color_texture_unit )
color_texture = glGenTextures( 1 )
glBindTexture( GL_TEXTURE_1D, color_texture )
glTexImage1D( GL_TEXTURE_1D, 0, GL_RGBA, len(cubeColFaceData) // 4, 0, GL_RGBA, GL_FLOAT, numpy.matrix(cubeColFaceData, dtype=numpy.float32))
glTexParameteri( GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_NEAREST )
glTexParameteri( GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_NEAREST )
glTexParameteri( GL_TEXTURE_1D, GL_TEXTURE_WRAP_S, GL_REPEAT )
glActiveTexture( GL_TEXTURE0 )

# load, compile and link shader
progDraw = shader.ShaderProgram(
    [ ('resource/shader/blinn_phong.vert', GL_VERTEX_SHADER),
      ('resource/shader/blinn_phong.frag', GL_FRAGMENT_SHADER) ] )

# start main loop
wnd.Run()
|
[
"Gernot.Steinegger@gmail.com"
] |
Gernot.Steinegger@gmail.com
|
cb3b99330dec69408592872eba11b0a5f54912fe
|
e82d49a32b843d02019fe770824d10bbdfc16c1b
|
/Misc/args.py
|
219c767f303ffbf718eb0d407394684e9b33f549
|
[] |
no_license
|
deesaw/PythonD-005
|
142bfdfd6515aa4d570509cab5e6b6008ccae999
|
65b7423b5251b12d06cd64a5135dd0afabde60a1
|
refs/heads/master
| 2023-03-10T02:51:05.967446
| 2021-03-02T14:09:33
| 2021-03-02T14:09:33
| 343,795,803
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
#This program adds two numbers given on the command line.
#At the OS command prompt call this program as follows
#args.py 3 4
#It should return 7
########################################
import sys

print("You have entered ", len(sys.argv) - 1, " arguments")
print(sys.argv[0])  # the script's own name

# Sum every command-line argument.  The original accumulated into a variable
# named `sum`, shadowing the builtin; use the builtin sum() over a generator.
total = sum(int(x) for x in sys.argv[1:])
print(total)
############################################
#run it as below on windows
#c:\python27\python args.py 3 4
|
[
"69420960+deesaw@users.noreply.github.com"
] |
69420960+deesaw@users.noreply.github.com
|
598e7e1951f92af159f2e76bc02bd394360df3cd
|
be0a3aa7b83b87c5d2c257b538545bdded39c051
|
/Chatbot_Web/impl/view/sp_view.py
|
48c5784591afdbd8ef823393a0b3a239e1eeeb6b
|
[
"Apache-2.0"
] |
permissive
|
water123li/Chatbot_CN
|
480e3bc6d6c0d8b6b0823452556acef14df1c2c3
|
e63808030c6cc516020075cdcd0c332120a998fc
|
refs/heads/master
| 2022-01-25T10:34:34.726243
| 2019-06-13T10:44:44
| 2019-06-13T10:44:44
| 192,504,292
| 1
| 0
|
Apache-2.0
| 2019-06-18T09:01:55
| 2019-06-18T09:01:55
| null |
UTF-8
|
Python
| false
| false
| 557
|
py
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
   File Name:     sp_view.py
   Description :  View redirect for the semantic-parsing page
   Author :       charl
   date:          2018/11/2
-------------------------------------------------
   Change Activity: 2018/11/2:
-------------------------------------------------
"""
from django.shortcuts import render


def sp_view(request):    # content the page must load up-front goes here
    # Template context; currently no dynamic data is passed.
    context = {}
    return render(request, 'semantic_parsing/semantic_parsing.html', context)
|
[
"charlesxu86@163.com"
] |
charlesxu86@163.com
|
0719c05250e74207d69b6469b6281bd629a2d5d8
|
8f1996c1b5a0211474c7fa287be7dc20a517f5f0
|
/hail/python/hail/vds/combiner/__init__.py
|
66f14a1f2905d37375d28189b406329ce94c335f
|
[
"MIT"
] |
permissive
|
johnc1231/hail
|
9568d6effe05e68dcc7bf398cb32df11bec061be
|
3dcaa0e31c297e8452ebfcbeda5db859cd3f6dc7
|
refs/heads/main
| 2022-04-27T10:51:09.554544
| 2022-02-08T20:05:49
| 2022-02-08T20:05:49
| 78,463,138
| 0
| 0
|
MIT
| 2022-03-01T15:55:25
| 2017-01-09T19:52:45
|
Python
|
UTF-8
|
Python
| false
| false
| 238
|
py
|
# Public entry points of the GVCF combiner subpackage.
from .combine import transform_gvcf, combine_variant_datasets
from .variant_dataset_combiner import new_combiner, load_combiner

__all__ = [
    'combine_variant_datasets',
    'transform_gvcf',
    'new_combiner',
    'load_combiner',
]
|
[
"noreply@github.com"
] |
johnc1231.noreply@github.com
|
e76ae7afc9085bd4469750939c331cf04a22eae6
|
61ff94d2987b3bc95f82c5a58897f50d1efa1db8
|
/hive/db/adapter.py
|
88c60225e4070ae7cd7f3e645bf073894c30e7e3
|
[
"MIT"
] |
permissive
|
arpwv/hivemind
|
ee77c9805731fda2bb95e1127a56152fe53b707a
|
a87e5578f9020be02c867021a8acdfff41f06777
|
refs/heads/master
| 2021-01-24T03:43:46.507207
| 2018-02-23T22:18:56
| 2018-02-23T22:18:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,292
|
py
|
import logging
import collections
from funcy.seqs import first
import sqlalchemy
from sqlalchemy import text
from hive.conf import Conf
from hive.db.query_stats import QueryStats
logger = logging.getLogger(__name__)
class Db:
    """Singleton convenience wrapper around a SQLAlchemy connection.

    Exposes shape-specific query helpers (n*m, 1*m, n*1, 1*1), a
    dialect-aware upsert statement builder, and a minimal flag tracking
    whether an explicit transaction is open.
    """

    _instance = None  # lazily constructed shared instance

    @classmethod
    def instance(cls):
        """Return the process-wide Db, creating it on first call."""
        if not cls._instance:
            cls._instance = Db()
        return cls._instance

    def __init__(self):
        self._conn = None         # opened lazily in conn()
        self._trx_active = False  # True between START TRANSACTION and COMMIT

    def conn(self):
        """Return the underlying connection, opening it on first use."""
        if not self._conn:
            self._conn = Db.create_engine(echo=False).connect()
            # It seems as though sqlalchemy tries to take over transactions
            # and handle them itself; seems to issue a START TRANSACTION on
            # connect, which makes postgres complain when we start our own:
            # > WARNING: there is already a transaction in progress
            # TODO: handle this behavior properly. In the meantime:
            self._conn.execute(text("COMMIT"))
        return self._conn

    @staticmethod
    def create_engine(echo=False):
        """Build the engine from the configured database_url."""
        engine = sqlalchemy.create_engine(
            Conf.get('database_url'),
            isolation_level="READ UNCOMMITTED",  # only works in mysql
            pool_recycle=3600,
            echo=echo)
        return engine

    def is_trx_active(self):
        # True while an explicit START TRANSACTION is pending its COMMIT.
        return self._trx_active

    # any non-SELECT queries
    def query(self, sql, **kwargs):
        # if prepared tuple, unpack
        if isinstance(sql, tuple):
            assert not kwargs
            kwargs = sql[1]
            sql = sql[0]
        assert isinstance(sql, str)
        assert isinstance(kwargs, dict)
        # this method is reserved for anything but SELECT
        assert self._is_write_query(sql), sql
        return self._query(sql, **kwargs)

    # SELECT n*m
    def query_all(self, sql, **kwargs):
        res = self._query(sql, **kwargs)
        return res.fetchall()

    # SELECT 1*m
    def query_row(self, sql, **kwargs):
        res = self._query(sql, **kwargs)
        return first(res)

    # SELECT n*1
    def query_col(self, sql, **kwargs):
        res = self._query(sql, **kwargs).fetchall()
        return [r[0] for r in res]

    # SELECT 1*1
    def query_one(self, sql, **kwargs):
        row = self.query_row(sql, **kwargs)
        if row:
            return first(row)

    def db_engine(self):
        """Return the active dialect name; only postgresql/mysql are supported."""
        engine = self.conn().dialect.name
        if engine not in ['postgresql', 'mysql']:
            raise Exception("db engine %s not supported" % engine)
        return engine

    @staticmethod
    def build_upsert(table, pk, values):
        """Build (sql, params): INSERT when every pk value is None, else UPDATE."""
        pks = [pk] if isinstance(pk, str) else pk
        values = collections.OrderedDict(values)
        fields = list(values.keys())
        pks_blank = [values[k] is None for k in pks]
        if all(pks_blank):
            cols = ', '.join([k for k in fields if k not in pks])
            params = ', '.join([':'+k for k in fields if k not in pks])
            sql = "INSERT INTO %s (%s) VALUES (%s)"
            sql = sql % (table, cols, params)
        else:
            update = ', '.join([k+" = :"+k for k in fields if k not in pks])
            where = ' AND '.join([k+" = :"+k for k in fields if k in pks])
            sql = "UPDATE %s SET %s WHERE %s"
            sql = sql % (table, update, where)
        return (sql, values)

    @QueryStats()
    def _query(self, sql, **kwargs):
        # Track explicit transaction boundaries; nesting is a programmer error.
        if sql == 'START TRANSACTION':
            assert not self._trx_active
            self._trx_active = True
        elif sql == 'COMMIT':
            assert self._trx_active
            self._trx_active = False
        query = text(sql).execution_options(autocommit=False)
        try:
            return self.conn().execute(query, **kwargs)
        except Exception as e:
            print("[SQL] Error in query {} ({})".format(sql, kwargs))
            #self.conn.close() # TODO: check if needed
            logger.exception(e)
            raise e

    @staticmethod
    def _is_write_query(sql):
        # Classify by the statement's first keyword (max 6 chars, e.g. SELECT).
        action = sql.strip()[0:6].strip()
        if action == 'SELECT':
            return False
        if action in ['DELETE', 'UPDATE', 'INSERT', 'COMMIT', 'START', 'ALTER']:
            return True
        raise Exception("unknown action: {}".format(sql))
|
[
"roadscape@users.noreply.github.com"
] |
roadscape@users.noreply.github.com
|
1dad10d4f022b45a885361c1ef7cad694f8b1ae6
|
4099891546014e49b74f28987d26f93e77559471
|
/app/models.py
|
2b60aa9f5ce76532d09b9a3ed3443348f9d2b2da
|
[
"MIT"
] |
permissive
|
leezichanga/Newshighlights
|
519ecac73341adcf90b364024d335fe3574a12c6
|
abaca8891fe0d62e624e8c83ca4ba65f5ad6fe0f
|
refs/heads/master
| 2020-03-08T21:12:27.616700
| 2018-04-10T13:24:19
| 2018-04-10T13:24:19
| 128,401,925
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 747
|
py
|
class Source:
    '''
    News class to define news objects
    '''

    def __init__(self, id, name, description, url, category, language, country):
        # Bind all metadata fields in one multiple assignment.
        (self.id, self.name, self.description, self.url,
         self.category, self.language, self.country) = (
            id, name, description, url, category, language, country)
class Article:
    '''
    Article class to define article objects
    '''

    def __init__(self, id, name, author, title, description, url, urlToImage, publishedAt):
        # Bind all article fields in one multiple assignment.
        (self.id, self.name, self.author, self.title,
         self.description, self.url, self.urlToImage, self.publishedAt) = (
            id, name, author, title, description, url, urlToImage, publishedAt)
|
[
"elizabbethichanga@yahoo.com"
] |
elizabbethichanga@yahoo.com
|
3b876b254779a8a51c619573dd173dba1daf235b
|
fcf870abec4a3fe936668ed14afcded9c10e4aa3
|
/featureselection/CHI2.py
|
3dbd1809a6a48214769cfd116f2e38f093ac09ef
|
[] |
no_license
|
sirpan/iLearn
|
f8d81523720245cc1ab8368aeb609511fc93af5a
|
507aae17d9fea3d74a7c77984f1f1750eb734f53
|
refs/heads/master
| 2023-03-22T06:55:48.791894
| 2021-03-17T07:23:15
| 2021-03-17T07:23:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,092
|
py
|
#!/usr/bin/env python
# _*_coding:utf-8_*_
import numpy as np
import pandas as pd
# Bin labels used to discretise each continuous feature into 10 equal-width bins.
binBox = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]


def CHI2(encodings, labels):
    """Rank features by chi-squared statistic between binned values and labels.

    encodings: header row (sample id + feature names) followed by rows of
    sample id + feature values.  Returns (res, e) where res is
    [['feature', 'CHI-value'], ...] sorted by descending chi-squared, or
    (0, message) on shape errors / too-small input.
    """
    features = encodings[0][1:]
    encodings = np.array(encodings)[1:]
    data = encodings[:, 1:]
    shape = data.shape
    # Flatten, coerce every value to float, then restore the 2-D shape.
    data = np.reshape(data, shape[0] * shape[1])
    data = np.reshape([float(i) for i in data], shape)
    e = ''
    # Require at least 5 samples and 2 features.
    if shape[0] < 5 or shape[1] < 2:
        return 0, e
    dataShape = data.shape
    if dataShape[1] != len(features):
        print('Error: inconsistent data shape with feature number.')
        return 0, 'Error: inconsistent data shape with feature number.'
    if dataShape[0] != len(labels):
        print('Error: inconsistent data shape with sample number.')
        return 0, 'Error: inconsistent data shape with sample number.'
    sampleNumber = len(data)
    labelClass = set(labels)
    myFea = {}
    for i in range(len(features)):
        array = data[:, i]
        # Equal-width binning into len(binBox) bins, labelled 1..10.
        newArray = list(pd.cut(array, len(binBox), labels=binBox))
        binBoxClass = set(newArray)
        # Observed counts keyed by "<label><bin>" strings.
        myObservation = {}
        for j in range(len(labels)):
            # print(labels[j], newArray[j])
            myObservation[str(labels[j]) + str(newArray[j])] = myObservation.get(str(labels[j]) + str(newArray[j]),
                                                                                0) + 1
        # Expected counts under independence of label and bin.
        myExpect = {}
        for j in labelClass:
            for k in binBox:
                myExpect[str(j) + str(k)] = labels.count(j) * newArray.count(k) / sampleNumber
        # Chi-squared: sum over (observed - expected)^2 / expected.
        chiValue = 0
        for j in labelClass:
            for k in binBoxClass:
                chiValue = chiValue + pow(((myObservation.get(str(j) + str(k), 0)) - myExpect.get(str(j) + str(k), 0)),
                                          2) / myExpect[str(j) + str(k)]
        myFea[features[i]] = chiValue
    res = []
    res.append(['feature', 'CHI-value'])
    for key in sorted(myFea.items(), key=lambda item: item[1], reverse=True):
        res.append([key[0], '{0:.3f}'.format(myFea[key[0]])])
    return res, e
|
[
"noreply@github.com"
] |
sirpan.noreply@github.com
|
7b77ba86eb35c3ed786fe7a7e707898bd1163f50
|
45e49a395fe58783cdc662ba6cf3805ef499190e
|
/raiden/tests/unit/test_notifyingqueue.py
|
24caa4ff4e423841f70bbc59687e829245155fe6
|
[
"MIT"
] |
permissive
|
mat7ias/raiden
|
862708d7e2f1f84ade6623c626daf3578a948c10
|
7463479ffde4f48577b74421f3c47a097e95a36f
|
refs/heads/master
| 2020-03-28T06:17:30.899834
| 2018-09-07T10:24:37
| 2018-09-07T11:44:18
| 146,172,440
| 0
| 0
|
MIT
| 2018-08-26T10:50:17
| 2018-08-26T10:50:17
| null |
UTF-8
|
Python
| false
| false
| 925
|
py
|
import gevent
from gevent.event import Event
from raiden.utils.notifying_queue import NotifyingQueue
from raiden.network.transport.udp.udp_utils import event_first_of
def add_element_to_queue(queue, element):
    # Helper target for gevent.spawn_later: push asynchronously.
    queue.put(element)


def test_copy():
    """copy() must snapshot the contents without consuming the queue."""
    queue = NotifyingQueue()
    assert queue.copy() == []
    queue.put(1)
    assert queue.copy() == [1]
    assert queue.peek() == 1, 'copy must preserve the queue'
    queue.put(2)
    assert queue.copy() == [1, 2], 'copy must preserve the items order'


def test_event_must_be_set():
    """event_first_of must fire once an element arrives asynchronously."""
    queue = NotifyingQueue()
    event_stop = Event()
    data_or_stop = event_first_of(
        queue,
        event_stop,
    )
    spawn_after_seconds = 1
    element = 1
    # Push after a delay; wait() must then return truthy.
    gevent.spawn_later(spawn_after_seconds, add_element_to_queue, queue, element)
    assert data_or_stop.wait()


def test_not_empty():
    # Constructing with items must leave the notification event already set.
    queue = NotifyingQueue(items=[1, 2])
    assert queue.is_set()
|
[
"lefteris@refu.co"
] |
lefteris@refu.co
|
ebb258466ca8a94b44c533eca60e68a2ab3edd10
|
2347a00aa41c023924de6bc4ffe0e8bc244a0f3f
|
/application_form/migrations/0030_auto_20150908_1132.py
|
937d76c617b12ba3080d51e9f999a4d0ef15307c
|
[] |
no_license
|
Dean-Christian-Armada/prod-people
|
2ac20d16aecb0cf1ae50a08e456060eee270b518
|
fb8d99394d78bbf4d1831223fce2d7ac4a04f34d
|
refs/heads/master
| 2021-01-01T16:19:36.904967
| 2016-01-26T09:20:36
| 2016-01-26T09:20:36
| 42,503,579
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):
    """Auto-generated: repoints ApplicationFormFlagDocuments.user at login.UserProfile."""

    dependencies = [
        ('application_form', '0029_applicationformflagdocuments'),
    ]

    operations = [
        migrations.AlterField(
            model_name='applicationformflagdocuments',
            name='user',
            field=models.ForeignKey(default=None, to='login.UserProfile'),
        ),
    ]
|
[
"deanarmada@gmail.com"
] |
deanarmada@gmail.com
|
0458b01249ba1787f2bee6dbad3d2c2a9e97c9d8
|
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
|
/benchmark/smsdroid/testcase/firstcases/testcase2_012.py
|
700a6ce30d93469cae97356efa5d69b55ac3671f
|
[] |
no_license
|
Prefest2018/Prefest
|
c374d0441d714fb90fca40226fe2875b41cf37fc
|
ac236987512889e822ea6686c5d2e5b66b295648
|
refs/heads/master
| 2021-12-09T19:36:24.554864
| 2021-12-06T12:46:14
| 2021-12-06T12:46:14
| 173,225,161
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,924
|
py
|
#coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
# Appium capabilities for the SMSdroid app under test on a 4.4 emulator,
# with JaCoCo coverage instrumentation enabled and app state preserved.
desired_caps = {
    'platformName' : 'Android',
    'deviceName' : 'Android Emulator',
    'platformVersion' : '4.4',
    'appPackage' : 'de.ub0r.android.smsdroid',
    'appActivity' : 'de.ub0r.android.smsdroid.ConversationListActivity',
    'resetKeyboard' : True,
    'androidCoverage' : 'de.ub0r.android.smsdroid/de.ub0r.android.smsdroid.JacocoInstrumentation',
    'noReset' : True
}
def command(cmd, timeout=5):
    """Run *cmd* in a shell, let it live for *timeout* seconds, then terminate it."""
    proc = subprocess.Popen(
        cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
    time.sleep(timeout)
    proc.terminate()
    return
def getElememt(driver, str) :
    # Poll up to 5 times (1 s apart) for the UiAutomator selector *str*;
    # as a last resort tap (50, 50) -- presumably to dismiss an overlay,
    # TODO confirm -- and try once more, letting the exception propagate.
    # NOTE(review): parameter `str` shadows the builtin.
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str)
    return element


def getElememtBack(driver, str1, str2) :
    # Try the primary selector *str1* twice, then fall back to *str2* with
    # the same poll-tap-retry strategy as getElememt.
    for i in range(0, 2, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str1)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str2)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str2)
    return element


def swipe(driver, startxper, startyper, endxper, endyper) :
    # Swipe between screen positions given as fractions of window size;
    # retry once after 1 s on a WebDriverException.
    size = driver.get_window_size()
    width = size["width"]
    height = size["height"]
    try:
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
                     end_y=int(height * endyper), duration=1000)
    except WebDriverException:
        time.sleep(1)
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
                     end_y=int(height * endyper), duration=1000)
    return
def scrollToFindElement(driver, str) :
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
elements = driver.find_elements_by_android_uiautomator(str)
if (len(elements) > 1) :
for temp in elements :
if temp.get_attribute("enabled") == "true" :
element = temp
break
except NoSuchElementException:
swipe(driver, 0.5, 0.55, 0.5, 0.2)
else :
return element
for i in range(0, 4, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
elements = driver.find_elements_by_android_uiautomator(str)
if (len(elements) > 1):
for temp in elements:
if temp.get_attribute("enabled") == "true":
element = temp
break
except NoSuchElementException:
swipe(driver, 0.5, 0.2, 0.5, 0.55)
else :
return element
return
def scrollToClickElement(driver, str) :
element = scrollToFindElement(driver, str)
if element is None :
return
else :
element.click()
def clickInList(driver, str) :
element = None
if (str is None) :
candidates = driver.find_elements_by_class_name("android.widget.CheckedTextView")
if len(candidates) >= 1 and checkWindow(driver):
element = candidates[len(candidates)-1]
else :
element = scrollToFindElement(driver, str)
if element is not None :
element.click()
else :
if checkWindow(driver) :
driver.press_keycode(4)
def clickOnCheckable(driver, str, value = "true") :
parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
for parent in parents:
try :
parent.find_element_by_android_uiautomator(str)
lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
if len(lists) == 1 :
innere = parent.find_element_by_android_uiautomator("new UiSelector().checkable(true)")
nowvalue = innere.get_attribute("checked")
if (nowvalue != value) :
innere.click()
break
except NoSuchElementException:
continue
def typeText(driver, value) :
element = getElememt(driver, "new UiSelector().className(\"android.widget.EditText\")")
element.clear()
element.send_keys(value)
enterelement = getElememt(driver, "new UiSelector().text(\"OK\")")
if (enterelement is None) :
if checkWindow(driver):
driver.press_keycode(4)
else :
enterelement.click()
def checkWindow(driver) :
dsize = driver.get_window_size()
nsize = driver.find_element_by_class_name("android.widget.FrameLayout").size
if dsize['height'] > nsize['height']:
return True
else :
return False
def testingSeekBar(driver, str, value):
try :
if(not checkWindow(driver)) :
element = seekForNearestSeekBar(driver, str)
else :
element = driver.find_element_by_class_name("android.widget.SeekBar")
if (None != element):
settingSeekBar(driver, element, value)
driver.find_element_by_android_uiautomator("new UiSelector().text(\"OK\")").click()
except NoSuchElementException:
time.sleep(1)
def seekForNearestSeekBar(driver, str):
parents = driver.find_elements_by_class_name("android.widget.LinearLayout")
for parent in parents:
try :
parent.find_element_by_android_uiautomator(str)
lists = parent.find_elements_by_class_name("android.widget.LinearLayout")
if len(lists) == 1 :
innere = parent.find_element_by_class_name("android.widget.SeekBar")
return innere
break
except NoSuchElementException:
continue
def settingSeekBar(driver, element, value) :
x = element.rect.get("x")
y = element.rect.get("y")
width = element.rect.get("width")
height = element.rect.get("height")
TouchAction(driver).press(None, x + 10, y + height/2).move_to(None, x + width * value,y + height/2).release().perform()
y = value
def clickInMultiList(driver, str):
    """Tick an entry in a multi-choice list, then press OK if a dialog shows.

    With *str* None, the last CheckedTextView in the current dialog is used;
    otherwise the list is scrolled until *str* is found.  The entry is only
    clicked when not already checked.
    """
    target = None
    if str is None:
        rows = driver.find_elements_by_class_name("android.widget.CheckedTextView")
        if len(rows) >= 1 and checkWindow(driver):
            target = rows[-1]
    else:
        target = scrollToFindElement(driver, str)
    if target is not None:
        if target.get_attribute("checked") != "true":
            target.click()
        if checkWindow(driver):
            driver.find_element_by_android_uiautomator("new UiSelector().text(\"OK\")").click()
# testcase2_012
# Generated Appium scenario (Python 2): replays a fixed tap sequence in the
# SMSdroid app, reports OK/FAIL, and dumps coverage data for this test id.
try :
    starttime = time.time()
    # Connect to the local Appium server using the globally-defined capabilities.
    driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
    element = getElememtBack(driver, "new UiSelector().text(\"66560866\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"de.ub0r.android.smsdroid:id/inout\").className(\"android.widget.ImageView\")")
    TouchAction(driver).tap(element).perform()
    # Hardware BACK key.
    driver.press_keycode(4)
    element = getElememt(driver, "new UiSelector().resourceId(\"de.ub0r.android.smsdroid:id/item_answer\").className(\"android.widget.TextView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Me\")", "new UiSelector().className(\"android.widget.TextView\").instance(3)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Call 12312312\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
    TouchAction(driver).tap(element).perform()
except Exception, e:
    print 'FAIL'
    print 'str(e):\t\t', str(e)
    print 'repr(e):\t', repr(e)
    print traceback.format_exc()
else:
    print 'OK'
finally:
    cpackage = driver.current_package
    endtime = time.time()
    print 'consumed time:', str(endtime - starttime), 's'
    # Ask the instrumented app to broadcast its EMMA/JaCoCo coverage dump.
    command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"2_012\"")
    jacocotime = time.time()
    print 'jacoco time:', str(jacocotime - endtime), 's'
    driver.quit()
    # Force-stop whatever foreground package remains if it is not the app
    # under test (keeps subsequent test cases starting from a clean state).
    if (cpackage != 'de.ub0r.android.smsdroid'):
        cpackage = "adb shell am force-stop " + cpackage
        os.popen(cpackage)
|
[
"prefest2018@gmail.com"
] |
prefest2018@gmail.com
|
c39e5331ecbec1272a0f70f65618ae44e1ef8ff4
|
86c5360e5a98088c76bbbcf93e3180b825744708
|
/yolo_v3/video_test.py
|
31082b2b1f160853466a29120a891091b9470756
|
[] |
no_license
|
FenHua/yolo
|
586b154f77f6855c2b8f731f101c92dd07840b39
|
6da4aa7c2ad2656182b6694b44d3c8e7cd6f3aa8
|
refs/heads/master
| 2020-04-26T11:37:37.092504
| 2019-03-03T02:51:23
| 2019-03-03T02:51:23
| 173,523,090
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,823
|
py
|
# coding: utf-8
from __future__ import division, print_function
import tensorflow as tf
import numpy as np
import argparse
import cv2
import time
from utils.misc_utils import parse_anchors, read_class_names
from utils.nms_utils import gpu_nms
from utils.plot_utils import get_color_table, plot_one_box
from model import yolov3
# CLI: input video plus anchor/class/checkpoint locations for YOLOv3.
parser = argparse.ArgumentParser(description="YOLO-V3 video test procedure.")
parser.add_argument("input_video", type=str,
                    help="The path of the input video.")
parser.add_argument("--anchor_path", type=str, default="./data/yolo_anchors.txt",
                    help="The path of the anchor txt file.")
parser.add_argument("--new_size", nargs='*', type=int, default=[416, 416],
                    help="Resize the input image with `new_size`, size format: [width, height]")
parser.add_argument("--class_name_path", type=str, default="./data/coco.names",
                    help="The path of the class names.")
parser.add_argument("--restore_path", type=str, default="./data/darknet_weights/yolov3.ckpt",
                    help="The path of the weights to restore.")
parser.add_argument("--save_video", type=lambda x: (str(x).lower() == 'true'), default=False,
                    help="Whether to save the video detection results.")
args = parser.parse_args()
args.anchors = parse_anchors(args.anchor_path)
args.classes = read_class_names(args.class_name_path)
args.num_class = len(args.classes)
color_table = get_color_table(args.num_class)
vid = cv2.VideoCapture(args.input_video)
# OpenCV capture properties by numeric id: 7=frame count, 3=width, 4=height, 5=fps.
video_frame_cnt = int(vid.get(7))
video_width = int(vid.get(3))
video_height = int(vid.get(4))
video_fps = int(vid.get(5))
if args.save_video:
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    videoWriter = cv2.VideoWriter('video_result.mp4', fourcc, video_fps, (video_width, video_height))
with tf.Session() as sess:
    input_data = tf.placeholder(tf.float32, [1, args.new_size[1], args.new_size[0], 3], name='input_data')
    yolo_model = yolov3(args.num_class, args.anchors)
    with tf.variable_scope('yolov3'):
        pred_feature_maps = yolo_model.forward(input_data, False)
    pred_boxes, pred_confs, pred_probs = yolo_model.predict(pred_feature_maps)
    pred_scores = pred_confs * pred_probs
    boxes, scores, labels = gpu_nms(pred_boxes, pred_scores, args.num_class, max_boxes=30, score_thresh=0.5, iou_thresh=0.5)
    saver = tf.train.Saver()
    saver.restore(sess, args.restore_path)
    for i in range(video_frame_cnt):
        ret, img_ori = vid.read()
        # NOTE(review): `ret` is not checked — a truncated video would make
        # img_ori None and crash on .shape below; confirm inputs are sound.
        height_ori, width_ori = img_ori.shape[:2]
        img = cv2.resize(img_ori, tuple(args.new_size))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = np.asarray(img, np.float32)
        img = img[np.newaxis, :] / 255.
        start_time = time.time()
        boxes_, scores_, labels_ = sess.run([boxes, scores, labels], feed_dict={input_data: img})
        end_time = time.time()
        # Rescale box coordinates from network input size back to the
        # original frame size.
        boxes_[:, 0] *= (width_ori/float(args.new_size[0]))
        boxes_[:, 2] *= (width_ori/float(args.new_size[0]))
        boxes_[:, 1] *= (height_ori/float(args.new_size[1]))
        boxes_[:, 3] *= (height_ori/float(args.new_size[1]))
        # NOTE(review): this inner loop variable shadows the outer frame
        # index `i`; harmless here, but rename for clarity.
        for i in range(len(boxes_)):
            x0, y0, x1, y1 = boxes_[i]
            plot_one_box(img_ori, [x0, y0, x1, y1], label=args.classes[labels_[i]], color=color_table[labels_[i]])
        cv2.putText(img_ori, '{:.2f}ms'.format((end_time - start_time) * 1000), (40, 40), 0,
                    fontScale=1, color=(0, 255, 0), thickness=2)
        cv2.imshow('image', img_ori)
        if args.save_video:
            videoWriter.write(img_ori)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    vid.release()
    if args.save_video:
        videoWriter.release()
|
[
"1556711031@qq.com"
] |
1556711031@qq.com
|
6f539cd8fba94c719c7cdbc20951e989a1caeba5
|
279a141a3d4451b53f24bd369d80ac471da6fd95
|
/helloworld.py
|
fac8390bc075e79093fe9d45c734a6dca7cea8bc
|
[] |
no_license
|
emilyjennings/python-practice
|
4f010878263174487ab9ed5ae8c30c3b9ae2e1ca
|
b98b9ed354999fe3d2286bdd27e83ffd43807f20
|
refs/heads/master
| 2020-05-24T23:31:43.645103
| 2019-05-20T21:33:45
| 2019-05-20T21:33:45
| 187,516,122
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 446
|
py
|
#My first python code - in a while, anyway
print("Hello World")
# Constant-true condition: exercises a simple if statement.
if 5 > 2:
    print("good work")
# Basic variable assignment and printing.
x = "me"
y = 10
print(x)
print(y)
# Codewars
def xo(s):
    """Return True when *s* contains equal numbers of 'x's and 'o's.

    The comparison is case-insensitive; all other characters are ignored,
    and a string with no x's or o's counts as balanced.
    """
    # Bug fix: the original iterated over s.split() — whitespace-separated
    # *words* — so for inputs like "xxxoo" neither counter ever advanced and
    # the function wrongly returned True.  It also returned None (not False)
    # on imbalance and was case-sensitive.  Count characters instead.
    x_count = 0
    o_count = 0
    for ch in s.lower():
        if ch == "o":
            o_count += 1
        elif ch == "x":
            x_count += 1
    return x_count == o_count
def xo(s):
    """True when the lowercased string holds equal counts of 'x' and 'o'."""
    balance = 0
    for ch in s.lower():
        if ch == "x":
            balance += 1
        elif ch == "o":
            balance -= 1
    return balance == 0
|
[
"github email address"
] |
github email address
|
f7ff1126916a332306e7385b85ada97ec2c7f820
|
e32ee307e4c59cc18f9dea18d797784a1b23148f
|
/Quinton-t a single line, the minimum subarray sum..py
|
f1c7da01a7b41f0859e383b1d01ebc66770c9e60
|
[] |
no_license
|
GuhanSGCIT/SGCIT
|
f4ab44346186d45129c74cbad466c6614f9f0f08
|
8b2e5ccf693384aa22aa9d57f39b63e4659f6261
|
refs/heads/master
| 2020-07-11T05:47:54.033120
| 2020-07-07T05:02:41
| 2020-07-07T05:02:41
| 204,459,836
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
# Minimum (contiguous, non-empty) subarray sum via Kadane's algorithm:
# O(n) time / O(1) extra space, replacing the original enumeration of all
# subarray slices (O(n^2) slices, each summed -> O(n^3) overall).
n = int(input())
l = [int(x) for x in input().split()]
# Only the first n values participate, matching the original index loops.
# (Assumes the input actually supplies at least n numbers.)
l = l[:n]
best = cur = l[0]
for v in l[1:]:
    # Minimum sum of a subarray ending at v: extend the previous run or restart.
    cur = min(v, cur + v)
    best = min(best, cur)
print(best)
|
[
"noreply@github.com"
] |
GuhanSGCIT.noreply@github.com
|
874d3a461033beb96e17976e837e5e249cbfeb4b
|
935672cfefee4a8fe861f3247a6e27b7d1d0669a
|
/hoer/models/cifar/rezero/preactresnet.py
|
f79f59dd0a14b959453c3f9f64789b35d6ffefdf
|
[] |
no_license
|
sbl1996/hoer
|
533b10f047a4175a95f8a7cb94430002aef9a39d
|
8ced31d49ebe627eb0932f896484a8b2b2c223ce
|
refs/heads/main
| 2023-02-02T00:43:42.716916
| 2020-12-13T08:14:12
| 2020-12-13T08:14:12
| 321,008,436
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,583
|
py
|
import tensorflow as tf
from tensorflow.keras import Sequential, Model
from tensorflow.keras.initializers import Constant
from tensorflow.keras.layers import Layer
from hanser.models.layers import Act, Conv2d, Norm, GlobalAvgPool, Linear, Identity
class PreActResBlock(Layer):
    """Pre-activation residual block (norm -> act -> conv, twice) with a
    ReZero-style learnable residual scale initialized to zero."""
    def __init__(self, in_channels, out_channels, stride):
        super().__init__()
        self.norm1 = Norm(in_channels)
        self.act1 = Act()
        self.conv1 = Conv2d(in_channels, out_channels, kernel_size=3, stride=stride)
        self.norm2 = Norm(out_channels)
        self.act2 = Act()
        self.conv2 = Conv2d(out_channels, out_channels, kernel_size=3)
        # 1x1 projection shortcut only when spatial size or width changes.
        if stride != 1 or in_channels != out_channels:
            self.shortcut = Conv2d(in_channels, out_channels, kernel_size=1, stride=stride)
        # ReZero gate: starts at 0, so the block is initially the identity map.
        self.res_weight = self.add_weight(
            name='res_weight', shape=(), dtype=tf.float32,
            trainable=True, initializer=Constant(0.))
    def call(self, x):
        out = self.norm1(x)
        out = self.act1(out)
        # Pre-activation style: the projection shortcut (when present) is
        # taken AFTER the first norm/act; otherwise the raw input is used.
        shortcut = self.shortcut(out) if hasattr(self, 'shortcut') else x
        out = self.conv1(out)
        out = self.norm2(out)
        out = self.act2(out)
        out = self.conv2(out)
        return shortcut + self.res_weight * out
class ResNet(Model):
    """Pre-activation (wide) ResNet for CIFAR-sized inputs.

    `depth` is expected to satisfy (depth - 4) % 6 == 0; `k` is the width
    multiplier applied to every stage.
    """
    # Stem width followed by the base width of the three stages.
    stages = [16, 16, 32, 64]
    def __init__(self, depth, k, num_classes=10):
        super().__init__()
        # Residual blocks per stage, derived from the total network depth.
        num_blocks = (depth - 4) // 6
        self.conv = Conv2d(3, self.stages[0], kernel_size=3)
        self.layer1 = self._make_layer(
            self.stages[0] * 1, self.stages[1] * k, num_blocks, stride=1)
        # Stages 2 and 3 halve the spatial resolution (stride=2).
        self.layer2 = self._make_layer(
            self.stages[1] * k, self.stages[2] * k, num_blocks, stride=2)
        self.layer3 = self._make_layer(
            self.stages[2] * k, self.stages[3] * k, num_blocks, stride=2)
        self.norm = Norm(self.stages[3] * k)
        self.act = Act()
        self.avgpool = GlobalAvgPool()
        self.fc = Linear(self.stages[3] * k, num_classes)
    def _make_layer(self, in_channels, out_channels, blocks, stride):
        # Stack `blocks` residual blocks; only the first may downsample.
        layers = [PreActResBlock(in_channels, out_channels, stride=stride)]
        for i in range(1, blocks):
            layers.append(
                PreActResBlock(out_channels, out_channels, stride=1))
        return Sequential(layers)
    def call(self, x):
        # Stem conv, three stages, final norm/act, global pool, classifier.
        x = self.conv(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.norm(x)
        x = self.act(x)
        x = self.avgpool(x)
        x = self.fc(x)
        return x
|
[
"sbl1996@126.com"
] |
sbl1996@126.com
|
1c56c4804e9cce72cdbf8787317d9f8f9609f5de
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part003278.py
|
27bfeab472a10b41ed9fe95e81801cbb8866f1db
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,655
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher88509(CommutativeMatcher):
    """Machine-generated many-to-one matcher (sympy RuBi code generation).

    NOTE(review): this file appears auto-generated — do not edit by hand;
    regenerate from the pattern definitions instead.
    """
    _instance = None
    # Pattern table: id -> (pattern index, multiset of subpattern ids,
    # commutative variables with multiplicity bounds).
    patterns = {
        0: (0, Multiset({0: 1}), [
            (VariableWithCount('i2.2.1.2.1.0', 1, 1, S(1)), Mul)
        ])
    }
    subjects = {}
    subjects_by_id = {}
    bipartite = BipartiteGraph()
    associative = Mul
    max_optional_count = 1
    anonymous_patterns = set()
    def __init__(self):
        self.add_subject(None)
    @staticmethod
    def get():
        # Lazily-created process-wide singleton.
        if CommutativeMatcher88509._instance is None:
            CommutativeMatcher88509._instance = CommutativeMatcher88509()
        return CommutativeMatcher88509._instance
    @staticmethod
    def get_match_iter(subject):
        # Generated decision tree: destructures the subject and yields
        # (pattern id, substitution) for each successful match.
        subjects = deque([subject]) if subject is not None else deque()
        subst0 = Substitution()
        # State 88508
        if len(subjects) >= 1 and isinstance(subjects[0], Pow):
            tmp1 = subjects.popleft()
            subjects2 = deque(tmp1._args)
            # State 88510
            if len(subjects2) >= 1:
                tmp3 = subjects2.popleft()
                subst1 = Substitution(subst0)
                try:
                    subst1.try_add_variable('i2.2.0_1', tmp3)
                except ValueError:
                    pass
                else:
                    pass
                    # State 88511
                    if len(subjects2) >= 1:
                        tmp5 = subjects2.popleft()
                        subst2 = Substitution(subst1)
                        try:
                            subst2.try_add_variable('i2.2.1.2.1.2', tmp5)
                        except ValueError:
                            pass
                        else:
                            pass
                            # State 88512
                            if len(subjects2) == 0:
                                pass
                                # State 88513
                                if len(subjects) == 0:
                                    pass
                                    # 0: x**n
                                    yield 0, subst2
                        subjects2.appendleft(tmp5)
                subjects2.appendleft(tmp3)
            subjects.appendleft(tmp1)
        return
        # Trailing bare yield keeps this function a generator even though it
        # is unreachable after the return above.
        yield
from collections import deque
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
8367a3278f4af2f004b3b831acb7ec551b603b1f
|
053ad96d19c562c44e3fad53db37b24f4ec7134d
|
/torchelastic/tsm/driver/test/standalone_session_test.py
|
5e48f14e4a023c1330e6c9a25758fa33a742de50
|
[
"BSD-3-Clause"
] |
permissive
|
kuikuikuizzZ/elastic
|
7ef0ab7b1a4a3510e91eeb6b91b6f94f863940c2
|
cf2fb9c153cc371e6d6b341f15122c26965b7461
|
refs/heads/master
| 2022-12-31T20:11:38.957121
| 2020-09-23T18:38:11
| 2020-09-23T18:39:31
| 298,498,639
| 0
| 0
|
BSD-3-Clause
| 2020-09-25T07:20:59
| 2020-09-25T07:20:58
| null |
UTF-8
|
Python
| false
| false
| 6,924
|
py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
import tempfile
import unittest
from unittest.mock import MagicMock
from torchelastic.tsm.driver.api import (
Application,
AppNotReRunnableException,
AppState,
Container,
DescribeAppResponse,
Resources,
Role,
RunMode,
UnknownAppException,
)
from torchelastic.tsm.driver.local_scheduler import (
LocalDirectoryImageFetcher,
LocalScheduler,
)
from torchelastic.tsm.driver.standalone_session import StandaloneSession
from .test_util import write_shell_script
class Resource:
    # Canonical resource presets used by the tests below (the local
    # scheduler ignores them; they exist as realistic examples).
    SMALL = Resources(cpu=1, gpu=0, memMB=1024)
    MEDIUM = Resources(cpu=4, gpu=0, memMB=(4 * 1024))
    LARGE = Resources(cpu=16, gpu=0, memMB=(16 * 1024))
class StandaloneSessionTest(unittest.TestCase):
    """Integration-style tests for StandaloneSession backed by LocalScheduler.

    Fix: replaced the deprecated ``assertEquals`` alias (removed in
    Python 3.12) with ``assertEqual`` in test_status_ui_url.
    """

    def setUp(self):
        # Scratch dir with three tiny shell scripts the test apps run.
        self.test_dir = tempfile.mkdtemp("StandaloneSessionTest")

        write_shell_script(self.test_dir, "touch.sh", ["touch $1"])
        write_shell_script(self.test_dir, "fail.sh", ["exit 1"])
        write_shell_script(self.test_dir, "sleep.sh", ["sleep $1"])

        self.image_fetcher = LocalDirectoryImageFetcher()
        self.scheduler = LocalScheduler(self.image_fetcher)

        # resource ignored for local scheduler; adding as an example
        self.test_container = Container(image=self.test_dir).require(Resource.SMALL)

    def tearDown(self):
        shutil.rmtree(self.test_dir)

    def test_run(self):
        """A touch app runs to completion and reports SUCCEEDED."""
        test_file = os.path.join(self.test_dir, "test_file")
        session = StandaloneSession(
            name="test_session", scheduler=self.scheduler, wait_interval=1
        )

        role = Role(name="touch").runs("touch.sh", test_file).on(self.test_container)
        app = Application("name").of(role)
        app_id = session.run(app)
        self.assertEqual(AppState.SUCCEEDED, session.wait(app_id).state)

    def test_attach(self):
        """A second session can attach to, observe, and stop a running app."""
        session1 = StandaloneSession(name="test_session1", scheduler=self.scheduler)
        role = Role(name="sleep").runs("sleep.sh", "60").on(self.test_container)
        app = Application("sleeper").of(role)

        app_id = session1.run(app)
        session2 = StandaloneSession(name="test_session2", scheduler=self.scheduler)
        session2.attach(app_id)
        self.assertEqual(AppState.RUNNING, session2.status(app_id).state)
        session2.stop(app_id)
        self.assertEqual(AppState.CANCELLED, session2.status(app_id).state)

    def test_attach_and_run(self):
        """Attached apps are not re-runnable."""
        session1 = StandaloneSession(name="test_session1", scheduler=self.scheduler)
        test_file = os.path.join(self.test_dir, "test_file")
        role = Role(name="touch").runs("touch.sh", test_file).on(self.test_container)
        app = Application("touch_test_file").of(role)
        app_id = session1.run(app)

        session2 = StandaloneSession(name="test_session2", scheduler=self.scheduler)
        attached_app = session2.attach(app_id)
        with self.assertRaises(AppNotReRunnableException):
            session2.run(attached_app)

    def test_list(self):
        """list() reports every app launched through the session."""
        session = StandaloneSession(
            name="test_session", scheduler=self.scheduler, wait_interval=1
        )
        role = Role(name="touch").runs("sleep.sh", "1").on(self.test_container)
        app = Application("sleeper").of(role)

        num_apps = 4
        for _ in range(num_apps):
            # since this test validates the list() API,
            # we do not wait for the apps to finish so run the apps
            # in managed mode so that the local scheduler reaps the apps on exit
            session.run(app, mode=RunMode.MANAGED)

        apps = session.list()
        self.assertEqual(num_apps, len(apps))

    def test_evict_non_existent_app(self):
        # tests that apps previously run with this session that are finished and eventually
        # removed by the scheduler also get removed from the session after a status() API has been
        # called on the app
        scheduler = LocalScheduler(self.image_fetcher, cache_size=1)
        session = StandaloneSession(
            name="test_session", scheduler=scheduler, wait_interval=1
        )
        test_file = os.path.join(self.test_dir, "test_file")

        role = Role(name="touch").runs("touch.sh", test_file).on(self.test_container)
        app = Application("touch_test_file").of(role)

        # local scheduler was setup with a cache size of 1
        # run the same app twice (the first will be removed from the scheduler's cache)
        # then validate that the first one will drop from the session's app cache as well
        app_id1 = session.run(app)
        session.wait(app_id1)

        app_id2 = session.run(app)
        session.wait(app_id2)

        apps = session.list()

        self.assertEqual(1, len(apps))
        self.assertFalse(app_id1 in apps)
        self.assertTrue(app_id2 in apps)

    def test_status(self):
        """status() tracks RUNNING -> CANCELLED across stop()."""
        session = StandaloneSession(
            name="test_session", scheduler=self.scheduler, wait_interval=1
        )
        role = Role(name="sleep").runs("sleep.sh", "60").on(self.test_container)
        app = Application("sleeper").of(role)
        app_id = session.run(app)
        self.assertEqual(AppState.RUNNING, session.status(app_id).state)
        session.stop(app_id)
        self.assertEqual(AppState.CANCELLED, session.status(app_id).state)

    def test_status_unknown_app(self):
        session = StandaloneSession(
            name="test_session", scheduler=self.scheduler, wait_interval=1
        )
        with self.assertRaises(UnknownAppException):
            session.status("unknown_app_id")

    def test_status_ui_url(self):
        """status() surfaces the scheduler-provided ui_url."""
        app_id = "test_app"
        mock_scheduler = MagicMock()
        resp = DescribeAppResponse()
        resp.ui_url = "https://foobar"
        mock_scheduler.submit.return_value = app_id
        mock_scheduler.describe.return_value = resp

        session = StandaloneSession(
            name="test_ui_url_session", scheduler=mock_scheduler
        )
        role = Role("ignored").runs("/bin/echo").on(self.test_container)
        session.run(Application(app_id).of(role))
        status = session.status(app_id)
        # assertEquals was a deprecated alias of assertEqual (removed in 3.12).
        self.assertEqual(resp.ui_url, status.ui_url)

    def test_wait_unknown_app(self):
        session = StandaloneSession(
            name="test_session", scheduler=self.scheduler, wait_interval=1
        )
        with self.assertRaises(UnknownAppException):
            session.wait("unknown_app_id")

    def test_stop_unknown_app(self):
        session = StandaloneSession(
            name="test_session", scheduler=self.scheduler, wait_interval=1
        )
        with self.assertRaises(UnknownAppException):
            session.stop("unknown_app_id")
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
48f6aa788d0e6b6bd8bc7a621ccc3b855d38bbdc
|
31ba27461d50fcd85027318eacefa04d828feb4b
|
/addons/app-trobz-hr/it_equipment_bonus/security/post_object_security.py
|
1075bbfb89ee07b0ebcb2d990bff159c5f43d26d
|
[] |
no_license
|
TinPlusIT05/tms
|
5f258cec903d5bf43c26b93fc112fce0b32de828
|
673dd0f2a7c0b69a984342b20f55164a97a00529
|
refs/heads/master
| 2022-12-04T02:11:54.770745
| 2019-09-23T07:18:11
| 2019-09-23T07:18:11
| 210,278,672
| 0
| 0
| null | 2022-11-22T00:30:37
| 2019-09-23T06:18:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,091
|
py
|
# -*- encoding: utf-8 -*-
from openerp import models, api
group_user = 'Human Resources / Employee'
group_hr_manager = 'Human Resources / Manager'
class it_equipment_bonus_post_object_security(models.TransientModel):
    """Post-install hook that (re)creates model access rights for the
    it_equipment_bonus module."""
    _name = "it.equipment.bonus.post.object.security"

    @api.model
    def start(self):
        """Entry point invoked after module installation."""
        self.create_model_access_rights()
        return True

    @api.model
    def create_model_access_rights(self):
        """Declare per-model, per-group CRUD flags and delegate creation
        to the trobz.base helper.

        The flag lists are assumed to be [read, write, create, unlink] —
        TODO confirm against trobz.base.create_model_access_rights.
        """
        # Keys are plain strings; the parentheses around them in the original
        # were redundant (they looked like tuples but weren't) and are removed.
        # The group keys, by contrast, ARE intentional 1-tuples.
        MODEL_ACCESS_RIGHTS = {
            'hr.equipment.category': {
                (group_hr_manager,): [1, 1, 1, 1],
                (group_user,): [1, 0, 0, 0],
            },
            'employee.it.bonus': {
                (group_hr_manager,): [1, 1, 1, 1],
                (group_user,): [1, 0, 0, 0],
            },
            'hr.equipment.request': {
                (group_hr_manager,): [1, 1, 1, 1],
                (group_user,): [1, 1, 1, 0],
            },
        }
        return self.env['trobz.base'].with_context(
            {'module_name': 'it_equipment_bonus'}).create_model_access_rights(
            MODEL_ACCESS_RIGHTS)
|
[
"Tinplusit05@gmail.com"
] |
Tinplusit05@gmail.com
|
432097dea145bd35db9bfcab0f20d4ad2f970c45
|
fc8137f6a4df69640657a0af5d7201de3c6eb261
|
/accepted/Valid Palindrome.py
|
e791233777023d30d7f38d6a0df11bc4c02b9fd2
|
[] |
no_license
|
hustlrr/leetcode
|
68df72b49ee3bbb9f0755028e024cc9fea2c21aa
|
56e33dff3918e371f14d6f7ef03f8951056cc273
|
refs/heads/master
| 2020-04-12T08:14:25.371761
| 2017-01-01T12:19:34
| 2017-01-01T12:19:34
| 77,119,341
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 775
|
py
|
# coding=utf-8
class Solution(object):
    def isPalindrome(self, s):
        """
        Return True if *s* is a palindrome, considering only alphanumeric
        characters and ignoring case.

        :type s: str
        :rtype: bool
        """
        # Normalize once, then compare against the reverse.  The original
        # two-pointer version carried leftcnt/rightcnt counters that were
        # provably always equal (both advanced together), so the final
        # `leftcnt == rightcnt` was dead state that only obscured the logic.
        chars = [c for c in s.lower() if c.isalnum()]
        return chars == chars[::-1]
|
[
"823729390@qq.com"
] |
823729390@qq.com
|
c01a223e3df44056ff29c6a04a6b554b73afe3f5
|
89c6895a0d71d4ce1fa6ca9e415649625ba2d1c6
|
/babybuddy/__init__.py
|
a0ff099532963b2e40c164385a3203065ae80f3e
|
[
"BSD-2-Clause",
"BSD-2-Clause-Views"
] |
permissive
|
bry-c/babybuddy
|
18f72a3f6480abaafe34250bf82828567fe05a23
|
49156c1d80568a8c052a6788af9a63ea658b7452
|
refs/heads/master
| 2020-12-20T18:24:29.053433
| 2020-01-25T05:24:43
| 2020-01-25T05:24:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,979
|
py
|
"""
.----------------.
| .--------------. |
| | ______ | |
| | |_ _ \ | |
| | | |_) | | |
| | | __'. | |
| | _| |__) | | |
| | |_______/ | |
| | | |
| '--------------' |
'----------------'
.----------------. .----------------.
| .--------------. | .--------------. |
| | __ | | | ______ | |
| | / \ | | | |_ _ \ | |
| | / /\ \ | | | | |_) | | |
| | / ____ \ | | | | __'. | |
| | _/ / \ \_ | | | _| |__) | | |
| ||____| |____|| | | |_______/ | |
| | | | | | |
| '--------------' | '--------------' |
'----------------' '----------------'
.----------------. .----------------. .----------------.
| .--------------. | .--------------. | .--------------. |
| | ____ ____ | | | ______ | | | _____ _____ | |
| | |_ _||_ _| | | | |_ _ \ | | ||_ _||_ _|| |
| | \ \ / / | | | | |_) | | | | | | | | | |
| | \ \/ / | | | | __'. | | | | ' ' | | |
| | _| |_ | | | _| |__) | | | | \ `--' / | |
| | |______| | | | |_______/ | | | `.__.' | |
| | | | | | | | | |
| '--------------' | '--------------' | '--------------' |
'----------------' '----------------' '----------------'
.----------------. .----------------. .----------------. .----------------.
| .--------------. | .--------------. | .--------------. | .--------------. |
| | ________ | | | ________ | | | ____ ____ | | | _ | |
| | |_ ___ `. | | | |_ ___ `. | | | |_ _||_ _| | | | | | | |
| | | | `. \ | | | | | `. \ | | | \ \ / / | | | | | | |
| | | | | | | | | | | | | | | | \ \/ / | | | | | | |
| | _| |___.' / | | | _| |___.' / | | | _| |_ | | | | | | |
| | |________.' | | | |________.' | | | |______| | | | |_| | |
| | | | | | | | | | | (_) | |
| '--------------' | '--------------' | '--------------' | '--------------' |
'----------------' '----------------' '----------------' '----------------'
""" # noqa
# Package metadata consumed by the application and packaging tooling.
__title__ = 'Baby Buddy'
__version__ = '1.2.4'
__license__ = 'BSD 2-Clause'
VERSION = __version__
# Django AppConfig path used as this package's default app configuration.
default_app_config = 'babybuddy.apps.BabyBuddyConfig'
|
[
"chris@chrxs.net"
] |
chris@chrxs.net
|
66ee5476b811b7932d971e889a6b0e8fa585e838
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/jozLzME3YptxydiQm_24.py
|
8b1f7fcb54f2b79c2a1a818e7a9f5e7adc43b9b8
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 784
|
py
|
"""
Create a function that takes in a word and determines whether or not it is
plural. A plural word is one that ends in "s".
### Examples
is_plural("changes") ➞ True
is_plural("change") ➞ False
is_plural("dudes") ➞ True
is_plural("magic") ➞ False
### Notes
* Don't forget to `return` the result.
* Remember that return `True` ( _boolean_ ) is not the same as return `"True"` ( _string_ ).
* This is an oversimplification of the English language. We are ignoring edge cases like "goose" and "geese", "fungus" and "fungi", etc.
* If you get stuck on a challenge, find help in the **Resources** tab.
* If you're _really_ stuck, unlock solutions in the **Solutions** tab.
"""
def is_plural(word):
    """Heuristically report whether *word* is plural: does it end in "s"?"""
    return word[-1:] == "s"
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
2fa0f6f2d17f04b73bcf80ec13cb4906693bb7f3
|
a56623649791b945ae07d407533511902b67ebad
|
/src/installer/src/tortuga/db/dbManager.py
|
1f81e2d146ecdf6855c4d2f51897b0247a325ef2
|
[
"Apache-2.0"
] |
permissive
|
joedborg/tortuga
|
9792bc0ff1a9d7fa335ac41df9324dc502b80e0b
|
5690e41c0c78602c195f699bf314c6c94ca7b619
|
refs/heads/master
| 2021-04-26T22:56:05.646538
| 2018-11-14T16:33:00
| 2018-11-14T16:33:00
| 123,898,555
| 0
| 0
| null | 2018-03-05T09:48:11
| 2018-03-05T09:48:11
| null |
UTF-8
|
Python
| false
| false
| 7,768
|
py
|
# Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=multiple-statements,no-member,no-name-in-module
# pylint: disable=not-callable
import configparser
from logging import getLogger
import os
import sqlalchemy
import sqlalchemy.orm
from tortuga.config.configManager import ConfigManager
from tortuga.exceptions.dbError import DbError
from tortuga.kit.registry import get_all_kit_installers
from tortuga.objects.tortugaObjectManager import TortugaObjectManager
# from .tables import get_all_table_mappers
from .sessionContextManager import SessionContextManager
from .models.base import ModelBase
from . import models # noqa pylint: disable=unused-import
logger = getLogger(__name__)
class DbManager(TortugaObjectManager):
"""
Class for db management.
:param engine: a SQLAlchemy database engine instance
:param init: a flag that is set when the database has not yet
been initialized. If this flag is set, not attempts
will be made to load/map kit tables. This flag is
cleared once the database has been initialized.
"""
def __init__(self, engine=None):
super(DbManager, self).__init__()
if not engine:
self._cm = ConfigManager()
self._dbConfig = self._refreshDbConfig()
engineURI = self.__getDbEngineURI()
if self._dbConfig['engine'] == 'sqlite' and \
not os.path.exists(self._dbConfig['path']):
# Ensure SQLite database file is created with proper permissions
fd = os.open(
self._dbConfig['path'], os.O_CREAT, mode=0o600)
os.close(fd)
self._engine = sqlalchemy.create_engine(engineURI)
else:
self._engine = engine
self.Session = sqlalchemy.orm.scoped_session(
sqlalchemy.orm.sessionmaker(bind=self.engine))
def _map_db_tables(self):
#
# Make sure all kit table mappers have been registered
#
for kit_installer_class in get_all_kit_installers():
kit_installer = kit_installer_class()
kit_installer.register_database_table_mappers()
#
# Map all tables that haven't yet been mapped
#
# for table_mapper in get_all_table_mappers():
# key = table_mapper.__name__
# if key not in self._mapped_tables.keys():
# logger.debug('Mapping table: {}'.format(key))
# self._mapped_tables[key] = table_mapper()
# self._mapped_tables[key].map(self)
pass
@property
def engine(self):
"""
SQLAlchemy Engine object property
"""
self._map_db_tables()
return self._engine
def session(self):
"""
Database session context manager
"""
return SessionContextManager(self)
def init_database(self):
#
# Create tables
#
self._map_db_tables()
try:
ModelBase.metadata.create_all(self.engine)
except Exception:
self.getLogger().exception('SQLAlchemy raised exception')
raise DbError('Check database settings or credentials')
@property
def metadata(self):
return self._metadata
def __getDbEngineURI(self):
dbPort = self._dbConfig['port']
dbHost = self._dbConfig['host']
engine = self._dbConfig['engine']
dbUser = self._dbConfig['username']
dbPassword = self._dbConfig['password']
if engine == 'sqlite':
engineURI = 'sqlite:///%s' % (self._dbConfig['path'])
else:
if dbUser is not None:
if dbPassword is not None:
userspec = '%s:%s' % (dbUser, dbPassword)
else:
userspec = dbUser
else:
userspec = None
if dbPort is not None:
hostspec = '%s:%s' % (dbHost, dbPort)
else:
hostspec = dbHost
engineURI = f'{engine}+pymysql' if engine == 'mysql' else engine
engineURI += '://'
if userspec is not None:
engineURI += f'{userspec}@'
engineURI += f'{hostspec}' + '/{}'.format(self._cm.getDbSchema())
return engineURI
def _getDefaultDbEngine(self): \
# pylint: disable=no-self-use
return 'sqlite'
def _getDefaultDbHost(self): \
# pylint: disable=no-self-use
return 'localhost'
def _getDefaultDbPort(self, engine): \
# pylint: disable=no-self-use
# MySQL default port
if engine == 'mysql':
return 3306
return None
def _getDefaultDbUserName(self):
return self._cm.getDbUser()
def _getDefaultDbPassword(self):
if os.path.exists(self._cm.getDbPasswordFile()):
with open(self._cm.getDbPasswordFile()) as fp:
dbPassword = fp.read()
else:
dbPassword = None
return dbPassword
def _refreshDbConfig(self, cfg=None):
dbConfig = {}
if cfg is None:
cfg = configparser.ConfigParser()
cfg.read(os.path.join(self._cm.getKitConfigBase(), 'tortuga.ini'))
# Database engine
val = cfg.get('database', 'engine').strip().lower() \
if cfg.has_option('database', 'engine') else \
self._getDefaultDbEngine()
dbConfig['engine'] = val
if dbConfig['engine'] == 'sqlite':
# If database is sqlite, read the path
dbConfig['path'] = cfg.get('database', 'path') \
if cfg.has_section('database') and \
cfg.has_option('database', 'path') else \
os.path.join(self._cm.getEtcDir(),
self._cm.getDbSchema() + '.sqlite')
# Database host
val = cfg.get('database', 'host') \
if cfg.has_option('database', 'host') else \
self._getDefaultDbHost()
dbConfig['host'] = val
# Database port
val = cfg.get('database', 'port') \
if cfg.has_option('database', 'port') else None
dbConfig['port'] = val if val else self._getDefaultDbPort(
engine=dbConfig['engine'])
# Database username
val = cfg.get('database', 'username') \
if cfg.has_option('database', 'username') \
else self._getDefaultDbUserName()
dbConfig['username'] = val
# Database password
val = cfg.get('database', 'password') \
if cfg.has_option('database', 'password') \
else self._getDefaultDbPassword()
dbConfig['password'] = val
return dbConfig
def get_backend_opts(self): \
# pylint: disable=no-self-use
return {
'mysql_engine': 'InnoDB',
}
def getMetadataTable(self, table):
return self._metadata.tables[table]
def openSession(self):
""" Open db session. """
return self.Session()
def closeSession(self):
    """Dispose of the scoped_session for the current thread."""
    self.Session.remove()
|
[
"mfrisch@univa.com"
] |
mfrisch@univa.com
|
ab958f2bbc272631ae086eaa0bf5cfab4ad7ed6b
|
747755833862b8e9d0f58ebc62879d6ef47c23c8
|
/python-master (5)/python-master/test/tree.py
|
2b41e05f60a4593aa20574d59fedcd8f65037cf4
|
[] |
no_license
|
tangsong41/stu_py
|
98a06730dbca6e158cf81c18d98fe1317c1ae512
|
d41507cd8dd9e8a54084872dfa15c36da443c02b
|
refs/heads/master
| 2022-12-11T23:53:57.530946
| 2019-01-15T18:29:19
| 2019-01-15T18:29:19
| 163,953,100
| 3
| 0
| null | 2022-12-07T23:24:01
| 2019-01-03T09:41:29
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,287
|
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: tree.py
@time: 2017/5/22 下午6:57
"""
import json
from collections import defaultdict
def tree():
    """Return an autovivifying tree.

    A plain dict requires keys to be assigned before nesting; a
    defaultdict whose factory is this very function creates a fresh
    subtree on first access of any missing key.
    """
    return defaultdict(tree)
def test_users():
    """Exercise the autovivifying tree with a small user hierarchy.

    Builds a three-level structure and prints it in several views.
    Uses print() calls (valid in both Python 2 and 3) instead of the
    original Python-2-only print statements.
    """
    users = tree()
    users['jack_1']['jack_2_1']['jack_3_1'] = {}
    users['jack_1']['jack_2_1']['jack_3_2'] = {}
    users['jack_1']['jack_2_2'] = {}
    users['jack_1']['jack_2_2']['jack_3_1'] = {}
    users['lily_1']['lily_2_1']['lily_3_1'] = {}
    users['lily_1']['lily_2_2']['lily_3_2'] = {}
    users['lily_1']['lily_2_3']['lily_3_3'] = {}
    users['emma_1']['emma_2_1'] = {}
    # Raw defaultdict structure
    print(users)
    # Pretty-printed JSON view
    print(json.dumps(users, indent=4))
    # First level (top-level keys of users)
    print([i for i in users])
    # Second level (keys of jack_1's children)
    print([i for i in users['jack_1']])
    # Third level (keys under jack_1/jack_2_1)
    print([i for i in users['jack_1']['jack_2_1']])
# Flat parent/child edge list: each entry links user id 'u' to its
# parent id 'p'.  Consumed by get_child_users() to rebuild the tree.
l = [
    {'u': 4, 'p': 1},
    {'u': 10, 'p': 1},
    {'u': 5, 'p': 1},
    {'u': 6, 'p': 2},
    {'u': 7, 'p': 2},
    {'u': 8, 'p': 3},
    {'u': 9, 'p': 3},
    {'u': 11, 'p': 3},
    {'u': 12, 'p': 3},
    {'u': 13, 'p': 5},
    {'u': 14, 'p': 6},
    {'u': 15, 'p': 10},
    {'u': 17, 'p': 10},
    {'u': 19, 'p': 10},
    {'u': 20, 'p': 15},
    {'u': 21, 'p': 15},
    {'u': 22, 'p': 17},
    {'u': 23, 'p': 22},
]
def get_child_users(uid):
    """Return the ids of every user whose parent is *uid*."""
    return [entry['u'] for entry in l if entry['p'] == uid]
def test_team(uid):
    """Print, as JSON, the subtree (up to three levels) rooted at *uid*.

    Walks the flat edge list three levels deep via get_child_users()
    and materializes the result into an autovivifying tree.  Uses
    print() (valid in both Python 2 and 3) instead of the original
    Python-2-only print statement.
    """
    team = tree()
    for uid1 in get_child_users(uid):
        team[uid1] = {}
        for uid2 in get_child_users(uid1):
            team[uid1][uid2] = {}
            for uid3 in get_child_users(uid2):
                team[uid1][uid2][uid3] = {}
    print(json.dumps(team, indent=4))


if __name__ == '__main__':
    # test_users()
    test_team(1)
|
[
"369223985@qq.com"
] |
369223985@qq.com
|
a83ace36cf0328a163b1d22f951b524039dc30db
|
db1b327c4913c453b2fdd9dda661938c4abc5c0e
|
/abc/89/C.py
|
1dad6e66a1695248dab6a8d662bbaa2d771d8383
|
[] |
no_license
|
oamam/atcoder
|
0c129aab72e3c7090c9799fdf52f6e8119ef5238
|
658054b69b7586eed896484535dcfa1fef498e43
|
refs/heads/master
| 2021-06-26T09:01:12.389266
| 2020-10-30T02:01:11
| 2020-10-30T02:01:11
| 165,225,322
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
import itertools
def main():
    """Count name triples with distinct initials among M, A, R, C, H.

    Reads N names from stdin, tallies how many start with each of the
    five letters, then sums counts[a] * counts[b] * counts[c] over every
    3-combination of initials (AtCoder ABC 089 C).
    """
    N = int(input())
    counts = {'M': 0, 'A': 0, 'R': 0, 'C': 0, 'H': 0}
    for _ in range(N):
        name = input()
        if name[0] in counts:
            counts[name[0]] += 1
    ans = 0
    # combinations() is consumed lazily; no need to materialize a list.
    for a, b, c in itertools.combinations(counts, 3):
        ans += counts[a] * counts[b] * counts[c]
    print(ans)


if __name__ == '__main__':
    # Guard so importing this module does not block on stdin.
    main()
|
[
"chapa0106@gmail.com"
] |
chapa0106@gmail.com
|
a2921655da4108ff25537002abd8ac828267b205
|
461bb1cd322c381be77cafdd2deb78223abfe79b
|
/tests/test_config_locator.py
|
82611972aa5315bb85e8d38d511aa66e7c601536
|
[
"MIT"
] |
permissive
|
ryankanno/py-configurator
|
7a3b205cae2d424b4671c1154ba97d5afa8809a6
|
749a4dc329d23d976712d241da13c1d942ad3d01
|
refs/heads/master
| 2020-06-02T10:38:51.554795
| 2015-03-16T22:45:56
| 2015-03-16T22:45:56
| 17,777,792
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,435
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from nose.tools import eq_
from nose.tools import ok_
import os
from py_configurator import Locator
import unittest
class TestLocator(unittest.TestCase):
    """Unit tests for py_configurator's Locator."""

    def test_locator_construction(self):
        """Attributes come from the constructor and remain writable."""
        name, local, system, key = (
            'foo.bar', './', '/foo/bar/tmp', 'CONFIG_KEY')

        locator = Locator(
            env_key=key,
            config_name=name,
            local_dir=local,
            system_dir=system)

        eq_(locator.config_name, name)
        eq_(locator.local_dir, local)
        eq_(locator.env_key, key)

        # Re-assign every settable attribute and verify the values stick.
        name2, local2, key2 = (
            'foo.bar.2', '/foo/bar/tmp/2', 'CONFIG_KEY_2')
        locator.config_name = name2
        locator.local_dir = local2
        locator.env_key = key2

        eq_(locator.config_name, name2)
        eq_(locator.local_dir, local2)
        eq_(locator.env_key, key2)

    def test_config_locator_get_config_search_paths(self):
        """Search paths include env var, local, system, and home entries."""
        name, local, system, key = (
            'foo.bar', './', '/foo/bar/tmp', 'CONFIG_KEY')
        env_path = '/bar/config.path'
        os.environ[key] = env_path

        locator = Locator(
            env_key=key,
            config_name=name,
            local_dir=local,
            system_dir=system)

        paths = locator.get_config_paths()
        ok_(env_path in paths)
        ok_(os.path.join('./', name) in paths)
        ok_('/foo/bar/tmp/foo.bar' in paths)
        ok_(os.path.join(os.path.expanduser("~"), name) in paths)

    def test_config_locator_get_config(self):
        """A config present in the local data dir is found and parsed."""
        name = 'foobar.ini'
        local = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), 'data')
        system = '/foo/bar/tmp'
        key = 'CONFIG_KEY'
        os.environ[key] = '/bar/config.path'

        locator = Locator(
            env_key=key,
            config_name=name,
            local_dir=local,
            system_dir=system)

        config = locator.get_config()
        ok_(config is not None)
        ok_(config.get('Foo.Bar') == "1")
# vim: filetype=python
|
[
"ryankanno@localkinegrinds.com"
] |
ryankanno@localkinegrinds.com
|
bdffbc8830f8b59e9e1ff61aa4da2822cdd77638
|
a1c5e68d93cd7d7a5620c34c0567b006fa33cd38
|
/.eggs/PyScaffold-3.0.3-py3.6.egg/pyscaffold/extensions/travis.py
|
3d97d584f36aa204824901ac55d8ded1f6778621
|
[
"MIT"
] |
permissive
|
csm-adapt/citrine_converters
|
4f4e3e57379460f0e1205bf643e6251b36ade772
|
32eef5f5e733e7ab9031b2f129bb23b90cedc6bf
|
refs/heads/master
| 2021-01-21T19:13:50.156881
| 2020-01-05T05:48:07
| 2020-01-05T05:48:07
| 92,125,603
| 0
| 2
|
BSD-2-Clause
| 2018-08-13T22:39:56
| 2017-05-23T03:36:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,367
|
py
|
# -*- coding: utf-8 -*-
"""
Extension that generates configuration and script files for Travis CI.
"""
from __future__ import absolute_import
from ..templates import travis, travis_install
from ..api import Extension
from ..api import helpers
class Travis(Extension):
    """Generate Travis CI configuration files"""

    def activate(self, actions):
        """Activate the extension.

        Registers :meth:`add_files` to run after the
        ``define_structure`` action.

        Args:
            actions (list): list of actions to perform

        Returns:
            list: updated list of actions
        """
        return self.register(
            actions, self.add_files, after='define_structure')

    def add_files(self, struct, opts):
        """Merge the Travis CI files into the project structure.

        Args:
            struct (dict): project representation as (possibly) nested
                :obj:`dict`.
            opts (dict): given options, see :obj:`create_project` for
                an extensive list.

        Returns:
            struct, opts: updated project representation and options
        """
        # Existing files win: both entries are added with NO_OVERWRITE.
        travis_files = {
            '.travis.yml': (travis(opts), helpers.NO_OVERWRITE),
            'tests': {
                'travis_install.sh': (
                    travis_install(opts), helpers.NO_OVERWRITE),
            },
        }
        merged = helpers.merge(struct, {opts['project']: travis_files})
        return merged, opts
|
[
"bkappes@mines.edu"
] |
bkappes@mines.edu
|
d8bdb2f6559ca6362e0ef57c900953ab34883af0
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/ThirteenTeV/GluGluSpin0/GluGluSpin0ToZGamma_ZToLL_W_0p014_M_750_TuneCUEP8M1_13TeV_pythia8_cfi.py
|
f60fb1b53fc8fc0548de2a77ed013835f321393b
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596
| 2023-08-29T14:53:43
| 2023-08-29T14:53:43
| 11,424,867
| 69
| 987
| null | 2023-09-14T12:41:28
| 2013-07-15T14:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,393
|
py
|
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *

# Pythia8 generator filter: gg -> spin-0 resonance at 750 GeV decaying to
# Z(->ll) + gamma at 13 TeV with the CUEP8M1 tune.  The SM-Higgs machinery
# (PDG id 25) is reused for the generic scalar, as suggested by the sample
# name -- confirm against the request documentation.
generator = cms.EDFilter("Pythia8GeneratorFilter",
    comEnergy = cms.double(13000.0),               # centre-of-mass energy [GeV]
    crossSection = cms.untracked.double(1.095e-3),
    filterEfficiency = cms.untracked.double(1),
    maxEventsToPrint = cms.untracked.int32(0),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    pythiaPylistVerbosity = cms.untracked.int32(1),
    PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CUEP8M1SettingsBlock,
        processParameters = cms.vstring(
            'HiggsSM:gg2H = on',        # gluon-gluon fusion production
            '25:m0 = 750',              # resonance mass [GeV]
            '25:mWidth = 0.105',        # forced resonance width [GeV]
            '25:onMode = off',          # disable all decays of 25 ...
            '25:OnIfMatch = 23 22',     # ... then enable only Z + gamma
            '25:doForceWidth = on',
            'Higgs:clipWings = on',
            # NOTE(review): a second 'clipWings' assignment with a numeric
            # value looks like it was meant to be 'Higgs:wingsFac' -- confirm.
            'Higgs:clipWings = 10',
            '23:onMode = off',          # restrict the Z to charged-lepton
            '23:OnIfMatch = 11 11',     # pairs: ee,
            '23:OnIfMatch = 13 13',     # mumu,
            '23:OnIfMatch = 15 15',     # tautau
        ),
        parameterSets = cms.vstring('pythia8CommonSettings',
                                    'pythia8CUEP8M1Settings',
                                    'processParameters',
        )
    )
)

ProductionFilterSequence = cms.Sequence(generator)
|
[
"sheffield@physics.rutgers.edu"
] |
sheffield@physics.rutgers.edu
|
4a2a7043ddff842f7ad9b18905ebc72ba0379d26
|
9975b2681a079b46d901b994d2bb50609d62791a
|
/StringSplitAndJoin.py
|
5a97e7a31950412b8579f1742aeb40604a4a44f8
|
[] |
no_license
|
elvinyeka/Hakker_Rank
|
1b0d4aae7a6f4c9ac08f8948be4e5740950057c9
|
ab8c42a3e373d4e4460a6c261b77bde65cf56bfb
|
refs/heads/master
| 2022-12-17T00:32:41.279518
| 2020-09-14T12:09:31
| 2020-09-14T12:09:31
| 294,422,920
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
def split_and_join(line):
    """Return *line* with every space replaced by a hyphen.

    https://www.hackerrank.com/challenges/python-string-split-and-join/problem

    The original built the result one character at a time with ``+=``,
    which is quadratic in the worst case; split/join does the same work
    in a single C-level pass and preserves runs of consecutive spaces
    (each space maps to exactly one hyphen).
    """
    return "-".join(line.split(" "))


if __name__ == '__main__':
    line = input()
    result = split_and_join(line)
    print(result)
|
[
"elvinyeka@gmail.com"
] |
elvinyeka@gmail.com
|
a1e65e5164610be033d72ac8d03ab0a25093dbfa
|
3851d5eafcc5fd240a06a7d95a925518412cafa0
|
/Django_Code/gs129/gs129/asgi.py
|
eba9818d2ad8b07e93af3c77180c5cc2d4df5d63
|
[] |
no_license
|
Ikshansaleem/DjangoandRest
|
c0fafaecde13570ffd1d5f08019e04e1212cc2f3
|
0ccc620ca609b4ab99a9efa650b5893ba65de3c5
|
refs/heads/master
| 2023-01-31T04:37:57.746016
| 2020-12-10T06:27:24
| 2020-12-10T06:27:24
| 320,180,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
"""
ASGI config for gs129 project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'gs129.settings')
application = get_asgi_application()
|
[
"ikshan3108@gmail.com"
] |
ikshan3108@gmail.com
|
e46bc6e3189a1504a348b21db1fec3ed510eeda1
|
ab1d659a36a7af22bd65b8a91f5059cc24eb7a01
|
/bigfish_functions/Lowest.py
|
a19bab32e7d4762357a0df46ccca153cec09d610
|
[] |
no_license
|
xingetouzi/Bigfish
|
e1afc23c59dc139418678611bf44d00e2e67a09d
|
7b18192149f0e2c42c8491f9bc7ea5dede11398b
|
refs/heads/master
| 2021-01-21T04:55:34.255633
| 2016-06-06T03:18:15
| 2016-06-06T03:18:15
| 48,084,560
| 4
| 3
| null | 2016-06-06T03:18:15
| 2015-12-16T03:18:38
|
Python
|
UTF-8
|
Python
| false
| false
| 730
|
py
|
# -*- coding:utf-8 -*-
# Compute the minimum of a time series (open, close, high, low, returns, ...)
# over a window of bars.
def Lowest(length, price=None, offset=0):
    """Return the lowest value of *price* over *length* bars, starting
    *offset* bars back.

    length -- window size in bars (int); must be positive
    price  -- indexed series of values; defaults to the global Low series
    offset -- how many bars back the window starts (int, default 0)

    Returns None when length is invalid or when there are not enough
    bars to cover the window (requires BarNum > length + offset).
    NOTE: Low and BarNum are globals presumably injected by the trading
    framework -- confirm against the runtime environment.
    """
    if length <= 0:
        return None
    if price is None:
        price = Low
    if BarNum <= length + offset:
        return None
    # min() over the window [offset, offset + length) using indexing only,
    # since the series type may not support slicing.
    return min(price[offset + i] for i in range(length))
|
[
"541795600@qq.com"
] |
541795600@qq.com
|
bb38a9043b0745fa075e2646e3f8c1a003e7c6a5
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-network/azure/mgmt/network/v2018_11_01/models/ddos_protection_plan_py3.py
|
b29926c6d258440a7323c347d31c5c0c172bfb8d
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 3,110
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DdosProtectionPlan(Model):
    """A DDoS protection plan in a resource group.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :param location: Resource location.
    :type location: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :ivar resource_guid: The resource GUID property of the DDoS protection
     plan resource. It uniquely identifies the resource, even if the user
     changes its name or migrate the resource across subscriptions or resource
     groups.
    :vartype resource_guid: str
    :ivar provisioning_state: The provisioning state of the DDoS protection
     plan resource. Possible values are: 'Succeeded', 'Updating', 'Deleting',
     and 'Failed'.
    :vartype provisioning_state: str
    :ivar virtual_networks: The list of virtual networks associated with the
     DDoS protection plan resource. This list is read-only.
    :vartype virtual_networks:
     list[~azure.mgmt.network.v2018_11_01.models.SubResource]
    :ivar etag: A unique read-only string that changes whenever the resource
     is updated.
    :vartype etag: str
    """

    # Attributes marked readonly are server-populated; msrest will not
    # serialize client-supplied values for them.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'resource_guid': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'virtual_networks': {'readonly': True},
        'etag': {'readonly': True},
    }

    # Maps Python attribute names to wire-format JSON keys (dotted keys
    # address nested 'properties' objects) for msrest (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'resource_guid': {'key': 'properties.resourceGuid', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'virtual_networks': {'key': 'properties.virtualNetworks', 'type': '[SubResource]'},
        'etag': {'key': 'etag', 'type': 'str'},
    }

    def __init__(self, *, location: str=None, tags=None, **kwargs) -> None:
        super(DdosProtectionPlan, self).__init__(**kwargs)
        # Read-only fields start as None and are filled in from server
        # responses during deserialization.
        self.id = None
        self.name = None
        self.type = None
        self.location = location
        self.tags = tags
        self.resource_guid = None
        self.provisioning_state = None
        self.virtual_networks = None
        self.etag = None
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.