blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2d201164d5d96ab0e272ba9cbbde60a03f5615a6
|
0f2b08b31fab269c77d4b14240b8746a3ba17d5e
|
/orttraining/orttraining/python/training/ortmodule/experimental/json_config/__init__.py
|
d4399b1ee9c09763510caa7de207a45a11dc26ff
|
[
"MIT"
] |
permissive
|
microsoft/onnxruntime
|
f75aa499496f4d0a07ab68ffa589d06f83b7db1d
|
5e747071be882efd6b54d7a7421042e68dcd6aff
|
refs/heads/main
| 2023-09-04T03:14:50.888927
| 2023-09-02T07:16:28
| 2023-09-02T07:16:28
| 156,939,672
| 9,912
| 2,451
|
MIT
| 2023-09-14T21:22:46
| 2018-11-10T02:22:53
|
C++
|
UTF-8
|
Python
| false
| false
| 272
|
py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# __init__.py
# JSON global constants goes here
# Environment variable that points ORTModule at its JSON config file.
JSON_PATH_ENVIRONMENT_KEY = "ORTMODULE_JSON_CONFIG_PATH"
# Re-exported loader; deliberately imported after the constant it reads (noqa: E402).
from ._load_config_from_json import load_from_json # noqa: E402, F401
|
[
"noreply@github.com"
] |
microsoft.noreply@github.com
|
5b3aa86b8d0fc8ff213bedf0c58f4d1b47b2f43f
|
cdd1901812799e5542ac6c2ecd5af06eb30aeacd
|
/Datasets/voc/find_cls_in_voc.py
|
1183ca0e033352f784398f162221a1315bbea092
|
[] |
no_license
|
thinkinchaos/Tools
|
ff0030494996493aa75b355880961a5d6b511ba6
|
f0571d101c7003ded516b036ce2d6552d38b443f
|
refs/heads/master
| 2023-06-10T01:30:29.285972
| 2021-07-01T02:09:51
| 2021-07-01T02:09:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,107
|
py
|
import glob
import xml.etree.ElementTree as ET
import numpy as np

# Folder of Pascal-VOC style annotation XML files to scan.
ANNOTATIONS_PATH = 'E:/datasets/nuclear_obj_cold/Annotations'
# names=set()
from pathlib import Path

# NOTE(review): `all` shadows the builtin of the same name.
# Collects the names of annotation files that contain a 'dog' object.
all=[]
train=[]
val=[]
for xml_file in Path(ANNOTATIONS_PATH).glob('*.xml'):
    tree = ET.parse(str(xml_file))
    xml_root = tree.getroot()
    names_this_xml=[]
    for obj in xml_root.findall('object'):
        # names_this_xml.append(obj.find('name').text)
        if obj.find('name').text == 'dog':
            # appended once per matching <object>, so a file with several
            # 'dog' objects appears several times in the list
            all.append(xml_file.name)
    # obj.find('name').text = 'brick'
    # tree.write(xml_file, encoding="utf-8", xml_declaration=True, method='xml')
    # print(names_this_xml)
    # if 'robat' in names_this_xml:
    # element = ET.Element('object')
    #
    # sub_element1 = ET.Element('name')
    # sub_element1.text = obj_info[1]
    # element.append(sub_element1)
    #
    # sub_element2 = ET.Element('bndbox')
    # xmin = ET.Element('xmin')
    # xmin.text = str(obj_info[2])
    # ymin = ET.Element('ymin')
    # ymin.text = str(obj_info[3])
    # xmax = ET.Element('xmax')
    # xmax.text = str(obj_info[4])
    # ymax = ET.Element('ymax')
    # ymax.text = str(obj_info[5])
    # sub_element2.append(xmin)
    # sub_element2.append(ymin)
    # sub_element2.append(xmax)
    # sub_element2.append(ymax)
    #
    # element.append(sub_element2)
    # root.append(element)
print(all)
# # from pathlib import Path
# import random
# # xmls=[i.name for i in Path('Annotations_CarPersonZebra').iterdir() if i.is_file()]
# random.shuffle(all)
# percent=0.9
# train_n = int(len(all)*percent)
# train=all[:train_n]
# val=all[train_n:]
#
# with open('//192.168.133.15/workspace/sunxin/DATA/2.7_Zebra_HardNeg/Annotations_CarPersonZebra/zebra_train.txt','w') as f:
#     for i in train:
#         f.write(i)
#         f.write('\n')
#
# with open('//192.168.133.15/workspace/sunxin/DATA/2.7_Zebra_HardNeg/Annotations_CarPersonZebra/zebra_val.txt','w') as f:
#     for i in val:
#         f.write(i)
#         f.write('\n')
|
[
"1044068981@qq.com"
] |
1044068981@qq.com
|
8871cdcc6760cba68626ebe15775032f93f8426c
|
fa6f91b50125150c8d77937e002b53788dbcb19d
|
/bin/pushmp
|
013788499bc1a9c20a3a68eac484738d4faa3132
|
[] |
no_license
|
howiemac/mp
|
b5402be12cfe3b3a4be77adb673dc20371c3cf4b
|
694568f45020f7fe2ed2b8914ddc875aada852d1
|
refs/heads/master
| 2020-05-22T09:33:53.160473
| 2018-04-07T22:09:14
| 2018-04-07T22:09:14
| 34,397,381
| 0
| 1
| null | 2017-08-17T13:04:31
| 2015-04-22T15:05:42
|
Python
|
UTF-8
|
Python
| false
| false
| 4,840
|
#! /usr/bin/python
#
# script to archive the master music system - REQUIRES mp to be running at the start
## - pulls from the archive, the slave log since the last update # CURRENTLY DISABLED
## - updates master system with the previous slave log # CURRENTLY DISABLED
# (note: up to here could alternatively be done in advance - eg immediately after storing the previous slave update in archive)
# - pushes => archive
# - current (master) max uid (retained for use in next update - see "last update max uid" below)
# - last update max uid (slave will checked this against its own record, to ensure that we are in sync on the prior update)
# - latest music SQL dump (in full)
# - latest mp code (in full, incuding evoke)
# - renames master log, so as not to be accidentally reused by slave
# - data additions since last update
#
# ASSUMES no more than one update per day
#
# NOTE(review): Python 2 script (print statements) - must be run with python2.
#from sys import argv
import os, sys, copy, subprocess
import datetime

# date stamp used only for progress messages (YYMMDD)
today = datetime.date.today().strftime("%y%m%d")
print "starting archive %s..." % today

# required folders
#archive="/Volumes/TRANSFER/"
archive="/media/howie/archive/"
arccodefolder=archive+"code/mp.backup/"
arcdatafolder=archive+"data/music/"
homefolder="/home/howie/"
codefolder=homefolder+'code/'
mpfolder=codefolder+'mp/music/'
backupfolder=codefolder+"backup/mp/"
datafolder=homefolder+'data/music/'
#updatefolders - see below

# fetch the slave log
print "fetching log(s)..."
# newest mp.backup.* folder already in the archive (lexicographic max)
xarchivefolder= arccodefolder+sorted([i for i in os.listdir(arccodefolder) if i.startswith("mp.backup.")])[-1]+'/'
log=[i for i in os.listdir(xarchivefolder) if i.startswith("eee.")][0]
# copy it to the mp/logs/additions folder
os.system("cp %s%s %slogs/additions/%s " % (xarchivefolder,log,mpfolder,log))
# force mp to update
os.system('wget http://127.0.0.1:8088/1/add_logs')
f=open('add_logs')
print f.read()
f.close()
os.remove('add_logs') # os.system('rm add_logs')
# stop mp, before fetching any code or data
os.system('wget http://127.0.0.1:8088/halt')
os.remove('halt') # os.system('rm halt')
# create new update folder
#
# make sure that maxuid will be greater than xuid, by adding a page of kind=='archive'
os.system('''mysql music -e "insert into pages (kind, stage) values ('archive', 'hide');"''')
# get the maxuid
os.system('mysql music -e "select max(uid) from pages" >maxuid')
# maxuid output: header on line 0, value on line 1
f=open("maxuid",'r')
maxuid=int(f.read().split('\n')[1])
f.close()
# define the folders
updatefolder=backupfolder+("mp.backup.%s/" % maxuid)
xupdatefolder= backupfolder+sorted([i for i in os.listdir(backupfolder) if i.startswith("mp.backup.")])[-1]+'/'
# create the new one
os.mkdir(updatefolder)
#move the maxuid file into the new folder
os.system('mv maxuid %smaxuid' % updatefolder)
# create the xuid file
os.system('cp %smaxuid %sxuid' % (xupdatefolder,updatefolder))
# get the xuid
f=open("%sxuid" % updatefolder,'r')
xuid=int(f.read().split('\n')[1])
f.close()
## copy maxuid to mp/music/logs/
#os.system('cp %smaxuid %smp/music/logs/maxuid' % (updatefolder,codefolder))
# rename the master log
os.chdir(codefolder)
if os.path.isfile("mp/music/logs/mp.log"):
    os.rename("mp/music/logs/mp.log","mp/music/logs/mac.%s.mp.log" % maxuid)
# backup the music sql database
print "fetching music database..."
os.system('mysqldump music > %smusic.mysql.dump' % updatefolder)
# tar and bzip the code
print "bundling code..."
os.system("tar cf - mp/ | bzip2 -f > %smp.tar.bz2" % updatefolder)
# copy the data additions
print "archiving data..."
if True:
    # - identify the data sub-folders
    # uids are laid out on disk as XXX/YYY/ from the zero-padded 9-digit uid
    s="%09d" % xuid
    subfolders=["%s/%s/" % (s[:-6],s[-6:-3],)]
    uid=copy.copy(xuid)
    while (maxuid//1000) > (uid//1000): # we have more than one subfolder
        uid+=1000
        s="%09d" % uid
        subfolder="%s/%s/" % (s[:-6],s[-6:-3],)
        subfolders.append(subfolder)
        # create the new archive folder
        os.makedirs(arcdatafolder+subfolder)
    # - copy the files
    files=[]
    for subfolder in subfolders:
        for i in os.listdir(datafolder+subfolder):
            #
            # print "XUID=",xuid
            # print i
            #
            if i and (i.find(".")>0): # i.e. ignoring hidden folders and "no-dot" subfolders such as 'thumbs'
                # only files newer than the previous update (uid prefix > xuid)
                if int(i.split(".")[0])>xuid:
                    os.system("cp -v %s %s" % (datafolder+subfolder+i,arcdatafolder+subfolder+i))
                    files.append(subfolder+i+"\n")
    # copy file list to datafiles.txt in update folder
    f=open(updatefolder+'datafiles.txt','w')
    f.writelines(files)
    f.close()
# now, all of the data additions are archived
# copy the update folder to the archive
print "archiving code and database..."
os.system('cp -r %s %s' % (updatefolder[:-1],arccodefolder[:-1]))
### restart the system
#print "restarting"
#os.chdir(mpfolder+"code/")
#subprocess.call('./start', shell=True) # restart the system
# and we are done!
print "archive %s completed" % maxuid
|
[
"howiemac@gmail.com"
] |
howiemac@gmail.com
|
|
0dfea63cd551aaa3e88d3dd5f3e72bf1481714c3
|
ca4da546f815ef7e14fd79dfbc5a0c3f9f8c72c9
|
/test/test_ICUNormalizer2Filter.py
|
096bc618004010da29ad3ea48348ffd0537e8eda
|
[
"Apache-2.0"
] |
permissive
|
qiugen/pylucene-trunk
|
be955aedca2d37411f0683e244c30b102d4839b4
|
990079ff0c76b972ce5ef2bac9b85334a0a1f27a
|
refs/heads/master
| 2021-01-18T08:46:38.817236
| 2012-07-18T16:18:45
| 2012-07-18T16:18:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,110
|
py
|
# -*- coding: utf-8 -*-
# ====================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ====================================================================
#
# Port of java/org/apache/lucene/analysis/icu/ICUNormalizer2Filter.java
# using IBM's C++ ICU wrapped by PyICU (http://pyicu.osafoundation.org)
try:
from icu import Normalizer2, UNormalizationMode2
except ImportError, e:
pass
from unittest import main
from BaseTokenStreamTestCase import BaseTokenStreamTestCase
from lucene import *
class TestICUNormalizer2Filter(BaseTokenStreamTestCase):
    """Tests for ICUNormalizer2Filter (ICU normalization of token streams)."""

    def testDefaults(self):
        """Default mode: NFKC_CF — case folding plus compatibility normalization."""
        from lucene.ICUNormalizer2Filter import ICUNormalizer2Filter

        class analyzer(PythonAnalyzer):
            def tokenStream(_self, fieldName, reader):
                return ICUNormalizer2Filter(WhitespaceTokenizer(Version.LUCENE_CURRENT, reader))
        a = analyzer()
        # case folding
        self._assertAnalyzesTo(a, "This is a test",
                               [ "this", "is", "a", "test" ])
        # case folding
        self._assertAnalyzesTo(a, "Ruß", [ "russ" ])
        # case folding
        self._assertAnalyzesTo(a, u"ΜΆΪΟΣ", [ u"μάϊοσ" ])
        self._assertAnalyzesTo(a, u"Μάϊος", [ u"μάϊοσ" ])
        # supplementary case folding
        self._assertAnalyzesTo(a, u"𐐖", [ u"𐐾" ])
        # normalization
        self._assertAnalyzesTo(a, u"ﴳﴺﰧ", [ u"طمطمطم" ])
        # removal of default ignorables
        self._assertAnalyzesTo(a, u"क्ष", [ u"क्ष" ])

    def testAlternate(self):
        """Explicit normalizer: NFC instance run in DECOMPOSE mode yields NFD."""
        from lucene.ICUNormalizer2Filter import ICUNormalizer2Filter

        class analyzer(PythonAnalyzer):
            # specify nfc with decompose to get nfd
            def tokenStream(_self, fieldName, reader):
                return ICUNormalizer2Filter(WhitespaceTokenizer(Version.LUCENE_CURRENT, reader),
                                            Normalizer2.getInstance(None, "nfc", UNormalizationMode2.DECOMPOSE))
        a = analyzer()
        # decompose EAcute into E + combining Acute
        self._assertAnalyzesTo(a, u"\u00E9", [ u"\u0065\u0301" ])
if __name__ == "__main__":
    import sys, lucene
    # only run if PyICU is importable; otherwise skip silently
    try:
        import icu
    except ImportError:
        pass
    else:
        lucene.initVM()
        # '-loop' repeats the suite forever (leak hunting), ignoring failures
        if '-loop' in sys.argv:
            sys.argv.remove('-loop')
            while True:
                try:
                    main()
                except:
                    pass
        else:
            main()
|
[
"roman.chyla@gmail.com"
] |
roman.chyla@gmail.com
|
ee15b4c52486572ddb7c2958f1a220ec2a1e3528
|
a19cd9aebb697fc1dd384e9ce8596d6cc728e6f2
|
/fabfile.py
|
88dfdbee00ddc4cb3a8ed9ede23f657a74cb2a73
|
[] |
no_license
|
aclark4life/Debian-Deploy-Plone
|
315c52e5dfcf278f7e5b2f2dfc5f1045d0518d99
|
2a779c30f1f4b83f4f715ea5f5ae2a93f23afdda
|
refs/heads/master
| 2021-06-04T03:57:38.743523
| 2016-07-27T09:44:30
| 2016-07-27T09:44:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| true
| false
| 2,580
|
py
|
# A work in progress
from fabric.api import env, local, put, run
# Fabric connects as root; keep going when a remote command fails.
env.user = 'root'
env.warn_only = True
# Form fields POSTed to Plone's @@plone-addsite view (see create_site).
FORM_VARS = ('form.submitted:boolean=True',
             'extension_ids:list=plonetheme.sunburst:default',
             'setup_content:boolean=true')
# Apache module configs to symlink into mods-enabled (see configure_apache).
MODULE_CONFS = ('filter.load', 'proxy.conf', 'proxy.load',
                'proxy_http.load', 'rewrite.load')
# Debian packages required to build Python and run Plone behind Apache.
PACKAGES = "apache2 apache2-dev build-essential less libbz2-dev libjpeg62 libjpeg62-dev libpng "
PACKAGES += "libpng-dev libreadline-dev libssl-dev "
PACKAGES += "rsync subversion unzip zlib1g-dev"
def deploy():
    """Full deployment, in dependency order: SSH key, packages, Python, Plone, Apache."""
    copy_pub_key()
    update_packages()
    install_packages()
    install_python()
    install_plone()
    configure_apache()
def update_packages():
    """Refresh the apt index and apply safe upgrades on the target host."""
    run('aptitude update')
    run('aptitude -y safe-upgrade')
def copy_pub_key():
    """Install the local SSH public key as root's authorized_keys (enables key auth)."""
    run('mkdir /root/.ssh')
    run('chmod 700 /root/.ssh')
    put('id_rsa.pub', '/root/.ssh/authorized_keys')
def install_packages():
    """Install the system package set listed in PACKAGES."""
    run('aptitude -y install %s' % PACKAGES)
def install_python():
    """Install system Python, pip and virtualenv, then build an isolated
    Python 2.6 from the collective buildout (used later by install_plone)."""
    run('aptitude -y install python')
    put('distribute_setup.py', 'distribute_setup.py')
    run('python distribute_setup.py')
    run('easy_install pip')
    run('pip install virtualenv')
    run('virtualenv --no-site-packages --distribute python')
    run('svn co http://svn.plone.org/svn/collective/buildout/python/')
    run('cd python; bin/python bootstrap.py -d')
    run('cd python; bin/buildout')
def install_plone():
    """Bootstrap and build Plone under /srv/plone, start it via supervisord
    as www-data, then create the initial site."""
    from time import sleep
    run('mkdir /srv/plone')
    put('plone.cfg', '/srv/plone/buildout.cfg')
    put('bootstrap.py', '/srv/plone/bootstrap.py')
    put('rc.local', '/etc/rc.local')
    run('cd /srv/plone; /root/python/python-2.6/bin/python2.6 bootstrap.py -d')
    install_theme()
    run('cd /srv/plone; bin/buildout')
    run('chown -R www-data:www-data /srv/plone')
    run('cd /srv/plone; sudo -u www-data bin/supervisord')
    # give supervisord a moment to bring Zope up before create_site hits it
    sleep(5)
    create_site()
def create_site():
    """Create the initial Plone site by POSTing the add-site form with curl."""
    site_url = 'http://127.0.0.1:8080/@@plone-addsite?site_id=Plone'
    form_args = ' -d '.join(FORM_VARS)
    run('curl -u admin:admin -d %s %s' % (form_args, site_url))
def install_theme():
    """Zip the local theme, upload it, unpack into /srv/plone and rsync
    the static assets into Apache's document area."""
    args = '-av --partial --progress --delete'
    local('zip -r theme.zip theme')
    put('theme.zip', 'theme.zip')
    run('cd /srv/plone; unzip -o /root/theme.zip')
    run('rsync %s /srv/plone/theme/perfectblemish/ /var/www/static/' % args)
def configure_apache():
    """Install the vhost, enable the proxy/rewrite modules and restart Apache."""
    put('000-default', '/etc/apache2/sites-enabled')
    run('mkdir /var/www/static')
    for conf in MODULE_CONFS:
        run('cd /etc/apache2/mods-enabled;ln -sf ../mods-available/%s' % conf)
    run('/etc/init.d/apache2 restart')
|
[
"aclark@aclark.net"
] |
aclark@aclark.net
|
390bd290bfcf3c4bcda4e837f0b09c4b9049499e
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/eventgrid/azure-mgmt-eventgrid/generated_samples/system_topics_get.py
|
3c841e84e51e5d149b8c34c211d2202bcf26188d
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,580
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.eventgrid import EventGridManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-eventgrid
# USAGE
python system_topics_get.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Sample: fetch an Event Grid system topic and print it.

    Auto-generated by AutoRest — edits are lost on regeneration. Credentials
    come from AZURE_CLIENT_ID / AZURE_TENANT_ID / AZURE_CLIENT_SECRET via
    DefaultAzureCredential (see the module docstring).
    """
    client = EventGridManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="8f6b6269-84f2-4d09-9e31-1127efcd1e40",
    )
    response = client.system_topics.get(
        resource_group_name="examplerg",
        system_topic_name="exampleSystemTopic2",
    )
    print(response)


# x-ms-original-file: specification/eventgrid/resource-manager/Microsoft.EventGrid/preview/2023-06-01-preview/examples/SystemTopics_Get.json
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
ac86e7fd216d0562913ad2a8cb7dfa6b323d20d3
|
24595e74a83dcd263e92c121210c710c4438d979
|
/lib/hammerlib.py
|
d5004d47f7133e87ce58e9a4ad8c21608fc953f8
|
[] |
no_license
|
amyth/hammer
|
94dde5e6182da8368ed9859b4213226ac4543225
|
54e4fb07ee81055db47ccfd77ee6fc55bdb8fbe8
|
refs/heads/master
| 2021-01-17T15:30:30.717196
| 2016-05-20T07:00:43
| 2016-05-20T07:00:43
| 55,591,019
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,443
|
py
|
from __future__ import division
def levenshtein_distance(x1, x2):
    """Return the Levenshtein (edit) distance between two strings.

    Classic dynamic-programming formulation keeping one row at a time;
    the shorter string indexes the row to bound memory.
    """
    if len(x1) > len(x2):
        x1, x2 = x2, x1
    previous_row = list(range(len(x1) + 1))
    for row_index, ch_long in enumerate(x2):
        current_row = [row_index + 1]
        for col_index, ch_short in enumerate(x1):
            if ch_short == ch_long:
                current_row.append(previous_row[col_index])
            else:
                cheapest = min(previous_row[col_index],
                               previous_row[col_index + 1],
                               current_row[-1])
                current_row.append(1 + cheapest)
        previous_row = current_row
    return previous_row[-1]
def spellmatch(x, y):
    """Return the percentage similarity between two strings, derived from
    their Levenshtein distance relative to the combined length."""
    distance = levenshtein_distance(x, y)
    total = len(x) + len(y)
    score = -(((distance / total) * 100) - 100)
    return round(float(score), 2)
def abbrmatch(x, y):
    """
    Returns the match percentage between two strings assuming one of
    the strings is the abbreviation for the other.
    """
    # Separator strings collapsed into spaces before tokenising.
    sl = ['-', '&', ',', ', ', ' - ', ';', '; ', '/', '/ ', ' / ']
    # Ensure x is the shorter string (the presumed abbreviation).
    if len(x) > len(y):
        x, y = y, x
    for n in sl:
        x = x.replace(n, ' ')
        y = y.replace(n, ' ')
    xl = [n.lower().strip() for n in x.split()]
    yl = [n.lower().strip() for n in y.split()]
    ps = []
    # NOTE(review): enumerate(xl[0]) iterates the *characters* of the first
    # token, not the tokens of xl — presumably enumerate(xl) was intended;
    # confirm against callers. Also, despite the docstring, this returns the
    # list of per-item distances, not a percentage.
    for i, n in enumerate(xl[0]):
        # compare against the initial letters of y's tokens (the acronym)
        ps.append(levenshtein_distance(n, ''.join([z[0] for z in yl])))
    return ps
|
[
"aroras.official@gmail.com"
] |
aroras.official@gmail.com
|
bc0e8083bcf514fd9f3302b7fbdfdf561af9d5ae
|
30a1a391444181473b1af54c994bb81781fe830d
|
/Recusrion-and-Dynamic-Programming/triple-step.py
|
1f3a69e6be39a7e21e42a65c37f49bc49c7ad24e
|
[] |
no_license
|
sharmayuvraj/cracking-the-coding-interview
|
8b2a4760041fec98252f32c030a267c9951f0a18
|
b8e63d4190e82cc6d4677ba3dbe27f991c34da8a
|
refs/heads/master
| 2023-04-16T15:27:27.072935
| 2021-04-26T12:18:08
| 2021-04-26T12:18:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 830
|
py
|
"""
A child is running up a staircase with n steps and can hop either 1 step, 2 steps or 3 steps at a time.
Implement a method to count how many possible ways the child can run up the satirs.
"""
# Recursive Method
def countWays(n):
    """Count the ways to climb ``n`` steps taking 1, 2 or 3 steps at a time,
    using memoised recursion (same results as the recursive helper)."""
    cache = [-1] * (n + 1)

    def climb(steps):
        if steps < 0:
            return 0
        if steps == 0:
            return 1
        if cache[steps] > -1:
            return cache[steps]
        cache[steps] = climb(steps - 1) + climb(steps - 2) + climb(steps - 3)
        return cache[steps]

    return climb(n)
def countWaysRecursive(n, memo):
    """Memoised recursion: number of 1/2/3-step combinations summing to ``n``.

    ``memo`` is a caller-supplied list of at least ``n + 1`` slots,
    pre-filled with -1 to mean "not yet computed".
    """
    if n < 0:
        return 0
    if n == 0:
        return 1
    if memo[n] > -1:
        return memo[n]
    memo[n] = sum(countWaysRecursive(n - step, memo) for step in (1, 2, 3))
    return memo[n]
# Iterative Method
def countWaysIterative(n) :
    """Count the ways to climb ``n`` steps taking 1, 2 or 3 at a time (bottom-up DP).

    Fixes an IndexError in the original for n < 3: the memo table was
    unconditionally seeded with three base values (memo[0..2]) even when it
    had fewer than three slots. Also returns 0 for negative ``n``, matching
    countWaysRecursive's convention.
    """
    if n < 0:
        return 0
    base = (1, 1, 2)  # ways for n = 0, 1, 2
    if n < 3:
        return base[n]
    memo = [-1] * (n + 1)
    memo[0], memo[1], memo[2] = base
    for i in range(3, n + 1) :
        memo[i] = memo[i - 1] + memo[i - 2] + memo[i - 3]
    return memo[n]
|
[
"anant.kaushik2@gmail.com"
] |
anant.kaushik2@gmail.com
|
4801e1eaf02b1557859585316af8ce5e240b6fa3
|
281a10505f8044dbed73f11ed731bd0fbe23e0b5
|
/expenseApp/migrations/0012_auto_20181023_1608.py
|
18e381b3c7040d31723ae82e68cb0a57a0d68e76
|
[
"Apache-2.0"
] |
permissive
|
cs-fullstack-fall-2018/project3-django-jpark1914
|
7c6f57ab5f8055c11ac5b9d3c8bf0aa5057008d7
|
53bca13243d7e50263ec25b2fb8a299a8bbada1c
|
refs/heads/master
| 2020-04-02T00:59:33.254360
| 2018-10-29T04:58:42
| 2018-10-29T04:58:42
| 153,831,819
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,053
|
py
|
# Generated by Django 2.0.6 on 2018-10-23 16:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 2.0.6): adds TransactionModel, renames
    ExpensesModel to AccountModel, and links transactions to accounts."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('expenseApp', '0011_auto_20181022_1942'),
    ]

    operations = [
        migrations.CreateModel(
            name='TransactionModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('amount', models.IntegerField()),
                ('time_of_transaction', models.DateField()),
            ],
        ),
        migrations.RenameModel(
            old_name='ExpensesModel',
            new_name='AccountModel',
        ),
        migrations.AddField(
            model_name='transactionmodel',
            name='account',
            # each transaction belongs to one account; deleting the account
            # cascades to its transactions
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='expenseApp.AccountModel'),
        ),
    ]
|
[
"parkerj4321@gmail.com"
] |
parkerj4321@gmail.com
|
942b224bad7b2c2a6138e375dec16529a6d08fac
|
c91e32b5e7a28fd31698764086b88203fd3c8029
|
/root_numpy/info.py
|
c669cbd8832aebac1169448f7ae98cad8eeb4a39
|
[
"MIT"
] |
permissive
|
balarsen/root_numpy
|
5b409a1d90d499f2677990a95246e19d9f596144
|
6229f4eb7ab7884b1950210c92275299d631b9da
|
refs/heads/master
| 2021-01-15T07:54:23.935985
| 2014-02-10T23:10:35
| 2014-02-10T23:10:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 394
|
py
|
"""
_
_ __ ___ ___ | |_ _ __ _ _ _ __ ___ _ __ _ _
| '__/ _ \ / _ \| __| | '_ \| | | | '_ ` _ \| '_ \| | | |
| | | (_) | (_) | |_ | | | | |_| | | | | | | |_) | |_| |
|_| \___/ \___/ \__|___|_| |_|\__,_|_| |_| |_| .__/ \__, | {0}
|_____| |_| |___/
"""
__version__ = '3.2.0.dev'
__doc__ = __doc__.format(__version__)
|
[
"noel.dawe@gmail.com"
] |
noel.dawe@gmail.com
|
48b0f55c8b8802c52876e069c124eb8a0fadbd56
|
add74ecbd87c711f1e10898f87ffd31bb39cc5d6
|
/xcp2k/classes/_move_mm_charge2.py
|
9b960a7447c8177d8674a046b34f493d633239db
|
[] |
no_license
|
superstar54/xcp2k
|
82071e29613ccf58fc14e684154bb9392d00458b
|
e8afae2ccb4b777ddd3731fe99f451b56d416a83
|
refs/heads/master
| 2021-11-11T21:17:30.292500
| 2021-11-06T06:31:20
| 2021-11-06T06:31:20
| 62,589,715
| 8
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 985
|
py
|
from xcp2k.inputsection import InputSection
class _move_mm_charge2(InputSection):
    """Input section for CP2K's MOVE_MM_CHARGE block."""

    def __init__(self):
        InputSection.__init__(self)
        self.Atom_index_1 = None
        self.Atom_index_2 = None
        self.Alpha = None
        self.Radius = None
        self.Corr_radius = None
        self._name = "MOVE_MM_CHARGE"
        self._keywords = {
            'Atom_index_1': 'ATOM_INDEX_1',
            'Atom_index_2': 'ATOM_INDEX_2',
            'Alpha': 'ALPHA',
            'Radius': 'RADIUS',
            'Corr_radius': 'CORR_RADIUS',
        }
        self._aliases = {'Mm1': 'Atom_index_1', 'Mm2': 'Atom_index_2'}

    @property
    def Mm1(self):
        """Alias for ``Atom_index_1``."""
        return self.Atom_index_1

    @Mm1.setter
    def Mm1(self, value):
        self.Atom_index_1 = value

    @property
    def Mm2(self):
        """Alias for ``Atom_index_2``."""
        return self.Atom_index_2

    @Mm2.setter
    def Mm2(self, value):
        self.Atom_index_2 = value
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
fc61d8ff3efe35ece110d4fef6228e3793921dc0
|
f74dd098c3e665d8f605af5ebe7e2874ac31dd2f
|
/aiogithubapi/models/base.py
|
086dcb1b6e17eb22f4c9629e7ea27c46c0145156
|
[
"MIT"
] |
permissive
|
ludeeus/aiogithubapi
|
ce87382698827939aaa127b378b9a11998f13c06
|
90f3fc98e5096300269763c9a5857481b2dec4d2
|
refs/heads/main
| 2023-08-20T19:30:05.309844
| 2023-08-14T20:24:21
| 2023-08-14T20:24:21
| 198,505,021
| 21
| 20
|
MIT
| 2023-09-11T06:12:10
| 2019-07-23T20:39:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,080
|
py
|
"""Base class for all GitHub objects."""
from __future__ import annotations
from logging import Logger
from typing import Any, Dict
from ..const import LOGGER
# Raw-payload keys that are deliberately never mapped onto model attributes.
IGNORE_KEYS = ("node_id", "performed_via_github_app", "_links")
class GitHubBase:
    """Base class for all GitHub objects."""

    # Shared logger for every GitHub object.
    logger: Logger = LOGGER

    @staticmethod
    def slugify(value: str) -> str:
        """Return *value* lower-cased with dashes turned into underscores."""
        return str(value).lower().replace("-", "_")
class GitHubDataModelBase(GitHubBase):
    """Base class for all GitHub data objects."""

    _raw_data: Any | None = None
    _log_missing: bool = True
    _process_data: bool = True
    _slugify_keys: bool = True

    def __init__(self, data: Dict[str, Any]) -> None:
        """Store the raw payload and map its known keys onto attributes."""
        self._raw_data = data
        if self._process_data:
            for raw_key, raw_value in self._raw_data.items():
                attr = self.slugify(raw_key) if self._slugify_keys else raw_key
                if hasattr(self, attr):
                    # a _generate_<key> method, when present, transforms the
                    # raw value before assignment
                    generator = getattr(self, f"_generate_{attr}", None)
                    mapped = generator(raw_value) if generator else raw_value
                    setattr(self, attr, mapped)
                elif self._log_missing and attr not in IGNORE_KEYS:
                    self.logger.debug(
                        "'%s' is missing key '%s' for %s",
                        self.__class__.__name__,
                        attr,
                        type(raw_value),
                    )
        self.__post_init__()

    def __post_init__(self):
        """Hook for subclasses; runs after attribute mapping."""

    @property
    def as_dict(self) -> Dict[str, Any]:
        """Return public attributes as a plain dict, expanding nested models."""

        def _expand(item: Any) -> Any:
            if isinstance(item, GitHubDataModelBase):
                return item.as_dict
            if isinstance(item, list):
                return [_expand(entry) for entry in item]
            return item

        return {
            name: _expand(val)
            for name, val in self.__dict__.items()
            if not name.startswith("_")
        }
|
[
"noreply@github.com"
] |
ludeeus.noreply@github.com
|
9e86d85037eff55e36fd681cacb39e4363bb8e6d
|
007c1bb62ee70fb387b24bac9387da90745a85db
|
/development/inelastic/vesuvio_calibration/VesuvioGeometryEnergyResolutionTest.py
|
a7ab85cc8ecbd72bb9eb655183d1bb744c990097
|
[] |
no_license
|
mantidproject/scripts
|
fc14040f0674fda31b28bbc668a923fecc00fe99
|
f5a7b79825c7bdd8977e1409967bce979e4ca690
|
refs/heads/master
| 2021-01-17T11:36:00.426451
| 2017-08-25T13:01:26
| 2017-08-25T13:01:26
| 3,379,430
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,319
|
py
|
#pylint: disable=too-many-public-methods
import unittest
from mantid.api import (ITableWorkspace, WorkspaceGroup)
from mantid.simpleapi import *
class VesuvioGeometryEnergyResolutionTest(unittest.TestCase):
    """System test pinning the output of VesuvioGeometryEnergyResolution."""

    def tearDown(self):
        # drop every workspace a test created
        mtd.clear()

    def _execute_resolution_algorithm(self, **argv):
        """Run the algorithm with default args overridden by ``argv``.

        Returns a (mean_table, resolution_table) tuple.
        """
        default_args = {
            'InstrumentParFile': 'IP0005.par',
            'MonteCarloEvents': 1000
        }
        default_args.update(argv)
        output, mean = VesuvioGeometryEnergyResolution(**default_args)
        return (mean, output)

    def _validate_result_shape(self, mean, resolution):
        """
        Validates the shape of the result tables.
        """
        self.assertTrue(isinstance(mean, ITableWorkspace))
        self.assertEqual(mean.columnCount(), 6)
        self.assertEqual(mean.rowCount(), 24)
        self.assertTrue(isinstance(resolution, ITableWorkspace))
        self.assertEqual(resolution.columnCount(), 17)
        self.assertEqual(resolution.rowCount(), 196)

    def test_resolution(self):
        """
        Check values calculated by resolution algorithm match those expected.
        """
        mean, resolution = self._execute_resolution_algorithm()
        self._validate_result_shape(mean, resolution)
        # Validate mean values
        mean_values = mean.column('Mean')
        # number of decimal places for assertAlmostEqual comparisons
        TOLERANCE = 7
        self.assertAlmostEqual(mean_values[0], 0.5023026, places=TOLERANCE)
        self.assertAlmostEqual(mean_values[1], 0.9461753, places=TOLERANCE)
        self.assertAlmostEqual(mean_values[2], 0.7906631, places=TOLERANCE)
        self.assertAlmostEqual(mean_values[3], 0.0298165, places=TOLERANCE)
        self.assertAlmostEqual(mean_values[4], 0.0206698, places=TOLERANCE)
        self.assertAlmostEqual(mean_values[5], 0.2581127, places=TOLERANCE)
        self.assertAlmostEqual(mean_values[6], 0.2972636, places=TOLERANCE)
        self.assertAlmostEqual(mean_values[7], 0.0300434, places=TOLERANCE)
        self.assertAlmostEqual(mean_values[8], 0.3971947, places=TOLERANCE)
        self.assertAlmostEqual(mean_values[9], 7.1871166, places=TOLERANCE)
        self.assertAlmostEqual(mean_values[10], 0.4046330, places=TOLERANCE)
        self.assertAlmostEqual(mean_values[11], 7.6269999, places=TOLERANCE)
        self.assertAlmostEqual(mean_values[12], 55.4417038, places=TOLERANCE)
        self.assertAlmostEqual(mean_values[13], 55.4496880, places=TOLERANCE)
        self.assertAlmostEqual(mean_values[14], 140.3843994, places=TOLERANCE)
        self.assertAlmostEqual(mean_values[15], 53.2056618, places=TOLERANCE)
        self.assertAlmostEqual(mean_values[16], 53.2166023, places=TOLERANCE)
        self.assertAlmostEqual(mean_values[17], 31.4454365, places=TOLERANCE)
        self.assertAlmostEqual(mean_values[18], 72.5857315, places=TOLERANCE)
        self.assertAlmostEqual(mean_values[19], 72.5914993, places=TOLERANCE)
        self.assertAlmostEqual(mean_values[20], 45.2145004, places=TOLERANCE)
        self.assertAlmostEqual(mean_values[21], 143.4886322, places=TOLERANCE)
        self.assertAlmostEqual(mean_values[22], 143.4915924, places=TOLERANCE)
        self.assertAlmostEqual(mean_values[23], 97.8484039, places=TOLERANCE)

if __name__ == '__main__':
    unittest.main()
|
[
"dan@dan-nixon.com"
] |
dan@dan-nixon.com
|
a237c65c46210c1e9d9669557d5d36a9d956ca26
|
9df2fb0bc59ab44f026b0a2f5ef50c72b2fb2ceb
|
/sdk/dynatrace/azure-mgmt-dynatrace/generated_samples/monitors_get_sso_details_minimum_set_gen.py
|
660bfb94f9b61744d91446c0a3f37affbc7c7cf0
|
[
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
openapi-env-test/azure-sdk-for-python
|
b334a2b65eeabcf9b7673879a621abb9be43b0f6
|
f61090e96094cfd4f43650be1a53425736bd8985
|
refs/heads/main
| 2023-08-30T14:22:14.300080
| 2023-06-08T02:53:04
| 2023-06-08T02:53:04
| 222,384,897
| 1
| 0
|
MIT
| 2023-09-08T08:38:48
| 2019-11-18T07:09:24
|
Python
|
UTF-8
|
Python
| false
| false
| 1,631
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.dynatrace import DynatraceObservabilityMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-dynatrace
# USAGE
python monitors_get_sso_details_minimum_set_gen.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """Fetch and print the single-sign-on details of a Dynatrace monitor resource."""
    mgmt_client = DynatraceObservabilityMgmtClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-0000-0000-0000-000000000000",
    )
    sso_details = mgmt_client.monitors.get_sso_details(
        resource_group_name="myResourceGroup",
        monitor_name="myMonitor",
    )
    print(sso_details)


# x-ms-original-file: specification/dynatrace/resource-manager/Dynatrace.Observability/stable/2021-09-01/examples/Monitors_GetSSODetails_MinimumSet_Gen.json
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
openapi-env-test.noreply@github.com
|
5c51d540225abc65aba220b7b3bf103579c1f61a
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02913/s835324386.py
|
047781bcb31ffe52b746c9b0454b01728cb1b3cc
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 655
|
py
|
def main():
    """Solve AtCoder "Who Says a Pun?" (problem p02913).

    Reads from stdin:
        n -- length of the string (first line)
        s -- the string itself (second line)
    Prints the maximum length L such that some substring of length L has a
    second, non-overlapping occurrence starting at or after the first one's
    end (0 if no substring repeats).
    """
    import sys
    input = sys.stdin.readline

    n = int(input())
    s = input().rstrip()

    # Two-pointer sliding window over candidate substrings s[l:r].
    # Key monotonicity: if s[l:r] has no occurrence inside s[r:], no
    # extension s[l:r'] (r' > r) can have one either (it would contain
    # s[l:r] as a prefix), so l only ever advances and never retreats.
    l, r = 0, 0
    res = 0
    while r < n:
        r += 1
        if not s[l:r] in s[r:]:
            l += 1
        res = max(res, r - l)
    print(res)


if __name__ == '__main__':
    main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
b8bf3ead37ab6905d1d3250e0dcc5ad2e961386e
|
48a89c1ceb3a761f796ed054c59a44106adefba5
|
/src/moveit_pose.py
|
baf76cbf90210ec3cb175469d503eb360f02bae6
|
[] |
no_license
|
ayushgargdroid/hsr_custom_launch
|
ec404519cc9e8096c078468f5336fc6852204fda
|
f84d4e3e6dc3fcbcb00e65b05d69c9a33e43b94f
|
refs/heads/master
| 2020-09-20T14:26:41.354211
| 2019-12-05T21:36:16
| 2019-12-05T21:36:16
| 224,510,478
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,525
|
py
|
#!/usr/bin/env python
import sys
import copy
import rospy
import moveit_commander
import moveit_msgs.msg
import geometry_msgs.msg
import tf
import numpy as np
from math import pi
from std_msgs.msg import String
from moveit_commander.conversions import pose_to_list
if __name__ == '__main__':
    # Bring up MoveIt's C++ backend and register this ROS node.
    moveit_commander.roscpp_initialize(sys.argv)
    rospy.init_node('move_group_python_interface_tutorial',
                    anonymous=True)
    robot = moveit_commander.RobotCommander()
    scene = moveit_commander.PlanningSceneInterface()
    # Plan with the move group named "whole_body".
    group_name = "whole_body"
    group = moveit_commander.MoveGroupCommander(group_name)
    # display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',
    #                                                moveit_msgs.msg.DisplayTrajectory,
    #                                                queue_size=20)
    tf_listen = tf.TransformListener()
    tf_broadcast = tf.TransformBroadcaster()
    tf_ros = tf.TransformerROS()
    trans = rot = None
    rate = rospy.Rate(100)
    # Poll TF until the '/object' pose is available in the '/odom' frame.
    # NOTE(review): the condition looks inverted — `not rospy.is_shutdown()`
    # was probably intended; as written the loop keeps spinning even after a
    # shutdown request as long as the transform is missing. Confirm.
    while rospy.is_shutdown() or trans is None:
        try:
            (trans,rot) = tf_listen.lookupTransform('/odom', '/object', rospy.Time(0))
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            rate.sleep()
            continue
        rate.sleep()
    rospy.loginfo('Position: '+str(trans)+' Orientation: '+str(rot))
    # Offset transform: 0.4 units back along the object frame's x axis,
    # presumably a pre-grasp/approach pose — confirm against the robot setup.
    change = tf_ros.fromTranslationRotation((-0.4,0,0),(0,0,0,1))
    rospy.loginfo('\n'+str(change))
    actual = tf_ros.fromTranslationRotation(trans,rot)
    rospy.loginfo('\n'+str(actual))
    # Compose object pose with the offset (matrix product) to get the goal pose.
    pre = np.dot(actual,change)
    rospy.loginfo('\n'+str(pre))
    pre_rot = tf.transformations.quaternion_from_matrix(pre)
    pre_trans = tf.transformations.translation_from_matrix(pre)
    rospy.loginfo('Position: '+str(pre_trans)+' Orientation: '+str(pre_rot))
    # Build the Pose goal (quaternion order: tf returns [x, y, z, w]).
    pose_goal = geometry_msgs.msg.Pose()
    pose_goal.orientation.w = pre_rot[3]
    pose_goal.orientation.x = pre_rot[0]
    pose_goal.orientation.y = pre_rot[1]
    pose_goal.orientation.z = pre_rot[2]
    pose_goal.position.x = pre_trans[0]
    pose_goal.position.y = pre_trans[1]
    pose_goal.position.z = pre_trans[2]
    group.set_pose_target(pose_goal)
    # Plan and execute synchronously, then clear targets to avoid leftovers.
    plan = group.go(wait=True)
    group.stop()
    group.clear_pose_targets()
    # Keep re-broadcasting the computed goal frame ("pre") for visualisation.
    while not rospy.is_shutdown():
        tf_broadcast.sendTransform(pre_trans,
                                   pre_rot,
                                   rospy.Time.now(),
                                   "pre",
                                   "odom")
        rate.sleep()
|
[
"ayushgargdroid@gmail.com"
] |
ayushgargdroid@gmail.com
|
b8bcb78bbf8726eb8cc4098a99018c8f38990b6c
|
3c000380cbb7e8deb6abf9c6f3e29e8e89784830
|
/venv/Lib/site-packages/cobra/modelimpl/fv/apndg.py
|
9bbc9206e5d94cae540d5d9b7a425e781af43b51
|
[] |
no_license
|
bkhoward/aciDOM
|
91b0406f00da7aac413a81c8db2129b4bfc5497b
|
f2674456ecb19cf7299ef0c5a0887560b8b315d0
|
refs/heads/master
| 2023-03-27T23:37:02.836904
| 2021-03-26T22:07:54
| 2021-03-26T22:07:54
| 351,855,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,205
|
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class APndg(Mo):
    """Abstract MO (fv:APndg): a target request whose resolution has been postponed.

    Auto-generated ACI object-model code — mirrors APIC class metadata;
    the file header says "do not modify", so only documentation is added here.
    """

    meta = ClassMeta("cobra.model.fv.APndg")

    meta.isAbstract = True
    meta.moClassName = "fvAPndg"
    # NOTE(review): duplicate assignment below is present in the generated
    # source; it is redundant but harmless.
    meta.moClassName = "fvAPndg"
    meta.rnFormat = ""
    meta.category = MoCategory.REGULAR
    meta.label = "Represent a Target Request whose Resolution has been Postponed"
    meta.writeAccessMask = 0x1
    meta.readAccessMask = 0x1
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False

    meta.superClasses.add("cobra.model.naming.NamedObject")

    # Concrete subclasses specialising this abstract pending-request class.
    meta.concreteSubClasses.add("cobra.model.fv.PndgCtrct")
    meta.concreteSubClasses.add("cobra.model.fv.PndgCtrctEpgCont")
    meta.concreteSubClasses.add("cobra.model.fv.PndgAnyDef")
    meta.concreteSubClasses.add("cobra.model.fv.PndgEpCP")
    meta.concreteSubClasses.add("cobra.model.fv.PndgEpPCtrctInfo")
    meta.concreteSubClasses.add("cobra.model.fv.PndgRtdOutDef")
    meta.concreteSubClasses.add("cobra.model.fv.PndgRFltP")

    meta.rnPrefixes = [
    ]

    # childAction: implicit per-MO action flags.
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    # dn: full distinguished name, assigned at creation.
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    # name: short configurable identifier (max 16 chars).
    prop = PropMeta("str", "name", "name", 5577, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 16)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("name", prop)

    # nameAlias: longer display alias (max 63 chars).
    prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
    prop.label = "Name alias"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 63)]
    prop.regex = ['[a-zA-Z0-9_.-]+']
    meta.props.add("nameAlias", prop)

    # requestorDn: DN of the policy requestor (implicit).
    prop = PropMeta("str", "requestorDn", "requestorDn", 16605, PropCategory.REGULAR)
    prop.label = "DN of the Policy Requestor"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("requestorDn", prop)

    # rn: relative name within the parent, assigned at creation.
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    # status: created/modified/deleted lifecycle flags.
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    # targetDn: DN of the policy awaiting resolution (implicit).
    prop = PropMeta("str", "targetDn", "targetDn", 16604, PropCategory.REGULAR)
    prop.label = "DN of the Policy to Resolve"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("targetDn", prop)

    def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
        # Abstract class: no naming properties contribute to the RN.
        namingVals = []
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"bkhoward@live.com"
] |
bkhoward@live.com
|
602852152151ceb503e5cf7d75816f4089723ef5
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/ZaEFRDBZ7ZMTiSEce_17.py
|
b22158d637597759d56d9b831520a39cf3c26141
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 908
|
py
|
"""
You're given a string of words. You need to find the word "Nemo", and return a
string like this: `"I found Nemo at [the order of the word you find nemo]!"`.
If you can't find Nemo, return `"I can't find Nemo :("`.
### Examples
find_nemo("I am finding Nemo !") ➞ "I found Nemo at 4!"
find_nemo("Nemo is me") ➞ "I found Nemo at 1!"
find_nemo("I Nemo am") ➞ "I found Nemo at 2!"
### Notes
* `! , ? .` are always separated from the last word.
* Nemo will always look like _Nemo_ , and not _NeMo_ or other capital variations.
* _Nemo's_ , or anything that says _Nemo_ with something behind it, doesn't count as _Finding Nemo_.
* If there are multiple _Nemo's_ in the sentence, only return for the first one.
"""
def find_nemo(sentence):
    """Locate the exact word "Nemo" in a space-separated sentence.

    Returns "I found Nemo at <k>!" where <k> is the 1-based position of the
    first word that is exactly "Nemo" (so "Nemo's" or "NeMo" do not match),
    or "I can't find Nemo :(" when no such word exists.
    """
    words = sentence.split(' ')
    try:
        # list.index raises ValueError when "Nemo" is absent; catching only
        # ValueError (instead of the original bare except) avoids hiding
        # unrelated bugs such as a non-string argument.
        return "I found Nemo at %d!" % (words.index("Nemo") + 1)
    except ValueError:
        return "I can't find Nemo :("
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
7a4c1f18a33fabea011455db37153243c394e77c
|
5c5b75f09be052e3b2e8f40802100bb381cc041b
|
/src/kbqa/scripts/simpq_candgen.py
|
60c870c2e9df647020ce6db7ef6649f47d219903
|
[
"MIT"
] |
permissive
|
xuehuiping/TransDG
|
a6f408a03adadf7cedb40039094628213c63b4a1
|
ca55744594c5c8d6fe045bed499df72110880366
|
refs/heads/master
| 2022-04-22T10:49:53.245542
| 2020-04-24T02:05:38
| 2020-04-24T02:05:38
| 258,379,892
| 0
| 0
|
MIT
| 2020-04-24T02:04:40
| 2020-04-24T02:04:39
| null |
UTF-8
|
Python
| false
| false
| 7,755
|
py
|
"""
Generate candidate query graph for SimpQ
"""
import os
import json
import codecs
import shutil
import pickle
import argparse
from src.kbqa.utils.link_data import LinkData
from src.kbqa.utils.log_util import LogInfo
from src.kbqa.dataset.schema import Schema
class SimpleQCandidateGenerator:
    """Generate candidate query graphs (linked entity + 1-hop predicate) for SimpleQuestions."""

    def __init__(self, freebase_fp, links_fp, verbose=0):
        """
        :param freebase_fp: path to the FB2M triple file (tab-separated S, P, O)
        :param links_fp: path to precomputed entity-linking results
        :param verbose: 0 = basic flow only, 1 = detailed linking info
        """
        self.subj_pred_dict = {}  # <subject mid, set(predicates)>
        self.q_links_dict = {}  # <q_idx, [LinkData]>
        # verbose = 0: show basic flow of the process
        # verbose = 1: show detail linking information
        self.verbose = verbose
        self._load_fb_subset(freebase_fp)
        self._load_linkings(links_fp)

    def _load_fb_subset(self, fb_fp):
        """Load <subject, predicate> pairs from the FB2M triple file into self.subj_pred_dict."""
        LogInfo.begin_track('Loading freebase subset from [%s] ...', fb_fp)
        # Triples use URL-style ids ('www.freebase.com/m/0abc'); strip the
        # prefix and turn path separators into dots ('m.0abc').
        prefix = 'www.freebase.com/'
        pref_len = len(prefix)
        with codecs.open(fb_fp, 'r', 'utf-8') as br:
            lines = br.readlines()
        LogInfo.logs('%d lines loaded.', len(lines))
        for line_idx, line in enumerate(lines):
            if line_idx % 500000 == 0:
                # Progress report every 500K triples.
                LogInfo.logs('Current: %d / %d', line_idx, len(lines))
            s, p, _ = line.strip().split('\t')
            s = s[pref_len:].replace('/', '.')
            p = p[pref_len:].replace('/', '.')
            self.subj_pred_dict.setdefault(s, set([])).add(p)
        LogInfo.logs('%d related entities and %d <S, P> pairs saved.',
                     len(self.subj_pred_dict), sum([len(v) for v in self.subj_pred_dict.values()]))
        LogInfo.end_track()

    def _load_linkings(self, links_fp):
        """Load precomputed entity links (one tab-separated row per link) into self.q_links_dict."""
        with codecs.open(links_fp, 'r', 'utf-8') as br:
            for line in br.readlines():
                if line.startswith('#'):
                    # Comment line in the links file.
                    continue
                spt = line.strip().split('\t')
                q_idx, st, ed, mention, mid, wiki_name, feats = spt
                q_idx = int(q_idx)
                st = int(st)
                ed = int(ed)
                feat_dict = json.loads(feats)
                for k in feat_dict:
                    # Round linking features to 6 decimal places.
                    v = float('%.6f' % feat_dict[k])
                    feat_dict[k] = v
                link_data = LinkData(category='Entity',
                                     start=st, end=ed,
                                     mention=mention, comp='==',
                                     value=mid, name=wiki_name,
                                     link_feat=feat_dict)
                self.q_links_dict.setdefault(q_idx, []).append(link_data)
        LogInfo.logs('%d questions of link data loaded.', len(self.q_links_dict))

    def single_question_candgen(self, q_idx, qa, link_fp, schema_fp):
        """Generate and persist the link data and candidate schemas for one question.

        :param q_idx: question index
        :param qa: question dict; qa['targetValue'] holds the gold (entity, predicate, ...)
        :param link_fp: output path for the serialized links (reused if it exists)
        :param schema_fp: output path for the serialized candidate schemas
        """
        # =================== Linking first ==================== #
        if os.path.isfile(link_fp):
            # Reuse links saved by a previous run.
            gather_linkings = []
            with codecs.open(link_fp, 'r', 'utf-8') as br:
                for line in br.readlines():
                    tup_list = json.loads(line.strip())
                    ld_dict = {k: v for k, v in tup_list}
                    gather_linkings.append(LinkData(**ld_dict))
            LogInfo.logs('Read %d links from file.', len(gather_linkings))
        else:
            gather_linkings = self.q_links_dict.get(q_idx, [])
            for idx in range(len(gather_linkings)):
                gather_linkings[idx].gl_pos = idx
        LogInfo.begin_track('Show %d E links :', len(gather_linkings))
        if self.verbose >= 1:
            for gl in gather_linkings:
                LogInfo.logs(gl.display())
        LogInfo.end_track()

        # ==================== Save linking results ================ #
        # Write to a .tmp file first, then atomically move into place.
        if not os.path.isfile(link_fp):
            with codecs.open(link_fp + '.tmp', 'w', 'utf-8') as bw:
                for gl in gather_linkings:
                    bw.write(json.dumps(gl.serialize()) + '\n')
            shutil.move(link_fp + '.tmp', link_fp)
            LogInfo.logs('%d link data save to file.', len(gather_linkings))

        # ===================== simple predicate finding ===================== #
        # One candidate schema per (linked entity, outgoing predicate);
        # scored 1.0 iff it matches the gold entity/predicate pair exactly.
        gold_entity, gold_pred, _ = qa['targetValue']
        sc_list = []
        for gl_data in gather_linkings:
            entity = gl_data.value
            pred_set = self.subj_pred_dict.get(entity, set([]))
            for pred in pred_set:
                sc = Schema()
                sc.hops = 1
                sc.aggregate = False
                sc.main_pred_seq = [pred]
                sc.raw_paths = [('Main', gl_data, [pred])]
                sc.ans_size = 1
                if entity == gold_entity and pred == gold_pred:
                    sc.f1 = sc.p = sc.r = 1.
                else:
                    sc.f1 = sc.p = sc.r = 0.
                sc_list.append(sc)

        # ==================== Save schema results ================ #
        # p, r, f1, ans_size, hops, raw_paths, (agg)
        # raw_paths: (category, gl_pos, gl_mid, pred_seq)
        with codecs.open(schema_fp + '.tmp', 'w', 'utf-8') as bw:
            for sc in sc_list:
                sc_info_dict = {k: getattr(sc, k) for k in ('p', 'r', 'f1', 'ans_size', 'hops')}
                if sc.aggregate is not None:
                    sc_info_dict['agg'] = sc.aggregate
                opt_raw_paths = []
                for cate, gl, pred_seq in sc.raw_paths:
                    opt_raw_paths.append((cate, gl.gl_pos, gl.value, pred_seq))
                sc_info_dict['raw_paths'] = opt_raw_paths
                bw.write(json.dumps(sc_info_dict) + '\n')
        shutil.move(schema_fp + '.tmp', schema_fp)
        LogInfo.logs('%d schemas successfully saved into [%s].', len(sc_list), schema_fp)
def main(args):
    """Generate candidate query graphs for every SimpleQuestions entry.

    Expects args to provide: data_dir, freebase_dir, output_dir, verbose.
    Writes, under args.output_dir, one *_links and *_schema file per
    question (bucketed 1000 questions per sub-directory) plus an index
    file 'all_list' of all schema paths.
    """
    data_path = "%s/simpQ.data.pkl" % args.data_dir
    freebase_path = "%s/freebase-FB2M.txt" % args.freebase_dir
    links_path = "%s/SimpQ.all.links" % args.data_dir
    with open(data_path, 'rb') as br:
        qa_list = pickle.load(br)
    LogInfo.logs('%d SimpleQuestions loaded.' % len(qa_list))
    cand_gen = SimpleQCandidateGenerator(freebase_fp=freebase_path, links_fp=links_path,
                                         verbose=args.verbose)
    all_list_fp = args.output_dir + '/all_list'
    all_lists = []
    for q_idx, qa in enumerate(qa_list):
        LogInfo.begin_track('Entering Q %d / %d [%s]:',
                            q_idx, len(qa_list), qa['utterance'])
        # Bucket questions 1000 per sub-directory: data/0-999, data/1000-1999, ...
        sub_idx = int(q_idx / 1000) * 1000
        index = 'data/%d-%d/%d_schema' % (sub_idx, sub_idx + 999, q_idx)
        all_lists.append(index)
        sub_dir = '%s/data/%d-%d' % (args.output_dir, sub_idx, sub_idx + 999)
        if not os.path.exists(sub_dir):
            os.makedirs(sub_dir)
        schema_fp = '%s/%d_schema' % (sub_dir, q_idx)
        link_fp = '%s/%d_links' % (sub_dir, q_idx)
        if os.path.isfile(schema_fp):
            # Already generated by a previous run; keeps the script resumable.
            LogInfo.end_track('Skip this question, already saved.')
            continue
        cand_gen.single_question_candgen(q_idx=q_idx, qa=qa,
                                         link_fp=link_fp, schema_fp=schema_fp)
        LogInfo.end_track()
    # Write the index of all schema files, one per line, no trailing newline.
    with open(all_list_fp, 'w') as fw:
        for i, idx_str in enumerate(all_lists):
            if i == len(all_lists)-1:
                fw.write(idx_str)
            else:
                fw.write(idx_str + '\n')
if __name__ == '__main__':
    # CLI entry point:
    #   python simpq_candgen.py --data_dir D --freebase_dir F --output_dir O [--verbose 1]
    parser = argparse.ArgumentParser(description="SimpQ candidates generation")
    parser.add_argument('--data_dir', type=str, help="SimpQ data directory")
    parser.add_argument('--freebase_dir', type=str, help="Freebase subset directory")
    parser.add_argument('--output_dir', type=str, help="Output candidates directory")
    parser.add_argument('--verbose', type=int, default=0)
    parsed_args = parser.parse_args()
    main(parsed_args)
|
[
"1049136551@qq.com"
] |
1049136551@qq.com
|
e84fa4cea01f255f22566d3aa239eceff4c8ea21
|
afc677459e46635ceffccf60d1daf50e62694557
|
/ACME/math/perturb.py
|
9dc7c4abb0a9e47c4955e89d1793b41b1c231f16
|
[
"MIT"
] |
permissive
|
mauriziokovacic/ACME
|
056b06da4bf66d89087fcfcbe0fd0a2e255d09f3
|
2615b66dd4addfd5c03d9d91a24c7da414294308
|
refs/heads/master
| 2020-05-23T23:40:06.667416
| 2020-01-10T14:42:01
| 2020-01-10T14:42:01
| 186,997,977
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
import torch
def perturb(tensor):
"""
Perturbs the input tensor with random noise
Parameters
----------
tensor : Tensor
the input tensor
Returns
-------
Tensor
the perturbed tensor
"""
return torch.randn_like(tensor) * tensor + torch.randn_like(tensor)
|
[
"maurizio.kovacic@gmail.com"
] |
maurizio.kovacic@gmail.com
|
148cdb131a56fd63ff2ef0abeb3b986b26f89eea
|
ac4b9385b7ad2063ea51237fbd8d1b74baffd016
|
/.history/google/docs_quickstart_20210213142716.py
|
483f1642a07b3d11faa3808ec62f56fa18f7a9dd
|
[] |
no_license
|
preethanpa/ssoemprep
|
76297ef21b1d4893f1ac2f307f60ec72fc3e7c6f
|
ce37127845253c768d01aeae85e5d0d1ade64516
|
refs/heads/main
| 2023-03-09T00:15:55.130818
| 2021-02-20T06:54:58
| 2021-02-20T06:54:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,082
|
py
|
from __future__ import print_function
import pickle
import os.path
import io
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from googleapiclient.http import MediaIoBaseDownload
from oauth2client.service_account import ServiceAccountCredentials
# If modifying these scopes, delete the file token.pickle.
# NOTE(review): main() defines its own local SCOPES list that shadows this
# one — confirm which scope list is actually intended to be in effect.
SCOPES = ['https://www.googleapis.com/auth/documents.readonly', 'https://www.googleapis.com/auth/admin.directory.user', 'https://www.googleapis.com/auth/drive.file', 'https://www.googleapis.com/auth/drive.activity', 'https://www.googleapis.com/auth/drive.metadata', 'https://www.googleapis.com/auth/drive']# 'https://www.googleapis.com/auth/documents.readonly']
# The ID of a sample document.
# DOCUMENT_ID = '1bQkFcQrWFHGlte8oTVtq_zyKGIgpFlWAS5_5fi8OzjY'
# Drive file ID that main() downloads.
DOCUMENT_ID = '1sXQie19gQBRHODebxBZv4xUCJy-9rGpnlpM7_SUFor4'
def main():
    """Download the Drive file DOCUMENT_ID using service-account credentials.

    (The original generated docstring said "prints the title of a sample
    document", but the code below downloads the file's media content.)
    """
    from google.oauth2 import service_account
    import googleapiclient.discovery
    # NOTE(review): this local SCOPES shadows the module-level SCOPES above —
    # confirm which list is intended.
    SCOPES = ['https://www.googleapis.com/auth/documents.readonly', 'https://www.googleapis.com/auth/sqlservice.admin', 'https://www.googleapis.com/auth/drive.file']
    SERVICE_ACCOUNT_FILE = '/home/dsie/Developer/sandbox/3ray/3rml/kbc_process/google/domain-wide-credentials-gdrive.json'
    credentials = service_account.Credentials.from_service_account_file(
        SERVICE_ACCOUNT_FILE, scopes=SCOPES)
    # delegated_credentials = credentials.with_subject('abhi@third-ray.com')
    # service = build('docs', 'v1', credentials=credentials)
    drive_service = build('drive', 'v3', credentials=credentials)
    # Request the raw media bytes of the file.
    request = drive_service.files() .get_media(fileId=DOCUMENT_ID)
    print(request)
    # Stream the download chunk by chunk into an in-memory buffer.
    fh = io.BytesIO()
    downloader = MediaIoBaseDownload(fh, request)
    done = False
    while done is False:
        status, done = downloader.next_chunk()
        # print(f"Download % {int(status.progress() * 100)}")


if __name__ == '__main__':
    main()
|
[
"{abhi@third-ray.com}"
] |
{abhi@third-ray.com}
|
4c6abdca47eed860cbbbd80db8e8195616fdc873
|
7c3a066c2f4e3be8b3f8a418f0152f9d3de69599
|
/google_kickstart/15683.py
|
75639c55a00f4cf507ab52e131684ae11a8d1dab
|
[] |
no_license
|
wkdtjsgur100/algorithm-python
|
5c0fa5ac5f5c2b8618e53ab0f1f599427345734d
|
134809388baac48195630e1398b8c2af7526966c
|
refs/heads/master
| 2020-04-02T19:58:20.480391
| 2018-12-31T08:33:44
| 2018-12-31T08:33:44
| 154,753,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,143
|
py
|
import copy, sys
# cctv[t-1] lists, for camera type t (1..5), every orientation the camera can
# take; each orientation is the set of beam directions (indices into d below)
# that the camera covers simultaneously.
cctv=[
    [[0], [1], [2], [3]],
    [[0, 1], [2, 3]],
    [[0, 3], [0, 2], [2, 1], [1, 3]],
    [[1, 2, 3], [0, 1, 2], [0, 2, 3], [0, 1, 3]],
    [[0, 1, 2, 3]]
]
# Unit steps for the four beam directions: right, left, down, up.
d = [[0, 1], [0, -1], [1, 0], [-1, 0]]

# Grid size N x M and the grid itself from stdin
# (cell values: 0 empty, 1..5 camera type, 6 wall).
N, M = map(int, input().split())
m = [list(map(int, input().split())) for _ in range(N)]

# Collect (row, col, camera_type) for every camera on the grid.
cctv_poses = []
for i in range(len(m)):
    for j in range(len(m[0])):
        if 1 <= m[i][j] <= 5:
            cctv_poses.append((i, j, m[i][j]))

def observe(m, i, j, d):
    # Mark empty cells as observed (-1) along direction d, stopping at a
    # wall (6) or the edge of the grid; cameras themselves are passed through.
    if 0 <= i < N and 0 <= j < M and m[i][j] != 6:
        if m[i][j] == 0:
            m[i][j] = -1
        observe(m, i+d[0], j+d[1], d)

def simulate(m, cctv_i):
    # Try every orientation of camera cctv_i and recurse to the next camera;
    # returns the minimum count of unobserved (still 0) cells over all choices.
    global N, M
    if cctv_i == len(cctv_poses):
        # All cameras placed: count blind-spot cells.
        return sum(r.count(0) for r in m)
    i = cctv_poses[cctv_i][0]
    j = cctv_poses[cctv_i][1]
    cam_num = cctv_poses[cctv_i][2]
    # Snapshot the grid so each orientation starts from the same state.
    temp_m = copy.deepcopy(m)
    ret = sys.maxsize
    for sight in range(len(cctv[cam_num-1])):
        for d_num in cctv[cam_num-1][sight]:
            observe(m, i, j, d[d_num])
        ret = min(ret, simulate(m, cctv_i+1))
        m = copy.deepcopy(temp_m)
    return ret

print(simulate(m, 0))
|
[
"wkdtjsgur100@naver.com"
] |
wkdtjsgur100@naver.com
|
131d239b5a8e340e9dbe95bf2ef6172b9a91bb12
|
97ee5c0f2320aab2ca1b6ad0f18a4020dbd83d1c
|
/venv/Lib/site-packages/ibm_watson_machine_learning/libs/repo/swagger_client/models/error_schema_repository.py
|
ff5393cbefcc0d009faa312008628d9bfc6d0141
|
[] |
no_license
|
yusufcet/healty-hearts
|
4d80471e82a98ea1902b00c8998faed43f99616c
|
a4cd429484e857b849df08d93688d35e632b3e29
|
refs/heads/main
| 2023-05-28T13:57:09.323953
| 2021-05-06T04:15:27
| 2021-05-06T04:15:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,486
|
py
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
# (C) Copyright IBM Corp. 2020.
# #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# #
# http://www.apache.org/licenses/LICENSE-2.0
# #
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pprint import pformat
from six import iteritems
class ErrorSchemaRepository(object):
    """Swagger model: a trace identifier plus a list of repository errors.

    NOTE: originally produced by the swagger code generator; this is a
    behavior-preserving rewrite of the generated class.
    """

    def __init__(self):
        """Initialize an empty model with its Swagger type and JSON-key maps."""
        self.swagger_types = {
            'trace': 'str',
            'errors': 'list[ErrorRepository]'
        }
        self.attribute_map = {
            'trace': 'trace',
            'errors': 'errors'
        }
        self._trace = None
        self._errors = None

    @property
    def trace(self):
        """str: the trace of this ErrorSchemaRepository."""
        return self._trace

    @trace.setter
    def trace(self, trace):
        """Set the trace of this ErrorSchemaRepository."""
        self._trace = trace

    @property
    def errors(self):
        """list[ErrorRepository]: the errors of this ErrorSchemaRepository."""
        return self._errors

    @errors.setter
    def errors(self, errors):
        """Set the errors of this ErrorSchemaRepository."""
        self._errors = errors

    def to_dict(self):
        """Return the model properties as a plain dict, recursing into models."""
        def _plain_item(item):
            # Nested swagger models expose to_dict(); everything else passes through.
            return item.to_dict() if hasattr(item, "to_dict") else item

        def _plain(value):
            if isinstance(value, list):
                return [_plain_item(item) for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: _plain_item(val) for key, val in value.items()}
            return value

        return {attr: _plain(getattr(self, attr)) for attr in self.swagger_types}

    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when both objects carry identical attribute dicts."""
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
|
[
"yusufcetin9999@gmail.com"
] |
yusufcetin9999@gmail.com
|
1eaf0839de9bc5dadf576355771ad319fe60ef55
|
2f330fc050de11676ab46b963b7878882e9b6614
|
/test/test_conversations_api.py
|
57e18e0156fc8c9ab6b181d2209168880857eefa
|
[
"Apache-2.0"
] |
permissive
|
zerodayz/memsource-cli-client
|
609f48c18a2b6daaa639d4cb8a61da43763b5143
|
c2574f1467539a49e6637c874e88d75c7ef789b3
|
refs/heads/master
| 2020-08-01T12:43:06.497982
| 2019-09-30T11:14:13
| 2019-09-30T11:14:13
| 210,999,654
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,966
|
py
|
# coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:support@memsource.com>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import memsource_cli
from memsource_cli.api.conversations_api import ConversationsApi # noqa: E501
from memsource_cli.rest import ApiException
class TestConversationsApi(unittest.TestCase):
    """ConversationsApi unit test stubs.

    Auto-generated by swagger-codegen: every test method is an
    unimplemented placeholder (`pass`) awaiting real assertions.
    """

    def setUp(self):
        # Fresh API client instance per test.
        self.api = memsource_cli.api.conversations_api.ConversationsApi()  # noqa: E501

    def tearDown(self):
        pass

    def test_add_lqa_comment(self):
        """Test case for add_lqa_comment

        Add LQA comment  # noqa: E501
        """
        pass

    def test_add_plain_comment(self):
        """Test case for add_plain_comment

        Add plain comment  # noqa: E501
        """
        pass

    def test_create_lqa_conversation(self):
        """Test case for create_lqa_conversation

        Create LQA conversation  # noqa: E501
        """
        pass

    def test_create_segment_target_conversation(self):
        """Test case for create_segment_target_conversation

        Create plain conversation  # noqa: E501
        """
        pass

    def test_delete_lqa_comment(self):
        """Test case for delete_lqa_comment

        Delete LQA comment  # noqa: E501
        """
        pass

    def test_delete_lqa_conversation(self):
        """Test case for delete_lqa_conversation

        Delete conversation  # noqa: E501
        """
        pass

    def test_delete_plain_comment(self):
        """Test case for delete_plain_comment

        Delete plain comment  # noqa: E501
        """
        pass

    def test_delete_plain_conversation(self):
        """Test case for delete_plain_conversation

        Delete plain conversation  # noqa: E501
        """
        pass

    def test_find_conversations(self):
        """Test case for find_conversations

        Find all conversation  # noqa: E501
        """
        pass

    def test_get_lqa_conversation(self):
        """Test case for get_lqa_conversation

        Get LQA conversation  # noqa: E501
        """
        pass

    def test_get_plain_conversation(self):
        """Test case for get_plain_conversation

        Get plain conversation  # noqa: E501
        """
        pass

    def test_list_all_conversations(self):
        """Test case for list_all_conversations

        List all conversations  # noqa: E501
        """
        pass

    def test_list_lqa_conversations(self):
        """Test case for list_lqa_conversations

        List LQA conversations  # noqa: E501
        """
        pass

    def test_list_plain_conversations(self):
        """Test case for list_plain_conversations

        List plain conversations  # noqa: E501
        """
        pass

    def test_update_lqa_comment(self):
        """Test case for update_lqa_comment

        Edit LQA comment  # noqa: E501
        """
        pass

    def test_update_lqa_conversation(self):
        """Test case for update_lqa_conversation

        Edit LQA conversation  # noqa: E501
        """
        pass

    def test_update_plain_comment(self):
        """Test case for update_plain_comment

        Edit plain comment  # noqa: E501
        """
        pass

    def test_update_plain_conversation(self):
        """Test case for update_plain_conversation

        Edit plain conversation  # noqa: E501
        """
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"cerninr@gmail.com"
] |
cerninr@gmail.com
|
6d72846e5637395a9e8186735df19ecf8593262c
|
6d24a0820a2e1227e8caff083a8fef4f6f207c6f
|
/django_test8remotedb/django_test8remotedb/wsgi.py
|
e1c3c34a28f8e9da506e1712d7ee44b07b95f92e
|
[] |
no_license
|
pyh3887/Django
|
45d4b3be955634edba924cc18bbc8d3454c7355b
|
a44e1067494391ff4a7473aeaeb63bbeba43b3d8
|
refs/heads/master
| 2022-11-08T08:36:04.750050
| 2020-06-28T14:00:53
| 2020-06-28T14:00:53
| 275,596,275
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
"""
WSGI config for django_test8remotedb project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the app
# (setdefault: an externally provided DJANGO_SETTINGS_MODULE wins).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_test8remotedb.settings')
# Module-level WSGI callable picked up by application servers (gunicorn, uWSGI, ...).
application = get_wsgi_application()
|
[
"pyh3887@naver.com"
] |
pyh3887@naver.com
|
c22e5285f55b9a024db9c6a181f7be86ac4aa38d
|
d0fcc2198f1caf5633c4fc0d004ba68714396f1b
|
/bc4py/__init__.py
|
e4f948768169653f959d63eacfa2036467347985
|
[
"MIT"
] |
permissive
|
webclinic017/bc4py
|
4bfce04b666c2aaadda4b7ecc2a8270839231850
|
620b7d855ec957b3e2b4021cf8069d9dd128587a
|
refs/heads/master
| 2022-12-09T22:23:49.842255
| 2019-06-21T14:24:17
| 2019-06-21T14:24:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 648
|
py
|
__version__ = '0.0.27-alpha'
__chain_version__ = 0
__block_version__ = 1
__message__ = 'This is alpha version - use at your own risk, do not use for merchant applications'
__logo__ = r"""
_____ _____ _ _
| __ \ / ____| | | | |
| |__) | _ | | ___ _ __ | |_ _ __ __ _ ___| |_
| ___/ | | | | | / _ \| '_ \| __| '__/ _` |/ __| __|
| | | |_| | | |___| (_) | | | | |_| | | (_| | (__| |_
|_| \__, | \_____\___/|_| |_|\__|_| \__,_|\___|\__|
__/ |
|___/
"""
|
[
"thhjuu@yahoo.co.jp"
] |
thhjuu@yahoo.co.jp
|
4d7c7bef47a7b0a60f1eb495d804d2eb2c8ade27
|
93022749a35320a0c5d6dad4db476b1e1795e318
|
/issm/bamgmesh.py
|
a74e7b1994e1ba700f8cf936b7d747286f4f44d7
|
[
"BSD-3-Clause"
] |
permissive
|
pf4d/issm_python
|
78cd88e9ef525bc74e040c1484aaf02e46c97a5b
|
6bf36016cb0c55aee9bf3f7cf59694cc5ce77091
|
refs/heads/master
| 2022-01-17T16:20:20.257966
| 2019-07-10T17:46:31
| 2019-07-10T17:46:31
| 105,887,661
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,051
|
py
|
import numpy as np
class bamgmesh(object):
    """
    BAMGMESH class definition

    Container for the arrays describing a BAMG triangular mesh.

    Usage:
       bamgmesh()    -> default object with empty fields
       bamgmesh(d)   -> copy recognized fields from the mapping *d*;
                        keys that are not bamgmesh fields are ignored
    """

    # Field names in the order they are reported by __repr__.
    _field_names = (
        'Vertices', 'Edges', 'Triangles', 'IssmEdges', 'IssmSegments',
        'VerticesOnGeomVertex', 'VerticesOnGeomEdge', 'EdgesOnGeomEdge',
        'SubDomains', 'SubDomainsFromGeom', 'ElementConnectivity',
        'NodalConnectivity', 'NodalElementConnectivity',
        'CrackedVertices', 'CrackedEdges',
    )

    def __init__(self, *args):  # {{{
        # Defaults: empty arrays (Vertices/Edges have 3 columns, SubDomains 4).
        self.Vertices = np.empty((0, 3))
        self.Edges = np.empty((0, 3))
        self.Triangles = np.empty((0, 0))
        self.IssmEdges = np.empty((0, 0))
        self.IssmSegments = np.empty((0, 0))
        self.VerticesOnGeomVertex = np.empty((0, 0))
        self.VerticesOnGeomEdge = np.empty((0, 0))
        self.EdgesOnGeomEdge = np.empty((0, 0))
        self.SubDomains = np.empty((0, 4))
        self.SubDomainsFromGeom = np.empty((0, 0))
        self.ElementConnectivity = np.empty((0, 0))
        self.NodalConnectivity = np.empty((0, 0))
        self.NodalElementConnectivity = np.empty((0, 0))
        self.CrackedVertices = np.empty((0, 0))
        self.CrackedEdges = np.empty((0, 0))

        if not len(args):
            # if no input arguments, create a default object
            pass
        elif len(args) == 1:
            # Copy recognized fields from a mapping. Iterating the mapping
            # yields its keys on both Python 2 and 3 — the original called
            # dict.iterkeys(), which does not exist on Python 3.
            fields = args[0]
            for field in fields:
                if field in vars(self):
                    setattr(self, field, fields[field])
        else:
            raise TypeError("bamgmesh constructor error message: unknown type of constructor call")
    # }}}

    def __repr__(self):  # {{{
        # One line per field, in declaration order.
        pieces = ["class '%s' object '%s' = \n" % (type(self), 'self')]
        for name in self._field_names:
            pieces.append("   %s: %s\n" % (name, str(getattr(self, name))))
        return ''.join(pieces)
    # }}}
|
[
"cummings.evan@gmail.com"
] |
cummings.evan@gmail.com
|
2aeb9fcb51b0105557890554d8ed3a4f83d5f27c
|
d489eb7998aa09e17ce8d8aef085a65f799e6a02
|
/lib/modules/powershell/management/get_domain_sid.py
|
61786bf029320aa283e34ee9af48782232a1b12b
|
[
"MIT"
] |
permissive
|
fengjixuchui/invader
|
d36078bbef3d740f95930d9896b2d7dd7227474c
|
68153dafbe25e7bb821c8545952d0cc15ae35a3e
|
refs/heads/master
| 2020-07-21T19:45:10.479388
| 2019-09-26T11:32:38
| 2019-09-26T11:32:38
| 206,958,809
| 2
| 1
|
MIT
| 2019-09-26T11:32:39
| 2019-09-07T11:32:17
|
PowerShell
|
UTF-8
|
Python
| false
| false
| 2,979
|
py
|
from lib.common import helpers
class Module:
    # Framework wrapper around PowerView's Get-DomainSID function.
    # NOTE: Python 2 source (print statement, dict.iteritems below).

    def __init__(self, mainMenu, params=[]):
        # NOTE(review): `params=[]` is a mutable default argument shared
        # across calls; left unchanged here to keep the code identical.

        # Static metadata the framework menu reads to describe this module.
        self.info = {
            'Name': 'Get-DomainSID',
            'Author': ['@harmj0y'],
            'Description': ('Returns the SID for the current of specified domain.'),
            'Background' : True,
            'OutputExtension' : None,
            'NeedsAdmin' : False,
            'OpsecSafe' : True,
            'Language' : 'powershell',
            'MinLanguageVersion' : '2',
            'Comments': [ ]
        }

        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent' : {
                'Description' : 'Agent to run module on.',
                'Required' : True,
                'Value' : ''
            },
            'Domain' : {
                'Description' : 'Domain to resolve SID for, defaults to the current domain.',
                'Required' : False,
                'Value' : ''
            }
        }

        # save off a copy of the mainMenu object to access external functionality
        # like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        # Apply caller-supplied option overrides.
        for param in params:
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value

    def generate(self, obfuscate=False, obfuscationCommand=""):
        # Build the PowerShell payload: the Get-DomainSID function body from
        # powerview.ps1 followed by an invocation with the configured options.
        moduleName = self.info["Name"]

        # read in the common powerview.ps1 module source code
        moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/powerview.ps1"
        try:
            f = open(moduleSource, 'r')
        except:
            print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
            return ""

        moduleCode = f.read()
        f.close()

        # get just the code needed for the specified function
        script = helpers.generate_dynamic_powershell_script(moduleCode, moduleName)

        script += moduleName + " "

        # Translate non-Agent options into PowerShell parameters.
        scriptEnd = ""
        for option,values in self.options.iteritems():
            if option.lower() != "agent":
                if values['Value'] and values['Value'] != '':
                    if values['Value'].lower() == "true":
                        # if we're just adding a switch
                        scriptEnd += " -" + str(option)
                    else:
                        scriptEnd += " -" + str(option) + " " + str(values['Value'])
        # Pipe output through Out-String and append a completion marker.
        # NOTE(review): scriptEnd is built but never appended to `script`
        # before returning — presumably a `script += scriptEnd` is missing;
        # verify against the upstream module.
        scriptEnd += ' | Out-String | %{$_ + \"`n\"};"`n'+str(moduleName)+' completed!"'
        if obfuscate:
            script = helpers.obfuscate(self.mainMenu.installPath, psScript=script, obfuscationCommand=obfuscationCommand)
        return script
|
[
"noreply@github.com"
] |
fengjixuchui.noreply@github.com
|
d6cd273dd70bb1adf7a00656ce07e3a7d18afb1e
|
ebae416013607b045b505dbb0b5598c9e732dcf4
|
/2b Rozmienianie monet.py
|
fc4f0377b9bf95a51b48d6343a5b1bcf82548afb
|
[] |
no_license
|
Ernest93/Ernest93
|
08bbadc067e9ef277bad5f0922ca89a8ae298fb8
|
7570ccf18631c5e88bd38c0498d7c9348e81dd7b
|
refs/heads/master
| 2020-04-06T15:24:56.128434
| 2018-12-10T16:21:06
| 2018-12-10T16:21:06
| 157,577,667
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
"""
2) Program przyjmuje kwotę w parametrze i wylicza jak rozmienić to na monety: 5, 2, 1, 0.5, 0.2, 0.1 wydając ich jak najmniej.
"""
moneta5 = 5
moneta2 = 2
moneta1 = 1
moneta50 = 0.50
moneta20 = 0.20
moneta10 = 0.10
kwota = input("Podaj kwotę do wymiany na drobne: ")
if kwota
|
[
"you@example.com"
] |
you@example.com
|
3bd394ba3bd4bde21e3af8745d23d7012a1674fa
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/88/usersdata/216/59764/submittedfiles/listas.py
|
59dcb46254afeaa8ea7c2cfa8c36ed1c7c2a8dfa
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 646
|
py
|
# -*- coding: utf-8 -*-

def degrau(a):
    """Return a list holding the 'step' value(s) of the LAST pair in `a`.

    NOTE(review): `menos` and `l` are re-initialised on every loop pass, so
    only the final pair survives; a negative a[i] is flipped in place (the
    caller's list is mutated) and then counted by *both* branches.  This
    mirrors the original submission — confirm against the exercise text.
    """
    l = []  # robustness: defined even when `a` has fewer than two items
    for i in range(0, len(a) - 1, 1):
        menos = 0
        l = []
        if a[i] < 0:
            a[i] = a[i] * (-1)  # in-place sign flip
            menos = menos + (a[i] - a[i + 1])
            l.append(menos)
        if a[i] >= 0:  # always true after the branch above flipped the sign
            menos = menos + (a[i] - a[i + 1])
            l.append(menos)
    return l


def maior(l):
    """Return the global `a`'s element at the first descending position of
    `l`, or None when no adjacent pair of `l` descends.

    NOTE(review): reads the module-level list `a`; presumably `l` itself
    was intended — verify against the exercise statement.
    """
    for i in range(0, len(l) - 1, 1):
        maior = 0
        if l[i] > l[i + 1]:
            maior = a[i]
            return a[i]


if __name__ == "__main__":
    # FIX for the original NameError: build the input list `a` *before*
    # calling degrau()/maior() on it (the original called degrau(a) first).
    a = []
    n = int(input('Digite o tamanho da lista:'))
    for i in range(0, n, 1):
        x = int(input('Digite o numero:'))
        a.append(x)
    l = degrau(a)
    print(maior(l))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
0ea486071cf97380f79f7282e57c31033c2db52a
|
e573161a9d4fc74ef4debdd9cfd8956bdd1d0416
|
/src/products/migrations/0001_initial.py
|
c5e75cc6b8ce4aa5d27f37b4086623b92994f43b
|
[] |
no_license
|
tanjibpa/rx-verify
|
a11c471afc628524bf95103711102258e6e04c19
|
3947fd2f9a640b422014d1857b9377e42d8961a5
|
refs/heads/main
| 2023-02-23T08:16:43.910998
| 2021-01-23T07:07:43
| 2021-01-23T07:07:43
| 331,103,311
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,357
|
py
|
# Generated by Django 3.1.5 on 2021-01-21 05:22
import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    # Initial schema for the products app: Product, RawMaterial and
    # ProductBatch, plus the Product<->RawMaterial many-to-many link.
    # All three models share audit columns (created/updated timestamps and
    # JSON created_by/updated_by blobs).

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('created_by', django.contrib.postgres.fields.jsonb.JSONField(default=dict)),
                ('updated_by', django.contrib.postgres.fields.jsonb.JSONField(default=dict)),
                ('name', models.CharField(max_length=100)),
                ('mfg_date', models.DateField()),
                ('expiration_date', models.DateField()),
            ],
            options={
                'verbose_name': 'Product',
                'verbose_name_plural': 'Products',
                'db_table': 'products',
                'ordering': ['-updated_at'],
            },
        ),
        migrations.CreateModel(
            name='RawMaterial',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('created_by', django.contrib.postgres.fields.jsonb.JSONField(default=dict)),
                ('updated_by', django.contrib.postgres.fields.jsonb.JSONField(default=dict)),
                ('name', models.CharField(max_length=100)),
            ],
            options={
                'verbose_name': 'Raw Material',
                'verbose_name_plural': 'Raw Materials',
                'db_table': 'raw_materials',
                'ordering': ['-updated_at'],
            },
        ),
        # Batches carry a generated UUID batch_number and link to a Product.
        migrations.CreateModel(
            name='ProductBatch',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('created_by', django.contrib.postgres.fields.jsonb.JSONField(default=dict)),
                ('updated_by', django.contrib.postgres.fields.jsonb.JSONField(default=dict)),
                ('batch_number', models.UUIDField(default=uuid.uuid4, editable=False)),
                ('mfg_date', models.DateField()),
                ('expiration_date', models.DateField()),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.product')),
            ],
            options={
                'verbose_name': 'Product Batch',
                'verbose_name_plural': 'Product Batches',
                'db_table': 'product_batches',
                'ordering': ['-updated_at'],
            },
        ),
        # Added after both models exist to avoid a forward reference.
        migrations.AddField(
            model_name='product',
            name='raw_materials',
            field=models.ManyToManyField(to='products.RawMaterial'),
        ),
    ]
|
[
"ikram.tanjib@gmail.com"
] |
ikram.tanjib@gmail.com
|
9e1a9f7e8ff63503ca25b48244d1b4f4cc6212e7
|
cc9a0d5608b2209b02591ceace0a7416823a9de5
|
/hyk/users/urls.py
|
752239befcc25821e106299311f9a488a257c93b
|
[
"MIT"
] |
permissive
|
morwen1/hack_your_body
|
240838e75dd4447c944d47d37635d2064d4210fd
|
d4156d4fbe2dd4123d5b5bceef451803a50a39f8
|
refs/heads/master
| 2020-11-24T01:55:46.323849
| 2019-12-15T18:15:51
| 2019-12-15T18:15:51
| 226,505,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 403
|
py
|
# Django
from django.urls import include, path
# Rest framework
from rest_framework.routers import SimpleRouter
# Views
from hyk.users.views import ProfileViewset, UserViewset

# Router exposing the user endpoints at the app root and the profile
# endpoints under /profile.
_router = SimpleRouter()
_router.register(r'', UserViewset, basename='users_urls')
_router.register(r'profile', ProfileViewset, basename='profile_update')

app_name = "users"

urlpatterns = [
    path('', include(_router.urls)),
]
|
[
"morwen901@gmail.com"
] |
morwen901@gmail.com
|
25a5631cbd7c7365beaf481e7e89807d81afbfe9
|
ca4fff998b3a345595adfc8a33fb273f2ac10a58
|
/tensorflow/python/keras/layers/preprocessing/text_vectorization_distribution_test.py
|
ebf68e3bce2897f82a7bef4c73b6900c69018e69
|
[
"Apache-2.0"
] |
permissive
|
yair-ehrenwald/tensorflow
|
d485cef7847ab027639550fe2b19fd7521ea640e
|
eaceb03d7f5ec31c0d27c464f17ae003027980ca
|
refs/heads/master
| 2023-05-04T19:43:16.142132
| 2021-03-21T06:10:13
| 2021-03-21T06:16:59
| 287,766,702
| 2
| 0
|
Apache-2.0
| 2020-08-15T14:51:47
| 2020-08-15T14:51:46
| null |
UTF-8
|
Python
| false
| false
| 3,914
|
py
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution tests for keras.layers.preprocessing.text_vectorization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations as ds_combinations
from tensorflow.python.framework import config
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_combinations as combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras.distribute.strategy_combinations import all_strategies
from tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils
from tensorflow.python.keras.layers.preprocessing import text_vectorization
from tensorflow.python.platform import test
@ds_combinations.generate(
    combinations.combine(distribution=all_strategies, mode=["eager"]))
class TextVectorizationDistributionTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):
  """Runs TextVectorization output checks under each tf.distribute strategy."""

  def test_distribution_strategy_output(self, distribution):
    # Vocabulary is set explicitly; "michigan" is out-of-vocabulary and is
    # expected to map to index 1 (indices 2..5 cover the four vocab words).
    vocab_data = ["earth", "wind", "and", "fire"]
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    input_dataset = dataset_ops.Dataset.from_tensor_slices(input_array).batch(
        2, drop_remainder=True)

    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]

    # String lookup ops may not have device kernels; let TF place them.
    config.set_soft_device_placement(True)

    with distribution.scope():
      input_data = keras.Input(shape=(None,), dtype=dtypes.string)
      layer = text_vectorization.TextVectorization(
          max_tokens=None,
          standardize=None,
          split=None,
          output_mode=text_vectorization.INT)
      layer.set_vocabulary(vocab_data)
      int_data = layer(input_data)
      model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_dataset)
    self.assertAllEqual(expected_output, output_dataset)

  def test_distribution_strategy_output_with_adapt(self, distribution):
    # Same check, but the vocabulary is learned with adapt(); the word
    # frequencies (4x earth, 3x wind, 2x and, 1x fire) presumably yield the
    # same index assignment as the explicit vocabulary above — the expected
    # output is identical.
    vocab_data = [[
        "earth", "earth", "earth", "earth", "wind", "wind", "wind", "and",
        "and", "fire"
    ]]
    vocab_dataset = dataset_ops.Dataset.from_tensors(vocab_data)
    input_array = np.array([["earth", "wind", "and", "fire"],
                            ["fire", "and", "earth", "michigan"]])
    input_dataset = dataset_ops.Dataset.from_tensor_slices(input_array).batch(
        2, drop_remainder=True)

    expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]

    config.set_soft_device_placement(True)

    with distribution.scope():
      input_data = keras.Input(shape=(None,), dtype=dtypes.string)
      layer = text_vectorization.TextVectorization(
          max_tokens=None,
          standardize=None,
          split=None,
          output_mode=text_vectorization.INT)
      layer.adapt(vocab_dataset)
      int_data = layer(input_data)
      model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_dataset)
    self.assertAllEqual(expected_output, output_dataset)


if __name__ == "__main__":
  test.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
895e18e52390f46b88c59c8e8bb946d861c58834
|
6237a1d1aa61be65f1a71bb645ec6d8689f24715
|
/PythonistaAppTemplate/PythonistaKit.framework/pylib/site-packages/jedi/api/helpers.py
|
77fc7827a66f2f8eac56ccec40c4e65833df446f
|
[] |
no_license
|
Tkizzy/PythonistaAppTemplate
|
cf8485dc8e9e45c65af624916d71b38e2e11ce56
|
f196581cfee4a1c10168892a20ddcf16270c91b0
|
refs/heads/master
| 2021-01-16T00:17:04.628848
| 2016-03-13T08:15:35
| 2016-03-13T08:15:35
| 52,062,119
| 1
| 0
| null | 2016-02-19T04:48:36
| 2016-02-19T04:48:35
| null |
UTF-8
|
Python
| false
| false
| 2,568
|
py
|
"""
Helpers for the API
"""
import re
from jedi.parser import tree as pt
from jedi.evaluate import imports
def completion_parts(path_until_cursor):
    """
    Returns the parts for the completion

    :return: tuple - (path, dot, like)
    """
    # Lazily-matched prefix, an optional separating dot, then the partial
    # identifier being completed; re.S lets the prefix span newlines.
    pattern = r'^(.*?)(\.|)(\w?[\w\d]*)$'
    return re.match(pattern, path_until_cursor, flags=re.S).groups()
def sorted_definitions(defs):
    """Order definitions by module path, then line, then column."""
    def _order(definition):
        # `module_path`, `line` and `column` may each be None; substitute
        # falsy defaults so the tuple comparison never mixes types.
        return (definition.module_path or '',
                definition.line or 0,
                definition.column or 0)

    return sorted(defs, key=_order)
def get_on_import_stmt(evaluator, user_context, user_stmt, is_like_search=False):
    """
    Resolve the user statement, if it is an import. Only resolve the
    parts until the user position.

    Returns an (ImportWrapper, Name) pair, or (None, None) when the cursor
    is not on a name.
    """
    # Name token under the cursor, or None if the position is not on one.
    name = user_stmt.name_for_position(user_context.position)
    if name is None:
        return None, None
    # Wrap the name so the import machinery can resolve it on demand.
    i = imports.ImportWrapper(evaluator, name)
    return i, name
def check_error_statements(module, pos):
    # Scan the parser's recorded error statements for an unfinished import
    # statement that spans `pos`; delegate reconstruction to
    # importer_from_error_statement when one is found.
    for error_statement in module.error_statement_stacks:
        if error_statement.first_type in ('import_from', 'import_name') \
                and error_statement.first_pos < pos <= error_statement.next_start_pos:
            return importer_from_error_statement(error_statement, pos)
    # Default result: no names, level 0, not only-modules, no trailing dot.
    return None, 0, False, False
def importer_from_error_statement(error_statement, pos):
    """
    Reconstruct import information from a broken (unfinished) import
    statement.

    Returns (names, level, only_modules, unfinished_dotted): the dotted-name
    parts found before `pos`, the relative-import level (count of leading
    '.'/'...'), whether only module names were seen (False once the `import`
    keyword appeared), and whether the dotted name ends in a trailing '.'.
    """
    def check_dotted(children):
        # Every second child is a name token (the rest are '.' separators);
        # yield only the names that start before the cursor.
        for name in children[::2]:
            if name.start_pos <= pos:
                yield name

    names = []
    level = 0
    only_modules = True
    unfinished_dotted = False
    for typ, nodes in error_statement.stack:
        if typ == 'dotted_name':
            names += check_dotted(nodes)
            if nodes[-1] == '.':
                # An unfinished dotted_name
                unfinished_dotted = True
        elif typ == 'import_name':
            if nodes[0].start_pos <= pos <= nodes[0].end_pos:
                # We are on the import.
                return None, 0, False, False
        elif typ == 'import_from':
            # Walk the `from ... import ...` children up to the cursor.
            for node in nodes:
                if node.start_pos >= pos:
                    break
                elif isinstance(node, pt.Node) and node.type == 'dotted_name':
                    names += check_dotted(node.children)
                elif node in ('.', '...'):
                    # Leading dots encode the relative-import level.
                    level += len(node.value)
                elif isinstance(node, pt.Name):
                    names.append(node)
                elif node == 'import':
                    only_modules = False
    return names, level, only_modules, unfinished_dotted
|
[
"olezorn@gmx.net"
] |
olezorn@gmx.net
|
6176ca8999bab8c589d13d80a4c7c5c7ac8ff137
|
a543a24f1b5aebf500c2200cd1d139435948500d
|
/satory074/_abc004/c.py
|
9227c0ec27dc1aa64b43222d4668fa5c0f98ccc1
|
[] |
no_license
|
HomeSox/AtCoder
|
18c89660762c3e0979596f0bcc9918c8962e4abb
|
93e5ffab02ae1f763682aecb032c4f6f4e4b5588
|
refs/heads/main
| 2023-09-05T03:39:34.591433
| 2023-09-04T13:53:36
| 2023-09-04T13:53:36
| 219,873,371
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 178
|
py
|
def solve(n):
    """Return the arrangement of '123456' after n adjacent swaps.

    Swap i exchanges positions i%5 and i%5+1; the sequence of arrangements
    is periodic with period 30, so only the first 30 states are built and
    the answer is looked up at n % 30.
    """
    ordlist = ['123456']
    for i in range(29):
        digits = list(ordlist[-1])
        j = i % 5
        digits[j], digits[j + 1] = digits[j + 1], digits[j]
        ordlist.append(''.join(digits))
    return ordlist[n % 30]


if __name__ == "__main__":
    # Wrapped in a function + guard so the module is importable/testable;
    # behaviour when run as a script is unchanged.
    print(solve(int(input())))
|
[
"satory074@gmail.com"
] |
satory074@gmail.com
|
3af98569cf8e2029999739ed562ac539f5bce4fb
|
e1b94d6fc781af26c08d7d108d9f9a463b154cac
|
/test20.py
|
a544b507f3842c31604ac995a8c10d3f87e05daf
|
[] |
no_license
|
404akhan/math6450
|
b27d6928bbbddd9903122099b66e8adf49b7525b
|
818baf5ad9c47e7237b3d21f4a698f684649b9f4
|
refs/heads/master
| 2021-01-20T07:42:58.415286
| 2017-05-02T13:02:44
| 2017-05-02T13:02:44
| 90,034,123
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,657
|
py
|
import numpy as np
import pandas as pd
import random
import statsmodels.api as sm
from sklearn.feature_selection import RFE
from sklearn.svm import SVC
import sys
from pylab import pcolor, show, colorbar, xticks, yticks

# Logistic-style regression trained by gradient descent on the squared error
# of the sigmoid output: predicts the partner's decision (`dec_o`) from the
# speed-dating dataset.  NOTE: Python 2 source (print statements).

df = pd.read_csv("./speed_code.csv", encoding="ISO-8859-1")

# Predictor columns: self- and partner-rated attributes plus meta flags.
input_vars = np.array(['attr', 'sinc', 'intel', 'fun', 'amb', 'shar', 'like', 'prob',
    'attr_o', 'sinc_o', 'intel_o', 'fun_o', 'amb_o', 'shar_o', 'like_o', 'prob_o',
    'samerace', 'met', 'met_o'])
attribute_num = len(input_vars)
print 'attribute_num', attribute_num

# Dataset size (8378 rows) is hard-coded throughout this script.
xs = np.zeros((8378, attribute_num))
ys = np.zeros((8378, 1))

for i in range(attribute_num):
    xs[:, i] = df[input_vars[i]]
ys[:, 0] = df['dec_o']

xs[np.isnan(xs)] = 0.  # impute missing values with zero

# Deterministic shuffled 70/30 train / validation split.
random.seed(1339)
shuf_arr = range(0, 8378)
random.shuffle(shuf_arr)
train_size = int(8378 * 0.7)
lr = 0.03  # gradient-descent learning rate

# for i in range(attribute_num):
# 	d1 = xs[: ,i]
# 	d2 = ys[: ,0]
# 	print input_vars[i], np.corrcoef(d1, d2)[0][1]
#
xs_train = xs[shuf_arr[0:train_size], :]
xs_cross_val = xs[shuf_arr[train_size:], :]
ys_train = ys[shuf_arr[0:train_size], :]
ys_cross_val = ys[shuf_arr[train_size:], :]

# Standardise features using training-set statistics only.
xs_mean = np.mean(xs_train, axis=0)
xs_std = np.std(xs_train, axis=0)
xs_train = (xs_train - xs_mean) / xs_std
xs_cross_val = (xs_cross_val - xs_mean) / xs_std

def sigmoid(x):
    # Logistic function.
    return 1. / (1 + np.exp(-x))

def get_loss():
    # Mean squared error of the sigmoid output on the validation split.
    scores = np.matmul(xs_cross_val, W) + b
    predict = sigmoid(scores)
    error = predict - ys_cross_val
    return np.mean(np.square(error))

def get_accuracy():
    # NOTE(review): despite the name, this returns the mean *error* rate
    # (fraction misclassified at threshold 0.5), not the accuracy.
    scores = np.matmul(xs_cross_val, W) + b
    predict = sigmoid(scores)
    predict = (predict > 0.5).astype(np.int)
    error = predict - ys_cross_val
    return np.mean(np.abs(error))

def get_train_loss():
    # Training-set counterpart of get_loss().
    scores = np.matmul(xs_train, W) + b
    predict = sigmoid(scores)
    error = predict - ys_train
    return np.mean(np.square(error))

def get_train_accuracy():
    # Training-set counterpart of get_accuracy() (also an error rate).
    scores = np.matmul(xs_train, W) + b
    predict = sigmoid(scores)
    predict = (predict > 0.5).astype(np.int)
    error = predict - ys_train
    return np.mean(np.abs(error))

# Small random initial weights; zero bias.
W = 0.01 * np.random.randn(attribute_num, 1)
b = 0.

# Full-batch gradient descent on the squared-error loss.
for i in range(100*1000):
    scores = np.matmul(xs_train, W) + b
    predict = sigmoid(scores)

    # Chain rule: d(loss)/d(predict) times sigmoid derivative p*(1-p).
    dpredict = 1. / train_size * (predict - ys_train)
    dscores = dpredict * predict * (1 - predict)

    dW = np.matmul(xs_train.transpose(), dscores)
    db = np.sum(dscores)

    W -= lr * dW
    b -= lr * db

    if i % 10 == 0:
        print 'iter: %d, loss: %f, acc: %f, tr loss: %f, tr acc: %f' % (i, get_loss(), get_accuracy(), get_train_loss(), get_train_accuracy())
|
[
"404akhan@gmail.com"
] |
404akhan@gmail.com
|
a6b1f0bbee7fe4666bf8d05fb29773efb460b249
|
47a08ca494ee35cf553d223ff7fd69fdf92c1aa5
|
/sourcebook_app/migrations/0006_auto_20180509_1922.py
|
f298c34ae95a14332538631cab4e9c4526eb1522
|
[] |
no_license
|
apjanco/90s-sourcebook
|
010541d2718613a08008285d00ec59d96f742bb0
|
183fb012f87285d641678fe5c9c8da8ab6998084
|
refs/heads/master
| 2021-07-17T01:36:40.098417
| 2019-02-09T02:15:17
| 2019-02-09T02:15:17
| 132,296,098
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 927
|
py
|
# Generated by Django 2.0.5 on 2018-05-09 19:22
import ckeditor.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    # Introduces the `category` model and replaces Item's scalar category
    # field with a many-to-many relation of the same name.

    dependencies = [
        ('sourcebook_app', '0005_auto_20180509_1917'),
    ]

    operations = [
        migrations.CreateModel(
            name='category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(blank=True, max_length=200, null=True)),
                ('essay', ckeditor.fields.RichTextField(blank=True)),
            ],
        ),
        # Drop the old field first so the M2M can reuse the name.
        migrations.RemoveField(
            model_name='item',
            name='category',
        ),
        migrations.AddField(
            model_name='item',
            name='category',
            field=models.ManyToManyField(blank=True, to='sourcebook_app.category'),
        ),
    ]
|
[
"apjanco@gmail.com"
] |
apjanco@gmail.com
|
e2eff4ea305101f35ffcf05f15cc188ccb32ced6
|
d044e88e622d9f4ca350aa4fd9d95d7ba2fae50b
|
/application/dataentry/migrations/0207_auto_20211119_1807.py
|
a61f3dfa9a81a9f94de6886491918e4ab7c650cc
|
[] |
no_license
|
Tiny-Hands/tinyhands
|
337d5845ab99861ae189de2b97b8b36203c33eef
|
77aa0bdcbd6f2cbedc7eaa1fa4779bb559d88584
|
refs/heads/develop
| 2023-09-06T04:23:06.330489
| 2023-08-31T11:31:17
| 2023-08-31T11:31:17
| 24,202,150
| 7
| 3
| null | 2023-08-31T11:31:18
| 2014-09-18T19:35:02
|
PLpgSQL
|
UTF-8
|
Python
| false
| false
| 615
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-11-19 18:07
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Removes the obsolete 'FORMS' and 'VIF' permission groups.  The
    # per-user grants referencing those permissions are deleted first so
    # no foreign-key references dangle when the permissions themselves go.

    dependencies = [
        ('dataentry', '0206_personmatch_match_results'),
    ]

    operations = [
        migrations.RunSQL("delete from dataentry_userlocationpermission where permission_id in "\
            "(select id from dataentry_permission where permission_group in ('FORMS','VIF'))"),
        migrations.RunSQL("delete from dataentry_permission where permission_group in ('FORMS','VIF')"),
    ]
|
[
"scrishel@sbcglobal.net"
] |
scrishel@sbcglobal.net
|
3109b75d342ecc7144506e06f756165024efbfde
|
751cf52d62dba7d88387fc5734d6ee3954054fc2
|
/opencv/commercial/Instructions/Examples/Backprojection/backProjection.py
|
6700e987ebba4ac4cd7f725e033de35ffb5792a4
|
[
"MIT"
] |
permissive
|
nooralight/lab-computer-vision
|
70a4d84a47a14dc8f5e9796ff6ccb59d4451ff27
|
0c3d89b35d0468d4d3cc5ce2653b3f0ac82652a9
|
refs/heads/master
| 2023-03-17T12:45:22.700237
| 2017-07-11T22:17:09
| 2017-07-11T22:17:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,081
|
py
|
import numpy as np
import cv2

# Interactive HSV histogram back-projection demo: drag a rectangle on the
# webcam feed to pick a reference region; its hue/saturation histogram is
# then used to highlight similar pixels in subsequent frames.

# Selection-rectangle corners and state flags shared with the mouse callback.
xi, yi, xf, yf = 0, 0, 0, 0
selecting = False   # True while the left button is held down
newHist = False     # recompute the reference histogram on the next frame
begin = False       # at least one region has been selected

def regionSelect( event, x, y, flags, param ):
    # Mouse callback: record the drag rectangle in the module-level globals.
    global xi, yi, xf, yf, selecting, newHist, begin
    if event == cv2.EVENT_LBUTTONDOWN:
        selecting = True
        xi, yi = x, y
    elif event == cv2.EVENT_LBUTTONUP:
        xf, yf = x, y
        selecting = False
        newHist = True
        begin = True

cap = cv2.VideoCapture( 0 )
cv2.namedWindow( 'frame' )
cv2.setMouseCallback( 'frame', regionSelect )

while( True ):
    # Freeze the current frame while the user is dragging a selection.
    if not selecting:
        _, frame = cap.read()
    if begin:
        hsv = cv2.cvtColor( frame, cv2.COLOR_BGR2HSV )
        # Mask covering the selected rectangle; min/max handle drags in any
        # direction.
        mask = np.zeros( frame.shape[:2], np.uint8 )
        mask[min( yi, yf ) : max( yi, yf ), min( xi, xf ):max( xi, xf )] = 255
        if newHist:
            # Reference 2-D hue/saturation histogram of the selected region,
            # normalised to 0..255 for back-projection.
            roiHist = cv2.calcHist( [hsv], [0, 1], mask, [180, 256], [0, 180, 0, 256] )
            roiHist = cv2.normalize( roiHist, roiHist, 0, 255, cv2.NORM_MINMAX )
            newHist = False
        # NOTE(review): targetHist is computed but never used afterwards.
        targetHist = cv2.calcHist( [hsv], [0, 1], None, [180, 256], [0, 180, 0, 256] )
        dst = cv2.calcBackProject( [hsv], [0, 1], roiHist, [0, 180, 0, 256], 1 )
        # Smooth the back-projection with a disk kernel.
        disk = cv2.getStructuringElement( cv2.MORPH_ELLIPSE, ( 15,15 ) )
        dst = cv2.filter2D( dst, -1, disk )
        prox = np.copy( dst )
        # # threshold and binary AND
        _, thresh = cv2.threshold( dst, 250, 255, 0 )
        # Clean the mask: erode away specks, then dilate to restore size.
        kernel = cv2.getStructuringElement( cv2.MORPH_ELLIPSE, ( 5,5 ) )
        thresh = cv2.erode( thresh, kernel, iterations = 3 )
        thresh = cv2.dilate( thresh, kernel, iterations= 3 )
        masked_dots = cv2.bitwise_and( prox, prox, mask = thresh )
        # prox = cv2.applyColorMap( prox, cv2.COLORMAP_JET )
        masked_dots = cv2.applyColorMap( masked_dots, cv2.COLORMAP_JET )
        cv2.imshow( 'distance', masked_dots )
    cv2.imshow( 'frame', frame )
    if cv2.waitKey(1) & 0xFF == 27:  # Esc quits
        break

cap.release()
cv2.destroyAllWindows()
|
[
"danbudanov@gmail.com"
] |
danbudanov@gmail.com
|
11c815726dd58daf7008db0bd038e4928eabfdf4
|
4323ef02073a8e3c9e6aceba738aef5fc72c3aa6
|
/PythonExercicio/ex101.py
|
baf7c053bab3f6e080ca4354ddfda5d608c417b9
|
[
"MIT"
] |
permissive
|
fotavio16/PycharmProjects
|
e1e57816b5a0dbda7d7921ac024a71c712adac78
|
f5be49db941de69159ec543e8a6dde61f9f94d86
|
refs/heads/master
| 2022-10-19T15:45:52.773005
| 2020-06-14T02:23:02
| 2020-06-14T02:23:02
| 258,865,834
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 514
|
py
|
from time import gmtime
def voto(ano):
    """Classify Brazilian voting obligation for someone born in `ano`.

    Returns (situation, age): NEGADO under 16, OPCIONAL at 16-17 or 70+,
    OBRIGATÓRIO from 18 through 69.  Age is relative to the current year.
    """
    idade = gmtime().tm_year - ano
    if idade < 16:
        situacao = "NEGADO"
    elif idade < 18 or idade >= 70:
        situacao = "OPCIONAL"
    else:
        situacao = "OBRIGATÓRIO"
    return situacao, idade
# Main program: ask for the birth year and report the voting situation.
print("-" *30)
anoNasc = int(input("Em que ano você nasceu? "))
situação, idade = voto(anoNasc)
print(f'Com {idade} anos: VOTO {situação}.')
|
[
"fotaviofonseca@gmail.com"
] |
fotaviofonseca@gmail.com
|
dddfb5db6026c566353c7f826f4552c02c9021c0
|
bc441bb06b8948288f110af63feda4e798f30225
|
/state_workflow_sdk/model/cmdb/instance_tree_root_node_pb2.py
|
6ee3af86e87fa1990cdee356016a84a48a148421
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803
| 2020-06-16T07:25:41
| 2020-06-16T07:25:41
| 199,773,131
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 4,421
|
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: instance_tree_root_node.proto
# NOTE(review): generated module — change instance_tree_root_node.proto and
# re-run protoc instead of editing this file by hand.

import sys
# Py2/Py3 shim: serialized descriptors must be latin-1 bytes on Python 3.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


from state_workflow_sdk.model.cmdb import instance_tree_child_node_pb2 as state__workflow__sdk_dot_model_dot_cmdb_dot_instance__tree__child__node__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2


# File descriptor with the serialized .proto definition embedded.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='instance_tree_root_node.proto',
  package='cmdb',
  syntax='proto3',
  serialized_options=_b('Z>go.easyops.local/contracts/protorepo-models/easyops/model/cmdb'),
  serialized_pb=_b('\n\x1dinstance_tree_root_node.proto\x12\x04\x63mdb\x1a<state_workflow_sdk/model/cmdb/instance_tree_child_node.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xa6\x01\n\x14InstanceTreeRootNode\x12\x11\n\tobject_id\x18\x01 \x01(\t\x12&\n\x05query\x18\x02 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\'\n\x06\x66ields\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\x12*\n\x05\x63hild\x18\x04 \x03(\x0b\x32\x1b.cmdb.InstanceTreeChildNodeB@Z>go.easyops.local/contracts/protorepo-models/easyops/model/cmdbb\x06proto3')
  ,
  dependencies=[state__workflow__sdk_dot_model_dot_cmdb_dot_instance__tree__child__node__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])


# Message descriptor for cmdb.InstanceTreeRootNode.
_INSTANCETREEROOTNODE = _descriptor.Descriptor(
  name='InstanceTreeRootNode',
  full_name='cmdb.InstanceTreeRootNode',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='object_id', full_name='cmdb.InstanceTreeRootNode.object_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='query', full_name='cmdb.InstanceTreeRootNode.query', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='fields', full_name='cmdb.InstanceTreeRootNode.fields', index=2,
      number=3, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='child', full_name='cmdb.InstanceTreeRootNode.child', index=3,
      number=4, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=132,
  serialized_end=298,
)

# Resolve cross-file message types for the struct and child-node fields.
_INSTANCETREEROOTNODE.fields_by_name['query'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_INSTANCETREEROOTNODE.fields_by_name['fields'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_INSTANCETREEROOTNODE.fields_by_name['child'].message_type = state__workflow__sdk_dot_model_dot_cmdb_dot_instance__tree__child__node__pb2._INSTANCETREECHILDNODE
DESCRIPTOR.message_types_by_name['InstanceTreeRootNode'] = _INSTANCETREEROOTNODE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message class built from the descriptor above.
InstanceTreeRootNode = _reflection.GeneratedProtocolMessageType('InstanceTreeRootNode', (_message.Message,), {
  'DESCRIPTOR' : _INSTANCETREEROOTNODE,
  '__module__' : 'instance_tree_root_node_pb2'
  # @@protoc_insertion_point(class_scope:cmdb.InstanceTreeRootNode)
  })
_sym_db.RegisterMessage(InstanceTreeRootNode)


DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"service@easyops.cn"
] |
service@easyops.cn
|
b79bee798fa0c9f83afa01e877fb1c6112869372
|
51885da54b320351bfea42c7dd629f41985454cd
|
/abc061/b.py
|
2bb0198431586f9681539f68d514ce6e49ba67fc
|
[] |
no_license
|
mskt4440/AtCoder
|
dd266247205faeda468f911bff279a792eef5113
|
f22702e3932e129a13f0683e91e5cc1a0a99c8d5
|
refs/heads/master
| 2021-12-15T10:21:31.036601
| 2021-12-14T08:19:11
| 2021-12-14T08:19:11
| 185,161,276
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,154
|
py
|
#
# abc061 b
#
import sys
from io import StringIO
import unittest
class TestClass(unittest.TestCase):
    # Checks resolve() against the AtCoder sample cases by redirecting
    # stdin/stdout.

    def assertIO(self, input, output):
        # Feed `input` through stdin, run resolve(), and compare the
        # captured stdout (minus its trailing newline) with `output`.
        stdout, stdin = sys.stdout, sys.stdin
        sys.stdout, sys.stdin = StringIO(), StringIO(input)
        resolve()
        sys.stdout.seek(0)
        out = sys.stdout.read()[:-1]
        sys.stdout, sys.stdin = stdout, stdin
        self.assertEqual(out, output)

    def test_入力例_1(self):
        input = """4 3
1 2
2 3
1 4"""
        output = """2
2
1
1"""
        self.assertIO(input, output)

    def test_入力例_2(self):
        input = """2 5
1 2
2 1
1 2
2 1
1 2"""
        output = """5
5"""
        self.assertIO(input, output)

    def test_入力例_3(self):
        input = """8 8
1 2
3 4
1 5
2 8
3 7
5 2
4 1
6 8"""
        output = """3
3
2
2
2
1
1
2"""
        self.assertIO(input, output)
def resolve():
    """Read an undirected graph (N vertices, M edges) from stdin and print
    the degree of each vertex, one per line."""
    n, m = map(int, input().split())
    degree = [0] * n
    for _ in range(m):
        u, v = map(int, input().split())
        # Each edge contributes one to the degree of both endpoints.
        degree[u - 1] += 1
        degree[v - 1] += 1
    for d in degree:
        print(d)
if __name__ == "__main__":
    # Run the solution against real stdin; the unittest entry point is kept
    # commented out for local testing.
    # unittest.main()
    resolve()
|
[
"mskt4440@gmail.com"
] |
mskt4440@gmail.com
|
13cadb461e8dcc7eb8ec1196d1b737fac51cb3b8
|
55a8940b41527a79c3c45f34f2035a53ee7f3621
|
/repositorioDeReferencias/pycom-libraries/examples/lorawan-nano-gateway/config.py
|
78f770651d3c373e06cd4ac367be4d4df16f8036
|
[] |
no_license
|
RegisMelgaco/LoPyLearn
|
ec37a58d334e2bf2dfd9aaf93b4e1bed016c957b
|
3e8115545675947b24d646945a8859cae94b82b1
|
refs/heads/master
| 2021-09-03T10:55:49.614250
| 2018-01-08T13:45:46
| 2018-01-08T13:45:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 630
|
py
|
""" LoPy LoRaWAN Nano Gateway configuration options """
import machine
import ubinascii
# MAC address of the board's Wi-Fi interface, hex-encoded and upper-cased.
WIFI_MAC = ubinascii.hexlify(machine.unique_id()).upper()
# Set the Gateway ID to be the first 3 bytes of MAC address + 'FFFE' + last 3 bytes of MAC address
GATEWAY_ID = WIFI_MAC[:6] + "FFFE" + WIFI_MAC[6:12]
# LoRaWAN network server to forward packets to (The Things Network EU router).
SERVER = 'router.eu.thethings.network'
PORT = 1700  # UDP port of the packet forwarder protocol
# NTP server used to keep the gateway clock in sync, re-synced every hour.
NTP = "pool.ntp.org"
NTP_PERIOD_S = 3600
# NOTE(review): Wi-Fi credentials are committed in plain text — rotate/secure them.
WIFI_SSID = 'TESTES-NASH'
WIFI_PASS = 'nashifce8556'
# for EU868
LORA_FREQUENCY = 868100000
LORA_GW_DR = "SF7BW125" # DR_5
LORA_NODE_DR = 5
# for US915
# LORA_FREQUENCY = 903900000
# LORA_GW_DR = "SF7BW125" # DR_3
# LORA_NODE_DR = 3
|
[
"regis.melgaco@gmail.com"
] |
regis.melgaco@gmail.com
|
e9b4e4732fa50f1e05c1f15dce5454ddf633aa9a
|
ca1f95723d29f3c72f36e98a69aa86f827f90812
|
/request_helpers.py
|
ea665a3d2e9335db2cff53907e780f2486cb7478
|
[] |
no_license
|
zzaakiirr/parser_stackoverflow
|
70b0376f487303fafe7cbb739b65da8299dea4bb
|
3f39240107fcc8b1cfef95e6f63c0dc8dd3c81ad
|
refs/heads/master
| 2020-03-08T14:44:36.083748
| 2018-04-05T21:09:41
| 2018-04-05T21:09:41
| 128,193,857
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
import requests
def is_url(request):
    """Return True when *request* is a fetchable URL, False when it lacks a
    scheme (i.e. it is a free-text search query instead of a URL)."""
    try:
        requests.get(request)
        return True
    except requests.exceptions.MissingSchema:
        return False
def create_request_url(request):
    """Build a Stack Overflow search URL from free-text keywords.

    Whitespace-separated words are joined with '+' as the ``q`` query value.
    """
    query = "+".join(request.split())
    return 'https://stackoverflow.com/search?q=%s' % query
def get_response(request):
    """Fetch and return the raw page body for *request*.

    A proper URL is fetched directly; anything else is treated as search
    keywords and turned into a Stack Overflow search URL first.
    """
    target = request if is_url(request) else create_request_url(request)
    return requests.get(target).content
|
[
"zzaakiirr@gmail.com"
] |
zzaakiirr@gmail.com
|
18416831775dcf5a874ae485a50a8664e7427803
|
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
|
/ml-flask/Lib/site-packages/sklearn/feature_selection/tests/test_mutual_info.py
|
90912a4817535201553ce92bef653d054964f5a5
|
[
"MIT"
] |
permissive
|
YaminiHP/SimilitudeApp
|
8cbde52caec3c19d5fa73508fc005f38f79b8418
|
005c59894d8788c97be16ec420c0a43aaec99b80
|
refs/heads/master
| 2023-06-27T00:03:00.404080
| 2021-07-25T17:51:27
| 2021-07-25T17:51:27
| 389,390,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:20a5dcd51f98742f23b8c7f37c2a42a52dc8089b83f2d8acc528fd88f1288e3c
size 7293
|
[
"yamprakash130@gmail.com"
] |
yamprakash130@gmail.com
|
b65417573b6585e83dc4f560df7a9195ef2c53f2
|
83cfd4dec02d1bfe36663b1ef1c5344a9ac922ef
|
/orco/report.py
|
f1d920f1ff9343abb58ef7e82f8100ac1f58a86d
|
[
"MIT"
] |
permissive
|
spirali/orco
|
f170a968725408caca08ec0915b835cebd19423d
|
32c839b4d691a3eb83cfa379a1ec429adcf7f1b0
|
refs/heads/master
| 2021-07-22T11:41:19.430670
| 2020-06-10T08:56:17
| 2020-06-10T08:56:17
| 195,656,806
| 4
| 2
|
MIT
| 2020-07-13T08:40:46
| 2019-07-07T13:50:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,389
|
py
|
class Report:
    """
    Report of an event in ORCO. It can be viewed via ORCO browser.

    Attributes:

    * report_type - "info" / "error" / "timeout"
    * executor_id - Id of executor where even comes from
    * message - string representation of message
    * builder_name - name of builder where event occurs (or None if not related)
    * config - config related to the event (or None if not related)
    * timestamp - datetime when event was created
    """

    __slots__ = (
        "report_type",
        "executor_id",
        "message",
        "builder_name",
        "config",
        "timestamp",
    )

    def __init__(self, report_type, executor_id, message,
                 builder_name=None, config=None, timestamp=None):
        self.report_type = report_type
        self.executor_id = executor_id
        self.message = message
        self.builder_name = builder_name
        self.config = config
        self.timestamp = timestamp

    def to_dict(self):
        """Serialize the report into a plain dictionary (browser payload)."""
        return {
            "executor": self.executor_id,
            "timestamp": self.timestamp,
            "type": self.report_type,
            "message": self.message,
            "builder": self.builder_name,
            "config": self.config,
        }

    def __repr__(self):
        return f"<Report {self.report_type}: {self.message}>"
|
[
"spirali@kreatrix.org"
] |
spirali@kreatrix.org
|
1456ea0a97ad69d4665eccefa722464babba6e6a
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2539/60617/267823.py
|
0ac81f42b23d79d08df13bc7a0bd2236501f741b
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
def cutinous_subArr():
    """Read a Python list literal from stdin and print the length of the span
    from the first "descent" (arr[i] < arr[i-1]) to the last one.

    Bug fix: the original used ``start == 0`` as the "not yet found" sentinel,
    so when the first descent legitimately began at index 0 every later
    descent overwrote ``start`` and the printed span was too short.
    A ``None`` sentinel distinguishes "unset" from index 0.
    """
    # NOTE(review): eval() on raw input is unsafe for untrusted data;
    # kept as-is because judge input is trusted here.
    arr = eval(input())
    start = None
    end = 0
    for i in range(1, len(arr)):
        if arr[i] < arr[i - 1]:
            if start is None:
                start = i - 1  # first descent fixes the left edge once
            end = i  # last descent keeps extending the right edge
    if start is None:
        # No descent at all: preserve the original's output of 1
        # (it left start == end == 0).
        start = 0
    print(end - start + 1)
# Script entry point: read one list from stdin and print the answer.
if __name__=='__main__':
    cutinous_subArr()
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
3ef8cce8910cc02ab0f2bd51f71613ab2e31635b
|
1e6eb70f63fe91e40fab63675eee2bb05e6f1f28
|
/src/orion/core/worker/strategy.py
|
7a9df26908bb86f08e4632434176667a03fa6dfb
|
[
"BSD-3-Clause"
] |
permissive
|
jerrychatz/orion
|
ed587abc81bceaef5bd8e90e432112df0aad5f43
|
0ef3eea2decafce1985dc6a1cbea80cc2a92e9e8
|
refs/heads/master
| 2023-08-07T01:31:19.195565
| 2021-09-14T18:52:18
| 2021-09-14T18:52:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,335
|
py
|
# -*- coding: utf-8 -*-
"""
Parallel Strategies
===================
Register objectives for incomplete trials
"""
import logging
from abc import ABCMeta, abstractmethod
from orion.core.utils import Factory
from orion.core.worker.trial import Trial
log = logging.getLogger(__name__)
CORRUPTED_DB_WARNING = """\
Trial `%s` has an objective but status is not completed.
This is likely due to a corrupted database, possibly because of
database timeouts. Try setting manually status to `completed`.
You can find documention to do this at
https://orion.readthedocs.io/en/stable/user/storage.html#storage-backend.
If you encounter this issue often, please consider reporting it to
https://github.com/Epistimio/orion/issues."""
def get_objective(trial):
    """Get the value for the objective, if it exists, for this trial

    :return: Float or None
        The value of the objective, or None if it doesn't exist
    """
    objectives = [r.value for r in trial.results if r.type == "objective"]
    if len(objectives) > 1:
        raise RuntimeError(
            "Trial {} has {} objectives".format(trial.id, len(objectives))
        )
    # Zero or one objective: return the single value, or None when absent.
    return objectives[0] if objectives else None
class BaseParallelStrategy(object, metaclass=ABCMeta):
    """Strategy to give intermediate results for incomplete trials"""
    def __init__(self, *args, **kwargs):
        # Accepts and ignores arbitrary args so subclasses built through the
        # Factory metaclass can pass their own configuration.
        pass
    @abstractmethod
    def observe(self, points, results):
        """Observe completed trials
        .. seealso:: `orion.algo.base.BaseAlgorithm.observe` method
        Parameters
        ----------
        points: list of tuples of array-likes
            Points from a `orion.algo.space.Space`.
            Evaluated problem parameters by a consumer.
        results: list of dict
            Contains the result of an evaluation; partial information about the
            black-box function at each point in `params`.
        """
        # NOTE: In future points and results will be converted to trials for coherence with
        # `Strategy.lie()` as well as for coherence with `Algorithm.observe` which will also be
        # converted to expect trials instead of lists and dictionaries.
        pass
    # pylint: disable=no-self-use
    def lie(self, trial):
        """Construct a fake result for an incomplete trial
        Parameters
        ----------
        trial: `orion.core.worker.trial.Trial`
            A trial object which is not supposed to be completed.
        Returns
        -------
        ``orion.core.worker.trial.Trial.Result``
            The fake objective result corresponding to the trial given.
        Notes
        -----
        If the trial has an objective even if not completed, a warning is printed to user
        with a pointer to documentation to resolve the database corruption. The result returned is
        the corresponding objective instead of the lie.
        """
        objective = get_objective(trial)
        # NOTE(review): truthiness check means a legitimate objective of 0/0.0
        # is treated as "no objective" and falls through to the lie — confirm
        # whether `objective is not None` was intended.
        if objective:
            log.warning(CORRUPTED_DB_WARNING, trial.id)
            return Trial.Result(name="lie", type="lie", value=objective)
        return None
    @property
    def configuration(self):
        """Provide the configuration of the strategy as a dictionary."""
        return self.__class__.__name__
class NoParallelStrategy(BaseParallelStrategy):
    """No parallel strategy"""

    def observe(self, points, results):
        """See BaseParallelStrategy.observe"""

    def lie(self, trial):
        """See BaseParallelStrategy.lie"""
        # Only the base-class "corrupted DB" result is ever reported;
        # otherwise this strategy never lies.
        return super(NoParallelStrategy, self).lie(trial) or None
class MaxParallelStrategy(BaseParallelStrategy):
    """Parallel strategy that uses the max of completed objectives"""

    def __init__(self, default_result=float("inf")):
        """Initialize the maximum result used to lie"""
        super().__init__()
        self.default_result = default_result
        self.max_result = default_result

    @property
    def configuration(self):
        """Provide the configuration of the strategy as a dictionary."""
        return {type(self).__name__: {"default_result": self.default_result}}

    def observe(self, points, results):
        """See BaseParallelStrategy.observe"""
        super().observe(points, results)
        objectives = [
            entry["objective"] for entry in results if entry["objective"] is not None
        ]
        # Track the largest completed objective seen so far.
        if objectives:
            self.max_result = max(objectives)

    def lie(self, trial):
        """See BaseParallelStrategy.lie"""
        corrupted = super().lie(trial)
        if corrupted:
            return corrupted
        return Trial.Result(name="lie", type="lie", value=self.max_result)
class MeanParallelStrategy(BaseParallelStrategy):
    """Parallel strategy that uses the mean of completed objectives"""

    def __init__(self, default_result=float("inf")):
        """Initialize the mean result used to lie"""
        super().__init__()
        self.default_result = default_result
        self.mean_result = default_result

    @property
    def configuration(self):
        """Provide the configuration of the strategy as a dictionary."""
        return {type(self).__name__: {"default_result": self.default_result}}

    def observe(self, points, results):
        """See BaseParallelStrategy.observe"""
        super().observe(points, results)
        values = [
            entry["objective"] for entry in results if entry["objective"] is not None
        ]
        # Track the running mean of completed objectives.
        if values:
            self.mean_result = sum(values) / len(values)

    def lie(self, trial):
        """See BaseParallelStrategy.lie"""
        corrupted = super().lie(trial)
        if corrupted:
            return corrupted
        return Trial.Result(name="lie", type="lie", value=self.mean_result)
class StubParallelStrategy(BaseParallelStrategy):
    """Parallel strategy that returns static objective value for incompleted trials."""

    def __init__(self, stub_value=None):
        """Initialize the stub value"""
        super().__init__()
        self.stub_value = stub_value

    @property
    def configuration(self):
        """Provide the configuration of the strategy as a dictionary."""
        return {type(self).__name__: {"stub_value": self.stub_value}}

    def observe(self, points, results):
        """See BaseParallelStrategy.observe"""

    def lie(self, trial):
        """See BaseParallelStrategy.lie"""
        # Either the base-class corrupted-DB result, or the fixed stub value.
        return super().lie(trial) or Trial.Result(
            name="lie", type="lie", value=self.stub_value
        )
# pylint: disable=too-few-public-methods,abstract-method
class Strategy(BaseParallelStrategy, metaclass=ABCMeta if False else Factory):
    """Class used to build a parallel strategy given name and params
    .. seealso:: `orion.core.utils.Factory` metaclass and `BaseParallelStrategy` interface.
    """
    pass
|
[
"xavier.bouthillier@umontreal.ca"
] |
xavier.bouthillier@umontreal.ca
|
fb54d19b3e0e115daed7e8ea028298023bd94bc5
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02925/s813842519.py
|
fd514890376add5cc9ff63723b435deab183d427
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 831
|
py
|
# Round-by-round simulation: player v's next opponent is a[v][pos[v]];
# a match is played only when the two players name each other. Prints the
# number of rounds needed for everyone to finish their list, or -1 if a
# round passes with no match (deadlock).
n = int(input())
# a[v] is player v's ordered opponent list (1-based ids).
a = [list(map(int, input().split())) for _ in range(n)]
l = [i for i in range(n)]  # players who played in the previous round
pos = [0] * n              # index of each player's next opponent
res = 0                    # rounds elapsed
cnt = 0                    # players who finished their whole list
while True:
    res += 1
    flag = True            # stays True if no match happens this round
    tmp = l.copy()
    l = []
    used = [False] * n     # players already matched this round
    for v in tmp:
        if used[v] or pos[v] == n - 1:
            continue
        opp = a[v][pos[v]] - 1
        if used[opp] or pos[opp] == n - 1:
            continue
        # Play the match only if the choice is mutual.
        if a[opp][pos[opp]] - 1 == v:
            pos[v] += 1
            pos[opp] += 1
            l.append(v)
            l.append(opp)
            used[v] = True
            used[opp] = True
            flag = False
            if pos[v] == n - 1:
                cnt += 1
            if pos[opp] == n - 1:
                cnt += 1
    if flag:
        print(-1)
        break
    if cnt == n:
        print(res)
        break
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
b3c32db3186b4caf7016dabb7784a1f42ab8e00a
|
32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd
|
/benchmark/antennapod/testcase/firstcases/testcase6_004.py
|
9e861b2631f1cd4c20da6b5730e18a08b29e1c30
|
[] |
no_license
|
Prefest2018/Prefest
|
c374d0441d714fb90fca40226fe2875b41cf37fc
|
ac236987512889e822ea6686c5d2e5b66b295648
|
refs/heads/master
| 2021-12-09T19:36:24.554864
| 2021-12-06T12:46:14
| 2021-12-06T12:46:14
| 173,225,161
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,080
|
py
|
#coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'de.danoeh.antennapod',
'appActivity' : 'de.danoeh.antennapod.activity.SplashActivity',
'resetKeyboard' : True,
'androidCoverage' : 'de.danoeh.antennapod/de.danoeh.antennapod.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
    """Run *cmd* in a shell, let it work for *timeout* seconds, then
    terminate it. Output is captured (and discarded) via a pipe."""
    proc = subprocess.Popen(
        cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True
    )
    time.sleep(timeout)
    proc.terminate()
def getElememt(driver, str) :
    """Find a UI element by UiAutomator selector, retrying up to 5 times.

    After the retries fail, taps (50, 50) once (presumably to dismiss an
    overlay — TODO confirm) and tries one last time, letting the exception
    propagate if the element still cannot be found.
    """
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str)
        except NoSuchElementException:
            time.sleep(1)  # give the UI a moment before retrying
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str)
    return element
def getElememtBack(driver, str1, str2) :
    """Find a UI element, trying the primary selector *str1* (2 attempts)
    and then the fallback selector *str2* (5 attempts).

    Mirrors getElememt: after all retries, taps (50, 50) and tries the
    fallback one last time, letting the exception propagate on failure.
    """
    for i in range(0, 2, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str1)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    for i in range(0, 5, 1):
        try:
            element = driver.find_element_by_android_uiautomator(str2)
        except NoSuchElementException:
            time.sleep(1)
        else:
            return element
    os.popen("adb shell input tap 50 50")
    element = driver.find_element_by_android_uiautomator(str2)
    return element
def swipe(driver, startxper, startyper, endxper, endyper) :
    """Swipe across the screen using fractional coordinates (0.0-1.0 of the
    window size). Retries once after a 1 s pause if the driver throws."""
    size = driver.get_window_size()
    width = size["width"]
    height = size["height"]
    try:
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
                     end_y=int(height * endyper), duration=2000)
    except WebDriverException:
        time.sleep(1)
        driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
                     end_y=int(height * endyper), duration=2000)
    return
# testcase004
# Python 2 script body: drives the AntennaPod app through a scripted UI
# sequence via Appium, then reports PASS/FAIL and dumps JaCoCo coverage.
try :
    starttime = time.time()
    driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
    element = getElememt(driver, "new UiSelector().resourceId(\"de.danoeh.antennapod:id/refresh_item\").className(\"android.widget.TextView\")")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\").description(\"Open menu\")")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Queue\")", "new UiSelector().className(\"android.widget.TextView\").instance(5)")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"de.danoeh.antennapod:id/refresh_item\").className(\"android.widget.TextView\")")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"de.danoeh.antennapod:id/queue_lock\").className(\"android.widget.TextView\")")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"More options\")")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Clear Queue\")", "new UiSelector().className(\"android.widget.TextView\").instance(2)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Send…\")", "new UiSelector().className(\"android.widget.Button\").instance(1)")
    TouchAction(driver).tap(element).perform()
    driver.press_keycode(4)
    element = getElememtBack(driver, "new UiSelector().text(\"OK\")", "new UiSelector().className(\"android.widget.Button\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\").description(\"Open menu\")")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Settings\")", "new UiSelector().className(\"android.widget.TextView\").instance(12)")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\").description(\"Navigate up\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageButton\").description(\"Open menu\")")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().resourceId(\"de.danoeh.antennapod:id/imgvCover\").className(\"android.widget.ImageView\")")
    TouchAction(driver).long_press(element).release().perform()
    element = getElememtBack(driver, "new UiSelector().text(\"5 seconds\")", "new UiSelector().className(\"android.widget.CheckedTextView\")")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Confirm\")", "new UiSelector().className(\"android.widget.Button\").instance(1)")
    TouchAction(driver).tap(element).perform()
    element = getElememtBack(driver, "new UiSelector().text(\"Queue\")", "new UiSelector().className(\"android.widget.TextView\").instance(6)")
    TouchAction(driver).tap(element).perform()
    element = getElememt(driver, "new UiSelector().className(\"android.widget.ImageView\").description(\"More options\")")
    TouchAction(driver).long_press(element).release().perform()
    driver.press_keycode(4)  # Android BACK key
except Exception, e:
    print 'FAIL'
    print 'str(e):\t\t', str(e)
    print 'repr(e):\t', repr(e)
    print traceback.format_exc()
else:
    print 'OK'
finally:
    # Always collect timing + coverage and shut the session down.
    cpackage = driver.current_package
    endtime = time.time()
    print 'consumed time:', str(endtime - starttime), 's'
    command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"6_004\"")
    jacocotime = time.time()
    print 'jacoco time:', str(jacocotime - endtime), 's'
    driver.quit()
    if (cpackage != 'de.danoeh.antennapod'):
        cpackage = "adb shell am force-stop " + cpackage
        os.popen(cpackage)
|
[
"prefest2018@gmail.com"
] |
prefest2018@gmail.com
|
a868589ab22d6b001a1e0a8c4cd3b53b2b965c0e
|
10d89b6e07a7c72c385eb1d1c60a3e0ed9f9fc3c
|
/boss/report/views/movie_sum.py
|
5d816002dcf23fdd6d97d0c4b852eaccb658a172
|
[] |
no_license
|
cash2one/pt
|
2a4998a6627cf1604fb64ea8ac62ff1c227f0296
|
8a8c12375610182747099e5e60e15f1a9bb3f953
|
refs/heads/master
| 2021-01-20T00:36:43.779028
| 2016-11-07T03:27:18
| 2016-11-07T03:27:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,978
|
py
|
#coding: utf-8
"""
服务质量追踪-电影票
"""
from report_pub import *
def get_movie_sum_data(start_date, end_date, app, ver, channel):
    """Run the movie-ranking stored procedure and return its rows as lists
    of 8 strings, sorted descending by the first column. Falls back to one
    row of Const.NONE placeholders when there is no data."""
    # Normalize empty/falsy filters to None so the procedure sees SQL NULL.
    start_date = start_date or None
    end_date = end_date or None
    app = app or None
    ver = ver or None
    channel = channel or None
    cursor = connections['report'].cursor()
    cursor.execute("call `SP_T_RP_D_MOVIE_RANKS`(%s, %s, %s, %s, %s)",
                   [start_date, end_date, ver, channel, app])
    rows = cursor.fetchall()
    data = [[str(row[i]) for i in range(8)] for row in rows]
    if not data:
        data.append([Const.NONE] * 8)
    else:
        data.sort(key=lambda row: row[0], reverse=True)
    return data
@login_required
@permission_required(u'man.%s' % ReportConst.BA_PRODUCT_ANALYSIS_MOVIE_REPORT, raise_exception=True)
@add_common_var
def movie_sum(request, template_name):
    """Render the movie summary report page with version/channel filters."""
    app = request.GET.get("app")
    report_check_app(request, app)
    vers = get_app_versions(app)
    channels = get_app_channels(app)
    # NOTE(review): product_type and cps are computed but never used in the
    # render context below — dead code, or an omitted "cps" context entry?
    product_type = get_product_type(ReportConst.MOVIE)
    cps = get_cp_info(product_type)
    return report_render(request, template_name,{
        "currentdate": get_datestr(0, "%Y-%m-%d"),
        "vers": vers,
        "channels": channels
    })
@login_required
@permission_required(u'man.%s' % ReportConst.BA_PRODUCT_ANALYSIS_MOVIE_REPORT, raise_exception=True)
def movie_sum_ajax(request):
    """AJAX endpoint: return the movie summary rows as a JSON array."""
    params = request.POST
    app = params.get("app")
    report_check_app(request, app)
    rows = get_movie_sum_data(
        params.get("start_date"),
        params.get("end_date"),
        app,
        params.get("ver"),
        params.get("channel"),
    )
    return HttpResponse(json.dumps(rows))
@login_required
@permission_required(u'man.%s' % ReportConst.BA_PRODUCT_ANALYSIS_MOVIE_REPORT, raise_exception=True)
def movie_sum_csv(request):
    """CSV export of the movie summary report (header row + data rows)."""
    start_date = request.GET.get("start_date")
    end_date = request.GET.get("end_date")
    app = request.GET.get("app")
    report_check_app(request, app)
    ver = request.GET.get("ver")
    channel = request.GET.get("channel")
    # Filename embeds the selected date range.
    filename = '电影票汇总报表(%s-%s).csv' % (str(start_date), str(end_date))
    # Column headers (Chinese): movie name, order totals, pay counts and
    # success/failure counts with their rates.
    csv_data = [["电影名称",
                 "订单总数",
                 "订单支付数",
                 "订单支付率",
                 "订单支付成功数",
                 "订单支付成功率",
                 "订单支付失败数",
                 "订单支付失败率"]]
    csv_data.extend(get_movie_sum_data(start_date, end_date, app, ver, channel))
    return get_csv_response(filename, csv_data)
|
[
"xl@putao.cn"
] |
xl@putao.cn
|
901df2bd27ecb8f459e0b79373f18de62a4fec93
|
e21599d08d2df9dac2dee21643001c0f7c73b24f
|
/practice/concurrency/threadpool_executor.py
|
5de7abb753749c293b03937824f0c77e6414843f
|
[] |
no_license
|
herolibra/PyCodeComplete
|
c7bf2fb4ce395737f8c67749148de98a36a71035
|
4ef7d2c3aec6d28a53eed0e649cdeb74df3d783b
|
refs/heads/master
| 2022-07-17T05:39:03.554760
| 2020-05-03T07:00:14
| 2020-05-03T07:00:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 707
|
py
|
#!/usr/bin/env python
# coding=utf-8
import time
import requests
from concurrent.futures import ProcessPoolExecutor
def visit_url(url):
    """GET *url* and return the raw response body (bytes)."""
    return requests.get(url).content
if __name__ == '__main__':
    start = time.time()
    # Build the sequence of URLs to process (Bilibili video-stats API, aid 1-1000).
    urls = ['http://api.bilibili.com/x/web-interface/archive/stat?aid={0}'.format(i) for i in range(1, 1001)]
    # NOTE(review): despite the filename, this uses a *process* pool, not a
    # thread pool — for network-bound work a ThreadPoolExecutor would suffice.
    with ProcessPoolExecutor(max_workers=20) as executor:
        # result = executor.map(visit_url, urls)
        for num, result in zip(range(1, 1001), executor.map(visit_url, urls)):
            print('video ({}) = {}'.format(num, result))
    print('COST: {}'.format(time.time() - start))
|
[
"ijumper@163.com"
] |
ijumper@163.com
|
7f6f5cb399c032bf7955265b384a932ad7e4bdbd
|
3fa7203b6180ab9a8955642f1373f3e436514a1e
|
/projects/apexlearningcenter/gdata-python-client/setup.py
|
a057292cc6d08e7313830bbdba294277ff4a9563
|
[
"Apache-2.0"
] |
permissive
|
Opportunitylivetv/blog
|
ce55db2f03dba42286b403f8df0839f6f75b7eea
|
01e99810d977d8ddbb7a99b7f8f6d0c4276f3cd7
|
refs/heads/master
| 2021-07-25T21:40:01.599864
| 2017-11-08T22:10:51
| 2017-11-08T22:10:51
| 110,347,355
| 1
| 1
| null | 2017-11-11T13:26:21
| 2017-11-11T13:26:21
| null |
UTF-8
|
Python
| false
| false
| 1,987
|
py
|
#!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from distutils.core import setup
# Package metadata for the gdata Python client; sources live under src/.
# NOTE(review): distutils is deprecated in modern Python — fine for this
# historical snapshot, but new builds should use setuptools.
setup(
    name='gdata.py',
    version='1.0.10.1',
    description='Python client library for Google data APIs',
    long_description = """\
The Google data Python client library makes it easy to access data
through the Google data APIs. This library provides data model and
service modules for the the following Google data services:
- Google Calendar data API
- Google Spreadsheets data API
- Google Document List data APIs
- Google Base data API
- Google Apps Provisioning API
- Picasa Web Albums Data API
- Google Code Search Data API
- core Google data API functionality
The core Google data code provides sufficient functionality to use this
library with any Google data API (even if a module hasn't been written for
it yet). For example, this client can be used with the Picasa Web Albums data
API, the Blogger API, and the YouTube API. This library may also be used with
any Atom Publishing Protocol service.
""",
    author='Jeffrey Scudder',
    author_email='api.jscudder@gmail.com',
    license='Apache 2.0',
    url='http://code.google.com/p/gdata-python-client/',
    packages=['atom', 'gdata', 'gdata.calendar', 'gdata.base',
        'gdata.spreadsheet', 'gdata.apps', 'gdata.docs', 'gdata.codesearch',
        'gdata.photos', 'gdata.exif', 'gdata.geo', 'gdata.media'],
    package_dir = {'gdata':'src/gdata', 'atom':'src/atom'}
)
|
[
"joe@bitworking.org"
] |
joe@bitworking.org
|
b46a9f045a08dbe61856a95eec669f548997abff
|
074ce641fe9ab26835e4bfa77bdcac4aed92fcc7
|
/locations/spiders/bojangles.py
|
0e584c803734199752d1f60be07cccbd6260fee0
|
[
"MIT"
] |
permissive
|
zerebubuth/all-the-places
|
173623ef00be2517bda26aff568a342ba1168c74
|
a8b5931ca2e727194a6eb622357998dddccf1bb4
|
refs/heads/master
| 2021-07-04T04:02:54.306426
| 2017-08-02T16:17:46
| 2017-08-02T16:17:46
| 105,061,033
| 0
| 0
| null | 2017-09-27T19:35:12
| 2017-09-27T19:35:12
| null |
UTF-8
|
Python
| false
| false
| 4,326
|
py
|
# -*- coding: utf-8 -*-
import json
import scrapy
import re
from scrapy.utils.url import urljoin_rfc
from scrapy.utils.response import get_base_url
from locations.items import GeojsonPointItem
class BojanglesSpider(scrapy.Spider):
    """Scrapy spider that crawls locations.bojangles.com and yields one
    GeojsonPointItem per store."""
    name = "bojangles"
    allowed_domains = ["locations.bojangles.com"]
    start_urls = (
        'http://locations.bojangles.com/',
    )
    def store_hours(self, store_hours):
        """Convert per-day interval dicts into an OSM-style opening_hours
        string, collapsing consecutive days that share identical hours."""
        day_groups = []
        this_day_group = None
        for day_info in store_hours:
            # Two-letter day abbreviation, e.g. "Mo", "Tu".
            day = day_info['day'][:2].title()
            hour_intervals = []
            for interval in day_info['intervals']:
                # Times arrive as integers like 930 / 2130; zero-pad to HHMM.
                f_time = str(interval['start']).zfill(4)
                t_time = str(interval['end']).zfill(4)
                hour_intervals.append('{}:{}-{}:{}'.format(
                    f_time[0:2],
                    f_time[2:4],
                    t_time[0:2],
                    t_time[2:4],
                ))
            hours = ','.join(hour_intervals)
            # Extend the current group while the hours repeat; otherwise start
            # a new group.
            if not this_day_group:
                this_day_group = {
                    'from_day': day,
                    'to_day': day,
                    'hours': hours
                }
            elif this_day_group['hours'] != hours:
                day_groups.append(this_day_group)
                this_day_group = {
                    'from_day': day,
                    'to_day': day,
                    'hours': hours
                }
            elif this_day_group['hours'] == hours:
                this_day_group['to_day'] = day
        day_groups.append(this_day_group)
        opening_hours = ""
        if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):
            opening_hours = '24/7'
        else:
            for day_group in day_groups:
                if day_group['from_day'] == day_group['to_day']:
                    opening_hours += '{from_day} {hours}; '.format(**day_group)
                elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':
                    opening_hours += '{hours}; '.format(**day_group)
                else:
                    opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)
            opening_hours = opening_hours[:-2]  # strip trailing "; "
        return opening_hours
    def parse_store(self, response):
        """Extract one store page (schema.org microdata) into a point item."""
        properties = {
            'addr:full': response.xpath('//span[@itemprop="streetAddress"]/text()')[0].extract(),
            'addr:city': response.xpath('//span[@itemprop="addressLocality"]/text()')[0].extract(),
            'addr:state': response.xpath('//span[@itemprop="addressRegion"]/text()')[0].extract(),
            'addr:postcode': response.xpath('//span[@itemprop="postalCode"]/text()')[0].extract(),
            'ref': response.url,
            'website': response.url,
        }
        phone = response.xpath('//a[@class="c-phone-number-link c-phone-main-number-link"]/text()')[0].extract()
        if phone:
            properties['phone'] = phone
        # Hours are embedded as JSON in a data attribute.
        hours = json.loads(response.xpath('//div[@class="c-location-hours-today js-location-hours"]/@data-days')[0].extract())
        opening_hours = self.store_hours(hours) if hours else None
        if opening_hours:
            properties['opening_hours'] = opening_hours
        lon_lat = [
            float(response.xpath('//span/meta[@itemprop="longitude"]/@content')[0].extract()),
            float(response.xpath('//span/meta[@itemprop="latitude"]/@content')[0].extract()),
        ]
        yield GeojsonPointItem(
            properties=properties,
            lon_lat=lon_lat,
        )
    def parse(self, response):
        """Walk the state/city directory pages, dispatching store pages to
        parse_store and directory pages back to this method."""
        base_url = get_base_url(response)
        urls = response.xpath('//a[@class="c-directory-list-content-item-link"]/@href').extract()
        for path in urls:
            if len(path.split('/')) > 2:
                # If there's only one store, the URL will be longer than <state code>.html
                yield scrapy.Request(urljoin_rfc(base_url, path), callback=self.parse_store)
            else:
                yield scrapy.Request(urljoin_rfc(base_url, path))
        urls = response.xpath('//a[@class="c-location-grid-item-link"]/@href').extract()
        for path in urls:
            yield scrapy.Request(urljoin_rfc(base_url, path), callback=self.parse_store)
|
[
"ian.dees@gmail.com"
] |
ian.dees@gmail.com
|
a927f9c813867b753eff1fe9ffff2a4ca4958e48
|
f65c074b9d47a86488ea82bccf3bcea2c089b576
|
/Matc_links/Matc_links/items.py
|
ab239cf684765f22c9935412ffce36888c35f3d9
|
[
"MIT"
] |
permissive
|
Nouldine/CrawlerSystems
|
32aea71bf4f24f7f5f3430fa93576d4524bf0448
|
7bba8ba3ec76e10f70a35700602812ee6f039b63
|
refs/heads/master
| 2020-09-15T12:18:50.117873
| 2020-01-23T14:50:45
| 2020-01-23T14:50:45
| 223,441,439
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 354
|
py
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.item import Item, Field
class MatcLinksItem(scrapy.Item):
    """Item holding the hyperlinks scraped from a page.

    Fields:
        links: the extracted link (or list of links) for one record.
    """

    # Removed the scaffold comments and the redundant `pass` — the class
    # body already contains a real field definition.
    links = scrapy.Field()
|
[
"abdoulyigo@gmail.com"
] |
abdoulyigo@gmail.com
|
7d0c1a071fb41cdfe747f02f3ff8aacdd1540482
|
5864f03e60b18b6ba6d5f666f5656193a423be2a
|
/5.3-random.py
|
a810965241b091e21e408279cdb5a52e425e4457
|
[] |
no_license
|
c4collins/python-standard-library-examples
|
1ca66176cee9dab606afc8cd70f806f7c261ef81
|
ef812ca846692243604a0fc119b6eece4025f148
|
refs/heads/master
| 2021-01-01T17:05:21.504397
| 2018-01-18T21:15:06
| 2018-01-18T21:15:06
| 8,157,627
| 22
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,582
|
py
|
## 5.3 Random
# Uses the Mersenne Twister algorithm
import random, os, itertools, time, decimal, math
import cPickle as pickle
## 5.3.1 Generating Random numbers
for i in xrange(5):
print '%04.3f' % random.random(),
print
# uniform() will generate numbers within a specified range
for i in xrange(5):
print '%04.3f' % random.uniform(1,100),
print
# Seeding gives the same set of 'random' data each time
random.seed(1)
for i in xrange(5):
print '%04.3f' % random.random(),
print
random.seed(1)
for i in xrange(5):
print '%04.3f' % random.random(),
print
random.seed(1)
for i in xrange(5):
print '%04.3f' % random.random(),
print '\n'
## 5.3.3 Saving State
if os.path.exists('data/5.3.3-state.dat'):
#Restore the previously saved state
print "Found '5.3.3-state.dat', initializing random module"
with open('data/5.3.3-state.dat', 'rb') as f:
state = pickle.load(f)
random.setstate(state)
else:
# Use a known start state
print "No '5.3.3-state.dat', seeding"
random.seed(1)
# Produce random values
for i in xrange(3):
print '%04.3f' % random.random(),
print '\n'
# Save state for next time
with open('data/5.3.3-state.dat', 'wb') as f:
pickle.dump(random.getstate(), f)
# Produce more random values
print "After saving state:"
for i in xrange(3):
print '%04.3f' % random.random(),
print '\n'
# Random integers
print '[1, 100]:',
for i in xrange(3):
print random.randint(1,100),
print '\n[-5 ,5]:',
for i in xrange(3):
print random.randint(-5, 5),
print
for i in xrange(3):
print random.randrange(0, 101, 5), # random.randrange(min, max, step)
print '\n'
## 5.3.5 Picking Random Items
for j in xrange(2):
outcomes = { 'heads':0, 'tails':0, }
sides = outcomes.keys()
random.seed() # reset the seed to a random value
for i in xrange(10000):
outcomes[ random.choice(sides) ] += 1
for key in sides:
print key,':',outcomes[key]
print
## 5.3.6 Permutations
FACE_CARDS = ('J', 'Q', 'K', 'A')
SUITS = ('H', 'D', 'C', 'S')
def new_deck():
    """Return a fresh 52-card deck as (rank, suit) tuples.

    Ranks run 2-10 followed by J, Q, K, A; suits are H, D, C, S.
    (Python 2 script: note the xrange call.)
    """
    return list( itertools.product(
        itertools.chain( xrange(2, 11), FACE_CARDS ),
        SUITS
    ))
def show_deck(deck):
    """Pretty-print a deck, 13 cards per row, without mutating it."""
    # Work on a copy so the caller's deck is left intact.
    p_deck = deck[:]
    while p_deck:
        row = p_deck[:13]
        p_deck = p_deck[13:]
        for j in row:
            print '%2s%s' % j,
        print
# Make a new deck, with the cards in order
deck = new_deck()
print "\nInitial deck:"
show_deck(deck)
# Shuffle and sisplay the shuffled deck
random.shuffle(deck)
print "\nShuffled Deck:"
show_deck(deck)
# Deal 4 hands of 5 cards
hands = [ [], [], [], [] ]
for i in xrange(5):
for h in hands:
h.append(deck.pop())
# Show the hands
print "\nHands:"
for n, h in enumerate(hands):
print '%d:' % (n+1)
for c in h:
print '%2s%s' % c,
print
# Show remaining deck
print "\nRemaining deck:"
show_deck(deck)
## 5.3.6 Sampling
with open('/usr/share/dict/words', 'rt') as f:
words = f.readlines()
words = [w.rstrip() for w in words ]
for w in random.sample(words, 5):
print w,
print "\n"
## 5.3.8 Multiple Simultaneous Generators
# Each instance of Random can have these properties set on it's own, and can be utilized separately
print "Default Initialization:\n"
r1 = random.Random()
r2 = random.Random()
for i in xrange(3):
print '%04.3f %04.3f' % (r1.random(), r2.random())
print "\nSame seed:\n"
seed = time.time()
r1 = random.Random(seed)
r2 = random.Random(seed)
for i in xrange(3):
print '%04.3f %04.3f' % (r1.random(), r2.random())
print "\nForce jumpahead on r2:\n"
r2.jumpahead(1024)
for i in xrange(3):
print '%04.3f %04.3f' % (r1.random(), r2.random())
## 5.3.9 SystemRandom
# SystemRandom has the same API as Random, but uses os.urandom() to generate values
# this means seed() and setstate() do nothing because the randomness is coming from the system
print "Default Initialization:\n"
r1 = random.SystemRandom()
r2 = random.SystemRandom()
for i in xrange(3):
print '%04.3f %04.3f' % (r1.random(), r2.random())
print "\nSame seed:\n"
seed = time.time()
r1 = random.SystemRandom(seed)
r2 = random.SystemRandom(seed)
for i in xrange(3):
print '%04.3f %04.3f' % (r1.random(), r2.random())
## 5.3.10 Nonuniform Distributions
# Set up context for rounding
c = decimal.getcontext().copy()
c.rounding = 'ROUND_UP'
c.prec = 2
## Normal
mu = 7.5 # mean
sigma = 2.0 # std. deviation
print "\nNormal(mu=%d, sigma=%d):" % (mu, sigma)
normal = []
for i in xrange(20):
normal.append(c.create_decimal( random.normalvariate( mu, sigma ) ))
normal = sorted(normal)
for n in normal:
print "%02.1d" % n,
## Gauss-Normal
print "\n(Gauss) Normal(mu=%d, sigma=%d):" % (mu, sigma)
gauss = []
for i in xrange(20):
gauss.append(c.create_decimal( random.gauss( mu, sigma ) ))
gauss = sorted(gauss)
for g in gauss:
print "%02.1d" % g,
## Log-Normal
print "\n(Logarithmic) Normal(mu=%d, sigma=%d):" % (mu, sigma)
lognormal = []
for i in xrange(15):
lognormal.append(c.create_decimal( random.lognormvariate( mu, sigma ) ))
lognormal = sorted(lognormal)
for l in lognormal:
print "%02.1d" % l,
## Triangular
low = 0
high = 10
mode = 7.5
print "\nTriangular(low=%d, high=%d, mode=%d)" % ( low, high, mode)
triangular = []
for i in xrange(20):
triangular.append( c.create_decimal( random.triangular( low, high, mode ) ) )
triangular = sorted(triangular)
for t in triangular:
print "%02.1d" % t,
## Exponential
lambd = 1.0 / 7.5 # lambd is (1.0 / the desired mean)
print "\nExponential(lambd=%0.4r)" % ( lambd )
exponential = []
for i in xrange(20):
exponential.append( c.create_decimal( random.expovariate( lambd ) ) )
exponential = sorted(exponential)
for e in exponential:
print "%02.1d" % e,
## Pareto distribution
alpha = 1 # shape parameter
print "\n(Long Tail) Pareto(alpha=%d)" % ( alpha )
pareto = []
for i in xrange(20):
pareto.append( c.create_decimal( random.paretovariate( alpha ) ) )
pareto = sorted(pareto)
for p in pareto:
print "%02.1d" % p,
## Angular (Von Mises)
mu = math.pi * 1.5 # radians between 0 and 2*pi
kappa = 1.5 # concentration, must be >= 0
print "\n(Von Mises) Angular(mu=%d, kappa=%d)" % ( mu, kappa )
angular = []
for i in xrange(20):
angular.append( c.create_decimal( random.vonmisesvariate( mu, kappa ) ) )
angular = sorted(angular)
for a in angular:
print "%02.1d" % a,
## Beta distribution
alpha = 1
beta = 2
print "\nBeta(alpha=%d, beta=%d)" % ( alpha, beta )
beta_v = []
for i in xrange(20):
beta_v.append( random.betavariate( alpha, beta ) )
beta_v = sorted(beta_v)
for b in beta_v:
print c.create_decimal(b),
## Gamma distribution
print "\nGamma(alpha=%d, beta=%d)" % ( alpha, beta )
gamma = []
for i in xrange(20):
gamma.append( random.gammavariate( alpha, beta ) )
gamma = sorted(gamma)
for g in gamma:
print c.create_decimal(g),
## Weibull distribution
print "\nWeibull(alpha=%d, beta=%d)" % ( alpha, beta )
weibull = []
for i in xrange(20):
weibull.append( random.weibullvariate( alpha, beta ) )
weibull = sorted(weibull)
for w in weibull:
print c.create_decimal(w),
|
[
"connor.collins@gmail.com"
] |
connor.collins@gmail.com
|
46eb89357e79a72c0d54fe04daaea91db1801d5d
|
3c01d7928029e74a19d646f5a40b3bf099b281a7
|
/typeshed/stdlib/_thread.pyi
|
2425703121b5dd5c8b5f33b5571063d7ac4de438
|
[
"MIT"
] |
permissive
|
arpancodes/protectsql
|
f3ced238c103fca72615902a9cb719c44ee2b5ba
|
6392bb7a86d1f62b86faf98943a302f7ea3fce4c
|
refs/heads/main
| 2023-08-07T16:33:57.496144
| 2021-09-24T19:44:51
| 2021-09-24T19:44:51
| 409,894,807
| 0
| 1
|
MIT
| 2021-09-24T19:44:52
| 2021-09-24T08:46:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,427
|
pyi
|
import sys
from threading import Thread
from types import TracebackType
from typing import Any, Callable, NoReturn, Optional, Tuple, Type
error = RuntimeError
def _count() -> int: ...
_dangling: Any
class LockType:
    # Stub for the low-level lock object returned by allocate_lock();
    # supports the context-manager protocol (acquire on enter, release
    # on exit).
    def acquire(self, blocking: bool = ..., timeout: float = ...) -> bool: ...
    def release(self) -> None: ...
    def locked(self) -> bool: ...
    def __enter__(self) -> bool: ...
    def __exit__(
        self, type: Type[BaseException] | None, value: BaseException | None, traceback: TracebackType | None
    ) -> None: ...
def start_new_thread(function: Callable[..., Any], args: Tuple[Any, ...], kwargs: dict[str, Any] = ...) -> int: ...
def interrupt_main() -> None: ...
def exit() -> NoReturn: ...
def allocate_lock() -> LockType: ...
def get_ident() -> int: ...
def stack_size(size: int = ...) -> int: ...
TIMEOUT_MAX: float
if sys.version_info >= (3, 8):
def get_native_id() -> int: ... # only available on some platforms
class _ExceptHookArgs(Tuple[Type[BaseException], Optional[BaseException], Optional[TracebackType], Optional[Thread]]):
@property
def exc_type(self) -> Type[BaseException]: ...
@property
def exc_value(self) -> BaseException | None: ...
@property
def exc_traceback(self) -> TracebackType | None: ...
@property
def thread(self) -> Thread | None: ...
_excepthook: Callable[[_ExceptHookArgs], Any]
|
[
"arpanforbusiness@gmail.com"
] |
arpanforbusiness@gmail.com
|
a2fab96aa42133cf1bef5480f1a3154ef1479005
|
6923f79f1eaaba0ab28b25337ba6cb56be97d32d
|
/programming_computer_vision_with_python/cvbook-contrib/rof.py
|
cca76f74204c8c254b9909d2168401a571270eac
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
burakbayramli/books
|
9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0
|
5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95
|
refs/heads/master
| 2023-08-17T05:31:08.885134
| 2023-08-14T10:05:37
| 2023-08-14T10:05:37
| 72,460,321
| 223
| 174
| null | 2022-10-24T12:15:06
| 2016-10-31T17:24:00
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,102
|
py
|
import numpy
def denoise(im, U_init, tolerance=0.1, tau=0.125, tv_weight=30):
    """Denoise |im| with the Rudin-Osher-Fatemi (ROF) model.

    Implements Chambolle's dual projection scheme (eq. 11, p. 14 of
    "Total variation minimization and a class of binary MRF models",
    http://www.cmap.polytechnique.fr/preprint/repository/578.pdf).

    Returns (denoised image, texture residual).  If tv_weight is set to
    something large, tolerance needs to be lowered for termination.
    """
    rows, cols = im.shape
    denoised = U_init
    px = numpy.zeros((rows, cols))  # dual field, x component
    py = numpy.zeros((rows, cols))  # dual field, y component
    change = 1
    while change > tolerance:
        previous = denoised
        # Forward-difference gradient of the current estimate.
        grad_x = numpy.roll(denoised, -1, axis=1) - denoised
        grad_y = numpy.roll(denoised, -1, axis=0) - denoised
        # Gradient-ascent step on the dual variable, then reprojection
        # onto the unit ball.
        cand_x = px + (tau / tv_weight) * grad_x
        cand_y = py + (tau / tv_weight) * grad_y
        magnitude = numpy.maximum(1, numpy.sqrt(cand_x**2 + cand_y**2))
        px = cand_x / magnitude
        py = cand_y / magnitude
        # Backward-difference divergence of the dual field.
        div = (px - numpy.roll(px, 1, axis=1)) + (py - numpy.roll(py, 1, axis=0))
        denoised = im + tv_weight * div
        change = numpy.linalg.norm(denoised - previous) / numpy.sqrt(cols * rows)
    return denoised, im - denoised
|
[
"bb@b.om"
] |
bb@b.om
|
d376851a61e7d1f753b45331107716086f934b7e
|
fa9bae32c203323dfb345d9a415d4eaecb27a931
|
/33. Search in Rotated Sorted Array.py
|
e6cd02b083ea61bcdaa241d139c7425c617c6ff7
|
[] |
no_license
|
IUIUN/The-Best-Time-Is-Now
|
48a0c2e9d449aa2f4b6e565868a227b6d555bf29
|
fab660f98bd36715d1ee613c4de5c7fd2b69369e
|
refs/heads/master
| 2020-09-14T12:06:24.074973
| 2020-02-15T06:55:08
| 2020-02-15T06:55:08
| 223,123,743
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 772
|
py
|
class Solution:
    def search(self, nums: List[int], target: int) -> int:
        """Return target's index in a rotated sorted array, or -1.

        Binary search that keeps the window open (lo + 1 < hi) and, at
        each step, decides which half of the window is sorted before
        narrowing.  The endpoints are only inspected once, up front;
        every index later assigned to lo/hi was already compared as a
        midpoint.
        """
        if not nums:
            return -1
        lo, hi = 0, len(nums) - 1
        if nums[lo] == target:
            return lo
        if nums[hi] == target:
            return hi
        while lo + 1 < hi:
            mid = lo + (hi - lo) // 2
            if nums[mid] == target:
                return mid
            if nums[lo] < nums[mid]:
                # Left half [lo, mid] is sorted.
                if nums[lo] < target < nums[mid]:
                    hi = mid
                else:
                    lo = mid
            else:
                # Right half [mid, hi] is sorted.
                if nums[mid] < target < nums[hi]:
                    lo = mid
                else:
                    hi = mid
        return -1
|
[
"liuyijun0621@hotmail.com"
] |
liuyijun0621@hotmail.com
|
9dda382f321a6bd1b6fe598188a36bf543a1a2e8
|
026f12a5fdd4b3bfee00713091267aaef71047c1
|
/end/demo4/hualiservice/trade/admin.py
|
daf8549e21a189ea882ce11a3ab0e6af12b56ab1
|
[] |
no_license
|
zzy0371/py1911project
|
64c64413ea0107926ae81479adc27da87ee04767
|
7ce2a2acfc1dade24e6e7f8763fceb809fabd7a1
|
refs/heads/master
| 2023-01-08T07:51:13.388203
| 2020-03-19T03:31:33
| 2020-03-19T03:31:33
| 239,649,431
| 0
| 1
| null | 2023-01-05T09:04:53
| 2020-02-11T01:22:35
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 170
|
py
|
from django.contrib import admin
from .models import *
# Register your models here.
# Expose the cart and order models in the Django admin with the default
# ModelAdmin options.
admin.site.register(Cart)
admin.site.register(Order)
admin.site.register(OrderDetail)
|
[
"496575233@qq.com"
] |
496575233@qq.com
|
28fde1cc01caebb7f303b02bcf82fa3c46c163d1
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_examples/_algorithms_challenges/leetcode/leetCode/DynamicProgramming/132_PalindromePartitioningII.py
|
207d00a98abde367cada73c26aa59d47492dee70
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,820
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
class Solution(object):
    """Minimum-cut palindrome partitioning (bottom-up DP).

    cuts[i] holds the minimum number of cuts needed for the suffix
    s[i:]; the answer is cuts[0].  is_palindrome[i][j] records whether
    s[i:j+1] is a palindrome and is filled alongside cuts using

        is_palindrome[i][j] = s[i] == s[j] and
                              (j - i < 2 or is_palindrome[i+1][j-1])

    Whenever s[i:j+1] is a palindrome, cutting right after it costs
    1 + cuts[j+1] — or nothing if it already reaches the end of s.
    An O(n) space variant exists, see
    https://discuss.leetcode.com/topic/2840/my-solution-does-not-need-a-table-for-palindrome-is-it-right-it-uses-only-o-n-space
    """

    def minCut(self, s):
        if not s:
            return 0
        n = len(s)
        is_palindrome = [[False] * n for _ in range(n)]
        # Worst case: a cut between every pair of characters of s[i:].
        cuts = [n - 1 - i for i in range(n)]
        for i in range(n - 1, -1, -1):
            for j in range(i, n):
                if s[i] == s[j] and (j - i < 2 or is_palindrome[i + 1][j - 1]):
                    is_palindrome[i][j] = True
                    if j == n - 1:
                        cuts[i] = 0  # the whole suffix is one palindrome
                    else:
                        cuts[i] = min(cuts[i], 1 + cuts[j + 1])
        return cuts[0]
"""
""
"aab"
"aabb"
"aabaa"
"acbca"
"acbbca"
"""
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
46ff6bb671dba2e977cd92127af3a8fdffa5e7eb
|
2cd06e44dd79b45708ddf010c31289458d850b94
|
/test/functional/p2p_leak.py
|
17143f418ca822b3cde05e7a2d2f0874b7260b7b
|
[
"MIT"
] |
permissive
|
adymoloca/flocoin
|
bc66233e5b3b1af294ca6719b4a26f8829d682e4
|
d9244577577dede975c852f6fcfe1afba4d71a57
|
refs/heads/master
| 2023-08-21T23:51:28.266695
| 2021-10-06T01:40:10
| 2021-10-06T01:40:10
| 408,609,250
| 0
| 0
|
MIT
| 2021-09-30T10:11:53
| 2021-09-20T21:45:28
|
C++
|
UTF-8
|
Python
| false
| false
| 7,392
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test message sending before handshake completion.
Before receiving a VERACK, a node should not send anything but VERSION/VERACK
and feature negotiation messages (WTXIDRELAY, SENDADDRV2).
This test connects to a node and sends it a few messages, trying to entice it
into sending us something it shouldn't."""
import time
from test_framework.messages import (
msg_getaddr,
msg_ping,
msg_version,
)
from test_framework.p2p import (
P2PInterface,
P2P_SUBVERSION,
P2P_SERVICES,
P2P_VERSION_RELAY,
)
from test_framework.test_framework import FlocoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
)
PEER_TIMEOUT = 3
class LazyPeer(P2PInterface):
    """Peer that stays completely silent after connecting.

    It never sends version/verack and flags (via bad_message) anything
    the node sends that should not precede handshake completion.  The
    feature-negotiation messages wtxidrelay/sendaddrv2 are recorded
    separately instead of being treated as unexpected.
    """
    def __init__(self):
        super().__init__()
        self.unexpected_msg = False  # set when a disallowed message arrives
        self.ever_connected = False
        self.got_wtxidrelay = False
        self.got_sendaddrv2 = False
    def bad_message(self, message):
        # Record receipt of a message that must not precede the handshake.
        self.unexpected_msg = True
        print("should not have received message: %s" % message.msgtype)
    def on_open(self):
        self.ever_connected = True
    # Does not respond to "version" with "verack"
    def on_version(self, message): self.bad_message(message)
    def on_verack(self, message): self.bad_message(message)
    def on_inv(self, message): self.bad_message(message)
    def on_addr(self, message): self.bad_message(message)
    def on_getdata(self, message): self.bad_message(message)
    def on_getblocks(self, message): self.bad_message(message)
    def on_tx(self, message): self.bad_message(message)
    def on_block(self, message): self.bad_message(message)
    def on_getaddr(self, message): self.bad_message(message)
    def on_headers(self, message): self.bad_message(message)
    def on_getheaders(self, message): self.bad_message(message)
    def on_ping(self, message): self.bad_message(message)
    def on_mempool(self, message): self.bad_message(message)
    def on_pong(self, message): self.bad_message(message)
    def on_feefilter(self, message): self.bad_message(message)
    def on_sendheaders(self, message): self.bad_message(message)
    def on_sendcmpct(self, message): self.bad_message(message)
    def on_cmpctblock(self, message): self.bad_message(message)
    def on_getblocktxn(self, message): self.bad_message(message)
    def on_blocktxn(self, message): self.bad_message(message)
    # Feature negotiation is allowed pre-verack; just record it.
    def on_wtxidrelay(self, message): self.got_wtxidrelay = True
    def on_sendaddrv2(self, message): self.got_sendaddrv2 = True
# Peer that sends a version but not a verack.
class NoVerackIdlePeer(LazyPeer):
    """Half-handshaking peer: sends version, then pings and getaddrs the
    node without ever completing the handshake with a verack."""
    def __init__(self):
        self.version_received = False
        super().__init__()
    # The node's verack is expected here, so don't flag it.
    def on_verack(self, message): pass
    # When version is received, don't reply with a verack. Instead, see if the
    # node will give us a message that it shouldn't. This is not an exhaustive
    # list!
    def on_version(self, message):
        self.version_received = True
        self.send_message(msg_ping())
        self.send_message(msg_getaddr())
class P2PVersionStore(P2PInterface):
    """Peer that completes the handshake normally and keeps the version
    message it received so the test can inspect its fields."""
    version_received = None
    def on_version(self, msg):
        # Responds with an appropriate verack
        super().on_version(msg)
        self.version_received = msg
class P2PLeakTest(FlocoinTestFramework):
    """Check that the node leaks nothing before handshake completion,
    does not leak its local address in version messages, and drops
    obsolete-version peers."""
    def set_test_params(self):
        self.num_nodes = 1
        # Short peer timeout so the un-handshaked peers get disconnected
        # quickly during the test.
        self.extra_args = [[f"-peertimeout={PEER_TIMEOUT}"]]
    def create_old_version(self, nversion):
        """Build a version message carrying the given protocol version."""
        old_version_msg = msg_version()
        old_version_msg.nVersion = nversion
        old_version_msg.strSubVer = P2P_SUBVERSION
        old_version_msg.nServices = P2P_SERVICES
        old_version_msg.relay = P2P_VERSION_RELAY
        return old_version_msg
    def run_test(self):
        self.log.info('Check that the node doesn\'t send unexpected messages before handshake completion')
        # Peer that never sends a version, nor any other messages. It shouldn't receive anything from the node.
        no_version_idle_peer = self.nodes[0].add_p2p_connection(LazyPeer(), send_version=False, wait_for_verack=False)
        # Peer that sends a version but not a verack.
        no_verack_idle_peer = self.nodes[0].add_p2p_connection(NoVerackIdlePeer(), wait_for_verack=False)
        # Pre-wtxidRelay peer that sends a version but not a verack and does not support feature negotiation
        # messages which start at nVersion == 70016
        pre_wtxidrelay_peer = self.nodes[0].add_p2p_connection(NoVerackIdlePeer(), send_version=False, wait_for_verack=False)
        pre_wtxidrelay_peer.send_message(self.create_old_version(70015))
        # Wait until the peer gets the verack in response to the version. Though, don't wait for the node to receive the
        # verack, since the peer never sent one
        no_verack_idle_peer.wait_for_verack()
        pre_wtxidrelay_peer.wait_for_verack()
        no_version_idle_peer.wait_until(lambda: no_version_idle_peer.ever_connected)
        no_verack_idle_peer.wait_until(lambda: no_verack_idle_peer.version_received)
        pre_wtxidrelay_peer.wait_until(lambda: pre_wtxidrelay_peer.version_received)
        # Mine a block and make sure that it's not sent to the connected peers
        self.nodes[0].generate(nblocks=1)
        # Give the node enough time to possibly leak out a message
        time.sleep(PEER_TIMEOUT + 2)
        # Make sure only expected messages came in
        assert not no_version_idle_peer.unexpected_msg
        assert not no_version_idle_peer.got_wtxidrelay
        assert not no_version_idle_peer.got_sendaddrv2
        assert not no_verack_idle_peer.unexpected_msg
        assert no_verack_idle_peer.got_wtxidrelay
        assert no_verack_idle_peer.got_sendaddrv2
        assert not pre_wtxidrelay_peer.unexpected_msg
        assert not pre_wtxidrelay_peer.got_wtxidrelay
        assert not pre_wtxidrelay_peer.got_sendaddrv2
        # Expect peers to be disconnected due to timeout
        assert not no_version_idle_peer.is_connected
        assert not no_verack_idle_peer.is_connected
        assert not pre_wtxidrelay_peer.is_connected
        self.log.info('Check that the version message does not leak the local address of the node')
        p2p_version_store = self.nodes[0].add_p2p_connection(P2PVersionStore())
        ver = p2p_version_store.version_received
        # Check that received time is within one hour of now
        assert_greater_than_or_equal(ver.nTime, time.time() - 3600)
        assert_greater_than_or_equal(time.time() + 3600, ver.nTime)
        assert_equal(ver.addrFrom.port, 0)
        assert_equal(ver.addrFrom.ip, '0.0.0.0')
        assert_equal(ver.nStartingHeight, 201)
        assert_equal(ver.relay, 1)
        self.log.info('Check that old peers are disconnected')
        p2p_old_peer = self.nodes[0].add_p2p_connection(P2PInterface(), send_version=False, wait_for_verack=False)
        with self.nodes[0].assert_debug_log(['peer=4 using obsolete version 31799; disconnecting']):
            p2p_old_peer.send_message(self.create_old_version(31799))
            p2p_old_peer.wait_for_disconnect()
if __name__ == '__main__':
P2PLeakTest().main()
|
[
"adymoloca91@gmail.com"
] |
adymoloca91@gmail.com
|
9578af48e44f55f45d0eac8073ab379d3a7704ac
|
dc83706c0fc77dca0dde8f5d8de0c53dd746bd59
|
/cachier/scripts/cli.py
|
c467b07ac8cef327a7cab774a89c4f8a48456cc3
|
[
"MIT"
] |
permissive
|
ofirnk/cachier
|
5a773d38c6093a276a7ce735e5173461e86bcc60
|
0d4a914806e5a6d048dc4189c9f6176105f8954f
|
refs/heads/master
| 2023-06-20T07:09:09.761919
| 2021-07-22T07:56:52
| 2021-07-22T07:56:52
| 388,360,470
| 0
| 0
|
MIT
| 2021-07-22T07:15:51
| 2021-07-22T06:57:58
| null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
"""A command-line interface for cachier."""
import click
from cachier.core import _set_max_workers
# Root command group; subcommands are registered on it below.
@click.group()
def cli():
    """A command-line interface for cachier."""
# BUG FIX: click's first positional argument to @cli.command() is the
# command NAME, not its help text — the original registered a command
# literally named "Limits the number of worker threads used by
# cachier.", which is unusable from the shell.  Name the command
# explicitly and let the docstring supply the help text.
@cli.command("set-max-workers")
@click.argument('max_workers', nargs=1, type=int)
def set_max_workers(max_workers):
    """Limits the number of worker threads used by cachier."""
    _set_max_workers(max_workers)
|
[
"shaypal5@gmail.com"
] |
shaypal5@gmail.com
|
fcd760062115725444cb9e272a9278e304d34a66
|
83924510277a85ab7719598877f1cc56741854d3
|
/qatrack/notifications/service_log_scheduling/tasks.py
|
1fe758d5a5cd4ff71c7b2b8867ad29e161950829
|
[
"MIT"
] |
permissive
|
tguiot/qatrackplus
|
3cc07ed6320c9f92a2d848e3429c89f2f0051712
|
c587cb2ddbfbc116a3ce5124537b2160af09d8e1
|
refs/heads/master
| 2023-06-01T01:30:53.619027
| 2021-07-02T10:35:52
| 2021-07-02T10:35:52
| 381,009,929
| 0
| 0
|
NOASSERTION
| 2021-07-02T10:35:53
| 2021-06-28T11:39:21
| null |
UTF-8
|
Python
| false
| false
| 2,897
|
py
|
import logging
from django.conf import settings
from django.utils import timezone
from django_q.models import Schedule
from django_q.tasks import schedule
from qatrack.notifications.models import ServiceEventSchedulingNotice
from qatrack.qatrack_core.email import send_email_to_users
from qatrack.qatrack_core.tasks import run_periodic_scheduler
logger = logging.getLogger('django-q')
def run_scheduling_notices():
    """Periodic entry point: scan ServiceEventSchedulingNotice objects
    and schedule a one-off send task for each notice that is due."""
    run_periodic_scheduler(
        ServiceEventSchedulingNotice,
        "run_service_log_scheduling_notices",
        schedule_service_event_scheduling_notice,
        time_field="time",
        recurrence_field="recurrences",
    )
def schedule_service_event_scheduling_notice(notice, send_time):
    """Queue a one-shot django-q task that emails *notice* at *send_time*.

    The task name encodes the notice pk and send time, which also makes
    the schedule idempotent per (notice, time) pair.
    """
    logger.info("Service Event Scheduling notification %s for %s" % (notice.pk, send_time))
    name = "Send notification %d %s" % (notice.pk, send_time.isoformat())
    schedule(
        "qatrack.notifications.service_log_scheduling.tasks.send_scheduling_notice",
        notice.id,
        # Passed positionally as the task's `task_name` argument so the
        # task can delete its own Schedule row after a successful send.
        name,
        name=name,
        schedule_type=Schedule.ONCE,
        repeats=1,
        next_run=send_time,
        task_name=name,
    )
def send_scheduling_notice(notice_id, task_name=""):
    """Send the email for one ServiceEventSchedulingNotice.

    Silently returns (with a log line) when the notice no longer
    exists, no send is required, or it has no recipients.  On success
    the one-shot Schedule row named *task_name* is cleaned up.  The
    notice's last_sent timestamp is updated whether or not the email
    send succeeded.
    """
    notice = ServiceEventSchedulingNotice.objects.filter(id=notice_id).first()
    if notice:
        if not notice.send_required():
            logger.info("Send of ServiceEventSchedulingNotice %s requested, but no Service Event Schedules to notify about" % notice_id)  # noqa: E501
            return
        recipients = notice.recipients.recipient_emails()
        if not recipients:
            logger.info("Send of ServiceEventSchedulingNotice %s requested, but no recipients" % notice_id)
            return
    else:
        logger.info("Send of ServiceEventSchedulingNotice %s requested, but no such ServiceEventSchedulingNotice exists" % notice_id)  # noqa: E501
        return
    try:
        send_email_to_users(
            recipients,
            "service_log_scheduling/email.html",
            context={'notice': notice},
            subject_template="service_log_scheduling/subject.txt",
            text_template="service_log_scheduling/email.txt",
        )
        logger.info("Sent ServiceEventSchedulingNotice %s at %s" % (notice_id, timezone.now()))
        try:
            Schedule.objects.get(name=task_name).delete()
        except:  # noqa: E722  # pragma: nocover
            logger.exception("Unable to delete Schedule.name = %s after successful send" % task_name)
    except:  # noqa: E722  # pragma: nocover
        logger.exception(
            "Error sending email for ServiceEventSchedulingNotice %s at %s." % (notice_id, timezone.now())
        )
        # Deliberate best-effort: only re-raise when the project opts out
        # of silent email failures.
        fail_silently = getattr(settings, "EMAIL_FAIL_SILENTLY", True)
        if not fail_silently:
            raise
    finally:
        notice.last_sent = timezone.now()
        notice.save()
|
[
"randle.taylor@gmail.com"
] |
randle.taylor@gmail.com
|
c5c2760d2fc07bd602141fc4bf0326bacc3903b6
|
1b2369715f47c9276f3dd458541d0b62cf5ba237
|
/core/templator.py
|
e520946dd891c205f610bf01a342298ab7428e33
|
[] |
no_license
|
Virucek/gb_framework
|
5a68cdf4f09867db3704ec589e937ddbe68b27f0
|
50893554c80583243ed301ab52e4bc46875ad241
|
refs/heads/main
| 2023-02-13T14:01:57.808400
| 2021-01-04T22:20:07
| 2021-01-04T22:20:07
| 319,729,864
| 0
| 0
| null | 2021-01-04T22:20:20
| 2020-12-08T18:44:10
|
Python
|
UTF-8
|
Python
| false
| false
| 630
|
py
|
import os
from jinja2 import Template, Environment, FileSystemLoader
def render(template_name, folder='templates', **kwargs):
    """Render a Jinja2 template from *folder* with the given variables.

    If a ``context`` keyword argument is supplied, that dict alone is
    used as the template context; otherwise all keyword arguments are
    passed through to the template.
    """
    # Removed the dead commented-out Template(open(...)) variant and
    # configured the loader via the Environment constructor directly.
    env = Environment(loader=FileSystemLoader(folder))
    template = env.get_template(template_name)
    if 'context' in kwargs:
        # An explicit `context` dict takes precedence over loose kwargs.
        return template.render(**kwargs['context'])
    return template.render(**kwargs)
|
[
"aykin.yakov@gmail.com"
] |
aykin.yakov@gmail.com
|
e2d5e9208c5d6b345a80365e423c67dd11e07d48
|
4f510470b3093ab2c60f929221af82c79b121ca7
|
/ML/SCIENCE/day07/bi.py
|
c23037e4121ee3dd657696790e8fe62a76e9e0f4
|
[] |
no_license
|
q737645224/python3
|
ce98926c701214f0fc7da964af45ba0baf8edacf
|
4bfabe3f4bf5ba4133a16102c51bf079d500e4eb
|
refs/heads/master
| 2020-03-30T07:11:17.202996
| 2018-10-30T06:14:51
| 2018-10-30T06:14:51
| 150,921,088
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 796
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import matplotlib.pyplot as mp
# Simulate 10000 betting rounds: each round draws Binomial(9, 0.5);
# a result of 5 or more wins one chip, otherwise one chip is lost.
outcomes = np.random.binomial(9, 0.5, 10000)
chips = [1000]
for outcome in outcomes:
    if outcome >= 5:
        chips.append(chips[-1] + 1)
    else:
        chips.append(chips[-1] - 1)
chips = np.array(chips)
mp.figure('Binomial', facecolor='lightgray')
mp.title('Binomial', fontsize=20)
mp.xlabel('Round', fontsize=14)
mp.ylabel('Chip', fontsize=14)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
# Choose the line colour from the final chip count.
# NOTE(review): green marks a net loss and red a net gain — inverted
# from Western convention but consistent with CN market colours; verify
# this is intentional.
if chips[-1] < chips[0]:
    color = 'limegreen'
elif chips[-1] > chips[0]:
    color = 'orangered'
else:
    color = 'dodgerblue'
mp.plot(chips, c=color, label='Chip')
mp.legend()
mp.show()
|
[
"764375224@qq.com"
] |
764375224@qq.com
|
1126c2887c7bf5192117f4eaef69b0120a23243f
|
29b1b15e4fef90717ff7bf8b13ab9a23cdc17c51
|
/postsproject/testapp/views.py
|
cc8ee2c2ba5629d911722b4ea30e6bdf9aaf35a5
|
[] |
no_license
|
deepawalekedar319/DjangoProjects
|
93fe59812593a1e1b8f542c8c5b1642bc95f6da4
|
1780b703a3022ea17dc188ad98b0f17bb14fa12f
|
refs/heads/main
| 2023-09-03T04:48:21.201822
| 2021-11-08T05:28:00
| 2021-11-08T05:28:00
| 425,706,071
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
from django.shortcuts import render , get_object_or_404
from testapp.models import Posts
def post_view(request):
    """List every Posts row (regardless of status) via posts_list.html."""
    post_list = Posts.objects.all()
    return render(request,'testapp/posts_list.html',{'post_list':post_list})
def post_detail_view(request, year, month, day, post):
    """Display one published post identified by slug and publish date.

    Raises Http404 when no matching published post exists.

    BUG FIX: the original passed the *post* slug string as the first
    argument to get_object_or_404, which requires a Model, Manager, or
    QuerySet — that call could never succeed.  Query the Posts model.
    """
    post = get_object_or_404(
        Posts,
        slug=post,
        status='published',
        publish__year=year,
        publish__month=month,
        publish__day=day,
    )
    return render(request, 'testapp/post_detail.html', {'post': post})
|
[
"deepawalekedar319@gmail.com"
] |
deepawalekedar319@gmail.com
|
06f63f7eb1f9eb33e61b437d376faf87f4639603
|
a4191cc76c1d733c58bbb6692a75b0885bb74e13
|
/Control-Planning-master/Planning/planning_tools/carla_global_path.py
|
35d4fa8e7156ab76d6e1b702c5d87efd4b43ee7e
|
[] |
no_license
|
js7850/sonjunseong
|
69173d67e34ce2085d2e0617fbefa02cbc6676b5
|
0d8bb7c87fac07634abd4b002f1111108b42e939
|
refs/heads/main
| 2023-07-16T15:28:00.459349
| 2021-08-28T13:08:19
| 2021-08-28T13:08:19
| 400,545,944
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,383
|
py
|
import rospy
from sensor_msgs.msg import JointState
from geometry_msgs.msg import PoseStamped
import csv
import matplotlib
import threading
import math
rospy.init_node("carla_glodal_path")
class publisher:
    """Publish a rolling window of a CSV global path as JointState msgs.

    Reads waypoints from E6_path1.csv, listens for the vehicle's UTM
    position on "utm_fix", and continuously publishes the next
    self.len waypoints (x in `position`, y in `velocity`) on
    "/hdmap/path".  Publishing runs on a daemon thread; __init__
    blocks in rospy.spin().
    """
    def __init__(self):
        self.x=0
        self.y=0
        self.nodes = []   # [[x, y], ...] waypoints loaded from CSV
        self.node_x = []
        self.node_y = []
        self.path_x = []
        self.path_y = []
        self.hz = rospy.Rate(20)
        self.get_global_path()
        self.path_pub = rospy.Publisher("/hdmap/path", JointState, queue_size = 1)
        self.utm_sub = rospy.Subscriber("utm_fix", PoseStamped, self.utm_CallBack)
        self.id = 0    # index of the waypoint nearest the vehicle
        self.len = 30  # number of waypoints published per message
        Th1 = threading.Thread(target=self.publishing)
        Th1.daemon = True
        Th1.start()
        rospy.spin()
    def utm_CallBack(self, data):
        # Latest vehicle position in the same frame as the CSV waypoints.
        self.x = data.pose.position.x
        self.y = data.pose.position.y
    def get_global_path(self):
        """Load (x, y) waypoint rows from the hard-coded CSV file."""
        # name =raw_input("input carla path :")
        name = "E6_path1.csv"
        with open(name, "r") as f:
            reader = csv.reader(f, delimiter = ',')
            for rows in reader:
                x = float(rows[0])
                y = float(rows[1])
                self.nodes.append([x,y])
    def publishing(self):
        """Thread body: find the nearest waypoint and publish the window.

        NOTE(review): the "Waiting for start" loop busy-spins without a
        sleep, and a position of exactly (0, 0) is treated as "no fix" —
        confirm both are acceptable.
        """
        while self.x==0 or self.y==0:
            rospy.loginfo("Waiting for start")
        while not rospy.is_shutdown():
            id = 0
            msg = JointState()
            # Nearest waypoint within 5 m of the vehicle; for-else runs
            # the else branch when none is found.
            for i in self.nodes:
                #print( math.sqrt((i[0]-self.x)**2+(i[1] - self.y)**2), i, self.x, self.y)
                if math.sqrt((i[0]-self.x)**2+(i[1] - self.y)**2) < 5:
                    self.id = id
                    break
                id+=1
            else:
                rospy.loginfo("Path is gone")
                continue
            # Skip k waypoints ahead of the nearest one before windowing.
            k=2
            for i in range(self.id+k, self.id + self.len+k):
                x = 0
                y = 0
                try:
                    x = self.nodes[i][0]
                    y = self.nodes[i][1]
                except:
                    # Window ran past the final waypoint; publish what we have.
                    rospy.loginfo("# WARNING: Path end")
                    break
                msg.position.append(x)
                msg.velocity.append(y)
            rospy.loginfo("publishing {}".format(self.id))
            rospy.sleep(0.05)
            self.path_pub.publish(msg)
if __name__ == "__main__":
a = publisher()
|
[
"noreply@github.com"
] |
js7850.noreply@github.com
|
c139c36375ea4de3c9eed6ff3bcc1abc1ea29fd7
|
e1ef59f60ecc011305e50d12f3fa480937b61e34
|
/Problem Solving/Implementations/Utopian Tree.py
|
8c3cddf1addfff4bddbb325756151b9b8708273f
|
[] |
no_license
|
prashanthr11/HackerRank
|
7ef3c32c3b697f54880fcd5a607245d313b12e05
|
2a01cb28f2f1a8ef616026a126d95bc9e76dd903
|
refs/heads/master
| 2021-07-08T21:17:13.273389
| 2020-12-27T19:00:25
| 2020-12-27T19:00:25
| 222,383,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 548
|
py
|
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the utopianTree function below.
def utopianTree(n):
    """Return the height of the utopian tree after n growth cycles.

    The tree starts at height 1; even-numbered cycles (spring) double the
    height, odd-numbered cycles (summer) add one metre.
    """
    height = 1
    for cycle in range(n):
        height = height + 1 if cycle % 2 else height * 2
    return height
if __name__ == '__main__':
    # HackerRank harness: results are written to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    # t = number of test cases; each case supplies the cycle count n.
    t = int(input())
    for t_itr in range(t):
        n = int(input())
        result = utopianTree(n)
        fptr.write(str(result) + '\n')
    fptr.close()
|
[
"noreply@github.com"
] |
prashanthr11.noreply@github.com
|
868926954c42f7c803e11a7d7309f3de7949bd8e
|
f2b31c29d30959ab484187ed5754552a644c0256
|
/setup.py
|
847e732192b6e4fa4a48c36cf0cb2976f239929d
|
[
"Apache-2.0"
] |
permissive
|
pkimber/old_moderate
|
d385dd679909eaf3249204f39626a7f711465cc9
|
761233f9a9c660026f5197d0b5812bf5db28afbe
|
refs/heads/master
| 2016-09-05T15:34:53.960675
| 2014-01-07T14:34:53
| 2014-01-07T14:34:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
import os
from distutils.core import setup
def read_file_into_string(filename):
    """Return the contents of *filename*, resolved relative to this script.

    :param filename: file name relative to the directory containing setup.py.
    :return: the file contents as a string, or '' if the file cannot be read.
    """
    path = os.path.abspath(os.path.dirname(__file__))
    filepath = os.path.join(path, filename)
    try:
        # Use a context manager so the file handle is always closed
        # (the original leaked the handle returned by open()).
        with open(filepath) as f:
            return f.read()
    except IOError:
        return ''
def get_readme():
    """Return the project's README text, trying common filenames in order.

    Returns '' when none of the candidate files exists in the working dir.
    """
    candidates = ('README', 'README.rst', 'README.md')
    for candidate in candidates:
        if os.path.exists(candidate):
            return read_file_into_string(candidate)
    return ''
# distutils package metadata; long_description is pulled from the README.
setup(
    name='pkimber-moderate',
    packages=['moderate', 'moderate.tests', 'moderate.management', 'moderate.management.commands'],
    version='0.0.06',
    description='Moderate',
    author='Patrick Kimber',
    author_email='code@pkimber.net',
    url='git@github.com:pkimber/moderate.git',
    classifiers=[
        'Development Status :: 1 - Planning',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Office/Business :: Scheduling',
    ],
    long_description=get_readme(),
)
|
[
"code@pkimber.net"
] |
code@pkimber.net
|
ff1f8886eaa249084c6246e1aa3f939e2a40708b
|
38a9e2780ac8b800c336207a5c0a621eb1277a53
|
/tests/test_planners/test_planner_latin_hypercube.py
|
6c1b96b0e2543493d9ea123d6a11489bec8a8f51
|
[
"MIT"
] |
permissive
|
priyansh-1902/olympus
|
2454850413bb0562a1bfe20ab35fa7e770367323
|
f57ad769918c0d5d805c439ab5ffbd180af698fa
|
refs/heads/main
| 2023-06-21T05:58:49.118264
| 2021-08-07T22:19:41
| 2021-08-07T22:19:41
| 342,454,516
| 0
| 0
|
MIT
| 2021-08-07T22:19:41
| 2021-02-26T03:43:08
|
Python
|
UTF-8
|
Python
| false
| false
| 1,148
|
py
|
#!/usr/bin/env python
import pytest
from olympus import Observations, ParameterVector
from olympus.planners import LatinHypercube
# use parametrize to test multiple configurations of the planner
#@pytest.mark.parametrize("disp, eps, ftol, gtol, maxcor, maxfun, maxiter, maxls",
# [(None, 1e-8, 2.220446049250313e-9, 1e-5, 10, 15000, 15000, 20),
# (True, 1e-9, 2.220446049250313e-10, 1e-6, 15, 20000, 20000, 30)])
def test_planner_ask_tell(two_param_space):#, disp, eps, ftol, gtol, maxcor, maxfun, maxiter, maxls):
    """Exercise the LatinHypercube planner's ask/tell cycle on a two-parameter
    space (supplied by the pytest fixture ``two_param_space``)."""
    # planner = Lbfgs(disp=disp, eps=eps, ftol=ftol, gtol=gtol, maxcor=maxcor, maxfun=maxfun, maxiter=maxiter, maxls=maxls)
    planner = LatinHypercube()
    planner.set_param_space(param_space=two_param_space)
    # ask() proposes a parameter vector; tell() feeds back an observation.
    param = planner.ask()
    value = ParameterVector().from_dict({'objective': 0.})
    obs = Observations()
    obs.add_observation(param, value)
    planner.tell(observations=obs)
if __name__ == '__main__':
    # Standalone run: build a two-parameter space by hand instead of using
    # the pytest fixture and pass it directly to the test function.
    from olympus import Parameter, ParameterSpace
    param_space = ParameterSpace()
    param_space.add(Parameter(name='param_0'))
    param_space.add(Parameter(name='param_1'))
    test_planner_ask_tell(param_space)
|
[
"hase.florian@gmail.com"
] |
hase.florian@gmail.com
|
5c70d23d8d54bf46d7d2e8547bf4ec59236ac4ab
|
fb9bfe18889cdcb1efad2544bec05d1551ec14f8
|
/home-assistant/custom_components/hacs/repositories/theme.py
|
0831694927efa2d466e194c26034207a18209395
|
[
"MIT"
] |
permissive
|
macbury/SmartHouse
|
b5cac3db82ad2350dc613a7fbb19584082ac29a0
|
796afdf7552c7798fc6a2a238537a36fa1073efe
|
refs/heads/master
| 2022-12-25T10:30:47.115121
| 2022-07-10T15:03:00
| 2022-07-10T15:03:00
| 188,223,508
| 166
| 65
|
MIT
| 2022-12-10T15:46:43
| 2019-05-23T11:47:23
|
Python
|
UTF-8
|
Python
| false
| false
| 3,631
|
py
|
"""Class for themes in HACS."""
from __future__ import annotations
from typing import TYPE_CHECKING
from ..enums import HacsCategory, HacsDispatchEvent
from ..exceptions import HacsException
from ..utils.decorator import concurrent
from .base import HacsRepository
if TYPE_CHECKING:
from ..base import HacsBase
class HacsThemeRepository(HacsRepository):
    """Themes in HACS."""

    def __init__(self, hacs: HacsBase, full_name: str):
        """Initialize."""
        super().__init__(hacs=hacs)
        self.data.full_name = full_name
        self.data.full_name_lower = full_name.lower()
        self.data.category = HacsCategory.THEME
        # Theme YAML files live under "themes/" in the upstream repository.
        self.content.path.remote = "themes"
        self.content.path.local = self.localpath
        self.content.single = False

    @property
    def localpath(self):
        """Return localpath."""
        # Local install directory is derived from the file name minus ".yaml".
        return f"{self.hacs.core.config_path}/themes/{self.data.file_name.replace('.yaml', '')}"

    async def async_post_installation(self):
        """Run post installation steps."""
        # Best-effort frontend theme reload; failures are deliberately ignored.
        try:
            await self.hacs.hass.services.async_call("frontend", "reload_themes", {})
        except BaseException:  # lgtm [py/catch-base-exception] pylint: disable=broad-except
            pass

    async def validate_repository(self):
        """Validate."""
        # Run common validation steps.
        await self.common_validate()

        # Custom step 1: Validate content.
        # A compliant theme repository has at least one themes/*.yaml file.
        compliant = False
        for treefile in self.treefiles:
            if treefile.startswith("themes/") and treefile.endswith(".yaml"):
                compliant = True
                break
        if not compliant:
            raise HacsException(
                f"{self.string} Repository structure for {self.ref.replace('tags/','')} is not compliant"
            )

        if self.repository_manifest.content_in_root:
            self.content.path.remote = ""

        # Handle potential errors
        if self.validate.errors:
            for error in self.validate.errors:
                if not self.hacs.status.startup:
                    self.logger.error("%s %s", self.string, error)
        return self.validate.success

    async def async_post_registration(self):
        """Registration."""
        # Set name
        self.update_filenames()
        self.content.path.local = self.localpath

        if self.hacs.system.action:
            await self.hacs.validation.async_run_repository_checks(self)

    @concurrent(concurrenttasks=10, backoff_time=5)
    async def update_repository(self, ignore_issues=False, force=False):
        """Update."""
        if not await self.common_update(ignore_issues, force) and not force:
            return

        # Get theme objects.
        if self.repository_manifest.content_in_root:
            self.content.path.remote = ""

        # Update name
        self.update_filenames()
        self.content.path.local = self.localpath

        # Signal entities to refresh
        if self.data.installed:
            self.hacs.async_dispatch(
                HacsDispatchEvent.REPOSITORY,
                {
                    "id": 1337,
                    "action": "update",
                    "repository": self.data.full_name,
                    "repository_id": self.data.id,
                },
            )

    def update_filenames(self) -> None:
        """Get the filename to target."""
        # Pick the last matching .yaml file under the remote content path.
        for treefile in self.tree:
            if treefile.full_path.startswith(
                self.content.path.remote
            ) and treefile.full_path.endswith(".yaml"):
                self.data.file_name = treefile.filename
|
[
"me@macbury.ninja"
] |
me@macbury.ninja
|
bd2641b8e5b2b74521b6620cea1f61afcd186eae
|
77077a391973d1f8c05647d08fc135facd04fc5e
|
/xlsxwriter/test/comparison/test_background02.py
|
25a76b75104c00aaf83a899b79d12ccfe831151a
|
[
"BSD-2-Clause-Views"
] |
permissive
|
DeltaEpsilon7787/XlsxWriter
|
28fb1012eaa42ea0f82e063f28c0c548ca016c5e
|
550b9c5bd678c861dcc9f6f4072b33a69566e065
|
refs/heads/main
| 2023-08-02T09:14:10.657395
| 2021-09-06T10:51:56
| 2021-09-06T10:51:56
| 384,948,081
| 0
| 0
|
NOASSERTION
| 2021-07-11T12:57:26
| 2021-07-11T12:57:25
| null |
UTF-8
|
Python
| false
| false
| 1,300
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
from io import BytesIO
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        # Workbook produced here is compared against the stored reference file.
        self.set_filename('background02.xlsx')

    def test_create_file(self):
        """Test the creation of an XlsxWriter file with a background image."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        worksheet.set_background(self.image_dir + 'logo.jpg')

        workbook.close()

        self.assertExcelEqual()

    def test_create_file_bytestream(self):
        """Test the creation of an XlsxWriter file with a background image
        supplied as an in-memory byte stream."""
        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        # Use a context manager so the image file handle is closed even if
        # reading raises (the original opened and closed it manually).
        with open(self.image_dir + 'logo.jpg', 'rb') as image_file:
            image_data = BytesIO(image_file.read())

        worksheet.set_background(image_data, is_byte_stream=True)

        workbook.close()

        self.assertExcelEqual()
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
f3b5b32158007db75f97c4df7a3bdae34ab46ac3
|
37cfcdfa3b8f1499f5899d2dfa2a48504a690abd
|
/test/functional/mining_prioritisetransaction.py
|
fe0256f3a78b06dd52f4dbc84cd78a7169c450e4
|
[
"MIT"
] |
permissive
|
CJwon-98/Pyeongtaekcoin
|
28acc53280be34b69c986198021724181eeb7d4d
|
45a81933a98a7487f11e57e6e9315efe740a297e
|
refs/heads/master
| 2023-08-17T11:18:24.401724
| 2021-10-14T04:32:55
| 2021-10-14T04:32:55
| 411,525,736
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,616
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Pyeongtaekcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the prioritisetransaction mining RPC."""
import time
from test_framework.messages import COIN, MAX_BLOCK_BASE_SIZE
from test_framework.test_framework import PyeongtaekcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, create_confirmed_utxos, create_lots_of_big_transactions, gen_return_txouts
class PrioritiseTransactionTest(PyeongtaekcoinTestFramework):
    """Functional test for the prioritisetransaction RPC: argument
    validation, fee-delta effects on block inclusion, and getblocktemplate
    invalidation."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2
        self.extra_args = [["-printpriority=1"], ["-printpriority=1"]]

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        # Test `prioritisetransaction` required parameters
        assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction)
        assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '')
        assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '', 0)

        # Test `prioritisetransaction` invalid extra parameters
        assert_raises_rpc_error(-1, "prioritisetransaction", self.nodes[0].prioritisetransaction, '', 0, 0, 0)

        # Test `prioritisetransaction` invalid `txid`
        assert_raises_rpc_error(-8, "txid must be of length 64 (not 3, for 'foo')", self.nodes[0].prioritisetransaction, txid='foo', fee_delta=0)
        assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'Zd1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000')", self.nodes[0].prioritisetransaction, txid='Zd1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000', fee_delta=0)

        # Test `prioritisetransaction` invalid `dummy`
        txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
        assert_raises_rpc_error(-1, "JSON value is not a number as expected", self.nodes[0].prioritisetransaction, txid, 'foo', 0)
        assert_raises_rpc_error(-8, "Priority is no longer supported, dummy argument to prioritisetransaction must be 0.", self.nodes[0].prioritisetransaction, txid, 1, 0)

        # Test `prioritisetransaction` invalid `fee_delta`
        assert_raises_rpc_error(-1, "JSON value is not an integer as expected", self.nodes[0].prioritisetransaction, txid=txid, fee_delta='foo')

        self.txouts = gen_return_txouts()
        self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']

        utxo_count = 90
        utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], utxo_count)
        base_fee = self.relayfee*100 # our transactions are smaller than 100kb
        txids = []

        # Create 3 batches of transactions at 3 different fee rate levels
        range_size = utxo_count // 3
        for i in range(3):
            txids.append([])
            start_range = i * range_size
            end_range = start_range + range_size
            txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], end_range - start_range, (i+1)*base_fee)

        # Make sure that the size of each group of transactions exceeds
        # MAX_BLOCK_BASE_SIZE -- otherwise the test needs to be revised to create
        # more transactions.
        mempool = self.nodes[0].getrawmempool(True)
        sizes = [0, 0, 0]
        for i in range(3):
            for j in txids[i]:
                assert(j in mempool)
                sizes[i] += mempool[j]['size']
            assert(sizes[i] > MAX_BLOCK_BASE_SIZE) # Fail => raise utxo_count

        # add a fee delta to something in the cheapest bucket and make sure it gets mined
        # also check that a different entry in the cheapest bucket is NOT mined
        self.nodes[0].prioritisetransaction(txid=txids[0][0], fee_delta=int(3*base_fee*COIN))

        self.nodes[0].generate(1)

        mempool = self.nodes[0].getrawmempool()
        self.log.info("Assert that prioritised transaction was mined")
        assert(txids[0][0] not in mempool)
        assert(txids[0][1] in mempool)

        # Any transaction from the highest-fee batch that was mined serves as
        # the candidate for the de-prioritisation check below.
        high_fee_tx = None
        for x in txids[2]:
            if x not in mempool:
                high_fee_tx = x

        # Something high-fee should have been mined!
        assert(high_fee_tx is not None)

        # Add a prioritisation before a tx is in the mempool (de-prioritising a
        # high-fee transaction so that it's now low fee).
        self.nodes[0].prioritisetransaction(txid=high_fee_tx, fee_delta=-int(2*base_fee*COIN))

        # Add everything back to mempool
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())

        # Check to make sure our high fee rate tx is back in the mempool
        mempool = self.nodes[0].getrawmempool()
        assert(high_fee_tx in mempool)

        # Now verify the modified-high feerate transaction isn't mined before
        # the other high fee transactions. Keep mining until our mempool has
        # decreased by all the high fee size that we calculated above.
        while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]):
            self.nodes[0].generate(1)

        # High fee transaction should not have been mined, but other high fee rate
        # transactions should have been.
        mempool = self.nodes[0].getrawmempool()
        self.log.info("Assert that de-prioritised transaction is still in mempool")
        assert(high_fee_tx in mempool)
        for x in txids[2]:
            if (x != high_fee_tx):
                assert(x not in mempool)

        # Create a free transaction.  Should be rejected.
        utxo_list = self.nodes[0].listunspent()
        assert(len(utxo_list) > 0)
        utxo = utxo_list[0]

        inputs = []
        outputs = {}
        inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]})
        outputs[self.nodes[0].getnewaddress()] = utxo["amount"]
        raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
        tx_hex = self.nodes[0].signrawtransactionwithwallet(raw_tx)["hex"]
        tx_id = self.nodes[0].decoderawtransaction(tx_hex)["txid"]

        # This will raise an exception due to min relay fee not being met
        assert_raises_rpc_error(-26, "min relay fee not met", self.nodes[0].sendrawtransaction, tx_hex)
        assert(tx_id not in self.nodes[0].getrawmempool())

        # This is a less than 1000-byte transaction, so just set the fee
        # to be the minimum for a 1000-byte transaction and check that it is
        # accepted.
        self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=int(self.relayfee*COIN))

        self.log.info("Assert that prioritised free transaction is accepted to mempool")
        assert_equal(self.nodes[0].sendrawtransaction(tx_hex), tx_id)
        assert(tx_id in self.nodes[0].getrawmempool())

        # Test that calling prioritisetransaction is sufficient to trigger
        # getblocktemplate to (eventually) return a new block.
        mock_time = int(time.time())
        self.nodes[0].setmocktime(mock_time)
        template = self.nodes[0].getblocktemplate({'rules': ['segwit']})
        self.nodes[0].prioritisetransaction(txid=tx_id, fee_delta=-int(self.relayfee*COIN))
        self.nodes[0].setmocktime(mock_time+10)
        new_template = self.nodes[0].getblocktemplate({'rules': ['segwit']})

        assert(template != new_template)
if __name__ == '__main__':
    # Entry point used by the functional-test runner.
    PrioritiseTransactionTest().main()
|
[
"cjone98692996@gmail.com"
] |
cjone98692996@gmail.com
|
be05c884bf49420daeef4374a004c5cda9062076
|
306afd5282d9c24d58297478a1728a006c29e57e
|
/python3/0213_House_Robber_II.py
|
4d1ea3b9abd8dcecdc9fed21a9a7723218c62808
|
[] |
no_license
|
ytatus94/Leetcode
|
d2c1fe3995c7a065139f772569485dc6184295a9
|
01ee75be4ec9bbb080f170cb747f3fc443eb4d55
|
refs/heads/master
| 2023-06-08T17:32:34.439601
| 2023-05-29T04:33:19
| 2023-05-29T04:33:19
| 171,921,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,199
|
py
|
class Solution:
    """LeetCode 213 "House Robber II": houses are arranged in a circle, so
    the first and last house are adjacent and cannot both be robbed."""

    def rob(self, nums: list) -> int:
        """Return the maximum loot obtainable from circularly-arranged houses.

        Because house 0 and house n-1 are adjacent, the circular problem
        reduces to two linear House Robber I problems: one that excludes the
        last house and one that excludes the first.  (The original annotated
        nums as List[int] without importing typing.List, which raises
        NameError when the class is defined; the builtin ``list`` is used
        instead.)
        """
        if len(nums) == 1:
            return nums[0]
        res1 = self.house_robber(nums[:-1])  # never rob the last house
        res2 = self.house_robber(nums[1:])   # never rob the first house
        return max(res1, res2)

    def house_robber(self, nums):
        """Linear House Robber solved with two rolling variables (O(1) space).

        ``old`` holds the best loot up to house i-2 and ``new`` the best loot
        up to house i-1; each step either skips house i or adds it to ``old``.
        """
        if len(nums) == 0:
            return 0
        if len(nums) == 1:
            return nums[0]
        old = 0
        new = nums[0]
        for i in range(2, len(nums) + 1):
            old, new = new, max(new, old + nums[i - 1])
        return new
|
[
"noreply@github.com"
] |
ytatus94.noreply@github.com
|
9cbb8de9fa1b82364b1435e1745f5a067f0bce6a
|
3669cd260bdab697376feca747d1635d35f42c83
|
/lang/clang-devel/files/patch-utils_llvm-build_llvmbuild_main.py
|
2bbe4511dd58f04fbedcf24691c3bcbe18e71267
|
[] |
no_license
|
tuxillo/DPorts
|
58072bc88887c7a53a51988c76a70366bef44a93
|
f523fb13a9d3ecc5ce9a8045fdf146ae05de5399
|
refs/heads/master
| 2020-04-03T08:02:44.297511
| 2013-03-04T07:56:00
| 2013-03-04T07:56:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 820
|
py
|
$FreeBSD: ports/lang/clang-devel/files/patch-utils_llvm-build_llvmbuild_main.py,v 1.2 2012/11/17 05:58:48 svnexp Exp $
--- utils/llvm-build/llvmbuild/main.py.orig
+++ utils/llvm-build/llvmbuild/main.py
@@ -633,7 +633,13 @@
# We handle a few special cases of target names here for historical
# reasons, as these are the names configure currently comes up with.
- native_target_name = { 'x86' : 'X86',
+ native_target_name = { 'amd64' : 'X86',
+ 'arm' : 'ARM',
+ 'i386' : 'X86',
+ 'mips' : 'Mips',
+ 'powerpc' : 'PowerPC',
+ 'sparc64' : 'Sparc',
+ 'x86' : 'X86',
'x86_64' : 'X86',
'Unknown' : None }.get(opts.native_target,
opts.native_target)
|
[
"nobody@home.ok"
] |
nobody@home.ok
|
bb2aef030b5b31e6b3dc6710d92c38df6a019f77
|
e968c7b2a81eac674fe90d4988d49dc76cd6ea90
|
/Chap0/project/guess_number_advanced.py
|
c7469c5b549b102d7e4fce82232709e7f0821658
|
[] |
no_license
|
AIHackerTest/0x0o_Py101-004
|
f27c2988ef4b755546a2a64bf5f8e225c1c46c93
|
cf3fcd4d2618b63e04732ddc0cc9dfdd36e94b8d
|
refs/heads/master
| 2021-06-28T20:05:03.110594
| 2017-09-13T03:41:25
| 2017-09-13T03:41:25
| 103,240,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,957
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Program name: 猜数字进阶
Author: 0x0
Github: https://github.com/0x0o
Edition:
Edit date: 2017.08.15
游戏介绍:程序内部用 0 - 9 生成一个 4 位数,每个数位上的数字不重复,且首位数字不为零,如 1942
用户输入 4 位数进行猜测,程序返回相应提示
用 A 表示数字和位置都正确,用 B 表示数字正确但位置错误
用户猜测后,程序返回 A 和 B 的数量
比如:2A1B 表示用户所猜数字,有 2 个数字,数字、位置都正确,有 1 个数字,数字正确但位置错误
猜对或用完 10 次机会,游戏结束
"""
import random
# Build random_list: four distinct digits drawn from 0-9.
random_list = random.sample(range(10), 4)
# Regenerate while the leading digit is zero (the secret is a 4-digit number).
while random_list[0] == 0:
    random_list = random.sample(range(10), 4)
# NOTE(review): this prints the secret — presumably debug output; confirm.
print("生成的随机数为 {}".format(random_list))
# Prompt the user for a 4-digit guess (read later in the game loop).
print("请输入 4 位数进行猜测 ")
# Evaluate a user guess against the secret.
def check_guess(input_list):
    """Compare the user's 4-digit guess against the secret ``random_list``.

    Prints "<a>A<b>B", where A counts digits correct in both value and
    position and B counts digits present in the secret but misplaced.
    Returns True when all four digits match, False otherwise.
    """
    a = 0
    b = 0
    for i, num in enumerate(random_list):
        # Fix: read the input_list parameter; the original read the global
        # guess_list and silently ignored its argument.
        if int(input_list[i]) == int(random_list[i]):  # right digit, right place
            a += 1
            if a == 4:
                print("恭喜你全部猜中")
                return True
        elif int(input_list[i]) in random_list:  # right digit, wrong place
            b += 1
    if(a != 4):
        print("{}A{}B".format(a, b))
        return False
# The player gets at most 10 guesses.
chance = 10
for i in range(0,10):
    # Read the guess and split it into a list of digit characters.
    guess_num = input("> ")
    guess_list = list(guess_num)
    guess = check_guess(guess_list)
    if chance == 0:
        print("用完 10 次机会,游戏结束")
        break
    if guess:
        # Correct guess: game over.
        break
    else:
        chance -= 1
        print("你还有 {} 次机会".format(chance))
|
[
"xiaowan5219@gmail.com"
] |
xiaowan5219@gmail.com
|
fa3d745753daff8cdc2dae59e1518e0bf8f81b84
|
1c7b5b866b505b7b8c47dce504c5bd27a34d5992
|
/TargetOffer/和为S的两个数字.py
|
9b7088871e2e866064899912df859868121c9059
|
[] |
no_license
|
ii0/algorithms-6
|
2dbcb3df504810ea52b41e5129b334f62136d70a
|
3eddc77d2f3dafffd177f2a9ee28e9850da2f020
|
refs/heads/master
| 2022-04-25T23:17:53.332297
| 2019-09-19T14:52:04
| 2019-09-19T14:52:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,478
|
py
|
"""
author: buppter
datetime: 2019/8/15 16:34
题目描述
输入一个递增排序的数组和一个数字S,在数组中查找两个数,使得他们的和正好是S,如果有多对数字的和等于S,输出两个数的乘积最小的。
解决思路:
1. 已经排序,所以可以使用双指针
2. 对于未排序的,可以使用 y = s - x,借助Map, LeetCode 第一题。但要判断最小乘积,略显的复杂
"""
class Solution:
    """Find two numbers in an array that sum to a target value.

    ``FindNumbersWithSum1`` assumes a sorted (ascending) input and uses two
    pointers; ``FindNumbersWithSum2`` handles unsorted input with a hash map
    and, when several pairs match, returns the pair with the smallest product.
    """

    def FindNumbersWithSum1(self, array: list, tsum: int) -> list:
        """Two-pointer scan over a sorted array; returns [small, large] or []."""
        if not array:
            return []
        lo, hi = 0, len(array) - 1
        while lo < hi:
            pair_sum = array[lo] + array[hi]
            if pair_sum < tsum:
                lo += 1
            elif pair_sum > tsum:
                hi -= 1
            else:
                return [array[lo], array[hi]]
        return []

    def FindNumbersWithSum2(self, array: list, tsum: int) -> list:
        """Hash-map variant for unsorted input; picks the min-product pair."""
        if not array:
            return []
        seen = {}
        candidates = []
        for index, value in enumerate(array):
            if tsum - value in seen:
                candidates.append([tsum - value, value])
            else:
                seen[value] = index
        if len(candidates) == 1:
            return candidates[0]
        if not candidates:
            return []
        return self.getMin(candidates)

    def getMin(self, array):
        """Return the candidate (a list of numbers) whose element product is smallest."""
        products = []
        for candidate in array:
            product = 1
            for element in candidate:
                product *= element
            products.append(product)
        return array[products.index(min(products))]
|
[
"shixintian@aliyun.com"
] |
shixintian@aliyun.com
|
ca5a807578a341ab6150858ebc98582151ea5b7b
|
f3b233e5053e28fa95c549017bd75a30456eb50c
|
/tyk2_input/55/55-42_wat_20Abox/setin.py
|
a052af3560b39d0ff4ffd8b4721a1f81128a90d8
|
[] |
no_license
|
AnguseZhang/Input_TI
|
ddf2ed40ff1c0aa24eea3275b83d4d405b50b820
|
50ada0833890be9e261c967d00948f998313cb60
|
refs/heads/master
| 2021-05-25T15:02:38.858785
| 2020-02-18T16:57:04
| 2020-02-18T16:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,006
|
py
|
import os
# NOTE(review): Python 2 script (print statements, builtin ``file``); it fills
# AMBER TI input templates for tyk2 ligand transformations.  ``LIST1`` looks
# like a placeholder substituted by an outer templating step — confirm.
# Atoms shared by both ligands; everything else goes into the softcore masks.
final_common = ["C1", "C2", "C3", "C4", "C5", "C6", "CL1", "C8", "O9", "N10", "C11", "C12", "C13", "N14", "C15", "C16", "N17", "C18", "O19", "CL2", "H1", "H2", "H6", "HN10", "H12", "H13", "H16", "HN17"]
dir = '/mnt/scratch/songlin3/run/tyk2/L55/wat_20Abox/ti_one-step/'
res1i='55'
res1='NNN'
res2list=LIST1

for i in res2list:
    a=i.upper()
    res2='L'+a
    filesdir = dir + "%s_%s"%(res1i,i) + "/" + "files" + "/"
    os.chdir(filesdir)
    os.system("cp ../../input-files/*in .")
    os.system("cp ../%s-%s_merged.* ."%(res1i,i))
    pdb = file.readlines(open('%s-%s_merged.pdb'%(res1i,i),'r'))
    # Find residue 1's id in the merged PDB and patch it into the templates.
    for line in pdb:
        newlist = []
        newlist = line.split()
        if len(newlist) > 4 and newlist[3] == '%s'%(res1):
            resid1 = newlist[4]
            os.system("sed -i 's/ZZZ/%s/g' temp_*.in"%(resid1))
            break
    # Same for residue 2 (the template uses the literal "42" as placeholder).
    for line in pdb:
        newlist = []
        newlist = line.split()
        if len(newlist) > 4 and newlist[3] == '%s'%(res2):
            resid2 = newlist[4]
            os.system("sed -i 's/42/%s/g' temp_*.in"%(resid2))
            break
    print res1 + '>' + res2
    # Collect residue 1's non-common atoms for the :1@ softcore mask.
    atmnmlist1 = []
    for line in pdb:
        newlist=[]
        newlist = line.split()
        if len(newlist) > 4 and newlist[3] == '%s'%(res1) and newlist[2] not in final_common:
            atomname = newlist[2]
            print atomname
            atmnmlist1.append(newlist[2])
    print atmnmlist1
    sc1 = ':1@'
    for num in range(0,len(atmnmlist1)):
        sc1 = sc1 + atmnmlist1[num] + ','
    print sc1
    os.system("sed -i 's/AAA/%s/g' temp_*in"%(sc1))
    ###res2
    print res2 + '>' + res1
    # Collect residue 2's non-common atoms for the :2@ softcore mask.
    atmnmlist2 = []
    for line in pdb:
        newlist=[]
        newlist = line.split()
        if len(newlist) > 4 and newlist[3] == '%s'%(res2) and newlist[2] not in final_common:
            atomname = newlist[2]
            print atomname
            atmnmlist2.append(newlist[2])
    print atmnmlist2
    sc2 = ':2@'
    #print len(atmnmlist1)
    for num in range(0,len(atmnmlist2)):
        sc2 = sc2 + atmnmlist2[num] + ','
    print sc2
    os.system("sed -i 's/BBB/%s/g' temp_*in"%(sc2))
    # NOTE(review): os.system("cd ..") has no effect on this process's cwd;
    # the following os.chdir(dir) is what actually resets the directory.
    os.system("cd ..")
    os.chdir(dir)
|
[
"songlin3@msu.edu"
] |
songlin3@msu.edu
|
d6d6e742047319fd822c0f16e580902ce8b79fad
|
9b20743ec6cd28d749a4323dcbadb1a0cffb281b
|
/09_Data_Preparation_for_Machine_Learning/18/04_transform_evaluate.py
|
5a5532223e4999a7b4f4ff6f9f11674e3a596ea3
|
[] |
no_license
|
jggrimesdc-zz/MachineLearningExercises
|
6e1c7e1f95399e69bba95cdfe17c4f8d8c90d178
|
ee265f1c6029c91daff172b3e7c1a96177646bc5
|
refs/heads/master
| 2023-03-07T19:30:26.691659
| 2021-02-19T08:00:49
| 2021-02-19T08:00:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,144
|
py
|
# evaluate knn on the diabetes dataset with robust scaler transform
from numpy import mean
from numpy import std
from pandas import read_csv
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import RobustScaler
# load dataset
dataset = read_csv('pima-indians-diabetes.csv', header=None)
data = dataset.values
# separate into input and output columns
X, y = data[:, :-1], data[:, -1]
# ensure inputs are floats and output is an integer label
X = X.astype('float32')
y = LabelEncoder().fit_transform(y.astype('str'))
# define the pipeline
trans = RobustScaler()
model = KNeighborsClassifier()
pipeline = Pipeline(steps=[('t', trans), ('m', model)])
# evaluate the pipeline
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
n_scores = cross_val_score(pipeline, X, y, scoring='accuracy', cv=cv, n_jobs=-1)
# report pipeline performance
print('Accuracy: %.3f (%.3f)' % (mean(n_scores), std(n_scores)))
|
[
"jgrimes@jgrimes.tech"
] |
jgrimes@jgrimes.tech
|
1787a9d7699255bd09744b9a3cdb66697a4b4de1
|
468f397949b514c03b8b497bdff0c7dc6dff753c
|
/addons/hc_person/__openerp__.py
|
542444eb5404795bd7d17d31403ebb8ab04d8f9e
|
[] |
no_license
|
LasLabs/odoo-fhir
|
bc1750de277c71a453a1c75a2f3fbe6ffc8faf4b
|
4eaccbd218f89587217b932651f4eb38feb43047
|
refs/heads/master
| 2021-01-20T05:09:11.970604
| 2017-04-28T21:44:17
| 2017-04-28T21:44:17
| 89,753,143
| 1
| 0
| null | 2017-04-29T00:00:03
| 2017-04-29T00:00:03
| null |
UTF-8
|
Python
| false
| false
| 1,736
|
py
|
# -*- coding: utf-8 -*-
{
'name': "Person",
'summary': """
A person independent of a specific health-related context
""",
'description': """
Demographics and administrative information about a person independent of a specific health-related context.
**Scope and Usage**
An individual has identity outside of a healthcare setting. The Person resource is used to capture
this information and to relate the person as an individual to other resources that do have a health-related context.
For example, while a patient resource may be created and maintained by each organization providing
care for that person as a patient, a person resource provides a mechanism for linking patient resources
across different organizations and their unique patient identity domains.
""",
'author': "HL7 FHIR",
'website': "https://hl7-fhir.github.io/person.html",
'contributors': "Luigi Sison",
'maintainer': "Luigi Sison",
'license': "GPL-3",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/master/openerp/addons/base/module/module_data.xml
# for the full list
'category': 'Health Care',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['hc_base'],
# always loaded
'data': [
'security/ir.model.access.csv',
'security/hc_person_security.xml',
'views/hc_res_person_views.xml',
'views/hc_res_person_templates.xml',
],
# only loaded in demonstration mode
'demo': [
'demo/demo.xml',
],
'installable': 'True',
# 'auto-install': 'True',
}
|
[
"lsison@moxylus.com"
] |
lsison@moxylus.com
|
23432df9eef11bab3e6a972cfdcc73e473190c62
|
c43c88015f9498aed5f3b5a339d245c31781444e
|
/Free/l10n_ru/__manifest__.py
|
9b57e4476142649d8c7f06a54fccc7adfca950b0
|
[] |
no_license
|
mulaudzicalvin/perpul
|
65106d41d5197fea17628ac1a7fa7e581d29d75e
|
00e3a5ee1771d2e09a48460ca23c2e9c2ef507d6
|
refs/heads/master
| 2020-03-09T18:39:33.131420
| 2018-02-05T05:17:36
| 2018-02-05T05:17:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,046
|
py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-2016 CodUP (<http://codup.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Russia - Accounting',
'version': '3.0',
'summary': 'План счетов РФ',
'category': 'Localization/Account Charts',
'description': """
This is the base module to manage the accounting chart for Russia in OpenERP.
==============================================================================
Возможности:
- План счетов бухгалтерского учёта финансово-хозяйственной деятельности организаций, утверждённый Приказом Минфина РФ от 31.10.2000 года № 94н
""",
'author': 'CodUP',
'website': 'http://codup.com',
'depends': ['account'],
'demo': [],
'data': [
'data/account_chart.xml',
'data/account.account.template.csv',
'data/account_chart_template.xml',
'data/account_tax_template.xml',
'data/account_chart_template.yml',
],
'sequence': 1,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"daniel.podvesker@perpul.co"
] |
daniel.podvesker@perpul.co
|
65932c9daac0c0d74e2dfd51da456a1788016eff
|
a55fa75d170dec85b230b68046aae7cb18a7ea55
|
/backend/mobile_8_dec_dev_16442/wsgi.py
|
8eb7242448c4fc2456e1d329215a5f4645042dc1
|
[] |
no_license
|
crowdbotics-apps/mobile-8-dec-dev-16442
|
be0c8274472d7d396bbf12722a80cda25b371590
|
50cc76003f526ed3cfdb811988812663f449918e
|
refs/heads/master
| 2023-06-30T04:40:18.561586
| 2020-12-08T09:16:42
| 2020-12-08T09:16:42
| 319,529,324
| 0
| 0
| null | 2021-08-03T20:05:31
| 2020-12-08T04:50:35
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 421
|
py
|
"""
WSGI config for mobile_8_dec_dev_16442 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Default to the project's settings module unless the environment overrides it.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mobile_8_dec_dev_16442.settings")
# Module-level WSGI callable picked up by servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
bd8e7ae6df45a567320e605bd58c48a15da5e7c5
|
dcb1904a6acbee6a4102a86468f7f805dd4326f6
|
/hackerrank_tuple.py
|
1292ffd1494d12e6826137c66f939a1a62f1472f
|
[] |
no_license
|
Rabbi50/HackerRank-Problem-Solve
|
b7304eaaf42a9a4b85cfd9d53646d4c69f066ee1
|
1501a802f86f13c98acd75936ce79e71c862128d
|
refs/heads/master
| 2020-09-21T19:21:16.883823
| 2020-07-14T14:40:06
| 2020-07-14T14:40:06
| 224,897,275
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 731
|
py
|
# if __name__ == '__main__':
# n = int(input())
# # integer_list = map(int, input().split())
# # print()
# input_line=input()
# input_list=input_line.split()
# for i in range(n):
# input_list[i]=int(input_list[i])
# #print(input_list)
# t=tuple(input_list)
# print(hash(3))
# if __name__ == '__main__':
# n = int(input())
# integer_list = map(int, input().split())
# print(hash(tuple(integer_list)))
# numbers = input().strip().split()
# for i in range(len(numbers)):
# numbers[i] = int(numbers[i])
# T = tuple(numbers)
# print(hash(T))
if __name__ == '__main__':
    # First line is the declared element count (consumed but otherwise unused).
    n = int(input())
    integer_list = map(int, input().split())
    # Print the hash of the tuple built from the space-separated integers.
    print(hash(tuple(integer_list)))
|
[
"jasrabbi50@gmail.com"
] |
jasrabbi50@gmail.com
|
42baf2d7ff0468903f6f4794c8562724d5b3a362
|
8bb4a472344fda15985ac322d14e8f4ad79c7553
|
/Python3-Core/src/test/prompto/translate/omo/TestCategories.py
|
00b03872c5e6ee79505515f577f99228f18b9296
|
[] |
no_license
|
prompto/prompto-python3
|
c6b356f5af30c6826730ba7f2ad869f341983a2d
|
64bd3d97d4702cc912097d41d961f7ab3fd82bee
|
refs/heads/master
| 2022-12-24T12:33:16.251468
| 2022-11-27T17:37:56
| 2022-11-27T17:37:56
| 32,623,633
| 4
| 0
| null | 2019-05-04T11:06:05
| 2015-03-21T07:17:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,677
|
py
|
from prompto.parser.o.BaseOParserTest import BaseOParserTest
class TestCategories(BaseOParserTest):
    """Translation tests comparing O-dialect category resources (OMO)."""

    def setUp(self):
        # BUG FIX: `super(type(self), self).setUp()` recurses infinitely as
        # soon as a subclass inherits this setUp, because type(self) is then
        # the subclass itself. Name the class explicitly instead.
        super(TestCategories, self).setUp()

    def testAttributeConstructor(self):
        self.compareResourceOMO("categories/attributeConstructor.poc")

    def testCopyFromAscendant(self):
        self.compareResourceOMO("categories/copyFromAscendant.poc")

    def testCopyFromAscendantWithOverride(self):
        self.compareResourceOMO("categories/copyFromAscendantWithOverride.poc")

    def testCopyFromDescendant(self):
        self.compareResourceOMO("categories/copyFromDescendant.poc")

    def testCopyFromDescendantWithOverride(self):
        self.compareResourceOMO("categories/copyFromDescendantWithOverride.poc")

    def testCopyFromDocument(self):
        self.compareResourceOMO("categories/copyFromDocument.poc")

    def testCopyFromStored(self):
        self.compareResourceOMO("categories/copyFromStored.poc")

    def testEmptyConstructor(self):
        self.compareResourceOMO("categories/emptyConstructor.poc")

    def testEquals(self):
        self.compareResourceOMO("categories/equals.poc")

    def testLiteralConstructor(self):
        self.compareResourceOMO("categories/literalConstructor.poc")

    def testPopulateFalse(self):
        self.compareResourceOMO("categories/populateFalse.poc")

    def testResourceAttribute(self):
        self.compareResourceOMO("categories/resourceAttribute.poc")

    def testSynonymConstructor(self):
        self.compareResourceOMO("categories/synonymConstructor.poc")

    def testValueConstructor(self):
        self.compareResourceOMO("categories/valueConstructor.poc")
|
[
"eric.vergnaud@wanadoo.fr"
] |
eric.vergnaud@wanadoo.fr
|
64aa5b08d5c62364013595552c7828a2a2d1976f
|
df0461f16c82af1fd5c580dd9ab91094158e4c43
|
/artifacts/proxy.py
|
e514b2eee728675f2f21105934a81b76fe67406e
|
[
"Apache-2.0"
] |
permissive
|
lucaschultz/unearth
|
738021310178062f0d1893a86fe68e99eaf98b74
|
60bbc887415205b23483d0cb99c3774ab47c9c66
|
refs/heads/master
| 2020-04-22T17:20:34.191739
| 2019-02-25T15:21:44
| 2019-02-25T15:21:44
| 170,538,069
| 2
| 0
|
Apache-2.0
| 2019-02-13T16:12:55
| 2019-02-13T16:12:54
| null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
from SystemConfiguration import SCDynamicStoreCreate, SCDynamicStoreCopyValue
factoid = 'proxies'


def fact():
    '''Returns the current proxy auto-config URL, or the string 'None'.'''
    # NOTE: Python 2 code (print statement below); runs on macOS only, via
    # the PyObjC SystemConfiguration bridge.
    proxies = 'None'
    net_config = SCDynamicStoreCreate(None, "net", None, None)
    proxy_info = SCDynamicStoreCopyValue(net_config, "State:/Network/Global/Proxies")
    if proxy_info and proxy_info.get('ProxyAutoConfigURLString'):
        try:
            # The guard above already checked the key, so this KeyError
            # handler is effectively defensive only.
            proxies = proxy_info['ProxyAutoConfigURLString']
        except KeyError as err:
            pass
    return {factoid: proxies}

if __name__ == '__main__':
    print '<result>%s</result>' % fact()[factoid]
|
[
"chilcote+github@gmail.com"
] |
chilcote+github@gmail.com
|
07eee5fb5e2ef6d4bba6977b2f628b3aa2179927
|
c957fbcb133093d3331731259c557cef5ccf45d1
|
/src/contentbase/json_renderer.py
|
9f0b60111957e16ac2aaf8bf69633c4a3d46a99c
|
[
"MIT"
] |
permissive
|
ClinGen/clincoded
|
da2aa2c08cf98f7af4953f81b13b94653b9c8264
|
5624c74546ce2a44eda00ee632a8de8c2099da10
|
refs/heads/dev
| 2022-09-27T15:48:08.000844
| 2021-08-03T19:05:54
| 2021-08-03T19:05:54
| 36,758,056
| 31
| 10
|
MIT
| 2022-09-16T19:33:53
| 2015-06-02T19:54:12
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,358
|
py
|
from pyramid.threadlocal import get_current_request
import json
import pyramid.renderers
import uuid
def includeme(config):
    """Pyramid hook: install `json_renderer` as the default renderer."""
    config.add_renderer(None, json_renderer)
class JSON(pyramid.renderers.JSON):
    '''Provide easier access to the configured serializer
    '''
    def dumps(self, value):
        """Serialize `value` honoring the adapters registered on this
        renderer; the current request is resolved for the `default` hook."""
        request = get_current_request()
        default = self._make_default(request)
        return json.dumps(value, default=default, **self.kw)
class BinaryFromJSON:
    """Wrap an iterable of text chunks, yielding them UTF-8 encoded.

    Used as a WSGI app_iter on Python 3, where the response body must
    be bytes rather than str.
    """

    def __init__(self, app_iter):
        self.app_iter = app_iter

    def __len__(self):
        return len(self.app_iter)

    def __iter__(self):
        # Encode lazily; a fresh generator is produced per iteration.
        return (chunk.encode('utf-8') for chunk in self.app_iter)
class JSONResult(object):
    """File-like accumulator: `write` appends each chunk to `app_iter`."""

    def __init__(self):
        self.app_iter = []
        self.write = self.app_iter.append

    @classmethod
    def serializer(cls, value, **kw):
        """Serialize `value` into an app_iter suitable for a response body."""
        sink = cls()
        json.dump(value, sink, **kw)
        if str is bytes:
            # Python 2: chunks are already byte strings.
            return sink.app_iter
        # Python 3: wrap so chunks are encoded to bytes lazily.
        return BinaryFromJSON(sink.app_iter)
# Module-level renderer instance; the adapters below extend it to types the
# stdlib json encoder rejects.
json_renderer = JSON(serializer=JSONResult.serializer)
def uuid_adapter(obj, request):
    """Render a uuid.UUID as its canonical string form."""
    return '{0}'.format(obj)
def listy_adapter(obj, request):
    """Render sets/frozensets (or any iterable) as a JSON-friendly list."""
    return [item for item in obj]
# Register serialization adapters for types json.dumps cannot handle natively.
json_renderer.add_adapter(uuid.UUID, uuid_adapter)
json_renderer.add_adapter(set, listy_adapter)
json_renderer.add_adapter(frozenset, listy_adapter)
|
[
"laurence@lrowe.co.uk"
] |
laurence@lrowe.co.uk
|
884229c842df85409ab4a26013a3943a54b8d419
|
685038d4be188fa72e9dba1d2213a47ee3aa00a2
|
/ECOS2021/Demands/Inputs/Surveys/A/S7/Oct_S7_A.py
|
ea3279e31338f885612c658133f974cb1e135206
|
[] |
no_license
|
CIE-UMSS/Tradeoff-between-Installed-Capacity-and-Unserved-Energy
|
e5599e4e4ac60b97f0c4c57c5de95e493b1b5ac4
|
459f31552e3ab57a2e52167ab82f8f48558e173c
|
refs/heads/master
| 2023-06-01T18:09:29.839747
| 2021-06-19T15:56:26
| 2021-06-19T15:56:26
| 343,720,452
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,789
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 31 14:33:07 2020
@author: alejandrosoto
Script for 2 class of household in Raqaypampa.
"""
# -*- coding: utf-8 -*-
"""
@author: Alejandro Soto
"""
from core import User, np
User_list = []

# User classes definition: one archetype per income class. The second
# argument is presumably the number of households of that type in the
# community — TODO confirm against core.User's signature.
HI = User("high income",1)
User_list.append(HI)
LI = User("low income",0)
User_list.append(LI)
'''
Base scenario (BSA): Indoor bulb (3), outdoor bulb (1), radio (1), tv (1), phone charger (2), Water Heater (1), Mixer (1)
Base scenario (B): Indoor bulb (3), outdoor bulb (1), radio (1), tv (1), phone charger (2)
A
Scenario 1: BSA + Fridge (1) + Freezer* (1).
Scenario 2: BSA + Fridge (1).
Scenario 3: BSA + Fridge (1)*.
Scenario 4: BSA + Freezer (1).
Scenario 5: BSA + Welder (1).
Scerario 6: BSA + Grinder (1).
Scanerio 7: Add + Dryer (1),
Scenario 9: All
B
Scenario 8: BSB + Water Heater** (1).
Scenario 10: BSA + Pump Water (1).
Scenario 11: BSA + DVD (1).
Scenario 12: BSA + Blender (1).
Scenario 13: BSA + Iron (1).
Scerario 14: BSA + Mill (1).
* With seasonal variation
** Occasional use
Cold Months: May-Aug Std Cycle 8:00-18:00 Above 10 degrees
Warm Months: Jan-Apr Std Cycle 0:00-23:59 Above 10 degrees
Hot Nonths: Sep-Dec Std Cycle 0:00-10:00; 15:01-23:59 Above 10 degrees
Int Cycle 10:01-15:00
'''
# Appliance stock per income class. The positional Appliance arguments are
# assumed to be (user, number, power[W], num_windows, total functioning
# time, random variability fraction, minimum cycle time) and .windows()
# takes usage windows in minutes-of-day — TODO confirm against
# core.Appliance; only the literal values below are certain from here.

#High-Income
#indoor bulb
HI_indoor_bulb = HI.Appliance(HI,3,7,1,320,0.6,190)
HI_indoor_bulb.windows([1080,1440],[0,0])
#outdoor bulb
HI_outdoor_bulb = HI.Appliance(HI,1,13,1,340,0.1,300)
HI_outdoor_bulb.windows([1100,1440],[0,0])
#radio
HI_Radio = HI.Appliance(HI,1,7,1,280,0.3,110)
HI_Radio.windows([420,708],[0,0])
#tv
HI_TV = HI.Appliance(HI,1,60,3,300,0.38,114)
HI_TV.windows([1140,1440],[651,1139],0.35,[300,650])
#phone charger
HI_Phone_charger = HI.Appliance(HI,2,5,3,250,0.4,95)
HI_Phone_charger.windows([1190,1440],[0,420],0.35,[421,1189])
#water_heater
HI_Water_heater = HI.Appliance(HI,1,150,1,60,0.05,30)
HI_Water_heater.windows([0,1440],[0,0])
#mixer (occasional use)
HI_Mixer = HI.Appliance(HI,1,50,1,10,0.5,5,occasional_use = 0.3)
HI_Mixer.windows([420,560],[0,0])
'''
#grinder
HI_Grinder = HI.Appliance(HI,1,750,1,480,0.125,60,occasional_use = 0.3)
HI_Grinder.windows([360,1080],[0,0])
'''
#Lower Income
#indoor bulb
LI_indoor_bulb = LI.Appliance(LI,3,7,2,287,0.4,124)
LI_indoor_bulb.windows([1153,1440],[0,300],0.5)
#outdoor bulb
LI_outdoor_bulb = LI.Appliance(LI,1,13,1,243,0.3,71)
LI_outdoor_bulb.windows([1197,1440],[0,0])
#radio
LI_Radio = LI.Appliance(LI,1,7,2,160,0.3,49)
LI_Radio.windows([480,840],[841,1200],0.5)
#TV
LI_TV = LI.Appliance(LI,1,100,3,250,0.3,74)
LI_TV.windows([1170,1420],[551,1169],0.3,[300,550])
#phone charger
LI_Phone_charger = LI.Appliance(LI,2,5,3,200,0.4,82)
LI_Phone_charger.windows([1020,1440],[0,420],0.3,[720,1019])
|
[
"asm19971997@gmail.com"
] |
asm19971997@gmail.com
|
15ad905cd84616800887959795f7b7b25d2c0bc8
|
f47fe8a7d8cd87b3bfa2e172b4a9fc93e3a4abc2
|
/2015/AST1/vezbovni/Ivan/treci.py
|
92de3fe389d536db156a2b47a60dfee2cec9d33a
|
[] |
no_license
|
ispastlibrary/Titan
|
a4a7e4bb56544d28b884a336db488488e81402e0
|
f60e5c6dc43876415b36ad76ab0322a1f709b14d
|
refs/heads/master
| 2021-01-17T19:23:32.839966
| 2016-06-03T13:47:44
| 2016-06-03T13:47:44
| 60,350,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 200
|
py
|
def fahr(a):
    """Convert a Celsius temperature to Fahrenheit, printing the result."""
    converted = a * (9.0 / 5) + 32
    print("temperatura je: ", converted)
    return converted
def cels(a):
    """Convert a Fahrenheit temperature to Celsius, printing the result."""
    converted = (5 / 9) * (a - 32)
    print("temperatura je:", converted)
    return converted
# Demo conversions: 100 °C -> 212 °F and 212 °F -> 100 °C.
prvo = fahr(100)
drugo = cels(212)
|
[
"ispast.library@gmail.com"
] |
ispast.library@gmail.com
|
a79fbdaf7b77d609257fa8ea0f0ee08500283919
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03167/s857237201.py
|
ed807852eb80a79aa87a94d4e96803a8c9ee4e1c
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,538
|
py
|
def add(a, b):
    """Return (a + b) reduced modulo 1e9+7, assuming both inputs are
    already in [0, MOD).

    BUG FIX: the modulus was the float literal `1e9+7`, which silently
    promoted results to float and loses precision once values approach
    2**53. An exact integer constant keeps the arithmetic correct.
    """
    MOD = 10**9 + 7
    c = a + b
    if c >= MOD:
        c -= MOD
    return c
# Count right/down paths through an H x W grid ('#' cells are walls),
# modulo 1e9+7. Input: "H W" then H rows of '.'/'#'.
H, W = [int(x) for x in input().split()]
sl = [input() for _ in range(H)]
# dp[i][j] = number of paths from (0,0) to (i,j).
dp = [[0 for _ in range(W)] for _ in range(H)]
dp[0][0] = 1
for i in range(H):
    for j in range(W):
        # Accumulate from the cell above and the cell to the left,
        # skipping out-of-range neighbors and walls.
        for frm in [[i-1, j], [i, j-1]]:
            r, c = frm
            if r >= 0 and c >= 0 and sl[r][c] != '#':
                dp[i][j] = add(dp[i][j], dp[r][c])
# Path count for the bottom-right cell.
print(int(dp[H-1][W-1]))
"""
using ll = long long;
int add(int a, int b) {
int MOD = 1e9 + 7;
return (a + b) % MOD;
}
int main() {
int H;
int W;
scanf("%d%d\n", &H, &W);
vector<vector<int>> dp(H, vector<int>(W));
vector<vector<bool>> is_wall(H, vector<bool>(W));
for (int i = 0; i < H; i++) {
scanf("\n");
for (int j = 0; j < W; j++) {
char ch;
scanf("%c", &ch);
//cout << i << " " << j << " " << ch << endl;
if (ch == '#') {
is_wall[i][j] = true;
}
}
}
dp[0][0] = 1;
for (int i = 0; i < H; i++) {
for (int j = 0; j < W; j++) {
//cout << i << " " << j << " " << is_wall[i][j] << endl;
if (!is_wall[i][j]) {
if (i - 1 >= 0) {
dp[i][j] = add(dp[i][j], dp[i-1][j]);
}
if (j - 1 >= 0) {
dp[i][j] = add(dp[i][j], dp[i][j-1]);
}
}
}
}
printf("%d\n", dp[H-1][W-1]);
return 0;
}
"""
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
754965cb553d4700b0aea09f00514c8478a8e968
|
1bf9f6b0ef85b6ccad8cb029703f89039f74cedc
|
/src/mixed-reality/azext_mixed_reality/vendored_sdks/mixedreality/models/_mixed_reality_client_enums.py
|
3c095b5688815dc47d4e0a5d9e8e2dad0cf60b42
|
[
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
VSChina/azure-cli-extensions
|
a1f4bf2ea4dc1b507618617e299263ad45213add
|
10b7bfef62cb080c74b1d59aadc4286bd9406841
|
refs/heads/master
| 2022-11-14T03:40:26.009692
| 2022-11-09T01:09:53
| 2022-11-09T01:09:53
| 199,810,654
| 4
| 2
|
MIT
| 2020-07-13T05:51:27
| 2019-07-31T08:10:50
|
Python
|
UTF-8
|
Python
| false
| false
| 2,085
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum, EnumMeta
from six import with_metaclass
class _CaseInsensitiveEnumMeta(EnumMeta):
    """Enum metaclass making member lookup case-insensitive
    (autogenerated by AutoRest; see the file header)."""

    def __getitem__(self, name):
        # Allow Enum['user'] as well as Enum['USER'].
        return super().__getitem__(name.upper())

    def __getattr__(cls, name):
        """Return the enum member matching `name`
        We use __getattr__ instead of descriptors or inserting into the enum
        class' __dict__ in order to support `name` and `value` being both
        properties for enum members (which live in the class' __dict__) and
        enum members themselves.
        """
        try:
            return cls._member_map_[name.upper()]
        except KeyError:
            raise AttributeError(name)
class CreatedByType(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """The type of identity that created the resource.
    """
    # str-valued so members serialize directly into REST payloads.

    USER = "User"
    APPLICATION = "Application"
    MANAGED_IDENTITY = "ManagedIdentity"
    KEY = "Key"
class NameUnavailableReason(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """reason of name unavailable.
    """

    INVALID = "Invalid"
    ALREADY_EXISTS = "AlreadyExists"
class Serial(with_metaclass(_CaseInsensitiveEnumMeta, int, Enum)):
    """serial of key to be regenerated
    """
    # int-valued (unlike the str enums above): the wire format is numeric.

    #: The Primary Key.
    PRIMARY = 1
    #: The Secondary Key.
    SECONDARY = 2
class SkuTier(with_metaclass(_CaseInsensitiveEnumMeta, str, Enum)):
    """This field is required to be implemented by the Resource Provider if the service has more than
    one tier, but is not required on a PUT.
    """

    FREE = "Free"
    BASIC = "Basic"
    STANDARD = "Standard"
    PREMIUM = "Premium"
|
[
"noreply@github.com"
] |
VSChina.noreply@github.com
|
36d142620364d13b8ee4ffa85e69d9eede13dc46
|
ebfcae1c5ba2997b2ac4471d5bedc3f5daffcb31
|
/TrackLater-master/tracklater/main.py
|
72e022c9b512b865703c592e30c9a97c5fa8c49a
|
[
"MIT"
] |
permissive
|
babiato/flaskapp1
|
84de2d0b26a54f5820d3bbe97926782ad41e005c
|
530beb9e3b8516e0e93960b99521c23a523ef546
|
refs/heads/master
| 2023-02-26T16:36:49.760632
| 2021-02-04T09:08:40
| 2021-02-04T09:08:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,766
|
py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import importlib
from typing import Dict
from types import ModuleType
from tracklater import settings
from tracklater.timemodules.interfaces import AbstractParser
from tracklater.models import ApiCall, Entry, Issue, Project
from tracklater.database import db
import logging
logger = logging.getLogger(__name__)
def store_parser_to_database(parser, module_name, start_date, end_date):
    """Replace this module's entries inside [start_date, end_date] with the
    parser's freshly parsed data, then record the fetch as an ApiCall.

    Commits the session at the end.
    """
    # Drop the module's existing entries in the fetch window first, so the
    # merges below cannot leave stale rows behind.
    Entry.query.filter(
        Entry.module == module_name, Entry.start_time >= start_date,
        Entry.start_time <= end_date
    ).delete()
    for entry in parser.entries:
        entry.module = module_name
        db.session.merge(entry)
    for issue in parser.issues:
        issue.module = module_name
        db.session.merge(issue)
    # NOTE(review): Project.query.delete() wipes *all* projects (every
    # module), not just this module's — confirm this is intended.
    Project.query.delete()
    for project in parser.projects:
        project.module = module_name
        db.session.merge(project)
    # Record the fetch window so later runs can reuse cached data.
    db.session.add(ApiCall(
        start_date=start_date,
        end_date=end_date,
        module=module_name
    ))
    db.session.commit()
def set_parser_caching_data(parser, module_name):
    """Prime `parser` with the window and row counts of a previous fetch,
    letting it skip work that is already in the database."""
    # NOTE(review): order_by('created') ascending makes .first() the *oldest*
    # ApiCall for this module — confirm the most recent was not intended.
    apicall = ApiCall.query.filter_by(module=module_name).order_by('created').first()
    if apicall:
        parser.set_database_values(
            start_date=apicall.start_date,
            end_date=apicall.end_date,
            issue_count=Issue.query.filter_by(module=module_name).count(),
            entry_count=Entry.query.filter_by(module=module_name).count(),
            project_count=Project.query.filter_by(module=module_name).count(),
        )
class Parser(object):
    """Loads one time-module parser per enabled module and runs them over
    [start_date, end_date], persisting results to the database.

    `modules`, when given, restricts the run to that subset of
    settings.ENABLED_MODULES.
    """

    def __init__(self, start_date, end_date, modules=None) -> None:
        self.start_date = start_date
        self.end_date = end_date
        self.modules: Dict[str, AbstractParser] = {}
        for module_name in settings.ENABLED_MODULES:
            if modules and module_name not in modules:
                continue
            module: ModuleType = importlib.import_module(
                'tracklater.timemodules.{}'.format(module_name)
            )
            if getattr(module, 'Parser', None) is None:
                # BUG FIX: previously this only warned and then accessed
                # module.Parser anyway, raising AttributeError. Skip the
                # module instead.
                logger.warning('Module %s has no Parser class', module_name)
                continue
            parser = module.Parser(self.start_date, self.end_date)  # type: ignore
            self.modules[module_name] = parser

    def parse(self) -> None:
        """Run every loaded parser and store its output in the database."""
        for module_name, parser in self.modules.items():
            # Give the parser its cached window/counts before parsing.
            set_parser_caching_data(parser, module_name)
            parser.parse()
            logger.warning("Parsing %s", module_name)
            store_parser_to_database(self.modules[module_name], module_name,
                                     start_date=self.start_date, end_date=self.end_date)
            logger.warning("Task done %s", module_name)
|
[
"jinxufang@tencent.com"
] |
jinxufang@tencent.com
|
d96a9159f7e818a5432f964d54d8790c633a202a
|
3458efd930792fc768f53d773603c917d172ac3d
|
/webapp/store_frontend/StoreFrontendController.py
|
6eb3e4be6e6f7a353059d8204ad8dd15017c0497
|
[] |
no_license
|
binary-butterfly/shared-delivery
|
0a4a90d9c42d7948267d674da1d1ec323d345c1b
|
63167a6f7d80c822ac02ffc6dd698fcf1ff9e37e
|
refs/heads/master
| 2022-08-17T15:03:04.568889
| 2020-04-28T16:05:00
| 2020-04-28T16:05:00
| 249,144,165
| 15
| 9
| null | 2022-07-20T23:00:35
| 2020-03-22T08:42:59
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 6,031
|
py
|
# encoding: utf-8
"""
Copyright (c) 2017, Ernesto Ruge
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from flask import Blueprint, render_template, flash, redirect, abort
from ..models import Store, OpeningTime, ObjectDump, Region
from ..extensions import db
from .StoreFrontendForm import StoreFrontendForm
from ..store_management.StoreManagementHelper import get_opening_times_for_form, create_store_revision
# Blueprint hosting all customer-facing store routes registered below.
store_frontend = Blueprint('store_frontend', __name__, template_folder='templates')
@store_frontend.route('/store/<int:store_id>')
def store_frontend_main(store_id):
    """Store detail page: renders the store with its opening times grouped
    by type ('all', 'delivery', 'pickup')."""
    store = Store.query.get_or_404(store_id)
    opening_times_raw = OpeningTime.query.filter_by(store_id=store.id).order_by(OpeningTime.weekday, OpeningTime.open).all()
    opening_times = {
        'all': [],
        'delivery': [],
        'pickup': []
    }
    for opening_time_raw in opening_times_raw:
        # NOTE(review): a row whose `type` is outside the three keys above
        # would raise KeyError here — presumably the model constrains it.
        opening_times[opening_time_raw.type].append(opening_time_raw)
    return render_template('store-frontend.html', store=store, opening_times=opening_times)
@store_frontend.route('/store/<string:region_slug>/suggest', methods=['GET', 'POST'])
def store_frontend_suggest_new(region_slug):
    """Public form to suggest a brand-new store for a region.

    The suggestion is not written to the Store table directly; it is stored
    as an ObjectDump of type 'suggestion' for moderation.
    """
    form = StoreFrontendForm()
    region = Region.query.filter_by(slug=region_slug).first()
    if not region:
        abort(404)
    if form.validate_on_submit():
        # Detach the opening-time subforms before populate_obj(), which
        # would otherwise try to set them as Store attributes.
        opening_times_data = {}
        for field in ['all', 'delivery', 'pickup']:
            opening_times_data[field] = getattr(form, 'opening_times_%s' % field)
            delattr(form, 'opening_times_%s' % field)
        store = Store()
        form.populate_obj(store)
        store.region_id = region.id
        store_suggestion = store.to_dict()
        store_suggestion['opening_time'] = []
        for field in ['all', 'delivery', 'pickup']:
            # NOTE(review): this fetches the *_switch field object, not its
            # .data; if the field object is always truthy the condition
            # never filters — confirm against StoreFrontendForm.
            if getattr(form, '%s_switch' % field):
                for opening_time in opening_times_data[field]:
                    store_suggestion['opening_time'].append({
                        'type': field,
                        'weekday': opening_time.weekday.data,
                        'open': opening_time.open.data_out,
                        'close': opening_time.close.data_out
                    })
        store_suggestion['category'] = form.category.data
        object_dump = ObjectDump()
        object_dump.data = store_suggestion
        object_dump.type = 'suggestion'
        object_dump.object = 'store'
        object_dump.region_id = store.region_id
        object_dump.object_id = store.id
        db.session.add(object_dump)
        db.session.commit()
        flash('Danke für Deinen Verbesserungsvorschlag! Wir schauen kurz drüber und schalten diesen dann normalerweise binnen 24 Stunden frei.', 'success')
        return redirect('/')
    return render_template('store-suggest-new.html', form=form)
@store_frontend.route('/store/<int:store_id>/suggest', methods=['GET', 'POST'])
def store_frontend_suggest(store_id):
    """Public form to suggest edits to an existing store.

    The edit is captured as an ObjectDump 'suggestion' for moderation; the
    mapped Store row itself is left unchanged via session rollback.
    """
    store = Store.query.get_or_404(store_id)
    form = StoreFrontendForm(obj=store)
    if form.validate_on_submit():
        # Detach the opening-time subforms before populate_obj(), which
        # would otherwise try to set them as Store attributes.
        opening_times_data = {}
        for field in ['all', 'delivery', 'pickup']:
            opening_times_data[field] = getattr(form, 'opening_times_%s' % field)
            delattr(form, 'opening_times_%s' % field)
        form.populate_obj(store)
        store_suggestion = store.to_dict()
        # Discard the in-memory mutation of the mapped Store: only the
        # snapshot taken above is persisted (as a suggestion).
        db.session.rollback()
        store_suggestion['opening_time'] = []
        for field in ['all', 'delivery', 'pickup']:
            # NOTE(review): this fetches the *_switch field object, not its
            # .data; if the field object is always truthy the condition
            # never filters — confirm against StoreFrontendForm.
            if getattr(form, '%s_switch' % field):
                for opening_time in opening_times_data[field]:
                    store_suggestion['opening_time'].append({
                        'type': field,
                        'weekday': opening_time.weekday.data,
                        'open': opening_time.open.data_out,
                        'close': opening_time.close.data_out
                    })
        store_suggestion['category'] = form.category.data
        object_dump = ObjectDump()
        object_dump.data = store_suggestion
        object_dump.type = 'suggestion'
        object_dump.object = 'store'
        object_dump.region_id = store.region_id
        object_dump.object_id = store.id
        db.session.add(object_dump)
        db.session.commit()
        flash('Danke für Deinen Verbesserungsvorschlag! Wir schauen kurz drüber und schalten diesen dann normalerweise binnen 24 Stunden frei.', 'success')
        return redirect('/store/%s' % store.id)
    return render_template('store-suggest.html', store=store, opening_times=get_opening_times_for_form(store.id), form=form)
|
[
"mail@ernestoruge.de"
] |
mail@ernestoruge.de
|
6fec5b707195f997de20929632b6dabf2412d1e1
|
aa6059b13468595a872897694572767d278318d1
|
/RemoveVideoWaterMark/LightVideo.py
|
d31d6d5dfb4b69a7001b0e7f4d10a0d15614204c
|
[] |
no_license
|
18708111002/Tools
|
3845273724fc9bd2b1e31991339053448d08bfa2
|
c81f6df8ac7e57c0c544be78a706c919c3c57384
|
refs/heads/master
| 2022-11-09T10:47:33.608418
| 2018-06-08T09:14:11
| 2018-06-08T09:14:11
| 127,995,521
| 1
| 2
| null | 2022-11-02T07:22:24
| 2018-04-04T02:27:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,625
|
py
|
#encode-UTF-8
from watchdog.observers import Observer
from watchdog.events import *
import time
class FileEventHandler(FileSystemEventHandler):
    """Logs filesystem events; a newly created directory triggers an ffmpeg
    delogo pass intended to blank a watermark region."""

    def __init__(self):
        FileSystemEventHandler.__init__(self)

    def on_moved(self, event):
        if event.is_directory:
            print("directory moved from {0} to {1}".format(event.src_path,event.dest_path))
        else:
            print("file moved from {0} to {1}".format(event.src_path,event.dest_path))

    def on_created(self, event):
        if event.is_directory:
            # BUG FIX: `os` was never imported at module level, so this
            # branch raised NameError the first time it fired; import it
            # locally to keep the module's import surface unchanged.
            import os
            print("Starting processing " + event.src_path)
            # NOTE(review): input and output paths are identical here;
            # ffmpeg normally refuses to overwrite its own input — confirm
            # the intended output location.
            cmd = (r"D:\ffmpeg\bin\ffmpeg -i " + event.src_path +
                   r" -vf delogo=x=650:y=32:w=160:h=65 " + event.src_path)
            os.system(cmd)
            print("directory created:{0}".format(event.src_path))
        else:
            print("file created:{0}".format(event.src_path))

    def on_deleted(self, event):
        if event.is_directory:
            print("directory deleted:{0}".format(event.src_path))
        else:
            print("file deleted:{0}".format(event.src_path))

    def on_modified(self, event):
        if event.is_directory:
            print("directory modified:{0}".format(event.src_path))
        else:
            print("file modified:{0}".format(event.src_path))
if __name__ == "__main__":
observer = Observer()
event_handler = FileEventHandler()
observer.schedule(event_handler,"d:/outputvideo",True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
[
"18708111002@163.com"
] |
18708111002@163.com
|
5154db3907a3d17cdf26b8e4ff5596f31844b55c
|
f9f1f887629855bbf12ecb0b7358fed5946b3caa
|
/.history/app_blog_forum/views_20201117201218.py
|
bd4632b14091e7318696c02acc68f232583f1721
|
[] |
no_license
|
hibamohi5/blog_forum
|
4f687cee3ca6bdb1d0302b3657a77c01945404b3
|
d6380eb7149355c79276b738da7da94c2ee03570
|
refs/heads/main
| 2023-01-14T18:33:53.043754
| 2020-11-20T01:52:22
| 2020-11-20T01:52:22
| 314,417,118
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,269
|
py
|
from django.shortcuts import render, redirect
from .models import *
from django.contrib import messages
def index(request):
    """Landing page with the registration form."""
    return render(request, "index.html")
def register_new_user(request):
    """Validate the registration POST, create the user, and log them in by
    storing their id in the session; on validation errors, flash each and
    return to the index."""
    errors = User.objects.user_registration_validator(request.POST)
    if len(errors) > 0:
        for key, value in errors.items():
            error_msg = key + ' - ' + value
            messages.error(request, error_msg)
        return redirect("/")
    else:
        first_name_from_post = request.POST['first_name']
        last_name_from_post = request.POST['last_name']
        email_from_post = request.POST['email']
        password_from_post = request.POST['password']
        # SECURITY: the password is stored as plain text here — it should be
        # hashed (e.g. django.contrib.auth or bcrypt) before persisting.
        new_user = User.objects.create(
            first_name=first_name_from_post,
            last_name=last_name_from_post,
            email=email_from_post,
            password=password_from_post
        )
        # Leftover debug output.
        print(new_user.id)
        request.session['user_id'] = new_user.id
        return redirect('/register/view')
def view_home(request):
    """Render the home page for the logged-in user; redirect to the index
    when no user id is in the session."""
    if 'user_id' not in request.session:
        return redirect('/')
    user = User.objects.get(id=request.session['user_id'])
    context = {
        'user': user
    }
    # BUG FIX: previously printed the undefined name `user_id` (NameError)
    # and called render() without a template name (TypeError at runtime).
    # TODO(review): confirm the template name against the project's
    # templates directory.
    return render(request, 'home.html', context)
|
[
"hibamohi5@gmail.com"
] |
hibamohi5@gmail.com
|
50e3b6211cb784adbca528e697dca518ab8b7ac8
|
50f202b7068abcac204e795ee7a2dc9f13ab07e3
|
/mchck_swd.py
|
e01cd7996edaa5390d5e681d593db025609fb332
|
[] |
permissive
|
twitchyliquid64/PySWD
|
7830dd9213167d82f567bf5d912b930fa9bfb0e7
|
2981d4dcc385cd58f3c2423b359f3f53623184e0
|
refs/heads/master
| 2020-03-22T17:17:41.642609
| 2018-07-19T01:05:19
| 2018-07-19T01:05:19
| 140,386,338
| 0
| 0
|
BSD-3-Clause
| 2018-07-10T06:19:43
| 2018-07-10T06:19:43
| null |
UTF-8
|
Python
| false
| false
| 1,567
|
py
|
import time
import logging
import serial
from SWDAdapterBase import *
CMD_HANDSHAKE = "?SWD?"
CMD_HANDSHAKE_REPLY = "!SWD1"
CMD_WRITE_WORD = 0x90
CMD_WRITE_BITS = 0xa0
CMD_WRITE_BYTE = CMD_WRITE_BITS | (8 - 1)
CMD_READ_WORD = 0x10
CMD_READ_BITS = 0x20
CMD_CYCLE_CLOCK = 0x28
class Adapter(SWDAdapterBase):
    """SWD adapter driving MC HCK firmware over a 115200-baud serial link.

    Python 2 code (note xrange below).
    """

    def __init__(self, options):
        SWDAdapterBase.__init__(self)
        if not options.port:
            raise SWDInitError("Port parameter is required")
        self.hwlog = logging.getLogger("hwcomm")
        self.port = serial.Serial(port=options.port, baudrate=115200, timeout=0.1)
        self.init_adapter()
        self.JTAG2SWD()

    def init_adapter(self):
        """Handshake with the firmware, retrying up to 20 times at 100 ms
        intervals; raises SWDInitError on failure."""
        for i in xrange(20):
            self.port.write(CMD_HANDSHAKE)
            reply = self.port.read(len(CMD_HANDSHAKE_REPLY))
            if reply == CMD_HANDSHAKE_REPLY:
                return True
            time.sleep(0.1)
        raise SWDInitError("Did not get handshake reply")

    def readBits(self, num):
        "Read 1-8 bits from SWD"
        # Low three bits of the opcode carry (num - 1).
        v = bytearray([CMD_READ_BITS | (num - 1)])
        self.port.write(v)
        self.hwlog.debug("Wrote %s", self.renderHex(v))
        v = ord(self.port.read(1))
        self.hwlog.debug("Read %#02x", v)
        return v

    def writeBits(self, val, num):
        "Write 1-8 bits to SWD"
        v = bytearray([CMD_WRITE_BITS | (num - 1), val])
        self.hwlog.debug("Wrote %s", self.renderHex(v))
        self.port.write(v)

    @staticmethod
    def renderHex(arr):
        # Human-readable hex dump of a byte sequence for debug logs.
        return " ".join([hex(x) for x in arr])
|
[
"pfalcon@users.sourceforge.net"
] |
pfalcon@users.sourceforge.net
|
07f19b1600350ce134465c5c2401089bbc90b0d0
|
8567438779e6af0754620a25d379c348e4cd5a5d
|
/testing/xvfb.py
|
a5620e7cde4072d7bd8b5f6bef54b27af767d9e1
|
[
"BSD-3-Clause"
] |
permissive
|
thngkaiyuan/chromium
|
c389ac4b50ccba28ee077cbf6115c41b547955ae
|
dab56a4a71f87f64ecc0044e97b4a8f247787a68
|
refs/heads/master
| 2022-11-10T02:50:29.326119
| 2017-04-08T12:28:57
| 2017-04-08T12:28:57
| 84,073,924
| 0
| 1
|
BSD-3-Clause
| 2022-10-25T19:47:15
| 2017-03-06T13:04:15
| null |
UTF-8
|
Python
| false
| false
| 2,751
|
py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs tests with Xvfb and Openbox on Linux and normally on other platforms."""
import os
import platform
import signal
import subprocess
import sys
import threading
import test_env
def _kill(proc, send_signal):
  """Kills |proc| and ignores exceptions thrown for non-existent processes."""
  try:
    os.kill(proc.pid, send_signal)
  except OSError:
    # Process already gone; nothing to do.
    pass
def kill(proc, timeout_in_seconds=10):
  """Tries to kill |proc| gracefully with a timeout for each signal.

  Python 2 code (print >> syntax). SIGTERM first; escalates to SIGKILL if
  the process has not exited within the timeout. proc.wait() runs on a
  helper thread so the join can be bounded.
  """
  if not proc or not proc.pid:
    return
  _kill(proc, signal.SIGTERM)
  thread = threading.Thread(target=proc.wait)
  thread.start()
  thread.join(timeout_in_seconds)
  if thread.is_alive():
    print >> sys.stderr, 'Xvfb running after SIGTERM, trying SIGKILL.'
    _kill(proc, signal.SIGKILL)
    thread.join(timeout_in_seconds)
  if thread.is_alive():
    print >> sys.stderr, 'Xvfb running after SIGTERM and SIGKILL; good luck!'
def run_executable(cmd, env):
  """Runs an executable within Xvfb on Linux or normally on other platforms.

  Returns the exit code of the specified commandline, or 1 on failure.

  On Linux this script re-invokes itself under xvfb-run with the
  _CHROMIUM_INSIDE_XVFB marker set; the inner invocation then starts the
  window manager and compositor before running the command.
  """
  if sys.platform == 'linux2':
    if env.get('_CHROMIUM_INSIDE_XVFB') == '1':
      openbox_proc = None
      xcompmgr_proc = None
      try:
        # Some ChromeOS tests need a window manager.
        openbox_proc = subprocess.Popen('openbox', stdout=subprocess.PIPE,
                                        stderr=subprocess.STDOUT, env=env)
        # Some tests need a compositing wm to make use of transparent visuals.
        xcompmgr_proc = subprocess.Popen('xcompmgr', stdout=subprocess.PIPE,
                                         stderr=subprocess.STDOUT, env=env)
        return test_env.run_executable(cmd, env)
      except OSError as e:
        print >> sys.stderr, 'Failed to start Xvfb or Openbox: %s' % str(e)
        return 1
      finally:
        kill(openbox_proc)
        kill(xcompmgr_proc)
    else:
      env['_CHROMIUM_INSIDE_XVFB'] = '1'
      # Re-run this very script under xvfb-run (strip .pyc back to .py).
      xvfb_script = __file__
      if xvfb_script.endswith('.pyc'):
        xvfb_script = xvfb_script[:-1]
      return subprocess.call(['xvfb-run', '-a', "--server-args=-screen 0 "
                              "1280x800x24 -ac -nolisten tcp -dpi 96",
                              xvfb_script] + cmd, env=env)
  else:
    return test_env.run_executable(cmd, env)
def main():
  """Entry point: forwards argv (minus the script name) to run_executable."""
  if len(sys.argv) < 2:
    print >> sys.stderr, (
        'Usage: xvfb.py [command args...]')
    return 2
  return run_executable(sys.argv[1:], os.environ.copy())


if __name__ == "__main__":
  sys.exit(main())
|
[
"hedonist.ky@gmail.com"
] |
hedonist.ky@gmail.com
|
a9ddf2c7ce753bd52658b66a00fbd265e29339f3
|
2049bda43e392d5f5981fbfdb70090ba226e4ef8
|
/apps/user/management/commands/proxy_detection.py
|
933792ab6086b2d3cab311183442d44cbdc89ce0
|
[] |
no_license
|
embedded1/django-package-forwarding
|
2ef84a1fde5ba6817d42d89f983512bdc3d77bc3
|
8c3286e9a7da8f4ae0401a81c8037585b3bb7ba6
|
refs/heads/master
| 2020-06-22T17:05:36.637695
| 2019-07-26T09:34:40
| 2019-07-26T09:34:40
| 197,738,052
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,026
|
py
|
from django.utils.translation import ugettext as _
from django.core.management.base import BaseCommand
from django.template import loader, Context
from django.core import mail
from django.conf import settings
from django.contrib.auth.models import User
from decimal import Decimal as D
import requests
import logging
logger = logging.getLogger("management_commands")
class Command(BaseCommand):
    """
    Background task that periodically checks whether each registered user is
    using a proxy service, via the MaxMind minFraud proxy-detection API.
    """
    help = _("Proxy detection")

    def calc_proxy_score(self, ip):
        """Query MaxMind for `ip`; return the proxyScore string, or None on
        any HTTP or API-level error (which is logged)."""
        payload = {
            'l': settings.MINFRAUD_LICENSE_KEY,
            'i': ip
        }
        response = requests.get('https://minfraud.maxmind.com/app/ipauth_http', params=payload)
        if response.status_code != requests.codes.ok:
            logger.error("Request failed with status %s" % response.status_code)
            return None
        # Response body is a ';'-separated list of 'key=value' pairs.
        proxy = dict( f.split('=') for f in response.text.split(';') )
        if 'err' in proxy and len(proxy['err']):
            logger.error("MaxMind returned an error code for the request: %s" % proxy['err'])
            return None
        return proxy['proxyScore']

    def handle(self, **options):
        """Score every active, not-yet-scored user with a known IP and build
        admin alert emails for those that appear to be behind a proxy."""
        users = User.objects.select_related('profile').exclude(
            is_superuser=True).filter(
            is_active=True, profile__ip__isnull=False, profile__proxy_score__isnull=True)
        emails = []
        for user in users:
            profile = user.get_profile()
            ip = profile.ip
            #call maxmind api to calculate proxy score
            proxy_score = self.calc_proxy_score(ip)
            #save proxy score
            if proxy_score:
                profile.proxy_score = proxy_score
                profile.save()
                #send alert only if we detected a proxy
                if D(proxy_score) != D('0.00'):
                    ctx = Context({
                        'user_name': user.get_full_name(),
                        'proxy_score': proxy_score
                    })
                    subject_tpl = loader.get_template('user/alerts/emails/admins/proxy_detection_subject.txt')
                    body_tpl = loader.get_template('user/alerts/emails/admins/proxy_detection_body.txt')
                    body_html_tpl = loader.get_template('user/alerts/emails/admins/proxy_detection_body.html')
                    # Build email and add to list
                    email = {
                        'subject': subject_tpl.render(ctx).strip(),
                        'message': body_tpl.render(ctx),
                        'html_message': body_html_tpl.render(ctx)
                    }
                    emails.append(email)
        # NOTE(review): the dispatch loop below is commented out, so the
        # alerts built above are never actually sent — confirm intent.
        #we use celery to dispatch emails, therefore we iterate over all emails and add
        #each one of them to the task queue,send_many doesn't work with priority = now
        #therefore, we use the regular send mail
        #for email in emails:
        #    mail.mail_admins(**email)
|
[
"asili@usendhome.com"
] |
asili@usendhome.com
|
a9fd8d74fd2eb83202790909a6fdb4ff546cd49d
|
5d3acf19a31749111bc9332632d56cfa8f229872
|
/testing/tests/001-main/003-self/200-json/001-users.py
|
ace465f0ccc2064551dac140ac940ae4add24ee0
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
fragmede/critic
|
217adea764b96b028fe6d95ee8f0ec82bc38b606
|
f32a41b8c209440b2cbf208b1790320ef6ba3ecb
|
refs/heads/master
| 2020-12-28T07:47:37.603777
| 2015-06-23T08:15:38
| 2015-07-14T09:12:28
| 39,280,420
| 0
| 0
|
NOASSERTION
| 2022-09-02T20:59:50
| 2015-07-18T00:03:57
|
Python
|
UTF-8
|
Python
| false
| false
| 7,042
|
py
|
# @dependency 001-main/001-empty/003-criticctl/002-adduser-deluser.py
# @dependency 001-main/001-empty/004-mixed/003-oauth.py
# @dependency 001-main/001-empty/004-mixed/004-password.py
# @dependency 001-main/003-self/028-gitemails.py
# GET /users with no parameters: every user regardless of status is
# returned, including the retired user "extra".
frontend.json(
    "users",
    expect={ "users": [user_json("admin", "Testing Administrator"),
                       user_json("alice"),
                       user_json("bob"),
                       user_json("dave"),
                       user_json("erin"),
                       user_json("howard"),
                       user_json("extra", status="retired"),
                       user_json("carol"),
                       user_json("felix"),
                       user_json("gina", no_email=True),
                       user_json("iris")] })
# GET /users?status=current: the retired user "extra" is filtered out.
frontend.json(
    "users",
    params={ "status": "current" },
    expect={ "users": [user_json("admin", "Testing Administrator"),
                       user_json("alice"),
                       user_json("bob"),
                       user_json("dave"),
                       user_json("erin"),
                       user_json("howard"),
                       user_json("carol"),
                       user_json("felix"),
                       user_json("gina", no_email=True),
                       user_json("iris")] })
# GET /users?status=retired: only the retired user is returned.
frontend.json(
    "users",
    params={ "status": "retired" },
    expect={ "users": [user_json("extra", status="retired")] })
# GET /users?sort=fullname: full list ordered by full name; "Testing
# Administrator" sorts last.
frontend.json(
    "users",
    params={ "sort": "fullname" },
    expect={ "users": [user_json("alice"),
                       user_json("bob"),
                       user_json("carol"),
                       user_json("dave"),
                       user_json("erin"),
                       user_json("extra", status="retired"),
                       user_json("felix"),
                       user_json("gina", no_email=True),
                       user_json("howard"),
                       user_json("iris"),
                       user_json("admin", "Testing Administrator")] })
# Pagination: count=4 returns the first four of the sorted list.
frontend.json(
    "users",
    params={ "sort": "fullname",
             "count": "4" },
    expect={ "users": [user_json("alice"),
                       user_json("bob"),
                       user_json("carol"),
                       user_json("dave")] })
# Pagination: offset=2 with count=4 returns a window from the middle.
frontend.json(
    "users",
    params={ "sort": "fullname",
             "offset": "2",
             "count": "4" },
    expect={ "users": [user_json("carol"),
                       user_json("dave"),
                       user_json("erin"),
                       user_json("extra", status="retired")] })
# Pagination: offset=6 with no count returns the tail of the sorted list.
frontend.json(
    "users",
    params={ "sort": "fullname",
             "offset": "6" },
    expect={ "users": [user_json("felix"),
                       user_json("gina", no_email=True),
                       user_json("howard"),
                       user_json("iris"),
                       user_json("admin", "Testing Administrator")] })
# GET /users/<id>: a single user addressed by numeric id.
frontend.json(
    "users/%d" % instance.userid("alice"),
    expect=user_json("alice"))
# fields=id restricts the response to just that attribute.
frontend.json(
    "users/%d" % instance.userid("alice"),
    params={ "fields": "id" },
    expect={ "id": instance.userid("alice") })
# GET /users?name=<name>: lookup by user name instead of numeric id.
frontend.json(
    "users",
    params={ "name": "alice" },
    expect=user_json("alice"))
# GET /users/<id>/emails: alice has one selected, unverified address.
frontend.json(
    "users/%d/emails" % instance.userid("alice"),
    expect={ "emails": [{ "address": "alice@example.org",
                          "selected": True,
                          "verified": None }] })
# Alice's single reviewer filter (path matches the 028-gitemails
# dependency test); reused by the three /filters checks below.
filter_json = { "id": int,
                "type": "reviewer",
                "path": "028-gitemails/",
                "repository": 1,
                "delegates": [instance.userid("erin")] }
frontend.json(
    "users/%d/filters" % instance.userid("alice"),
    expect={ "filters": [filter_json] })
# Same result whether the repository is selected by name or by numeric id.
frontend.json(
    "users/%d/filters" % instance.userid("alice"),
    params={ "repository": "critic" },
    expect={ "filters": [filter_json] })
frontend.json(
    "users/%d/filters" % instance.userid("alice"),
    params={ "repository": "1" },
    expect={ "filters": [filter_json] })
# include=users,repositories adds the referenced resources in "linked".
frontend.json(
    "users/%d/filters" % instance.userid("alice"),
    params={ "include": "users,repositories" },
    expect={ "filters": [{ "id": int,
                           "type": "reviewer",
                           "path": "028-gitemails/",
                           "repository": 1,
                           "delegates": [instance.userid("erin")] }],
             "linked": { "repositories": [critic_json],
                         "users": [user_json("erin")] }})
# GET /users/<id>,<id>,<id>: several users in one request.
frontend.json(
    "users/%d,%d,%d" % (instance.userid("alice"),
                        instance.userid("bob"),
                        instance.userid("dave")),
    expect={ "users": [user_json("alice"),
                       user_json("bob"),
                       user_json("dave")] })
# fields[users]=name restricts each listed user object to its name.
frontend.json(
    "users/%d,%d,%d" % (instance.userid("alice"),
                        instance.userid("bob"),
                        instance.userid("dave")),
    params={ "fields[users]": "name" },
    expect={ "users": [{ "name": "alice" },
                       { "name": "bob" },
                       { "name": "dave" }] })
# Error handling: unknown numeric id => 404.
frontend.json(
    "users/4711",
    expect={ "error": { "title": "No such resource",
                        "message": "Resource not found: Invalid user id: 4711" }},
    expected_http_status=404)
# Error handling: non-numeric id in the path => 400.
frontend.json(
    "users/alice",
    expect={ "error": { "title": "Invalid API request",
                        "message": "Invalid numeric id: 'alice'" }},
    expected_http_status=400)
# Error handling: unknown user name => 404.
frontend.json(
    "users",
    params={ "name": "nosuchuser" },
    expect={ "error": { "title": "No such resource",
                        "message": "Resource not found: Invalid user name: 'nosuchuser'" }},
    expected_http_status=404)
# Error handling: invalid status value => 400, naming the bad value.
frontend.json(
    "users",
    params={ "status": "clown" },
    expect={ "error": { "title": "Invalid API request",
                        "message": "Invalid user status values: 'clown'" }},
    expected_http_status=400)
# Error handling: only the invalid values are reported, not "current".
frontend.json(
    "users",
    params={ "status": "current,clown,president" },
    expect={ "error": { "title": "Invalid API request",
                        "message": "Invalid user status values: 'clown', 'president'" }},
    expected_http_status=400)
# Error handling: unsupported sort key => 400.
frontend.json(
    "users",
    params={ "sort": "age" },
    expect={ "error": { "title": "Invalid API request",
                        "message": "Invalid user sort parameter: 'age'" }},
    expected_http_status=400)
# Error handling: /emails and /filters reject trailing path arguments.
frontend.json(
    "users/%d/emails/1" % instance.userid("alice"),
    expect={ "error": { "title": "Invalid API request",
                        "message": "Resource does not support arguments: v1/users/emails" }},
    expected_http_status=400)
frontend.json(
    "users/%d/filters/1" % instance.userid("alice"),
    expect={ "error": { "title": "Invalid API request",
                        "message": "Resource does not support arguments: v1/users/filters" }},
    expected_http_status=400)
|
[
"jl@opera.com"
] |
jl@opera.com
|
869a50983066c01546bfa59c724d88e8d2fa2d10
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-1/44730c28cc28b124da727c569ddc9706715f50b1-<main>-bug.py
|
a18ff146bef3def4007632f225c584366ee5ef90
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,947
|
py
|
def main():
    """Ansible module entry point: ensure a MongoDB user is present/absent.

    Parses the module arguments, connects to MongoDB (optionally over SSL
    and/or against a replica set), authenticates when credentials are
    available, then adds/updates or removes the requested database user.
    """
    module = AnsibleModule(
        argument_spec=dict(
            login_user=dict(default=None),
            login_password=dict(default=None, no_log=True),
            login_host=dict(default='localhost'),
            login_port=dict(default='27017'),
            login_database=dict(default=None),
            replica_set=dict(default=None),
            database=dict(required=True, aliases=['db']),
            name=dict(required=True, aliases=['user']),
            password=dict(aliases=['pass'], no_log=True),
            ssl=dict(default=False, type='bool'),
            roles=dict(default=None, type='list'),
            state=dict(default='present', choices=['absent', 'present']),
            update_password=dict(default='always', choices=['always', 'on_create']),
            ssl_cert_reqs=dict(default='CERT_REQUIRED',
                               choices=['CERT_NONE', 'CERT_OPTIONAL', 'CERT_REQUIRED']),
        ),
        supports_check_mode=True)
    if not pymongo_found:
        module.fail_json(msg='the python pymongo module is required')
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    login_host = module.params['login_host']
    login_port = module.params['login_port']
    login_database = module.params['login_database']
    replica_set = module.params['replica_set']
    db_name = module.params['database']
    user = module.params['name']
    password = module.params['password']
    ssl = module.params['ssl']
    roles = module.params['roles'] or []
    state = module.params['state']
    update_password = module.params['update_password']
    # (removed a dead `ssl_cert_reqs = None` local: it was assigned but
    # never read — the connection setup below reads module.params directly)
    try:
        connection_params = {
            'host': login_host,
            'port': int(login_port),
        }
        if replica_set:
            connection_params['replicaset'] = replica_set
        if ssl:
            # Resolve the ssl.CERT_* constant named by the module argument.
            connection_params['ssl'] = ssl
            connection_params['ssl_cert_reqs'] = getattr(ssl_lib, module.params['ssl_cert_reqs'])
        client = MongoClient(**connection_params)
        check_compatibility(module, client)
        if login_user is None and login_password is None:
            # No explicit credentials: fall back to the .mongocnf file.
            mongocnf_creds = load_mongocnf()
            if mongocnf_creds is not False:
                login_user = mongocnf_creds['user']
                login_password = mongocnf_creds['password']
        elif login_password is None or login_user is None:
            # Exactly one of the pair was supplied — reject the half-login.
            module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
        if login_user is not None and login_password is not None:
            client.admin.authenticate(login_user, login_password, source=login_database)
        elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'):
            # PyMongo 3+ only permits the unauthenticated "localhost
            # exception" for creating the very first admin account.
            if db_name != 'admin':
                module.fail_json(msg='The localhost login exception only allows the first admin account to be created')
    except Exception:
        e = get_exception()
        module.fail_json(msg=('unable to connect to database: %s' % str(e)))
    if state == 'present':
        if password is None and update_password == 'always':
            module.fail_json(msg='password parameter required when adding a user unless update_password is set to on_create')
        try:
            uinfo = user_find(client, user, db_name)
            if update_password != 'always' and uinfo:
                # Existing user with update_password=on_create: keep the
                # current password; exit unchanged unless roles differ.
                password = None
                if not check_if_roles_changed(uinfo, roles, db_name):
                    module.exit_json(changed=False, user=user)
            if module.check_mode:
                module.exit_json(changed=True, user=user)
            user_add(module, client, db_name, user, password, roles)
        except Exception:
            e = get_exception()
            module.fail_json(msg=('Unable to add or update user: %s' % str(e)))
    elif state == 'absent':
        try:
            user_remove(module, client, db_name, user)
        except Exception:
            e = get_exception()
            module.fail_json(msg=('Unable to remove user: %s' % str(e)))
    # NOTE(review): this reports changed=True even when 'absent' removed
    # nothing — confirm user_remove is not a no-op before trusting it.
    module.exit_json(changed=True, user=user)
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.