blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ae518b92f0d9797b906239b9fe24dbbce0d6da8e | 52c372d83179f7c4506d31ede4dbde97917abc74 | /bob/db/nivl/models.py | d7c0faa9a8a9938e9e522ddd027b66bc01eadeff | [
"BSD-3-Clause"
] | permissive | bioidiap/bob.db.nivl | af78c64a2b593f5cf9a8c3abe04690887022604d | d5f9282894f5e93a77d35f38c6964629f9ea80ab | refs/heads/master | 2023-04-18T18:13:16.461281 | 2020-11-10T10:34:52 | 2020-11-10T10:34:52 | 283,998,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,163 | py | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira<tiago.pereira@idiap.ch>
# @date: Mon Oct 19 17:41:51 CEST 2015
#
# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Table models and functionality for the Near-Infrared and Visible-Light (NIVL) Dataset
"""
import sqlalchemy
from sqlalchemy import Table, Column, Integer, String, Boolean, ForeignKey, or_, and_, not_
from bob.db.base.sqlalchemy_migration import Enum, relationship
from sqlalchemy.orm import backref
from sqlalchemy.ext.declarative import declarative_base
import bob.db.base
import os
Base = declarative_base()
""" Defining protocols. Yes, they are static """
PROTOCOLS = ( 'idiap-comparison_2011-VIS-NIR', \
'idiap-comparison_2012-VIS-NIR', \
'idiap-search_VIS-NIR_split1', \
'idiap-search_VIS-NIR_split2', \
'idiap-search_VIS-NIR_split3', \
'idiap-search_VIS-NIR_split4', \
'idiap-search_VIS-NIR_split5', \
'idiap-search_VIS-VIS_split1', \
'idiap-search_VIS-VIS_split2', \
'idiap-search_VIS-VIS_split3', \
'idiap-search_VIS-VIS_split4', \
'idiap-search_VIS-VIS_split5', \
'original_2011-2012', \
'original_2012-2011')
GROUPS = ('world', 'dev', 'eval')
PURPOSES = ('train', 'enroll', 'probe')
class Client(Base):
"""
Information about the clients (identities) of the LDHF.
"""
__tablename__ = 'client'
id = Column(String(10), primary_key=True)
def __init__(self, id, group):
self.id = id
self.group = group
def __repr__(self):
return "<Client({0})>".format(self.id)
class Annotation(Base):
"""
- Annotation.id
- x
- y
"""
__tablename__ = 'annotation'
file_id = Column(Integer, ForeignKey('file.id'), primary_key=True)
le_x = Column(Integer)
le_y = Column(Integer)
re_x = Column(Integer)
re_y = Column(Integer)
def __init__(self, file_id, le_x, le_y, re_x, re_y):
self.file_id = file_id
self.le_x = le_x
self.le_y = le_y
self.re_x = re_x
self.re_y = re_y
def __repr__(self):
return "<Annotation(file_id:{0}, le_x={1}, le_y={2}), re_x={3}, re_y={4})>".format(self.file_id, self.le_x, self.le_y, self.re_x, self.re_y)
class File(Base, bob.db.base.File):
"""
Information about the files of the LDHF database.
Each file includes
* the client id
"""
__tablename__ = 'file'
modality_choices = ('VIS', 'NIR')
id = Column(Integer, primary_key=True)
path = Column(String(100), unique=True)
client_id = Column(Integer, ForeignKey('client.id'))
modality = Column(Enum(*modality_choices))
session = Column(Integer)
year = Column(Integer)
# a back-reference from the client class to a list of files
client = relationship("Client", backref=backref("files", order_by=id))
all_annotations = relationship("Annotation", backref=backref("file"), uselist=True)
def __init__(self, file_id, image_name, client_id, modality, session, year):
# call base class constructor
bob.db.base.File.__init__(self, file_id = file_id, path = image_name)
self.client_id = client_id
self.modality = modality
self.session = session
self.year = year
def annotations(self, annotation_type="eyes_center"):
assert len(self.all_annotations)==1
if annotation_type=="eyes_center":
return {'reye' : (self.all_annotations[0].re_y, self.all_annotations[0].re_x ), 'leye' : (self.all_annotations[0].le_y, self.all_annotations[0].le_x) }
else:
raise ValueError("Annotations type {0} invalid. Only 'eyes_center' is allowed".format(annotation_type))
return data
class Protocol_File_Association(Base):
"""
Describe the protocols
"""
__tablename__ = 'protocol_file_association'
protocol = Column('protocol', Enum(*PROTOCOLS), primary_key=True)
group = Column('group', Enum(*GROUPS), primary_key=True)
purpose = Column('purpose', Enum(*PURPOSES), primary_key=True)
file_id = Column('file_id', Integer, ForeignKey('file.id'), primary_key=True)
#client_id = Column('client_id', Integer, ForeignKey('client.id'), primary_key=True)
def __init__(self, protocol, group, purpose, file_id):
self.protocol = protocol
self.group = group
self.purpose = purpose
self.file_id = file_id
#self.client_id = client_id
| [
"tiagofrepereira@gmail.com"
] | tiagofrepereira@gmail.com |
d73652512bd558ac8c51d7ab86b3ae12c6a99bbb | a1439f8dfaf14e61720dcde463e0c8731e497526 | /pao/mpr/examples/moore.py | 0b20dd6de48dd9b40b8632d61a2f6982c475285a | [
"BSD-3-Clause"
] | permissive | whart222/pao | e5ef57baa073facb9d3ce8dc8e86b80d37aa90f3 | 3a80767ef5082be4dd98dd2f38000ffb96d2327c | refs/heads/master | 2023-06-15T06:25:07.564944 | 2021-07-08T14:18:02 | 2021-07-08T14:18:02 | 265,347,819 | 0 | 0 | null | 2020-05-19T19:43:12 | 2020-05-19T19:43:11 | null | UTF-8 | Python | false | false | 662 | py | #
# Example from
# Moore, J. and J. Bard 1990.
# The mixed integer linear bilevel programming problem.
# Operations Research 38(5), 911–921.
#
from pao.mpr import *
def create():
M = LinearMultilevelProblem()
U = M.add_upper(nxZ=1)
L = U.add_lower(nxZ=1)
U.c[U] = [-1]
U.c[L] = [-10]
L.c[L] = [1]
L.A[U] = [[-25],
[1],
[2],
[-2]]
L.A[L] = [[20],
[2],
[-1],
[-10]]
L.b = [30,10,15,-15]
return M
if __name__ == "__main__": #pragma: no cover
M = create()
M.print()
opt = Solver('pao.mpr.FA')
opt.solve(M)
| [
"whart222@gmail.com"
] | whart222@gmail.com |
ced8a84d60c79476996223a6ce4c035cde0cec50 | e1b3816615cce62ebe2b6c59b0eb3fbd3693d73b | /solutions/606-construct-string-from-binary-tree/construct-string-from-binary-tree.py | 392a31dd693a4f747d9d30c1798437fa800fed15 | [] | no_license | fagan2888/leetcode-6 | 1fb18979ffacb82d5db77988b38ecd7371b428b9 | 14176f1752e2bb94dec51bd90dfd412896ed84de | refs/heads/master | 2022-01-10T03:27:51.388066 | 2019-06-15T14:13:48 | 2019-06-15T14:13:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,655 | py | # -*- coding:utf-8 -*-
# You need to construct a string consists of parenthesis and integers from a binary tree with the preorder traversing way.
#
# The null node needs to be represented by empty parenthesis pair "()". And you need to omit all the empty parenthesis pairs that don't affect the one-to-one mapping relationship between the string and the original binary tree.
#
# Example 1:
#
# Input: Binary tree: [1,2,3,4]
# 1
# / \
# 2 3
# /
# 4
#
# Output: "1(2(4))(3)"
# Explanation: Originallay it needs to be "1(2(4)())(3()())", but you need to omit all the unnecessary empty parenthesis pairs. And it will be "1(2(4))(3)".
#
#
#
# Example 2:
#
# Input: Binary tree: [1,2,3,null,4]
# 1
# / \
# 2 3
# \
# 4
#
# Output: "1(2()(4))(3)"
# Explanation: Almost the same as the first example, except we can't omit the first parenthesis pair to break the one-to-one mapping relationship between the input and the output.
#
#
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def tree2str(self, t):
"""
:type t: TreeNode
:rtype: str
"""
r=""
if t==None:
return r
else:
r+=str(t.val)
if t.left!=None:
r+="("+Solution.tree2str(self,t.left)+")"
elif t.right!=None:
r+="()"
if t.right!=None:
r+="("+Solution.tree2str(self,t.right)+")"
return r
| [
"rzhangpku@pku.edu.cn"
] | rzhangpku@pku.edu.cn |
c0fc556ea5948aed614ee57063c5122ab7b17078 | ce0f8956c4c308c67bd700d31fe8d5a17b16ac08 | /Python3/src/22 JSON and XML/XML/etree/03_createFile.py | 2640cfed91f9383eba24796f1662d88f4bca71eb | [] | no_license | seddon-software/python3 | 795ae8d22a172eea074b71d6cd49d79e388d8cc6 | d5e6db1509a25c1a3040d5ae82d757539a2ff730 | refs/heads/master | 2021-07-10T15:48:31.893757 | 2020-07-16T20:29:22 | 2020-07-16T20:29:22 | 175,872,757 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,349 | py | ############################################################
#
# createFile.py
#
############################################################
from xml.etree.ElementTree import Element, ElementTree
from xml.etree.ElementTree import _namespace_map
"""
Code to generate the following document:
<book:book xmlns:book="http://www.demo.com/book">
<book:title>XMLBeans</book:title>
<book:author first="John" last="Smith" />
<book:publisher>Wiley</book:publisher>
<book:pubdate>2007-06+01:00</book:pubdate>
<book:cost>23.79</book:cost>
</book:book>
"""
# setup namespace and alias
ns = "http://www.demo.com/book"
uri = "{" + ns + "}"
_namespace_map[ns] = 'book'
# define elements
root = Element(uri + "book")
title = Element(uri + "title")
author = Element(uri + "author")
publisher = Element(uri + "publisher")
pubdate = Element(uri + "pubdate")
cost = Element(uri + "cost")
# add attributes
author.attrib["first"] = "John"
author.attrib["last"] = "Smith"
# add text
title.text = "XMLBeans"
publisher.text = "Wiley"
pubdate.text = "2007-06+01:00"
cost.text = "23.79"
# build tree
root.append(title)
root.append(author)
root.append(publisher)
root.append(pubdate)
root.append(cost)
# write to file
tree = ElementTree(root)
tree.write("xml/new_book.xml")
| [
"seddon-software@keme.co.uk"
] | seddon-software@keme.co.uk |
ba9181dae8856bb6fc00c53e168e202b8f15e7ea | 697af415566ba649502bd18751a6521ac526892c | /2020_VERSIONS/get_hrrr_plots.py | 7e56cb6022afdc1b32c5b2d2a320ab4ae25b9cd6 | [] | no_license | srbrodzik/impacts-scripts | df44c8f34746499b8397b5b1a4ad09859b4cc8d4 | 263c7545bbb912bbcea563a21d0619e5112b1788 | refs/heads/master | 2023-05-31T05:01:09.558641 | 2023-05-22T23:24:52 | 2023-05-22T23:24:52 | 215,638,568 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,693 | py | import os
import sys
import time
from datetime import timedelta
from datetime import datetime
import requests
from bs4 import BeautifulSoup
import shutil
def listFD(url, ext=''):
page = requests.get(url).text
#print page
soup = BeautifulSoup(page, 'html.parser')
return [url + '/' + node.get('href') for node in soup.find_all('a') if node.get('href').endswith(ext)]
# User inputs
debug = 1
secsPerDay = 86400
pastSecs = secsPerDay/4 # 6 hours
secsPerRun = secsPerDay/24
deltaBetweenForecastHours = 1
lastForecastHour = 6
hrrrUrl = 'https://tropicaltidbits.com/analysis/models/hrrr'
targetDirBase = '/home/disk/bob/impacts/model/hrrr_03km'
products = ['ref_frzn_us','ir_us','T2m_us']
has_anal_prod = [0,1,1]
catalogBaseDir = '/home/disk/funnel/impacts-website/archive/model/hrrr_03km'
# get model date and time closest to current time
nowTime = time.gmtime()
now = datetime(nowTime.tm_year, nowTime.tm_mon, nowTime.tm_mday,
nowTime.tm_hour, nowTime.tm_min, nowTime.tm_sec)
nowDateStr = now.strftime("%Y%m%d")
nowHourStr = now.strftime("%H")
lastModelDateTimeStr = nowDateStr+nowHourStr
if debug:
print >>sys.stderr, "lastModelDateTimeStr = ", lastModelDateTimeStr
# compute start time
pastDelta = timedelta(0, pastSecs)
lastModelDateTime = datetime.strptime(lastModelDateTimeStr,'%Y%m%d%H')
startTime = lastModelDateTime - pastDelta
startDateHourStr = startTime.strftime("%Y%m%d%H")
startDateStr = startTime.strftime("%Y%m%d")
if debug:
print >>sys.stderr, "startDateHourStr = ", startDateHourStr
# set up list of model runs to be checked
nRuns = (pastSecs / secsPerRun) + 1
dateStrList = []
dateHourStrList = []
for iRun in range(0, nRuns):
deltaSecs = timedelta(0, iRun * secsPerRun)
dayTime = lastModelDateTime - deltaSecs
dateStr = dayTime.strftime("%Y%m%d")
dateHourStr = dayTime.strftime("%Y%m%d%H")
dateStrList.append(dateStr)
dateHourStrList.append(dateHourStr)
if debug:
print >>sys.stderr, "dateHourStrList = ", dateHourStrList
for t in range(0,nRuns):
currentModelRun = dateHourStrList[t]
for i in range(0,len(products)):
if debug:
print >>sys.stderr, "Processing ", currentModelRun, " run for ", products[i], " data"
# get list of files on server for this run and this product
# only interested in forecasts up to and including 'lastForecastHour'
urlFileList = []
#urlDateList = []
#urlDateTimeList = []
url = hrrrUrl+'/'+dateHourStrList[t]+'/'
ext = 'png'
for file in listFD(url, ext):
tmp = os.path.basename(file)
(base,ext) = os.path.splitext(tmp)
parts = base.split('_')
forecast_num = parts[-1]
if len(forecast_num) < 2:
forecast_num = '0'+forecast_num
if has_anal_prod[i]:
last_forecast_num = str(lastForecastHour/deltaBetweenForecastHours + 1)
else:
last_forecast_num = str(lastForecastHour/deltaBetweenForecastHours)
if products[i] in tmp and int(forecast_num) <= int(last_forecast_num):
urlFileList.append(tmp)
#if debug:
# print >>sys.stderr, "urlFileList = ", urlFileList
if len(urlFileList) == 0:
if debug:
print >>sys.stderr, "WARNING: ignoring run and product - no data on server"
print >>sys.stderr, " for model run time: ", currentModelRun
print >>sys.stderr, " for product : ", products[i]
else:
# make target directory, if necessary, and cd to it
#targetDir = targetDirBase+'/'+dateHourStrList[i]+'/'+products[i]
targetDir = targetDirBase+'/'+currentModelRun
if not os.path.exists(targetDir):
os.makedirs(targetDir)
os.chdir(targetDir)
# get local file list - i.e. those which have already been downloaded
localFileList = os.listdir('.')
#localFileList.reverse()
#if debug:
# print >>sys.stderr, " localFileList: ", localFileList
# get url file list (not sure I need this)
#urlFileList.sort()
#urlFileList.reverse()
# loop through the url file list, downloading those that have
# not yet been downloaded
if debug:
print >>sys.stderr, "Starting to loop through url file list"
for idx,urlFileName in enumerate(urlFileList,0):
if debug:
print >>sys.stderr, " idx = ", idx
print >>sys.stderr, " urlFileName = ", urlFileName
#print >>sys.stderr, " urlDateList[",idx,"] = ", urlDateList[idx]
#print >>sys.stderr, " dateStr = ", dateStr
if urlFileName not in localFileList:
if debug:
print >>sys.stderr, urlFileName," not in localFileList -- get file"
try:
command = 'wget '+hrrrUrl+'/'+currentModelRun+'/'+urlFileName
os.system(command)
except Exception as e:
print sys.stderr, " wget failed, exception: ", e
continue
# rename file and move to web server
# first get forecast_hour
(base,ext) = os.path.splitext(urlFileName)
parts = base.split('_')
if has_anal_prod[i]:
forecast_hour = str( (int(parts[-1])-1) * deltaBetweenForecastHours)
else:
forecast_hour = str(int(parts[-1])*deltaBetweenForecastHours)
if len(forecast_hour) == 1:
forecast_hour = '0'+forecast_hour
if debug:
print >>sys.stderr, " forecast_hour = ", forecast_hour
# create full file name
newFileName = 'model.hrrr_03km.'+currentModelRun+'00.'+forecast_hour+'_'+products[i]+'.png'
if debug:
print >>sys.stderr, " newFileName = ", newFileName
# check to make sure that web server path exists
catalogDir = catalogBaseDir+'/'+dateStrList[t]
if not os.path.exists(catalogDir):
os.makedirs(catalogDir)
# copy file to web server
shutil.copy(targetDir+'/'+urlFileName,catalogDir+'/'+newFileName)
| [
"brodzik@uw.edu"
] | brodzik@uw.edu |
ec0874d853f78a0f15d0b9d998d6f76eec5ea4d5 | 85fc4fcd841226c30b1a5824468eae95e6da3cd1 | /grass.py | 16b07bf614dc8a4d3288eed474e10b56cb855a1c | [] | no_license | a5vh/kattis | 1676060acfc6eef1d7c558299063646f3b7fcbf3 | 093cbeba31149fa0182ecc1bc8a43c60cdb1fa36 | refs/heads/master | 2020-08-17T19:54:11.754205 | 2019-11-26T01:34:29 | 2019-11-26T01:34:29 | 215,705,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | cost = float(input())
L = int(input())
sum = 0.0
for i in range(L):
width, length = input().split()
width = float(width)
length = float(length)
sum += (width*length)*cost
print(float(sum)) | [
"august.hummert5@gmail.com"
] | august.hummert5@gmail.com |
10451e6a52200948649ca456a7e8d0ab43de0548 | 4fa785f727f8bd223e6a5a5ede9f8f642c67b4be | /tests/server/rest/task_stats_test.py | 46b9084353e5f85d694203ea5176dd4360833a20 | [
"MIT"
] | permissive | carlwitt/iceprod | d020ff56f233f4a6312cdfa1cb09b2781c630f0a | bd00be6051dd847bdbadfec276fbe7d8e3fef81a | refs/heads/master | 2020-04-18T22:35:53.053632 | 2019-01-27T06:19:35 | 2019-01-27T06:19:35 | 167,798,458 | 0 | 0 | null | 2019-01-27T10:55:30 | 2019-01-27T10:55:30 | null | UTF-8 | Python | false | false | 5,430 | py | """
Test script for REST/task_stats
"""
import logging
logger = logging.getLogger('rest_task_stats_test')
import os
import sys
import time
import random
import shutil
import tempfile
import unittest
import subprocess
import json
from functools import partial
from unittest.mock import patch, MagicMock
from tests.util import unittest_reporter, glob_tests
import ldap3
import tornado.web
import tornado.ioloop
from tornado.httpclient import AsyncHTTPClient, HTTPError
from tornado.testing import AsyncTestCase
from rest_tools.server import Auth, RestServer
from iceprod.server.modules.rest_api import setup_rest
class rest_task_stats_test(AsyncTestCase):
def setUp(self):
super(rest_task_stats_test,self).setUp()
self.test_dir = tempfile.mkdtemp(dir=os.getcwd())
def cleanup():
shutil.rmtree(self.test_dir)
self.addCleanup(cleanup)
try:
self.port = random.randint(10000,50000)
self.mongo_port = random.randint(10000,50000)
dbpath = os.path.join(self.test_dir,'db')
os.mkdir(dbpath)
dblog = os.path.join(dbpath,'logfile')
m = subprocess.Popen(['mongod', '--port', str(self.mongo_port),
'--dbpath', dbpath, '--smallfiles',
'--quiet', '--nounixsocket',
'--logpath', dblog])
self.addCleanup(partial(time.sleep, 0.05))
self.addCleanup(m.terminate)
config = {
'auth': {
'secret': 'secret'
},
'rest': {
'task_stats': {
'database': {'port':self.mongo_port},
}
},
}
routes, args = setup_rest(config)
self.server = RestServer(**args)
for r in routes:
self.server.add_route(*r)
self.server.startup(port=self.port)
self.token = Auth('secret').create_token('foo', type='user', payload={'role':'admin','username':'admin'})
except Exception:
logger.info('failed setup', exc_info=True)
@unittest_reporter(name='REST POST /tasks/<task_id>/task_stats')
def test_100_task_stats(self):
client = AsyncHTTPClient()
data = {
'dataset_id': 'foo',
'bar': 1.23456,
'baz': [1,2,3,4],
}
r = yield client.fetch('http://localhost:%d/tasks/%s/task_stats'%(self.port,'bar'),
method='POST', body=json.dumps(data),
headers={'Authorization': b'bearer '+self.token})
self.assertEqual(r.code, 201)
ret = json.loads(r.body)
task_stat_id = ret['result']
@unittest_reporter(name='REST GET /datasets/<dataset_id>/tasks/<task_id>/task_stats')
def test_200_task_stats(self):
client = AsyncHTTPClient()
data = {
'dataset_id': 'foo',
'bar': 1.23456,
'baz': [1,2,3,4],
}
task_id = 'bar'
r = yield client.fetch('http://localhost:%d/tasks/%s/task_stats'%(self.port,task_id),
method='POST', body=json.dumps(data),
headers={'Authorization': b'bearer '+self.token})
self.assertEqual(r.code, 201)
ret = json.loads(r.body)
task_stat_id = ret['result']
r = yield client.fetch('http://localhost:%d/datasets/%s/tasks/%s/task_stats'%(self.port,'foo',task_id),
headers={'Authorization': b'bearer '+self.token})
self.assertEqual(r.code, 200)
ret = json.loads(r.body)
self.assertEqual(len(ret), 1)
self.assertIn(task_stat_id, ret)
self.assertIn('task_id', ret[task_stat_id])
self.assertEqual(task_id, ret[task_stat_id]['task_id'])
self.assertEqual(data, ret[task_stat_id]['stats'])
# note: the name is so long it needs a break to wrap correctly
@unittest_reporter(name='REST GET /datasets/<dataset_id>/tasks/<task_id>/task_stats/<task_stat_id>')
def test_210_task_stats(self):
client = AsyncHTTPClient()
data = {
'dataset_id': 'foo',
'bar': 1.23456,
'baz': [1,2,3,4],
}
task_id = 'bar'
r = yield client.fetch('http://localhost:%d/tasks/%s/task_stats'%(self.port,task_id),
method='POST', body=json.dumps(data),
headers={'Authorization': b'bearer '+self.token})
self.assertEqual(r.code, 201)
ret = json.loads(r.body)
task_stat_id = ret['result']
r = yield client.fetch('http://localhost:%d/datasets/%s/tasks/%s/task_stats/%s'%(self.port,'foo',task_id,task_stat_id),
headers={'Authorization': b'bearer '+self.token})
self.assertEqual(r.code, 200)
ret = json.loads(r.body)
self.assertEqual(task_stat_id, ret['task_stat_id'])
self.assertEqual(task_id, ret['task_id'])
self.assertEqual(data, ret['stats'])
def load_tests(loader, tests, pattern):
suite = unittest.TestSuite()
alltests = glob_tests(loader.getTestCaseNames(rest_task_stats_test))
suite.addTests(loader.loadTestsFromNames(alltests,rest_task_stats_test))
return suite
| [
"davids24@gmail.com"
] | davids24@gmail.com |
e65dc5a581a15d57eca06f9512858a6938fe718e | b049a961f100444dde14599bab06a0a4224d869b | /sdk/python/pulumi_azure_native/apimanagement/v20180101/__init__.py | fc73bf57d9496c3fbc71a455e1a1eea44a2efc5b | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | pulumi/pulumi-azure-native | b390c88beef8381f9a71ab2bed5571e0dd848e65 | 4c499abe17ec6696ce28477dde1157372896364e | refs/heads/master | 2023-08-30T08:19:41.564780 | 2023-08-28T19:29:04 | 2023-08-28T19:29:04 | 172,386,632 | 107 | 29 | Apache-2.0 | 2023-09-14T13:17:00 | 2019-02-24T20:30:21 | Python | UTF-8 | Python | false | false | 764 | py | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .api_diagnostic import *
from .api_diagnostic_logger import *
from .backend import *
from .diagnostic import *
from .diagnostic_logger import *
from .get_api_diagnostic import *
from .get_backend import *
from .get_diagnostic import *
from .get_logger import *
from .get_subscription import *
from .get_user import *
from .group_user import *
from .logger import *
from .notification_recipient_user import *
from .subscription import *
from .user import *
from ._inputs import *
from . import outputs
| [
"github@mikhail.io"
] | github@mikhail.io |
5eee1f0c6972dcc67edd83dc03bce66c5ec25a2f | 503f5089422a97dc6f496cb7ecdaaf711611e5c0 | /ki/remote.py | c854b82580334b9df169d91e404712852fbe6ab3 | [] | no_license | jd/ki | b3e782ed176ea38099aff8ba0aea4e1c06ba754b | 343eeee119e2167a52e882d7772ecf3fe8f04d3a | refs/heads/main | 2023-05-06T07:06:47.694980 | 2012-03-13T16:17:26 | 2012-03-13T16:17:26 | 363,116,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,452 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ki.remote -- Git based file system storage remote access
#
# Copyright © 2011 Julien Danjou <julien@danjou.info>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import threading
from .config import Configurable, Config, BUS_INTERFACE
from .objects import FileBlock, FetchError
from dulwich.client import get_transport_and_path
from dulwich.errors import HangupException
import dbus.service
import uuid
class Remote(dbus.service.Object, Configurable):
_id_ref = "refs/tags/id"
def __init__(self, storage, name, url, weight=100):
self.url = url
self.weight = weight
self.storage = storage
self.name = name
self.client, self.path = get_transport_and_path(url)
super(Remote, self).__init__(storage.bus,
"%s/remotes/%s" % (storage.__dbus_object_path__, name))
@dbus.service.method(dbus_interface="%s.Remote" % BUS_INTERFACE,
out_signature='s')
def GetURL(self):
return self.url
@dbus.service.method(dbus_interface="%s.Remote" % BUS_INTERFACE,
out_signature='s')
def GetName(self):
return self.name
@dbus.service.method(dbus_interface="%s.Remote" % BUS_INTERFACE,
out_signature='a{ss}')
def GetRefs(self):
return self.refs
@dbus.service.method(dbus_interface="%s.Remote" % BUS_INTERFACE,
out_signature='i')
def GetWeight(self):
return self.weight
@dbus.service.method(dbus_interface="%s.Remote" % BUS_INTERFACE,
out_signature='s')
def GetID(self):
return self.id
def fetch_sha1s(self, sha1s):
return self.fetch(lambda refs: sha1s)
@property
def id(self):
"""Fetch remote id."""
try:
return self._id
except AttributeError:
try:
self._id = str(FileBlock(self.storage, self.refs[Remote._id_ref]))
except KeyError:
f = FileBlock(self.storage)
f.data = str(uuid.uuid4())
def determine_wants(refs):
newrefs = refs.copy()
newrefs[Remote._id_ref] = f.store()
return newrefs
self.push(determine_wants)
self._id = str(f)
return self._id
@property
def config(self):
"""Fetch configuration from the remote."""
try:
return Config(self.storage, self.on_config_store, self.storage[self.refs[Config.ref]])
except KeyError:
return Config(self.storage, self.on_config_store)
def on_config_store(self, sha1):
"""Store the config on the remote."""
def determine_wants(oldrefs):
newrefs = oldrefs.copy()
newrefs[Config.ref] = sha1
return newrefs
self.push(determine_wants)
@property
def refs(self):
"""Connect to the remote and returns all the refs it has."""
return self.fetch(lambda refs: [])
@dbus.service.signal(dbus_interface="%s.Remote" % BUS_INTERFACE,
signature='as')
def FetchProgress(self, status):
pass
def fetch(self, determine_wants=None):
"""Fetch data from the remote.
The function passed in determine_wats is called with the refs dict as first and only argument:
{ "refs/heads/master": "08a1c9f9742bcbd27c44fb84b662c68fabd995e1",
… }
The determine_wants function should returns a list of SHA1 to fetch."""
return self.client.fetch(self.path, self.storage, determine_wants, self.FetchProgress)
def push(self, determine_wants):
"""Push data to the remote.
The function passed in determine_wants is called with the refs dict as first and only argument:
{ "refs/heads/master": "08a1c9f9742bcbd27c44fb84b662c68fabd995e1",
… } """
return self.client.send_pack(self.path,
determine_wants,
self.storage.object_store.generate_pack_contents)
def __le__(self, other):
if isinstance(other, Remote):
return self.weight <= other.weight
return self.weight <= other
def __lt__(self, other):
if isinstance(other, Remote):
return self.weight < other.weight
return self.weight < other
def __ge__(self, other):
if isinstance(other, Remote):
return self.weight >= other.weight
return self.weight >= other
def __gt__(self, other):
if isinstance(other, Remote):
return self.weight > other.weight
return self.weight > other
class Syncer(threading.Thread):
def __init__(self, storage):
self.storage = storage
super(Syncer, self).__init__()
self.daemon = True
self.name = "Syncer for %s" % self.storage.path
def run(self):
while True:
# XXX configure timeout
print "WAIT"
self.storage.must_be_sync.wait(5)
self.storage.must_be_sync.clear()
print "END WAIT"
print "PUSH"
try:
self.storage.push()
except HangupException as e:
print "> Unable to push: %s" % str(e)
print "FETCH"
try:
self.storage.fetch()
except HangupException as e:
print "> Unable to fetch: %s" % str(e)
print "FETCH BLOBS"
try:
self.storage.fetch_blobs()
except FetchError as e:
print "> Unable to fetch blobs: %s" % str(e)
print "UPDATE FROM REMOTES"
self.storage.update_from_remotes()
| [
"julien@danjou.info"
] | julien@danjou.info |
ff42f8804ec94ff50d0a4adaeef22c31c4a5ffae | a0c31af5f4bfbe22b70144c10f7d86cf1184643f | /SWEA/D2/swea4869.py | a4bdcaf424186dbc110410f40ec353f88ab0c5dd | [] | no_license | ejolie/problem-solving | 63a8eb39de11f7ea0525976c9c03b7f7675075c5 | b51fbf71b72d837897db3c04cbc4037b6f6c11f7 | refs/heads/master | 2021-12-08T00:00:02.155359 | 2021-12-05T14:16:43 | 2021-12-05T14:16:43 | 138,751,091 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | '''
4869. 종이붙이기
'''
def solve(n):
memo = [0] * 1000
memo[0], memo[1], memo[2], = 0, 1, 3
for i in range(3, n+1):
memo[i] = memo[i-1] + 2 * memo[i-2]
return memo[n]
tc = int(input())
for t in range(1, tc+1):
n = int(input()) // 10
res = solve(n)
print(f'#{t} {res}') | [
"ejisvip@gmail.com"
] | ejisvip@gmail.com |
b97544b29ef011a1bb3f7e272cad39405a3fb6fe | 5da5473ff3026165a47f98744bac82903cf008e0 | /packages/google-cloud-artifact-registry/samples/generated_samples/artifactregistry_v1_generated_artifact_registry_update_repository_async.py | b63934b97c65c789e9fed24aaf4c1f93d927cd44 | [
"Apache-2.0"
] | permissive | googleapis/google-cloud-python | ed61a5f03a476ab6053870f4da7bc5534e25558b | 93c4e63408c65129422f65217325f4e7d41f7edf | refs/heads/main | 2023-09-04T09:09:07.852632 | 2023-08-31T22:49:26 | 2023-08-31T22:49:26 | 16,316,451 | 2,792 | 917 | Apache-2.0 | 2023-09-14T21:45:18 | 2014-01-28T15:51:47 | Python | UTF-8 | Python | false | false | 1,900 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateRepository
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-artifact-registry
# [START artifactregistry_v1_generated_ArtifactRegistry_UpdateRepository_async]
# This snippet has been automatically generated and should be regarded as a
# code template only.
# It will require modifications to work:
# - It may require correct/in-range values for request initialization.
# - It may require specifying regional endpoints when creating the service
# client as shown in:
# https://googleapis.dev/python/google-api-core/latest/client_options.html
from google.cloud import artifactregistry_v1
async def sample_update_repository():
# Create a client
client = artifactregistry_v1.ArtifactRegistryAsyncClient()
# Initialize request argument(s)
request = artifactregistry_v1.UpdateRepositoryRequest(
)
# Make the request
response = await client.update_repository(request=request)
# Handle the response
print(response)
# [END artifactregistry_v1_generated_ArtifactRegistry_UpdateRepository_async]
| [
"noreply@github.com"
] | googleapis.noreply@github.com |
48c3584ee392fe27f7544c8d6fee6e955d5afa00 | 282d0a84b45b12359b96bbf0b1d7ca9ee0cb5d19 | /Malware1/venv/Lib/site-packages/sklearn/ensemble/_hist_gradient_boosting/grower.py | eb2c27208662d5721e07c901e969a66e7645e995 | [] | no_license | sameerakhtar/CyberSecurity | 9cfe58df98495eac6e4e2708e34e70b7e4c055d3 | 594973df27b4e1a43f8faba0140ce7d6c6618f93 | refs/heads/master | 2022-12-11T11:53:40.875462 | 2020-09-07T23:13:22 | 2020-09-07T23:13:22 | 293,598,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:1fde0343be64a238fb4b62a8dce7854c50385b5236eeb3f1d75b38b1778d4544
size 19176
| [
"46763165+sameerakhtar@users.noreply.github.com"
] | 46763165+sameerakhtar@users.noreply.github.com |
e16387b80fa3552314a78fda622b70dc3aebb153 | de3f3575121df3188754145a43b7c10499305f37 | /testCsv.py | 1d577e4fb3c6d01b0fe7e1bc61ae52baa313c03f | [] | no_license | bobosky/GeoLifeDataMining | cbd24422959887575c3f15415988e4e43e6ed4b4 | 54a3d1727633bc4e7c43893b14e570fd8fce2068 | refs/heads/master | 2020-05-03T09:40:17.872102 | 2018-11-08T17:57:18 | 2018-11-08T17:57:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py |
import numpy as np
import pandas as pd
path = 'data/data.csv'
df = pd.read_csv(path)
print(df.head())
print(df.shape)
| [
"781244184@qq.com"
] | 781244184@qq.com |
6fd61b04aee45278a3fe0c68198e4cb3b20772d6 | 44ddd25c6aa008cc0a814f9f49b2344c6a59aedb | /lib/coloraide/spaces/a98_rgb_linear.py | c5f0c249dad74d33c454e08de369aeef8a33f908 | [
"MIT"
] | permissive | facelessuser/ColorHelper | eb757896fa6e4a9029090188fad789587dc2ed06 | ad4d779bff57a65b7c77cda0b79c10cf904eb817 | refs/heads/master | 2023-08-31T20:51:30.390633 | 2023-08-28T15:53:39 | 2023-08-28T15:53:39 | 31,641,842 | 279 | 41 | MIT | 2023-09-06T23:37:41 | 2015-03-04T06:27:11 | Python | UTF-8 | Python | false | false | 1,610 | py | """Linear A98 RGB color class."""
from ..cat import WHITES
from .srgb import sRGB
from .. import algebra as alg
from ..types import Vector
RGB_TO_XYZ = [
[0.5766690429101307, 0.1855582379065463, 0.18822864623499472],
[0.2973449752505361, 0.6273635662554661, 0.07529145849399789],
[0.027031361386412343, 0.07068885253582723, 0.9913375368376389]
]
XYZ_TO_RGB = [
[2.0415879038107456, -0.5650069742788595, -0.34473135077832956],
[-0.9692436362808795, 1.8759675015077202, 0.0415550574071756],
[0.013444280632031147, -0.11836239223101837, 1.0151749943912054]
]
def lin_a98rgb_to_xyz(rgb: Vector) -> Vector:
"""
Convert an array of linear-light a98-rgb values to CIE XYZ using D50.D65.
(so no chromatic adaptation needed afterwards)
http://www.brucelindbloom.com/index.html?Eqn_RGB_XYZ_Matrix.html
which has greater numerical precision than section 4.3.5.3 of
https://www.adobe.com/digitalimag/pdfs/AdobeRGB1998.pdf
"""
return alg.dot(RGB_TO_XYZ, rgb, dims=alg.D2_D1)
def xyz_to_lin_a98rgb(xyz: Vector) -> Vector:
"""Convert XYZ to linear-light a98-rgb."""
return alg.dot(XYZ_TO_RGB, xyz, dims=alg.D2_D1)
class A98RGBLinear(sRGB):
"""Linear A98 RGB class."""
BASE = "xyz-d65"
NAME = "a98-rgb-linear"
SERIALIZE = ('--a98-rgb-linear',)
WHITE = WHITES['2deg']['D65']
def to_base(self, coords: Vector) -> Vector:
"""To XYZ from A98 RGB."""
return lin_a98rgb_to_xyz(coords)
def from_base(self, coords: Vector) -> Vector:
"""From XYZ to A98 RGB."""
return xyz_to_lin_a98rgb(coords)
| [
"noreply@github.com"
] | facelessuser.noreply@github.com |
14a6e111047115fb194beef463f65d4a8c6f9c42 | 7d9d3d5ce2ac19221163d54a94c025993db0af4f | /autotest/ogr/ogr_as_sqlite_extension.py | 196c19f4dacaaa9d6eaa16bd697cd2fcec173aff | [
"MIT"
] | permissive | dcgull/gdal | 5408adad77d001db32173bba547b447220b5e9a2 | a5e2a7b54db955bd061ebfc6d69aa2dd752b120c | refs/heads/master | 2020-04-03T13:30:40.013172 | 2013-10-11T12:07:57 | 2013-10-11T12:07:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,417 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id: ogr_as_sqlite_extension.py 25408 2012-12-30 21:41:43Z rouault $
#
# Project: GDAL/OGR Test Suite
# Purpose: Test GDAL as a SQLite3 dynamically loaded extension
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2012, Even Rouault <even dot rouault at mines dash paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
# This file is meant at being run by ogr_virtualogr_3()
# This is a bit messy with heavy use of ctypes. The sqlite3 python module
# is rarely compiled with support of extension loading, so we just simulate
# what a tiny C program would do
import sys
def do(sqlite3name, gdalname):
try:
import ctypes
except:
print('skip')
sys.exit(0)
sqlite_handle = ctypes.cdll.LoadLibrary(sqlite3name)
if sqlite_handle is None:
print('skip')
sys.exit(0)
db = ctypes.c_void_p(0)
pdb = ctypes.pointer(db)
if hasattr(sqlite_handle, 'sqlite3_open'):
ret = sqlite_handle.sqlite3_open(':memory:', pdb)
elif hasattr(sqlite_handle, 'SPLite3_open'):
ret = sqlite_handle.SPLite3_open(':memory:', pdb)
else:
print('skip')
sys.exit(0)
if ret != 0:
print('Error sqlite3_open ret = %d' % ret)
sys.exit(1)
if hasattr(sqlite_handle, 'sqlite3_enable_load_extension'):
ret = sqlite_handle.sqlite3_enable_load_extension(db, 1)
elif hasattr(sqlite_handle, 'SPLite3_enable_load_extension'):
ret = sqlite_handle.SPLite3_enable_load_extension(db, 1)
else:
print('skip')
sys.exit(0)
if ret != 0:
print('skip')
sys.exit(0)
gdalname = gdalname.encode('ascii')
if hasattr(sqlite_handle, 'sqlite3_load_extension'):
ret = sqlite_handle.sqlite3_load_extension(db, gdalname, None, None)
else:
ret = sqlite_handle.SPLite3_load_extension(db, gdalname, None, None)
if ret != 0:
print('Error sqlite3_load_extension ret = %d' % ret)
sys.exit(1)
tab = ctypes.c_void_p()
ptab = ctypes.pointer(tab)
nrow = ctypes.c_int(0)
pnrow = ctypes.pointer(nrow)
ncol = ctypes.c_int(0)
pncol = ctypes.pointer(ncol)
if hasattr(sqlite_handle, 'sqlite3_get_table'):
ret = sqlite_handle.sqlite3_get_table(db, 'SELECT ogr_version()'.encode('ascii'), ptab, pnrow, pncol, None)
else:
ret = sqlite_handle.SPLite3_get_table(db, 'SELECT ogr_version()'.encode('ascii'), ptab, pnrow, pncol, None)
if ret != 0:
print('Error sqlite3_get_table ret = %d' % ret)
sys.exit(1)
cast_tab = ctypes.cast(tab, ctypes.POINTER(ctypes.c_char_p))
sys.stdout.write(cast_tab[1].decode('ascii'))
sys.stdout.flush()
if hasattr(sqlite_handle, 'sqlite3_close'):
ret = sqlite_handle.sqlite3_close(db)
else:
ret = sqlite_handle.SPLite3_close(db)
if ret != 0:
sys.exit(1)
gdaltest_list = []
if __name__ == '__main__':
if len(sys.argv) != 3:
print('python ogr_as_sqlite_extension name_of_libsqlite3 name_of_libgdal')
sys.exit(1)
do(sys.argv[1], sys.argv[2])
| [
"gerard.choinka@ambrosys.de"
] | gerard.choinka@ambrosys.de |
08f8458a8dd2f4c2231f5131f93e9e29971d471a | 52b5fa23f79d76883728d8de0bfd202c741e9c43 | /kubernetes/test/test_v1beta1_replica_set.py | 18fd0a6b9e4f4ded18a2d959d94bdc11e6ac95c0 | [] | no_license | kippandrew/client-python-tornado | 5d00810f57035825a84e37ff8fc89a7e79aed8da | d479dfeb348c5dd2e929327d800fe033b5b3b010 | refs/heads/master | 2021-09-04T13:01:28.275677 | 2018-01-18T23:27:34 | 2018-01-18T23:27:34 | 114,912,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.8.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v1beta1_replica_set import V1beta1ReplicaSet # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1beta1ReplicaSet(unittest.TestCase):
"""V1beta1ReplicaSet unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1ReplicaSet(self):
"""Test V1beta1ReplicaSet"""
# FIXME: construct object with mandatory attributes with example values
# model = kubernetes.client.models.v1beta1_replica_set.V1beta1ReplicaSet() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"andy@rstudio.com"
] | andy@rstudio.com |
7a3ac6320527067a9046da167e160e4d3cf4874b | 056879eddb853dbf8c96954d212d862282a0dee7 | /basic/text2num.py | e2d87139aa40b95eb976dee89a871e1df1deacc5 | [] | no_license | pytutorial/samples | fed3965bc7ff3a81913bf24db7212dfbf6ab9411 | 850cdf87867ec4ac568405ab055ae9e40b636479 | refs/heads/master | 2022-08-14T23:26:31.606502 | 2022-07-26T02:55:16 | 2022-07-26T02:55:16 | 210,514,946 | 9 | 13 | null | null | null | null | UTF-8 | Python | false | false | 1,714 | py | """
Chương trình chuyển phát âm tiếng Việt của một số 3 chữ số sang giá trị số
- Đầu vào : phát âm tiếng Việt của một số trong phạm vi 1 đến 999
- Đầu ra : giá trị của số
"""
bang_so1 = {'một' : 1, 'hai' : 2, 'ba' : 3, 'bốn' : 4, 'năm' : 5, 'sáu' : 6, 'bảy' : 7, 'tám' : 8, 'chín' : 9, 'mười' : 10}
bang_so2 = {'một' : 1, 'hai' : 2, 'ba' : 3, 'bốn' : 4, 'lăm' : 5, 'sáu' : 6, 'bảy' : 7, 'tám' : 8, 'chín' : 9}
bang_so3 = {'mươi' : 0, 'mốt' : 1, 'hai' : 2, 'ba' : 3, 'bốn' : 4, 'tư' : 4, 'lăm' : 5, 'sáu' : 6, 'bảy' : 7, 'tám' : 8, 'chín' : 9}
def convert2digits(words):
N = len(words)
if N == 1:
return bang_so1.get(words[0], -1)
chuc, donvi = -1, -1
if (N == 3 and words[1] == 'mươi') or N == 2:
chuc = bang_so1.get(words[0], -1)
donvi = bang_so3.get(words[-1], -1)
if N == 2 and words[0] == 'mười':
chuc = 1
donvi = bang_so2.get(words[1], -1)
if chuc >= 0 and donvi >= 0:
return 10 * chuc + donvi
return -1
def convert3digits(words):
N = len(words)
if N <= 1 or words[1] != 'trăm':
return convert2digits(words)
tram = bang_so1.get(words[0], -1)
if N == 2 and tram >= 0:
return 100*tram
if N == 4 and words[2] == 'lẻ':
donvi = bang_so1.get(words[3], -1)
if tram >= 0 and donvi >= 0:
return 100*tram + donvi
x = convert2digits(words[2:])
if tram >= 0 and x >= 0:
return 100*tram + x
return -1
def text2num(text):
return convert3digits(text.lower().split())
print(text2num('tám trăm năm mươi tư'))
| [
"duongthanhtungvn01@gmail.com"
] | duongthanhtungvn01@gmail.com |
6f940feba72d8b1ff8c1ca3a405ead22e64a3171 | bd17e9fc0e5978cb664037bffdcf618a893e0523 | /python/dataio/reader/discrete_sequence_reader.py | 0991026d62042e0bee801ae9d215617255917c4c | [] | no_license | kedz/ntg | 598513fb2c6e910ad11f40f031675a587eb7ec79 | 34f13b23a6850eb0c8a727a51e7aa49fd6aec098 | refs/heads/master | 2020-12-07T15:29:10.305416 | 2017-11-07T03:07:52 | 2017-11-07T03:07:52 | 95,521,368 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,243 | py | import torch
from dataio.reader.reader_base import ReaderBase
from preprocessor import TextPreprocessor
from vocab import Vocab
class DiscreteSequenceReader(ReaderBase):
def __init__(self, field=0, strip=True, lowercase=True,
replace_digits=True, tokenizer=None,
unknown_token="_UNK_", special_tokens=None,
top_k=10000000, at_least=1, left_pad=None, right_pad=None,
offset_output=False):
if isinstance(special_tokens, str):
special_tokens = [special_tokens]
elif special_tokens is None:
special_tokens = []
if isinstance(left_pad, str):
left_pad = [left_pad]
elif left_pad is None:
left_pad = []
if isinstance(right_pad, str):
right_pad = [right_pad]
elif right_pad is None:
right_pad = []
for token in left_pad + right_pad:
if token not in special_tokens:
special_tokens.append(token)
self.left_pad_ = left_pad
self.right_pad_ = right_pad
self.offset_output_ = offset_output
v = Vocab(
unknown_token=unknown_token, special_tokens=special_tokens,
at_least=at_least, top_k=top_k)
pp = TextPreprocessor(
strip=strip, lowercase=lowercase, replace_digits=replace_digits,
tokenizer=tokenizer)
super(DiscreteSequenceReader, self).__init__(field, pp, v)
self.register_data("data_")
self.register_data("length_")
if self.offset_output:
self.register_data("data_offset_")
def process(self, string):
tokens = self.left_pad + self.preprocess(string) + self.right_pad
indices = [self.vocab.index(token) for token in tokens]
return indices
def save_data(self, datum):
if self.offset_output:
self.data_.append(datum[:-1])
self.data_offset_.append(datum[1:])
self.length_.append(len(datum) - 1)
else:
self.data_.append(datum)
self.length_.append(len(datum))
def info(self):
total = sum(v for k, v in self.vocab.count.items())
unique = len(self.vocab.count)
msg = "DiscreteSequenceReader found {} tokens with " \
"{} unique labels.\n".format(total, unique)
msg += "After pruning, vocabulary has {} unique tokens.\n".format(
self.vocab.size)
for i in range(1, min(self.vocab.size, 11)):
token = self.vocab.token(i)
count = self.vocab.count.get(token, 0)
msg += "{}) {} ({})\n".format(i, token, count)
if i < self.vocab.size:
msg += ":\n:\n:\n"
for i in range(self.vocab.size - 11, self.vocab.size):
token = self.vocab.token(i)
count = self.vocab.count.get(token, 0)
msg += "{}) {} ({})\n".format(i, token, count)
return msg
def finish(self, reset=True):
data_size = len(self.length_)
max_len = max(self.length_)
zed = tuple([0])
if self.offset_output:
for i in range(data_size):
if self.length_[i] < max_len:
self.data_[i] += zed * (max_len - self.length_[i])
self.data_offset_[i] += zed * (max_len - self.length_[i])
input = torch.LongTensor(self.data_)
output = torch.LongTensor(self.data_offset_)
length = torch.LongTensor(self.length_)
finshed_data = (input, output, length)
else:
for i in range(data_size):
if self.length_[i] < max_len:
self.data_[i] += zed * (max_len - self.length_[i])
data = torch.LongTensor(self.data_)
length = torch.LongTensor(self.length_)
finshed_data = (data, length)
if reset:
self.reset()
return finshed_data
@property
def offset_output(self):
return self.offset_output_
@property
def left_pad(self):
return self.left_pad_
@property
def right_pad(self):
return self.right_pad_
| [
"kedzie@cs.columbia.edu"
] | kedzie@cs.columbia.edu |
f885ec0af4413843f7eeaa5784e43cc759c6288f | c1e4c5ee80eb8c820bbc0319f2803123ee4ab781 | /misc/hashfiles.py | af58a723def1a6e16a757c8e9883af9b88906013 | [] | no_license | pstrinkle/thesis-source | f6a2835e2464ea7294b35bbfdfec1f586196fc90 | 91ed3b5a4230864d20db38f4f9b22a7c0a73f1ec | refs/heads/master | 2021-01-17T17:50:09.448806 | 2016-06-13T21:15:53 | 2016-06-13T21:15:53 | 61,070,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | #! /usr/bin/python
__author__ = 'tri1@umbc.edu'
##
# @author: Patrick Trinkle
# Summer 2011
#
# @summary: Stuff.
#
import os
import sys
import hashlib
import misc
def usage():
"""Parameters."""
sys.stderr.write("usage: %s path\n" % sys.argv[0])
def main():
if len(sys.argv) != 2:
usage()
sys.exit(-1)
startpoint = sys.argv[1]
file_hashes = {}
for path in misc.get_file(startpoint):
with open(path, "r") as path:
contents = path.read()
hash = hashlib.sha512(contents).hexdigest()
try:
file_hashes[hash].append(path)
except KeyError:
file_hashes[hash] = []
file_hashes[hash].append(path)
for hash in file_hashes:
if len(file_hashes[hash]) > 1:
print "found possible duplicates"
for path in file_hashes[hash]:
print "\t%s" % path
if __name__ == "__main__":
main()
| [
"patrick@activatr.com"
] | patrick@activatr.com |
b73546084cc6476dceee5b63449f2da885256011 | 5dc7dc7e33122e8c588eb6e13f23bf032c704d2e | /econ_platform_core/extensions/__init__.py | 24d3751da8aa85d909e3378dd18c6abaa1729eb8 | [
"Apache-2.0"
] | permissive | brianr747/platform | a3319e84858345e357c1fa9a3916f92122775b30 | 84b1bd90fc2e35a51f32156a8d414757664b4b4f | refs/heads/master | 2022-01-23T16:06:26.855556 | 2022-01-12T18:13:22 | 2022-01-12T18:13:22 | 184,085,670 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,506 | py | """
Platform extensions.
Place all modules that extend the platform (including monkey-patching the base code).
This module creates an load_extensions() function that imports *all* python source (*.py) modules in this directory.
Will come up more options (a user-configurable list?) later.
Obviously, use at own risk!
Copyright 2019 Brian Romanchuk
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import importlib
import os
import econ_platform_core
# This function will be replaced with straight import statements. Only leaving this dynamic
# since the design is changing rapidly at this stage. Once the core has stabilised, we will just import
# the "core extensions". As a result, no point in figuring out unit test techniques.
# Also: may create a "ExtensionManager" class to do this work.
class ExtensionManager(econ_platform_core.PlatformEntity):
"""
Class to handle extension loading and status. Currently non-functional; code will migrate to using this.
This class just offers the interface (for code completion purposes; the real extension manager will be
defined in extensions.__init__.py
"""
def __init__(self):
super().__init__()
self.LoadedExtensions = []
self.FailedExtensions = []
self.DecoratedFailedExtensions = []
def load_extensions(): # pragma: nocover
"""
Imports all *.py files in this directory (in alphabetical order).
Since the order of import will eventually matter, will need to add something to force a order of import operations.
For now, not am issue (can just use the alphabetical order rule to fix problems).
All errors are caught and largely ignored (other than listing the module that failed, and a text dump on the
console.
Returns [loaded_extensions, failed_extensions]
The operations on import of an extension:
(1) The import itself. If you wish, you can just put a script that is executed.
(2) If the module has a variable (hopefully a string) with the name 'extension_name', that is used as the extension
name for display, otherwise it is the name of the text file.
(3) If the module has a main() function, it is called.
Since logging is not yet initialised, things are dumped to console rather than logged. (If you really need logging
for debugging purposes, you could turn on logging in the extension.)
:return: list
"""
# There might be some iteration tools in importlib, but no time to read documentation...
this_dir = os.path.dirname(__file__)
flist = os.listdir(this_dir)
# Do alphabetical order
flist.sort()
exclusion_list = ['__init__']
loaded_extensions = []
failed_extensions = []
decorated_fails = []
use_monkey_example = econ_platform_core.PlatformConfiguration['Options'].getboolean('UseMonkeyPatchExample')
use_example_provider = econ_platform_core.PlatformConfiguration['Options'].getboolean('UseExampleProvider')
if not use_monkey_example:
exclusion_list.append('monkey_patch_example')
if not use_example_provider:
exclusion_list.append('hook_provider_example')
for fname in flist:
fname = fname.lower()
if not fname.endswith('.py'):
continue
fname = fname[:-3]
if fname in exclusion_list:
continue
# Import it!
try:
mod = importlib.import_module('econ_platform_core.extensions.' + fname)
if hasattr(mod, 'extension_name'):
fname = str(mod.extension_name)
# Try running main()
if hasattr(mod, 'main'):
mod.main()
print('Extension {0} loaded.'.format(fname))
loaded_extensions.append(fname)
except Exception as ex:
print('Failure loading extension:', fname)
print(type(ex), str(ex))
failed_extensions.append(fname)
decorated_fails.append((fname, str(ex)))
return (loaded_extensions, failed_extensions, decorated_fails) | [
"brianr747@gmail.com"
] | brianr747@gmail.com |
90e67e8f27903d86d9ceb8579ee7c679d3dfeaae | 4d59015f3392952d3b969cd46289974f4ed625cc | /machines/rasppi01/current_loop_logger.py | 513a8ace41ef71fc9224b526309d518a429499f5 | [] | no_license | jlopezBolt/PyExpLabSys | 766d6eae909c10db1783c31f9c0bb9478d22cd74 | 14d2a24c3031a78da0d2d686c42bc01ffe18faca | refs/heads/master | 2021-01-19T23:57:52.297666 | 2016-04-19T08:20:02 | 2016-04-19T08:21:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,385 | py | """ Argon pressure measuring """
from __future__ import print_function
import threading
import logging
import time
from PyExpLabSys.common.value_logger import ValueLogger
from PyExpLabSys.common.database_saver import ContinuousDataSaver
from PyExpLabSys.common.sockets import DateDataPullSocket
from PyExpLabSys.common.sockets import LiveSocket
from ABE_helpers import ABEHelpers
from ABE_ADCPi import ADCPi
import credentials
class PressureReader(threading.Thread):
""" Read argon pressure """
def __init__(self, adc):
threading.Thread.__init__(self)
self.adc = adc
self.waterpressure = -1
self.quit = False
def value(self):
""" Return the value of the reader """
return self.waterpressure
def run(self):
while not self.quit:
time.sleep(1)
current = (self.adc.read_voltage(1) / 148) * 1000
self.waterpressure = (current - 4) * (500 / 16) * 0.068947
def main():
""" Main function """
logging.basicConfig(filename="logger.txt", level=logging.ERROR)
logging.basicConfig(level=logging.ERROR)
i2c_helper = ABEHelpers()
bus = i2c_helper.get_smbus()
adc_instance = ADCPi(bus, 0x68, 0x69, 18)
pressurereader = PressureReader(adc_instance)
pressurereader.daemon = True
pressurereader.start()
logger = ValueLogger(pressurereader, comp_val=0.5)
logger.start()
socket = DateDataPullSocket('hall_n5_argon_pressure',
['n5_argon_pressure'], timeouts=[1.0])
socket.start()
live_socket = LiveSocket('hall_n5_argon_pressure', ['n5_argon_pressure'], 2)
live_socket.start()
db_logger = ContinuousDataSaver(continuous_data_table='dateplots_hall',
username=credentials.user,
password=credentials.passwd,
measurement_codenames=['n5_argon_pressure'])
db_logger.start()
time.sleep(2)
while True:
time.sleep(0.25)
value = logger.read_value()
socket.set_point_now('n5_argon_pressure', value)
live_socket.set_point_now('n5_argon_pressure', value)
if logger.read_trigged():
print(value)
db_logger.save_point_now('n5_argon_pressure', value)
logger.clear_trigged()
if __name__ == '__main__':
main()
| [
"jensen.robert@gmail.com"
] | jensen.robert@gmail.com |
831d89d9010f5aa0c82cbaac8e30bf85c06391ab | 364ce434984eedab1ed491ad3e12bb245eeddf8b | /Fwd Converter Tool and Script/parse.py | 9217746b601771a526fa995f78a529361d9bfb3e | [] | no_license | anirudhdahiya9/sentence-type-identification | c532dffb14efcfb44444e2f737ddaa10d3673953 | b4271a5b4f5c214fdcbe10582220c2cf3300c826 | refs/heads/master | 2021-01-19T05:03:53.533891 | 2016-07-05T10:47:20 | 2016-07-05T10:47:20 | 61,596,642 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,271 | py | #!/usr/bin/python
import os
import sys
import codecs
inp = sys.argv[1]
out = sys.argv[2]
count = 1
for line in codecs.open(inp, 'r', 'utf-8'):
if line.strip() != '':
codecs.open('temp.out', 'w', 'utf-8').write(line)
os.system("sh $SHALLOW_PARSER_HIN/bin/sl/tokenizer/tokenizer.sh temp.out > temp1.out")
os.system('perl -C ~/convertor-indic-1.5.2/convertor_indic.pl -f=ssf -l=hin -s=utf -t=wx -i=temp1.out -o=temp2.out')
os.system('sh $SHALLOW_PARSER_HIN/bin/sl/morph/hin/morph.sh temp2.out>>temp3.out')
os.system('perl -C ~/convertor-indic-1.5.2/convertor_indic.pl -f=ssf -l=hin -s=utf -t=wx -i=temp3.out -o=temp4.out')
os.system("sh $SHALLOW_PARSER_HIN/bin/sl/postagger/hin/postagger.sh temp4.out > temp5.out")
#os.system('perl -C ~/convertor-indic-1.5.2/convertor_indic.pl -f=ssf -l=hin -s=utf -t=wx -i=temp5.out -o=temp6.out')
#os.system("sh $SHALLOW_PARSER_HIN/bin/sl/chunker/hin/chunker.sh temp6.out > temp7.out")
#os.system(" perl $SHALLOW_PARSER_HIN/bin/sl/pruning/pruning.pl --path=$SHALLOW_PARSER_HIN/bin/sl/pruning/ --resource=$SHALLOW_PARSER_HIN/data_bin/sl/pruning/mapping.dat < temp8.out | perl $SHALLOW_PARSER_HIN/bin/sl/pickonemorph/pickonemorph.pl --path=$SHALLOW_PARSER_HIN/bin/sl/pickonemorph/ | perl $SHALLOW_PARSER_HIN/bin/sl/headcomputation/headcomputation.pl --path=$SHALLOW_PARSER_HIN/bin/sl/headcomputation/ | perl $SHALLOW_PARSER_HIN/bin/sl/vibhakticomputation/vibhakticomputation.pl --path=$SHALLOW_PARSER_HIN/bin/sl/vibhakticomputation/ | perl $SHALLOW_PARSER_HIN/bin/sl/vibhakticomputation/printinput.pl")
#os.system('perl -C ~/convertor-indic-1.5.2/convertor_indic.pl -f=ssf -l=hin -s=utf -t=wx -i=temp9.out -o=temp10.out')
os.system('perl -C ~/convertor-indic-1.5.2/convertor_indic.pl -f=ssf -l=hin -s=wx -t=utf -i=temp5.out>>' + out)
os.system('rm temp.out')
os.system('rm temp1.out')
os.system('rm temp2.out')
os.system('rm temp3.out')
os.system('rm temp4.out')
os.system('rm temp5.out')
#os.system('rm temp6.out')
#os.system('rm temp7.out')
#os.system('rm temp8.out')
#os.system('rm temp9.out')
print("Processed Line no " + str(count))
count += 1
| [
"anirudhdahiya9@gmail.com"
] | anirudhdahiya9@gmail.com |
7f74138e0d9edcce5ccd2899fbd27e9087eee765 | f22efea488f85d3ce88f9a5be7b8b6a0f589f747 | /widgets/img/ex2_img.py | b536c2186829a2a491f5c328edc5f7ea5334d031 | [
"MIT"
] | permissive | maozhifeng/lv_mpy_examples | c96cf97db58a93a65d2c3944a759694e9d7bd54d | 9c5abcf562b3eb4ace65e658cb5a5ca5443347ba | refs/heads/main | 2023-02-01T18:27:28.496638 | 2020-12-08T15:54:18 | 2020-12-08T15:54:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,346 | py | #!/opt/bin/lv_micropython
import time
import lvgl as lv
import init_gui
from lv_colors import lv_colors
from imagetools import get_png_info, open_png
SLIDER_WIDTH=15
# Register PNG image decoder
decoder = lv.img.decoder_create()
decoder.info_cb = get_png_info
decoder.open_cb = open_png
with open('img_cogwheel_argb.png','rb') as f:
png_data = f.read()
png_img_dsc = lv.img_dsc_t({
'data_size': len(png_data),
'data': png_data
})
def slider_event_cb(slider,event):
if event == lv.EVENT.VALUE_CHANGED:
# Recolor the image based on the sliders' values
color = lv.color_make(red_slider.get_value(), green_slider.get_value(), blue_slider.get_value())
intense = intense_slider.get_value()
img1.set_style_local_image_recolor_opa(lv.img.PART.MAIN, lv.STATE.DEFAULT, intense)
img1.set_style_local_image_recolor(lv.img.PART.MAIN, lv.STATE.DEFAULT, color)
# Create a set of RGB sliders
# Use the red one as a base for all the settings
red_slider = lv.slider(lv.scr_act(), None)
red_slider.set_range(0, 255)
red_slider.set_size(SLIDER_WIDTH, 200) # Be sure it's a vertical slider
red_slider.set_style_local_bg_color(lv.slider.PART.INDIC, lv.STATE.DEFAULT, lv_colors.RED)
red_slider.set_event_cb(slider_event_cb)
# Copy it for the other three sliders
green_slider = lv.slider(lv.scr_act(), red_slider)
green_slider.set_style_local_bg_color(lv.slider.PART.INDIC, lv.STATE.DEFAULT, lv_colors.LIME)
green_slider.set_event_cb(slider_event_cb)
blue_slider = lv.slider(lv.scr_act(), red_slider)
blue_slider.set_style_local_bg_color(lv.slider.PART.INDIC, lv.STATE.DEFAULT, lv_colors.BLUE)
blue_slider.set_event_cb(slider_event_cb)
intense_slider = lv.slider(lv.scr_act(), red_slider)
intense_slider.set_style_local_bg_color(lv.slider.PART.INDIC, lv.STATE.DEFAULT, lv_colors.GRAY)
intense_slider.set_value(255, lv.ANIM.OFF)
intense_slider.set_event_cb(slider_event_cb)
red_slider.align(None, lv.ALIGN.IN_LEFT_MID, 15, 0)
green_slider.align(red_slider, lv.ALIGN.OUT_RIGHT_MID, 15, 0)
blue_slider.align(green_slider, lv.ALIGN.OUT_RIGHT_MID, 15, 0)
intense_slider.align(blue_slider, lv.ALIGN.OUT_RIGHT_MID, 15, 0)
img1 = lv.img(lv.scr_act(),None)
lv.img.cache_set_size(2)
img1.align(lv.scr_act(), lv.ALIGN.CENTER, 50, -30)
img1.set_src(png_img_dsc)
while True:
lv.task_handler()
time.sleep_ms(10)
| [
"uli.raich@gmail.com"
] | uli.raich@gmail.com |
3b4c5a53b2b3f98002af33023a574713c44a007d | 8e07f5f06452f9566640d2130a5c1bcefcebd745 | /peter/completecrm/cases/forms.py | 9a207afbdaa4159c453ba6a4cc06593941fbc2e9 | [
"MIT"
] | permissive | bot242/djangocrm | 65dbe42a814fd538d77ec9c0cc5626a7d6ce19b4 | 6f5e64b4f65dbb13583d68ef5f6a3feaea51befb | refs/heads/main | 2023-01-23T10:29:32.338620 | 2020-12-02T06:43:35 | 2020-12-02T06:43:35 | 317,773,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,996 | py | from django import forms
from cases.models import Case
from common.models import Comment, Attachments
from teams.models import Teams
# import phonenumbers
import datetime
class CaseForm(forms.ModelForm):
teams_queryset = []
teams = forms.MultipleChoiceField(choices=teams_queryset)
def __init__(self, *args, **kwargs):
casecount = Case.objects.all().count()
print("CASECOUNT:",casecount)
c = casecount+1
assigned_users = kwargs.pop('assigned_to', [])
case_accounts = kwargs.pop('account', [])
case_contacts = kwargs.pop('contacts', [])
super(CaseForm, self).__init__(*args, **kwargs)
for field in self.fields.values():
field.widget.attrs = {"class": "form-control"}
self.fields['description'].widget.attrs.update({
'rows': '4'})
self.fields['address'].widget.attrs.update({
'rows': '4'})
self.fields['action_items'].widget.attrs.update({
'rows': '4'})
self.fields['parent_description'].widget.attrs.update({
'rows': '3'})
if assigned_users:
self.fields['assigned_to'].queryset = assigned_users
self.fields['assigned_to'].required = False
self.fields['assigned_date'].required = False
self.fields['assigned_date'].widget.attrs['readonly'] = True
self.fields['assigned_date' ].input_formats = [ '%d-%m-%Y %H:%M:%S' ]
self.fields['assigned_date'].initial = datetime.datetime.now().strftime('%d-%m-%Y %H:%M:%S')
self.fields['account'].queryset = case_accounts
self.fields['contacts'].queryset = case_contacts
self.fields['contacts'].required = False
self.fields['case_number'].required = True
self.fields['case_number'].initial = "C_00"+str(c)
self.fields['case_number'].widget.attrs['readonly'] = True
self.fields['creation_date'].required = True
self.fields['creation_date'].widget.attrs['readonly'] = True
self.fields['creation_date'].input_formats = [ '%d-%m-%Y %H:%M:%S' ]
self.fields['creation_date'].initial = datetime.datetime.now().strftime('%d-%m-%Y %H:%M:%S')
self.fields['case_type'].required = True
self.fields['closed_on'].required = False
self.fields['closed_on'].widget.attrs['readonly'] = True
self.fields['closed_on' ].input_formats = [ '%d-%m-%Y %H:%M:%S']
self.fields['closed_on'].initial = datetime.datetime.now().strftime('%d-%m-%Y %H:%M:%S')
# self.fields['assigned_to'].required = True
# self.fields['assigned_date'].required = True
self.fields['sla'].widget.attrs.update({'class' :'sla'})
# self.fields['sla'].widget.attrs['placeholder'] = "00:00:00"
for key, value in self.fields.items():
value.widget.attrs['placeholder'] = value.label
self.fields['parent_case'].widget.attrs['placeholder'] ="Related Case"
self.fields['name'].widget.attrs['placeholder'] = "Contact name"
self.fields['phone1'].widget.attrs['placeholder'] = "Phone/Mobile"
self.fields["teams"].choices = [(team.get('id'), team.get('name')) for team in Teams.objects.all().values('id', 'name')]
self.fields["teams"].required = False
    class Meta:
        # Bind this form to the Case model and whitelist the editable fields.
        model = Case
        fields = ('assigned_to','phone1', 'name', 'status',
                  'priority', 'case_type', 'account','remark',
                  'contacts', 'closed_on', 'description', 'sla',
                  'case_number', 'email', 'address', 'action_items', 'creation_date','assigned_date','parent_case','parent_description')
        # Render phone1 as a numeric HTML input instead of the default widget.
        widgets = {
            'phone1': forms.NumberInput(attrs={'class': 'form-control','type': 'number'})
            # widget=forms.TextInput(attrs={'min':1,'max': '5','type': 'number'}))
        }
# def clean_name(self):
# name = self.cleaned_data['name']
# case = Case.objects.filter(
# name__iexact=name).exclude(id=self.instance.id)
# if case:
# raise forms.ValidationError("Case Already Exists with this Name")
# else:
# return name
# def clean_phone1(self):
# phone1 = self.cleaned_data.get("phone1")
# z = phonenumbers.parse(phone1)
# if not phonenumbers.is_valid_number(z):
# raise forms.ValidationError("Number not in valid")
# return phone1
class CaseCommentForm(forms.ModelForm):
    """Form for posting a comment on a Case."""
    # Free-text body of the comment, capped at 255 characters.
    comment = forms.CharField(max_length=255, required=True)

    class Meta:
        model = Comment
        fields = ('comment', 'case', 'commented_by', )
class CaseAttachmentForm(forms.ModelForm):
    """Form for attaching a file to a Case."""
    # Uploaded file; max_length bounds the stored file-name length.
    attachment = forms.FileField(max_length=1001, required=True)

    class Meta:
        model = Attachments
        fields = ('attachment', 'case')
# class SlaForm(forms.ModelForm):
# class Meta:
# time = forms.TimeField(input_formats=["%H:%M"])
# model = Sla
# fields = ('status','time') | [
"sankarmass619@gmail.com"
] | sankarmass619@gmail.com |
742778cfbe8961fcfb828688a65fe536e706c2ac | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /ACL_PyTorch/contrib/cv/image_process/DnCNN/data_preprocess.py | 729ade7a02e497f94c8c0476117cbb5214c790b1 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 2,544 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import sys
import os
import os.path
import numpy as np
import random
import torch
import cv2
import glob
infer_data = 'Set68'  # dataset sub-folder holding the inference PNGs
infer_noiseL = 15     # Gaussian noise sigma on the 0-255 scale (divided by 255 below)
def normalize(data):
    """Map 8-bit pixel values from [0, 255] into the [0.0, 1.0] range."""
    pixel_max = 255.
    return data / pixel_max
def proprecess(data_path, ISource_bin, INoisy_bin):
    """Write clean/noisy raw-float32 pairs for every PNG in the test set.

    Each image's first channel is scaled to [0, 1], zero-padded and centred
    on a 481x481 canvas, given batch and channel axes -> (1, 1, 481, 481),
    and dumped to ISource_bin; a copy with additive Gaussian noise
    (std = infer_noiseL / 255) is dumped to INoisy_bin under the same name.
    """
    # load data info
    print('Loading data info ...\n')
    image_paths = sorted(glob.glob(os.path.join(data_path, infer_data, '*.png')))
    # process data
    for image_path in image_paths:
        filename = os.path.basename(image_path)
        # keep only the first channel and scale to [0, 1]
        raw = cv2.imread(image_path)
        gray = normalize(np.float32(raw[:, :, 0]))
        # centre the image inside a zeroed 481x481 canvas
        canvas = np.zeros((481, 481), dtype=np.float32)
        left = (481 - gray.shape[1]) // 2
        top = (481 - gray.shape[0]) // 2
        canvas[top:top + gray.shape[0], left:left + gray.shape[1]] = gray
        # add batch and channel axes -> (1, 1, 481, 481)
        ISource = torch.Tensor(canvas[np.newaxis, np.newaxis, :, :])
        # noise
        noise = torch.FloatTensor(ISource.size()).normal_(mean=0, std=infer_noiseL / 255.)
        # noisy image
        INoisy = ISource + noise
        # save ISource_bin
        ISource = ISource.numpy()
        print("ISource shape is", ISource.shape)
        ISource.tofile(os.path.join(ISource_bin, filename.split('.')[0] + '.bin'))
        # save INoisy_bin
        INoisy = INoisy.numpy()
        print("INoisy shape is", INoisy.shape)
        INoisy.tofile(os.path.join(INoisy_bin, filename.split('.')[0] + '.bin'))
if __name__ == '__main__':
    # Usage: data_preprocess.py <data_path> <ISource_bin> <INoisy_bin>
    data_path = sys.argv[1]    # dataset root containing the Set68 folder
    ISource_bin = sys.argv[2]  # output dir for clean image binaries
    INoisy_bin = sys.argv[3]   # output dir for noisy image binaries
    # Idiom fix: never compare booleans with `is False`; use `not`.
    if not os.path.exists(ISource_bin):
        os.mkdir(ISource_bin)
    if not os.path.exists(INoisy_bin):
        os.mkdir(INoisy_bin)
    proprecess(data_path, ISource_bin, INoisy_bin)
| [
"wangjiangben@huawei.com"
] | wangjiangben@huawei.com |
5b2880d4193c0aca32d56ce78f67f59b0a7be22d | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano1082.py | b8f95efbdc487c05e58b233e9de62f8739a7bc2f | [] | no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,293 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
# NOTE: auto-generated by cmsDriver/ConfigBuilder (see header above);
# prefer regenerating over hand-editing structural parts.
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
# -1 => process every event in the input file
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/120000/C42A2FC9-76FC-7F4E-929A-F2957BCFBF0D.root'),
    secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
    annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
    name = cms.untracked.string('Applications'),
    version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
    compressionAlgorithm = cms.untracked.string('LZMA'),
    compressionLevel = cms.untracked.int32(9),
    dataset = cms.untracked.PSet(
        dataTier = cms.untracked.string('NANOAOD'),
        filterName = cms.untracked.string('')
    ),
    fileName = cms.untracked.string('file:jetToolbox_nano_datatest1082.root'),
    outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
"rsk146@scarletmail.rutgers.edu"
] | rsk146@scarletmail.rutgers.edu |
8ea3119829e7f8014ee5ff896e439e31e5bef8d9 | 5a7a3447d434a458a7bb63f2aa11b64c284d5492 | /Data_storage/ini/email_conf/email_RW.py | 06351a2a740c5d3a12b234683e772c500589dd6e | [] | no_license | woshimayi/mypython | 35792e12036a7a05f12d3ef7006637b2b03f0e2e | 7f1eb38e8585bf6d2f21d3ad0f64dace61425875 | refs/heads/master | 2023-09-01T08:59:12.301836 | 2023-08-30T05:30:54 | 2023-08-30T05:30:54 | 130,017,052 | 4 | 0 | null | 2018-12-02T16:18:14 | 2018-04-18T06:50:36 | HTML | UTF-8 | Python | false | false | 5,166 | py | #!/usr/bin/env python
# encoding: utf-8
'''
* @FilePath: email_RW.py
* @version: (C) Copyright 2010-2049, Node Supply Chain Manager Corporation Limited.
* @Author: dof
* @Date: 2022/2/20 17:18
* @LastEditors: sueRimn
* @LastEditTime: 2022/2/20 17:18
* @Descripttion:
'''
# !/usr/bin/env python
# encoding: utf-8
'''
@author: caopeng
@license: (C) Copyright 2013-2017, Node Supply Chain Manager Corporation Limited.
@contact: deamoncao100@gmail.com
@software: garner
@file: test_dof.py
@time: 2021/2/22 15:37
@desc: ini file read write
'''
import configparser
'''
# write ini file
config = configparser.ConfigParser()
config['DEFAULT'] = {'ServerAliveInterval': '45',
'Compression': 'yes',
'CompressionLevel': '9'}
config['bitbucket.org'] = {}
config['bitbucket.org']['User'] = 'hg'
config['topsecret.server.com'] = {}
topsecret = config['topsecret.server.com']
topsecret['Port'] = '50022' # mutates the parser
topsecret['ForwardX11'] = 'no' # same here
config['DEFAULT']['ForwardX11'] = 'yes'
with open('example.ini', 'w') as configfile:
config.write(configfile)
# read ini file
config = configparser.ConfigParser()
print('sections')
print('1', config.sections())
print('2', config.read('example.ini'))
print('3', config.sections())
print('4', ('bitbucket.org' in config))
print('5', ('bytebong.com' in config))
print('6', config['bitbucket.org']['User'])
print('7', config['DEFAULT']['Compression'])
topsecret = config['topsecret.server.com']
print('8', topsecret['ForwardX11'])
print('9', topsecret['Port'])
for key in config['bitbucket.org']:
print('10', key)
for key in config['topsecret.server.com']:
print('12', key, config['topsecret.server.com'][key])
print('11', config['bitbucket.org']['ForwardX11'])
# -sections得到所有的section,并以列表的形式返回
print('sections:', ' ', config.sections())
# -options(section)得到该section的所有option
print('options:', ' ', config.options('bitbucket.org'))
# -items(section)得到该section的所有键值对
print('items:', ' ', config.items('bitbucket.org'))
# -get(section,option)得到section中option的值,返回为string类型
print('get:', ' ', config.get('bitbucket.org', 'user'))
# 首先得到配置文件的所有分组,然后根据分组逐一展示所有
for sections in config.sections():
for items in config.items(sections):
print(items, items[0], items[1])
# add section
config = configparser.ConfigParser()
config.add_section('type')
config.set('type', 'stun', 'bool')
with open('example.ini', 'a') as configfile:
config.write(configfile)
# remove section option
config = configparser.ConfigParser()
print('2', config.read('example.ini'))
# config.remove_option('bitbucket.org', 'user')
# config.remove_section('bitbucket.org')
config.write(open('example.ini', 'w'))
'''
class Email_operate(object):
    """Read/write helper around an e-mail configuration .ini file.

    On construction the file is loaded with configparser; if nothing can be
    read (missing/empty file) a skeleton with [Global], [send] and [recv]
    sections is created and written to disk.
    """

    def __init__(self, file):
        print("open conf file: ", file)
        super(Email_operate, self).__init__()
        self.file = file
        self.config = configparser.ConfigParser()
        loaded = self.config.read(self.file)
        print("err = ", loaded)
        if 0 == len(loaded):
            # Nothing could be read: bootstrap a default configuration.
            print("err = ssss")
            self.config['Global'] = {}
            self.config['send'] = {'mail': '',
                                   'user': '',
                                   'password': ''}
            self.config['recv'] = {'user': ''}
            with open(self.file, 'w') as configfile:
                self.config.write(configfile)

    def read(self, section, key):
        """Return the value stored under [section] key, or None if absent.

        Explicit membership checks replace the original bare try/except
        pass, which silently hid every error.
        """
        if section in self.config and key in self.config[section]:
            return self.config[section][key]
        return None

    def write(self, section, key, value):
        """Set [section] key = value and persist the whole file."""
        if section not in self.config:
            # Bug fix: the original indexed the method
            # (`self.config.add_section[section]`), raising a TypeError
            # that a bare except swallowed, so new sections were lost.
            self.config.add_section(section)
        self.config[section][key] = value
        with open(self.file, 'w') as configfile:
            self.config.write(configfile)

    def show(self):
        """Pretty-print every section and its key/value pairs."""
        for sections in self.config.sections():
            print("[%s]" % sections)
            for items in self.config.items(sections):
                print("%s = %s" % (items[0], items[1]))
            print()

    def read_mail(self):
        """Return the sender address (send/mail).

        Bug fix: the original called the module-level `C` instance and
        returned nothing; now reads via self and returns the value.
        """
        return self.read("send", "mail")

    def read_user(self):
        """Return the sender user name (send/user)."""
        return self.read("send", "user")

    def read_pass(self):
        """Return the sender password (send/password)."""
        return self.read("send", "password")

    def write_mail(self):
        # TODO: not implemented in the original; kept as a stub.
        pass

    def write_user(self):
        # TODO: not implemented in the original; kept as a stub.
        pass

    def write_pass(self):
        # TODO: not implemented in the original; kept as a stub.
        pass

    def __del__(self):
        print("end ... ")
if __name__ == '__main__':
    # Smoke test: load (or create) email.ini, dump it, update a key, dump again.
    print('Hello world')
    C = Email_operate("email.ini")
    C.show()
    # NOTE(review): a missing key makes read() return None, printed as-is here.
    print("user = zzz", C.read("send", "user"))
    C.write("recv", "pass", "ssssss")
    C.show()
| [
"woshidamayi@Gmail.com"
] | woshidamayi@Gmail.com |
139c2b790b42e35160ff579c230c6aaf06592b0f | 304926837d94f37ef33c46b8f3c71ecfac4690e8 | /2.8_number_eight.py | 811908108a2f301d97a7a3a5fc3092a6c35bf496 | [] | no_license | ver0nika4ka/PythonCrashCourse | 1015d207d9da1b0f9efaee3acc502d2757880f33 | 6bde3b716deb86d022da5cb478c0a95505fe5acc | refs/heads/master | 2021-07-12T17:24:16.478133 | 2021-06-17T03:27:24 | 2021-06-17T03:27:24 | 246,993,773 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | addition = f"{5+3}"
substraction = f"{9-1}"
multipl = f"{2*4}"
division = f"{16/2}"
print(f"The result is:\n{addition}\n{substraction}\n{multipl}\n{division}")
# same as above, but you can write in one line
print(f"{5+3}\n{9-1}\n{2*4}\n{16/2}")
| [
"veranika.aizu@gmail.com"
] | veranika.aizu@gmail.com |
491d75489dc73b64d619f63effa5b9d9ade79f7f | 846a7668ac964632bdb6db639ab381be11c13b77 | /android/test/vts/testcases/host/camera/conventional/2_1/SampleCameraV2Test.py | a9b0087d3b858e0c6647feb0e5a3b5ddce105fa4 | [] | no_license | BPI-SINOVOIP/BPI-A64-Android8 | f2900965e96fd6f2a28ced68af668a858b15ebe1 | 744c72c133b9bf5d2e9efe0ab33e01e6e51d5743 | refs/heads/master | 2023-05-21T08:02:23.364495 | 2020-07-15T11:27:51 | 2020-07-15T11:27:51 | 143,945,191 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,445 | py | #!/usr/bin/env python
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import time
from vts.runners.host import asserts
from vts.runners.host import base_test
from vts.runners.host import test_runner
class SampleCameraV2Test(base_test.BaseTestClass):
"""A sample testcase for the non-HIDL, conventional Camera HAL."""
# Camera HAL version value (v2.1).
VERSION_2_1 = 0x201
VERSION_2_4 = 0x204
MAX_RETRIES = 5
def setUpClass(self):
self.dut = self.android_devices[0]
self.dut.hal.InitConventionalHal(
target_type="camera",
target_version=2.1,
target_basepaths=["/system/lib/hw"],
bits=32,
target_package="hal.conventional.camera")
def setUp(self):
self.call_count_camera_device_status_change = 0
self.call_count_torch_mode_status_change = 0
def testCameraNormal(self):
"""A simple testcase which just emulates a normal usage pattern."""
version = self.dut.hal.camera.common.GetAttributeValue(
"module_api_version")
logging.info("version: %s", hex(version))
if version != self.VERSION_2_1 and version != self.VERSION_2_4:
asserts.skip("HAL version %s is neither v2.1 nor v2.4" % version)
result = self.dut.hal.camera.get_number_of_cameras()
count = result.return_type.scalar_value.int32_t
logging.info("# of found cameras: %s", count)
asserts.assertTrue(count > 0, "no camera found")
for index in range(0, count):
arg = self.dut.hal.camera.camera_info_t(facing=0)
logging.info(self.dut.hal.camera.get_camera_info(index, arg))
# uncomment when undefined function is handled gracefully.
# self.dut.hal.camera.init()
def camera_device_status_change(callbacks, camera_id, new_status):
self.call_count_camera_device_status_change += 1
logging.info("camera_device_status_change")
logging.info("camera_device_status_change: camera_id = %s",
camera_id)
logging.info("camera_device_status_change: new_status = %s",
new_status)
logging.info("camera_device_status_change: callbacks = %s",
callbacks)
def torch_mode_status_change(callbacks, camera_id, new_status):
self.profiling.StopHostProfiling(
"callback_latency_torch_mode_status_change")
self.call_count_torch_mode_status_change += 1
logging.info("torch_mode_status_change")
logging.info("torch_mode_status_change: camera_id = %s", camera_id)
logging.info("torch_mode_status_change: new_status = %s",
new_status)
logging.info("torch_mode_status_change: callbacks = %s", callbacks)
my_callback = self.dut.hal.camera.camera_module_callbacks_t(
camera_device_status_change, torch_mode_status_change)
self.dut.hal.camera.set_callbacks(my_callback)
self.profiling.StartHostProfiling(
"callback_latency_torch_mode_status_change")
self.dut.hal.camera.common.methods.open() # note args are skipped
retries = 0
while (self.call_count_torch_mode_status_change < 1 and
retries < self.MAX_RETRIES):
logging.info("waiting %s %s",
self.call_count_camera_device_status_change,
self.call_count_torch_mode_status_change)
time.sleep(1)
retries += 1
if self.call_count_torch_mode_status_change < 1:
# The above callback was not always called (~50% of chance).
logging.error("Callback not called within %s seconds",
self.MAX_RETRIES)
if __name__ == "__main__":
test_runner.main()
| [
"mingxin.android@gmail.com"
] | mingxin.android@gmail.com |
06bd0eeeed12d227a0e832205e942acba3b8c52f | a15a7dcb2ba3880a75309dba66e718be7ca964b7 | /st2tests/integration/orquesta/test_wiring_error_handling.py | 8bd0218dd3110abe76d8f7235c15420dca021652 | [
"Apache-2.0"
] | permissive | alexiono/st2 | dfb6a9b2c6d00023771ff626883d9631e586fc06 | a2bf25e085dfc9d2d407e8160a2febd48e5a4920 | refs/heads/master | 2020-04-02T00:48:03.440760 | 2018-10-18T17:13:56 | 2018-10-18T17:13:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,356 | py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from integration.orquesta import base
from st2common.constants import action as ac_const
class ErrorHandlingTest(base.TestWorkflowExecution):
def test_inspection_error(self):
expected_errors = [
{
'type': 'content',
'message': 'The action "std.noop" is not registered in the database.',
'schema_path': 'properties.tasks.patternProperties.^\w+$.properties.action',
'spec_path': 'tasks.task3.action'
},
{
'type': 'context',
'language': 'yaql',
'expression': '<% ctx().foobar %>',
'message': 'Variable "foobar" is referenced before assignment.',
'schema_path': 'properties.tasks.patternProperties.^\w+$.properties.input',
'spec_path': 'tasks.task1.input',
},
{
'type': 'expression',
'language': 'yaql',
'expression': '<% <% succeeded() %>',
'message': (
'Parse error: unexpected \'<\' at '
'position 0 of expression \'<% succeeded()\''
),
'schema_path': (
'properties.tasks.patternProperties.^\w+$.'
'properties.next.items.properties.when'
),
'spec_path': 'tasks.task2.next[0].when'
},
{
'type': 'syntax',
'message': '[{\'cmd\': \'echo <% ctx().macro %>\'}] is not of type \'object\'',
'schema_path': 'properties.tasks.patternProperties.^\w+$.properties.input.type',
'spec_path': 'tasks.task2.input'
}
]
ex = self._execute_workflow('examples.orquesta-fail-inspection')
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ex.result, {'errors': expected_errors, 'output': None})
def test_input_error(self):
expected_errors = [{'message': 'Unknown function "#property#value"'}]
ex = self._execute_workflow('examples.orquesta-fail-input-rendering')
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ex.result, {'errors': expected_errors, 'output': None})
def test_vars_error(self):
expected_errors = [{'message': 'Unknown function "#property#value"'}]
ex = self._execute_workflow('examples.orquesta-fail-vars-rendering')
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ex.result, {'errors': expected_errors, 'output': None})
def test_start_task_error(self):
expected_errors = [{'message': 'Unknown function "#property#value"', 'task_id': 'task1'}]
ex = self._execute_workflow('examples.orquesta-fail-start-task')
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ex.result, {'errors': expected_errors, 'output': None})
def test_task_transition_error(self):
expected_errors = [
{
'message': (
'Unable to resolve key \'value\' in expression \''
'<% succeeded() and result().value %>\' from context.'
),
'task_transition_id': 'task2__0',
'task_id': 'task1'
}
]
ex = self._execute_workflow('examples.orquesta-fail-task-transition')
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ex.result, {'errors': expected_errors, 'output': None})
def test_task_publish_error(self):
expected_errors = [
{
'message': (
'Unable to resolve key \'value\' in expression \''
'<% result().value %>\' from context.'
),
'task_transition_id': 'task2__0',
'task_id': 'task1'
}
]
ex = self._execute_workflow('examples.orquesta-fail-task-publish')
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ex.result, {'errors': expected_errors, 'output': None})
def test_output_error(self):
expected_errors = [{'message': 'Unknown function "#property#value"'}]
ex = self._execute_workflow('examples.orquesta-fail-output-rendering')
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ex.result, {'errors': expected_errors, 'output': None})
def test_task_content_errors(self):
expected_errors = [
{
'type': 'content',
'message': 'The action reference "echo" is not formatted correctly.',
'schema_path': 'properties.tasks.patternProperties.^\w+$.properties.action',
'spec_path': 'tasks.task1.action'
},
{
'type': 'content',
'message': 'The action "core.echoz" is not registered in the database.',
'schema_path': 'properties.tasks.patternProperties.^\w+$.properties.action',
'spec_path': 'tasks.task2.action'
},
{
'type': 'content',
'message': 'Action "core.echo" is missing required input "message".',
'schema_path': 'properties.tasks.patternProperties.^\w+$.properties.input',
'spec_path': 'tasks.task3.input'
},
{
'type': 'content',
'message': 'Action "core.echo" has unexpected input "messages".',
'schema_path': (
'properties.tasks.patternProperties.^\w+$.properties.input.'
'patternProperties.^\w+$'
),
'spec_path': 'tasks.task3.input.messages'
}
]
ex = self._execute_workflow('examples.orquesta-fail-inspection-task-contents')
ex = self._wait_for_completion(ex)
self.assertEqual(ex.status, ac_const.LIVEACTION_STATUS_FAILED)
self.assertDictEqual(ex.result, {'errors': expected_errors, 'output': None})
| [
"m4d.coder@gmail.com"
] | m4d.coder@gmail.com |
c9e883fa698c1a3aefc67747af1fc68a37696834 | 891aba394df57d7894900e99e5881ad5817a84bd | /s23/23.4.1_readlines_v2.py | 70870c5131582b1b2ae4b22f6171b7b1876f9546 | [] | no_license | feliperojas/mision_tic_G11 | 42d87e698eb8c9ace896805f5fc5436a0035ec3b | cfc41e873a4138f3f4f2ad63143042eb606c0f45 | refs/heads/master | 2023-05-28T09:29:40.247531 | 2021-06-09T16:43:45 | 2021-06-09T16:43:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | contador = 1
with open("C:/Users/Camilo/Desktop/reportes/reporte2.txt","r") as archivo:
data = archivo.readlines()
print("El archivo tiene",len(data), "lineas")
for linea in data:
print(f"En la linea {contador} esta es la informacion: {linea}", end="")
print()
contador+=1 | [
"camohe90@gmail.com"
] | camohe90@gmail.com |
f4dfc272af48dd327f4b10f236a506542361cb96 | 167b90bff7f1db51a066f7b8f6f543b77a077ebf | /exercise087.py | 57696072a4ad8dc9594e5f6493b4d3508ae13084 | [] | no_license | DanielMafra/Python-LanguageStudies | 9bcbe753c14e5aa2b23b11c5e103cf00c7dfcad3 | 29700f832ebbddad6e74d88add70c08eeba14054 | refs/heads/main | 2023-07-02T09:34:31.856246 | 2021-07-29T21:34:39 | 2021-07-29T21:34:39 | 389,669,574 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | headquarters = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
spar = mai = scol = 0
for l in range(0, 3):
for c in range(0, 3):
headquarters[l][c] = int(input(f'Enter a value for [{l}, {c}]: '))
print('-=' * 30)
for l in range(0, 3):
for c in range(0, 3):
print(f'[{headquarters[l][c]:^5}]', end='')
if headquarters[l][c] % 2 == 0:
spar += headquarters[l][c]
print()
print('-=' * 30)
print(f'Sum pairs: {spar}')
for l in range(0, 3):
scol += headquarters[l][2]
print(f'Sum third column: {scol}')
for c in range(0, 3):
if c == 0:
mai = headquarters[1][c]
elif headquarters[1][c] > mai:
mai = headquarters[1][c]
print(f'Highest value second row: {mai}')
| [
"danielmafradev@gmail.com"
] | danielmafradev@gmail.com |
ede2c4d557022d282d3225e376d14e79ed3466a0 | cfad82fd82eeb832bce6f8d3c30aad05d000ae9b | /migrations/versions/13b676178b08_box_plot.py | d05afe3dbcf455bfc4681ad633e5ab85ccde9b6a | [
"Apache-2.0"
] | permissive | dpdi-unifor/caipirinha | 73508fcc6aa519749db69d1126a65e4f27099ffd | 43e4512c282cfcfa988ea38e160939b6f3c2c604 | refs/heads/master | 2022-12-08T04:39:19.637631 | 2020-07-16T22:50:30 | 2020-07-16T22:50:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,498 | py | """box_plot
Revision ID: 13b676178b08
Revises: cfacecb61ac1
Create Date: 2019-04-16 21:55:10.723443
"""
from alembic import op
from sqlalchemy import String, Integer
from sqlalchemy.sql import table, column, text
# revision identifiers, used by Alembic.
revision = '13b676178b08'        # this migration's identifier
down_revision = 'cfacecb61ac1'   # parent migration in the chain
branch_labels = None             # no named branch
depends_on = None                # no cross-branch dependency
def insert_visualization_type():
    """Bulk-insert the box-plot and histogram visualization types."""
    visualization_type = table(
        'visualization_type',
        column('id', Integer),
        column('name', String),
        column('help', String),
        column('icon', String))
    rows = [
        {'id': 123, 'name': 'box-plot', 'help': 'Box plot', 'icon': 'fa-chart'},
        {'id': 124, 'name': 'histogram', 'help': 'Histogram', 'icon': 'fa-chart'},
    ]
    op.bulk_insert(visualization_type, rows)
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Run the insert inside an explicit transaction so a failure leaves
    # the visualization_type table untouched; the bare except re-raises
    # after rolling back.
    try:
        op.execute(text('START TRANSACTION'))
        insert_visualization_type()
        op.execute(text('COMMIT'))
    except:
        op.execute(text('ROLLBACK'))
        raise
# noinspection PyBroadException
def downgrade():
    # Reverse of upgrade(): delete the two inserted rows.  Foreign-key
    # checks are toggled off around the delete (MySQL-specific statement);
    # any failure rolls the transaction back and re-raises.
    try:
        op.execute(text('START TRANSACTION'))
        op.execute(text('SET FOREIGN_KEY_CHECKS=0;'))
        op.execute(
            text("DELETE FROM visualization_type WHERE id IN (123, 124)"))
        op.execute(text('SET FOREIGN_KEY_CHECKS=1;'))
        op.execute(text('COMMIT'))
    except:
        op.execute(text('ROLLBACK'))
        raise
| [
"waltersf@gmail.com"
] | waltersf@gmail.com |
d022ac9505b8dfedb695be5bd5e43e6ab95c0ebd | 22986b48baf0bb2e87055534cc47743292d123e7 | /simcorpfinder/wsgi.py | 264af00372a7a50265128d1d341b77f9b9dad156 | [] | no_license | GoatWang/DjangoTest | 9d8e97351ce61f8815cc0f8b957c77f8b7122789 | 2c0a057c5947ff3c20b4456b2b17e874cac3d225 | refs/heads/master | 2021-01-22T07:32:02.740989 | 2017-09-04T09:29:04 | 2017-09-04T09:29:04 | 102,306,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | """
WSGI config for simcorpfinder project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "simcorpfinder.settings")
application = get_wsgi_application()
| [
"jeremy4555@yahoo.com.tw"
] | jeremy4555@yahoo.com.tw |
acbfef4791270a3e4d12702b4d80a8beb81ca83d | c868d681415d152ba331bd80e0ed542832f20f0e | /week13/onlineShop/onlineShop/main/models.py | 0cf27335a5481957f4aa13325b13e507ce8ef636 | [] | no_license | Yeldarmt/BFDjango | a297a6b0c00ffb1a269f05c7e6665c5d34a51097 | b8256ff1d5f2125495df66eabf267fc17e667aeb | refs/heads/master | 2022-11-30T12:45:17.356453 | 2020-04-19T16:50:26 | 2020-04-19T16:50:26 | 233,515,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,584 | py | from django.db import models
import os
from django.core.exceptions import ValidationError
ALLOWED_EXTENSIONS = ['.docx', '.pdf']


def validate_extension(value):
    """Django field validator: reject files whose extension is not allowed.

    `value` is an uploaded file object; only its `.name` is inspected.
    Raises ValidationError for anything other than .docx/.pdf
    (case-insensitive).
    """
    # os.path.splitext always returns a (root, ext) pair, so the original
    # `len(split_ext) > 1` guard was always true; debug prints removed.
    ext = os.path.splitext(value.name)[1]
    if ext.lower() not in ALLOWED_EXTENSIONS:
        raise ValidationError(f'not allowed file, valid extensions: {ALLOWED_EXTENSIONS}')
class Category(models.Model):
    """Product category; creates a companion CategoryFullInfo row on first save."""
    name = models.CharField(max_length=300)
    # Optional description document; only .docx/.pdf pass validate_extension.
    category_desc = models.FileField(upload_to='desc_files',
                                     validators=[validate_extension],
                                     null=True, blank=True)

    class Meta:
        verbose_name = 'Category'
        verbose_name_plural = 'Categories'

    def __str__(self):
        return 'Category id: {}, name: {}'.format(self.id, self.name)

    def _try_create_profile_for_user(self, created):
        # Create the companion full-info row the first time this row is saved.
        if created:
            CategoryFullInfo.objects.get_or_create(category=self)

    def save(self, *args, **kwargs):
        created = self.id is None
        # Bug fix: the original prepended 'main_' unconditionally on *every*
        # save, producing 'main_main_...' after repeated saves; prefix once.
        if not self.name.startswith('main_'):
            self.name = f'main_{self.name}'
        super(Category, self).save(*args, **kwargs)
        self._try_create_profile_for_user(created)
class CategoryFullInfo(models.Model):
    # One-to-one companion record holding extended text for a Category;
    # created automatically by Category.save() via get_or_create.
    category = models.OneToOneField(Category, on_delete=models.CASCADE)
    category_info = models.TextField(default='')
| [
"eldarmukhametkazin@gmail.com"
] | eldarmukhametkazin@gmail.com |
494736c4c41ac8fb3a48320e6706ab5f44726047 | 28b06ed3e562eb9c2b372934ea9a04e81320bb59 | /setup.py | 7fb9e8c05924b417dac0eb5d9dd8f89ddc9da35d | [
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | biomodels/MODEL1310110026 | 1f2cc0849110b22ce30be8e7444eba0c29e293db | 503fd2992f9c20b25c633fecb97177fd0116404b | refs/heads/master | 2016-09-05T19:27:29.698048 | 2014-10-16T05:30:04 | 2014-10-16T05:30:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | from setuptools import setup, find_packages
# Auto-generated packaging stub for a BioModels SBML model download.
setup(name='MODEL1310110026',
      version=20140916,
      description='MODEL1310110026 from BioModels',
      url='http://www.ebi.ac.uk/biomodels-main/MODEL1310110026',
      maintainer='Stanley Gu',
      maintainer_url='stanleygu@gmail.com',
      packages=find_packages(),
      # ship the SBML model files and README alongside the code
      package_data={'': ['*.xml', 'README.md']},
) | [
"stanleygu@gmail.com"
] | stanleygu@gmail.com |
e64fde0b61a1ff49dd4a9e786d2b8546b5e85b1f | 67747b6ee7d4e1d24eadc5d0390f38d609501ccd | /爬虫/图片爬虫/crawlpjt/crawlpjt/spiders/smartspider.py | 8343a383f946ac46bbbe47bf1f7fbbc7434ef945 | [] | no_license | callmeliuchu/codeGitBook | 577937013a355ba36a688792f5722d31be33fc0b | 780cac294db47a46bb14129f166dd31c180e9473 | refs/heads/master | 2020-12-03T08:17:55.851568 | 2017-11-28T15:26:38 | 2017-11-28T15:26:38 | 95,679,807 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | # -*- coding: utf-8 -*-
import scrapy
from crawlpjt.items import CrawlpjtItem
from scrapy.http import Request
import re
class SmartspiderSpider(scrapy.Spider):
    """Scrapes image URLs/ids hosted on dlpic.fungood.cn from douluo123.com
    comic pages into CrawlpjtItem fields, then follows sibling chapter pages."""
    name = "smartspider"
    # NOTE(review): allowed_domains does not cover douluo123.com, the site
    # actually crawled -- confirm offsite filtering is disabled in settings.
    allowed_domains = ["mmjpg.com"]
    start_urls = ['http://www.douluo123.com/shaonvmanhua/114220.html']
    def parse(self, response):
        item = CrawlpjtItem()
        # NOTE(review): both regexes contain two groups, so findall() yields
        # (match, extension) tuples rather than plain strings -- confirm the
        # item pipeline expects tuples.  str(response.body) also stringifies
        # bytes (includes the b'' prefix on Python 3).
        paturl = "(http://dlpic.fungood.cn/uploads/.*?\.(jpg|png))"
        item['picurl'] = re.compile(paturl).findall(str(response.body))
        patid = "http://dlpic.fungood.cn/uploads/.*?/(.*?)\.(jpg|png)"
        item['picid']=re.compile(patid).findall(str(response.body))
        yield item
        # follow the remaining chapter pages 114201..114219
        for i in range(201,220):
            url = "http://www.douluo123.com/shaonvmanhua/114" + str(i) + ".html"
            yield Request(url,callback=self.parse)
| [
"1371772034@qq.com"
] | 1371772034@qq.com |
b0e4c3d769de1f3108c005c2de386ec073ad6d44 | 5147809b6382397185f2b1b6f43a272ea9e4f150 | /reddening-colors.py | 53c1d0fc71ecdf98e7c8f364825d71dbb0818e2b | [] | no_license | AngelGSoto/python-programs-1 | f8df3d498756ed6332504e8601924991803c3561 | 5e2b607ee9dd1459143a55218f9890a61539fd6a | refs/heads/master | 2023-01-08T00:24:15.664946 | 2020-10-23T21:07:23 | 2020-10-23T21:07:23 | 297,742,537 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,460 | py | # -*- coding: utf-8 -*-
'''
Make color-color diagrams for JPLUS 2017
'''
from __future__ import print_function
import numpy as np
import glob
import json
import matplotlib.pyplot as plt
import seaborn as sns
import sys
from scipy.stats import gaussian_kde
import pandas as pd
from astropy.table import Table
#import StringIO
from sympy import S, symbols
from scipy.optimize import fsolve
import os
#reading the files .json
pattern = "*-spectros/*-JPLUS17-magnitude.json"  # per-spectrum magnitude files
file_list = glob.glob(pattern)
# def filter_mag(e, s, f1, f2, f3):
# '''
# Calculate the colors using any of set of filters
# '''
# col, col0 = [], []
# if data['id'].endswith(e):
# if data['id'].startswith(str(s)):
# filter1 = data[f1]
# filter2 = data[f2]
# filter3 = data[f3]
# diff = filter1 - filter2
# diff0 = filter1 - filter3
# col.append(diff)
# col0.append(diff0)
# return col, col0
def filter_mag(e, s, f1, f2, f3, f4):
    '''
    Calculate the colors using any of set of filters

    Reads the module-level `data` dict (the currently loaded spectrum); when
    its id matches the given suffix `e` and prefix `s`, returns one-element
    lists with the colours f1-f2 and f3-f4, otherwise two empty lists.
    '''
    colour_a, colour_b = [], []
    source_id = data['id']
    if source_id.endswith(e) and source_id.startswith(str(s)):
        colour_a.append(data[f1] - data[f2])
        colour_b.append(data[f3] - data[f4])
    return colour_a, colour_b
def plot_mag(f1, f2, f3, f4):
    """Accumulate the (f1-f2, f3-f4) colours of the current spectrum into the
    module-level buckets: A1[0]/B1[0] collect the E00 models and A1[1]/B1[1]
    the E02 models, for the 100/300/600 variants, in that order.

    Note: the original version also computed the E01 colours but never stored
    them anywhere, so those calls are dropped here; the stored results and
    their ordering are unchanged.
    """
    for density in ("100", "300", "600"):
        colour, colour0 = filter_mag("E00_" + density, "", f1, f2, f3, f4)
        A1[0].extend(colour)
        B1[0].extend(colour0)
    for density in ("100", "300", "600"):
        colour, colour0 = filter_mag("E02_" + density, "", f1, f2, f3, f4)
        A1[1].extend(colour)
        B1[1].extend(colour0)
# Buckets for the colour-colour points; index 0 and 1 are filled by plot_mag.
n = 3
A1, B1 = [[] for _ in range(n)], [[] for _ in range(n)]
d_644_jplus, d_768_jplus = [], []
d_644_jplus1, d_768_jplus1 = [], []
label=[]
# Load every magnitude file; `data` keeps the last file's contents after the
# loop, which is what the plot_mag call below operates on.
for file_name in file_list:
    with open(file_name) as f:
        data = json.load(f)
        # if data['id'].endswith("1-HPNe"):
        #     label.append("")
        # elif data['id'].endswith("SLOAN-HPNe-"):
        #     label.append("H4-1")
        # elif data['id'].endswith("1359559-HPNe"):
        #     label.append("PNG 135.9+55.9")
        if data['id'].startswith("ngc"):
            label.append("")
        elif data['id'].startswith("mwc"):
            label.append("")
#plot_mag("F625_r_sdss", "F660", "F766_i_sdss")
#plot_mag("F515", "F660", "F861")
#plot_mag("F911_z_sdss", "F660", "F480_g_sdss")
#plot_mag("F480_g_sdss", "F515", "F660", "F625_r_sdss")
plot_mag("F410", "F660", "F480_g_sdss", "F766_i_sdss")
print(np.mean(B1[0]), np.mean(A1[0]))
print(np.mean(B1[1]), np.mean(A1[1]))
| [
"gsoto.angel@gmail.com"
] | gsoto.angel@gmail.com |
1a174e26cf1fe96bba7762cd1b733be0dcec6705 | 6e9d6a682f20054e13d3764e95b8bd3b7b64fabf | /dailychallenge525.py | d10d8a429a88bee8b70f25c94e5e6f075a337848 | [] | no_license | SeanyDcode/codechallenges | 30a271e04bc2b360bca923ae868be65a9533c8db | 947cf3034911b381afaf777794d22d2af06aa5ba | refs/heads/master | 2022-11-07T21:22:56.927863 | 2022-10-18T23:33:13 | 2022-10-18T23:33:13 | 154,498,776 | 1 | 0 | null | 2022-10-18T23:02:05 | 2018-10-24T12:38:45 | Python | UTF-8 | Python | false | false | 412 | py | # from dailycodingproblem.com
#
# Daily Challenge #525
# Given a N by M matrix of numbers, print out the matrix in a clockwise spiral.
#
# For example, given the following matrix:
#
# [[1, 2, 3, 4, 5],
# [6, 7, 8, 9, 10],
# [11, 12, 13, 14, 15],
# [16, 17, 18, 19, 20]]
# You should print out the following:
#
# 1
# 2
# 3
# 4
# 5
# 10
# 15
# 20
# 19
# 18
# 17
# 16
# 11
# 6
# 7
# 8
# 9
# 14
# 13
# 12
| [
"noreply@github.com"
] | SeanyDcode.noreply@github.com |
853a5e39a2163b7ab19cb4af8f77ccfbb4328637 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2371/60782/270717.py | 94105bafb18dd168ee67d41dd0341234d6bebbe9 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | """
题目描述
贾维斯在计算字母数字字符回文率方面很弱。
当钢铁侠忙于与灭霸战斗时,他需要启动音速冲动,但是贾维斯不会计算回文。
给定一个包含字母数字字符的字符串S,找出字符串是否是回文,拯救钢铁侠。
"""
"""
输入描述
输入的第一行包含T,即测试用例的数量。随后是T个测试用例。测试用例的每一行都包含字符串“S”。
"""
"""
输出描述
如果字符串是回文,则输出的每一行都包含“ YES”,如果字符串不是回文,则输出“ NO”。
"""
times = int(input())
while times > 0:
times = times - 1
string = input()
''.join([x for x in string if x.isalpha()])
string = string.lower()
l = list(string)
l.reverse()
reverse_string = str(l)
if reverse_string == string:
print("Yes")
else:
print("No") | [
"1069583789@qq.com"
] | 1069583789@qq.com |
db0263c9651dc1ae01bf1e8ac4c68375a560f81e | e4ee9f2ca60b60ea9fa1b05c982594a2c1b10484 | /day78 课上笔记以及代码/代码/luffy_permission/rbac/urls.py | 1ae198f501bc00ee4561ad886e7e546e6b50df48 | [] | no_license | tianshang486/Pythonlaonanhai | 100df2cc437aad1ee1baf45bdfc4500b1302092b | 2a5b46986f5ca684b2ae350596e293db54e1e2f4 | refs/heads/master | 2022-09-19T02:16:56.972160 | 2020-06-04T09:24:30 | 2020-06-04T09:24:30 | 269,314,860 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,178 | py | from django.conf.urls import url
from rbac import views
app_name = 'rbac'
# CRUD routes for roles, menus and permissions of the RBAC app.
urlpatterns = [
    # /app01/role/list/ # rbac:role_list
    url(r'^role/list/$', views.role_list, name='role_list'),
    url(r'^role/add/$', views.role, name='role_add'),
    url(r'^role/edit/(\d+)/$', views.role, name='role_edit'),
    url(r'^role/del/(\d+)/$', views.del_role, name='role_del'),
    url(r'^menu/list/$', views.menu_list, name='menu_list'),
    url(r'^menu/add/$', views.menu, name='menu_add'),
    url(r'^menu/edit/(\d+)/$', views.menu, name='menu_edit'),
    url(r'^permission/add/$', views.permission, name='permission_add'),
    url(r'^permission/edit/(\d+)/$', views.permission, name='permission_edit'),
    url(r'^permission/del/(\d+)/$', views.del_permission, name='permission_del'),
    url(r'^multi/permissions/$', views.multi_permissions, name='multi_permissions'),
    url(r'^distribute/permissions/$', views.distribute_permissions, name='distribute_permissions'),
    # NOTE(review): duplicate route name 'distribute_permissions' -- reverse()
    # resolves to this last entry, shadowing the one above; confirm whether it
    # should be named 'distribute_permissions2'.
    url(r'^distribute/permissions2/$', views.distribute_permissions2, name='distribute_permissions'),
    url(r'^permissions_tree/$', views.permissions_tree, name='permissions_tree'),
]
| [
"tianshang486@.com"
] | tianshang486@.com |
ece832936045045a43c026cc845db74a25fc0911 | 1f4505ed66f4fd68c6d1edf18ecff58362742fad | /algorithm/Backtracking/131_Palindrome_Partitioning.py | 94732d477f1b056aeaa91fc00bc9c257da15fab9 | [
"MIT"
] | permissive | nishitpatel01/Data-Science-Toolbox | 0d9b63a365698cc4a423abd5881cde8f6bf672be | 80dc1310d103c9481feff8792426c550ddcc0a36 | refs/heads/master | 2020-05-19T08:26:40.319321 | 2019-05-04T05:58:48 | 2019-05-04T05:58:48 | 184,921,541 | 1 | 1 | MIT | 2019-05-04T16:53:21 | 2019-05-04T16:53:20 | null | UTF-8 | Python | false | false | 3,120 | py | import collections
class Solution(object):
    """LeetCode 131: return all ways to split s into palindromic substrings."""

    # brute force
    def partition2(self, s):
        """Plain DFS over every prefix, no memoization."""
        res = []
        def dfs(s, path, res):
            if not s:
                res.append(path)
                return
            # warning: index ends with len(s), not len(s) - 1
            # because s[:len(s)] is the whole string
            for i in range(1, len(s) + 1):
                # Bug fix: the original tested s[:i] == s[:i:-1]; s[:i:-1]
                # slices the *tail* of s backwards, not the reverse of s[:i],
                # so valid palindromic prefixes were rejected.
                if s[:i] == s[:i][::-1]:
                    dfs(s[i:], path + [s[:i]], res)
        dfs(s, [], res)
        return res

    # memo, backward: self.memo[k] caches the partitions of the first k
    # characters (prefix length is a unique key per call).
    def partitionDBRec(self, s):
        """DFS with memoization on prefix length."""
        # one extra slot so the string length itself is a valid index
        self.memo = [None] * (len(s) + 1)
        # the zero-length prefix has exactly one (empty) partition: base case
        self.memo[0] = [[]]
        def partition_core(s):
            s_len = len(s)
            if self.memo[s_len]:
                return self.memo[s_len]
            res = []
            for i in range(len(s) - 1, -1, -1):
                current = s[i:]
                if current == current[::-1]:
                    for rem in partition_core(s[:i]):
                        res.append(rem + [current])
            self.memo[s_len] = res
            return res
        return partition_core(s)

    def partitionDP(self, s):
        """
        Bottom-up DP: precompute every palindromic substring, then assemble
        the partitions recursively.

        :type s: str
        :rtype: List[List[str]]
        """
        def make_results(index, pallindromes, result, results):
            if index >= len(s):
                # Bug fix: 'results += result' flattened the strings of the
                # partition into results; append keeps it as one list.
                results.append(result[:])
            else:
                for pallindrome in pallindromes[index]:
                    make_results(index + len(pallindrome), pallindromes, result + [pallindrome], results)
        n = len(s)
        is_pallindrome = set()
        # pallindromes[j] lists every palindromic substring starting at j
        pallindromes = collections.defaultdict(list)
        for i in range(0, len(s)):
            for j in range(i + 1):
                if s[i] == s[j] and ((i - j) <= 1 or (j + 1, i - 1) in is_pallindrome):
                    is_pallindrome.add((j, i))
                    substring = s[j:i + 1]
                    # Bug fix: 'list += str' extends character by character;
                    # append keeps the substring whole.
                    pallindromes[j].append(substring)
        results = []
        make_results(0, pallindromes, [], results)
        return results
solver = Solution()
# NOTE(review): the next line calls partitionDPRec2, which only exists as a
# commented-out sketch above, so running this module raises AttributeError.
print(solver.partitionDPRec2("aab")) | [
"shawlu@github.com"
] | shawlu@github.com |
80eac598597ba1c160fb0155aeab022602216b45 | b3c47795e8b6d95ae5521dcbbb920ab71851a92f | /Nowcoder/第八届“图灵杯”NEUQ-ACM程序设计竞赛个人赛/E.py | 1ddd5930f982aa12adfaaa374f53809ba517673b | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | Wizmann/ACM-ICPC | 6afecd0fd09918c53a2a84c4d22c244de0065710 | 7c30454c49485a794dcc4d1c09daf2f755f9ecc1 | refs/heads/master | 2023-07-15T02:46:21.372860 | 2023-07-09T15:30:27 | 2023-07-09T15:30:27 | 3,009,276 | 51 | 23 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | #coding=utf-8
from math import sqrt
def solve(n, x):
    # The position splits into two pile sizes (x - 1, n - x); this is the
    # classic golden-ratio cold-position test (Wythoff-style): the side to
    # move loses iff the smaller pile equals floor(gap * golden ratio).
    golden = (sqrt(5) + 1) / 2
    low, high = min(x - 1, n - x), max(x - 1, n - x)
    gap = high - low
    return low == int(gap * golden)
# Python 2 script: raw_input/xrange and print statements.
# Reads T test cases of (n, x); solve(n, x) True means the second player wins.
T = int(raw_input())
for case_ in xrange(T):
    (n, x) = map(int, raw_input().split())
    if solve(n, x):
        # second player wins
        print 'ma la se mi no.1!'
    else:
        # first player wins
        print 'yo xi no forever!'
'''
^^^TEST^^^
8
1 1
10 3
17 6
12 5
4 3
9 6
12 8
17 11
--------
ma la se mi no.1!
yo xi no forever!
yo xi no forever!
ma la se mi no.1!
ma la se mi no.1!
ma la se mi no.1!
ma la se mi no.1!
ma la se mi no.1!
$$$TEST$$$
'''
| [
"noreply@github.com"
] | Wizmann.noreply@github.com |
dade28675e6427d46d2f875da1203198c231c5ea | ed14784949d5fa2208aa99ae1e31be0b6d1f196d | /backend/fametok_19651/settings.py | 6b0a917a15f6ad8150a19ad7f7b3fdf5478c88bb | [] | no_license | crowdbotics-apps/fametok-19651 | d530a768e791d04394133ec8e92731c9d4f1f02e | aeecff19e0a628fed01c0d85d81c90c5dd98a99c | refs/heads/master | 2022-12-03T06:33:14.174775 | 2020-08-19T12:41:58 | 2020-08-19T12:41:58 | 288,728,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,841 | py | """
Django settings for fametok_19651 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
# django-environ instance used to read configuration from the environment.
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
# Trust the proxy's X-Forwarded-Proto header when deciding if a request is secure.
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
    "django.contrib.admin",
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sessions",
    "django.contrib.messages",
    "django.contrib.staticfiles",
    "django.contrib.sites",
    "course",
]
LOCAL_APPS = [
    "home",
    "users.apps.UsersConfig",
]
THIRD_PARTY_APPS = [
    "rest_framework",
    "rest_framework.authtoken",
    "rest_auth",
    "rest_auth.registration",
    "bootstrap4",
    "allauth",
    "allauth.account",
    "allauth.socialaccount",
    "allauth.socialaccount.providers.google",
    "django_extensions",
    "drf_yasg",
    # start fcm_django push notifications
    "fcm_django",
    # end fcm_django push notifications
]
# Final app list: core Django apps plus the project and third-party apps.
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
    "django.middleware.security.SecurityMiddleware",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "fametok_19651.urls"
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.debug",
                "django.template.context_processors.request",
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]
WSGI_APPLICATION = "fametok_19651.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": os.path.join(BASE_DIR, "db.sqlite3"),
    }
}
# Prefer a DATABASE_URL from the environment over the sqlite fallback above.
if env.str("DATABASE_URL", default=None):
    DATABASES = {"default": env.db()}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
    },
    {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",},
    {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",},
    {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = "/static/"
# WhiteNoise serves the collected static files from the app process itself.
MIDDLEWARE += ["whitenoise.middleware.WhiteNoiseMiddleware"]
AUTHENTICATION_BACKENDS = (
    "django.contrib.auth.backends.ModelBackend",
    "allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = "email"
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
    # Replace password reset serializer to fix 500 error
    "PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
    # Use custom serializer that has no username and matches web signup
    "REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
    "DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG:
    # output email to console instead of sending
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
abf86800d2a983e5d00cb8ab431150246bb1bdad | 552ba370742e346dbb1cf7c7bf4b99648a17979b | /tbx/services/migrations/0006_new_servicepage_model.py | b9ca45203ff3651aafcc0a436498635c2cc4649e | [
"MIT"
] | permissive | arush15june/wagtail-torchbox | 73e5cdae81b524bd1ee9c563cdc8a7b5315a809e | c4d06e096c72bd8007975dc016133024f9d27fab | refs/heads/master | 2022-12-25T05:39:32.309635 | 2020-08-13T14:50:42 | 2020-08-13T14:50:42 | 299,591,277 | 0 | 0 | MIT | 2020-09-29T11:08:49 | 2020-09-29T11:08:48 | null | UTF-8 | Python | false | false | 6,030 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-01-21 16:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
import wagtail.core.fields
class Migration(migrations.Migration):
    """Auto-generated (Django 1.11.16) migration creating the ServicePage
    model and its child item models; avoid hand-editing once applied."""
    initial = True
    dependencies = [
        ('blog', '0017_map_tags_to_related_services'),
        ('work', '0014_map_tags_to_related_services'),
        ('people', '0005_contact'),
        ('taxonomy', '0002_initial_services'),
        ('wagtailcore', '0040_page_draft_title'),
        ('torchbox', '0120_remove_contactpage'),
        ('services', '0005_remove_models'),
    ]
    operations = [
        migrations.CreateModel(
            name='ServicePage',
            fields=[
                ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('strapline', models.CharField(max_length=255)),
                ('intro', wagtail.core.fields.RichTextField(blank=True)),
                ('heading_for_key_points', wagtail.core.fields.RichTextField()),
                ('contact', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='people.Contact')),
                ('service', models.OneToOneField(blank=True, help_text='Link to this service in taxonomy', null=True, on_delete=django.db.models.deletion.SET_NULL, to='taxonomy.Service')),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        migrations.CreateModel(
            name='ServicePageClientLogo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='torchbox.TorchboxImage')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='client_logos', to='services.ServicePage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ServicePageFeaturedBlogPost',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('blog_post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='blog.BlogPage')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='featured_blog_posts', to='services.ServicePage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ServicePageFeaturedCaseStudy',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('case_study', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='work.WorkPage')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='featured_case_studies', to='services.ServicePage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ServicePageKeyPoint',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('text', models.CharField(max_length=255)),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='key_points', to='services.ServicePage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ServicePageTestimonial',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('quote', models.TextField()),
                ('name', models.CharField(max_length=255)),
                ('role', models.CharField(max_length=255)),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='testimonials', to='services.ServicePage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='ServicePageUSAClientLogo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='torchbox.TorchboxImage')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='usa_client_logos', to='services.ServicePage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
    ]
| [
"karl@torchbox.com"
] | karl@torchbox.com |
ee7eedfd43a41db4ba4f5048132a797a8ea062e8 | 2264807e07d88d0f0dea05d3973163430765794d | /wip/other/tap.py | daca3680b44ded7d42deb529c89fec50356ba486 | [
"MIT"
] | permissive | deadsy/pycs | e0dc9d2d3ec2ff0f7019d0a31d94e0a1237c24b1 | 7e262c710255ac9742703c7ccedb0ae90ae079ec | refs/heads/master | 2023-03-05T10:33:03.485327 | 2023-02-27T19:34:27 | 2023-02-27T19:34:27 | 38,818,707 | 57 | 11 | null | 2017-12-21T22:40:29 | 2015-07-09T12:24:47 | Python | UTF-8 | Python | false | false | 2,818 | py | #-----------------------------------------------------------------------------
"""
Generate TMS sequences for JTAG TAP state machine transitions
Note:
State names are taken from the SVF file specification.
This keeps things simple when processing SVF files.
"""
#-----------------------------------------------------------------------------
# TAP controller adjacency: state -> (next state on TMS=0, next state on TMS=1).
state_machine = {
    'RESET':     ('IDLE', 'RESET'),
    'IDLE':      ('IDLE', 'DRSELECT'),
    'DRSELECT':  ('DRCAPTURE', 'IRSELECT'),
    'DRCAPTURE': ('DRSHIFT', 'DREXIT1'),
    'DRSHIFT':   ('DRSHIFT', 'DREXIT1'),
    'DREXIT1':   ('DRPAUSE', 'DRUPDATE'),
    'DRPAUSE':   ('DRPAUSE', 'DREXIT2'),
    'DREXIT2':   ('DRSHIFT', 'DRUPDATE'),
    'DRUPDATE':  ('IDLE', 'DRSELECT'),
    'IRSELECT':  ('IRCAPTURE', 'RESET'),
    'IRCAPTURE': ('IRSHIFT', 'IREXIT1'),
    'IRSHIFT':   ('IRSHIFT', 'IREXIT1'),
    'IREXIT1':   ('IRPAUSE', 'IRUPDATE'),
    'IRPAUSE':   ('IRPAUSE', 'IREXIT2'),
    'IREXIT2':   ('IRSHIFT', 'IRUPDATE'),
    'IRUPDATE':  ('IDLE', 'DRSELECT'),
}
#-----------------------------------------------------------------------------
# build a cache of all state transitions for fast lookup
tap_cache = {}
def search(path, current, dst):
    """return the shortest state path linking src and dst states"""
    if current == dst:
        # arrived: the states accumulated so far are the answer
        return path
    best = None
    # try both outgoing edges; a state already on the path would be a loop
    # and can never be part of the shortest route
    for nxt in state_machine[current]:
        if nxt in path:
            continue
        found = search(path + [nxt], nxt, dst)
        if found is None:
            continue
        # '<=' so a later candidate wins ties, matching the original's
        # preference for the TMS=1 successor on equal-length paths
        if best is None or len(found) <= len(best):
            best = found
    return best
def tms(path, current):
    """return a tms bit tuple from the current state along the path"""
    # pair each step with the state it leaves:
    # (current, path[0]), (path[0], path[1]), ...
    origins = [current] + list(path[:-1])
    return tuple(state_machine[frm].index(to) for frm, to in zip(origins, path))
def init_cache():
    """Precompute the TMS bit sequence for every (src, dst) state pair."""
    for src in state_machine.keys():
        for dst in state_machine.keys():
            route = search([], src, dst)
            tap_cache['%s->%s' % (src, dst)] = tms(route, src)
    # any state to RESET: five TMS=1 clocks always reach RESET
    tap_cache['*->RESET'] = (1, 1, 1, 1, 1)
def lookup(src, dst):
    """Return the cached TMS sequence for src -> dst, filling the cache lazily."""
    if not tap_cache:
        init_cache()
    return tap_cache['%s->%s' % (src, dst)]
#-----------------------------------------------------------------------------
#class tap(object):
# """JTAG TAP State Machine"""
# def __init__(self):
# pass
#-----------------------------------------------------------------------------
| [
"jasonh@mistsys.com"
] | jasonh@mistsys.com |
a9cd02628949811c68f380fb680e2de40035eea0 | c1f9f4926cf7ac20a854e3222d18b5a0e3eeb6b3 | /minos/thesis/styles.py | cba205dff8b46462376467149ce793834ec7d805 | [] | no_license | ndevenish/python-minos | d794ec02ff2a7617b57e7d4ad983eef1ac5d071f | 43876f473ac992e76037bfda219b56f068ab52fd | refs/heads/master | 2021-03-19T06:14:19.390943 | 2015-04-21T20:51:59 | 2015-04-21T20:51:59 | 25,158,699 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,081 | py | #coding: utf-8
# class Sizes(object):
# A4 = (8.3-0.4, 11.7-0.4) # With 5mm margin on all sides
# Standard = (5.9, 5.5)
# Wide = (4.9, 3.3)
# Half =
import matplotlib as mpl
#from minos.thesis import Sizes
class Sizes(object):
    """Figure (width, height) tuples, in inches, for the thesis layouts."""
    A4 = (8.3-0.4, 11.7-0.4) # With 5mm margin on all sides
    Standard = (5.9, 4.4)
    Wide = (5.9, 3.3)
    Half = (5.9*0.49, 5.9*0.49*(3./4.))  # half text width at a 4:3 aspect
    FullPage = (5.9, 5.9*1.4) # A4 ratio, but thesis fit
# matplotlib rcParams override dictionaries, one per figure-size preset.
Standard = {
    "figure.figsize": Sizes.Standard,
    "font.size": 12,
    "legend.numpoints": 1,
    "legend.fontsize": "medium"
}
Wide = {
    "figure.figsize": Sizes.Wide,
    "font.size": 12,
}
# Used for half-width 4:3 plots e.g. side-by-side
Half = {
    "figure.figsize": Sizes.Half,
    "font.size": 9,
    "legend.fontsize": "small",
}
FullPage = {
    "figure.figsize": Sizes.FullPage,
}
def figure_style(style):
    """Uses the matplotlib style context manager for a specific function.

    Decorator factory: the returned decorator runs the wrapped function
    inside ``mpl.style.context(style)``.
    """
    from functools import wraps  # local import keeps the module's deps unchanged

    def _wrap(fn):
        # Bug fix: without functools.wraps the decorated function lost its
        # __name__/__doc__, which breaks introspection and documentation.
        @wraps(fn)
        def _innerwrap(*args, **kwargs):
            with mpl.style.context(style):
                return fn(*args, **kwargs)
        return _innerwrap
    return _wrap
| [
"ndevenish@gmail.com"
] | ndevenish@gmail.com |
36d611a427b99241d39486c5737c8fb20e4e1194 | 2af94f8a7609d47fdcea28a2132c4f8bacb103e3 | /lib/idigi_pc.py | 62d44db918f767b61b1bc902aa601d06c006764b | [] | no_license | bernhara/DigiGateway4Raph | 685527723f0b306f387233c78d27fe9d78717c38 | f36ba29ef883d70f94b8609ff734b5dcde786c66 | refs/heads/master | 2020-07-05T19:56:27.027547 | 2019-08-19T06:10:46 | 2019-08-19T06:10:46 | 202,756,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,223 | py | ############################################################################
# #
# Copyright (c)2011, Digi International (Digi). All Rights Reserved. #
# #
# Permission to use, copy, modify, and distribute this software and its #
# documentation, without fee and without a signed licensing agreement, is #
# hereby granted, provided that the software is used on Digi products only #
# and that the software contain this copyright notice, and the following #
# two paragraphs appear in all copies, modifications, and distributions as #
# well. ContactProduct Management, Digi International, Inc., 11001 Bren #
# Road East, Minnetonka, MN, +1 952-912-3444, for commercial licensing #
# opportunities for non-Digi products. #
# #
# DIGI SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED #
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A #
# PARTICULAR PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, #
# PROVIDED HEREUNDER IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND. #
# DIGI HAS NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, #
# ENHANCEMENTS, OR MODIFICATIONS. #
# #
# IN NO EVENT SHALL DIGI BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, #
# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, #
# ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF #
# DIGI HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. #
# #
############################################################################
''' PC compatibility stub for Digi device cwm module. '''
def _get_ws_parms():
''' Dummy values for invalid configuration (from cwmmodule.cc). '''
return (None, None, None, 0, 0)
| [
"ORBA6563@S-ORBA65630.rd.francetelecom.fr"
] | ORBA6563@S-ORBA65630.rd.francetelecom.fr |
3e21ac19dada8214c4ab79d2e9fcbdcaed32fc2e | 1cf74ce90fd2bbe6d450312ae14b0cd581740281 | /tests/test_evaluator.py | cfc38c0bfda7651c10db719049e0b4245dc9d56a | [
"Apache-2.0"
] | permissive | bearstech/whirlwind | aa05e4a0cdd81ef8b9260ccfb4ba2325dae49f46 | 2776de5c615bf5b6e1b2c30f917527321079817c | refs/heads/master | 2021-06-06T17:32:41.669973 | 2020-10-13T09:46:24 | 2020-10-13T09:46:24 | 9,644,453 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,042 | py | import unittest
from whirlwind import evaluator
from whirlwind.mock import MockStaticReader, MockFinder
from whirlwind.storage import Store
from whirlwind.attime import parseATTime
class EvaluatorTest(unittest.TestCase):
    """Exercises whirlwind's graphite-style target evaluator against
    constant mock series ('one' is 1 everywhere, 'three' is 3, ...)."""

    def setUp(self):
        # The original also built an unused MockStaticReader() here; removed.
        finder = MockFinder({
            'one': MockStaticReader(1),
            'three': MockStaticReader(3),
            'five': MockStaticReader(5)
        })
        self.store = Store([finder], hosts=None)

    def _evaluator(self, tokens):
        """Evaluate the target expression *tokens* over the last two days."""
        context = {
            'startTime': parseATTime('-2days'),
            'endTime': parseATTime('now'),
            'localOnly': True
        }
        return evaluator.evaluateTarget(self.store, context, tokens)

    def test_average(self):
        # mean of the constant series 1 and 3 is 2 at every datapoint
        values = self._evaluator('averageSeries(one, three)')
        for v in values[0]:
            assert v == 2.0

    def test_sum(self):
        values = self._evaluator('sumSeries(one, three)')
        for v in values[0]:
            assert v == 4.0

    def test_diff(self):
        values = self._evaluator('diffSeries(five, one)')
        for v in values[0]:
            assert v == 4.0
        # Doesn't work in the graphite project too
        #values = self._evaluator('diffSeries(a.b.5,3)')
        #for v in values[0]:
        #assert v == 2.0

    # FIXME
    #def test_min_max(self):
    #store = MockStore({'a.b.c': [1, 2, 3, 4, 1, 5],
    #'d.e.f': [2, 1, 3, 0, 6, 7]
    #})
    #context = {
    #'startTime': '-2days',
    #'endTime': 'now',
    #}
    #tokens = 'minSeries(a.b.c, d.e.f)'
    #values = evaluator.evaluateTarget(store, context, tokens)
    #vv = [v for v in values[0] if v is not None]
    #assert vv == [1, 1, 3, 0, 1, 5]
    #tokens = 'maxSeries(a.b.c, d.e.f)'
    #values = evaluator.evaluateTarget(store, context, tokens)
    #vv = [v for v in values[0] if v is not None]
    #assert vv == [2, 2, 3, 4, 6, 7]
| [
"mlecarme@bearstech.com"
] | mlecarme@bearstech.com |
6ea90f5be725e40ce34a353f8bb7cb2604b6367c | afa2ebb439e6592caf42c507a789833b9fbf44b2 | /supervised_learning/0x02-tensorflow/7-evaluate.py | dcaf66e8a897d9e8fa382a62237c8ef55fef1152 | [] | no_license | anaruzz/holbertonschool-machine_learning | 64c66a0f1d489434dd0946193747ed296760e6c8 | 91300120d38acb6440a6dbb8c408b1193c07de88 | refs/heads/master | 2023-07-30T20:09:30.416167 | 2021-09-23T16:22:40 | 2021-09-23T16:22:40 | 279,293,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | #!/usr/bin/env python3
"""
Script that Evaluates the output of a neural network
"""
import tensorflow as tf
def evaluate(X, Y, save_path):
    """Evaluate the output of a saved neural network.

    Restores the graph and weights from *save_path*, feeds (X, Y) into the
    restored placeholders, and returns (y_pred, accuracy, loss).

    The original leaked the tf.Session; a context manager closes it.
    """
    with tf.Session() as sess:
        saver = tf.train.import_meta_graph(save_path + '.meta')
        saver.restore(sess, save_path)
        # Tensors were stashed in collections when the model was trained.
        y_pred = tf.get_collection('y_pred', scope=None)[0]
        loss = tf.get_collection('loss', scope=None)[0]
        accuracy = tf.get_collection('accuracy', scope=None)[0]
        x = tf.get_collection('x', scope=None)[0]
        y = tf.get_collection('y', scope=None)[0]
        y_pred, accuracy, loss = sess.run((y_pred, accuracy, loss),
                                          feed_dict={x: X, y: Y})
    return y_pred, accuracy, loss
| [
"laabidigh@gmail.com"
] | laabidigh@gmail.com |
78776dd647b6904bb6a18538a5f55a8ee87e7683 | 60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24 | /IronPythonStubs/release/stubs.min/System/Runtime/InteropServices/__init___parts/UCOMIConnectionPointContainer.py | d6d4c5c738b33d1b7dac9915eb618e3272d54a34 | [
"MIT"
] | permissive | shnlmn/Rhino-Grasshopper-Scripts | a9411098c5d1bbc55feb782def565d535b27b709 | 0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823 | refs/heads/master | 2020-04-10T18:59:43.518140 | 2020-04-08T02:49:07 | 2020-04-08T02:49:07 | 161,219,695 | 11 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | class UCOMIConnectionPointContainer:
""" Use System.Runtime.InteropServices.ComTypes.IConnectionPointContainer instead. """
    def EnumConnectionPoints(self,ppEnum):
        """
        EnumConnectionPoints(self: UCOMIConnectionPointContainer) -> UCOMIEnumConnectionPoints

        Creates an enumerator of all the connection points supported in the connectable object,one
        connection point per IID.
        """
        # Auto-generated IronPython interop stub: signature/doc only, no behavior.
        pass
    def FindConnectionPoint(self,riid,ppCP):
        """
        FindConnectionPoint(self: UCOMIConnectionPointContainer,riid: Guid) -> (Guid,UCOMIConnectionPoint)

        Asks the connectable object if it has a connection point for a particular IID,and if so,
        returns the IConnectionPoint interface pointer to that connection point.

        riid: A reference to the outgoing interface IID whose connection point is being requested.
        """
        # Auto-generated IronPython interop stub: signature/doc only, no behavior.
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        # Stub constructor accepting any arguments; does nothing.
        pass
| [
"magnetscoil@gmail.com"
] | magnetscoil@gmail.com |
05b93131012fc6bad5c9fad35bf55749ba7a81cf | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_267/ch171_2020_06_22_16_19_21_764865.py | e62085add4dd9fd088cd16a9e2d58cfcd82a50b6 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | class Carrinho:
    def __init__(self):
        # Maps product name -> accumulated price added so far.
        self.dicio = {}
    def adiciona(self, nome_produto, preco):
        # First occurrence stores the price; later ones accumulate onto it.
        if nome_produto not in self.dicio.keys():
            self.dicio[nome_produto] = preco
        else:
            self.dicio[nome_produto] += preco
def total_do_produto(self, nome_produto):
preco_total = 0
for nome,preco in self.dicio.items():
preco_total += self.dicio[nome_produto]
return preco_total
| [
"you@example.com"
] | you@example.com |
6d4cbc8ac1d3cde642db40343bb5cb5cafb3d8ed | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/52/usersdata/67/22506/submittedfiles/matriz1.py | b83917a56822d82448117c1894d206f2ee154a27 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,094 | py | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
def menorcoluna (a):
    """Smallest column index of matrix `a` containing a 1 (None if no 1)."""
    cols = np.nonzero(a == 1)[1]
    return int(cols.min()) if cols.size else None
def maiorcoluna (a):
    """Largest column index of matrix `a` containing a 1 (None if no 1).

    The original's `return cd` sat inside the scan loops and could return
    before all columns were examined; this form checks the whole matrix.
    """
    cols = np.nonzero(a == 1)[1]
    return int(cols.max()) if cols.size else None
def maiorlinha (a):
    """Largest row index of matrix `a` containing a 1 (None if no 1).

    Replaces the early-return scan loops with a whole-matrix check.
    """
    rows = np.nonzero(a == 1)[0]
    return int(rows.max()) if rows.size else None
def menorlinha (a):
    """Smallest row index of matrix `a` containing a 1 (None if no 1).

    The original assigned `lc = i` but returned the loop variable `i`
    directly; this form makes the intent explicit and handles a matrix
    with no 1s by returning None.
    """
    rows = np.nonzero(a == 1)[0]
    return int(rows.min()) if rows.size else None
# NOTE(review): relies on Python-2 input() evaluating to ints; under
# Python 3 each input() would need an int() wrapper.
linhas=input("Digite a quantidade de linhas:")
colunas=input("Digite a quantidade de colunas:")
a=np.zeros((linhas,colunas))
# Fill the matrix element by element from user input.
for i in range (0,a.shape[0],1):
    for j in range (0,a.shape[1],1):
        a[i,j]=input("Digite um elemento:")
# Print the bounding submatrix that encloses every 1 in the matrix.
print ( a[menorlinha(a):maiorlinha(a)+1,menorcoluna(a):maiorcoluna(a)+1] )
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
b819f0927c0e24f8d7915d6afa203893a63b2360 | 2f7f918888f57a1a341035649e6c42b264c91313 | /vendor/atmel/atmel.py | e769752270820deb68ade3716f33baed0ee2ad00 | [] | no_license | arunsigood/pycs | 8331417d46084b0ccb6381a85ac3490d97d8b162 | 4f6035a24169e4c9130f1a47ba0e68cc1bf6390b | refs/heads/master | 2020-03-22T21:08:31.009632 | 2018-07-05T22:49:14 | 2018-07-05T22:49:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,525 | py | #-----------------------------------------------------------------------------
"""
SoC file for Atmel SAM Devices
Read in the SVD file for a named SoC.
Run fixup functions to correct any SVD inadequecies.
"""
#-----------------------------------------------------------------------------
import soc
import cmregs
import util
#-----------------------------------------------------------------------------
# build a database of SoC devices
class soc_info(object):
  """Record describing one supported SoC; the populating code assigns
  .name, .svd and .fixups attributes after construction."""
  def __init__(self):
    pass
soc_db = {}
#-----------------------------------------------------------------------------
# NVM User Row Mapping: Not in the SVD :-(
def _eeprom_fmt(x):
  # Field value 7 means no EEPROM (0 bytes); otherwise size is 2**(14-x).
  return '%s' % util.memsize((1 << (14-x),0)[x == 7])
def _bootprot_fmt(x):
  # Field value 7 means no bootloader area (0 bytes); otherwise 2**(15-x).
  return '%s' % util.memsize((1 << (15-x),0)[x == 7])
_nvmr0_fieldset = (
('WDT_Period', 31, 28, None, 'WDT Period at power-on'),
('WDT_Always', 27, 27, None, 'WDT Always-On at power-on'),
('WDT_Enable', 26, 26, None, 'WDT Enable at power-on'),
('BOD12_Action', 25, 24, None, 'BOD12 Action at power-on'),
('BOD12_Disable', 23, 23, None, 'BOD12 Disable at power-on'),
('BOD12_Level', 22, 17, None, 'BOD12 threshold level at power-on'),
('BOD33_Action', 16, 15, None, 'BOD33 Action at power-on'),
('BOD33_Disable', 14, 14, None, 'BOD33 Disable at power-on'),
('BOD33_Level', 13, 8, None, 'BOD33 threshold level at power-on'),
('EEPROM', 6, 4, _eeprom_fmt, 'Used to select one of eight different EEPROM sizes'),
('BOOTPROT', 2, 0, _bootprot_fmt, 'Used to select one of eight different bootloader sizes'),
)
_nvmr1_fieldset = (
('LOCK', 31, 16, None, 'NVM Region Lock Bits'),
('BOD12_Hysteresis', 10, 10, None, 'BOD12 Hysteresis configuration Hysteresis at power-on'),
('BOD33_Hysteresis', 9, 9, None, 'BOD33 Hysteresis configuration Hysteresis at power-on'),
('WDT_WEN', 8, 8, None, 'WDT Timer Window Mode Enable at power-on'),
('WDT_EWOFFSET', 7, 4, None, 'WDT Early Warning Interrupt Time Offset at power-on'),
('WDT_Window', 3, 0, None, 'WDT Window mode time-out at power-on'),
)
_nvm_user_row_regset = (
('NVMUR0', 32, 0x0, _nvmr0_fieldset, 'NVM User Row 0'),
('NVMUR1', 32, 0x4, _nvmr1_fieldset, 'NVM User Row 1'),
)
#-----------------------------------------------------------------------------
# ATSAML21J18B
def ATSAML21J18B_fixup(d):
  """Patch the SVD-derived device model for the ATSAML21J18B: set the
  name and interrupt count, then add memory regions and the NVM user
  row that the SVD file does not describe."""
  d.soc_name = 'ATSAML21J18B'
  d.cpu_info.deviceNumInterrupts = 32
  # memory and misc peripherals
  d.insert(soc.make_peripheral('flash', 0x00000000, 256 << 10, None, 'Flash'))
  d.insert(soc.make_peripheral('rww', 0x00400000, 8 << 10, None, 'RWW Section'))
  d.insert(soc.make_peripheral('sram', 0x20000000, 32 << 10, None, 'SRAM'))
  d.insert(soc.make_peripheral('lp_sram', 0x30000000, 8 << 10, None, 'Low Power SRAM'))
  d.insert(soc.make_peripheral('NVMUR', 0x00804000, 8, _nvm_user_row_regset, 'NVM User Row'))
s = soc_info()
s.name = 'ATSAML21J18B'
s.svd = 'ATSAML21J18B'
s.fixups = (ATSAML21J18B_fixup, cmregs.cm0plus_fixup)
soc_db[s.name] = s
#-----------------------------------------------------------------------------
def get_device(ui, name):
  """return the device structure for the named SoC"""
  # NOTE(review): dict.has_key is Python-2 only; use `name in soc_db` on Py3.
  if not soc_db.has_key(name):
    assert False, 'unknown SoC name %s' % name
    return None  # unreachable: the assert above always fires first
  info = soc_db[name]
  svd_file = './vendor/atmel/svd/%s.svd.gz' % info.svd
  ui.put('%s: compiling %s\n' % (name, svd_file))
  device = soc.build_device(svd_file)
  # Apply the SoC-specific and core-specific corrections in order.
  for f in info.fixups:
    f(device)
  return device
#-----------------------------------------------------------------------------
| [
"jasonh@mistsys.com"
] | jasonh@mistsys.com |
34c7eb67850c4cf233982feffdbd2f7f4ff892db | c376179fd8572514826e574a67d0cb4002780497 | /mnist_lenet5/mnist_backward_lenet5.py | 70686effbde3d50738726ec93764022df4e9a308 | [] | no_license | youthliuxi/tf_learn | a4ec4e03dfebe2abf550b895607b2aa76f19f2fe | 617c26a72b4d8f95280723eda97d99a0e2b8bfbf | refs/heads/master | 2020-03-19T09:45:16.750767 | 2018-06-06T10:54:56 | 2018-06-06T10:54:56 | 136,315,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,445 | py | #coding:utf-8
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import mnist_forward_lenet5
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.005
LEARNING_RATE_DECAY = 0.99
REGULARIZER = 0.0001
STEPS = 2000
MOVING_AVERAGE_DECAY = 0.99
MODEL_SAVE_PATH = "./model/"
MODEL_NAME = "mnist_model"
def backward(mnist):
    """Train the LeNet-5 network on MNIST: cross-entropy + L2 loss,
    exponentially decayed learning rate, EMA shadow weights, and a
    checkpoint saved every 100 steps (resumes from the latest one)."""
    # Images as a 4-D batch [N, H, W, C]; labels as one-hot rows.
    x = tf.placeholder(tf.float32, [
        BATCH_SIZE,
        mnist_forward_lenet5.IMAGE_SIZE,
        mnist_forward_lenet5.IMAGE_SIZE,
        mnist_forward_lenet5.NUM_CHANNELS])
    y_ = tf.placeholder(tf.float32, [None, mnist_forward_lenet5.OUTPUT_NODE])
    y = mnist_forward_lenet5.forward(x, True, REGULARIZER)
    global_step = tf.Variable(0, trainable = False)
    # Softmax cross-entropy against the label index, plus the collected
    # regularization losses from the forward pass.
    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = y, labels = tf.argmax(y_, 1))
    cem = tf.reduce_mean(ce)
    loss = cem + tf.add_n(tf.get_collection('losses'))
    # Decay the learning rate once per pass over the training set.
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase = True)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step = global_step)
    # Maintain exponential moving averages of all trainable variables;
    # train_op runs the optimizer step and the EMA update together.
    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name = 'train')
    saver = tf.train.Saver()
    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        # Resume from the latest checkpoint if one exists.
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        for i in range(STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            # Reshape the flat batch back into image tensors for the conv net.
            reshaped_xs = np.reshape(xs, (
                BATCH_SIZE,
                mnist_forward_lenet5.IMAGE_SIZE,
                mnist_forward_lenet5.IMAGE_SIZE,
                mnist_forward_lenet5.NUM_CHANNELS))
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict = {x: reshaped_xs, y_: ys})
            if i % 100 ==0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
                saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step = global_step)
def main():
    """Load the MNIST data set and run the training loop."""
    mnist = input_data.read_data_sets("./data/", one_hot = True)
    backward(mnist)
    # print(mnist.train.num_examples)
if __name__ == '__main__':
main() | [
"lx_einstein@sina.com"
] | lx_einstein@sina.com |
e0e6ee7e997ccdcc861d7be06c8017a1bd4981b7 | b403c7fe56209472855dff451f0b6283d5471008 | /Supplemental_Material/PythonProjects/10. GUI_Tkinter/GUI_BindingButtons_nb.py | 5a8a1050361e8e0331cdb9c6a8943e10f1012300 | [] | no_license | Sandbox4KidsTM/Python_Basics | 842bde52796896e913fdb5cc349034c52092555f | 68c95547ec1567958fc8069e6a4bb119e436211a | refs/heads/master | 2020-03-23T01:06:29.363196 | 2018-08-10T04:32:58 | 2018-08-10T04:32:58 | 140,901,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | from tkinter import *
# Minimal Tk window with one button whose click handler prints a line.
root = Tk()

def printName():
    # Callback fired each time button_1 is clicked.
    print("hello, my name is Bucky")

button_1 = Button(root, text="Print my name", command=printName, bg="red", fg="white")
# command binds the function printName() to the button_1
button_1.pack()
# Enter the Tk event loop (blocks until the window is closed).
root.mainloop()
"mitchslabrenz@gmail.com"
] | mitchslabrenz@gmail.com |
356b51db1cd46f05c6cfb7019f4972928980f198 | 45f7a9b44ea1c45448703707da793d51151c0527 | /ui_tests/answers/answers_02.py | 2cb15c8bf8c7a775a15f8f87a5b2feb6fcc81548 | [] | no_license | basdijkstra/python-for-testers | a40d30432c31712c6d0eadbca9de73056ff10535 | 50bfbabfb2b8426eed8d048b0448959c34f71b61 | refs/heads/master | 2023-05-24T18:48:58.557924 | 2023-05-23T05:44:11 | 2023-05-23T05:44:11 | 219,865,075 | 7 | 4 | null | 2023-05-23T05:44:13 | 2019-11-05T22:47:09 | Python | UTF-8 | Python | false | false | 1,623 | py | from selenium import webdriver
from selenium.webdriver.support.ui import Select
import pytest
import time
@pytest.fixture
def browser():
    # Fresh maximized Chrome session per test; quit after the test finishes.
    driver = webdriver.Chrome()
    driver.maximize_window()
    yield driver
    driver.quit()
# Exercise 2.1
# Extend this test with the following actions:
# 1. Select the menu item 'Request Loan' from the side menu bar
# 2. Specify '1000' as the requested loan amount
# 3. Specify '100' as the down payment
# 4. Select '12456' as the from account ID
# 5. Click the 'Apply Now' button
# 6. Check that the element containing the result of the loan application is displayed
# (you might need to add a time.sleep(x) statement here, which
# makes the test wait for x seconds before proceeding with the
# next statement)
# 7. Check that the result of the loan application equals 'Denied'
def test_successful_loan_request(browser):
    # Log in to ParaBank as the demo user.
    browser.get("http://parabank.parasoft.com")
    browser.find_element_by_name("username").send_keys("john")
    browser.find_element_by_name("password").send_keys("demo")
    browser.find_element_by_xpath("//input[@value='Log In']").click()
    # Fill in and submit the loan request form.
    browser.find_element_by_link_text("Request Loan").click()
    browser.find_element_by_id("amount").send_keys("1000")
    browser.find_element_by_id("downPayment").send_keys("100")
    Select(browser.find_element_by_id("fromAccountId")).select_by_visible_text("12456")
    browser.find_element_by_xpath("//input[@value='Apply Now']").click()
    # Fixed wait for the async result as the exercise suggests;
    # NOTE(review): an explicit WebDriverWait would be more robust.
    time.sleep(3)
    # The loan result element must be shown and report 'Denied'.
    assert browser.find_element_by_id("loanStatus").is_displayed()
    assert browser.find_element_by_id("loanStatus").text == "Denied"
| [
"bas@ontestautomation.com"
] | bas@ontestautomation.com |
866fb60bedda985216024081f5bbc4d86cc63df1 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/126/usersdata/179/32849/submittedfiles/ap2.py | ae550679631479cc360ae5392d5442f324129039 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | # -*- coding: utf-8 -*-
# Read four integers and print the largest, then the smallest.
a = int(input('primeiro numero:'))
b = int(input('segundo numero:'))
c = int(input('terceiro numero:'))
d = int(input('quarto numero:'))
valores = [a, b, c, d]
# max()/min() also handle ties; the original strict-inequality chains
# printed nothing at all when two of the values were equal.
print(max(valores))
print(min(valores))
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
2583a229d3a812adb1e72cf5cc77957bfb57a97e | 7e27d2b844e962a567e0311a6eb5ccf3fcdc7b98 | /lib/exabgp/application/cli.py | 3abc6a4b8deb29611f0efd9ba3fe1b415a3a3d6d | [] | no_license | slabakov/exabgp | 1dbf6a98b06a2c2cdbeedf0954d0429f0dbf98fb | 33f851d70715f4ba1792acc36436ef32b70c30c9 | refs/heads/master | 2020-12-30T19:46:09.570146 | 2015-06-05T15:36:19 | 2015-06-05T15:36:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,083 | py | #!/usr/bin/env python
# encoding: utf-8
"""
cli.py
Created by Thomas Mangin on 2014-12-22.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
import sys
from exabgp.dep.cmd2 import cmd
from exabgp.version import version
class Completed (cmd.Cmd):
# use_rawinput = False
# prompt = ''
# doc_header = 'doc_header'
# misc_header = 'misc_header'
# undoc_header = 'undoc_header'
ruler = '-'
completion = {}
def __init__ (self, intro=''):
self.prompt = '%s> ' % intro
cmd.Cmd.__init__(self)
def completedefault (self, text, line, begidx, endidx): # pylint: disable=W0613
commands = line.split()
local = self.completion
for command in commands:
if command in local:
local = local[command]
continue
break
return [_ for _ in local.keys() if _.startswith(text)]
def default (self, line):
print 'unrecognised syntax: ', line
def do_EOF (self):
return True
class SubMenu (Completed):
    """Nested menu base: 'exit' (alias 'x') returns control to the parent loop."""
    def do_exit (self, _):
        return True
    # short alias so typing 'x' also leaves the sub-menu
    do_x = do_exit
class Attribute (SubMenu):
chars = ''.join(chr(_) for _ in range(ord('a'),ord('z')+1) + range(ord('0'),ord('9')+1) + [ord ('-')])
attribute = None
completion = {
'origin': {
'igp': {
},
'egp': {
},
'incomplete': {
},
},
}
def __init__ (self, name):
self.name = name
SubMenu.__init__(self,'attribute %s' % name)
def do_origin (self, line):
if line in ('igp','egp','incomplete'):
self.attribute['origin'] = line
else:
print 'invalid origin'
def do_as_path (self, line):
pass
# next-hop
def do_med (self, line):
if not line.isdigit():
print 'invalid med, %s is not a number' % line
return
med = int(line)
if 0 > med < 65536:
print 'invalid med, %s is not a valid number' % line
self.attribute['origin'] = line
# local-preference
# atomic-aggregate
# aggregator
# community
# originator-id
# cluster-list
# extended-community
# psmi
# aigp
def do_show (self, _):
print 'attribute %s ' % self.name + ' '.join('%s %s' % (key,value) for key,value in self.attribute.iteritems())
class Syntax (Completed):
    """Top-level interactive menu: command-word completion tree plus the
    neighbor / attribute state shared between commands."""

    completion = {
        'announce': {
            'route': {
            },
            'l2vpn': {
            },
        },
        'neighbor': {
            'include': {
            },
            'exclude': {
            },
            'reset': {
            },
            'list': {
            },
        },
        'attribute': {
        },
        'show': {
            'routes': {
                'extensive': {
                },
                'minimal': {
                },
            },
        },
        'reload': {
        },
        'restart': {
        },
    }

    def _update_prompt (self):
        # Show the currently selected neighbors in the prompt.
        if self._neighbors:
            self.prompt = '\n# neighbor ' + ', '.join(self._neighbors) + '\n> '
        else:
            self.prompt = '\n> '

    #
    # repeat last command
    #

    # last = 'help'

    # def do_last (self, line):
    # "Print the input, replacing '$out' with the output of the last shell command"
    # # Obviously not robust
    # if hasattr(self, 'last_output'):
    # print line.replace('$out', self.last_output)

    _neighbors = set()

    def do_neighbor (self, line):
        # Handles 'neighbor include <ip>' / 'exclude <ip>' / 'list' / 'reset'.
        try:
            action,ip = line.split()
        except ValueError:
            # Single-word form: only 'reset' is valid.
            if line == 'reset':
                print 'removed neighbors', ', '.join(self._neighbors)
                self._neighbors = set()
                self._update_prompt()
            else:
                print 'invalid syntax'
                self.help_neighbor()
            return
        if action == 'include':
            # check ip is an IP
            # check ip is a known IP
            self._neighbors.add(ip)
            self._update_prompt()
        elif action == 'exclude':
            if ip in self._neighbors:
                self._neighbors.remove(ip)
                print 'neighbor excluded'
                self._update_prompt()
            else:
                print 'invalid neighbor'
        elif action == 'list':
            # NOTE(review): prints the current set but the message says
            # 'removed neighbors' - looks copy-pasted from the reset branch.
            print 'removed neighbors', ', '.join(self._neighbors)
        else:
            print 'invalid syntax'
            self.help_neighbor()

    def help_neighbor (self):
        print "neighbor include <ip>: limit the action to the defined neighbors"
        print "neighbor exclude <ip>: remove a particular neighbor"
        print "neighbor reset : clear the neighbor previous set "

    _attribute = {}

    def do_attribute (self, name):
        # Validate the attribute name, then drop into an Attribute sub-menu.
        if not name:
            self.help_attribute()
            return
        invalid = ''.join([_ for _ in name if _ not in Attribute.chars])
        if invalid:
            print 'invalid character(s) in attribute name: %s' % invalid
            return
        cli = Attribute(name)
        cli.attribute = self._attribute.get(name,{})
        cli.cmdloop()

    def help_attribute (self):
        print 'attribute <name>'

    def do_quit (self, _):
        return True

    do_q = do_quit
class Command (object):
    """Mixin implementing CLI commands on top of the Connection transport."""
    def do_show (self,line):
        # Forward a fixed 'show routes' request and echo the reply.
        self.request('show routes')
        self.report()
import select
class Connection (object):
def __init__ (self,name):
self.read = open(name,'r+')
self.write = open(name,'w+')
def request (self,command):
self.write.write(command + '\n')
def report (self):
while select.select([self.read],[],[],5):
print self.read.readline()
def close (self):
self.read.close()
self.write.close()
class ExaBGP (Connection,Command,Syntax):
    """Interactive CLI bound to an exabgp control channel (default 'exabgp.cmd')."""
    def __init__ (self,name='exabgp.cmd'):
        Connection.__init__(self,name)
        Syntax.__init__(self,'')
def main():
    # With CLI arguments: run them as a single command and exit;
    # otherwise start the interactive command loop.
    if len(sys.argv) > 1:
        ExaBGP().onecmd(' '.join(sys.argv[1:]))
    else:
        print "ExaBGP %s CLI" % version
        ExaBGP('').cmdloop()
if __name__ == '__main__':
main()
| [
"thomas.mangin@exa-networks.co.uk"
] | thomas.mangin@exa-networks.co.uk |
f019de6bfe87e4a0abc40de090dffd0869c0bc61 | 70976a4a0526f7585f810921925cf8d19e6aabfa | /project/apps/registration/tests/factories.py | 75a1b1bcb049fe20c0d38982e66af5692df52680 | [
"BSD-2-Clause"
] | permissive | barberscore/barberscore-api | 36be50b943ed59ac2fc738069661f5b589354a36 | 1ed4c01ae35cad21282b573a492733837f956285 | refs/heads/master | 2023-09-03T21:14:57.358069 | 2023-07-08T20:45:03 | 2023-07-08T20:45:03 | 11,014,681 | 14 | 5 | BSD-2-Clause | 2023-02-08T01:18:17 | 2013-06-28T03:28:17 | Python | UTF-8 | Python | false | false | 2,676 | py |
# Standard Library
import datetime
import rest_framework_jwt
# Third-Party
from factory import Faker # post_generation,
from factory import Iterator
from factory import LazyAttribute
from factory import PostGenerationMethodCall
from factory import RelatedFactory
from factory import Sequence
from factory import SubFactory
from factory.django import DjangoModelFactory
from factory.django import mute_signals
from factory.fuzzy import FuzzyInteger
# Django
from django.db.models.signals import pre_delete
from django.db.models.signals import pre_save
from django.db.models.signals import m2m_changed
# First-Party
from apps.registration.models import Assignment
from apps.registration.models import Contest
from apps.registration.models import Entry
from apps.registration.models import Session
from rest_framework_jwt.models import User
class AssignmentFactory(DjangoModelFactory):
    # Builds an official-kind Assignment attached to a fresh Session.
    # status = Assignment.STATUS.active
    kind = Assignment.KIND.official
    # convention = SubFactory('factories.ConventionFactory')
    session = SubFactory('apps.registration.tests.factories.SessionFactory')

    class Meta:
        model = Assignment
class ContestFactory(DjangoModelFactory):
    # Builds a Contest attached to a fresh Session.
    # status = Contest.STATUS.included
    session = SubFactory('apps.registration.tests.factories.SessionFactory')
    # award = SubFactory('factories.AwardFactory')

    class Meta:
        model = Contest
class EntryFactory(DjangoModelFactory):
    # Builds a new, public, evaluation-enabled Entry on a fresh Session.
    status = Entry.STATUS.new
    is_evaluation = True
    is_private = False
    session = SubFactory('apps.registration.tests.factories.SessionFactory')
    # group = SubFactory('factories.GroupFactory')

    class Meta:
        model = Entry
class SessionFactory(DjangoModelFactory):
    # Builds a two-round BHS quartet championship session.
    status = Session.STATUS.new
    kind = Session.KIND.quartet
    name = "International Championship"
    district = Session.DISTRICT.bhs
    is_invitational = False
    num_rounds = 2
    # convention = SubFactory('factories.ConventionFactory')

    class Meta:
        model = Session

    # @post_generation
    # def create_rounds(self, create, extracted, **kwargs):
    # if create:
    # for i in range(self.num_rounds):
    # num = i + 1
    # kind = self.num_rounds - i
    # RoundFactory(
    # session=self,
    # num=num,
    # kind=kind,
    # )
# Signals are muted so building a user triggers no model signal handlers.
@mute_signals(pre_delete, pre_save, m2m_changed)
class UserFactory(DjangoModelFactory):
    # Non-staff user with a faked name/email; password is always 'password'.
    name = Faker('name_male')
    email = Faker('email')
    password = PostGenerationMethodCall('set_password', 'password')
    is_staff = False

    class Meta:
        model = User
| [
"dbinetti@gmail.com"
] | dbinetti@gmail.com |
5de7011859a215b253659cf1f2f4cd9ba586bbdc | 6df0d7a677129e9b325d4fdb4bbf72d512dd08b2 | /PycharmProjects/nsd_python_v02/day08/python_code_web/logs_engine/cals/cal_ip_converter.py | 834453bfe8326dbd076717b52602cd8e2eef0ac2 | [] | no_license | yingxingtianxia/python | 01265a37136f2ad73fdd142f72d70f7c962e0241 | 3e1a7617a4b6552bce4a7e15a182f30e1bae221e | refs/heads/master | 2021-06-14T15:48:00.939472 | 2019-12-13T05:57:36 | 2019-12-13T05:57:36 | 152,200,507 | 0 | 0 | null | 2021-06-10T20:54:26 | 2018-10-09T06:40:10 | Python | UTF-8 | Python | false | false | 639 | py | #!/usr/bin/python
# coding=utf-8
"""
将IP地址翻译为具体地名
这里使用的是假的地名,真实情况,可以通过:
1)自己建立的地址库
2)网络上的开放地址库进行翻译
http://ip.taobao.com/service/getIpInfo.php?ip=120.25.63.167
http://ip.ws.126.net/ipquery?ip=120.25.63.167
"""
import data_save as cache
import sys
sys.path.append("..")
from defines import *
def ip2Location(ip=""):
    # Look up (city, province) in the in-memory `ips` table from defines.
    # NOTE(review): raises KeyError for an unknown IP - confirm intended.
    return ips[ip][0], ips[ip][1]
def cal(log, parameters=[]):
    # Translate the log entry's IP and bump per-city / per-province counters.
    # NOTE(review): mutable default `parameters=[]` is unused here, but is a
    # shared-instance hazard should it ever be mutated.
    city, province = ip2Location(log['ip'])
    # print log['sdate'],city,province
    cache.addOne("city=" + city)
    cache.addOne("province=" + province)
| [
"root@room8pc205.tedu.cn"
] | root@room8pc205.tedu.cn |
909213a9973c1d8f3eb65713c5dceb7f61293c51 | c0d9fd9aaf65fff29aaf867ba3a1cd55d35788d1 | /thumb/fields/video_thumbnail_field/widget.py | c40f72151968efe5907694cc1805813b1135176a | [
"BSD-2-Clause"
] | permissive | AmrAnwar/django-thumb | bb76e5511edca3ae331e00767e5c302ce4f54bfe | ec22446cd1e1721a02dd3d101c3697cf0f309ded | refs/heads/master | 2022-11-29T01:47:38.944343 | 2022-09-21T08:15:25 | 2022-09-21T08:15:25 | 138,357,544 | 12 | 0 | BSD-2-Clause | 2022-11-22T07:53:26 | 2018-06-22T23:53:27 | Python | UTF-8 | Python | false | false | 2,366 | py | from django import forms
from ..colored_text_field import ColoredTextInput
from ..cascade_data import CASCADE_DATA
from ..image_thumnail_field import ImageThumbnailInput
CASCADE_CHOICES = list(
[("", "select a cascade case")] +
[(str(cascade_index), cascade[0])
for cascade_index, cascade in enumerate(CASCADE_DATA)]
)
class VideoThumbnailInput(ImageThumbnailInput):
    """Composite widget for choosing a video thumbnail: hidden image/video
    state, capture options (cascade case, MM:SS timestamp, random flag),
    a manual file upload, and a colour input."""

    def __init__(self, video_field_name, video_capture_help_text=None, attrs=None):
        self.video_field_name = video_field_name or "video"
        self.video_capture_help_text = (video_capture_help_text
                                        or "if checked or entered data in more than 1 field, it will execute in order")
        # for update case
        self.video = None
        widgets = [
            # to get the image path
            forms.HiddenInput(attrs=attrs),
            # to get the video value
            forms.HiddenInput(attrs=attrs),
            # capture options
            forms.Select(choices=CASCADE_CHOICES),
            forms.TextInput(attrs={'placeholder': 'MM:SS'}),
            forms.CheckboxInput(attrs={'label': "random", "note": "get random thumbnail"}, ),
            # for manual input
            forms.ClearableFileInput(attrs=attrs),
            # color
            ColoredTextInput(attrs=attrs)
        ]
        super(VideoThumbnailInput, self).__init__(child_widgets=widgets, attrs=attrs)
        self.template_name = "video_thumbnail.html"

    def get_context(self, name, value, attrs):
        # Add the capture help text on top of the parent template context.
        context = super(VideoThumbnailInput, self).get_context(name, value, attrs)
        context['widget']['video_capture_help_text'] = self.video_capture_help_text
        return context

    def decompress(self, value):
        # Split the stored value into sub-widget values and, when any are
        # set, slot the related video's filesystem path into position 1.
        # NOTE(review): assumes `value` carries a model `.instance`; a
        # video with no file raises ValueError on `.path` - confirm that
        # a missing video attribute (None) cannot reach `.path` here.
        image_value = value
        value = super(VideoThumbnailInput, self).decompress(value)
        if any(value):
            video = getattr(image_value.instance, self.video_field_name, None)
            try:
                value[1] = video.path
            except ValueError:
                value[1] = None
        return value

    def value_from_datadict(self, data, files, name):
        # Prefer a freshly uploaded file; fall back to the hidden video value.
        value = super(VideoThumbnailInput, self).value_from_datadict(data, files, name)
        submitted_video = files.get(self.video_field_name) or value['data'][1]
        value['video'] = submitted_video
        return value
| [
"amranwar945@gmail.com"
] | amranwar945@gmail.com |
0c748b030025fe67ebf6fdf3a85cd7acb380914e | 5b4686ace41ebfcb2c694283b232761010cf31d7 | /commands/deviot_choose_display_mode.py | ddd22cf4e81514b3e01948d627fc7200308de35c | [
"Apache-2.0"
] | permissive | gepd/Deviot | bbf4d40fbecb8187255a4ab0f3e4dae7e5a7d985 | 150caea06108369b30210eb287a580fcff4904af | refs/heads/develop | 2023-08-18T04:13:56.932126 | 2020-07-13T18:02:23 | 2020-07-13T18:02:23 | 47,856,861 | 335 | 91 | Apache-2.0 | 2023-01-28T02:53:49 | 2015-12-11T23:56:06 | Python | UTF-8 | Python | false | false | 507 | py | from sublime_plugin import WindowCommand
from ..libraries.quick_menu import QuickMenu
class DeviotChooseDisplayModeCommand(WindowCommand):
    """Shows the display-mode quick panel and persists the option the
    user picks into the preferences file.

    Extends: sublime_plugin.WindowCommand
    """

    def run(self):
        menu = QuickMenu()
        entries = menu.display_mode_list()
        on_select = menu.callback_display_mode
        menu.set_list(entries)
        menu.show_quick_panel(on_select)
| [
"guillermoepd@hotmail.com"
] | guillermoepd@hotmail.com |
def listSum(list1):
    """Recursively sum the numbers in list1; an empty list sums to 0.

    The original recursed down to len == 1 and raised IndexError on [].
    """
    if not list1:
        return 0
    return list1[0] + listSum(list1[1:])
print(listSum([1,2,3,4,5]))
| [
"kaushik3993@gmail.com"
] | kaushik3993@gmail.com |
c7e18e72d352a0e8e4db91ed53dbd9f44471b7ba | fb5dd7410679bd28299cfe3841de6fe826d978cb | /src/core/migrations/0005_auto_20201207_1836.py | 64b6f100ce9b6c92c5e82a0f07135be96127816d | [] | no_license | IvanYukish/finance-manager | 35202fde63a7f519b52d8e09f3f64dd547cccbc5 | 9147d09cff7543361f5ccefa79ec334a58efc9a1 | refs/heads/master | 2023-07-11T14:39:17.536557 | 2021-08-04T23:05:45 | 2021-08-04T23:05:45 | 317,544,811 | 1 | 0 | null | 2021-08-23T17:18:10 | 2020-12-01T13:09:50 | CSS | UTF-8 | Python | false | false | 428 | py | # Generated by Django 3.1.3 on 2020-12-07 18:36
from django.db import migrations, models
class Migration(migrations.Migration):
    # Alters Debts.description to an indexed 500-char CharField.

    dependencies = [
        ('core', '0004_auto_20201207_1825'),
    ]

    operations = [
        migrations.AlterField(
            model_name='debts',
            name='description',
            field=models.CharField(db_index=True, max_length=500, verbose_name='Опис'),
        ),
    ]
| [
"iwan.jukisch@gmail.com"
] | iwan.jukisch@gmail.com |
8eff9c7439dde637d31627de4f899aa0551c24f1 | a97f789530412fc1cb83170a11811f294b139ee8 | /疯狂Python讲义/codes/19/19.2/barh_test.py | 6d599b9a156299c3f8e0b59efbad2a80af7a7fb8 | [] | no_license | baidongbin/python | 3cebf2cc342a15b38bf20c23f941e6887dac187a | 1c1398bff1f1820afdd8ddfa0c95ccebb4ee836f | refs/heads/master | 2021-07-21T19:23:32.860444 | 2020-03-07T11:55:30 | 2020-03-07T11:55:30 | 195,909,272 | 0 | 1 | null | 2020-07-21T00:51:24 | 2019-07-09T01:24:31 | Python | UTF-8 | Python | false | false | 1,476 | py | import matplotlib.pyplot as plt
import numpy as np

# Render Chinese (CJK) tick/label text correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']
# Render minus signs correctly alongside the CJK font.
plt.rcParams['axes.unicode_minus'] = False

# Build the data: yearly sales figures for the two book titles.
x_data = ['2011', '2012', '2013', '2014', '2015', '2016', '2017']
y_data = [58000, 60200, 63000, 71000, 84000, 90500, 107000]
y_data2 = [52000, 54200, 51500, 58300, 56800, 59500, 62700]

bar_width = 0.3
# The Y positions of the first series are range(len(x_data)), i.e. 0, 1, 2...
plt.barh(y=range(len(x_data)), width=y_data, label='疯狂Java讲义',
         color='steelblue', alpha=0.8, height=bar_width)
# The second series is offset by bar_width (bar_width, 1 + bar_width, ...)
# so its bars sit next to the first series instead of overlapping it.
plt.barh(y=np.arange(len(x_data)) + bar_width, width=y_data2,
         label='疯狂 Android 讲义', color='indianred', alpha=0.8, height=bar_width)

# Annotate every bar with its value; ha/va control the horizontal/vertical
# alignment of the annotation text.
for y, x in enumerate(y_data):
    plt.text(x + 5000, y - bar_width / 2, '%s' % x, ha='center', va='bottom')
for y, x in enumerate(y_data2):
    plt.text(x + 5000, y + bar_width / 2, '%s' % x, ha='center', va='bottom')

# Put the year labels on the Y-axis ticks, centered between the bar pairs.
plt.yticks(np.arange(len(x_data)) + bar_width / 2, x_data)
# Title ("Java vs Android book comparison").
plt.title("Java 与 Android 图书对比")
# Axis names: X = "sales", Y = "year".
plt.xlabel("销量")
plt.ylabel("年份")
# Show the legend.
plt.legend()
plt.show()
| [
"baidongbin@thunisoft.com"
] | baidongbin@thunisoft.com |
abfa065e0669f0ebca97cd0fcf096a691985a579 | 96d22b720aa724341afc9ecb60951340f18afc11 | /bench_run_times/compare_gelsy.py | 54510a751a137ff97c3b2c9bd1d2ef97fc650932 | [
"MIT"
] | permissive | cjekel/pwlf_scipy_tf_benchmarks | 7d10ecd5437ab0a73ddf10cc307bb50950d47033 | 7fc3a92fc4bd58b53a9839fe139fe0c53528a894 | refs/heads/master | 2023-03-21T04:56:03.779462 | 2019-05-15T23:26:50 | 2019-05-15T23:26:50 | 180,461,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,176 | py | import numpy as np
import matplotlib.pyplot as plt

# factor for 90% coverage with 90% confidence using Normal distribution
# with 10 samples from table XII in [1]
# [1] Montgomery, D. C., & Runger, G. C. (2014). Applied statistics and
# probability for engineers. Sixth edition. John Wiley & Sons.
k = 2.535

# Benchmark timings; presumably shaped (solver, problem_size, repeat),
# since axis=2 is reduced below -- TODO confirm against the benchmark script.
run_times = np.load('intel_i5_6300u/6_break_times.npy')
n = np.load('intel_i5_6300u/n.npy')
run_times1 = np.load('intel_i5_6300u_gelsy/6_break_times.npy')

# Mean and sample standard deviation (ddof=1) over the repeat axis.
run_times_means = run_times.mean(axis=2)
run_times_stds = run_times.std(axis=2, ddof=1)

run_times_means1 = run_times1.mean(axis=2)
run_times_stds1 = run_times1.std(axis=2, ddof=1)

# Error bars span k*std, the tolerance-interval factor defined above.
plt.figure()
plt.grid()
plt.errorbar(n, run_times_means[0], yerr=k*run_times_stds[0], capsize=2.0, label='Numpy')
plt.errorbar(n, run_times_means1[0], yerr=k*run_times_stds1[0], capsize=2.0, label='Scipy gelsy')
# plt.errorbar(n, run_times_means[1], yerr=k*run_times_stds[1], capsize=2.0, label='TF GPU')
plt.errorbar(n, run_times_means[1], yerr=k*run_times_stds[1], capsize=2.0, label='TF CPU')
plt.xlabel('Number of data points')
plt.ylabel('Run time (seconds, Lower is better)')
plt.semilogx()
plt.legend()
plt.show()
| [
"cjekel@gmail.com"
] | cjekel@gmail.com |
d131c57a429d7bc9e3f7bcc03878e32f6db37d3b | 32904d4841d104143ba0f41cc3aeb749e470f546 | /backend/django/apps/memos/migrations/0007_memoattachment_memocomment.py | 38b6513ecf0f5f3e2176e62cf023ff8cb3467139 | [] | no_license | aurthurm/dispatrace-api-vuejs | 20ec5deee015e69bce7a64dc2d89ccae8941b800 | 56d122318af27ff64755fc515345974631d3026f | refs/heads/master | 2023-01-23T23:03:15.438339 | 2020-10-20T22:09:29 | 2020-10-20T22:09:29 | 219,028,985 | 0 | 1 | null | 2022-12-22T18:31:38 | 2019-11-01T17:08:35 | Vue | UTF-8 | Python | false | false | 2,053 | py | # Generated by Django 2.2.6 on 2019-10-25 17:52
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import mptt.fields
class Migration(migrations.Migration):
    """Auto-generated migration creating MemoComment and MemoAttachment."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('memos', '0006_auto_20191024_1130'),
    ]

    operations = [
        # Threaded comments on memos; lft/rght/tree_id/level are the MPTT
        # nested-set bookkeeping columns, and parent links to the comment
        # being replied to.
        migrations.CreateModel(
            name='MemoComment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.TextField()),
                ('timestamp', models.DateTimeField(default=django.utils.timezone.now)),
                ('lft', models.PositiveIntegerField(editable=False)),
                ('rght', models.PositiveIntegerField(editable=False)),
                ('tree_id', models.PositiveIntegerField(db_index=True, editable=False)),
                ('level', models.PositiveIntegerField(editable=False)),
                ('commenter', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)),
                ('memo', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='memocomment_comment', to='memos.Memo')),
                ('parent', mptt.fields.TreeForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='memocomment_sub_comment', to='memos.MemoComment')),
            ],
            options={
                'verbose_name': 'memo comment',
                'verbose_name_plural': 'memo comments',
            },
        ),
        # File attachments linked to a memo (deleted together with it).
        migrations.CreateModel(
            name='MemoAttachment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('memo', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='memos.Memo')),
            ],
        ),
    ]
| [
"aurthurmusendame@gmail.com"
] | aurthurmusendame@gmail.com |
7c75f12faf85cdd86400124d3913a6fab1f7255e | 97f4a8bbd501e03cc0ff463b30cd031a905532fe | /custom_components/yandex_station/humidifier.py | 1322fa524feba59f89e2e2ec8c55e654b39c8fbd | [] | no_license | AlexxIT/YandexStation | 57ced1544745a71d11e12c74a04afdae657019ad | 9966a647d9f1d385ac6f0365b5e0ed0b516686a6 | refs/heads/master | 2023-08-28T18:50:10.197891 | 2023-06-15T09:08:23 | 2023-06-15T09:08:23 | 236,572,107 | 1,018 | 134 | null | 2023-06-15T08:59:28 | 2020-01-27T19:15:27 | Python | UTF-8 | Python | false | false | 6,036 | py | """Support for Yandex Smart Home humidifier."""
import logging
from typing import Any
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant.components.climate.const import SUPPORT_TARGET_HUMIDITY
from homeassistant.components.humidifier import HumidifierEntity
from homeassistant.const import ATTR_STATE
from homeassistant.helpers import entity_platform
from . import CONF_INCLUDE, DATA_CONFIG, DOMAIN, YandexQuasar
_LOGGER = logging.getLogger(__name__)
SERVICE_MUTE = "mute"
SERVICE_IONIZATION = "ionization"
SERVICE_BACKLIGHT = "backlight"
HUMIDIFIER_STATE_USER_SCHEMA = {vol.Required(ATTR_STATE): cv.boolean}
DEVICES = ["devices.types.humidifier"]
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up humidifier from a config entry."""
    # Only expose devices explicitly whitelisted in the integration config,
    # and only those whose Quasar type is a humidifier.
    include = hass.data[DOMAIN][DATA_CONFIG][CONF_INCLUDE]
    quasar = hass.data[DOMAIN][entry.unique_id]
    devices = [
        YandexHumidifier(quasar, device)
        for device in quasar.devices
        if device["name"] in include and device["type"] in DEVICES
    ]
    platform = entity_platform.async_get_current_platform()
    # Register per-entity services for the vendor-specific switches; the
    # last argument names the YandexHumidifier method each service calls.
    platform.async_register_entity_service(
        SERVICE_MUTE, HUMIDIFIER_STATE_USER_SCHEMA, "mute"
    )
    platform.async_register_entity_service(
        SERVICE_IONIZATION, HUMIDIFIER_STATE_USER_SCHEMA, "ionization"
    )
    platform.async_register_entity_service(
        SERVICE_BACKLIGHT, HUMIDIFIER_STATE_USER_SCHEMA, "backlight"
    )
    # True -> request an immediate first state refresh for each entity.
    async_add_entities(devices, True)
# noinspection PyAbstractClass
class YandexHumidifier(HumidifierEntity):
    """Yandex Home humidifier entity"""

    # Cached capability state, refreshed from the cloud by async_update().
    # All values stay None until the first successful refresh.
    _is_on = None
    _min_humidity = None
    _max_humidity = None
    _target_humidity = None
    _precision = None
    _is_muted = None
    _is_ionization_on = None
    _is_backlight_on = None
    # Bitmask of supported features; extended in init_params().
    _supported = 0

    def __init__(self, quasar: YandexQuasar, device: dict) -> None:
        """Initialize entity."""
        self.quasar = quasar
        self.device = device

    @property
    def unique_id(self):
        """Return entity unique id (cloud device id without dashes)."""
        return self.device["id"].replace("-", "")

    @property
    def name(self):
        """Return entity name."""
        return self.device["name"]

    @property
    def is_on(self) -> bool:
        """Return if device is turned on."""
        return self._is_on

    @property
    def min_humidity(self) -> int:
        """Return min humidity."""
        return self._min_humidity

    @property
    def max_humidity(self) -> int:
        """Return max humidity."""
        return self._max_humidity

    @property
    def precision(self) -> int:
        """Return target humidity precision."""
        return self._precision

    @property
    def target_humidity(self) -> int:
        """Return target humidity."""
        return self._target_humidity

    @property
    def is_muted(self) -> bool:
        """Return if device is muted."""
        return self._is_muted

    @property
    def is_ionization_on(self) -> bool:
        """Return if ionization is turned on."""
        return self._is_ionization_on

    @property
    def is_backlight_on(self) -> bool:
        """Return if backlight is turned on."""
        return self._is_backlight_on

    @property
    def supported_features(self):
        """Return supported features."""
        return self._supported

    @property
    def extra_state_attributes(self) -> dict[str, Any]:
        """Return the device specific state attributes."""
        attributes = {
            "is_muted": self.is_muted,
            "is_ionization_on": self.is_ionization_on,
            "is_backlight_on": self.is_backlight_on,
        }
        return attributes

    async def init_params(self, capabilities: dict):
        """Initialize parameters (humidity range) from the cloud payload."""
        for capability in capabilities:
            parameters = capability["parameters"]
            instance = parameters.get("instance")
            if instance == "humidity":
                self._supported |= SUPPORT_TARGET_HUMIDITY
                range_ = parameters["range"]
                self._min_humidity = range_["min"]
                self._max_humidity = range_["max"]
                self._precision = range_["precision"]

    async def async_update(self):
        """Update the entity."""
        data = await self.quasar.get_device(self.device["id"])
        self._attr_available = data["state"] == "online"
        # _is_on stays None until the first refresh, so init_params() only
        # runs once -- on the very first update.
        if self._is_on is None:
            await self.init_params(data["capabilities"])
        for capability in data["capabilities"]:
            if not capability["retrievable"]:
                continue
            # Each capability reports one "instance" (on/humidity/mute/...)
            # and its current value.
            instance = capability["state"]["instance"]
            value = capability["state"]["value"]
            if instance == "on":
                self._is_on = value
            if instance == "humidity":
                self._target_humidity = value
            if instance == "mute":
                self._is_muted = value
            if instance == "ionization":
                self._is_ionization_on = value
            if instance == "backlight":
                self._is_backlight_on = value

    async def async_turn_on(self, **kwargs):
        """Turn on."""
        await self.quasar.device_action(self.device["id"], on=True)

    async def async_turn_off(self, **kwargs):
        """Turn off."""
        await self.quasar.device_action(self.device["id"], on=False)

    async def async_set_humidity(self, humidity):
        """Set humidity."""
        await self.quasar.device_action(self.device["id"], humidity=humidity)

    async def mute(self, state):
        """Mute humidifier."""
        await self.quasar.device_action(self.device["id"], mute=state)

    async def ionization(self, state):
        """Turn on/off ionization."""
        await self.quasar.device_action(self.device["id"], ionization=state)

    async def backlight(self, state):
        """Turn on/off backlight."""
        await self.quasar.device_action(self.device["id"], backlight=state)
| [
"alexey.khit@gmail.com"
] | alexey.khit@gmail.com |
7721dbf166413b9eeee07f742fda689f3e4e3158 | e0980f704a573894350e285f66f4cf390837238e | /.history/streams/blocks_20201029154240.py | 5d26c40dc248b24e75e50c0955ce1170f1062551 | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,772 | py | from django import forms
from wagtail.core import blocks
from wagtail.images.blocks import ImageChooserBlock
from wagtail.contrib.table_block.blocks import TableBlock
# Problem validation
from django.core.exceptions import ValidationError
# NOTE(review): this history snapshot ended with a truncated statement
# "from django.co", which is a SyntaxError and made the whole module
# unimportable; it was removed here.  Restore the intended import if one
# was actually meant.
class TitleBlock(blocks.StructBlock):
    """Centered title text displayed on a page."""

    # Fix: the snapshot passed the misspelled keyword ``elp_text``; Wagtail
    # expects ``help_text``, so the editor hint was never shown.
    text = blocks.CharBlock(
        required=True,
        help_text='Tekst do wyświetlenia',
    )

    class Meta:
        template = 'streams/title_block.html'
        # NOTE(review): 'edycja' is not a stock Wagtail icon name -- confirm a
        # matching icon is registered (the built-in name is 'edit').
        icon = 'edycja'
        label = 'Tytuł'
        help_text = 'Wyśrodkowany tekst do wyświetlenia na stronie.'
class LinkValue(blocks.StructValue):
    """Extra link logic: resolve the effective URL of a Link block value."""

    def url(self) -> str:
        """Prefer the chosen internal page's URL, then the external link,
        then fall back to an empty string."""
        page = self.get('internal_page')
        if page:
            return page.url
        return self.get('external_link') or ''
class Link(blocks.StructBlock):
    """Link with editable text targeting an internal page or external URL."""

    # Visible link text; the default is Polish for "More details".
    link_text = blocks.CharBlock(
        max_length=50,
        default='Więcej szczegółów'
    )
    # Either target may be set; LinkValue.url() resolves the internal page
    # first, then the external link.
    internal_page = blocks.PageChooserBlock(
        required=False
    )
    external_link = blocks.URLBlock(
        required=False
    )

    class Meta:
        # Use LinkValue so templates can call {{ value.url }}.
        value_class = LinkValue
class Card(blocks.StructBlock):
    """Single card: bold title, body text, image and a link."""

    title = blocks.CharBlock(
        max_length=100,
        help_text = 'Pogrubiony tytuł tej karty. Maksymalnie 100 znaków.'
    )
    # NOTE(review): the help text calls this field optional, but
    # required=False is not set -- confirm whether it should be mandatory.
    text = blocks.TextBlock(
        max_length=255,
        help_text='Opcjonalny tekst tej karty. Maksymalnie 255 znaków.'
    )
    image = ImageChooserBlock(
        help_text = 'Obraz zostanie automatycznie przycięty o 570 na 370 pikseli'
    )
    link = Link(help_text = 'Wwybierz link')
class CardsBlock(blocks.StructBlock):
    """Repeating list of standard Card blocks."""

    cards = blocks.ListBlock(
        Card()
    )

    class Meta:
        template = 'streams/card_block.html'
        icon = 'image'
        label = 'Karty standardowe'
class RadioSelectBlock(blocks.ChoiceBlock):
    """ChoiceBlock rendered as radio buttons instead of a select widget."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Swap the default Select widget for RadioSelect, preserving the
        # choices the parent class already attached to the form field.
        self.field.widget = forms.RadioSelect(
            choices=self.field.widget.choices
        )
class ImageAndTextBlock(blocks.StructBlock):
    """Image beside a title/text/link, with selectable left/right layout."""

    image = ImageChooserBlock(help_text='Obraz automatycznie przycięty do rozmiaru 786 na 552 px.')
    # NOTE(review): 'Opraz' in the first choice label below looks like a typo
    # for 'Obraz' -- it is a runtime string, so it is left untouched here.
    image_alignment = RadioSelectBlock(
        choices = (
            ('left','Opraz po lewej stronie'),
            ('right', 'Obraz po prawej stronie'),
        ),
        default = 'left',
        help_text = 'Obraz po lewej stronie, tekst po prawej lub obraz po prawej stronie tekst po lewej.'
    )
    title = blocks.CharBlock(
        max_length=60,
        help_text='Maksymalna długość 60 znaków.'
    )
    text = blocks.CharBlock(
        max_length = 140,
        required = False,
    )
    link = Link()

    class Meta:
        template = 'streams/image_and_text_block.html'
        icon = 'image'
        label = 'Obraz & Tekst'
class CallToActionBlock(blocks.StructBlock):
    """Title plus link prompting the reader to act."""

    title =blocks.CharBlock(
        max_length = 200,
        help_text = 'Maksymalnie 200 znaków.'
    )
    link = Link()

    class Meta:
        template = 'streams/call_to_action_block.html'
        icon = 'plus'
        label = 'Wezwanie do działania'
class PricingTableBlock(TableBlock):
    """Pricing table block (tables should always contain 4 columns)."""

    class Meta:
        template = 'streams/pricing_table_block.html'
        label = 'Tabela cen'
        icon = 'table'
        help_text = 'Twoje tabele z cenami powinny zawierać zawsze 4 kolumny.'
'''
class RichTextWithTitleBlock(blocks.StructBlock):
title = blocks.CharBlock(max_length=50)
context = blocks.RichTextBlock(features=[])
class Meta:
template = 'streams/simple_richtext_block.html'
''' | [
"rucinska.patrycja@gmail.com"
] | rucinska.patrycja@gmail.com |
d84fb94a99b36b6de87d7c2df65643fe96c30f94 | f40ff2ac9d25137230c2a80f74be8fd013e73aca | /utopiantree.py | e8e5b7f386649bb4777219e312191f0aa444d743 | [] | no_license | SurajPatil314/HackerRank-challenges | 805020147e5227498b674b9da4c99a915627882d | 921b1b1d7c0cd4beae54f8833063e16aa33883ea | refs/heads/master | 2021-06-30T16:18:27.431389 | 2021-06-13T22:59:55 | 2021-06-13T22:59:55 | 239,815,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 979 | py | '''
https://www.hackerrank.com/challenges/utopian-tree/problem
The Utopian Tree goes through 2 cycles of growth every year. Each spring, it doubles in height. Each summer, its height increases by 1 meter.
Laura plants a Utopian Tree sapling with a height of 1 meter at the onset of spring. How tall will her tree be after growth cycles?
For example, if the number of growth cycles is , the calculations are as follows:
'''
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the utopianTree function below.
def utopianTree(n):
    """Return the height of a Utopian Tree after n growth cycles.

    The sapling starts at height 1.  Odd-numbered cycles (spring) double
    the height; even-numbered cycles (summer) add one meter.
    """
    height = 1
    for cycle in range(1, n + 1):
        height = height * 2 if cycle % 2 == 1 else height + 1
    return height
if __name__ == '__main__':
    # HackerRank harness: results are written to the file named by the
    # OUTPUT_PATH environment variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    t = int(input())  # number of test cases

    for t_itr in range(t):
        n = int(input())  # growth cycles for this case

        result = utopianTree(n)

        fptr.write(str(result) + '\n')

    fptr.close()
| [
"spatil2@umbc.edu"
] | spatil2@umbc.edu |
a226024dcb719920028ef52f87364fe61ccdb0d3 | 235fb362b5af1f7dbd90dc3819fe63f18e074e9d | /learn_django/test_Create/test_Create/wsgi.py | a0497b8ae5db66fb70718326dbcd16b5b0ed7552 | [] | no_license | cener-1999/learn_about_python | 74c9b8c6a546224261d5577183a946a78ca7e84f | 86cfc0a5621f86fc8a1885a39847d40b33137c49 | refs/heads/master | 2023-04-30T06:38:34.459506 | 2021-05-18T14:20:29 | 2021-05-18T14:20:29 | 368,473,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for test_Create project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings (if not already set), then build the
# module-level WSGI callable the server looks for.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_Create.settings')

application = get_wsgi_application()
| [
"1065802928@qq.com"
] | 1065802928@qq.com |
9cbc729eee969f3724cdb8e89268bcf22b9fc0eb | a25f7829512f09d9a25679b2ccbf0a4d970e8a44 | /restiro/generators/__init__.py | 90572410a7aa5439101b9ab74141c7cfb5cd6034 | [
"MIT"
] | permissive | meyt/restiro | 1adaa27e6818ed0de29529b4e76c3829e376e23c | 016ffe386656eda9fea490f348e1d5408a1e9608 | refs/heads/master | 2022-01-17T01:11:51.943211 | 2019-07-24T06:43:35 | 2019-07-24T06:43:35 | 95,980,903 | 0 | 0 | MIT | 2019-04-22T07:28:56 | 2017-07-01T18:23:55 | Python | UTF-8 | Python | false | false | 136 | py | from .base import BaseGenerator
from .markdown import MarkdownGenerator
from .json import JSONGenerator
from .mock import MockGenerator
| [
"pasd3000@gmail.com"
] | pasd3000@gmail.com |
19f664fb896775e20e494f366d1f1424d2c39aa1 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/scale_20200709203334.py | f0e58bcaf9e76b14a3ab0ae93d883663fbab5442 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 790 | py | def balancing(strArr):
sides = strArr[0][1:-1].split(", ")
left = int(sides[0])
right = int(sides[1])
# here we convert every element in the array to a int as we ignore the first bracket and last bracket
# then here we split where there is a comma and space
weights = [int(x) for x in strArr[1][1:-1].split(", ")]
# we loop through the array
for i in range(len(weights)):
if(left + weights[i] == right) or (right + weights[i] ==left):
print('weights',weights[i])
for i in range(len(weights)):
for j in range(i+1,len(weights)):
if (left + weights[i] + weights[j] == right ) or (right + weights[i]+weights[j] == left) /
or (left + weights[i] == right):
balancing(["[5, 9]", "[1, 2, 6, 7]"]) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
d966f966eff2bfe0046d1c8f0c333cd97e208c4b | 4836c4349bd65944b48fef01e2d2b7149479573c | /bin/pip3.6 | b991042ffd8e1d511edfe2717f2d425b404b7251 | [] | no_license | ogol254/M1 | c0b634e2a735002497bdf0114f656d3c12c65194 | 8761a4228ce91961922f0e722dba8191fce73a9b | refs/heads/master | 2020-03-18T17:59:20.979639 | 2018-05-27T16:46:07 | 2018-05-27T16:46:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | 6 | #!/Users/Mcogol/venv/M1/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3.6'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3.6')()
)
| [
"abramogol@gmail.com"
] | abramogol@gmail.com |
fd48fb9af25fb41caa09a6fdceb03bf2615e8c43 | 0f1b67ee77adab881409b9bea17dfbc6f8c15c27 | /backend/restless_sunset_27759/settings.py | 12d0ff62cfdea9c0787f83e5a3e5afd6a3e75538 | [] | no_license | crowdbotics-apps/restless-sunset-27759 | e7d7279616d1528815c4e33ca55139f5ac5a150c | 8ae02190c9fdb515ff75bd9e33523371d97bb3d8 | refs/heads/master | 2023-05-31T06:22:27.536695 | 2021-06-06T00:40:25 | 2021-06-06T00:40:25 | 374,238,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,129 | py | """
Django settings for restless_sunset_27759 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'modules',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'restless_sunset_27759.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'restless_sunset_27759.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")

# S3 storage is enabled only when all four settings above are present
# (empty string is falsy, so any missing env var disables it).
USE_S3 = (
    AWS_ACCESS_KEY_ID and
    AWS_SECRET_ACCESS_KEY and
    AWS_STORAGE_BUCKET_NAME and
    AWS_STORAGE_REGION
)

if USE_S3:
    AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
    AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}  # one day
    AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
    AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
    AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
    DEFAULT_FILE_STORAGE = env.str(
        "DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
    )

# Local media settings (used as-is when S3 is disabled).
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
# Fall back to the console email backend in DEBUG, or whenever the SendGrid
# credentials are missing (warn in the latter case since emails are dropped).
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
    # output email to console instead of sending
    if not DEBUG:
        logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
d010596cc59c1d09eea01fdb646b958bd1c2b051 | c9fe05f893deff75232aabca4e877c144972249a | /arcpyenv/arcgispro-py3-clone/Lib/site-packages/osgeo/samples/tigerpoly.py | 15a90c3c88116baf036cef900fe7a427f5323cb2 | [
"Python-2.0"
] | permissive | SherbazHashmi/HackathonServer | 4d1dc7f0122a701a0f3a17787d32efe83bc67601 | a874fe7e5c95196e4de68db2da0e2a05eb70e5d8 | refs/heads/master | 2022-12-26T06:46:33.893749 | 2019-11-03T10:49:47 | 2019-11-03T10:49:47 | 218,912,149 | 3 | 3 | null | 2022-12-11T11:52:37 | 2019-11-01T04:16:38 | Python | UTF-8 | Python | false | false | 7,060 | py | #!/usr/bin/env python
###############################################################################
# $Id$
#
# Project: OGR Python samples
# Purpose: Assemble TIGER Polygons.
# Author: Frank Warmerdam, warmerdam@pobox.com
#
###############################################################################
# Copyright (c) 2003, Frank Warmerdam <warmerdam@pobox.com>
# Copyright (c) 2009, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
from osgeo import ogr
from osgeo import osr
#############################################################################
class Module:
    """Per-TIGER-module state used while assembling polygons."""

    def __init__(self):
        # Chain geometry keyed by TLID (TIGER line id).
        self.lines = dict()
        # Lists of bounding TLIDs keyed by POLYID (polygon id).
        self.poly_line_links = dict()
#############################################################################
def Usage():
    """Print command-line usage and terminate with exit status 1."""
    for line in ('Usage: tigerpoly.py infile [outfile].shp', ''):
        print(line)
    sys.exit(1)
#############################################################################
# Argument processing.
infile = None
outfile = None

# Positional arguments: first is the input TIGER dataset, second (optional)
# the output shapefile; any extra argument is a usage error.
i = 1
while i < len(sys.argv):
    arg = sys.argv[i]

    if infile is None:
        infile = arg

    elif outfile is None:
        outfile = arg

    else:
        Usage()

    i = i + 1

if outfile is None:
    outfile = 'poly.shp'

if infile is None:
    Usage()
#############################################################################
# Open the datasource to operate on.
# Open the source TIGER dataset read-only.
ds = ogr.Open(infile, update=0)

poly_layer = ds.GetLayerByName('Polygon')

#############################################################################
# Create output file for the composed polygons.

nad83 = osr.SpatialReference()
nad83.SetFromUserInput('NAD83')

shp_driver = ogr.GetDriverByName('ESRI Shapefile')
# Remove any previous output so CreateDataSource starts fresh.
shp_driver.DeleteDataSource(outfile)

shp_ds = shp_driver.CreateDataSource(outfile)

shp_layer = shp_ds.CreateLayer('out', geom_type=ogr.wkbPolygon,
                               srs=nad83)

# Mirror every attribute field of the source Polygon layer onto the output.
src_defn = poly_layer.GetLayerDefn()
poly_field_count = src_defn.GetFieldCount()

for fld_index in range(poly_field_count):
    src_fd = src_defn.GetFieldDefn(fld_index)

    fd = ogr.FieldDefn(src_fd.GetName(), src_fd.GetType())
    fd.SetWidth(src_fd.GetWidth())
    fd.SetPrecision(src_fd.GetPrecision())
    shp_layer.CreateField(fd)
#############################################################################
# Read all features in the line layer, holding just the geometry in a hash
# for fast lookup by TLID.
line_layer = ds.GetLayerByName('CompleteChain')

line_count = 0
modules_hash = {}

feat = line_layer.GetNextFeature()
geom_id_field = feat.GetFieldIndex('TLID')
tile_ref_field = feat.GetFieldIndex('MODULE')

while feat is not None:
    geom_id = feat.GetField(geom_id_field)
    tile_ref = feat.GetField(tile_ref_field)

    # Group chains by TIGER module, creating the module record on first
    # sight.  Was a bare "except:", which would also have hidden unrelated
    # errors; only a missing key is expected here.
    try:
        module = modules_hash[tile_ref]
    except KeyError:
        module = Module()
        modules_hash[tile_ref] = module

    # Keep a clone of the geometry: the feature is destroyed just below.
    module.lines[geom_id] = feat.GetGeometryRef().Clone()
    line_count = line_count + 1

    feat.Destroy()
    feat = line_layer.GetNextFeature()

print('Got %d lines in %d modules.' % (line_count, len(modules_hash)))
#############################################################################
# Read all polygon/chain links and build a hash keyed by POLY_ID listing
# the chains (by TLID) attached to it.
link_layer = ds.GetLayerByName('PolyChainLink')

feat = link_layer.GetNextFeature()
geom_id_field = feat.GetFieldIndex('TLID')
tile_ref_field = feat.GetFieldIndex('MODULE')
lpoly_field = feat.GetFieldIndex('POLYIDL')
rpoly_field = feat.GetFieldIndex('POLYIDR')

link_count = 0

while feat is not None:
    module = modules_hash[feat.GetField(tile_ref_field)]

    tlid = feat.GetField(geom_id_field)
    lpoly_id = feat.GetField(lpoly_field)
    rpoly_id = feat.GetField(rpoly_field)

    # A chain with the same polygon on both sides is interior; skip it.
    if lpoly_id == rpoly_id:
        feat.Destroy()
        feat = link_layer.GetNextFeature()
        continue

    # Attach the chain to both bounding polygons.  setdefault() replaces
    # the original try/bare-except pattern, which could mask real errors.
    module.poly_line_links.setdefault(lpoly_id, []).append(tlid)
    module.poly_line_links.setdefault(rpoly_id, []).append(tlid)

    link_count = link_count + 1

    feat.Destroy()
    feat = link_layer.GetNextFeature()

print('Processed %d links.' % link_count)
#############################################################################
# Process all polygon features.
feat = poly_layer.GetNextFeature()
tile_ref_field = feat.GetFieldIndex('MODULE')
polyid_field = feat.GetFieldIndex('POLYID')
poly_count = 0
degenerate_count = 0
while feat is not None:
module = modules_hash[feat.GetField(tile_ref_field)]
polyid = feat.GetField(polyid_field)
tlid_list = module.poly_line_links[polyid]
link_coll = ogr.Geometry(type=ogr.wkbGeometryCollection)
for tlid in tlid_list:
geom = module.lines[tlid]
link_coll.AddGeometry(geom)
try:
poly = ogr.BuildPolygonFromEdges(link_coll)
if poly.GetGeometryRef(0).GetPointCount() < 4:
degenerate_count = degenerate_count + 1
poly.Destroy()
feat.Destroy()
feat = poly_layer.GetNextFeature()
continue
# print poly.ExportToWkt()
# feat.SetGeometryDirectly( poly )
feat2 = ogr.Feature(feature_def=shp_layer.GetLayerDefn())
for fld_index in range(poly_field_count):
feat2.SetField(fld_index, feat.GetField(fld_index))
feat2.SetGeometryDirectly(poly)
shp_layer.CreateFeature(feat2)
feat2.Destroy()
poly_count = poly_count + 1
except:
print('BuildPolygonFromEdges failed.')
feat.Destroy()
feat = poly_layer.GetNextFeature()
if degenerate_count:
print('Discarded %d degenerate polygons.' % degenerate_count)
print('Built %d polygons.' % poly_count)
#############################################################################
# Cleanup
shp_ds.Destroy()
ds.Destroy()
| [
"sherbaz.hashmi@gmail.com"
] | sherbaz.hashmi@gmail.com |
532b32bc9e36d22b7092537e44b75a301a3bc920 | 17df5351498798ad348ee1ea3a26835f6ef7de49 | /linak_dpg_bt/synchronized.py | 8d5623594ea79015df0e03b4386414fcf7a03cd6 | [
"MIT"
] | permissive | anetczuk/linak_bt_desk | 8e4c8f514d6671af18962b0e13ecda3210778421 | 7ac5fe1b69638976326842b27d76e52f0cc958fd | refs/heads/master | 2023-05-28T04:52:09.859883 | 2023-05-12T19:58:12 | 2023-05-12T19:58:12 | 142,626,557 | 20 | 5 | null | 2018-07-27T21:52:13 | 2018-07-27T21:52:13 | null | UTF-8 | Python | false | false | 2,547 | py | '''
Implementation of the '@synchronized' method decorator. It mirrors the
functionality of Java's 'synchronized' keyword.
It accepts one optional argument -- name of lock field declared within object.
Usage examples:
@synchronized
def send_dpg_write_command(self, dpgCommandType, data):
pass
@synchronized()
def send_dpg_write_command(self, dpgCommandType, data):
pass
@synchronized("myLock")
def send_dpg_write_command(self, dpgCommandType, data):
pass
'''
import threading
from functools import wraps
def dirprint(var):
    """Print every attribute of *var* as "name : value", one per line.

    The '__globals__' attribute is elided, since printing it would dump an
    entire module namespace.
    """
    for attr in dir(var):
        if attr == "__globals__":
            print(attr, ": --globals--")
        else:
            print(attr, ":", getattr(var, attr))
def extractSelf(func, decorator, *args):
    """Return the probable 'self' object from *args*, or None.

    The first positional argument qualifies only when it exposes an
    attribute named after *func* whose bound method is *decorator*.
    """
    if not args:
        return None
    # 'self' is always passed as the first positional argument.
    candidate = args[0]
    attr_name = func.__name__
    if not hasattr(candidate, attr_name):
        return None
    # The object has a same-named method -- accept it only when that bound
    # method actually wraps our decorator.
    bound = getattr(candidate, attr_name)
    return candidate if checkMethod(decorator, bound) else None
def checkMethod(func, method):
    """Return True when *method* is a bound method backed by *func*."""
    return method.__func__ == func
##
## Definition of function decorator
##
def synchronized_with_arg(lock_name = None):
    """Build a method decorator that serializes calls through a per-instance
    reentrant lock stored on ``self`` under *lock_name* (default
    ``"_methods_lock"``). The lock is created lazily on first call."""
    attr = "_methods_lock" if lock_name is None else lock_name

    def synced_method(func):
        # Every decorated method gets its own wrapper closure.
        @wraps(func)
        def decorator(self, *args, **kws):
            if hasattr(self, attr):
                lock = getattr(self, attr)
            else:
                lock = threading.RLock()
                setattr(self, attr, lock)
            with lock:
                return func(self, *args, **kws)

        return decorator

    return synced_method
def synchronized(lock_name = None):
    """Decorator entry point usable with or without parentheses.

    ``@synchronized`` passes the function itself as *lock_name*;
    ``@synchronized()`` / ``@synchronized("name")`` pass an optional lock
    attribute name instead.
    """
    if callable(lock_name):
        # Bare usage: lock_name actually holds the decorated function.
        return synchronized_with_arg()(lock_name)
    return synchronized_with_arg(lock_name)
| [
"anetczuk@o2.pl"
] | anetczuk@o2.pl |
4dde1224a57efff18e13fe73847ae88291ab6578 | 6ad700a44e2d99d5e66115f10d133451c0e860ee | /yyy.py | 0dbff290fe1ee91807ec39a61e8807d0d131f900 | [] | no_license | suganthicj/yyy | 992388db4a5184ba4779a5e39e8a751ef75333c8 | f036e67f1874edc76c4df400275a13737a4e6694 | refs/heads/master | 2020-06-21T02:05:39.750322 | 2019-07-17T05:07:44 | 2019-07-17T05:07:44 | 197,318,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | def isComposite(n):
# Corner cases
if (n <= 1):
return False
if (n <= 3):
return False
# This is checked so that we can skip
# middle five numbers in below loop
if (n % 2 == 0 or n % 3 == 0):
return True
i = 5
while(i * i <= n):
if (n % i == 0 or n % (i + 2) == 0):
return True
i = i + 6
return False
# Driver Program to test above function
# 11 is prime (not composite) -> prints "false"; 15 = 3 * 5 -> prints "true".
# NOTE(review): conditional expression used purely for its side effect.
print("true") if(isComposite(11)) else print("false")
print("true") if(isComposite(15)) else print("false")
| [
"noreply@github.com"
] | suganthicj.noreply@github.com |
028ebf4e6b1845fbf22d97ccc8aada2ef4353edf | efb60258270c70d0b16581e8b6fe32001015b16b | /modules/services/dbservices/media/save_dir_audio_service.py | c7d5ffbcfc1efaa4699c5873b82a1c148fffe38e | [
"LicenseRef-scancode-public-domain"
] | permissive | signeus/API-Web | bd48177201007e2091828458da3572fde4f24755 | b9fe99465a178666299a079f99a42dafc80c5bb7 | refs/heads/master | 2021-01-11T20:30:50.964928 | 2017-05-19T09:31:01 | 2017-05-19T09:31:01 | 79,131,449 | 1 | 1 | null | 2017-04-07T08:42:55 | 2017-01-16T15:25:05 | Python | UTF-8 | Python | false | false | 493 | py | # -*- coding: utf-8 -*-
from services.interfaces.i_service import IService
class SaveDirAudioService(IService):
    """Service that persists an incoming audio payload via the core.

    Recognized parameters: ``path`` (target directory, defaults to
    ``"unknown/"``), ``id`` (stringified into the file name) and ``data``
    (the audio payload).
    """

    def __init__(self, core, parameters):
        super(SaveDirAudioService, self).__init__(core, parameters)

    def run(self):
        params = self.parameters
        payload = {
            "path": params.get("path", "unknown/"),
            "filename": str(params.get("id", "")),
            "data": params.get("data", ""),
        }
        # Delegate the actual write to the core's internal 'saveAudio' op.
        return self.core.InternalOperation("saveAudio", payload)
| [
"kev.ms93@gmail.com"
] | kev.ms93@gmail.com |
18ea2824a677d14685ff06a804a57f4f1f3a119c | 9da47968b4a023c33119d040f4d12942047dccf7 | /recipients/code/dashboard/dashboard/urls.py | 42944346dd97d449ad24d0aec2979fe9c49c0230 | [] | no_license | ajgara/whoscorecards | 05597de11793de24fcc6109ab1a75ebc49757693 | 8554e154f666fa4af24bf0782e244c1f3c179dd0 | refs/heads/master | 2021-01-24T22:28:05.225717 | 2015-10-22T21:44:44 | 2015-10-22T21:44:44 | 25,478,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | from django.conf.urls import patterns, include, url
from django.conf import settings
from django.views.static import serve
from django.contrib import admin
# Register all ModelAdmin classes from installed apps with the admin site.
admin.autodiscover()

urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'dashboard.views.home', name='home'),
    url(r'^admin/', include(admin.site.urls)),  # Django admin interface
    url(r'^oda/', include('oda.urls')),  # routes delegated to the 'oda' app
)
#if settings.DEBUG:
#    urlpatterns += patterns('',
#        (r'^site_media/(?P<path>.*)$', serve, {'document_root': '/path/to/media'}),
#    )
| [
"adi@burgercom.co.za"
] | adi@burgercom.co.za |
b6ed6298710eb2c6a421aa64540abdca56d7c85b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_galleons.py | 8334bd189fb9d336e509df65b4441f348038d30e | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py |
from xai.brain.wordbase.nouns._galleon import _GALLEON
# class header
class _GALLEONS(_GALLEON, ):
    """Plural noun entry for "galleon"; all lexical data comes from _GALLEON."""

    def __init__(self,):
        _GALLEON.__init__(self)
        self.name = "GALLEONS"  # canonical upper-case word name
        self.specie = 'nouns'  # part-of-speech bucket
        self.basic = "galleon"  # singular/base form
        self.jsondata = {}  # extra word metadata (none for this entry)
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
41415a7eec0ea40d6e56743ac77ede744174333b | 8f5f0c3ef83fdd482387973149738f6178477a42 | /medium/arithmetic/find_the_duplicate_number.py | a39dfb3b6a44397ae279a9a249f76971a33f567d | [] | no_license | nicokuzak/leetcode | 79a5771ad83786cc7dbfd790f8fffcf1ce58794e | 39b0235dc429a97a7cba0689d44641a6af6d7a32 | refs/heads/main | 2023-04-06T21:02:09.553185 | 2021-04-14T22:21:20 | 2021-04-14T22:21:20 | 336,847,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | """Given an array of integers nums containing n + 1 integers where each integer is in the range [1, n] inclusive.
There is only one repeated number in nums, return this repeated number.
Example 1:
Input: nums = [1,3,4,2,2]
Output: 2
Example 2:
Input: nums = [3,1,3,4,2]
Output: 3
Example 3:
Input: nums = [1,1]
Output: 1
Example 4:
Input: nums = [1,1,2]
Output: 1
Constraints:
2 <= n <= 3 * 104
nums.length == n + 1
1 <= nums[i] <= n
All the integers in nums appear only once except for precisely one integer which appears two or more times."""
from typing import List
class Solution:
    def findDuplicate(self, nums: List[int]) -> int:
        """Return the single repeated value in *nums*.

        *nums* holds n + 1 integers drawn from [1, n], with exactly one
        value appearing two or more times.  The surplus of the full sum
        over the distinct-value sum equals the duplicate times its extra
        occurrence count, so dividing by the length difference recovers
        it.  O(n) time, O(n) extra space.
        """
        total, length = sum(nums), len(nums)
        distinct = set(nums)
        # Floor division is exact here (the numerator is always an integer
        # multiple of the denominator); the original float division
        # int((a - b) / (c - d)) could lose precision on huge inputs.
        return (total - sum(distinct)) // (length - len(distinct))
| [
"nicokuzak95@gmail.com"
] | nicokuzak95@gmail.com |
0f9114ffc547c5c89058181b18e5f8eec218ea51 | d99572b009c3c519cee6fcaf0ad3f9cd2d7a13ae | /deeplearn8.py | f460c0b3dcaedc855e2cbcd467b91ba9d08a8cc2 | [] | no_license | bhatnagaranshika02/Deep-Learning | 10a3f6794bf1265222c8b78555398aea7bbca34e | de851c909fb40f17d07999a65cc269d6b5ee6ff5 | refs/heads/master | 2023-01-27T20:09:37.500589 | 2020-12-04T23:43:00 | 2020-12-04T23:43:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 211 | py | import numpy as np
weights = np.array([1, 2])
input_data = np.array([3, 4])
target = 6
learning_rate = 0.01
preds = (weights*input_data).sum()
error = preds - target
slope = input_data * error * 2
print(slope)
| [
"bhatnagaranshika02@gmail.com"
] | bhatnagaranshika02@gmail.com |
2cb541b98ffac5f54ae764f99dae824b9c55bf17 | d0c4e3b53310c291ff1faf391b7240cb41ae2a31 | /tensorflow_probability/python/internal/auto_batching/xla.py | 575aea285a952da733aaa5254fe4bd391738433e | [
"Apache-2.0"
] | permissive | harryprince/probability | 0696c47d8f78a4343ebdf7a7a41280a08cec34ce | 9439c3d04b4d5e60b8cf721cc5a1dbfac73605d2 | refs/heads/master | 2020-06-30T20:04:12.874945 | 2019-08-06T22:38:34 | 2019-08-06T22:39:36 | 200,938,780 | 0 | 1 | Apache-2.0 | 2019-08-06T23:42:01 | 2019-08-06T23:42:01 | null | UTF-8 | Python | false | false | 2,073 | py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""XLA utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
__all__ = ['compile_nested_output']
def compile_nested_output(f, compile_fn=None):
"""Wraps f with a `tpu.rewrite` or `xla.compile`, propagates output structure.
`xla.compile` insists `f` output a flat list of `Tensor`s or `Op`s, but
tolerates nested input arguments. Here, we capture the output structure in
order to propagate it.
Args:
f: Callable to compile, may accept/return nested inputs/outputs.
compile_fn: The function to use to compile, i.e. `xla.compile` or
`tpu.rewrite`. Accepts two args, `f` and `inputs`.
Returns:
g: Callable wrapping `f` which returns XLA-compiled, nested outputs.
"""
def _wrapper(*inputs): # pylint:disable=missing-docstring
nest = tf.compat.v2.nest
struct = [None]
def _flattened(*inputs):
result = f(*inputs)
flat = nest.flatten(result)
# Ick: Side-effect. Ideally we could push output nest support into
# tpu.rewrite / xla.compile. b/121383831
struct[0] = nest.pack_sequence_as(result, [1] * len(flat))
return flat
res = compile_fn(_flattened, inputs)
if struct[0] is None:
raise ValueError('Expected nest structure in struct[0]')
return nest.pack_sequence_as(struct[0], res)
return _wrapper
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
e8c0b63502c01b7f908df1fb3f7f68e3397ca2c2 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_reshuffles.py | d4d2867c23fe73fde20033cb9e9460d4dcd34b5c | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py |
from xai.brain.wordbase.verbs._reshuffle import _RESHUFFLE
# class header
class _RESHUFFLES(_RESHUFFLE, ):
    """Third-person verb form entry for "reshuffle"; data from _RESHUFFLE."""

    def __init__(self,):
        _RESHUFFLE.__init__(self)
        self.name = "RESHUFFLES"  # canonical upper-case word name
        self.specie = 'verbs'  # part-of-speech bucket
        self.basic = "reshuffle"  # base/infinitive form
        self.jsondata = {}  # extra word metadata (none for this entry)
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
fa4131b83b9bc2e159026691e37231865d73e8ba | 5930f323d96e7ed45c01fef63b100e1ad220f764 | /catalyst/contrib/dl/callbacks/cutmix_callback.py | 642279ee763e5e3b56a96d047a01709ac4c464d5 | [
"Apache-2.0"
] | permissive | saswat0/catalyst | 8cb91c2392bccdbdd318544e6861e6fe6ac39b33 | a35297ecab8d1a6c2f00b6435ea1d6d37ec9f441 | refs/heads/master | 2023-04-05T00:43:29.124864 | 2020-06-18T05:41:33 | 2020-06-18T05:41:33 | 272,268,902 | 2 | 0 | Apache-2.0 | 2020-06-18T05:41:34 | 2020-06-14T19:24:04 | null | UTF-8 | Python | false | false | 4,315 | py | from typing import List
import numpy as np
import torch
from catalyst.core.callbacks import CriterionCallback
from catalyst.core.runner import IRunner
class CutmixCallback(CriterionCallback):
    """
    Callback to do Cutmix augmentation that has been proposed in
    `CutMix: Regularization Strategy to Train Strong Classifiers
    with Localizable Features`_.

    .. warning::
        :class:`catalyst.contrib.dl.callbacks.CutmixCallback` is inherited from
        :class:`catalyst.dl.CriterionCallback` and does its work.
        You may not use them together.

    .. _CutMix\: Regularization Strategy to Train Strong Classifiers
        with Localizable Features: https://arxiv.org/abs/1905.04899
    """

    def __init__(
        self,
        fields: List[str] = ("features",),
        alpha=1.0,
        on_train_only=True,
        **kwargs
    ):
        """
        Args:
            fields (List[str]): list of features which must be affected.
            alpha (float): beta distribution parameter.
            on_train_only (bool): Apply to train only.
                So, if on_train_only is True, use a standard output/metric
                for validation.
        """
        assert (
            len(fields) > 0
        ), "At least one field for CutmixCallback is required"
        assert alpha >= 0, "alpha must be >=0"
        super().__init__(**kwargs)
        self.on_train_only = on_train_only
        self.fields = fields
        self.alpha = alpha
        self.lam = 1  # mixing coefficient for the current batch
        self.index = None  # permutation pairing each sample with its partner
        self.is_needed = True  # False on loaders where augmentation is off

    def _compute_loss(self, runner: IRunner, criterion):
        """Computes loss.

        If self.is_needed is ``False`` then falls back to the parent
        implementation, otherwise computes the CutMix-mixed loss.

        Args:
            runner (IRunner): current runner
            criterion: that is used to compute loss
        """
        if not self.is_needed:
            # NOTE(review): delegates to the parent's ``_compute_loss_value``
            # although this override is named ``_compute_loss`` -- confirm
            # which hook name CriterionCallback actually dispatches through.
            return super()._compute_loss_value(runner, criterion)

        pred = runner.output[self.output_key]
        y_a = runner.input[self.input_key]
        y_b = runner.input[self.input_key][self.index]
        # Standard CutMix loss: mix the targets with the same lambda that
        # was used to mix the pixels.
        loss = self.lam * criterion(pred, y_a) + (1 - self.lam) * criterion(
            pred, y_b
        )
        return loss

    def _rand_bbox(self, size, lam):
        """
        Generates top-left and bottom-right coordinates of the box
        of the given size.

        Args:
            size: size of the box
            lam: lambda parameter

        Returns:
            top-left and bottom-right coordinates of the box
        """
        w = size[2]
        h = size[3]
        cut_rat = np.sqrt(1.0 - lam)
        # Plain int() instead of np.int, which is removed in NumPy >= 1.24
        # (same truncation semantics).
        cut_w = int(w * cut_rat)
        cut_h = int(h * cut_rat)

        cx = np.random.randint(w)
        cy = np.random.randint(h)

        bbx1 = np.clip(cx - cut_w // 2, 0, w)
        bby1 = np.clip(cy - cut_h // 2, 0, h)
        bbx2 = np.clip(cx + cut_w // 2, 0, w)
        bby2 = np.clip(cy + cut_h // 2, 0, h)
        return bbx1, bby1, bbx2, bby2

    def on_loader_start(self, runner: IRunner) -> None:
        """Checks if it is needed for the loader.

        Args:
            runner (IRunner): current runner
        """
        self.is_needed = not self.on_train_only or runner.is_train_loader

    def on_batch_start(self, runner: IRunner) -> None:
        """Mixes data according to Cutmix algorithm.

        Args:
            runner (IRunner): current runner
        """
        if not self.is_needed:
            return

        if self.alpha > 0:
            self.lam = np.random.beta(self.alpha, self.alpha)
        else:
            self.lam = 1

        self.index = torch.randperm(runner.input[self.fields[0]].shape[0])
        # Tensor.to() is out-of-place; the original discarded its result,
        # silently leaving the permutation on the CPU. Keep the returned
        # tensor so it lives on the runner's device.
        self.index = self.index.to(runner.device)

        bbx1, bby1, bbx2, bby2 = self._rand_bbox(
            runner.input[self.fields[0]].shape, self.lam
        )
        # Paste the partner samples' patch into every configured field.
        for f in self.fields:
            runner.input[f][:, :, bbx1:bbx2, bby1:bby2] = runner.input[f][
                self.index, :, bbx1:bbx2, bby1:bby2
            ]
        # Correct lambda to the exact pixel ratio of the pasted box.
        self.lam = 1 - (
            (bbx2 - bbx1)
            * (bby2 - bby1)
            / (
                runner.input[self.fields[0]].shape[-1]
                * runner.input[self.fields[0]].shape[-2]
            )
        )
__all__ = ["CutmixCallback"]
| [
"noreply@github.com"
] | saswat0.noreply@github.com |
5fdb64d4234410647093b1bb7411e27ec879697f | 1141cd4aeffafe496bb7d8a1399ca7c8445edd6e | /tests/ui_tests/test_ui_config_hell.py | 876ce1e478d23dcec405f796a0f6e07036954323 | [
"Apache-2.0"
] | permissive | amleshkov/adcm | d338c3b7c51e38ffe9a0b2715c85e54bed0c4f46 | e1c67e3041437ad9e17dccc6c95c5ac02184eddb | refs/heads/master | 2020-11-30T15:35:57.456194 | 2019-12-16T20:27:06 | 2019-12-16T20:27:06 | 230,432,278 | 0 | 0 | NOASSERTION | 2019-12-27T11:30:23 | 2019-12-27T11:30:22 | null | UTF-8 | Python | false | false | 1,776 | py | import os
import pytest
from adcm_pytest_plugin.utils import get_data_dir
# pylint: disable=W0611, W0621
from tests.ui_tests.app.app import ADCMTest
from tests.ui_tests.app.pages import Configuration, LoginPage
DATADIR = get_data_dir(__file__)
BUNDLES = os.path.join(os.path.dirname(__file__), "../stack/")
@pytest.fixture(scope='function')
def ui_hell_fs(sdk_client_fs):
    """Upload the test bundle, create a cluster and return its 'ui_config_hell' service."""
    bundle = sdk_client_fs.upload_from_fs(DATADIR)
    cluster = bundle.cluster_create(name='my cluster')
    cluster.service_add(name='ui_config_hell')
    service = cluster.service(name="ui_config_hell")
    return service
@pytest.fixture()
def app(adcm_fs):
    # Wrap the running ADCM instance in the Selenium-based UI test helper.
    return ADCMTest(adcm_fs)
@pytest.fixture()
def login(app):
    """Open the ADCM UI and authenticate as the default admin user."""
    app.driver.get(app.adcm.url)
    # Local variable intentionally shadows the fixture name; only the side
    # effect (being logged in) matters to dependents.
    login = LoginPage(app.driver)
    login.login("admin", "admin")
@pytest.fixture()
def prototype_display_names(ui_hell_fs):
    """Return (service display name, set of config-field display names) from the prototype."""
    display_header_name = ui_hell_fs.display_name
    display_names = {config['display_name'] for config in ui_hell_fs.prototype().config}
    return display_header_name, display_names
@pytest.fixture()
def ui_display_names(login, app, ui_hell_fs):
    """Open the service configuration page and collect the display names it renders."""
    app.driver.get("{}/cluster/{}/service/{}/config".format
                   (app.adcm.url, ui_hell_fs.cluster_id, ui_hell_fs.service_id))
    ui_config = Configuration(app.driver)
    return ui_config.get_display_names()
def test_display_names(prototype_display_names, ui_display_names):
    """Scenario:
    1. Get Service configuration
    2. Get display names from UI
    3. Check that config name in prototype is correct
    4. Check that in UI we have full list of display names from prototype
    """
    assert prototype_display_names[0] == "UI Config Hell"
    # Every name rendered in the UI must exist in the bundle prototype.
    for d_name in ui_display_names:
        assert d_name in prototype_display_names[1]
| [
"cab@arenadata.io"
] | cab@arenadata.io |
3fa2c83c546500af96324893edd0add3698409b3 | 1b94aae63500b6ff94b0446d01c3c9bee385fad2 | /.history/chandori/account/views_20210825231017.py | 962f5084c5591398941e7b67df6a489f3776d996 | [] | no_license | miracle3070/chandori | 71389c2a9df76c242a5895c2c23d4394220f9c8e | b01d1eaa1d9c0d12d7abdc8f164039bcd9c42925 | refs/heads/master | 2023-08-18T11:46:11.303934 | 2021-09-28T19:23:22 | 2021-09-28T19:23:22 | 393,949,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,292 | py | from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.forms import UserChangeForm
from django.utils import timezone
from .models import *
from .models import BankAccount
from .forms import CustomUserChangeForm
from django.contrib import messages
from django.contrib.auth.decorators import login_required
@login_required
def edit(request):
if request.method == 'GET':
return render(request, 'edit.html')
elif request.method == 'POST':
user_change_form = CustomUserChangeForm(request.POST, instance = request.user)
user_change_form.nickname = request.POST.get('nickname')
user_change_form.age = int(request.POST.get('age'))
user_change_form.job = request.POST.get('job')
user_change_form.income = int(request.POST.get('income'))
user_change_form.save()
messages.success(request, '회원정보가 수정되었습니다.')
return render(request, 'edit.html')
def edit_bank(request):
if request.method == 'GET':
return render(request, 'add_Account.html')
elif request.method == 'POST':
add_Account = BankAccount()
add_Account.user = CustomUser.objects.get(pk=1)
add_Account.account_num = request.POST.get('account_num')
add_Account.bank = request.POST.get('bank')
add_Account.balance = request.POST.get('balance')
add_Account.save()
return render(request, 'edit.html', {add_Account:'add_Account'})
def login_view(request):
error_msg = ""
if request.method == "POST":
username = request.POST.get('username')
password = request.POST.get('password')
if username == "" or password == "":
error_msg = "아이디 또는 비밀번호를 입력해주세요."
else:
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect("accounting:home")
else:
error_msg = "아이디 또는 비밀번호가 틀렸습니다."
return render(request, "login.html", {"error_msg" : error_msg})
def logout_view(request):
logout(request)
return redirect("accounting:home")
def signup_view(request):
error_msg = ""
if request.method == "POST":
password1 = request.POST["password1"]
password2 = request.POST["password2"]
if password1 == password2:
username = request.POST["username"]
nickname = request.POST["nickname"]
age = int(request.POST['age'])
job = request.POST['job']
income = int(request.POST['income'])
signup_date = timezone.now()
user = CustomUser.objects.create_user(
username = username,
password = password1,
nickname = nickname,
age = age,
job = job,
income = income,
signup_date = signup_date,
)
return redirect("account:login")
else:
error_msg = "비밀번호가 일치하지 않습니다."
return render(request, "signup.html", {"error_msg" : error_msg})
| [
"62284729+ehddus980@users.noreply.github.com"
] | 62284729+ehddus980@users.noreply.github.com |
941d7134dc48a6b37fcc70578c7ecebb24d49a90 | 939e9dc95a720fef1844d8b52890b9ca688754c2 | /outliers/enron_outliers.py | 310ba76c1f350da99da7fc011629782a37cbd0ee | [] | no_license | ilyarudyak/ud120-machine-learning | 2350d8d451b94106606a486d4ac4a78ff3c4fe63 | c9ddce6599b278f40abfe5e15f92c02209dfacb4 | refs/heads/master | 2021-01-20T22:05:53.258319 | 2018-06-14T12:59:40 | 2018-06-14T12:59:40 | 101,799,504 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | #!/usr/bin/python
import pickle
import matplotlib.pyplot as plt
from tools.feature_format import featureFormat, targetFeatureSplit
import numpy as np
def get_data():
    """Load the Enron salary/bonus data and return it as a numpy array.

    Drops the 'TOTAL' entry (a spreadsheet aggregation row, not a person)
    before converting via featureFormat.
    """
    # Context manager closes the handle promptly; the original
    # pickle.load(open(...)) leaked the open file object.
    with open('final_project_dataset.pkl', 'rb') as f:
        data_dict = pickle.load(f)
    del data_dict['TOTAL']
    features = ['salary', 'bonus']
    return featureFormat(data_dict, features)
def plot_data(salary_data):
    """Scatter-plot bonus vs. salary; expects a 2-column array [salary, bonus]."""
    salary, bonus = salary_data[:, 0], salary_data[:, 1]
    plt.scatter(salary, bonus)
    plt.xlabel('salary')
    plt.ylabel('bonus')
    plt.show()
# your code below
if __name__ == '__main__':
    # Load the Enron data and visualize it to spot outliers.
    salary_data = get_data()
    plot_data(salary_data)
| [
"ilyarudyak@yahoo.com"
] | ilyarudyak@yahoo.com |
34a6c530d071afbfe82a7bd521e4964a0e475056 | 9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56 | /google/ads/googleads/v11/enums/types/customizer_attribute_status.py | a55a3f6d36c4ffb95a7f855ec2dc9f165968622a | [
"Apache-2.0"
] | permissive | GerhardusM/google-ads-python | 73b275a06e5401e6b951a6cd99af98c247e34aa3 | 676ac5fcb5bec0d9b5897f4c950049dac5647555 | refs/heads/master | 2022-07-06T19:05:50.932553 | 2022-06-17T20:41:17 | 2022-06-17T20:41:17 | 207,535,443 | 0 | 0 | Apache-2.0 | 2019-09-10T10:58:55 | 2019-09-10T10:58:55 | null | UTF-8 | Python | false | false | 1,194 | py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v11.enums",
marshal="google.ads.googleads.v11",
manifest={"CustomizerAttributeStatusEnum",},
)
# Auto-generated proto wrapper; keep the structure in sync with the .proto.
class CustomizerAttributeStatusEnum(proto.Message):
    r"""Container for enum describing possible statuses of a
    customizer attribute.
    """

    class CustomizerAttributeStatus(proto.Enum):
        r"""The possible statuses of a customizer attribute."""
        UNSPECIFIED = 0  # Not specified.
        UNKNOWN = 1  # Value unknown in this API version (response-only).
        ENABLED = 2  # The customizer attribute is enabled.
        REMOVED = 3  # The customizer attribute is removed.
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"noreply@github.com"
] | GerhardusM.noreply@github.com |
6046d42a8be0edfd0e6f61ba8a5aa1359e0f0f75 | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-mrsp.0/mrsp_ut=3.5_rd=0.8_rw=0.06_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=66/sched.py | 24ca03c1050f53f56b98349570171dfcef06cc75 | [] | no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | -S 0 -X RUN -Q 0 -L 4 97 300
-S 0 -X RUN -Q 0 -L 4 78 300
-S 0 -X RUN -Q 0 -L 4 73 300
-S 1 -X RUN -Q 1 -L 3 69 250
-S 1 -X RUN -Q 1 -L 3 66 300
-S 1 -X RUN -Q 1 -L 3 62 200
-S 2 -X RUN -Q 2 -L 2 48 150
-S 2 -X RUN -Q 2 -L 2 41 175
-S 2 -X RUN -Q 2 -L 2 40 150
-S 3 -X RUN -Q 3 -L 1 34 175
-S 3 -X RUN -Q 3 -L 1 29 100
-S 3 -X RUN -Q 3 -L 1 22 125
-S 4 21 150
-S 4 19 200
-S 4 15 100
| [
"ricardo.btxr@gmail.com"
] | ricardo.btxr@gmail.com |
549191a5508e7d23bc35f003a2772ba44440c3b9 | 41586d36dd07c06860b9808c760e2b0212ed846b | /system/base/inary/actions.py | 51f407d19d443069040111eb40f7b1876b6bbf54 | [] | no_license | SulinOS/SulinRepository | 4d5551861f57bc1f4bec6879dfe28ce68c7c125d | 9686811a1e06080f63199233561a922fe1f78d67 | refs/heads/master | 2021-06-15T21:34:25.039979 | 2021-06-05T13:43:34 | 2021-06-05T13:43:34 | 207,672,864 | 6 | 3 | null | 2019-12-06T08:11:22 | 2019-09-10T22:16:17 | Python | UTF-8 | Python | false | false | 605 | py | # -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from inary.actionsapi import get
from inary.actionsapi import inarytools
from inary.actionsapi import shelltools
from inary.actionsapi import pythonmodules
def build():
    # Byte-compile the package's Python sources for Python 3.
    pythonmodules.compile(pyVer='3')
def install():
    """Install inary into the package image and create its runtime paths."""
    # Drop translation sources before installing the Python modules.
    shelltools.system("rm -r po/*")
    pythonmodules.install("--install-lib=/usr/lib/sulin", pyVer='3')
    # 'inary' is the user-facing alias for the inary-cli entry point.
    inarytools.dosym("inary-cli", "/usr/bin/inary")
    inarytools.dodir("/var/lib/inary/info/")
    inarytools.dodir("/usr/lib/sulin")
| [
"zaryob.dev@gmail.com"
] | zaryob.dev@gmail.com |
a7b8ecc8600ae64fff27cc805a08157f9d474fb8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03816/s472692535.py | 0bf949a31b20b3367d9721e32c4c1197579ce541 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 281 | py | n = int(input())
# Sorting puts equal values next to each other, so duplicate runs are
# contiguous.  NOTE(review): `n` is read from stdin on the preceding line.
a = sorted(list(map(int, input().split())))
eat = 0  # number of removal operations performed (each removes 2 elements)
l = 0
r = n-1
while l < r:
    # Advance l until a[l] equals its right neighbour (start of a duplicate
    # run), or the end of the array is reached.
    while l < n - 1 and a[l+1] != a[l]:
        l += 1
    # Symmetrically, retreat r to an element equal to its left neighbour.
    while r > 0 and a[r-1] != a[r]:
        r -= 1
    if r <= l:
        break
    # Remove one element from each of the two duplicate groups found.
    eat += 1
    l += 1
    r -= 1
print(n-eat*2) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
25edf5e1709e8c999cb8d8d26d28ee82133cf944 | 628ec414b7807fc50de67345361e41cc68ba3720 | /mayan/apps/events/icons.py | 8741af807574afd2c754b2d27e0efb1073aa1085 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | TestingCodeReview/Mayan-EDMS | aafe144424ffa8128a4ff7cee24d91bf1e1f2750 | d493ec34b2f93244e32e1a2a4e6cda4501d3cf4e | refs/heads/master | 2020-05-27T23:34:44.118503 | 2019-04-05T02:04:18 | 2019-04-05T02:04:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | from __future__ import absolute_import, unicode_literals
from appearance.classes import Icon
icon_event_types_subscriptions_list = Icon(
driver_name='fontawesome', symbol='list-ol'
)
icon_events_list = Icon(driver_name='fontawesome', symbol='list-ol')
icon_events_for_object = Icon(driver_name='fontawesome', symbol='list-ol')
icon_events_user_list = Icon(driver_name='fontawesome', symbol='rss')
icon_object_event_types_user_subcriptions_list = Icon(
driver_name='fontawesome', symbol='rss'
)
icon_user_notifications_list = Icon(
driver_name='fontawesome', symbol='bell'
)
| [
"roberto.rosario.gonzalez@gmail.com"
] | roberto.rosario.gonzalez@gmail.com |
922ea7da8f1d8bb455f3da5f8e3aa7e842fb3ab4 | 19eafacbf77452f5059b8524ab1c72954fb6ecf6 | /quant_engine/Factor/Analyst/ROE_FY1.py | abb7c47cd55f6bd89402eb415f6fbd7fdc09c69a | [] | no_license | fagan2888/DFZQ | 1893fe113428234f7f6f10408043e9b1683fb885 | 94730b31c8f53ca0ebecdea4327e55d92fb244b3 | refs/heads/master | 2022-11-17T17:57:55.608746 | 2020-07-14T10:01:28 | 2020-07-14T10:01:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,517 | py | # 盈利能力因子 ROE_FY1 的计算
# 对齐 report period
from factor_base import FactorBase
import pandas as pd
import numpy as np
import datetime
from global_constant import N_JOBS, FACTOR_DB
from joblib import Parallel, delayed, parallel_backend
from influxdb_data import influxdbData
from data_process import DataProcess
class ROE_FY1(FactorBase):
def __init__(self):
    super().__init__()
    # InfluxDB database the computed daily factor is written to.
    self.db = 'DailyFactors_Gus'
@staticmethod
def JOB_cur_ROE_TTM(codes, df, db, measure):
    """Worker job: compute forward ROE (ROE_FY1) per stock code and save it.

    For each code, selects the book-equity snapshot whose report period
    matches the forecast's FY0 report period, divides the FY1 net-profit
    forecast by it, and writes the result to InfluxDB.  Returns a list of
    error strings (empty when every save succeeded).
    """
    influx = influxdbData()
    save_res = []
    for code in codes:
        code_df = df.loc[df['code'] == code, :].copy()
        # Align report periods: pick the equity column whose report period
        # equals the forecast's FY0 report period.
        conditions = [code_df['FY0_rp'].values == code_df['equity_last0Q_rp'].values,
                      code_df['FY0_rp'].values == code_df['equity_last1Q_rp'].values,
                      code_df['FY0_rp'].values == code_df['equity_last2Q_rp'].values,
                      code_df['FY0_rp'].values == code_df['equity_last3Q_rp'].values,
                      code_df['FY0_rp'].values == code_df['equity_last4Q_rp'].values,
                      code_df['FY0_rp'].values == code_df['equity_last5Q_rp'].values,
                      code_df['FY0_rp'].values == code_df['equity_last6Q_rp'].values]
        choices = [code_df['net_equity'].values, code_df['net_equity_last1Q'].values,
                   code_df['net_equity_last2Q'].values, code_df['net_equity_last3Q'].values,
                   code_df['net_equity_last4Q'].values, code_df['net_equity_last5Q'].values,
                   code_df['net_equity_last6Q'].values]
        code_df['ROE_equity'] = np.select(conditions, choices, default=np.nan)
        # Fill ROE_equity with the most recent non-NaN equity value
        # (forward-fill across the quarter columns, oldest to newest).
        code_df[['net_equity_last6Q', 'net_equity_last5Q', 'net_equity_last4Q', 'net_equity_last3Q',
                 'net_equity_last2Q', 'net_equity_last1Q', 'net_equity', 'ROE_equity']] = \
            code_df[['net_equity_last6Q', 'net_equity_last5Q', 'net_equity_last4Q', 'net_equity_last3Q',
                     'net_equity_last2Q', 'net_equity_last1Q', 'net_equity', 'ROE_equity']].fillna(
                method='ffill', axis=1)
        # Compute ROE_FY1 = forecast net profit / aligned book equity.
        code_df['ROE_FY1'] = code_df['net_profit_FY1'] / code_df['ROE_equity']
        code_df.set_index('date', inplace=True)
        code_df = code_df.loc[:, ['code', 'ROE_FY1', 'report_period']]
        # Division by zero equity yields +/-inf; treat it as missing.
        code_df = code_df.replace(np.inf, np.nan)
        code_df = code_df.replace(-np.inf, np.nan)
        code_df = code_df.dropna()
        print('code: %s' % code)
        if code_df.empty:
            continue
        r = influx.saveData(code_df, db, measure)
        if r == 'No error occurred...':
            pass
        else:
            save_res.append('ROE_FY1 Error: %s' % r)
    return save_res
def cal_ROE_TTM(self):
save_measure = 'ROE_FY1'
# get profit
profit_df = self.influx.getDataMultiprocess(FACTOR_DB, 'AnalystNetProfit', self.start, self.end,
['code', 'net_profit_FY1', 'report_period'])
profit_df.index.names = ['date']
profit_df.reset_index(inplace=True)
# --------------------------------------------------------------------------------------
# 计算 ROE_FY1
cur_rps = []
former_rps = []
for rp in profit_df['report_period'].unique():
cur_rps.append(rp)
former_rps.append(DataProcess.get_former_RP(rp, 4))
rp_dict = dict(zip(cur_rps, former_rps))
profit_df['FY0_rp'] = profit_df['report_period'].map(rp_dict)
equity_df = self.net_equity.copy()
for i in range(1, 7):
cur_rps = []
former_rps = []
for rp in equity_df['report_period'].unique():
cur_rps.append(rp)
former_rps.append(DataProcess.get_former_RP(rp, i))
rp_dict = dict(zip(cur_rps, former_rps))
equity_df['equity_last{0}Q_rp'.format(i)] = equity_df['report_period'].map(rp_dict)
equity_df.rename(columns={'report_period': 'equity_last0Q_rp'}, inplace=True)
ROE_df = pd.merge(profit_df, equity_df, how='inner', on=['date', 'code'])
ROE_df = ROE_df.sort_values(['date', 'code', 'report_period'])
codes = ROE_df['code'].unique()
split_codes = np.array_split(codes, self.n_jobs)
with parallel_backend('multiprocessing', n_jobs=self.n_jobs):
res = Parallel()(delayed(ROE_FY1.JOB_cur_ROE_TTM)
(codes, ROE_df, self.db, save_measure) for codes in split_codes)
print('ROE_FY1 finish')
print('-' * 30)
for r in res:
self.fail_list.extend(r)
def cal_factors(self, start, end, n_jobs):
pd.set_option('mode.use_inf_as_na', True)
self.start = start
self.end = end
self.n_jobs = n_jobs
self.fail_list = []
# get net equity
self.net_equity = \
self.influx.getDataMultiprocess('FinancialReport_Gus', 'net_equity', start, end)
self.net_equity.index.names = ['date']
self.net_equity.reset_index(inplace=True)
self.cal_ROE_TTM()
return self.fail_list
if __name__ == '__main__':
roe = ROE_FY1()
r = roe.cal_factors(20090101, 20200604, N_JOBS)
print('task finish')
print(r)
print(datetime.datetime.now()) | [
"bright_gl@126.com"
] | bright_gl@126.com |
7d400123827930ea4426b72ad394fba68219b856 | 6de622e922361beac91e3cfc4cd67829451bc095 | /wyzepal/wyzepal/examples/message-history | fb650052f4bdae7a8b5fe58bf6694d9755660181 | [] | no_license | WyzePal/api | fd1f1771aa9e1bfeb5d5de102b3f525d905fae29 | 8646c90148885b1c4286557bd62cfcf844b9d107 | refs/heads/master | 2020-03-23T15:25:53.559240 | 2019-03-08T23:54:00 | 2019-03-08T23:54:00 | 141,747,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import wyzepal
usage = """message-history <message_id> [options]
Example: message-history 42
"""
parser = wyzepal.add_default_arguments(argparse.ArgumentParser(usage=usage))
parser.add_argument('message_id', type=int)
options = parser.parse_args()
client = wyzepal.init_from_options(options)
print(client.get_message_history(options.message_id))
| [
"dannym@wyzepal.com"
] | dannym@wyzepal.com | |
42505fe70d5e1e62150a3a5fe90404a7a83fd63d | 42516b0348936e257d04113c2e632dc72ba58e91 | /test_env/test_suit_ui_native_apk/test_suit_ui_native_apk_case05.py | dabb1ffacba1db255a1a1217d9e02bc7a0be878f | [] | no_license | wwlwwlqaz/Qualcomm | 2c3a225875fba955d771101f3c38ca0420d8f468 | a04b717ae437511abae1e7e9e399373c161a7b65 | refs/heads/master | 2021-01-11T19:01:06.123677 | 2017-04-05T07:57:21 | 2017-04-05T07:57:21 | 79,292,426 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,095 | py | # coding: utf-8
'''
check native apk: alarm
@author: U{huitingn<huitingn@qti.qualcomm.com>}
@version: version 1.0.0
@requires: python 2.7+
@license:
@see: L{TestCaseBase <TestCaseBase>}
@note:
@attention:
@bug:
@warning:
'''
import fs_wrapper
import settings.common as SC
from case_utility import *
from logging_wrapper import log_test_case, take_screenshot
from test_case_base import TestCaseBase
from qrd_shared.case import *
############################################
# author:
# huitingn@qti.qualcomm.com
# function:
# check native clock
# precondition:
#
# steps:
# launch native clock
# set alarm
# wait for alarm
############################################
import sys, string, os, shutil
from threading import Thread
import commands
import re, subprocess, shlex
import datetime
from test_suit_ui_native_apk import *
class test_suit_ui_native_apk_case05(TestCaseBase):
'''
test_suit_ui_native_apk_case05 is a class for check native clock: can it alarm in expect time.
@see: L{TestCaseBase <TestCaseBase>}
'''
tag = 'ui_native_apk_case05'
def test_case_main(self, case_results):
'''
main entry.
@type case_results: tuple
@param case_results: record some case result information
'''
case_flag = False
pre_check()
#
# read what's the time now
#
try:
(hour, minute, a_p, cur_time) = cur_time_in_mobilephone()
except:
set_cannot_continue()
log_test_case(self.tag, "before SET ALARM: time format maybe wrong" + cur_time)
#
# STEP 1: launch alarm
#
if can_continue():
launcher.launch_from_launcher('clock')
#
# STEP 2: set alarm
#
if can_continue():
# new alarm
click_view_by_container_id('action_bar_container', 'android.widget.ImageView', 0)
click_button_by_id('fab') # alarm_add_alarm
# set the alarm. e.g.:now(12:56AM)set( 1:00AM)
# now( PM)set( PM)
# now(11:56AM)set(12:00PM)
# now( PM)set( AM)
#
# caculate what time should be set
#
# minute decide hour
if (int(minute) + 1 + 5) > 60:boundary = True
else:boundary = False
setMinute = (int(minute) + 1 + 5) / 5
setHour = int(hour) + boundary
if setHour % 12 == 0 and boundary:
apDict = {'True':'pm', 'False':'am'}
setAP = apDict[str(a_p == 'AM')]
else:setAP = a_p.lower()
setMinute = '%02.0f' % (setMinute * 5 % 60)
setHour = str(setHour)
log_test_case(self.tag, "SET hour: " + setHour + " minute: " + setMinute + " ap: " + setAP)
# set alarm
click(CLOCK_PLATE['HOUR'][setHour][0], CLOCK_PLATE['HOUR'][setHour][1])
click(CLOCK_PLATE['MINUTE'][setMinute][0], CLOCK_PLATE['MINUTE'][setMinute][1])
# click(CLOCK_PLATE['A_P'][setAP][0],CLOCK_PLATE['A_P'][setAP][1])
click_textview_by_text(setAP.upper())
#
# check if alarm is set correctly
#
if get_view_text_by_id(VIEW_TEXT_VIEW, 'hours') == setHour \
and get_view_text_by_id(VIEW_TEXT_VIEW, 'minutes') == setMinute:
# and get_view_text_by_id(VIEW_TEXT_VIEW,'ampm_label')==setAP
click_button_by_text('OK')
else:
set_cannot_continue()
log_test_case(self.tag, "SET ALARM: h,m,ap At least one of them is clicked wrong")
#
# STEP 3: wait for alarm
#
if can_continue():
send_key(KEY_HOME)
sleep(2)
send_key(KEYCODE_POWER)
sleep(2)
func = lambda:is_view_enabled_by_id(VIEW_IMAGE_VIEW, 'alarm', isScrollable=0)
if wait_for_fun(func, True, timeout=300, sleeptime=10):
a = get_view_text_by_id(VIEW_TEXT_VIEW, 'digital_clock', isScrollable=0)
if a:case_flag = True
startX = int(240.0 / 480 * 100)
startY = int(590.0 / 855 * 100)
endX = int(400.0 / 480 * 100)
endY = int(590.0 / 855 * 100)
drag_by_param(startX, startY, endX, endY, 10)
#
# STEP 5: exit
#
exit_cur_case(self.tag)
log_test_case(self.tag, "case_flag = " + str(case_flag))
if case_flag:
qsst_log_case_status(STATUS_SUCCESS, "" , SEVERITY_HIGH)
else:
qsst_log_case_status(STATUS_FAILED, "native alarm is failed", SEVERITY_HIGH)
case_results.append((self.case_config_map[fs_wrapper.CASE_NAME_ATTR], can_continue()))
| [
"c_wwan@qti.qualcomm.com"
] | c_wwan@qti.qualcomm.com |
7f07de87a674fce74e537b03815c9f0175773dfd | 3a9f2b3d79cf214704829427ee280f4b49dca70a | /saigon/rat/RuckusAutoTest/tests/zd/CB_AP_CLI_Check_Wlans.py | 93b2c57d9cb88aa0a0d9f29f9af7f3c9188362ed | [] | no_license | jichunwei/MyGitHub-1 | ae0c1461fe0a337ef459da7c0d24d4cf8d4a4791 | f826fc89a030c6c4e08052d2d43af0b1b4b410e3 | refs/heads/master | 2021-01-21T10:19:22.900905 | 2016-08-20T03:34:52 | 2016-08-20T03:34:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,144 | py | '''
Description:
Prerequisite (Assumptions about the state of the testbed/DUT):
1. Build under test is loaded on the AP and Zone Director
Required components: 'RuckusAP'
Test parameters:
Result type: PASS/FAIL
Results: PASS:
FAIL:
Messages: If FAIL the test script returns a message related to the criteria that is not satisfied
Test procedure:
1. Config:
- initialize parameter.
2. Test:
- check number of ssids
3. Cleanup:
- None
How it was tested:
Create on 2013-1-10
@author: cwang@ruckuswireless.com
'''
import logging
from RuckusAutoTest.models import Test
from RuckusAutoTest.components.lib.apcli import radiogroup
class CB_AP_CLI_Check_Wlans(Test):
required_components = ['RuckusAP']
parameters_description = {}
def _init_params(self, conf):
self.conf = dict(num_of_ssids=64,
ap_tag = 'AP_01'
)
self.conf.update(conf)
def _retrieve_carribag(self):
self.active_ap = self.carrierbag[self.conf.get('ap_tag')]['ap_ins']
def _update_carribag(self):
pass
def config(self, conf):
self._init_params(conf)
self._retrieve_carribag()
def test(self):
import time
st = time.time()
while time.time() - st < 230:
wlan_list = radiogroup.get_wlanlist(self.active_ap)
cnt = 0
for wlan in wlan_list:
if 'AP' == wlan.get('type') and 'up' == wlan.get('status') and '00:00:00:00:00:00' != wlan.get('bssid') and (not 'mesh' in wlan.get('name')):
cnt += 1
if self.conf.get('num_of_ssids') == cnt:
return self.returnResult('PASS', 'The WLANs status is correct')
else:
time.sleep(10)
if 'wlan_list' in locals():
logging.info(wlan_list)
return self.returnResult('FAIL', 'The WLANs status is incorrect, please check')
def cleanup(self):
self._update_carribag() | [
"tan@xx.com"
] | tan@xx.com |
3d7c87091f35d690835e37012b967b52d9e57aa6 | dc280634cd9c6601c1d35cc31debc63fe4d4d88d | /twisted/plugins/anchore_simplequeue.py | bcd31006b26ab16231db135609db5203a309cb19 | [
"Apache-2.0"
] | permissive | roachmd/anchore-engine | 9fe5166bbce00471516730c270b9dab7658f38d2 | 521d6796778139a95f51542670714205c2735a81 | refs/heads/master | 2020-03-26T01:22:25.812770 | 2018-08-15T06:10:18 | 2018-08-15T06:10:18 | 144,364,236 | 0 | 0 | Apache-2.0 | 2018-08-11T07:17:22 | 2018-08-11T07:17:22 | null | UTF-8 | Python | false | false | 2,642 | py | import sys
import os
from twisted.application.service import IServiceMaker
from twisted.plugin import IPlugin
from twisted.python import log
from twisted.python import usage
from zope.interface import implements
# anchore modules
from anchore_engine.configuration import localconfig
import anchore_engine.services.common
from anchore_engine.subsys import logger
class Options(usage.Options):
#class Options(usage.Options, strcred.AuthOptionMixin):
# supportedInterfaces = (credentials.IUsernamePassword,)
optParameters = [
["config", "c", None, "Configuration directory location."]
]
class AnchoreServiceMaker(object):
implements(IServiceMaker, IPlugin)
tapname = "anchore-simplequeue"
servicenames = ["simplequeue"]
description = "Anchore Container Image Scanner Service: " + ','.join(servicenames)
options = Options
def makeService(self, options):
slist = []
try:
configfile = os.path.join(options['config'], 'config.yaml')
config = localconfig.read_config(configfile=configfile)
except Exception as err:
log.err("cannot load local configuration: " + str(err))
raise err
log_level = 'INFO'
log_to_db = False
if 'log_level' in config:
log_level = config['log_level']
if 'log_to_db' in config:
log_to_db = config['log_to_db']
slist = self.servicenames
try:
config_services = config['services']
isEnabled = False
for sname in slist:
if 'log_level' in config_services[sname]:
log_level = config_services[sname]['log_level']
if config_services[sname]['enabled']:
isEnabled = True
break
if not isEnabled:
log.err("no services in list ("+str(self.servicenames)+") are enabled in configuration file: shutting down")
sys.exit(0)
except Exception as err:
log.err("error checking for enabled services, check config file - exception: " + str(err))
raise Exception("error checking for enabled services, check config file - exception: " + str(err))
try:
logger.set_log_level(log_level, log_to_db=log_to_db)
except Exception as err:
log.err("exception while initializing logger - exception: " + str(err))
logger.set_log_level('INFO')
r = anchore_engine.services.common.makeService(slist, options)
return(r)
serviceMaker = AnchoreServiceMaker()
| [
"nurmi@anchore.com"
] | nurmi@anchore.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.