blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
153479a79245d9a606054d6ec1ac2b88deffe40f | 83ed8b754703a1c9e661c90f0763bfebbc0f2606 | /数据处理/Brokerage/create_db.py | 4f34cf8e25321badf0c3409fc6e0102b3fe613a5 | [] | no_license | zbh123/hobby | 4ce267a20e1af7f2accd2bde8d39af269efa319b | 2215c406fe7700bf150fd536dd56823a2e4733d1 | refs/heads/master | 2021-08-02T10:31:34.683391 | 2021-07-26T07:26:16 | 2021-07-26T07:26:16 | 150,555,879 | 4 | 0 | null | 2021-07-27T07:34:28 | 2018-09-27T08:41:44 | Python | UTF-8 | Python | false | false | 2,936 | py | import MySQLdb
from sshtunnel import SSHTunnelForwarder
import time
# One-off admin script: opens an SSH tunnel to the database host, connects to
# the `brokerage` MySQL database through it, and (optionally) creates tables.
# All CREATE TABLE statements below are commented out, so as written the
# script only connects, sleeps, prints, and exits.
# NOTE(review): SSH and MySQL credentials are hard-coded in plain text --
# move them to environment variables or a secrets store before sharing.
with SSHTunnelForwarder(
        ("10.29.24.47", 222),  # SSH host IP and port
        ssh_password="zts000000",  # SSH password
        ssh_username="tianyj",  # SSH user name
        remote_bind_address=("10.29.129.94", 3306)) as server:  # IP and port of the MySQL host
    server.start()
    # Print the locally forwarded port to verify the tunnel is configured.
    print(server.local_bind_port)
    conn = MySQLdb.connect(host="127.0.0.1",  # always localhost: traffic goes through the tunnel
                           port=server.local_bind_port,
                           user="rpa",  # database user
                           passwd="zts000",  # database password
                           db='brokerage',
                           charset='utf8')  # restrict to one database, otherwise queries must qualify table names
    print('连接成功')
    # NOTE(review): 100-second pause -- presumably left over from debugging; confirm.
    time.sleep(100)
    cur = conn.cursor()
    # The CREATE TABLE statements below are kept commented out for reference.
    # sql = """CREATE TABLE shenzhen_month (
    # month CHAR(150),
    # total_amount CHAR(150),
    # market_share CHAR(150),
    # stock_trading_amount CHAR(150),
    # fund_trading_amount CHAR(150),
    # bond_trading_amount CHAR(150),
    # warrants_trading_amount CHAR(150),
    # `current_time` CHAR(150))"""
    # sql = """CREATE TABLE sse_month(
    # member_name CHAR(150),
    # number_seats CHAR(150),
    # total CHAR(150),
    # stock CHAR(150),
    # investment_funds CHAR(150),
    # ETF CHAR(150),
    # treasury CHAR(150),
    # amount_lgd CHAR(150),
    # corporate_bonds CHAR(150),
    # convertible_bonds CHAR(150),
    # repurchase_bonds CHAR(150),
    # warrants CHAR(150),
    # current_month CHAR(150),
    # `current_time` CHAR(150))
    #
    # """
    # sql = """CREATE TABLE shenzhen_total_day(
    # types_bond CHAR(150),
    # number CHAR(150),
    # transaction_amount CHAR(150),
    # turnover CHAR(150),
    # total_equity CHAR(150),
    # total_market_value CHAR(150),
    # negotiable_capital CHAR(150),
    # circulation_market_value CHAR(150),
    # `current_time` CHAR(150))"""
    # sql = """ CREATE TABLE sse_stock_day(
    # single_day_situation CHAR(150),
    # stock CHAR(150),
    # mainboard_A CHAR(150),
    # mainboard_B CHAR(150),
    # ipo CHAR(150),
    # repurchase_bonds CHAR(150),
    # `current_time` CHAR(150)
    # )
    # """
    # sql = """CREATE TABLE sse_fund_day(
    # single_day_situation CHAR(150),
    # fund CHAR(150),
    # closed_fund CHAR(150),
    # ETF CHAR(150),
    # LOF CHAR(150),
    # trading_fund CHAR(150),
    # repurchase_fund CHAR(150),
    # `current_time` CHAR(150)
    # )
    # """
    # cur.execute(sql)
    print("CREATE TABLE OK")
    # Close the database cursor
    cur.close()
    # Close the connection
    conn.close()
| [
"zbh@example.com"
] | zbh@example.com |
0d45a727bc28374f7e8457cccf8c58524bade227 | d42954213667ce37a1eeadc125dc6ba1c67cf5d9 | /todoapp/todolist/migrations/0001_initial.py | d21f6b48027b9a9c184f8e38b6325c85ccb21f36 | [] | no_license | Hossain-Shah/Project | 8363f37e6557bca4fd03882b8aae7f344b8eb789 | 338e5fbdc132ea0952b02f93932031546d992d8a | refs/heads/master | 2023-08-09T12:35:01.774043 | 2023-08-05T13:05:22 | 2023-08-05T13:05:22 | 233,031,408 | 0 | 0 | null | 2020-10-13T18:22:53 | 2020-01-10T11:17:42 | Python | UTF-8 | Python | false | false | 1,342 | py | # Generated by Django 2.2.1 on 2019-10-01 16:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the todolist app: creates Category and TodoList."""
    # First migration of the app; no prior state to depend on.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
            ],
            options={
                'verbose_name': 'Category',
                'verbose_name_plural': 'Categories',
            },
        ),
        migrations.CreateModel(
            name='TodoList',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=250)),
                ('content', models.TextField(blank=True)),
                # NOTE(review): hard-coded date defaults frozen at generation
                # time -- presumably should have been timezone.now; confirm.
                ('created', models.DateField(default='2019-10-01')),
                ('due_date', models.DateField(default='2019-10-01')),
                # NOTE(review): on_delete is the *string* 'models.CASCADE',
                # not the callable, and default='general' is not a valid
                # Category pk -- looks buggy, but migrations are historical
                # records; fix in a follow-up migration, not here.
                ('category', models.ForeignKey(default='general', on_delete='models.CASCADE', to='todolist.Category')),
            ],
            options={
                'ordering': ['-created'],
            },
        ),
    ]
| [
"noreply@github.com"
] | Hossain-Shah.noreply@github.com |
ee8c450917f609d531ffc85dbe264150f66aa9ee | d9eef8dd3489682c8db41f2311e3058d1f369780 | /.history/abel-network-files/mcmc_alg_implementation_own_20180626142636.py | 056663041df5c2070817234221eb3afcb7505416 | [] | no_license | McKenzie-Lamb/Gerrymandering | 93fe4a49fe39a0b307ed341e46ba8620ea1225be | b7a7c4129d6b0fcd760ba8952de51eafa701eac3 | refs/heads/master | 2021-01-25T06:06:43.824339 | 2018-10-16T14:27:01 | 2018-10-16T14:27:01 | 93,526,515 | 0 | 0 | null | 2018-07-12T19:07:35 | 2017-06-06T14:17:47 | Python | UTF-8 | Python | false | false | 1,820 | py | # Author: Abel Gonzalez
# Date: 06/26/18
#
# Description:
# This program uses the .shp file to create a network graph where each node
# represents a census tract and the edge represents adjacency between each
# tract, usign graph-tool instead of networkx
import graph_tool.all as gt
from pathlib import Path
# Paths
data_folder = Path("abel-network-files/data/")
images_folder = Path("abel-network-files/images/")
# Loading the previously created graph (census tracts as vertices, adjacency
# as edges) and creating the per-vertex property maps used for drawing.
graph = gt.load_graph(str(data_folder / "tmp_graph.gt"))
district_no = graph.new_vertex_property("int")
color = graph.new_vertex_property("vector<double>")
ring_color = graph.new_vertex_property("vector<double>")
# Partition the graph into 2 blocks ("districts") and record each vertex's
# block, its fill color, and accumulate per-district vote totals.
districts_data = {}
districts = gt.minimize_blockmodel_dl(graph, 2,2)
blocks = districts.get_blocks()
for i in graph.vertices():
    district_no[graph.vertex(i)] = blocks[i]
    color[graph.vertex(i)] = (255, 255, 0, 1) if blocks[i] == 1 else (0, 255, 255, 1)
    if district_no[i] in districts_data.keys():
        for j in districts_data[blocks[i]].keys():
            districts_data[blocks[i]][j] += graph.vp.data[i]
    else:
        # NOTE(review): this stores a reference to the vertex's data dict;
        # the += above then mutates that underlying property value in place.
        # Presumably harmless for a throwaway script -- confirm.
        districts_data[blocks[i]] = graph.vp.data[i]
# Assign ring color based on democrats total votes:
# blue ring if the district's Democratic total exceeds the Republican one.
for i in districts_data.keys():
    if districts_data[i]['CONDEM14'] > districts_data[i]['CONREP14']:
        ring_color_ = (0, 0, 255, 1)
    else:
        ring_color_ = (255, 0, 0, 1)
    matched_vertices = gt.find_vertex(graph, district_no, i)
    for j in matched_vertices:
        ring_color[graph.vertex(j)] = ring_color_
# Render the colored graph to a PNG.
gt.graph_draw(graph, bg_color=(255, 255, 255, 1), vertex_fill_color=color, vertex_color=ring_color, pos=graph.vp.pos,
              vertex_text=graph.vertex_index, output='abel-network-files/tmp.png')
| [
"gonzaleza@ripon.edu"
] | gonzaleza@ripon.edu |
8d70e1519b3f927b87482b6e543cdcf6457c7802 | 28d5d19196ac52d4bd60cbe5e06963dd8fd2ed74 | /etat_civil/geonames_place/tests/test_models.py | e7ad712113190310e59ed9582d5b45c388da847e | [
"MIT"
] | permissive | kingsdigitallab/etat-civil-django | a01ebea68ea205a185a36cfa76b6245d5a1df3e6 | 02bb32870dfd75d42f10189db6a527f133cbcdd6 | refs/heads/master | 2020-11-26T23:10:49.810602 | 2020-07-02T14:30:15 | 2020-07-02T14:30:15 | 229,226,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,640 | py | import geocoder
import pytest
from django.conf import settings
from django.db.utils import IntegrityError
from etat_civil.geonames_place.models import Place
pytestmark = pytest.mark.django_db
@pytest.mark.django_db
class TestPlace:
    """Tests for the geonames Place model.

    NOTE(review): several of these tests hit the live GeoNames API (via
    geocoder and the model's geonames hooks), so they require network
    access and a valid settings.GEONAMES_KEY -- confirm before running in CI.
    """
    # Fixture constants: the GeoNames record for the United Kingdom.
    GEONAMES_ID = 2635167
    GEONAMES_ADDRESS = "United Kingdom"
    def test_save(self):
        """save() hydrates from GeoNames; a missing geonames_id must fail."""
        place = Place(geonames_id=self.GEONAMES_ID)
        place.save()
        assert place.address == self.GEONAMES_ADDRESS
        with pytest.raises(IntegrityError):
            place = Place()
            place.save()
        # Saving with update disabled must still succeed without a lookup.
        place.geonames_id = self.GEONAMES_ID
        place.update_from_geonames = False
        place.save()
    def test_hydrate_from_geonames(self):
        """Hydration is a no-op without an id, fills address with one."""
        place = Place()
        place.hydrate_from_geonames()
        assert place.address is None
        place.geonames_id = self.GEONAMES_ID
        place.hydrate_from_geonames()
        assert place.address == self.GEONAMES_ADDRESS
    def test__hydrate(self):
        """_hydrate ignores None and copies fields from a geocoder result."""
        place = Place(geonames_id=self.GEONAMES_ID)
        place._hydrate(None)
        assert place.address is None
        g = geocoder.geonames(
            place.geonames_id, key=settings.GEONAMES_KEY, method="details"
        )
        place._hydrate(g)
        assert place.address == self.GEONAMES_ADDRESS
    def test_to_list(self):
        """to_list() returns [geonames_id, address, lat, lon] in order."""
        place = Place(geonames_id=1, address="Address", lat=1, lon=1)
        place_as_list = place.to_list()
        assert len(place_as_list) == 4
        assert place_as_list[0] == place.geonames_id
        assert place_as_list[1] == place.address
        assert place_as_list[2] == place.lat
        assert place_as_list[3] == place.lon
    def test_create_or_update_from_geonames(self):
        """Bulk create returns 0 for bad input, GEONAMES_MAX_RESULTS otherwise."""
        assert Place.create_or_update_from_geonames(None) == 0
        assert Place.create_or_update_from_geonames("un") == 0
        assert (
            Place.create_or_update_from_geonames(self.GEONAMES_ADDRESS)
            == settings.GEONAMES_MAX_RESULTS
        )
    def test_get_or_create_from_geonames(self):
        """Lookup by address returns None for bad input, a Place otherwise."""
        assert Place.get_or_create_from_geonames(None) is None
        assert Place.get_or_create_from_geonames("un") is None
        place = Place.get_or_create_from_geonames(self.GEONAMES_ADDRESS)
        assert place is not None
        assert place.geonames_id == self.GEONAMES_ID
    def test_places_to_list(self, tmpdir):
        """places_to_list() reflects the rows currently in the table."""
        # NOTE(review): the tmpdir fixture is requested but unused -- confirm
        # it is not needed and drop it.
        places = Place.places_to_list()
        assert len(places) == 0
        Place.objects.get_or_create(geonames_id=1, address="Address", lat=1, lon=1)
        places = Place.places_to_list()
        assert len(places) == 1
        assert "Address" in places[0]
"jmvieira@gmail.com"
] | jmvieira@gmail.com |
1dea1bd5576192e35833fe57508301a3a89aeb69 | 08176a94897588554501217f5ceb89bf8f004e63 | /galpy_cuda_demo/orbits/__init__.py | 7ae680602986aab03874a4c225e71cd40280a674 | [
"MIT"
] | permissive | jobovy/galpy_cuda_demo | 8e8f717a552a588fa7df6a2f25ff3eff8c2f083a | 3e79f3a6701721fd5d89914b2f9c144e68a72ec8 | refs/heads/master | 2020-03-30T18:23:01.791959 | 2018-10-03T02:57:58 | 2018-10-03T02:57:58 | 151,497,462 | 2 | 0 | MIT | 2018-10-04T00:15:10 | 2018-10-04T00:15:09 | null | UTF-8 | Python | false | false | 737 | py | from .CUDAOrbits import CUDAOrbits
from .npOrbits import npOrbits
def Orbits(x, y, vx, vy, mode='cuda'):
    """Factory that selects an orbit-integration backend.

    Dispatches on *mode*: 'cuda' builds the CUDA GPU implementation,
    'cpu' the numpy one; anything else raises ValueError.

    :param x: x-locations in AU
    :type x: np.ndarray
    :param y: y-locations in AU
    :type y: np.ndarray
    :param vx: x-velocity in AU/yr
    :type vx: np.ndarray
    :param vy: y-velocity in AU/yr
    :type vy: np.ndarray
    :param mode: backend selector, case-insensitive ('cuda' or 'cpu')
    :type mode: str
    """
    backend = mode.lower()
    if backend == 'cpu':
        return npOrbits(x, y, vx, vy)
    if backend == 'cuda':
        return CUDAOrbits(x, y, vx, vy)
    raise ValueError("Mode can only be either 'CUDA' or 'CPU'")
| [
"henryskyleung@gmail.com"
] | henryskyleung@gmail.com |
041b8097b2e1d3e803ced7dd694c4a406c35ca5c | f4b5721c6b3f5623e306d0aa9a95ec53461c1f89 | /backend/src/gloader/xml/dom/ext/reader/Sax2.py | 87ba6c5ca1a52aad83672c154d50ec1eefc0eb1f | [
"Apache-1.1",
"MIT"
] | permissive | citelab/gini5 | b53e306eb5dabf98e9a7ded3802cf2c646f32914 | d095076113c1e84c33f52ef46a3df1f8bc8ffa43 | refs/heads/uml-rename | 2022-12-10T15:58:49.578271 | 2021-12-09T23:58:01 | 2021-12-09T23:58:01 | 134,980,773 | 12 | 11 | MIT | 2022-12-08T05:20:58 | 2018-05-26T17:16:50 | Python | UTF-8 | Python | false | false | 15,985 | py | ########################################################################
#
# File Name: Sax2.py
#
#
"""
Components for reading XML files from a SAX2 producer.
WWW: http://4suite.com/4DOM e-mail: support@4suite.com
Copyright (c) 2000, 2001 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
import sys, string, cStringIO, os, urllib2
from xml.sax import saxlib, saxutils, sax2exts, handler
from xml.dom import Entity, DocumentType, Document
from xml.dom import Node
from xml.dom import implementation
from xml.dom.ext import SplitQName, ReleaseNode
from xml.dom import XML_NAMESPACE, XMLNS_NAMESPACE, EMPTY_NAMESPACE
from xml.dom import Element
from xml.dom import Attr
from xml.dom.ext import reader
class NsHandler:
    """Mixin providing SAX1-style namespace bookkeeping.

    Maintains a prefix->URI map plus a stack of per-element deltas so that
    bindings introduced by an element are undone when it ends.
    NOTE(review): uses the Python 2-only dict.has_key -- this module is
    legacy PyXML/4DOM code and will not run unmodified on Python 3.
    """
    def initState(self, ownerDoc=None):
        # Start with the implicit bindings: 'xml' and the empty prefix.
        self._namespaces = {'xml': XML_NAMESPACE, None: EMPTY_NAMESPACE}
        self._namespaceStack = []
        return
    def startElement(self, name, attribs):
        """Create a DOM element for a SAX1 startElement event.

        Scans attributes for xmlns declarations, updates the namespace map,
        then builds the element and its attributes with namespace awareness.
        """
        self._completeTextNode()
        # old_nss: bindings shadowed by this element (restored on end);
        # del_nss: prefixes introduced here (deleted on end).
        old_nss = {}
        del_nss = []
        for curr_attrib_key, value in attribs.items():
            (prefix, local) = SplitQName(curr_attrib_key)
            if local == 'xmlns':
                if self._namespaces.has_key(prefix):
                    old_nss[prefix] = self._namespaces[prefix]
                if value:
                    self._namespaces[prefix] = attribs[curr_attrib_key]
                else:
                    # Empty value undeclares the prefix.
                    del self._namespaces[prefix]
            elif value:
                # NOTE(review): this branch records the prefix for deletion
                # without saving any shadowed binding in old_nss -- a
                # previously bound prefix would be dropped, not restored,
                # at endElement. Presumably a latent bug; confirm.
                self._namespaces[prefix] = attribs[curr_attrib_key]
                del_nss.append(prefix)
        self._namespaceStack.append((old_nss, del_nss))
        (prefix, local) = SplitQName(name)
        nameSpace = self._namespaces.get(prefix, None)
        if self._ownerDoc:
            new_element = self._ownerDoc.createElementNS(nameSpace, (prefix and prefix + ':' + local) or local)
        else:
            # First element seen: create the owner document around it.
            self._initRootNode(nameSpace, name)
            new_element = self._ownerDoc.documentElement
        for curr_attrib_key,curr_attrib_value in attribs.items():
            (prefix, local) = SplitQName(curr_attrib_key)
            qname = local
            if local == 'xmlns':
                namespace = XMLNS_NAMESPACE
                if prefix:
                    qname = local + ':' + prefix
                attr = self._ownerDoc.createAttributeNS(namespace, qname)
            else:
                if prefix:
                    qname = prefix + ':' + local
                namespace = prefix and self._namespaces.get(prefix, None) or None
                attr = self._ownerDoc.createAttributeNS(namespace, qname)
            attr.value = curr_attrib_value
            new_element.setAttributeNodeNS(attr)
        self._nodeStack.append(new_element)
        return
    def endElement(self, name):
        """Pop the finished element, undo its namespace deltas, attach it."""
        self._completeTextNode()
        new_element = self._nodeStack[-1]
        del self._nodeStack[-1]
        old_nss, del_nss = self._namespaceStack[-1]
        del self._namespaceStack[-1]
        # Restore shadowed bindings, then drop the ones introduced here.
        self._namespaces.update(old_nss)
        for prefix in del_nss:
            del self._namespaces[prefix]
        if new_element != self._ownerDoc.documentElement:
            self._nodeStack[-1].appendChild(new_element)
        return
class XmlDomGenerator(NsHandler, saxutils.DefaultHandler,
                      saxlib.LexicalHandler,
                      saxlib.DeclHandler):
    """SAX2 handler that builds a 4DOM tree from parser events.

    Combines content, lexical and declaration handling; SAX1 namespace
    bookkeeping comes from NsHandler. Events seen before the document
    element (PIs, comments, doctype) are queued as "orphaned nodes" and
    replayed once the document is created.
    """
    def __init__(self, keepAllWs=0, implementation=implementation):
        # keepAllWs: when true, ignorable white space is kept as text nodes.
        self._keepAllWs = keepAllWs
        self._impl = implementation
        return
    def initState(self, ownerDoc=None):
        """Reset all parse state; optionally adopt an existing owner document."""
        self._ownerDoc = None
        self._rootNode = None
        #Set up the stack which keeps track of the nesting of DOM nodes.
        self._nodeStack = []
        # Bidirectional prefix<->URI maps; each value is a stack of bindings.
        # NOTE(review): 'xml' maps to a bare string in _pref2nsuri, not a
        # list like the other entries -- endPrefixMapping('xml') would
        # misbehave. Presumably never triggered in practice; confirm.
        self._nsuri2pref = {EMPTY_NAMESPACE:[None], XML_NAMESPACE: ['xml']}
        self._pref2nsuri = {None: [EMPTY_NAMESPACE], 'xml': XML_NAMESPACE}
        self._new_prefix_mappings = []
        if ownerDoc:
            self._ownerDoc = ownerDoc
            #Create a docfrag to hold all the generated nodes.
            self._rootNode = self._ownerDoc.createDocumentFragment()
            self._nodeStack.append(self._rootNode)
        self._dt = None
        self._xmlDecl = None
        # Nodes seen before the document exists, replayed in _initRootNode.
        self._orphanedNodes = []
        self._currText = ''
        NsHandler.initState(self, ownerDoc)
        return
    def _initRootNode(self, docElementUri, docElementName):
        """Create the owner document on the first element and replay
        previously queued declaration/PI/comment/doctype events."""
        if not self._dt:
            self._dt = self._impl.createDocumentType(docElementName, None, '')
        self._ownerDoc = self._impl.createDocument(docElementUri, docElementName, self._dt)
        if self._xmlDecl:
            # Re-emit the XML declaration as a processing instruction.
            decl_data = 'version="%s"' % (
                self._xmlDecl['version']
                )
            if self._xmlDecl['encoding']:
                decl_data = decl_data + ' encoding="%s"'%(
                    self._xmlDecl['encoding']
                    )
            if self._xmlDecl['standalone']:
                decl_data = decl_data + ' standalone="%s"'%(
                    self._xmlDecl['standalone']
                    )
            xml_decl_node = self._ownerDoc.createProcessingInstruction(
                'xml',
                decl_data
                )
            self._ownerDoc.insertBefore(xml_decl_node, self._ownerDoc.docType)
        # Orphans before the doctype are inserted before it; later ones
        # are appended after the document element.
        before_doctype = 1
        for o_node in self._orphanedNodes:
            if o_node[0] == 'pi':
                pi = self._ownerDoc.createProcessingInstruction(
                    o_node[1],
                    o_node[2]
                    )
                if before_doctype:
                    self._ownerDoc.insertBefore(pi, self._dt)
                else:
                    self._ownerDoc.appendChild(pi)
            elif o_node[0] == 'comment':
                comment = self._ownerDoc.createComment(o_node[1])
                if before_doctype:
                    self._ownerDoc.insertBefore(comment, self._dt)
                else:
                    self._ownerDoc.appendChild(comment)
            elif o_node[0] == 'doctype':
                before_doctype = 0
            elif o_node[0] == 'unparsedentitydecl':
                # NOTE(review): apply() and the string raise below are
                # Python 2 relics; on Python 3 both would fail at runtime.
                apply(self.unparsedEntityDecl, o_node[1:])
            else:
                raise "Unknown orphaned node:"+o_node[0]
        self._rootNode = self._ownerDoc
        self._nodeStack.append(self._rootNode)
        return
    def _completeTextNode(self):
        """Flush accumulated character data into a DOM text node."""
        #Note some parsers don't report ignorable white space properly
        if self._currText and len(self._nodeStack) and self._nodeStack[-1].nodeType != Node.DOCUMENT_NODE:
            new_text = self._ownerDoc.createTextNode(self._currText)
            self._nodeStack[-1].appendChild(new_text)
        self._currText = ''
        return
    def getRootNode(self):
        """Return the generated root (Document or DocumentFragment)."""
        self._completeTextNode()
        return self._rootNode
    #Overridden DocumentHandler methods
    def processingInstruction(self, target, data):
        """Append a PI, or queue it if the document does not exist yet."""
        if self._rootNode:
            self._completeTextNode()
            pi = self._ownerDoc.createProcessingInstruction(target, data)
            self._nodeStack[-1].appendChild(pi)
        else:
            self._orphanedNodes.append(('pi', target, data))
        return
    def startPrefixMapping(self, prefix, uri):
        """Push a prefix binding onto both direction maps (SAX2 event)."""
        try:
            map = self._pref2nsuri[prefix]
        except:
            map = []
            self._pref2nsuri[prefix] = map
        map.append(uri)
        try:
            map = self._nsuri2pref[uri]
        except:
            map = []
            self._nsuri2pref[uri] = map
        map.append(prefix)
        # Remembered so startElementNS can materialize xmlns attributes.
        self._new_prefix_mappings.append((prefix,uri))
##        print 'startPrefixMapping',prefix,uri
##        print 'pref->uri',self._pref2nsuri
##        print 'uri->pref',self._nsuri2pref
    def endPrefixMapping(self, prefix):
        """Pop a prefix binding; drop empty stacks entirely."""
##        print 'endPrefixMapping',prefix
##        print 'pref->uri',self._pref2nsuri
##        print 'uri->pref',self._nsuri2pref
        uri = self._pref2nsuri[prefix][-1]
        del self._pref2nsuri[prefix][-1]
        del self._nsuri2pref[uri][-1]
        if not self._pref2nsuri[prefix]:
            del self._pref2nsuri[prefix]
        if not self._nsuri2pref[uri]:
            del self._nsuri2pref[uri]
    def startElementNS(self, name, qname, attribs):
        """Create a DOM element for a SAX2 namespace-aware start event."""
        self._completeTextNode()
        namespace = name[0]
        local = name[1]
        if qname is None:
            # Parser did not report the qname; reconstruct it from the
            # current prefix bindings.
            if self._nsuri2pref[namespace][-1]:
                qname = string.join((self._nsuri2pref[namespace][-1], local), ':')
            else :
                qname = local
        if self._ownerDoc:
            new_element = self._ownerDoc.createElementNS(namespace, qname)
        else:
            self._initRootNode(namespace, qname)
            new_element = self._ownerDoc.documentElement
        for ((attr_ns, lname), value) in attribs.items():
            if attr_ns is not None:
                try:
                    attr_qname = attribs.getQNameByName((attr_ns, lname))
                except KeyError:# pyexpat doesn't report qnames...
                    attr_prefix = self._nsuri2pref[attr_ns][-1]
                    if attr_prefix is None: # I'm not sure that this is possible
                        attr_qname = lname
                    else:
                        attr_qname = string.join((attr_prefix,lname), ':')
            else:
                attr_qname = lname
            attr = self._ownerDoc.createAttributeNS(attr_ns, attr_qname)
            attr.value = value
            new_element.setAttributeNodeNS(attr)
        # Materialize xmlns attributes for bindings opened on this element.
        for (prefix,uri) in self._new_prefix_mappings:
            if prefix is None :
                new_element.setAttributeNS(XMLNS_NAMESPACE,'xmlns',uri or '')
            else:
                new_element.setAttributeNS(XMLNS_NAMESPACE,'xmlns'+':'+prefix,uri)
        self._new_prefix_mappings = []
        self._nodeStack.append(new_element)
        return
    def endElementNS(self, name, qname):
        """Pop the finished element and attach it to its parent."""
        self._completeTextNode()
        new_element = self._nodeStack[-1]
        del self._nodeStack[-1]
        if new_element != self._ownerDoc.documentElement:
            self._nodeStack[-1].appendChild(new_element)
        return
    def ignorableWhitespace(self, chars):
        """
        If 'keepAllWs' permits, add ignorable white-space as a text node.
        A Document node cannot contain text nodes directly.
        If the white-space occurs outside the root element, there is no place
        for it in the DOM and it must be discarded.
        """
        if self._keepAllWs and self._nodeStack[-1].nodeType != Node.DOCUMENT_NODE:
            self._currText = self._currText + chars
        return
    def characters(self, chars):
        # Accumulate character data; flushed by _completeTextNode.
        self._currText = self._currText + chars
        return
    #Overridden LexicalHandler methods
    def xmlDecl(self, version, encoding, standalone):
        # Stored for replay once the document node is created.
        self._xmlDecl = {'version': version, 'encoding': encoding, 'standalone': standalone}
        return
    def startDTD(self, doctype, publicID, systemID):
        """Record the doctype; must occur before the document element."""
        self._dt = self._impl.createDocumentType(doctype, publicID, systemID)
        if not self._rootNode:
            self._orphanedNodes.append(('doctype',))
        #else:
            #raise Exception('Illegal DocType declaration')
        return
    def comment(self, text):
        """Append a comment node, or queue it pre-document."""
        if self._rootNode:
            self._completeTextNode()
            new_comment = self._ownerDoc.createComment(text)
            self._nodeStack[-1].appendChild(new_comment)
        else:
            self._orphanedNodes.append(('comment', text))
        return
    def startCDATA(self):
        # Flush pending text so CDATA content accumulates separately.
        self._completeTextNode()
        return
    def endCDATA(self):
        #NOTE: this doesn't handle the error where endCDATA is called
        #Without corresponding startCDATA.  Is this a problem?
        if self._currText:
            new_text = self._ownerDoc.createCDATASection(self._currText)
            self._nodeStack[-1].appendChild(new_text)
            self._currText = ''
        return
    #Overridden DTDHandler methods
    def notationDecl (self, name, publicId, systemId):
        """Register a notation declaration on the document type."""
        new_notation = self._ownerDoc.getFactory().createNotation(self._ownerDoc,  publicId, systemId, name)
        self._ownerDoc.getDocumentType().getNotations().setNamedItem(new_notation)
        return
    def unparsedEntityDecl (self, name, publicId, systemId, ndata):
        """Register an unparsed entity, or queue it pre-document."""
        if self._ownerDoc:
            new_notation = self._ownerDoc.getFactory().createEntity(self._ownerDoc,  publicId, systemId, name)
            self._ownerDoc.getDocumentType().getEntities().setNamedItem(new_notation)
        else:
            self._orphanedNodes.append(('unparsedentitydecl', name, publicId, systemId, ndata))
        return
    #Overridden ErrorHandler methods
    #FIXME: How do we handle warnings?
    def error(self, exception):
        raise exception
    def fatalError(self, exception):
        raise exception
class Reader(reader.Reader):
    """Wires a SAX2 parser to an XmlDomGenerator handler.

    Optionally validating, optionally catalog-aware; the same handler is
    installed for content, DTD, error, lexical and declaration events.
    """
    def __init__(self, validate=0, keepAllWs=0, catName=None,
                 saxHandlerClass=XmlDomGenerator, parser=None):
        # Use the supplied parser, else a validating or plain factory parser.
        self.parser = parser or (validate and sax2exts.XMLValParserFactory.make_parser()) or sax2exts.XMLParserFactory.make_parser()
        if catName:
            #set up the catalog, if there is one
            from xml.parsers.xmlproc import catalog
            cat_handler = catalog.SAX_catalog(
                catName, catalog.CatParserFactory()
                )
            self.parser.setEntityResolver(cat_handler)
        self.handler = saxHandlerClass(keepAllWs)
        self.parser.setContentHandler(self.handler)
        self.parser.setDTDHandler(self.handler)
        self.parser.setErrorHandler(self.handler)
        try:
            #FIXME: Maybe raise a warning?
            # Lexical/decl properties are optional; parsers that lack them
            # raise, and we deliberately fall back to content-only parsing.
            self.parser.setProperty(handler.property_lexical_handler, self.handler)
            self.parser.setProperty(handler.property_declaration_handler, self.handler)
        except (SystemExit, KeyboardInterrupt):
            raise
        except:
            pass
        return
    def fromStream(self, stream, ownerDoc=None):
        """Parse *stream* and return the resulting DOM root node."""
        self.handler.initState(ownerDoc=ownerDoc)
        #self.parser.parseFile(stream)
        s = saxutils.prepare_input_source(stream)
        self.parser.parse(s)
        rt = self.handler.getRootNode()
        #if hasattr(self.parser.parser,'deref'):
        #    self.parser.parser.deref()
        #self.parser.parser = None
        #self.parser = None
        #self.handler = None
        return rt
########################## Deprecated ##############################
def FromXmlStream(stream, ownerDocument=None, validate=0, keepAllWs=0,
                  catName=None, saxHandlerClass=XmlDomGenerator, parser=None):
    """Deprecated helper: parse *stream* and return the resulting DOM node."""
    dom_reader = Reader(validate, keepAllWs, catName, saxHandlerClass, parser)
    result = dom_reader.fromStream(stream, ownerDocument)
    return result
def FromXml(text, ownerDocument=None, validate=0, keepAllWs=0,
            catName=None, saxHandlerClass=XmlDomGenerator, parser=None):
    """Deprecated helper: parse an XML string via an in-memory stream."""
    buffer = cStringIO.StringIO(text)
    return FromXmlStream(buffer, ownerDocument, validate, keepAllWs, catName,
                         saxHandlerClass, parser)
def FromXmlFile(fileName, ownerDocument=None, validate=0, keepAllWs=0,
                catName=None, saxHandlerClass=XmlDomGenerator, parser=None):
    """Deprecated helper: parse the file at *fileName*, closing it afterwards."""
    source = open(fileName, 'r')
    try:
        return FromXmlStream(source, ownerDocument, validate, keepAllWs,
                             catName, saxHandlerClass, parser)
    finally:
        source.close()
def FromXmlUrl(url, ownerDocument=None, validate=0, keepAllWs=0,
               catName=None, saxHandlerClass=XmlDomGenerator, parser=None):
    """Deprecated helper: fetch *url* and parse the response body."""
    response = urllib2.urlopen(url)
    try:
        return FromXmlStream(response, ownerDocument, validate, keepAllWs,
                             catName, saxHandlerClass, parser)
    finally:
        response.close()
| [
"maheswar@MacBook-Pro.local"
] | maheswar@MacBook-Pro.local |
fb0a1027debe02013a8d8ada8b134957351cf9e8 | 7a4ed01a40e8d79126b26f5e8fca43c8e61e78fd | /Geeky Shows/Advance Pyhton/220.Module[34]/PythonProject/Example-1.py | 9bee856020e437b89c563e04fbe4e699c04ee8c1 | [] | no_license | satyam-seth-learnings/python_learning | 5a7f75bb613dcd7fedc31a1567a434039b9417f8 | 7e76c03e94f5c314dcf1bfae6f26b4a8a6e658da | refs/heads/main | 2023-08-25T14:08:11.423875 | 2021-10-09T13:00:49 | 2021-10-09T13:00:49 | 333,840,032 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | # Example-1.py <---- Main Module
import cal
print("Cal Module's Variable:",cal.a)
cal.name()
a=cal.add(10,20)
print(a)
print(cal.sub(20,10))
add=cal.add
print(add(4,7))
s=cal.sub
print(s(4,7)) | [
"satyam1998.1998@gmail.com"
] | satyam1998.1998@gmail.com |
7144f28804ed1051f0f61ebacd38ccfa966ca844 | 68f78e45e7cdeeaf2f9f3e8551c5bc922971a3be | /analysis/longbaseline/ch3cn_fiteach_e8.py | 7d23870c4560c80e7e9ba28d564704251679a105 | [] | no_license | keflavich/W51_ALMA_2013.1.00308.S | cdbb2b2aa569836c15fd424356d4a0778fccd2ec | 5389047e3e8adf7bcb4f91b1f4783bcc825961f6 | refs/heads/master | 2023-02-02T08:47:30.104031 | 2023-01-30T16:21:18 | 2023-01-30T16:21:18 | 44,909,889 | 3 | 12 | null | 2016-11-09T11:58:26 | 2015-10-25T12:51:54 | TeX | UTF-8 | Python | false | false | 5,040 | py | from ch3cn_fits import SpectralCube, pyspeckit, fits, u, np
import os
T=True
F=False
cubefn = '../../FITS/longbaseline/W51e8_CH3CN_cutout.fits'
if not os.path.exists(cubefn):
cube = SpectralCube.read('../../FITS/longbaseline/W51e2cax.CH3CN_K3_nat_all.image.fits')
scube = cube[:,1120:1440,2400:2760]
scube.write(cubefn)
cube = SpectralCube.read(cubefn).minimal_subcube()
contcubeK = cube.to(u.K, u.brightness_temperature(cube.beam,
cube.wcs.wcs.restfrq*u.Hz))
cubeK = cube.to(u.K, u.brightness_temperature(cube.beam,
cube.wcs.wcs.restfrq*u.Hz))
med = cubeK.percentile(25, axis=0)
cubeK.allow_huge_operations=True
cubeK = cubeK - med
# BAD error estimate
err = cubeK.std(axis=0)
err[:] = 5*u.K
peak = (cubeK).max(axis=0)
mask = (peak > 200*u.K)# & (peak > 6*err)
absorption_mask = cubeK.min(axis=0) < -150*u.K
mask = mask & (~absorption_mask)
pcube = pyspeckit.Cube(cube=cubeK[:400,:,:]) # crop out k=0,1
vguesses = 62*u.km/u.s
colguesses = np.ones_like(mask)*1e16
temguesses = np.ones_like(mask)*250.
widths = np.ones_like(mask)*2.0
#guesses = np.array([vguesses.value, widths, temguesses, colguesses])
guesses = [62, 2.0, 250., 1e16]
# For laptop
#mask &= (peak>10*u.K)
start_point = (43,43)#np.unravel_index(np.nanargmax(peak*mask), peak.shape)
position_order = 1./peak.value
position_order[np.isnan(peak)] = np.inf
sp = pcube.get_spectrum(*start_point)
sp.plotter()
sp.specfit(fittype='ch3cn', guesses=guesses)
pcube.fiteach(fittype='ch3cn', guesses=guesses, integral=False,
verbose_level=3, start_from_point=start_point,
use_neighbor_as_guess=True, position_order=position_order,
limitedmax=[T,T,T,T],
limitedmin=[T,T,T,T],
maxpars=[100,5,1500,1e18],
minpars=[0,0.1,50,1e13],
signal_cut=0,
maskmap=mask,
errmap=err.value, multicore=4)
pcube.write_fit('e8_CH3CN_Emission_fits.fits', clobber=True)
min_background = 100
background_guess = med.value
background_guess[background_guess < min_background] = min_background
guesses = np.empty((5,)+cube.shape[1:], dtype='float')
guesses[0,:] = 62
guesses[1,:] = 2
guesses[2,:] = 250.
guesses[3,:] = 1e16
guesses[4,:] = background_guess
# again, try cropping out the k=0,1 lines under the assumption that they do not
# trace the disk
pcube_cont = pyspeckit.Cube(cube=contcubeK[:400,:,:])
start_point = (71,66)#np.unravel_index(np.nanargmax(peak*mask), peak.shape)
sp = pcube_cont.get_spectrum(71,66)
sp.specfit(fittype='ch3cn_absorption', guesses=guesses[:,66,71],
limitedmax=[T,T,T,T,T], limitedmin=[T,T,T,T,T],
maxpars=[100,5,1500,1e18,10000],
minpars=[0,0.1,50,1e13,100],)
pcube_cont.fiteach(fittype='ch3cn_absorption', guesses=guesses, integral=False,
verbose_level=3, start_from_point=start_point,
use_neighbor_as_guess=True, position_order=position_order,
limitedmax=[T,T,T,T,T],
limitedmin=[T,T,T,T,T],
maxpars=[70,5,1500,1e18,10000],
minpars=[40,0.1,50,1e13,min_background],
signal_cut=0,
maskmap=absorption_mask,
errmap=err.value, multicore=4)
pcube_cont.write_fit('e8_CH3CN_Absorption_fits.fits', clobber=True)
from astropy import coordinates
e8 = coordinates.SkyCoord("19h23m43.90s", "14d30m28.3s", frame='fk5')
pcube_cont.show_fit_param(0,vmin=58,vmax=63)
pcube_cont.mapplot.FITSFigure.recenter(e8.ra.deg, e8.dec.deg, 0.3/3600.)
pcube.show_fit_param(0, vmin=60, vmax=70)
pcube.mapplot.FITSFigure.recenter(e8.ra.deg, e8.dec.deg, 0.3/3600.)
# from kinematic_analysis_pv_LB import diskycoords, outflowpath
# import pvextractor
# import pylab as pl
#
# pl.figure(5).clf()
# for width in (None, 0.05*u.arcsec, 0.1*u.arcsec, 0.15*u.arcsec):
# diskypath = pvextractor.Path(diskycoords, width)
# extracted_disky = pvextractor.extract_pv_slice(pcube_cont.parcube[0:1,:,:], diskypath, wcs=pcube_cont.wcs)
#
# pl.plot(extracted_disky.data.squeeze(), label=str(width))
#
# pl.xlabel("Offset (pixels)")
# pl.ylabel("Velocity (km/s)")
# pl.ylim(55,60)
# pl.xlim(855,890)
#
# pl.legend(loc='best')
#
# pl.figure(6).clf()
# diskypath = pvextractor.Path(diskycoords, width=None)
# extracted_disky = pvextractor.extract_pv_slice(pcube_cont.parcube[0:1,:,:], diskypath, wcs=pcube_cont.wcs)
# extracted_disky_width = pvextractor.extract_pv_slice(np.abs(pcube_cont.parcube[1:2,:,:]), diskypath, wcs=pcube_cont.wcs)
#
# pl.fill_between(np.arange(len(extracted_disky.data.squeeze())),
# extracted_disky.data.squeeze()-extracted_disky_width.data.squeeze(),
# extracted_disky.data.squeeze()+extracted_disky_width.data.squeeze(),
# alpha=0.5)
# pl.plot(extracted_disky.data.squeeze())
#
#
# pl.xlabel("Offset (pixels)")
# pl.ylabel("Velocity (km/s)")
# pl.ylim(52,62)
# pl.xlim(855,890)
#
| [
"keflavich@gmail.com"
] | keflavich@gmail.com |
aac340369aca6edb334b4fe4d8ece4fa11967c4b | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2151/60788/281320.py | 6778e5f7a57d954b16e2da82cdf7a01966bd5874 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,828 | py | import copy
class Disjoint_set:
    """Disjoint-set over elements 1..n, stored as a list of groups.

    ``self.li`` is a list of lists; each inner list is one group.
    ``query(x)`` returns the index of x's group in ``self.li`` (0 if x is
    not found), so two elements are in the same set iff their query
    results are equal at the same point in time.
    """
    def __init__(self,n):
        # One singleton group per element; elements are 1-indexed.
        self.li=[[x+1] for x in range(n)]
    def union(self,x,y):
        """Merge the groups containing x and y (no-op if already merged)."""
        index1=0
        index2=0
        for i in range(len(self.li)):
            if x in self.li[i]:
                index1=i
            if y in self.li[i]:
                index2=i
        # Bug fix: if x and y already share a group, the original code
        # appended the group to itself (duplicating every element) and then
        # popped the whole group, silently losing all of its members.
        if index1==index2:
            return
        self.li[index1]+=self.li[index2]
        self.li.pop(index2)
    def query(self,x):
        """Return the index of the group containing x (0 if not present)."""
        for i in range(len(self.li)):
            if x in self.li[i]:
                return i
        return 0
class Edge:
    """Weighted edge between two 1-indexed vertices.

    Attributes: dot1/dot2 are the endpoint labels, value is the weight.
    """
    def __init__(self,a,b,value):
        self.dot1=a
        self.dot2=b
        self.value=value
    def __repr__(self):
        # Debug-friendly representation; the original class printed only
        # the default object address.
        return "Edge({}, {}, {})".format(self.dot1, self.dot2, self.value)
def ten_to_three(number):
    """Convert a non-negative decimal integer to its base-3 digit string.

    Bug fix: the original loop produced '' (empty string) for 0; it now
    returns '0'. Positive inputs behave exactly as before.
    """
    if number == 0:
        return '0'
    num = number
    digits = []
    while num != 0:
        digits.append(str(num % 3))
        num = num // 3
    digits.reverse()
    return ''.join(digits)
def three_to_ten(a):
    """Interpret the digit string *a* as base 3 and return its decimal value.

    An empty string yields 0. Digits are not range-checked, matching the
    permissive behavior of the rest of this script.
    """
    result = 0
    for digit in a:
        result = result * 3 + int(digit)
    return result
def add(a, b):
    """Digit-wise mod-3 addition (no carries) of two digit strings.

    When the strings differ in length, the longer one's surplus leading
    digits are copied through verbatim; only the aligned tails are summed
    mod 3.
    """
    if len(a) >= len(b):
        longer, shorter = a, b
    else:
        longer, shorter = b, a
    overhang = len(longer) - len(shorter)
    # Surplus prefix of the longer string passes through unchanged.
    out = list(longer[:overhang])
    for x, y in zip(longer[overhang:], shorter):
        out.append(str((int(x) + int(y)) % 3))
    return ''.join(out)
def do_sum(x):
    """Fold the ``value`` of every edge in ``x`` together with digit-wise
    mod-3 addition (``add``) and return the accumulated digit string
    reinterpreted as a base-3 number (``three_to_ten``)."""
    total = '0'
    for edge in x:
        total = add(total, str(edge.value))
    return three_to_ten(total)
def f(disjoint,edge_li,total_edge):
    """Enumerate edge subsets of size ``total_edge`` that extend the forest
    tracked by ``disjoint`` without creating a cycle.

    NOTE(review): both ``edge_li`` (consumed via pop(0)) and ``disjoint``
    (mutated by union) are shared between the skip/take branches and never
    restored afterwards, so the two recursive calls do not see identical
    state -- confirm this pruning is intentional before refactoring.
    """
    # Not enough edges remain to reach the required count.
    if len(edge_li)<total_edge :
        return []
    elif len(edge_li)==0:
        return [[]]
    else:
        first_edge=edge_li.pop(0)
        # Only usable if it joins two different components (no cycle).
        if disjoint.query(first_edge.dot1)!=disjoint.query(first_edge.dot2):
            # Branch 1: subsets that skip first_edge.
            set1=f(disjoint,edge_li,total_edge)
            # Branch 2: subsets that take first_edge; merge its endpoints.
            disjoint.union(first_edge.dot1,first_edge.dot2)
            set2 =f(disjoint,edge_li,total_edge-1)
            for every_edges in set2:
                every_edges.append(first_edge)
            return set1+set2
        else:
            # first_edge would close a cycle; it can never be used here.
            return f(disjoint,edge_li,total_edge)
def seek_all_tree(edge_li,total_edge_num):
    """Return all spanning trees (as lists of edges) that use exactly
    ``total_edge_num`` edges.

    Splits on the first edge: trees excluding it are found recursively on a
    deep copy of the remaining edges; trees including it are completed by
    ``f`` with the edge's endpoints already merged in a fresh disjoint set.
    """
    if len(edge_li)==0 or len(edge_li)<total_edge_num:
        return []
    # total_edge_num == vertices - 1, so the vertex count is total_edge_num+1.
    disjoint_set=Disjoint_set(total_edge_num+1)
    first_edge=edge_li.pop(0)
    # Copy so this branch cannot consume edges the other branch still needs.
    trees1=seek_all_tree(copy.deepcopy(edge_li),total_edge_num)
    disjoint_set.union(first_edge.dot1,first_edge.dot2)
    trees2=f(disjoint_set,edge_li,total_edge_num-1)
    for tree in trees2:
        tree.append(first_edge)
    return trees1+trees2
# Read "<vertex count> <edge count>" from the first input line.
line1=input().strip()
dot_num=int(line1.split()[0])
edge_num=int(line1.split()[1])
edge_list=[]
# One edge per line: "<endpoint> <endpoint> <weight>".
for i in range(edge_num):
    line=input().strip()
    edge=Edge(int(line.split()[0]),int(line.split()[1]),int(line.split()[2]))
    edge_list.append(edge)
# Enumerate all spanning trees (dot_num-1 edges each), score each with
# do_sum, and print the total modulo 1e9+7.
trees=seek_all_tree(edge_list,dot_num-1)
trees_value=[do_sum(x) for x in trees]
print(sum(trees_value)%1000000007)
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
d2cd0667277f7d6b98e2d0ac48d781ee254dc794 | 2869eb01810389ab7b64355ec189800e7b1d49b9 | /MetaCTF 2021/Crypto/hideandseek/solve.py | 6cfdd54cf61a129c4c65c01876d7a134ef749e84 | [] | no_license | Giantpizzahead/ctf-archive | 096b1673296510bddef9f284700ebdb0e76d71a7 | 5063cd2889cd300aade440429faf9c4ca68511ef | refs/heads/master | 2021-12-28T14:23:33.598960 | 2021-12-20T17:23:55 | 2021-12-20T17:23:55 | 252,905,854 | 1 | 0 | null | 2021-12-20T17:14:05 | 2020-04-04T04:11:24 | C | UTF-8 | Python | false | false | 4,606 | py | import gmpy2
import math
import itertools
# Captured network traffic, one hex-encoded packet per line.
with open('raw2.txt', 'r') as fin:
    raw = fin.readlines()

# Predict random bits
# Previously recovered 32-bit PRNG output words (-1 marks unknown words).
# NOTE(review): eval() on file contents is fine for a local CTF artifact,
# but never do this with untrusted input.
with open('randbits.txt', 'r') as fin:
    rand_bits = eval(fin.read())
curr_bit = 0  # cursor into rand_bits, advanced by predictrandbits()
def predictrandbits(n):
    """Reassemble an n-bit value from the recovered 32-bit PRNG words.

    Consumes ``n // 32`` words from the module-global ``rand_bits`` starting
    at the cursor ``curr_bit`` (least-significant word first).  The cursor is
    always advanced; -1 is returned when any consumed word is unknown
    (recorded as -1 in ``rand_bits``).
    """
    global curr_bit
    word_count = n // 32
    words = [rand_bits[curr_bit + i] for i in range(word_count)]
    curr_bit += word_count
    value = 0
    for word in reversed(words):
        value = value * (2 ** 32) + word
    return -1 if -1 in words else value
last_00 = False  # set True by a client 0x00 "new public key" request; lets parse_raw pair the next server key packet with it
def parse_raw(i):
    """Decode line ``i`` of the capture and classify the packet.

    Returns one of:
      'request'                - client asked the server for a new public key
                                 (NOTE(review): ``('request')`` is just the
                                 string, not a 1-tuple)
      ('sent', cipher_int)     - client -> server encrypted message
      ('public', e, p, q)      - server public key, with p/q predicted from
                                 the recovered PRNG stream (-1 if unknown)
      ('private', e, p, q)     - server private key read from the packet
      ('received', cipher_int) - server -> client encrypted message

    Mutates the module-global ``last_00`` to pair each server key packet
    with the preceding client request.
    """
    global last_00
    # print(i, end=' ')
    if raw[i][0] == 'C':
        # Client
        packet = bytes.fromhex(raw[i][2:])
        if packet[0] == 0:
            # Requesting new public key from the server
            # print('Public key request')
            last_00 = True
            return ('request')
        else:
            # Encrypted message
            # print('Sent encrypted message') # {}'.format(packet[2:]))
            return ('sent', int.from_bytes(packet[1:], byteorder="big"))
    else:
        # Server
        packet = bytes.fromhex(raw[i])
        if packet[0] == 0:
            # Getting a new key
            e = int.from_bytes(packet[1:4], byteorder="big")
            if last_00:
                # Public key
                n = int.from_bytes(packet[4:260], byteorder="big")
                print('public')
                # print('New public key e={} n={}'.format(e, n))
                last_00 = False
                # return ('public', e, n)
                # Attempt to predict the public key
                p = gmpy2.next_prime(predictrandbits(1024))
                q = gmpy2.next_prime(predictrandbits(1024))
                # print(e, p, q)
                return ('public', e, p, q)
            else:
                # Private key
                p = int.from_bytes(packet[4:132], byteorder="big")
                q = int.from_bytes(packet[132:260], byteorder="big")
                # Consume the same PRNG words to keep the stream in sync and
                # cross-check the prediction against the real primes.
                pp = gmpy2.next_prime(predictrandbits(1024))
                pq = gmpy2.next_prime(predictrandbits(1024))
                # NOTE(review): "pq != -1 and pq != -1" repeats the same
                # operand; this was presumably meant to be "pp != -1 and
                # pq != -1".
                if pq != -1 and pq != -1 and (p != pp or q != pq):
                    print('rand error ', p, pp, q, pq)
                # print('private {} {}'.format(p, q))
                # print('New private key e={} p={} q={}'.format(e, p, q))
                return ('private', e, p, q)
        else:
            # Receiving encrypted message
            # print('Received encrypted message')
            return ('received', int.from_bytes(packet[1:], byteorder="big"))
def encrypt(p, e, n):
    """Textbook-RSA encrypt the UTF-8 encoding of string ``p`` under (e, n)."""
    plaintext = int.from_bytes(p.encode("utf-8"), byteorder="big")
    return pow(plaintext, e, n)
keys = []  # FIFO of keys: (d, n) for private keys, (e, p, q) for predicted public keys
special = None  # only referenced by the disabled scratch code below
seen_private = set()  # private keys observed so far (duplicates are reported)
seen_public = set()  # public keys observed so far
prime_pairs = []  # (p, q) taken from every private-key packet
# Replay the capture: keys are queued in order of issue so the i-th encrypted
# message is decrypted with the i-th key, printing plaintext for both
# directions of the conversation.
for i in range(len(raw)):
    res = parse_raw(i)
    '''
    if i == 72:
        priv = gmpy2.invert(res[1], (res[2]-1) * (res[3]-1))
        special = (int(priv), res[2] * res[3])
    '''
    if res[0] == 'private':
        # Track private keys
        priv = gmpy2.invert(res[1], (res[2]-1) * (res[3]-1))
        key = (int(priv), res[2] * res[3])
        keys += [key]
        if key in seen_private:
            print('SEEN', key)
        seen_private.add(key)
        prime_pairs += [(res[2], res[3])]
    elif res[0] == 'received':
        # Decrypt the message
        # Server -> client: we hold the matching (d, n) private key directly.
        '''
        if i == 140:
            msg = pow(res[1], special[0], special[1])
            msg = pow(res[1], keys[0][0], keys[0][1])
        else:
        '''
        msg = pow(res[1], keys[0][0], keys[0][1])
        keys = keys[1:]
        print('S:', msg.to_bytes(256, byteorder="big").lstrip(b'\x00'))
    elif res[0] == 'public':
        # Track public keys
        key = (res[1], res[2], res[3])
        keys += [key]
        # if key in seen_public:
        #     print('SEEN', key)
        seen_public.add(key)
    elif res[0] == 'sent':
        # Try to decrypt the message
        # Client -> server: factor with the predicted p, q; skip when the
        # PRNG prediction failed (p or q is -1).
        e, p, q = keys[0]
        keys = keys[1:]
        if p != -1 and q != -1:
            d = gmpy2.invert(e, (p-1) * (q-1))
            msg = int(pow(res[1], d, p*q))
            if msg < 0: msg = 0
            print('C:', msg.to_bytes(256, byteorder="big").lstrip(b'\x00'))
        else:
            print('C: Cannot decrypt')
'''
e, n = keys[0]
keys = keys[1:]
guesses = [
'Do you want to play a game?',
'Wanna play a game?',
'Want to play a game?',
'Let\'s play a game.',
'Let\'s play a game!',
'Ok.',
'Ok!',
'Sure.',
'No.',
'No',
'Yes',
'Yes.',
'No problem!',
'No problem',
'No problem.',
'Nice.',
'Nice!',
'Nice',
'Not bad.',
'Not bad!',
'Not bid',
'Good job.',
'Good job!',
'Good job',
'Nice work.',
'Nice work!',
'Nice work',
'Fine',
'Fine.',
'Fine!',
]
for guess in itertools.permutations('abcdefghijklmnopqrstuvwxyz0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ.?!', r=2): #guesses:
guess = ''.join(guess)
enc = encrypt(guess, e, n)
if enc == res[1]:
print('CORRECT', res[1], enc)
'''
'''
for a in prime_pairs:
for b in seen_public:
if a[0] * a[1] == b[1]:
print('REUSED', a[0], a[1], b[1])
if math.gcd(a[0], b[1]) != 1 or math.gcd(a[1], b[1]) != 1:
print('GCD prime', a[0], a[1], b[1])
for a in seen_public:
for b in seen_public:
if a[1] != b[1] and math.gcd(a[1], b[1]) != 1:
print('GCD public', a[1], b[1])
'''
| [
"43867185+Giantpizzahead@users.noreply.github.com"
] | 43867185+Giantpizzahead@users.noreply.github.com |
25c0219880c40b5103c897230579f8ae55bbd1d3 | 2e682fa5f6a48d7b22b91e5d518fc28ff0fbb913 | /promoSystemProject/asgi.py | 8b2455dc7b1d3337f6fc58b37ddfa879e494bf9f | [] | no_license | Mohamed-awad/PromoSystem | 90c17d67fdac05c1652cd269b2d4b5f8015bdbb3 | 173abd9bade0524daff8c91c73d24c68d02cf4a5 | refs/heads/master | 2023-07-24T03:28:54.411511 | 2021-08-29T14:11:42 | 2021-08-29T14:11:42 | 299,772,631 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | """
ASGI config for promoSystemProject project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'promoSystemProject.settings')
# Module-level ASGI callable that ASGI servers import and serve.
application = get_asgi_application()
| [
"awadmohamed233@gmail.com"
] | awadmohamed233@gmail.com |
b80e0355fffad9d7410648f19d6a06c0bb3f39ff | 6f31a15cb73175084f2c4485d3dea0b8975b2ec9 | /egs/yomdle_zh/v1/local/prepare_lexicon.py | 3ebb52e38f4d3ce7f9cafbcc40bfb03b91006d74 | [
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | Idlak/idlak | c7cd5e6c0b02918cda85dbb2fb5c7333a789c304 | 4be6f7d951ba0d707a84a2cf8cbfc36689b85a3c | refs/heads/master | 2021-11-23T13:28:43.709163 | 2021-11-01T15:51:46 | 2021-11-01T15:51:46 | 127,285,931 | 65 | 26 | NOASSERTION | 2021-11-01T15:51:47 | 2018-03-29T12:06:52 | Shell | UTF-8 | Python | false | false | 1,634 | py | #!/usr/bin/env python3
# Copyright 2018 Ashish Arora
# Chun-Chieh Chang
import argparse
import os
# Command-line interface: an output directory plus the data directory that
# contains the train/text transcription file.
parser = argparse.ArgumentParser(description="""Creates the list of characters and words in lexicon""")
parser.add_argument('dir', type=str, help='output path')
parser.add_argument('--data-dir', type=str, default='data', help='Path to text file')
args = parser.parse_args()
### main ###
lex = {}
text_path = os.path.join(args.data_dir, 'train', 'text')
# Bug fix: the original also opened text_path here into an unused ``text_fh``
# handle that was never read or closed (a file-descriptor leak); the file is
# opened properly below with a context manager.

# Used specially for Chinese.
# Uses the ChangJie keyboard input method to create subword units for Chinese.
cj5_table = {}
with open('download/cj5-cc.txt', 'r', encoding='utf-8') as f:
    for line in f:
        line_vect = line.strip().split()
        if not line_vect[0].startswith('yyy') and not line_vect[0].startswith('z'):
            cj5_table[line_vect[1]] = "cj5_" + " cj5_".join(list(line_vect[0]))

# Map every word of the training text to its character/subword sequence.
with open(text_path, 'r', encoding='utf-8') as f:
    for line in f:
        line_vect = line.strip().split()
        for i in range(1, len(line_vect)):
            characters = list(line_vect[i])
            # Put SIL instead of "|". Because every "|" in the beginning of the words is for initial-space of that word
            characters = " ".join([ 'SIL' if char == '|' else cj5_table[char] if char in cj5_table else char for char in characters])
            characters = characters.replace('#','<HASH>')
            lex[line_vect[i]] = characters

# Write the lexicon sorted by word.
with open(os.path.join(args.dir, 'lexicon.txt'), 'w', encoding='utf-8') as fp:
    for key in sorted(lex):
        fp.write(key + " " + lex[key] + "\n")
| [
"dpovey@gmail.com"
] | dpovey@gmail.com |
090d7331f80b90bb5fcd60512140923e61e2f4ad | f9ed6d4ef53874453ff429bd67bfdc91369c5d82 | /misc/h5_stats.py | 06c8b662dca2d90920c7a3fa2f1ed56d03e87aa7 | [] | no_license | axeltidemann/propeller | 35a79464f6f0ee249c2d5e91d5e31d2fe6c08b33 | b9485f523812bf37c40404d052e05a43f6dc0799 | refs/heads/master | 2020-04-04T07:37:07.430215 | 2019-06-05T04:45:48 | 2019-06-05T04:45:48 | 39,445,622 | 5 | 1 | null | 2019-05-29T04:55:46 | 2015-07-21T13:02:33 | Python | UTF-8 | Python | false | false | 1,345 | py | # Copyright 2016 Telenor ASA, Author: Axel Tidemann
'''
Collects stats of categories.
'''
import argparse
import glob
import os
import shutil
import pandas as pd
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
# NOTE: Python 2 script (print statements below).
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
    'data_folder',
    help='Folder with Inception states for training')
parser.add_argument(
    '--filename',
    help='Filename to save',
    default='counts.png')
parser.add_argument(
    '--limit',
    # NOTE(review): no type= is given, so a CLI-supplied value arrives as a
    # str while the default is the int 1000 -- confirm the length comparison
    # below behaves as intended.
    help='Minimum amount of data necessary',
    default=1000)
parser.add_argument(
    '--target',
    help='Where to put the file',
    default='/mnt/kaidee/curated/')
args = parser.parse_args()

files = sorted(glob.glob('{}/*.h5'.format(args.data_folder)))

# Count the rows of each category's HDF5 'data' table; categories with more
# than --limit rows are copied into the curated target folder.
counts = []
categories = []
for h5_file in files:
    length = len(pd.read_hdf(h5_file, 'data'))
    counts.append(length)
    category = os.path.basename(h5_file).replace('.h5','')
    categories.append(category)
    print '{}: {}'.format(category, length)
    if length > args.limit:
        shutil.copy(h5_file, args.target)
        print '--> copied to {}'.format(args.target)

# Bar chart of per-category counts, saved via the non-interactive Agg backend.
sns.barplot(range(len(categories)), counts)
plt.xticks(range(len(categories)), categories)
plt.savefig(args.filename, dpi=300)
| [
"axel.tidemann@gmail.com"
] | axel.tidemann@gmail.com |
560a74eb37260a149149b1dd7d19a349fdb35649 | 6e51548bc288d9ff0630053ece14b349838331a0 | /_gen/stud.py | ca62221c579956b2adf75a4b09842e8aee1fbca7 | [] | no_license | Olisss/students | 70128cba924ef3fc9301bb39f795781976a27b7b | 60672aa728ee2b6edc54932b14343a05ff970548 | refs/heads/master | 2021-08-31T22:14:43.570241 | 2017-09-19T12:56:29 | 2017-09-19T12:56:29 | 104,072,995 | 0 | 0 | null | 2017-09-19T12:40:02 | 2017-09-19T12:40:02 | null | UTF-8 | Python | false | false | 8,222 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 uralbash <root@uralbash.ru>
#
# Distributed under terms of the MIT license.
"""
Parse students
"""
# standard library
import os
import json
import shutil
import logging
import collections
from pathlib import Path
# third-party
import http.client
from typing import Any, Dict, List # flake8: noqa
from mako.template import Template
PATH_TO_GROUP: str = '../Группы/'  # groups root ("Группы" = "Groups"): one <group>.json descriptor plus one directory per group
class Course(object):
    """A course with per-student JSON templates stored on disk.

    Missing paths are reported with a logging warning and replaced by the
    '__NO_EXIST__' placeholder so downstream existence checks simply fail.
    """
    def __init__(
            self,
            name: str,
            values: Dict[str, Any]
    ) -> None:
        self.name = name
        self.year: int = values.get('year')
        self.session: int = values.get('session')
        self.exam: str = 'exam' if values.get('exam') else 'noexam'
        self.__course_path: Path = Path('..') / Path(name)
        self.template_name: str = (
            f'{self.year}.{self.session}.{self.exam}.student.json'
        )

    @property
    def data(self) -> Dict[str, Any]:
        """Parsed template JSON, or an empty dict when the template is absent."""
        template = self.template_path
        if not template.exists():
            return {}
        return json.load(template.open())

    @property
    def checkpoints(self) -> List[str]:
        """reST link lines, one per checkpoint declared in the template."""
        lines: List[str] = []
        for _, item in self.data.get('checkpoints', {}).items():
            lines.append(
                f'`{item.get("name", "")}'
                f' <{item.get("url", "./")}>`_'
                f' {item.get("date")}'
            )
        return lines

    @property
    def template_path(self) -> Path:
        """Path to the student JSON template (placeholder when missing)."""
        candidate: Path = self.course_path / '_templates' / self.template_name
        if candidate.exists():
            return candidate
        logging.warning(f'Template "{candidate}" doesn\'t exist')
        return Path('__NO_EXIST__')

    @property
    def course_path(self) -> Path:
        """Path to the course directory (placeholder when missing)."""
        if self.__course_path.exists():
            return self.__course_path
        logging.warning(f'Course "{self.name}" doesn\'t exist')
        return Path('__NO_EXIST__')

    def score_path(self, group) -> Path:
        """Score-report file for *group* inside the course directory."""
        return self.course_path / f'{self.year}.{self.session}.{group}.rst'
class Student(object):
    '''
    Student factory: creates/updates one JSON file per (student, course)
    under the student's directory, based on the course template.
    '''
    def __init__(
            self,
            name: str,
            group: str,
            github: str,
            path: Path,
            subjects: Dict[str, Any]
    ) -> None:
        self.name = name
        self.group = group
        self.github = github
        self.path = path
        self.subjects = subjects

    def set_github(self, course: Course) -> None:
        """Backfill the student's GitHub nickname from the per-course JSON
        file into both this object and the group's <group>.json descriptor."""
        if self.github:
            return None
        dst: Dict[str, Any] = json.load(self._dst_path(course).open())
        github_nickname: str = dst.get('github_nickname', None)
        if not github_nickname:
            return None
        self.github = github_nickname
        path: Path = Path(PATH_TO_GROUP) / f'{self.group}.json'
        data: Dict[str, Any] = json.load(path.open())
        # NOTE(review): data.get('students') with no default raises TypeError
        # if the key is missing -- confirm every group file has 'students'.
        for item in data.get('students'):
            if item.get('name') == self.name:
                item['github'] = github_nickname
        json.dump(data, path.open('w'), ensure_ascii=False, indent=2)

    def checkpoints(self, course: Course) -> List[Dict[str, Any]]:
        """Return checkpoint rows for the score report: each checkpoint's
        'total' metadata plus its score ('' when the score is not positive)."""
        dst_path: Path = self._dst_path(course)
        data: Dict[str, Any] = json.load(dst_path.open())
        return [
            {
                **items.get('total', {}),
                **{
                    'score': items.get('score', 0)
                    if items.get('score', 0) > 0
                    else ''
                }
            }
            for _, items in data.get('checkpoints', {}).items()
        ]

    def _dst_path(self, course: Course) -> Path:
        # Per-course JSON file inside this student's directory.
        return self.path / f'{course.year}.{course.session}.{course.name}.json'

    def make(self) -> None:
        """Create or refresh the per-course JSON file for every subject."""
        key: str
        values: Dict[str, Any]
        # Walk courses
        for key, values in self.subjects.items():
            course: Course = Course(key, values)
            course_template: Path = course.template_path
            if not course_template.exists():
                continue
            # Check file exist and update is it
            dst_path: Path = self._dst_path(course)
            if not dst_path.exists():
                # New file: validate the GitHub account, then seed from the
                # course template.
                get_from_github(self.github)
                shutil.copy(course_template, dst_path)
            else:
                # Existing file: pull the nickname back and merge in any new
                # template fields.
                self.set_github(course)
                merge_json_files(
                    course_template,
                    dst_path,
                    {
                        'github_nickname': self.github
                    } if self.github else {})
def merge_json_files(
        src_path: Path,
        dst_path: Path,
        overwrite: Dict[str, Any] = {}) -> None:
    '''
    Merge two JSON files into *dst_path*.

    Keys from *src_path* act as defaults, existing keys in *dst_path* win,
    and *overwrite* entries are forced on top.  (*overwrite* is never
    mutated, so the shared default dict is safe.)
    '''
    with src_path.open() as fh:
        src: Dict[str, Any] = json.load(fh)
    with dst_path.open() as fh:
        dst: Dict[str, Any] = json.load(fh)
    # Bug fix: the original ran ``src.update(dst)`` but then dumped the
    # untouched ``dst``, so template defaults from src were silently dropped.
    merged: Dict[str, Any] = {**src, **dst}
    merged.update(overwrite)
    with dst_path.open('w') as fh:
        json.dump(merged, fh, ensure_ascii=False, indent=2)
def get_from_github(login: str) -> str:
    '''
    Return the avatar URL for *login* if it is an existing GitHub user.

    Queries the GitHub API first; on HTTP 403 (rate limit) it falls back to
    requesting the public profile page, returning '' when the page exists
    (no avatar URL is available that way).  Raises ``Exception`` when the
    user does not exist or is not a regular user account.  Blank logins
    short-circuit to ''.
    '''
    if not login.strip():
        return ''
    conn = http.client.HTTPSConnection("api.github.com")
    conn.request("GET", f"/users/{login}", headers={'User-Agent': 'USTU/IIT'})
    response = conn.getresponse()
    if response.status == 403:
        # Rate-limited by the API: fall back to the public profile page.
        conn = http.client.HTTPSConnection("github.com")
        conn.request(
            "GET", f"/{login}", headers={'User-Agent': 'USTU/IIT'}
        )
        response = conn.getresponse()
        if response.status == 200:
            return ''
        else:
            # Bug fix: logging.error() was called as error(status, body),
            # which treats the int status as the format string; pass a real
            # message with lazy %-style arguments instead.
            logging.error("GitHub returned %s: %s", response.status, response.read())
            raise Exception(f'User {login} not found')
    elif response.status != 200:
        logging.error("GitHub API returned %s: %s", response.status, response.read())
        raise Exception(f'User {login} not found')
    info: Dict[str, Any] = json.load(response)  # type: ignore
    if info['type'] != 'User':
        raise Exception(f'Wrong user type {info["type"]}')
    return info['avatar_url']
def make_group(file_name: str) -> None:
    '''
    Create group folder and add students.

    Reads a <group>.json descriptor, creates a directory per student,
    builds/refreshes each student's per-course files, and finally renders
    the score.mako report for every course of the group.
    '''
    with open(file_name) as file:
        data: Dict[str, Any] = json.load(file)
    group: str = data.get("name", "")
    # NOTE(review): the '' default for a list-valued field looks accidental,
    # though all((group, students)) below rejects it either way.
    students: List[Dict] = data.get("students", "")
    subjects: Dict[str, Any] = data.get("subjects", {})
    # Make group dir
    if not all((group, students)):
        raise Exception(f'No group or students in {file_name}')
    path: Path = Path(PATH_TO_GROUP + group)
    path.mkdir(exist_ok=True)
    # Walk students
    students_obj: List[Student] = []
    student: Dict[str, Any]
    for student in students:
        name: str = student.get('name', '')
        # Require at least a two-part full name.
        if len(name.split()) < 2:
            raise Exception(f'Bad fullname {name}')
        stud_path = path / name
        stud_path.mkdir(exist_ok=True)
        github: str = student.get('github', '')
        try:
            _obj: Student = Student(
                name,
                group,
                github,
                stud_path,
                subjects
            )
        except Exception as e:
            # Skip the student but keep processing the rest of the group.
            logging.error(str(e))
            continue
        print(_obj.name)
        _obj.make()
        students_obj.append(_obj)
    # Rebuild score reports
    for key, values in subjects.items():
        course: Course = Course(key, values)
        score_path: Path = course.score_path(group)
        if not score_path.parent.exists():
            continue
        with open('./score.mako') as fo:
            score: str = Template(fo.read()).render(
                course=course,
                group_name=group,
                students=students_obj,
                collections=collections
            )
        score_path.write_text(score)
# Walk groups
# Process every <group>.json descriptor found in the groups folder.
for pos_json in os.listdir(PATH_TO_GROUP):
    if pos_json.endswith('.json'):
        make_group(PATH_TO_GROUP + pos_json)
| [
"root@uralbash.ru"
] | root@uralbash.ru |
4c37c257452df18c4aa08f338e5f08b9751c4952 | 80a3d98eae1d755d6914b5cbde63fd10f5cc2046 | /autox/autox_video/mmaction2/configs/skeleton/2s-agcn/2sagcn_80e_ntu60_xsub_bone_3d.py | 4a8ffbfc977e37ecd0bf127efd5462d57e5d6ed2 | [
"Apache-2.0"
] | permissive | 4paradigm/AutoX | efda57b51b586209e1d58e1dab7d0797083aadc5 | 7eab9f4744329a225ff01bb5ec360c4662e1e52e | refs/heads/master | 2023-05-24T00:53:37.109036 | 2023-02-14T14:21:50 | 2023-02-14T14:21:50 | 388,068,949 | 752 | 162 | Apache-2.0 | 2022-07-12T08:28:09 | 2021-07-21T09:45:41 | Jupyter Notebook | UTF-8 | Python | false | false | 2,471 | py | model = dict(
type='SkeletonGCN',
backbone=dict(
type='AGCN',
in_channels=3,
graph_cfg=dict(layout='ntu-rgb+d', strategy='agcn')),
cls_head=dict(
type='STGCNHead',
num_classes=60,
in_channels=256,
loss_cls=dict(type='CrossEntropyLoss')),
train_cfg=None,
test_cfg=None)
dataset_type = 'PoseDataset'
ann_file_train = 'data/ntu/nturgb+d_skeletons_60_3d/xsub/train.pkl'
ann_file_val = 'data/ntu/nturgb+d_skeletons_60_3d/xsub/val.pkl'
train_pipeline = [
dict(type='PaddingWithLoop', clip_len=300),
dict(type='PoseDecode'),
dict(type='JointToBone'),
dict(type='FormatGCNInput', input_format='NCTVM'),
dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['keypoint'])
]
val_pipeline = [
dict(type='PaddingWithLoop', clip_len=300),
dict(type='PoseDecode'),
dict(type='JointToBone'),
dict(type='FormatGCNInput', input_format='NCTVM'),
dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['keypoint'])
]
test_pipeline = [
dict(type='PaddingWithLoop', clip_len=300),
dict(type='PoseDecode'),
dict(type='JointToBone'),
dict(type='FormatGCNInput', input_format='NCTVM'),
dict(type='Collect', keys=['keypoint', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['keypoint'])
]
data = dict(
videos_per_gpu=12,
workers_per_gpu=2,
test_dataloader=dict(videos_per_gpu=1),
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix='',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix='',
pipeline=val_pipeline),
test=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix='',
pipeline=test_pipeline))
# optimizer
optimizer = dict(
type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001, nesterov=True)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='step', step=[30, 40])
total_epochs = 80
checkpoint_config = dict(interval=3)
evaluation = dict(interval=3, metrics=['top_k_accuracy'])
log_config = dict(interval=100, hooks=[dict(type='TextLoggerHook')])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/2sagcn_80e_ntu60_xsub_bone_3d/'
load_from = None
resume_from = None
workflow = [('train', 1)]
| [
"caixiaochen@4ParadigmdeMacBook-Pro.local"
] | caixiaochen@4ParadigmdeMacBook-Pro.local |
c9940272738fd5a2a46479dd072d9a4990c8b199 | bec623f2fab5bafc95eb5bd95e7527e06f6eeafe | /django-gc-shared/payments/systems/forsazh.py | 05a67bfb1ea63578e3361e141481e6e5c7b25d26 | [] | no_license | riyanhax/a-demo | d714735a8b59eceeb9cd59f788a008bfb4861790 | 302324dccc135f55d92fb705c58314c55fed22aa | refs/heads/master | 2022-01-21T07:24:56.468973 | 2017-10-12T13:48:55 | 2017-10-12T13:48:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,908 | py | # -*- coding: utf-8 -*-
import re
from decimal import Decimal
from django import forms
from django.utils.functional import lazy
from django.utils.translation import ugettext_lazy as _, string_concat
from currencies.currencies import RUR
from geobase.models import Country
from payments.systems import base
from payments.systems.base import (
FormWithCity, FormWithCountry, FormWithName, PhonePursePaymentForm
)
from shared.widgets import DateWidget
name = u"Форсаж"
slug = __name__.rsplit(".", 1)[-1]
mt4_payment_slug = "Forsazh"
logo = "moneygram.png"
languages = ('ru',)
currencies = ['USD', 'EUR', 'RUB']
display_amount = lazy(RUR.display_amount, unicode)
transfer_details = {
"withdraw": {
"time": _("up to 3 days"),
"min_amount": display_amount(5000),
"fee": u"3%",
}
}
templates = {
"deposit": "payments/forms/deposit/moneygram.html",
"withdraw": "payments/forms/withdraw/wu_fastpost_moneygram.html",
}
class DepositForm(base.DepositForm):
    """Deposit form for Forsazh; the base implementation needs no changes."""
    pass
class DetailsForm(base.DetailsForm, FormWithName, FormWithCountry, FormWithCity, PhonePursePaymentForm):
    """Recipient identity details for a Forsazh transfer.

    Field labels/help texts are Russian user-facing strings and are
    intentionally left untranslated (they are runtime data, not comments).
    """
    nationality = forms.ModelChoiceField(label=u"Гражданство", queryset=Country.objects.all())
    address = forms.CharField(label=u"Адрес", max_length=100, help_text=u"Адрес постоянной регистрации")
    document_type = forms.CharField(label=u"Тип документа", max_length=100, help_text=u"Тип документа, "
                                                                                      u"удостоверяющего личность")
    document_number = forms.CharField(label=u"Номер документа", max_length=100, help_text=u"Номер документа")
    document_issuer = forms.CharField(label=u"Кем выдан", max_length=100)
    document_issued_date = forms.DateField(label=u"Дата выдачи", widget=DateWidget())
    document_expiry = forms.DateField(label=u"Срок действия", widget=DateWidget(), required=False,
                                      help_text=u"Срок действия документа, удостоверяющего личность")
    birthday = forms.DateField(label=u"Дата рождения", widget=DateWidget())
    birthplace = forms.CharField(label=u"Место рождения", max_length=100)

    def __init__(self, *args, **kwargs):
        super(DetailsForm, self).__init__(*args, **kwargs)
        # Relabel the inherited 'purse' field as a phone number and prefill
        # several fields from the authenticated user's profile.
        self.fields["purse"].label = u"Номер телефона"
        self.fields["country"].help_text = _("The country in which you receive transfer")
        self.fields["city"].help_text = _("The city in which you receive transfer")
        profile = self.request.user.profile
        self.fields["purse"].initial = profile.phone_mobile
        self.fields["nationality"].initial = profile.country
        self.fields["birthday"].initial = profile.birthday

    def clean_purse(self):
        # NOTE(review): the field is labelled as a phone number above, yet
        # this validator accepts only uppercase latin letters, spaces and
        # dashes -- confirm which format is actually expected.
        if not re.match(r'^[A-Z -]*$', self.cleaned_data['purse']):
            raise forms.ValidationError(_('Only uppercase latin letters and space are allowed'))
        return self.cleaned_data['purse'].upper()
class WithdrawForm(base.WithdrawForm):
    """Withdrawal form for Forsazh.

    The 3% commission here mirrors the "fee" advertised in the module-level
    ``transfer_details`` above.
    """
    commission_rate = Decimal("0.03")
    # User-visible summary of the withdrawal terms (lazy-translated and
    # interpolated with transfer_details values by the template layer).
    info = string_concat(
        _("Withdrawal of funds from the account - %(time)s"),
        "\n",
        _("Rouble transfer commission - %(fee)s"),
        "\n",
        _("Minimal sum of withdrawal - %(min_amount)s")
    )

    def __init__(self, *args, **kwargs):
        super(WithdrawForm, self).__init__(*args, **kwargs)
        # self.fields["country"].help_text = _("The country in which you receive transfer")
        # self.fields["city"].help_text = _("The city in which you receive transfer")
        self.fields["currency"].help_text = _("Choose currency")
def _get_min_amount(self, account):
return 5000, RUR | [
"ibalyko@ubuntu-server-16-04"
] | ibalyko@ubuntu-server-16-04 |
0e90c8be1d35f2c8c360430c4472a00833372326 | a8062308fb3bf6c8952257504a50c3e97d801294 | /problems/N1162_As_Far_From_Land_As_Possible.py | 10b5552d6fd60c2f0ab9b8a7512332ed2e7b0d49 | [] | no_license | wan-catherine/Leetcode | 650d697a873ad23c0b64d08ad525bf9fcdb62b1b | 238995bd23c8a6c40c6035890e94baa2473d4bbc | refs/heads/master | 2023-09-01T00:56:27.677230 | 2023-08-31T00:49:31 | 2023-08-31T00:49:31 | 143,770,000 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,097 | py | """
consider from '1' not '0'
"""
class Solution(object):
def maxDistance(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
n = len(grid)
stack = []
res = -1
for i in range(n):
for j in range(n):
if grid[i][j] == 1:
stack.append((i, j))
grid[i][j] = 2
step = 0
directions = [(1,0),(0,1),(-1,0),(0,-1)]
while stack:
new_stack = []
for i, j in stack:
for r, c in directions:
row, col = r+i, c+j
if row < 0 or row >= n or col < 0 or col >= n:
continue
if grid[row][col] == 0:
res = max(res, step + 1)
grid[row][col] = 2
new_stack.append((row, col))
continue
if grid[row][col] == 2:
continue
stack = new_stack
step += 1
return res
| [
"rarry2012@gmail.com"
] | rarry2012@gmail.com |
fe2afc4e27559474c454289a0f2d9f23f9d89c63 | 45614a944ffbdb75a0bef955582a722da5ce7492 | /python/udp_server.py | 02620d0743a3376499c7cbd8f12ad360b23ad43e | [] | no_license | wccgoog/pass | 1c8ab5393547634a27c7543556a75dec771a9e3d | 0ec01536ae10b3d99707002c0e726072acb50231 | refs/heads/2 | 2023-01-15T13:27:26.312648 | 2019-10-23T09:30:45 | 2019-10-23T09:30:45 | 122,595,075 | 0 | 2 | null | 2023-01-07T10:42:38 | 2018-02-23T08:38:36 | JavaScript | UTF-8 | Python | false | false | 320 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 11 14:53:08 2018
@author: wccgo
"""
import socket

# Minimal UDP server: greet every datagram sender on 127.0.0.1:9999.
s=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
s.bind(('127.0.0.1',9999))
print('Bind UDP on 9999')
# Serve forever; recvfrom() returns (payload, sender_address).
while True:
    data,addr=s.recvfrom(1024)
    print('Received from %s:%s.'%addr)
s.sendto(b'Hello,%s!'%data,addr) | [
"wcc3@sina.com"
] | wcc3@sina.com |
237fca580df650487e817d5233cd1dbf6fa66ee2 | 3de6f7f6d8497e728101c368ec778e67f769bd6c | /notes/algo-ds-practice/problems/array/kth_smallest.py | b8e24a89f9d21d12c470f70ead0f6c1325cee1e6 | [
"MIT"
] | permissive | arnabs542/interview-notes | 1fceae0cafa74ef23d0ce434e2bc8e85c4c76fdd | 65af75e2b5725894fa5e13bb5cd9ecf152a0d652 | refs/heads/master | 2023-01-03T06:38:59.410704 | 2020-10-25T06:49:43 | 2020-10-25T06:49:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | '''
**Solution 1:**
1. Max-heapify the first k elements.
2. For every index i > k, heap.pushpop(n[i]).
3. At the end, we will have a max-heap containing the k smallest elements.
O(k + (n-k)logk)
**Solution 2:**
1. Min-heapify the whole array in O(n).
2. Call heap.pop() k times.
O(n + klogk)
**Solution 3:**
1. Call the quicksort partition() on array (either on the first or the last element).
2. If the pivot position == k, then return it straightaway.
3. Else, recur for left or right array accordingly.
O(n*n) worst case but O(n) average.
**Solution 4:**
1. Same as solution 3 but use randomized pivot selection.
Expected O(n).
**Solution 5:**
1. Same as solution 4 but use the median-of-medians algorithm to pick the median as the pivot.
Guaranteed O(n)
'''
| [
"ajaggi@linkedin.com"
] | ajaggi@linkedin.com |
8948a012db16666753807e0753d132e0481dea7e | 6b3b965597c8f203c0232af19b4f4cd5962699c4 | /src/motan/android_vulnerabilities/invalid_server_certificate/invalid_server_certificate.py | 704cd996c4365579e163f3ddd5795ebce2251d7b | [
"MIT"
] | permissive | cqr-cryeye-forks/motan | 8167060cfabfc6a218fc76eceb439ee8e82257ba | 7687cf23842ac0ddbd0e11efa7788670d4d220c0 | refs/heads/master | 2023-08-15T21:07:00.369536 | 2021-10-22T12:46:42 | 2021-10-22T12:46:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,408 | py | #!/usr/bin/env python3
import logging
import os
from typing import Optional, List
from androguard.core.analysis.analysis import MethodAnalysis
from androguard.core.bytecodes.dvm import EncodedMethod
import motan.categories as categories
from motan import vulnerability as vuln
from motan.analysis import AndroidAnalysis
from motan.taint_analysis import TaintAnalysis
from motan.util import is_class_implementing_interfaces
class CustomTaintAnalysis(TaintAnalysis):
    """Minimal TaintAnalysis subclass.

    It exists only so that ``get_paths_to_target_method`` can be reused by the
    vulnerability check below; the path-found callback is deliberately a no-op.
    """
    def vulnerable_path_found_callback(
        self,
        full_path: List[MethodAnalysis],
        caller: MethodAnalysis = None,
        target: MethodAnalysis = None,
        last_invocation_params: list = None,
    ):
        # This method is not used for the current vulnerability check, we only need this
        # class to use one of its methods to get the paths to a target method.
        pass
class InvalidServerCertificate(categories.ICodeVulnerability):
    """Detects custom X509TrustManager implementations whose
    checkServerTrusted() is empty (i.e. server certificates are accepted
    without any validation) and reports every method instantiating them."""
    def __init__(self):
        self.logger = logging.getLogger(self.__class__.__name__)
        super().__init__()

    def check_vulnerability(
        self, analysis_info: AndroidAnalysis
    ) -> Optional[vuln.VulnerabilityDetails]:
        """Return populated VulnerabilityDetails, or None when no vulnerable
        trust manager usage is found.  Re-raises on analysis errors; always
        records this check in ``analysis_info.checked_vulnerabilities``."""
        self.logger.debug(f"Checking '{self.__class__.__name__}' vulnerability")

        try:
            vulnerability_found = False

            # Load the vulnerability details.
            details = vuln.get_vulnerability_details(
                os.path.dirname(os.path.realpath(__file__)), analysis_info.language
            )
            details.id = self.__class__.__name__

            dx = analysis_info.get_dex_analysis()

            # Look for the implementation(s) of the X509TrustManager interface
            # (https://developer.android.com/reference/javax/net/ssl/X509TrustManager)
            # and check if checkServerTrusted method is empty.
            interface_implementations = []
            classes = dx.get_internal_classes()
            for clazz in classes:
                if is_class_implementing_interfaces(
                    clazz.get_vm_class(), ["Ljavax/net/ssl/X509TrustManager;"]
                ):
                    for method in clazz.get_vm_class().get_methods():
                        if (method.get_name() == "checkServerTrusted") and (
                            method.get_descriptor()
                            == "([Ljava/security/cert/X509Certificate; "
                            "Ljava/lang/String;)V"
                        ):
                            # The method has only one (return) instruction, so there is
                            # no validation on the server certificates.
                            if len(list(method.get_instructions())) <= 1:
                                interface_implementations.append(
                                    method.get_class_name()
                                )

            # If no (empty) X509TrustManager implementation was found, there
            # is no reason to continue checking this vulnerability.
            if not interface_implementations:
                return None

            # The list of methods that contain the vulnerability. The key is the full
            # method signature where the vulnerable code was found, while the value is
            # a tuple with the signature of the vulnerable API/other info about the
            # vulnerability and the full path leading to the vulnerability.
            vulnerable_methods = {}

            # Find the method(s) where the custom X509TrustManager is used.
            for clazz in interface_implementations:
                class_analysis = dx.get_class_analysis(clazz)
                if not class_analysis:
                    continue
                for caller in class_analysis.get_xref_from():
                    for m in caller.get_methods():
                        m = m.get_method()

                        # Ignore excluded methods (if any).
                        if analysis_info.ignore_libs:
                            if any(
                                m.get_class_name().startswith(prefix)
                                for prefix in analysis_info.ignored_classes_prefixes
                            ):
                                continue

                        if isinstance(m, EncodedMethod):
                            for i in m.get_instructions():
                                if i.get_op_value() == 0x22:  # 0x22 = "new-instance"
                                    if i.get_string() in interface_implementations:
                                        # A new instance of the custom X509TrustManager
                                        # was found.

                                        taint_analysis = CustomTaintAnalysis(
                                            dx.get_method(m), analysis_info
                                        )

                                        # NOTE(review): [0] raises IndexError
                                        # if no path exists; the broad except
                                        # below would then re-raise it --
                                        # confirm a path is always returned.
                                        path_to_caller = (
                                            taint_analysis.get_paths_to_target_method()[
                                                0
                                            ]
                                        )

                                        vulnerable_methods[
                                            f"{m.get_class_name()}->"
                                            f"{m.get_name()}{m.get_descriptor()}"
                                        ] = (
                                            clazz,
                                            " --> ".join(
                                                f"{p.class_name}->"
                                                f"{p.name}{p.descriptor}"
                                                for p in path_to_caller
                                            ),
                                        )

            for key, value in vulnerable_methods.items():
                vulnerability_found = True
                details.code.append(vuln.VulnerableCode(value[0], key, value[1]))

            if vulnerability_found:
                return details
            else:
                return None
        except Exception as e:
            self.logger.error(
                f"Error during '{self.__class__.__name__}' vulnerability check: {e}"
            )
            raise
        finally:
            # Record the check even on failure.
            analysis_info.checked_vulnerabilities.append(self.__class__.__name__)
| [
"gabriel@claudiugeorgiu.com"
] | gabriel@claudiugeorgiu.com |
e9de33ecda71093ff5047338a10ffb1936155a4b | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/meetup/24bbef018bfa4ae6a84c2b8e3373973d.py | ece76266e33e9054f25f37179abb3518522019ef | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 2,880 | py | from datetime import date, timedelta
class MeetupDayException(Exception):
pass
def _normalize_ordinality(ordinality):
''' Maps inputs like "third" and "1st" to values we can deal with '''
mappings = {
'1st': '1',
'first': '1',
'2nd': '2',
'second': '2',
'3rd': '3',
'third': '3',
'4th': '4',
'fourth': '4',
'5th': '5',
'fifth': '5'}
return (
mappings[ordinality.lower()] if ordinality.lower() in mappings else ordinality)
def _map_weekday_to_number(weekday):
''' Maps an English-language weekday to a corresponding number '''
return {
'monday': 0,
'tuesday': 1,
'wednesday': 2,
'thursday': 3,
'friday': 4,
'saturday': 5,
'sunday': 6
}[weekday.lower()]
def meetup_day(year, month, weekday, ordinality):
'''
Returns a date corresponding to the inputs given, where ordinality
is something like '1st', '2nd', 'third', 'last', 'teenth', etc.
'''
ordinality = _normalize_ordinality(ordinality)
numerical_weekday = _map_weekday_to_number(weekday)
if ordinality.isdigit():
first_of_month = date(year=year, month=month, day=1)
if first_of_month.weekday() == numerical_weekday:
first_weekday_of_month = first_of_month
elif first_of_month.weekday() < numerical_weekday:
first_weekday_of_month = (
first_of_month + timedelta(days=(numerical_weekday - first_of_month.weekday())))
else:
prev_monday = first_of_month - timedelta(days=first_of_month.weekday())
first_weekday_of_month = (
(prev_monday + timedelta(days=7)) + timedelta(days=numerical_weekday))
meetup_day = first_weekday_of_month + timedelta(days=((int(ordinality) - 1) * 7))
if meetup_day.month != month:
# we have gone past the end of the month
raise MeetupDayException("The month you provided does not contain the specified day.")
else:
return meetup_day
elif ordinality == 'teenth':
first_teenth = date(year=year, month=month, day=13)
if first_teenth.weekday() == numerical_weekday:
return first_teenth
else:
return (first_teenth - timedelta(days=first_teenth.weekday())) + timedelta(days=numerical_weekday)
elif ordinality == 'last':
last_day_of_month = date(year=year, month=(month + 1), day=1) - timedelta(days=1)
if last_day_of_month.weekday() == numerical_weekday:
return last_day_of_month
else:
return (
(last_day_of_month - timedelta(days=last_day_of_month.weekday())) +
timedelta(days=numerical_weekday))
else:
raise MeetupDayException("Unexpected ordinality: %s" % ordinality)
| [
"rrc@berkeley.edu"
] | rrc@berkeley.edu |
47d6c70ccdd47afa280e1e3ca4fd576780a18f33 | 627628700ac9eb3b855fdbf5724d44728dce0b63 | /python/spaceinvaders.py | 685ef9f6a82ca98705cece133c2617631dc1439f | [] | no_license | mn113/codewars-solutions | ffa6f9967f43dd58e00282b04aeb73c65d89747f | f41c93aa16d820d2a4a267b7aa7881323ab8bf46 | refs/heads/master | 2021-01-13T09:52:02.007767 | 2020-06-05T16:58:45 | 2020-06-05T16:58:45 | 69,618,778 | 10 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,344 | py | def blast_sequence(aliens, player):
return Game(aliens, player).run()
class Game(object):
def __init__(self, aliens, player):
# Wrap each cell in array:
self.aliens = [[[a] if abs(a) > 0 else [] for a in r] for r in aliens]
self.player_x = player[1]
self.player_y = player[0]
self.died = False
self.width = len(self.aliens[0])
def extend_grid(self, num_rows):
while num_rows > 0:
self.aliens.append([[] for x in range(self.width)])
num_rows -= 1
def print_grid(self):
for row in self.aliens:
print(row)
def run(self):
self.hit_blasts = []
t = 0
while self.count_all_aliens() > 0:
t += 1
# Extend grid if aliens in bottom row:
if self.count_row(-1) > 0:
self.extend_grid(1)
# Move each alien and see if one gets us:
self.move_all_aliens()
if self.died:
return None
# Blast them - hit or miss?
if self.blast(t):
self.hit_blasts.append(t-1)
#print(self.count_all_aliens(), "aliens remain")
# Killed all aliens:
return self.hit_blasts
def count_row(self, y):
return sum([len(a) for a in self.aliens[y]])
def count_all_aliens(self):
return sum([self.count_row(y) for y in range(len(self.aliens))])
def move_all_aliens(self):
empty_grid = [[[] for x in range(self.width)] for y in range(len(self.aliens))]
for y in range(len(self.aliens)):
for x in range(self.width):
for a in range(len(self.aliens[y][x])):
val = self.aliens[y][x][a]
dest = self.move_alien(x,y,val)
if (dest):
empty_grid[dest[1]][dest[0]].append(dest[2])
else:
return
self.aliens = empty_grid
def move_alien(self, x, y, val):
dir = val // abs(val)
moved = 0
# Move along same row:
while moved < abs(val) and x + dir >= 0 and x + dir < self.width:
x += dir
moved += 1
# Enter next row (create it if necessary):
if moved < abs(val):
y += 1
if y == self.player_y:
print(val, "got you!")
self.died = True # game over soon
return False
moved += 1
dir *= -1 # reverse dir
val *= -1
# Move along next row:
while moved < abs(val) and x + dir >= 0 and x + dir < self.width:
x += dir
moved += 1
return [x,y,val]
def blast(self, t):
x = self.player_x
y = len(self.aliens)
while y > 0:
y -= 1
victims = self.aliens[y][x]
if len(victims) > 0:
# Blast kills who? sort by 1: max(abs(v)), 2: max(v)
victims.sort() # lo to hi
if abs(victims[0]) > abs(victims[-1]):
victims.reverse()
v = victims.pop()
#print(v, "got blasted at", x, y, "by blast", t-1)
return True
# Missed everyone:
#print("blast missed")
return False
| [
"recyclebing+github@gmail.com"
] | recyclebing+github@gmail.com |
40d967e4f64b55a845f8e91ffb91090ccc5aa01e | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flashblade/FB_2_7/models/api_token.py | 57ed30dc74b9aad64924eb1885eeaed495b5a357 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 3,845 | py | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.7, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_7 import models
class ApiToken(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'created_at': 'int',
'expires_at': 'int',
'token': 'str'
}
attribute_map = {
'created_at': 'created_at',
'expires_at': 'expires_at',
'token': 'token'
}
required_args = {
}
def __init__(
self,
created_at=None, # type: int
expires_at=None, # type: int
token=None, # type: str
):
"""
Keyword args:
created_at (int): Creation time in milliseconds since the UNIX epoch.
expires_at (int): Expiration time in milliseconds since the UNIX epoch.
token (str): An Admin API token. A newly-created token is visible as the result of the POST operation which created it. An existing token is visible when `expose_api_token` is `true` and it is being requested by the user to whom it belongs. In all other cases, the token will be masked.
"""
if created_at is not None:
self.created_at = created_at
if expires_at is not None:
self.expires_at = expires_at
if token is not None:
self.token = token
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `ApiToken`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ApiToken, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApiToken):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"tlewis@purestorage.com"
] | tlewis@purestorage.com |
a6afb02780f5418e60601aa83c740eb85a13e07e | bd7d091565ba63c930351203ed912ff82461d540 | /tree_515_largestValuesPerLevel.py | 23dfe044a516310d559d478557227a2d3c61658f | [] | no_license | screnary/Algorithm_python | 6ea3ab571763b5c0a519bdb7eed64dd5b74e8a8f | 8290ad1c763d9f7c7f7bed63426b4769b34fd2fc | refs/heads/master | 2022-12-07T02:59:42.786259 | 2020-08-25T04:27:45 | 2020-08-25T04:27:45 | 258,499,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | # class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def largestValues(self, root: TreeNode) -> List[int]:
if root is None:
return []
# init max_list
max_list = []
max_list.append(root.val)
# broad first search
queue = []
queue.append((root, 0))
while len(queue) > 0:
item = queue.pop(0)
node, level = item[0], item[1]
if level == len(max_list):
max_list.append(node.val)
else:
if max_list[level] < node.val:
max_list[level] = node.val
if node.left is not None:
queue.append((node.left, level+1))
if node.right is not None:
queue.append((node.right, level+1))
return max_list
| [
"screnary@qq.com"
] | screnary@qq.com |
72b11e9bf6e3be6f40f8d80e557bd486a016a2e9 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_prickles.py | 7b0da8887b113dba5f2d0ab69d5f213b2af264f5 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
#calss header
class _PRICKLES():
def __init__(self,):
self.name = "PRICKLES"
self.definitions = prickle
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['prickle']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
f08d4ae3acf8695083cbbc555f751dc5040bd771 | 3ccf552a92dc78da06bcd7242ab62b3adf0c9019 | /algorithm_demo/exp_demo.py | 877817780251e1142207b47ea85518ccef039bc7 | [] | no_license | linkcheng/python_demo | a3437bd56083f04d440ae3b4d44f6bffe806dee2 | 332b75948745d66d1a74253033444a2c63fb8e51 | refs/heads/master | 2022-07-08T10:45:53.587317 | 2020-08-07T10:23:49 | 2020-08-07T10:23:49 | 52,152,418 | 2 | 1 | null | 2021-03-25T21:43:04 | 2016-02-20T12:33:20 | Python | UTF-8 | Python | false | false | 3,985 | py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*
"""
给你一个字符串 s 和一个字符规律 p,请你来实现一个支持 '.' 和 '*' 的正则表达式匹配。
'.' 匹配任意单个字符
'*' 匹配零个或多个前面的那一个元素
所谓匹配,是要涵盖 整个 字符串 s的,而不是部分字符串。
说明:
s 可能为空,且只包含从 a-z 的小写字母。
p 可能为空,且只包含从 a-z 的小写字母,以及字符 . 和 *。
"""
class Solution:
def is_match(self, s: str, p: str) -> bool:
# 特殊值处理
if not p:
return not s
if len(p) == 1 and not s:
return False
m, n = len(s)+1, len(p)+1
dp = [[False]*n for _ in range(m)]
# base case
dp[0][0] = True
dp[0][1] = False
# for i in range(1, m):
# dp[i][0] = False
for i in range(2, n):
if p[i-1] == '*':
dp[0][i] = dp[0][i-2]
# other
# dp[i][j] 表示的状态是 s 的前 i 项和 p 的前 j 项是否匹配。
for i in range(1, m):
for j in range(1, n):
if p[j-1] in {s[i-1], '.'}:
dp[i][j] = dp[i-1][j-1]
elif p[j-1] == '*':
# 如果 p 最后一位是 *
if p[j-2] in {s[i-1], '.'}:
# 如果 * 星号前一个值 == s 当前值或者 . 重复 0 次或者重复 1 次
dp[i][j] = dp[i][j-2] or dp[i-1][j]
else:
# 如果 * 星号前一个值 != s 当前值或者 . 只能重复 0 次
dp[i][j] = dp[i][j-2]
else:
dp[i][j] = False
return dp[m-1][n-1]
def is_match_rec(self, s: str, p: str) -> bool:
if not p:
return not s
if len(p) == 1 and not s:
return False
return self._is_match(s, p, len(s), len(p), 0, 0)
def _is_match(self, s: str, p: str, s_len: int, p_len: int, s_i: int, p_i: int) -> bool:
if p_i >= p_len:
return s_i == s_len
if s_i >= s_len:
return s_i == p_len
# 如果当前长度最后一位是 *,则有两种情况
# 1. 重复前值 0 次
# 2. 重复前值 1 次
match = s_i < s_len and p[p_i] in {s[s_i], '.'}
if (p_i+1) < p_len and p[p_i+1] == '*':
return (
self._is_match(s, p, s_len, p_len, s_i, p_i+2)
or
(match and self._is_match(s, p, s_len, p_len, s_i+1, p_i))
)
else:
return match and self._is_match(s, p, s_len, p_len, s_i+1, p_i+1)
class Solution1:
def isMatch(self, s: str, p: str) -> bool:
if not s:
return p in {'', '*', '?'}
if not p:
return False
return self._isMatch(s, p, len(s), len(p), 0, 0)
def _isMatch(self, s, p, s_len, p_len, i, j) -> bool:
if i == s_len:
return j == p_len
# if j == p_len:
# return i == s_len
if p[j] in {s[i], '?'}:
return self._isMatch(s, p, s_len, p_len, i+1, j+1)
elif p[j] == '*':
return (
# 重复 0 次
self._isMatch(s, p, s_len, p_len, i, j+1)
or
# 重复 1 次
self._isMatch(s, p, s_len, p_len, i+1, j)
)
else:
return False
if __name__ == '__main__':
# so = Solution()
# res = so.is_match_rec("aa", "a*")
# res = so.is_match_rec("mississippi", "mis*is*p*.")
# print(res)
# res = so.is_match("mississippi", "mis*is*p*.")
# print(res)
# res = so.is_match_rec("aab", "c*a*b")
# print(res)
# res = so.is_match("aab", "c*a*b")
# print(res)
so1 = Solution1()
res = so1.isMatch("adceb", "*a*b")
print(res)
| [
"zheng.long@shoufuyou.com"
] | zheng.long@shoufuyou.com |
c807e9124d07528aea5e10ce3e928ae1f5052d14 | f55000f1e466b200a2eaa223c907c71131d641c4 | /dgcca_tool/mlp.py | 3488b23337648e57ec29d8307c15745c1bc016b6 | [] | no_license | EtienneGagnon1/gcca | fc12faeb3e75e5e74187c0f827f03932bd836287 | ae41a18c6fb869f6b45ba729fc68e80cfd639dea | refs/heads/master | 2020-11-24T10:24:03.976846 | 2019-01-19T08:47:44 | 2019-01-19T08:47:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,941 | py | """
Lifted from Theano multilayer perceptron tutorial. Adapted to take the partial derivative of
some external loss w.r.t. output layer and back-propagate this loss. Output layer has no
nonlinearity.
Adrian Benton
9/13/2016
"""
from __future__ import print_function
__docformat__ = 'restructedtext en'
import os
import sys
import timeit
import numpy as np
from numpy.random import RandomState
import theano
from theano.gradient import jacobian
import theano.tensor as T
from functools import reduce
import unittest
theano.config.compute_test_value = 'off' # Use 'warn' to activate this feature, 'off' otherwise
# Test values. . .
np.random.seed(12345)
sample_n_examples = 400
sample_n_hidden = [ 50, 10, 5 ]
sample_input = np.random.randn(sample_n_examples, sample_n_hidden[0])
sample_externalGrad = np.random.randn(sample_n_examples, sample_n_hidden[-1])
class HiddenLayer(object):
def __init__(self, rng, input, n_in, n_out, W=None, b=None, activation=T.tanh, includeBias=False, vname=''):
"""
:type rng: numpy.random.RandomState
:param rng: a random number generator used to initialize weights
:type input: theano.tensor.dmatrix
:param input: a symbolic tensor of shape (n_examples, n_in)
:type n_in: int
:param n_in: dimensionality of input
:type n_out: int
:param n_out: number of hidden units
:type activation: theano.Op or function
:param activation: Non linearity to be applied in the hidden
layer
:type includeBias: bool
:param includeBias: Whether this layer should have a bias term
:type vname: str
:param vname: name to attach to this layer's weights
"""
self.input = input
# Weight initialization for different nonlinearities
if W is None:
if activation == T.nnet.sigmoid:
W_values = 4. * np.asarray(
rng.uniform(
low=-np.sqrt(6. / (n_in + n_out)),
high=np.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out))
, dtype=theano.config.floatX
)
elif activation == T.nnet.relu:
W_values = np.asarray(
rng.normal(
0.0,
2.0/n_in,
size=(n_in, n_out))
, dtype=theano.config.floatX
)
else:
# Xavier initialization for tanh
W_values = np.asarray(
rng.uniform(
low=-np.sqrt(6. / (n_in + n_out)),
high=np.sqrt(6. / (n_in + n_out)),
size=(n_in, n_out))
, dtype=theano.config.floatX
)
W = theano.shared(value=W_values.astype(theano.config.floatX),
name='%s_W' % (vname), borrow=True)
if b is None:
b_values = np.zeros((n_out,), dtype=theano.config.floatX)
b = theano.shared(value=b_values.astype(theano.config.floatX),
name='%s_b' % (vname), borrow=True)
self.W = W
self.b = b
if includeBias:
lin_output = T.dot(input, self.W) + self.b
self.params = [self.W, self.b]
else:
lin_output = T.dot(input, self.W)
self.params = [self.W]
self.output = (
lin_output if activation is None
else activation(lin_output)
)
class MLPWithLinearOutput(object):
"""Multi-Layer Perceptron Class
This has no softmax at the output layer, just a stack of layers with
nonlinearities -- final layer has no non-linearity, and then a linear projection
to shared space.
"""
def __init__(self, seed, layerWidths, activation, gccaDim, optimizer, L1_reg=0.0, L2_reg=0.0, vname=''):
"""Initialize the parameters for the multilayer perceptron
:type seed: int
:param seed: to init random number generator
:type layerWidths: [ int ]
:param layerWidths: width of each layer
:type activation: Tensor -> Tensor
:param activation: activation function for hidden layers
:type gccaDim: int
:param gccaDim: dimensionality of shared layer
:type optimizer: Optimizer
:param optimizer: generates weight updates
:type L1_reg: float
:param L1_reg: weight to place on L1 penalty on weights
:type L2_reg: float
:param L2_reg: weight to place on L2 penalty
:type vname: str
:param vname: name to attach to layer weights
"""
rng = RandomState(seed)
self.optimizer = optimizer
self.L1_reg = theano.shared(np.array(L1_reg).astype(theano.config.floatX),
'L1_%s' % (vname))
self.L2_reg = theano.shared(np.array(L2_reg).astype(theano.config.floatX),
'L2_%s' % (vname))
# Learned externally by GCCA routine
self.U = theano.shared(np.random.randn(layerWidths[-1],
gccaDim).astype(theano.config.floatX),
'U_%s' % (vname), allow_downcast=True)
self.__hiddenLayers = []
self.input = T.matrix('X_%s' % (vname))
self.missing = T.vector('K_missing_%s' % (vname)) # to compute mean of output layer when we have missing data
self.__externalGrad = T.matrix('ExternalGrad_%s' % (vname)) # Partial derivative of loss w.r.t. output layer -- computed somewhere else
self.input.tag.test_value = sample_input
self.__externalGrad.tag.test_value = sample_externalGrad
Ws = []
# Connect hidden layers
for layerIndex, (nIn, nOut) in enumerate(zip(layerWidths, layerWidths[1:])):
prevLayer = self.input if layerIndex == 0 else self.__hiddenLayers[-1].output
act = activation if layerIndex < ( len(layerWidths) - 2 ) else None
hiddenLayer = HiddenLayer(
rng=rng,
input=prevLayer,
n_in=nIn,
n_out=nOut,
activation=act,
includeBias=True,
vname='%s_layer-%d' % (vname, layerIndex)
)
self.__hiddenLayers.append(hiddenLayer)
Ws.append(hiddenLayer.W)
if layerIndex == 0:
self.L1 = abs(hiddenLayer.W).sum()
self.L2_sqr = (hiddenLayer.W ** 2).sum()
else:
self.L1 += abs(hiddenLayer.W).sum()
self.L2_sqr += (hiddenLayer.W ** 2).sum()
# L1/L2 regularization terms
self.__reg_cost = (
self.L1_reg * self.L1
+ self.L2_reg * self.L2_sqr
)
# Mean-centers the output layer. Calculated on training data.
self.Bcenter = theano.shared(np.zeros((1, layerWidths[-1])).
astype(theano.config.floatX),
name='%s_BmeanCenter' % (vname),
broadcastable=(True, False), borrow=True,
allow_downcast=True)
self.__output_uncentered = self.__hiddenLayers[-1].output
self.output_centered = self.__output_uncentered - self.Bcenter
self.shared_output = self.output_centered.dot(self.U)
mask = T.tile(self.missing.reshape((self.output_centered.shape[0], 1)),
(1,self.output_centered.shape[1]))
denom = 1./mask.sum(axis=0, keepdims=True)
# Recenter based on current training data
self.__Bcenter_current = (mask * self.__output_uncentered).sum(axis=0,
keepdims=True) * denom
self.output_traindata_centered = self.__output_uncentered - self.__Bcenter_current
# so we can update all parameters at once
self.__params = reduce(lambda x,y: x+y,
[layer.params for layer in self.__hiddenLayers])
# Hack to get theano autodiff to compute and backprop gradients for me.
# Idea from Nanyun Peng.
self.__external_cost = T.sum( self.output_centered * self.__externalGrad )
self.__cost = self.__reg_cost + self.__external_cost
# Gradient for just the external loss.
self.__gparams = [T.grad(self.__external_cost, p) for p in self.__params]
self.__reg_gparams = [T.grad(self.__reg_cost, p) for p in Ws]
# Full gradient update
self.__full_gparams = [T.grad(self.__cost, p) for p in self.__params]
self.buildFns()
def getWeights(self):
wts = [p.get_value() for p in self.__params]
return wts
def setWeights(self, weights):
'''
Parameters
----------
:type weights: [ np.array ]
:param weights: should be the same number of elements and shapes as self.__params
'''
for param, wts in zip(self.__params, weights):
param.set_value(np.float32(wts))
def buildFns(self):
# What to call when applying to test
self.get_shared_output = theano.function(
inputs=[self.input],
outputs=self.shared_output
, allow_input_downcast=True)
# What to call when training
self.get_centered_output = theano.function(
inputs=[self.input],
outputs=self.output_centered
, allow_input_downcast=True)
self.get_uncentered_output = theano.function(
inputs=[self.input],
outputs=self.__output_uncentered
, allow_input_downcast=True)
# Different cost and gradient functions for inspection/debugging.
self.calc_gradient = theano.function( inputs=[ self.input, self.__externalGrad ],
outputs=self.__gparams , allow_input_downcast=True)
self.calc_regOnly_gradient = theano.function( inputs=[], outputs=self.__reg_gparams, allow_input_downcast=True)
self.calc_reg_gradient = theano.function( inputs=[ self.input, self.__externalGrad ],
outputs=self.__full_gparams , allow_input_downcast=True)
self.calc_external_cost = theano.function( inputs=[ self.input, self.__externalGrad ],
outputs=self.__external_cost , allow_input_downcast=True)
self.calc_reg_cost = theano.function( inputs=[ ], outputs=self.__reg_cost , allow_input_downcast=True)
self.calc_total_cost = theano.function( inputs=[ self.input,
self.__externalGrad ],
outputs=self.__cost , allow_input_downcast=True)
# Gradient w.r.t. output layer
self.calc_gradient_wrtOutput = theano.function(
inputs=[ self.input, self.__externalGrad ],
outputs=T.grad(self.__cost, self.output_centered)
, allow_input_downcast=True)
self.calc_gradient_wrtOutputUC = theano.function(
inputs=[ self.input, self.__externalGrad ],
outputs=T.grad(self.__cost, self.__output_uncentered)
, allow_input_downcast=True)
# For debugging, get hidden layer values
self.get_layer_values = theano.function(inputs=[ self.input ],
outputs=[h.output for h in self.__hiddenLayers]
, allow_input_downcast=True)
if self.optimizer is not None:
self.setOptimizer(self.optimizer)
self.Bupdate = (self.Bcenter, self.__Bcenter_current)
# Set B to the mean of these data, and return output
self.center_output = theano.function( inputs=[ self.input, self.missing ],
outputs=self.output_traindata_centered,
updates=[self.Bupdate] , allow_input_downcast=True)
def setOptimizer(self, optimizer):
self.optimizer = optimizer
self.__updates = self.optimizer.getUpdates(self.__params, self.__full_gparams)
updates = self.__updates
params = self.__params
grads = self.__full_gparams
self.take_step = theano.function([ self.input, self.__externalGrad ],
outputs=[],
updates=self.__updates, allow_input_downcast=True)
| [
"879837607@qq.com"
] | 879837607@qq.com |
bcff14a0fc7e9a6fa5d3750e08b2ce7db4660179 | 23b333449524887594530f73c0079ce60cb8eefb | /python_module/examples/340_Longest_Substring_with_At_Most_K_Distinct_Characters.py | 9bf96f58a4f7a28334865e8030552dbcdf9c9b3e | [] | no_license | benbendaisy/CommunicationCodes | 9deb371095f5d67e260030d3d8abf211c90e7642 | 444cc502ef26810b46115797f2e26ab305a4ebdf | refs/heads/master | 2023-08-09T21:46:58.691987 | 2023-07-20T05:11:39 | 2023-07-20T05:11:39 | 27,856,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,188 | py | from collections import defaultdict
class Solution:
"""
Given a string s and an integer k, return the length of the longest
substring of s that contains at most k distinct characters.
Example 1:
Input: s = "eceba", k = 2
Output: 3
Explanation: The substring is "ece" with length 3.
Example 2:
Input: s = "aa", k = 1
Output: 2
Explanation: The substring is "aa" with length 2.
Constraints:
1 <= s.length <= 5 * 104
0 <= k <= 50
"""
def lengthOfLongestSubstringKDistinct(self, s: str, k: int) -> int:
if not s or k < 1:
return 0
hashMap = defaultdict(lambda: 0)
maxLength = 0
l = 0
for i in range(len(s)):
hashMap[s[i]] = i
if len(hashMap) == k + 1:
minIndex = min(hashMap.values())
del hashMap[s[minIndex]]
l = minIndex + 1
maxLength = max(maxLength, i - l + 1)
return maxLength
if __name__ == "__main__":
s = "ab"
solution = Solution()
ret = solution.lengthOfLongestSubstringKDistinct(s, 1)
print(ret) | [
"benbendaisy@users.noreply.github.com"
] | benbendaisy@users.noreply.github.com |
479d98009251330b275180ead5af42146eaac085 | 8a3102e1d3ed5346a0468aafdc0413858ddc25e3 | /check.py | ac20fee4e32b946d265060c6b115a3848bdadec5 | [] | no_license | a13140120a/E_sun_bank_competition | f9bbb6a9c2ef9581cdeae46bd12102f165038685 | 5286e2d8bdb5e285bc0109233eeb2aa3368eb362 | refs/heads/master | 2023-08-11T14:23:10.438964 | 2021-09-24T05:51:00 | 2021-09-24T05:51:00 | 409,580,809 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 759 | py | import pickle
import matplotlib.pyplot as plt
import os
def load_pickle_file(pile_path):
with open(pile_path,'rb') as file:
pickle_file = pickle.load(file)
return pickle_file
batch_acc = load_pickle_file('./analyze/batch/acc.pickle')
batch_lr = load_pickle_file('./analyze/batch/lr.pickle')
epoch_acc = load_pickle_file('./analyze/epoch/acc.pickle')
epoch_loss = load_pickle_file('./analyze/epoch/loss.pickle')
epoch_lr = load_pickle_file('./analyze/epoch/lr.pickle')
epoch_val_acc = load_pickle_file('./analyze/epoch/val_acc.pickle')
epoch_val_loss = load_pickle_file('./analyze/epoch/val_loss.pickle')
plt.plot(batch_acc, batch_lr, marker='.')
plt.legend(loc='best')
plt.grid()
plt.xlabel('batch_acc')
plt.ylabel('batch_lr')
plt.show()
| [
"you@example.com"
] | you@example.com |
ac2715ebe0b94a249fbed5fd1d620bf5cb57d913 | d12b59b33df5c467abf081d48e043dac70cc5a9c | /uhd_restpy/testplatform/sessions/ixnetwork/topology/bgpipv6adl2vpn_dfa30e45f6798c9ecc0ef8b85351cb5d.py | 72d340153453849055dd7a7ea2876877d879b7e7 | [
"MIT"
] | permissive | ajbalogh/ixnetwork_restpy | 59ce20b88c1f99f95a980ff01106bda8f4ad5a0f | 60a107e84fd8c1a32e24500259738e11740069fd | refs/heads/master | 2023-04-02T22:01:51.088515 | 2021-04-09T18:39:28 | 2021-04-09T18:39:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,090 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
class BgpIpv6AdL2Vpn(Base):
"""BGP AD-L2VPN Configuration
The BgpIpv6AdL2Vpn class encapsulates a list of bgpIpv6AdL2Vpn resources that are managed by the user.
A list of resources can be retrieved from the server using the BgpIpv6AdL2Vpn.find() method.
The list can be managed by using the BgpIpv6AdL2Vpn.add() and BgpIpv6AdL2Vpn.remove() methods.
"""
__slots__ = ()
_SDM_NAME = 'bgpIpv6AdL2Vpn'
_SDM_ATT_MAP = {
'Active': 'active',
'AsNumberVplsId': 'asNumberVplsId',
'AsNumberVplsRd': 'asNumberVplsRd',
'AsNumberVplsRt': 'asNumberVplsRt',
'AssignedNumberVplsId': 'assignedNumberVplsId',
'AssignedNumberVplsRd': 'assignedNumberVplsRd',
'AssignedNumberVplsRt': 'assignedNumberVplsRt',
'ConnectedVia': 'connectedVia',
'Count': 'count',
'DescriptiveName': 'descriptiveName',
'DutIpv6': 'dutIpv6',
'Errors': 'errors',
'ImportRDAsRT': 'importRDAsRT',
'ImportVplsIdAsRd': 'importVplsIdAsRd',
'IpAddressVplsId': 'ipAddressVplsId',
'IpAddressVplsRd': 'ipAddressVplsRd',
'IpAddressVplsRt': 'ipAddressVplsRt',
'LocalIpv6': 'localIpv6',
'LocalRouterID': 'localRouterID',
'Multiplier': 'multiplier',
'Name': 'name',
'NumberVsiId': 'numberVsiId',
'SessionStatus': 'sessionStatus',
'StackedLayers': 'stackedLayers',
'StateCounts': 'stateCounts',
'Status': 'status',
'TypeVplsId': 'typeVplsId',
'TypeVplsRd': 'typeVplsRd',
'TypeVplsRt': 'typeVplsRt',
'TypeVsiId': 'typeVsiId',
}
def __init__(self, parent):
super(BgpIpv6AdL2Vpn, self).__init__(parent)
@property
def Active(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Activate/Deactivate Configuration
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Active']))
@property
def AsNumberVplsId(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): VPLS ID AS Number
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AsNumberVplsId']))
@property
def AsNumberVplsRd(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Route Distinguisher AS Number
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AsNumberVplsRd']))
@property
def AsNumberVplsRt(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Route Target AS Number
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AsNumberVplsRt']))
@property
def AssignedNumberVplsId(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): VPLS ID Assigned Number
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AssignedNumberVplsId']))
@property
def AssignedNumberVplsRd(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Route Distinguisher Assigned Number
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AssignedNumberVplsRd']))
@property
def AssignedNumberVplsRt(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Route Target Assigned Number
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['AssignedNumberVplsRt']))
@property
def ConnectedVia(self):
"""DEPRECATED
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of layers this layer is used to connect with to the wire.
"""
return self._get_attribute(self._SDM_ATT_MAP['ConnectedVia'])
@ConnectedVia.setter
def ConnectedVia(self, value):
self._set_attribute(self._SDM_ATT_MAP['ConnectedVia'], value)
@property
def Count(self):
"""
Returns
-------
- number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
"""
return self._get_attribute(self._SDM_ATT_MAP['Count'])
@property
def DescriptiveName(self):
"""
Returns
-------
- str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
"""
return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])
@property
def DutIpv6(self):
"""
Returns
-------
- list(str): DUT IP
"""
return self._get_attribute(self._SDM_ATT_MAP['DutIpv6'])
@property
def Errors(self):
"""
Returns
-------
- list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str])): A list of errors that have occurred
"""
return self._get_attribute(self._SDM_ATT_MAP['Errors'])
@property
def ImportRDAsRT(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Use RD As RT
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ImportRDAsRT']))
@property
def ImportVplsIdAsRd(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Use VPLS ID As Route Distinguisher
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ImportVplsIdAsRd']))
@property
def IpAddressVplsId(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): VPLS ID IP Address
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IpAddressVplsId']))
@property
def IpAddressVplsRd(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Route Distinguisher IP Address
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IpAddressVplsRd']))
@property
def IpAddressVplsRt(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): Route Target IP Address
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['IpAddressVplsRt']))
@property
def LocalIpv6(self):
"""
Returns
-------
- list(str): Local IP
"""
return self._get_attribute(self._SDM_ATT_MAP['LocalIpv6'])
@property
def LocalRouterID(self):
"""
Returns
-------
- list(str): Router ID
"""
return self._get_attribute(self._SDM_ATT_MAP['LocalRouterID'])
@property
def Multiplier(self):
"""
Returns
-------
- number: Number of layer instances per parent instance (multiplier)
"""
return self._get_attribute(self._SDM_ATT_MAP['Multiplier'])
@Multiplier.setter
def Multiplier(self, value):
self._set_attribute(self._SDM_ATT_MAP['Multiplier'], value)
@property
def Name(self):
"""
Returns
-------
- str: Name of NGPF element, guaranteed to be unique in Scenario
"""
return self._get_attribute(self._SDM_ATT_MAP['Name'])
@Name.setter
def Name(self, value):
self._set_attribute(self._SDM_ATT_MAP['Name'], value)
@property
def NumberVsiId(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): VSI ID Number
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['NumberVsiId']))
@property
def SessionStatus(self):
"""
Returns
-------
- list(str[down | notStarted | up]): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation is didn't successfully complete (yet). Up - session came up successfully.
"""
return self._get_attribute(self._SDM_ATT_MAP['SessionStatus'])
@property
def StackedLayers(self):
"""
Returns
-------
- list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*]): List of secondary (many to one) child layer protocols
"""
return self._get_attribute(self._SDM_ATT_MAP['StackedLayers'])
@StackedLayers.setter
def StackedLayers(self, value):
self._set_attribute(self._SDM_ATT_MAP['StackedLayers'], value)
@property
def StateCounts(self):
"""
Returns
-------
- dict(total:number,notStarted:number,down:number,up:number): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
"""
return self._get_attribute(self._SDM_ATT_MAP['StateCounts'])
@property
def Status(self):
"""
Returns
-------
- str(configured | error | mixed | notStarted | started | starting | stopping): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
"""
return self._get_attribute(self._SDM_ATT_MAP['Status'])
@property
def TypeVplsId(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): VPLS ID Type
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TypeVplsId']))
@property
def TypeVplsRd(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): RD Type
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TypeVplsRd']))
@property
def TypeVplsRt(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): RT Type
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TypeVplsRt']))
@property
def TypeVsiId(self):
"""
Returns
-------
- obj(uhd_restpy.multivalue.Multivalue): VSI ID
"""
from uhd_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['TypeVsiId']))
def update(self, ConnectedVia=None, Multiplier=None, Name=None, StackedLayers=None):
"""Updates bgpIpv6AdL2Vpn resource on the server.
This method has some named parameters with a type: obj (Multivalue).
The Multivalue class has documentation that details the possible values for those named parameters.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
def add(self, ConnectedVia=None, Multiplier=None, Name=None, StackedLayers=None):
"""Adds a new bgpIpv6AdL2Vpn resource on the server and adds it to the container.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
Returns
-------
- self: This instance with all currently retrieved bgpIpv6AdL2Vpn resources using find and the newly added bgpIpv6AdL2Vpn resources available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
def remove(self):
"""Deletes all the contained bgpIpv6AdL2Vpn resources in this instance from the server.
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
self._delete()
def find(self, ConnectedVia=None, Count=None, DescriptiveName=None, DutIpv6=None, Errors=None, LocalIpv6=None, LocalRouterID=None, Multiplier=None, Name=None, SessionStatus=None, StackedLayers=None, StateCounts=None, Status=None):
"""Finds and retrieves bgpIpv6AdL2Vpn resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve bgpIpv6AdL2Vpn resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all bgpIpv6AdL2Vpn resources from the server.
Args
----
- ConnectedVia (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of layers this layer is used to connect with to the wire.
- Count (number): Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
- DescriptiveName (str): Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
- DutIpv6 (list(str)): DUT IP
- Errors (list(dict(arg1:str[None | /api/v1/sessions/1/ixnetwork//.../*],arg2:list[str]))): A list of errors that have occurred
- LocalIpv6 (list(str)): Local IP
- LocalRouterID (list(str)): Router ID
- Multiplier (number): Number of layer instances per parent instance (multiplier)
- Name (str): Name of NGPF element, guaranteed to be unique in Scenario
- SessionStatus (list(str[down | notStarted | up])): Current state of protocol session: Not Started - session negotiation not started, the session is not active yet. Down - actively trying to bring up a protocol session, but negotiation is didn't successfully complete (yet). Up - session came up successfully.
- StackedLayers (list(str[None | /api/v1/sessions/1/ixnetwork/topology/.../*])): List of secondary (many to one) child layer protocols
- StateCounts (dict(total:number,notStarted:number,down:number,up:number)): A list of values that indicates the total number of sessions, the number of sessions not started, the number of sessions down and the number of sessions that are up
- Status (str(configured | error | mixed | notStarted | started | starting | stopping)): Running status of associated network element. Once in Started state, protocol sessions will begin to negotiate.
Returns
-------
- self: This instance with matching bgpIpv6AdL2Vpn resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of bgpIpv6AdL2Vpn data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the bgpIpv6AdL2Vpn resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
def get_device_ids(self, PortNames=None, Active=None, AsNumberVplsId=None, AsNumberVplsRd=None, AsNumberVplsRt=None, AssignedNumberVplsId=None, AssignedNumberVplsRd=None, AssignedNumberVplsRt=None, ImportRDAsRT=None, ImportVplsIdAsRd=None, IpAddressVplsId=None, IpAddressVplsRd=None, IpAddressVplsRt=None, NumberVsiId=None, TypeVplsId=None, TypeVplsRd=None, TypeVplsRt=None, TypeVsiId=None):
"""Base class infrastructure that gets a list of bgpIpv6AdL2Vpn device ids encapsulated by this object.
Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.
Args
----
- PortNames (str): optional regex of port names
- Active (str): optional regex of active
- AsNumberVplsId (str): optional regex of asNumberVplsId
- AsNumberVplsRd (str): optional regex of asNumberVplsRd
- AsNumberVplsRt (str): optional regex of asNumberVplsRt
- AssignedNumberVplsId (str): optional regex of assignedNumberVplsId
- AssignedNumberVplsRd (str): optional regex of assignedNumberVplsRd
- AssignedNumberVplsRt (str): optional regex of assignedNumberVplsRt
- ImportRDAsRT (str): optional regex of importRDAsRT
- ImportVplsIdAsRd (str): optional regex of importVplsIdAsRd
- IpAddressVplsId (str): optional regex of ipAddressVplsId
- IpAddressVplsRd (str): optional regex of ipAddressVplsRd
- IpAddressVplsRt (str): optional regex of ipAddressVplsRt
- NumberVsiId (str): optional regex of numberVsiId
- TypeVplsId (str): optional regex of typeVplsId
- TypeVplsRd (str): optional regex of typeVplsRd
- TypeVplsRt (str): optional regex of typeVplsRt
- TypeVsiId (str): optional regex of typeVsiId
Returns
-------
- list(int): A list of device ids that meets the regex criteria provided in the method parameters
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._get_ngpf_device_ids(locals())
def Abort(self, *args, **kwargs):
"""Executes the abort operation on the server.
Abort CPF control plane (equals to demote to kUnconfigured state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
abort(SessionIndices=list)
--------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
abort(SessionIndices=string)
----------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('abort', payload=payload, response_object=None)
def ReadvertiseADVPLSRoute(self, *args, **kwargs):
"""Executes the readvertiseADVPLSRoute operation on the server.
Re-advertise Aged out BGP Routes in a Route Range
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
readvertiseADVPLSRoute(SessionIndices=list)
-------------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
readvertiseADVPLSRoute(SessionIndices=string)
---------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('readvertiseADVPLSRoute', payload=payload, response_object=None)
def Readvertiseadvplsroute(self, *args, **kwargs):
"""Executes the readvertiseadvplsroute operation on the server.
Readvertise only the aged-out routes contained in this route range.
readvertiseadvplsroute(Arg2=list)list
-------------------------------------
- Arg2 (list(number)): List of indices into the group. An empty list indicates all instances in the group.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('readvertiseadvplsroute', payload=payload, response_object=None)
def RestartDown(self, *args, **kwargs):
"""Executes the restartDown operation on the server.
Stop and start interfaces and sessions that are in Down state.
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
restartDown(SessionIndices=list)
--------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
restartDown(SessionIndices=string)
----------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('restartDown', payload=payload, response_object=None)
def Start(self, *args, **kwargs):
"""Executes the start operation on the server.
Start CPF control plane (equals to promote to negotiated state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
start(SessionIndices=list)
--------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
start(SessionIndices=string)
----------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('start', payload=payload, response_object=None)
def Stop(self, *args, **kwargs):
"""Executes the stop operation on the server.
Stop CPF control plane (equals to demote to PreValidated-DoDDone state).
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
stop(SessionIndices=list)
-------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
stop(SessionIndices=string)
---------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('stop', payload=payload, response_object=None)
def WithdrawADVPLSRoute(self, *args, **kwargs):
"""Executes the withdrawADVPLSRoute operation on the server.
Age out percentage of BGP Routes in a Route Range
The IxNetwork model allows for multiple method Signatures with the same name while python does not.
withdrawADVPLSRoute(SessionIndices=list)
----------------------------------------
- SessionIndices (list(number)): This parameter requires an array of session numbers 1 2 3
withdrawADVPLSRoute(SessionIndices=string)
------------------------------------------
- SessionIndices (str): This parameter requires a string of session numbers 1-4;6;7-12
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('withdrawADVPLSRoute', payload=payload, response_object=None)
def Withdrawadvplsroute(self, *args, **kwargs):
"""Executes the withdrawadvplsroute operation on the server.
Completely/Partially age out routes contained in this route range.
withdrawadvplsroute(Arg2=list)list
----------------------------------
- Arg2 (list(number)): List of indices into the group. An empty list indicates all instances in the group.
- Returns list(str): ID to associate each async action invocation
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
for i in range(len(args)): payload['Arg%s' % (i + 2)] = args[i]
for item in kwargs.items(): payload[item[0]] = item[1]
return self._execute('withdrawadvplsroute', payload=payload, response_object=None)
| [
"andy.balogh@keysight.com"
] | andy.balogh@keysight.com |
031a0a3da8d9322431d08fee84417af102ea9749 | 9e7e9669ae912e5c3b17deff4267ebf29f32f633 | /seq/__init__.py | 8ab4e9ddd5f93f9551fbac07c408647d01f29f54 | [] | no_license | tjacek/DeepActionLearning | 2304d050986dc8dbd917a7d065b3e68fc03bf9c0 | 153ca8849c78ec265543602728ce0a104da60dcc | refs/heads/master | 2021-01-10T18:29:33.535524 | 2019-08-02T19:53:47 | 2019-08-02T19:53:47 | 40,564,155 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,905 | py | import numpy as np
import utils
class ActionGroup(object):
def __init__(self, actions):
self.actions = actions
def __len__(self):
return len(self.actions)
def __getitem__(self, key):
return self.actions[key]
def raw(self):
if(type(self.actions)==dict):
return self.actions.values()
return self.actions
def select(self,selector=None,as_group=False):
if(not selector):
selector=lambda action_i:(action_i.person % 2)==1
train,test=utils.split(self.actions,selector)
if(as_group):
return ActionGroup(train),ActionGroup(test)
return train,test
def normalization(self):
feats=self.as_array()
mean_feats=np.mean(feats,axis=0)
std_feats=np.std(feats,axis=0)
for action_i in self.actions:
img_seq_i=action_i.as_array()
img_seq_i-=mean_feats
img_seq_i/=std_feats
action_i.img_seq=list(img_seq_i)
def as_array(self):
feats=[]
for action_i in self.actions:
feats+=action_i.img_seq
return np.array(feats)
class Action(object):
def __init__(self,img_seq,name,cat,person):
self.img_seq=img_seq
self.name=name
self.cat=cat
self.person=person
def __str__(self):
return self.name
def __len__(self):
return len(self.img_seq)
def __call__(self,fun,whole_seq=True,feats=False):
#print(self.name)
img_seq=self.as_features() if(feats) else self.img_seq
if(whole_seq):
new_seq=fun(img_seq)
else:
new_seq=[ fun(img_i) for img_i in img_seq]
if(feats):
new_seq=np.array(new_seq).T
return Action(new_seq,self.name,self.cat,self.person)
def clone(self,img_seq):
return Action(img_seq,self.name,self.cat,self.person)
def dim(self):
frame=self.img_seq[0]
if(type(frame)==list):
return len(frame)
return frame.shape[0]
def as_array(self):
return np.array(self.img_seq)
def as_features(self):
action_array=self.as_array().T
return [ feature_i for feature_i in action_array]
def as_pairs(self):#,norm=255.0):
#norm_imgs=[ (img_i/norm)
# for img_i in self.img_seq]
return [ (self.cat,img_i) for img_i in norm_imgs]
def by_cat(actions):
cats=[action_i.cat for action_i in actions]
actions_by_cat={ cat_i:[] for cat_i in np.unique(cats)}
for action_i in actions:
actions_by_cat[action_i.cat].append(action_i)
return actions_by_cat
def person_rep(actions):
reps={}
for action_i in actions:
action_id=str(action_i.cat)+str(action_i.person)
if(not action_id in reps):
reps[action_id]=action_i
return reps.values() | [
"tjacek@student.agh.edu.pl"
] | tjacek@student.agh.edu.pl |
ba68f05dcf1908bb7c1f823730e0551e1e00a52f | 8faf4780686c31588ade78ca69b3737b72857fab | /tests/python/test_while.py | 805c752709cd2dd2765e425ef2a2d072e90dbb58 | [
"MIT"
] | permissive | AnimatedRNG/taichi | 0333ce8da61debe51872ff183ba47cd0bbf56469 | f1f403042dadf8b58887431dbf7a9a661c005bb2 | refs/heads/master | 2020-08-08T06:17:23.366495 | 2019-10-08T20:31:20 | 2019-10-08T20:31:20 | 213,751,271 | 0 | 0 | NOASSERTION | 2019-10-08T20:49:48 | 2019-10-08T20:49:48 | null | UTF-8 | Python | false | false | 372 | py | import taichi as ti
def test_while():
for arch in [ti.x86_64, ti.cuda]:
ti.reset()
ti.cfg.arch = arch
x = ti.var(ti.f32)
N = 1
@ti.layout
def place():
ti.root.dense(ti.i, N).place(x)
@ti.kernel
def func():
i = 0
s = 0
while i < 10:
s += i
i += 1
x[0] = s
func()
assert x[0] == 45
| [
"yuanmhu@gmail.com"
] | yuanmhu@gmail.com |
7b5be917715da7fd754a056100561d7bcd7acb68 | 5864e86954a221d52d4fa83a607c71bacf201c5a | /eve/client/script/ui/control/pointerPanel.py | b422abd2b54630179759c716e4977f3dd4c7bec5 | [] | no_license | connoryang/1v1dec | e9a2303a01e5a26bf14159112b112be81a6560fd | 404f2cebf13b311e754d45206008918881496370 | refs/heads/master | 2021-05-04T02:34:59.627529 | 2016-10-19T08:56:26 | 2016-10-19T08:56:26 | 71,334,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,219 | py | #Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\ui\control\pointerPanel.py
from carbonui.primitives.container import Container
from carbonui.primitives.frame import Frame
from carbonui.primitives.transform import Transform
from carbonui.primitives.layoutGrid import LayoutGrid
import carbonui.const as uiconst
import blue
import weakref
from eve.client.script.ui.control.themeColored import FrameThemeColored, SpriteThemeColored
LEFTGROUP = [uiconst.POINT_LEFT_2, uiconst.POINT_LEFT_1, uiconst.POINT_LEFT_3]
RIGHTGROUP = [uiconst.POINT_RIGHT_2, uiconst.POINT_RIGHT_1, uiconst.POINT_RIGHT_3]
TOPGROUP = [uiconst.POINT_TOP_2, uiconst.POINT_TOP_1, uiconst.POINT_TOP_3]
BOTTOMGROUP = [uiconst.POINT_BOTTOM_2, uiconst.POINT_BOTTOM_1, uiconst.POINT_BOTTOM_3]
CORNERGROUP = [uiconst.POINT_TOPLEFT,
uiconst.POINT_TOPRIGHT,
uiconst.POINT_BOTTOMLEFT,
uiconst.POINT_BOTTOMRIGHT]
POINT_CORRECTION = {uiconst.POINT_LEFT_1: LEFTGROUP + RIGHTGROUP + BOTTOMGROUP + TOPGROUP,
uiconst.POINT_LEFT_2: LEFTGROUP + RIGHTGROUP + BOTTOMGROUP + TOPGROUP,
uiconst.POINT_LEFT_3: LEFTGROUP + RIGHTGROUP + BOTTOMGROUP + TOPGROUP,
uiconst.POINT_RIGHT_1: RIGHTGROUP + LEFTGROUP + BOTTOMGROUP + TOPGROUP,
uiconst.POINT_RIGHT_2: RIGHTGROUP + LEFTGROUP + BOTTOMGROUP + TOPGROUP,
uiconst.POINT_RIGHT_3: RIGHTGROUP + LEFTGROUP + BOTTOMGROUP + TOPGROUP,
uiconst.POINT_BOTTOM_1: BOTTOMGROUP + LEFTGROUP + TOPGROUP + RIGHTGROUP,
uiconst.POINT_BOTTOM_2: [uiconst.POINT_LEFT_2] + BOTTOMGROUP + LEFTGROUP + TOPGROUP + RIGHTGROUP,
uiconst.POINT_BOTTOM_3: BOTTOMGROUP + LEFTGROUP + TOPGROUP + RIGHTGROUP,
uiconst.POINT_TOP_1: TOPGROUP + LEFTGROUP + BOTTOMGROUP + RIGHTGROUP,
uiconst.POINT_TOP_2: TOPGROUP + LEFTGROUP + BOTTOMGROUP + RIGHTGROUP,
uiconst.POINT_TOP_3: TOPGROUP + LEFTGROUP + BOTTOMGROUP + RIGHTGROUP}
FRAME_WITH_POINTER_SKIN_GENERAL = 'general'
FRAME_WITH_POINTER_SKIN_BADGE = 'badgeStyle'
class GeneralFrameWithPointerSkin(object):
def __init__(self):
self.skinName = 'general'
self.leftTexture = 'res:/UI/Texture/classes/FrameWithPointer/pointer_left_02.png'
self.rightTexture = 'res:/UI/Texture/classes/FrameWithPointer/pointer_right_02.png'
self.topRightTexture = 'res:/UI/Texture/classes/FrameWithPointer/pointer_topright_02.png'
self.topLeftTexture = 'res:/UI/Texture/classes/FrameWithPointer/pointer_topleft_02.png'
self.upTexture = 'res:/UI/Texture/classes/FrameWithPointer/pointer_up_02.png'
self.downTexture = 'res:/UI/Texture/classes/FrameWithPointer/pointer_down_02.png'
self.bottomLeftTexture = 'res:/UI/Texture/classes/FrameWithPointer/pointer_bottomleft_02.png'
self.bottomRightTexture = 'res:/UI/Texture/classes/FrameWithPointer/pointer_bottomright_02.png'
self.backgroundTexture = 'res:/UI/Texture/classes/FrameWithPointer/background_04.png'
self.backgroundOffset = -15
self.backgroundCornerSize = 19
class BadgeStyleFrameWithPointerSkin(GeneralFrameWithPointerSkin):
def __init__(self):
super(BadgeStyleFrameWithPointerSkin, self).__init__()
self.skinName = 'badgeStyle'
self.leftTexture = 'res:/UI/Texture/classes/Notifications/pointer_left_02.png'
self.rightTexture = 'res:/UI/Texture/classes/Notifications/pointer_right_02.png'
self.backgroundTexture = 'res:/UI/Texture/classes/Notifications/newItemsBadgeBase.png'
self.backgroundOffset = -6
self.backgroundCornerSize = 10
SKIN_NAME_TO_CLASS = {FRAME_WITH_POINTER_SKIN_GENERAL: GeneralFrameWithPointerSkin,
FRAME_WITH_POINTER_SKIN_BADGE: BadgeStyleFrameWithPointerSkin}
class PointerPanel(LayoutGrid):
default_state = uiconst.UI_PICKCHILDREN
default_align = uiconst.ABSOLUTE
default_opacity = 0.0
default_cellClipChildren = False
beingDestroyed = False
_owner = None
pointerSize = 9
scaleTransform = None
defaultPointer = uiconst.POINT_BOTTOM_2
def ApplyAttributes(self, attributes):
attributes.align = uiconst.TOPLEFT
LayoutGrid.ApplyAttributes(self, attributes)
self.backgroundFrame = FrameWithPointer(bgParent=self, color=attributes.color)
if attributes.owner:
self.owner = attributes.owner
else:
self.opacity = 1.0
def SetBackgroundColor(self, color):
self.backgroundFrame.SetColor(color)
def SetBackgroundAlpha(self, alphaValue):
self.backgroundFrame.SetAlpha(alphaValue)
@apply
def owner():
doc = "Weakref'd Owner of the panel"
def fset(self, value):
self._owner = weakref.ref(value)
def fget(self):
if self._owner:
owner = self._owner()
if owner and not owner.destroyed:
return owner
self._owner = None
return property(**locals())
def Close(self, *args):
LayoutGrid.Close(self, *args)
if getattr(self, 'debugFrame', None):
self.debugFrame.Close()
if self.scaleTransform:
self.scaleTransform.Close()
self.scaleTransform = None
@apply
def pos():
doc = 'Position of UI element'
def fget(self):
return (self._left,
self._top,
self._width,
self._height)
def fset(self, value):
left, top, width, height = value
adjustedLeft = int(round(left))
adjustedTop = int(round(top))
adjustedWidth = int(round(width))
adjustedHeight = int(round(height))
self._left = adjustedLeft
self._top = adjustedTop
self._width = adjustedWidth
self._height = adjustedHeight
self._displayX = uicore.ScaleDpi(adjustedLeft)
self._displayY = uicore.ScaleDpi(adjustedTop)
self._displayWidth = uicore.ScaleDpi(adjustedWidth)
self._displayHeight = uicore.ScaleDpi(adjustedHeight)
ro = self.renderObject
if ro:
ro.displayX = self._displayX
ro.displayY = self._displayY
ro.displayWidth = self._displayWidth
ro.displayHeight = self._displayHeight
self.UpdateBackgrounds()
return property(**locals())
@apply
def left():
doc = 'x-coordinate of UI element'
def fget(self):
return self._left
def fset(self, value):
adjustedValue = int(round(value))
if adjustedValue != self._left:
self._left = adjustedValue
self._displayX = uicore.ScaleDpi(adjustedValue)
ro = self.renderObject
if ro:
ro.displayX = self._displayX
return property(**locals())
@apply
def top():
doc = 'y-coordinate of UI element'
def fget(self):
return self._top
def fset(self, value):
adjustedValue = int(round(value))
if adjustedValue != self._top:
self._top = adjustedValue
self._displayY = uicore.ScaleDpi(adjustedValue)
ro = self.renderObject
if ro:
ro.displayY = self._displayY
return property(**locals())
@apply
def width():
doc = 'Width of UI element'
def fget(self):
return self._width
def fset(self, value):
adjustedValue = int(round(value))
if adjustedValue != self._width:
self._width = adjustedValue
self._displayWidth = uicore.ScaleDpi(adjustedValue)
ro = self.renderObject
if ro:
ro.displayWidth = self._displayWidth
self.UpdateBackgrounds()
owner = self.owner
if owner is None:
return
RefreshPanelPosition(self)
return property(**locals())
@apply
def height():
doc = 'Height of UI element'
def fget(self):
return self._height
def fset(self, value):
adjustedValue = int(round(value))
if adjustedValue != self._height:
self._height = adjustedValue
self._displayHeight = uicore.ScaleDpi(adjustedValue)
ro = self.renderObject
if ro:
ro.displayHeight = self._displayHeight
self.UpdateBackgrounds()
owner = self.owner
if owner is None:
return
RefreshPanelPosition(self)
return property(**locals())
def GetPointerOffset(self):
return self.backgroundFrame.pointerOffset
def ShowPanel(self, owner):
blue.synchro.Yield()
if self.destroyed:
return
blue.synchro.SleepWallclock(2)
if self.destroyed:
return
uicore.animations.FadeTo(self, startVal=self.opacity, endVal=1.0, duration=0.05, curveType=uiconst.ANIM_SMOOTH)
class FrameWithPointer(Container):
    # (x, y) anchor of the pointer tip, filled in by UpdatePointerPosition.
    pointerOffset = None
    # Class-level cache: one skin instance per skin name, shared by all frames.
    skinCache = {}
    def ApplyAttributes(self, attributes):
        # Build the themed background plus the pointer sprite for this frame.
        Container.ApplyAttributes(self, attributes)
        skinName = attributes.get('skinName', 'general')
        actualSkinClass = SKIN_NAME_TO_CLASS.get(skinName)
        # Instantiate each skin lazily, once, then reuse it for every frame.
        if not FrameWithPointer.skinCache.get(skinName):
            FrameWithPointer.skinCache[skinName] = actualSkinClass()
        self.skin = FrameWithPointer.skinCache[skinName]
        self._pointer = SpriteThemeColored(texturePath='res:/UI/Texture/classes/FrameWithPointer/pointer_down_02.png', parent=self, colorType=attributes.colorType or uiconst.COLORTYPE_UIHILIGHTGLOW, opacity=0.95)
        self._background = FrameThemeColored(name='__underlay', bgParent=self, cornerSize=self.skin.backgroundCornerSize, offset=self.skin.backgroundOffset, texturePath=self.skin.backgroundTexture, colorType=attributes.colorType or uiconst.COLORTYPE_UIHILIGHTGLOW, opacity=0.95)
    def SetColor(self, color):
        # Tint background and pointer together so they always match.
        self._background.color = color
        self._pointer.color = color
    def SetAlpha(self, alphaValue):
        self._background.SetAlpha(alphaValue)
        self._pointer.SetAlpha(alphaValue)
    def UpdatePointerPosition(self, positionFlag):
        # Place the pointer sprite on the frame edge described by positionFlag
        # and return its anchor offset in display pixels (also stored in
        # self.pointerOffset).  positionFlag is uiconst.POINTER_NONE or an
        # (x, y) pair where each coordinate is 0 (before the near edge),
        # 1-3 (along the edge, 2 = centered) or 4 (past the far edge).
        if positionFlag == uiconst.POINTER_NONE:
            self._pointer.display = False
            return (0, 0)
        SIZE = 24
        BACKOFFSET = 8
        x, y = positionFlag
        # Candidate sprite positions indexed by the 0..4 flag value; the '2'
        # entry centers the sprite (integer division under Python 2).
        self._pointer.displayX = [-SIZE + BACKOFFSET,
         0,
         (self.displayWidth - SIZE) / 2,
         self.displayWidth - SIZE,
         self.displayWidth - BACKOFFSET][x]
        self._pointer.displayY = [-SIZE + BACKOFFSET,
         0,
         (self.displayHeight - SIZE) / 2,
         self.displayHeight - SIZE,
         self.displayHeight - BACKOFFSET][y]
        self._pointer.displayWidth = SIZE
        self._pointer.displayHeight = SIZE
        # Pick the texture and fine-tune position/anchor per edge or corner.
        # NOTE(review): if both x and y are in 1..3, none of the branches
        # below assigns resPath and SetTexturePath would raise NameError —
        # callers presumably never request a fully centered pointer; confirm.
        if y == 0:
            if x == 0:
                self._pointer.displayX = -11
                self._pointer.displayY = -11
                resPath = self.skin.topLeftTexture
                self.pointerOffset = [self._pointer.displayX + 5, self._pointer.displayY + 5]
            elif x == 4:
                self._pointer.displayX = self.displayWidth - 13
                self._pointer.displayY = -11
                resPath = self.skin.topRightTexture
                self.pointerOffset = [self._pointer.displayX + 19, self._pointer.displayY + 5]
            else:
                resPath = self.skin.upTexture
                self.pointerOffset = [self._pointer.displayX + SIZE / 2, self._pointer.displayY + 10]
        elif y == 4:
            if x == 0:
                self._pointer.displayX = -11
                self._pointer.displayY = self.displayHeight - 13
                resPath = self.skin.bottomLeftTexture
                self.pointerOffset = [self._pointer.displayX + 5, self._pointer.displayY + 19]
            elif x == 4:
                self._pointer.displayX = self.displayWidth - 13
                self._pointer.displayY = self.displayHeight - 13
                resPath = self.skin.bottomRightTexture
                self.pointerOffset = [self._pointer.displayX + 19, self._pointer.displayY + 19]
            else:
                resPath = self.skin.downTexture
                self.pointerOffset = [self._pointer.displayX + SIZE / 2, self._pointer.displayY + 14]
        elif x == 0:
            resPath = self.skin.leftTexture
            self.pointerOffset = [self._pointer.displayX + 10, self._pointer.displayY + SIZE / 2]
        elif x == 4:
            resPath = self.skin.rightTexture
            self.pointerOffset = [self._pointer.displayX + 14, self._pointer.displayY + SIZE / 2]
        self._pointer.SetTexturePath(resPath)
        self._pointer.display = True
        return self.pointerOffset
def RefreshPanelPosition(pointerPanel):
    """Re-anchor *pointerPanel* beside its owner, picking a pointer flag.

    Does nothing when the panel no longer has an owner.  When another window
    partially covers the owner, the panel's default pointer flag is used;
    otherwise the owner may supply its own preferred flag.
    """
    owner = pointerPanel.owner
    if owner is None:
        return
    checkBlocked = getattr(pointerPanel, 'checkIfBlocked', True)
    panelPosition, isBlocked = GetPanelInterestFromObject(
        owner, checkIfBlockedByOther=checkBlocked)
    pointer = pointerPanel.defaultPointer if isBlocked else GetPanelPointerFromOwner(owner)
    if pointer is None:
        pointer = pointerPanel.defaultPointer
    UpdatePanelPosition(pointerPanel, panelPosition, pointer)
def GetPanelInterestFromObject(uiObject, checkIfBlockedByOther = True):
    """Return ((left, top, width, height), isBlocked) for anchoring a panel.

    The rect starts from the object's own geometry (or a custom/auxiliary
    tooltip position) and is clipped against every ancestor's absolute rect.
    When *checkIfBlockedByOther* is set and another window partially covers
    the object, the rect is further shrunk to a 16x16 area around the mouse
    cursor and isBlocked is reported as True.
    """
    if hasattr(uiObject, 'GetTooltipPosition'):
        customInterestRect = uiObject.GetTooltipPosition()
        if customInterestRect:
            return (customInterestRect, False)
    uilib = uicore.uilib
    if uiObject is uilib.GetAuxMouseOver() and uilib.auxiliaryTooltip and uilib.auxiliaryTooltipPosition:
        left, top, width, height = uilib.auxiliaryTooltipPosition
    else:
        left, top, width, height = uiObject.GetAbsolute()
    right = left + width
    bottom = top + height
    # Clip the rect to every ancestor so the panel anchors to the visible part.
    ancestor = uiObject.parent
    while ancestor:
        aLeft, aTop, aWidth, aHeight = ancestor.GetAbsolute()
        left = max(left, aLeft)
        top = max(top, aTop)
        right = min(right, aLeft + aWidth)
        bottom = min(bottom, aTop + aHeight)
        ancestor = ancestor.parent
    isBlocked = False
    if checkIfBlockedByOther:
        isBlocked = IsPartiallyBlockedByOther(uiObject, (left, top, right, bottom))
        if isBlocked:
            # Collapse the interest area to a small box around the cursor.
            left = max(left, uilib.x - 8)
            top = max(top, uilib.y - 8)
            right = min(right, uilib.x + 8)
            bottom = min(bottom, uilib.y + 8)
    return ((left, top, right - left, bottom - top), isBlocked)
def IsPartiallyBlockedByOther(uiObject, rect):
    """Return True when a window above *uiObject* overlaps *rect*.

    *rect* is (left, top, right, bottom) in absolute coordinates.  Windows
    that are hidden, contain the object, or sit below it in the desktop
    hierarchy are ignored.
    """
    left, top, right, bottom = rect
    ownTrace = GetObjectDesktopHierarchyPosition(uiObject)
    for window in uicore.registry.GetValidWindows():
        if not window.display or uiObject.IsUnder(window):
            continue
        # Lexicographic trace comparison orders objects by desktop stacking.
        if ownTrace < GetObjectDesktopHierarchyPosition(window):
            continue
        wLeft, wTop, wWidth, wHeight = window.GetAbsolute()
        overlapsHorizontally = right > wLeft and left < wLeft + wWidth
        overlapsVertically = bottom > wTop and top < wTop + wHeight
        if overlapsHorizontally and overlapsVertically:
            return True
    return False
def SubtractBlockingUIElements(uiObject, rect):
    """Shrink *rect* so it avoids windows stacked above *uiObject*.

    *rect* is (left, top, right, bottom) in absolute coordinates; the trimmed
    rect is returned in the same form.  Horizontal trimming is preferred;
    vertical trimming only happens when no horizontal edge was moved.
    """
    left, top, right, bottom = rect
    ownTrace = GetObjectDesktopHierarchyPosition(uiObject)
    for window in uicore.registry.GetValidWindows():
        if not window.display or uiObject.IsUnder(window):
            continue
        if ownTrace < GetObjectDesktopHierarchyPosition(window):
            continue
        wLeft, wTop, wWidth, wHeight = window.GetAbsolute()
        overlapsHorizontally = right > wLeft and left < wLeft + wWidth
        overlapsVertically = bottom > wTop and top < wTop + wHeight
        if not (overlapsHorizontally and overlapsVertically):
            continue
        trimmedHorizontally = False
        if left < wLeft < right:
            right = wLeft
            trimmedHorizontally = True
        if left < wLeft + wWidth < right:
            left = wLeft + wWidth
            trimmedHorizontally = True
        if not trimmedHorizontally:
            if top < wTop < bottom:
                bottom = wTop
            if top < wTop + wHeight < bottom:
                top = wTop + wHeight
    return (left, top, right, bottom)
def GetObjectDesktopHierarchyPosition(uiObject):
    """Return the child-index path from the desktop root down to *uiObject*.

    The root-most index comes first; an object with no parent yields [].
    """
    trace = []
    node = uiObject
    while node.parent:
        trace.append(node.parent.children.index(node))
        node = node.parent
    trace.reverse()
    return trace
def GetPanelPointerFromOwner(uiObject):
    """Resolve the tooltip pointer flag preferred by *uiObject*.

    A non-None result of GetTooltipPointer() wins; otherwise fall back to
    the 'tooltipPointer' attribute, or None when neither is available.
    """
    try:
        getPointer = uiObject.GetTooltipPointer
    except AttributeError:
        pass
    else:
        customPointer = getPointer()
        if customPointer is not None:
            return customPointer
    return getattr(uiObject, 'tooltipPointer', None)
def UpdatePanelPosition(panel, interestRect, menuPointFlag = None, fallbackPointFlags = None):
    """Align *panel* to *interestRect*, retrying fallback flags while offscreen.

    Recurses with the next fallback pointer flag whenever the aligned panel
    sticks out of the desktop area and fallbacks remain.
    """
    if menuPointFlag is None:
        menuPointFlag = uiconst.POINT_BOTTOM_2
    if fallbackPointFlags is None:
        if hasattr(panel.owner, 'GetTooltipPositionFallbacks'):
            fallbackPointFlags = panel.owner.GetTooltipPositionFallbacks()
        else:
            # Copy the correction list so pop() below never mutates the table.
            fallbackPointFlags = POINT_CORRECTION.get(menuPointFlag, [])[:] + CORNERGROUP
    AlignPointPanelToInterest(panel, menuPointFlag, interestRect)
    if not fallbackPointFlags or not panel.parent:
        return
    offscreen = (panel.left < 0 or panel.left + panel.width > uicore.desktop.width or
                 panel.top < 0 or panel.top + panel.height > uicore.desktop.height)
    if offscreen:
        UpdatePanelPosition(panel, interestRect, menuPointFlag=fallbackPointFlags.pop(0),
                            fallbackPointFlags=fallbackPointFlags)
def AlignPointPanelToInterest(panel, menuPointFlag, interestRect):
    """Position *panel* so its pointer touches *interestRect*.

    *menuPointFlag* is an (fx, fy) pair in the 0..4 scheme used by
    FrameWithPointer.UpdatePointerPosition: 0/4 place the panel just outside
    the rect on that axis, any other value centers it on the pointer anchor.
    """
    al, at, aw, ah = interestRect
    if getattr(panel, 'debugShowInterest', False):
        # Debug aid: visualize the interest rect with a throwaway frame.
        if getattr(panel, 'debugFrame', None):
            panel.debugFrame.Close()
        panel.debugFrame = Frame(parent=uicore.layer.main, pos=interestRect, align=uiconst.TOPLEFT, idx=0)
    pointerX, pointerY = panel.backgroundFrame.UpdatePointerPosition(menuPointFlag)
    pointerX = uicore.ReverseScaleDpi(pointerX)
    pointerY = uicore.ReverseScaleDpi(pointerY)
    fx, fy = menuPointFlag
    panel.menuPointFlag = menuPointFlag
    if fy == 4:
        panel.top = at - panel.height - panel.pointerSize
    elif fy == 0:
        panel.top = at + ah + panel.pointerSize
    else:
        panel.top = at + ah / 2 - pointerY
    if fx == 4:
        panel.left = al - panel.width - panel.pointerSize
    elif fx == 0:
        panel.left = al + aw + panel.pointerSize
    else:
        panel.left = al + aw / 2 - pointerX
def FadeOutPanelAndClose(panel, duration = 0.2):
    """Animate *panel* fading and shrinking toward its pointer anchor, then close it."""
    if panel.destroyed or panel.beingDestroyed:
        return
    panel.beingDestroyed = True
    # Scale the remaining animation time by the current opacity.
    duration = duration * panel.opacity
    if not duration:
        panel.Close()
        return
    pointerOffset = panel.backgroundFrame.pointerOffset
    if not pointerOffset:
        panel.Close()
        return
    anchorX = uicore.ReverseScaleDpi(panel.displayX + pointerOffset[0])
    anchorY = uicore.ReverseScaleDpi(panel.displayY + pointerOffset[1])
    scalingCenter = (anchorX / float(uicore.desktop.width), anchorY / float(uicore.desktop.height))
    panel.scaleTransform = Transform(parent=panel.parent, state=uiconst.UI_DISABLED, align=uiconst.TOALL, scalingCenter=scalingCenter)
    # Reparent the panel's render object under the transform so the scale
    # animation applies to the whole panel.
    panel.parent.renderObject.children.remove(panel.renderObject)
    panel.scaleTransform.renderObject.children.append(panel.renderObject)
    uicore.animations.FadeTo(panel.scaleTransform, startVal=panel.opacity, endVal=0.0, duration=duration * 0.5, curveType=uiconst.ANIM_SMOOTH)
    uicore.animations.Tr2DScaleTo(panel.scaleTransform, panel.scaleTransform.scale, (0.0, 0.0), duration=duration, callback=panel.Close)
| [
"le02005@163.com"
] | le02005@163.com |
988ec595b96df9a55ca984a2c195065bc0b9b7bc | 3152fd9ec9ccd83b6e0d2ea40aa36a4b145aea2e | /part3/09_面向对象特性/hm_11_多态案例.py | 6383bd69e621871bb5e25b67ee5e824b7d626a37 | [] | no_license | derekduan1028/hm_python | cf1b6037ac1cde8dcac393453a291c39b5a936c2 | ae79f817a55d1b3bfdbdf1b50d5147946c8b7401 | refs/heads/master | 2023-01-21T00:33:00.927709 | 2020-11-27T00:00:35 | 2020-11-27T00:00:35 | 291,869,531 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 823 | py | #!/usr/bin/python
# coding:utf-8
"""
@author:derek
@contract:derek_duan@sina.com
@file: hm_11_多态案例.py
@time: 2020-10-24 22:27
"""
class Dog(object):
    """A plain dog that plays on the ground (base class of the polymorphism demo)."""
    def __init__(self, name):
        # Display name used in every message this dog prints.
        self.name = name
    def game(self):
        # Default play behaviour; subclasses may override it.
        print("%s 蹦蹦跳跳。。。" % self.name)
class XiaoTianDog(Dog):
    """A 'howling celestial dog' that overrides the base play behaviour."""
    def game(self):
        # Override: this dog flies into the sky instead of hopping.
        print("%s 飞到天上去玩耍" % self.name)
class Person(object):
    """A person that can play with any dog-like object (duck typing)."""
    def __init__(self, name):
        # Person's display name.
        self.name = name
    def game_with_dog(self, dog):
        # Announce the pairing, then let the dog play.  Which game() runs
        # depends on the runtime type of *dog* (polymorphic call).
        print("%s 和 %s 快乐的玩耍" % (self.name, dog.name))
        dog.game()
# Create a dog instance (the base-class variant is kept for comparison)
# wangcai = Dog("旺财")
wangcai = XiaoTianDog("飞天旺财")
# Create a person instance
xiaoming = Person("小明")
# Xiaoming plays with the dog: XiaoTianDog.game() runs via polymorphism
xiaoming.game_with_dog(wangcai)
| [
"derek@Derek-Mbp"
] | derek@Derek-Mbp |
f457721ccf69c25c17cde506ef7326a745186344 | 6199ab5c7d7b135c4d4020a0298c33cddcfea960 | /NanoGardener/python/framework/samples/Run2016_102X_nAODv7.py | 9b673aa310e429efad7128f4e114c7036faf2411 | [] | no_license | friccita/LatinoAnalysis | 45b353b76e3561dc43e4a6c65878813e23369d01 | 954bb150de098f9cc9e96276aca68453c6c42adf | refs/heads/master | 2022-12-27T03:47:46.553078 | 2020-10-14T15:10:32 | 2020-10-14T15:10:32 | 290,237,710 | 0 | 0 | null | 2020-08-25T14:29:39 | 2020-08-25T14:29:38 | null | UTF-8 | Python | false | false | 4,207 | py | Samples = {}
# CMS Run2016 data samples (02Apr2020 re-reco, NanoAODv7): each entry maps a
# human-readable sample name to its DAS dataset path under the 'nanoAOD' key.
Samples["DoubleEG_Run2016B-02Apr2020_ver1-v1"] = {'nanoAOD': '/DoubleEG/Run2016B-02Apr2020_ver1-v1/NANOAOD'}
Samples["DoubleEG_Run2016B-02Apr2020_ver2-v1"] = {'nanoAOD': '/DoubleEG/Run2016B-02Apr2020_ver2-v1/NANOAOD'}
Samples["DoubleEG_Run2016C-02Apr2020-v1"] = {'nanoAOD': '/DoubleEG/Run2016C-02Apr2020-v1/NANOAOD'}
Samples["DoubleEG_Run2016D-02Apr2020-v1"] = {'nanoAOD': '/DoubleEG/Run2016D-02Apr2020-v1/NANOAOD'}
Samples["DoubleEG_Run2016E-02Apr2020-v1"] = {'nanoAOD': '/DoubleEG/Run2016E-02Apr2020-v1/NANOAOD'}
Samples["DoubleEG_Run2016F-02Apr2020-v1"] = {'nanoAOD': '/DoubleEG/Run2016F-02Apr2020-v1/NANOAOD'}
Samples["DoubleEG_Run2016G-02Apr2020-v1"] = {'nanoAOD': '/DoubleEG/Run2016G-02Apr2020-v1/NANOAOD'}
Samples["DoubleEG_Run2016H-02Apr2020-v1"] = {'nanoAOD': '/DoubleEG/Run2016H-02Apr2020-v1/NANOAOD'}
Samples["DoubleMuon_Run2016B-02Apr2020_ver1-v1"] = {'nanoAOD': '/DoubleMuon/Run2016B-02Apr2020_ver1-v1/NANOAOD'}
Samples["DoubleMuon_Run2016B-02Apr2020_ver2-v1"] = {'nanoAOD': '/DoubleMuon/Run2016B-02Apr2020_ver2-v1/NANOAOD'}
Samples["DoubleMuon_Run2016C-02Apr2020-v1"] = {'nanoAOD': '/DoubleMuon/Run2016C-02Apr2020-v1/NANOAOD'}
Samples["DoubleMuon_Run2016D-02Apr2020-v1"] = {'nanoAOD': '/DoubleMuon/Run2016D-02Apr2020-v1/NANOAOD'}
Samples["DoubleMuon_Run2016E-02Apr2020-v1"] = {'nanoAOD': '/DoubleMuon/Run2016E-02Apr2020-v1/NANOAOD'}
Samples["DoubleMuon_Run2016F-02Apr2020-v1"] = {'nanoAOD': '/DoubleMuon/Run2016F-02Apr2020-v1/NANOAOD'}
Samples["DoubleMuon_Run2016G-02Apr2020-v1"] = {'nanoAOD': '/DoubleMuon/Run2016G-02Apr2020-v1/NANOAOD'}
Samples["DoubleMuon_Run2016H-02Apr2020-v1"] = {'nanoAOD': '/DoubleMuon/Run2016H-02Apr2020-v1/NANOAOD'}
Samples["MuonEG_Run2016B-02Apr2020_ver1-v1"] = {'nanoAOD': '/MuonEG/Run2016B-02Apr2020_ver1-v1/NANOAOD'}
Samples["MuonEG_Run2016B-02Apr2020_ver2-v1"] = {'nanoAOD': '/MuonEG/Run2016B-02Apr2020_ver2-v1/NANOAOD'}
Samples["MuonEG_Run2016C-02Apr2020-v1"] = {'nanoAOD': '/MuonEG/Run2016C-02Apr2020-v1/NANOAOD'}
Samples["MuonEG_Run2016D-02Apr2020-v1"] = {'nanoAOD': '/MuonEG/Run2016D-02Apr2020-v1/NANOAOD'}
# NOTE(review): the key below says '-v1' but the dataset path is '-v2' — this
# looks deliberate (the DAS dataset version differs from the naming scheme),
# but verify it is the intended dataset.
Samples["MuonEG_Run2016E-02Apr2020-v1"] = {'nanoAOD': '/MuonEG/Run2016E-02Apr2020-v2/NANOAOD'}
Samples["MuonEG_Run2016F-02Apr2020-v1"] = {'nanoAOD': '/MuonEG/Run2016F-02Apr2020-v1/NANOAOD'}
Samples["MuonEG_Run2016G-02Apr2020-v1"] = {'nanoAOD': '/MuonEG/Run2016G-02Apr2020-v1/NANOAOD'}
Samples["MuonEG_Run2016H-02Apr2020-v1"] = {'nanoAOD': '/MuonEG/Run2016H-02Apr2020-v1/NANOAOD'}
Samples["SingleElectron_Run2016B-02Apr2020_ver1-v1"] = {'nanoAOD': '/SingleElectron/Run2016B-02Apr2020_ver1-v1/NANOAOD'}
Samples["SingleElectron_Run2016B-02Apr2020_ver2-v1"] = {'nanoAOD': '/SingleElectron/Run2016B-02Apr2020_ver2-v1/NANOAOD'}
Samples["SingleElectron_Run2016C-02Apr2020-v1"] = {'nanoAOD': '/SingleElectron/Run2016C-02Apr2020-v1/NANOAOD'}
Samples["SingleElectron_Run2016D-02Apr2020-v1"] = {'nanoAOD': '/SingleElectron/Run2016D-02Apr2020-v1/NANOAOD'}
Samples["SingleElectron_Run2016E-02Apr2020-v1"] = {'nanoAOD': '/SingleElectron/Run2016E-02Apr2020-v1/NANOAOD'}
Samples["SingleElectron_Run2016F-02Apr2020-v1"] = {'nanoAOD': '/SingleElectron/Run2016F-02Apr2020-v1/NANOAOD'}
Samples["SingleElectron_Run2016G-02Apr2020-v1"] = {'nanoAOD': '/SingleElectron/Run2016G-02Apr2020-v1/NANOAOD'}
Samples["SingleElectron_Run2016H-02Apr2020-v1"] = {'nanoAOD': '/SingleElectron/Run2016H-02Apr2020-v1/NANOAOD'}
Samples["SingleMuon_Run2016B-02Apr2020_ver1-v1"] = {'nanoAOD': '/SingleMuon/Run2016B-02Apr2020_ver1-v1/NANOAOD'}
Samples["SingleMuon_Run2016B-02Apr2020_ver2-v1"] = {'nanoAOD': '/SingleMuon/Run2016B-02Apr2020_ver2-v1/NANOAOD'}
Samples["SingleMuon_Run2016C-02Apr2020-v1"] = {'nanoAOD': '/SingleMuon/Run2016C-02Apr2020-v1/NANOAOD'}
Samples["SingleMuon_Run2016D-02Apr2020-v1"] = {'nanoAOD': '/SingleMuon/Run2016D-02Apr2020-v1/NANOAOD'}
Samples["SingleMuon_Run2016E-02Apr2020-v1"] = {'nanoAOD': '/SingleMuon/Run2016E-02Apr2020-v1/NANOAOD'}
Samples["SingleMuon_Run2016F-02Apr2020-v1"] = {'nanoAOD': '/SingleMuon/Run2016F-02Apr2020-v1/NANOAOD'}
Samples["SingleMuon_Run2016G-02Apr2020-v1"] = {'nanoAOD': '/SingleMuon/Run2016G-02Apr2020-v1/NANOAOD'}
Samples["SingleMuon_Run2016H-02Apr2020-v1"] = {'nanoAOD': '/SingleMuon/Run2016H-02Apr2020-v1/NANOAOD'}
| [
"lorenzo.viliani@cern.ch"
] | lorenzo.viliani@cern.ch |
e84e8006b86eaf2f65c1be47f8302b65318544e2 | ecc93fd36c8da14a8031f8349882981c23b4ebdd | /src/zrc/conf/base.py | 7536c107e8745222d6e4923fe728711b8ee51411 | [] | no_license | bvhme/gemma-zaakregistratiecomponent | 44b65b90853c78904bb420275c0503f2b5a8cdf0 | 00f06f442e05863edf1639e03ea6dd96873ee926 | refs/heads/master | 2020-03-28T13:59:07.970662 | 2018-09-06T15:10:05 | 2018-09-06T15:10:05 | 148,447,973 | 0 | 0 | null | 2018-09-12T08:32:50 | 2018-09-12T08:32:49 | null | UTF-8 | Python | false | false | 9,347 | py | import os
# Django-hijack (and Django-hijack-admin)
from django.urls import reverse_lazy
from .api import * # noqa
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# DJANGO_PROJECT_DIR is the package directory; BASE_DIR is two levels up.
DJANGO_PROJECT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
BASE_DIR = os.path.abspath(os.path.join(DJANGO_PROJECT_DIR, os.path.pardir, os.path.pardir))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# Must be supplied through the SECRET_KEY environment variable; with the
# variable unset this is None and Django rejects it at startup.
SECRET_KEY = os.getenv('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = []
# PostGIS backend (GeoDjango); connection details come from DB_* environment
# variables, defaulting to zrc/zrc/zrc on localhost.
DATABASES = {
    'default': {
        'ENGINE': 'django.contrib.gis.db.backends.postgis',
        'NAME': os.getenv('DB_NAME', 'zrc'),
        'USER': os.getenv('DB_USER', 'zrc'),
        'PASSWORD': os.getenv('DB_PASSWORD', 'zrc'),
        'HOST': os.getenv('DB_HOST', 'localhost'),
        # NOTE(review): the fallback is the int 5432 while the env var would
        # yield a string; Django accepts both, but the types differ by source.
        'PORT': os.getenv('DB_PORT', 5432),
    }
}
# Application definition
INSTALLED_APPS = [
    # Note: contenttypes should be first, see Django ticket #10827
    'django.contrib.contenttypes',
    'django.contrib.auth',
    'django.contrib.sessions',
    # Note: If enabled, at least one Site object is required
    # 'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Optional applications.
    'django.contrib.admin',
    'django.contrib.gis',
    # 'django.contrib.admindocs',
    # 'django.contrib.humanize',
    # External applications.
    'axes',
    'django_filters',
    'zds_schema', # before drf_yasg to override the management command
    'drf_yasg',
    'rest_framework',
    'rest_framework_gis',
    'rest_framework_filters',
    # Project applications.
    'zrc.accounts',
    'zrc.api',
    'zrc.datamodel',
    'zrc.utils',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    # 'django.middleware.locale.LocaleMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'zrc.urls'
# List of callables that know how to import templates from various sources.
# Referenced below via TEMPLATES['OPTIONS']['loaders'].
RAW_TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'admin_tools.template_loaders.Loader',
)
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(DJANGO_PROJECT_DIR, 'templates'),
        ],
        'APP_DIRS': False, # conflicts with explicity specifying the loaders
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'zrc.utils.context_processors.settings',
            ],
            'loaders': RAW_TEMPLATE_LOADERS
        },
    },
]
WSGI_APPLICATION = 'zrc.wsgi.application'
# Database: Defined in target specific settings files.
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
# Dutch locale, timezone-aware datetimes stored in UTC.
LANGUAGE_CODE = 'nl-nl'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
USE_THOUSAND_SEPARATOR = True
# Translations
LOCALE_PATHS = (
    os.path.join(DJANGO_PROJECT_DIR, 'conf', 'locale'),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# Additional locations of static files
STATICFILES_DIRS = (
    os.path.join(DJANGO_PROJECT_DIR, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = [
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
FIXTURE_DIRS = (
    os.path.join(DJANGO_PROJECT_DIR, 'fixtures'),
)
DEFAULT_FROM_EMAIL = 'zrc@example.com'
# SMTP connection timeout in seconds.
EMAIL_TIMEOUT = 10
# Rotating file logs live under <repo root>/log (10 MB x 10 files each).
LOGGING_DIR = os.path.join(BASE_DIR, 'log')
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(asctime)s %(levelname)s %(name)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'timestamped': {
            'format': '%(asctime)s %(levelname)s %(name)s %(message)s'
        },
        'simple': {
            'format': '%(levelname)s %(message)s'
        },
        'performance': {
            'format': '%(asctime)s %(process)d | %(thread)d | %(message)s',
        },
    },
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        },
    },
    'handlers': {
        # Error mails only in production (DEBUG=False).
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
        'null': {
            'level': 'DEBUG',
            'class': 'logging.NullHandler',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'timestamped'
        },
        'django': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(LOGGING_DIR, 'django.log'),
            'formatter': 'verbose',
            'maxBytes': 1024 * 1024 * 10, # 10 MB
            'backupCount': 10
        },
        'project': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(LOGGING_DIR, 'zrc.log'),
            'formatter': 'verbose',
            'maxBytes': 1024 * 1024 * 10, # 10 MB
            'backupCount': 10
        },
        'performance': {
            'level': 'INFO',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': os.path.join(LOGGING_DIR, 'performance.log'),
            'formatter': 'performance',
            'maxBytes': 1024 * 1024 * 10, # 10 MB
            'backupCount': 10
        },
    },
    'loggers': {
        'zrc': {
            'handlers': ['project'],
            'level': 'INFO',
            'propagate': True,
        },
        'django.request': {
            'handlers': ['django'],
            'level': 'ERROR',
            'propagate': True,
        },
        'django.template': {
            'handlers': ['console'],
            'level': 'INFO',
            'propagate': True,
        },
    }
}
#
# Additional Django settings
#
# Custom user model
AUTH_USER_MODEL = 'accounts.User'
# Allow logging in with both username+password and email+password
AUTHENTICATION_BACKENDS = [
    'zrc.accounts.backends.UserModelEmailBackend',
    'django.contrib.auth.backends.ModelBackend'
]
#
# Custom settings
#
PROJECT_NAME = 'zrc'
ENVIRONMENT = None
SHOW_ALERT = True
#
# Library settings
#
ADMIN_INDEX_SHOW_REMAINING_APPS = True
# Django-axes (brute-force login protection)
AXES_LOGIN_FAILURE_LIMIT = 30 # Default: 3
AXES_LOCK_OUT_AT_FAILURE = True # Default: True
AXES_USE_USER_AGENT = False # Default: False
AXES_COOLOFF_TIME = 1 # One hour
AXES_BEHIND_REVERSE_PROXY = True # Default: False (we are typically using Nginx as reverse proxy)
AXES_ONLY_USER_FAILURES = False # Default: False (you might want to block on username rather than IP)
AXES_LOCK_OUT_BY_COMBINATION_USER_AND_IP = False # Default: False (you might want to block on username and IP)
# Django-hijack (and Django-hijack-admin)
HIJACK_LOGIN_REDIRECT_URL = '/'
HIJACK_LOGOUT_REDIRECT_URL = reverse_lazy('admin:accounts_user_changelist')
HIJACK_REGISTER_ADMIN = False
# This is a CSRF-security risk.
# See: http://django-hijack.readthedocs.io/en/latest/configuration/#allowing-get-method-for-hijack-views
HIJACK_ALLOW_GET_REQUESTS = True
# Raven (Sentry client) — only wired up when a DSN is provided.
SENTRY_DSN = os.getenv('SENTRY_DSN')
if SENTRY_DSN:
    INSTALLED_APPS = INSTALLED_APPS + [
        'raven.contrib.django.raven_compat',
    ]
    RAVEN_CONFIG = {
        'dsn': SENTRY_DSN,
        # 'release': raven.fetch_git_sha(BASE_DIR), doesn't work in Docker
    }
    # Ship WARNING-and-up log records to Sentry as well.
    LOGGING['handlers'].update({
        'sentry': {
            'level': 'WARNING',
            'class': 'raven.handlers.logging.SentryHandler',
            'dsn': RAVEN_CONFIG['dsn']
        },
    })
| [
"sergei@maykinmedia.nl"
] | sergei@maykinmedia.nl |
11c8ba607d1b4e76729e420fcabe7d2cd974a13f | acf426a78ded4a078063d05457075fedba8f5310 | /mn_wifi/sixLoWPAN/link.py | 7891f424ccddc3cdaece64372d00029bbb6803f7 | [
"LicenseRef-scancode-x11-stanford"
] | permissive | intrig-unicamp/mininet-wifi | 3b58e6cf7b422cfe0f8990e173e77d7ba1d54616 | 985bf0ca2f11ca2ba17e44518e0df550070ddfba | refs/heads/master | 2023-08-27T03:36:41.005380 | 2023-07-27T13:07:32 | 2023-07-27T13:07:32 | 35,002,369 | 419 | 278 | NOASSERTION | 2023-09-12T03:42:45 | 2015-05-03T22:03:07 | Python | UTF-8 | Python | false | false | 10,998 | py | "author: Ramon Fontes (ramonrf@dca.fee.unicamp.br)"
import os
import re
from mininet.link import Intf, TCIntf, Link
from mininet.log import error, debug
class IntfSixLoWPAN(Intf):
    """Basic 6LoWPAN interface object that can configure itself.

    Behaves like mininet.link.Intf, but manages IPv6 addresses
    (``ip -6 addr``) rather than IPv4 ones.
    """

    def __init__(self, name, node=None, port=None, link=None,
                 mac=None, ip6=None, **params):
        """name: interface name (e.g. h1-eth0)
        node: owning node (where this intf most likely lives)
        link: parent link if we're part of a link
        other arguments are passed to config()"""
        self.node = node
        self.name = name
        self.link = link
        self.mac = mac
        self.ip, self.ip6, self.prefixLen = None, ip6, None
        # if interface is lo, we know the ip is 127.0.0.1.
        # This saves an ipaddr command per node
        if self.name == 'lo':
            self.ip = '127.0.0.1'
            self.prefixLen = 8
        node.addWIntf(self, port=port)
        # Save params for future reference
        self.params = params
        self.config(**params)

    def ipAddr6(self, *args):
        """Flush every IPv6 address on this intf, then assign args[0].

        Expects exactly one positional argument (the new address, optionally
        with a /prefix); calling it with no arguments raises IndexError.
        """
        self.cmd('ip -6 addr flush ', self.name)
        return self.cmd('ip -6 addr add ', args[0], 'dev', self.name)

    def ipLink(self, *args):
        "Configure ourselves using ip link"
        return self.cmd('ip link set', self.name, *args)

    def setIP6(self, ipstr, prefixLen=None, **args):
        """Set our IPv6 address, either as 'addr/prefix' in *ipstr* or as a
        bare address plus an explicit *prefixLen*."""
        # This is a sign that we should perhaps rethink our prefix
        # mechanism and/or the way we specify IP addresses
        if '/' in ipstr:
            self.ip6, self.prefixLen = ipstr.split('/')
            return self.ipAddr6(ipstr)
        else:
            if prefixLen is None:
                raise Exception('No prefix length set for IP address %s'
                                % (ipstr,))
            self.ip6, self.prefixLen = ipstr, prefixLen
            return self.ipAddr6('%s/%s' % (ipstr, prefixLen))

    # NOTE(review): this pattern only matches addresses of the shape
    # '<digits>::<digits>' (e.g. 'fe80::1' would not match) — confirm it is
    # sufficient for the addresses assigned by this class.
    _ip6MatchRegex = re.compile(r'\d+\::\d+')

    def updateIP(self):
        "Return updated IP address based on ip addr"
        # use pexec instead of node.cmd so that we dont read
        # backgrounded output from the cli.
        ipAddr, _err, _exitCode = self.node.pexec(
            'ip -6 addr show %s' % self.name)
        ips = self._ip6MatchRegex.findall(ipAddr)
        # NOTE(review): stores the parsed IPv6 address in self.ip (not
        # self.ip6), mirroring the parent class attribute — confirm intended.
        self.ip = ips[0] if ips else None
        return self.ip

    def updateMAC(self):
        "Return updated MAC address based on ip addr"
        ipAddr = self.ipAddr()
        macs = self._macMatchRegex.findall(ipAddr)
        self.mac = macs[0] if macs else None
        return self.mac

    # Instead of updating ip and mac separately,
    # use one ipAddr call to do it simultaneously.
    # This saves an ipAddr command, which improves performance.
    def updateAddr(self):
        "Return IP address and MAC address based on ipAddr."
        ipAddr = self.ipAddr()
        ips = self._ipMatchRegex.findall(ipAddr)
        macs = self._macMatchRegex.findall(ipAddr)
        self.ip = ips[0] if ips else None
        self.mac = macs[0] if macs else None
        return self.ip, self.mac

    def IP6(self):
        "Return IPv6 address"
        return self.ip6

    def isUp(self, setUp=False):
        """Return whether the interface is up.

        setUp: if True, bring the interface up first and report success.

        Bug fix: the query path used to call ``self.ipAddr6()`` with no
        arguments, which always raised IndexError (``ipAddr6`` reads
        ``args[0]``) and would also have flushed the interface's addresses
        as a side effect.  Query read-only via ``self.ipAddr()`` instead —
        the same inherited helper ``updateMAC`` already uses.
        """
        if setUp:
            cmdOutput = self.ipLink('up')
            # no output indicates success
            if cmdOutput:
                # error( "Error setting %s up: %s " % ( self.name, cmdOutput ) )
                return False
            else:
                return True
        else:
            return "UP" in self.ipAddr()

    def config(self, mac=None, ip6=None, ipAddr=None,
               up=True, **_params):
        """Configure Node according to (optional) parameters:
        mac: MAC address
        ip: IP address
        ipAddr: arbitrary interface configuration
        Subclasses should override this method and call
        the parent class's config(**params)"""
        # If we were overriding this method, we would call
        # the superclass config method here as follows:
        # r = Parent.config( **params )
        r = {}
        self.setParam(r, 'setMAC', mac=mac)
        self.setParam(r, 'setIP6', ip=ip6)
        self.setParam(r, 'isUp', up=up)
        self.setParam(r, 'ipAddr', ipAddr=ipAddr)
        return r

    def delete(self):
        "Delete interface"
        self.cmd('iwpan dev ' + self.node.params['wpan'][0] + ' del')
        # We used to do this, but it slows us down:
        # if self.node.inNamespace:
        # Link may have been dumped into root NS
        # quietRun( 'ip link del ' + self.name )
        self.node.delIntf(self)
        self.link = None
class TC6LoWPANLink(TCIntf, IntfSixLoWPAN):
    """Interface customized by tc (traffic control) utility
    Allows specification of bandwidth limits (various methods)
    as well as delay, loss and max queue length"""
    def config(self, bw=None, delay=None, jitter=None, loss=None,
               gro=False, speedup=0, use_hfsc=False, use_tbf=False,
               latency_ms=None, enable_ecn=False, enable_red=False,
               max_queue_size=None, **params):
        """Configure the port and set its properties.
        bw: bandwidth in b/s (e.g. '10m')
        delay: transmit delay (e.g. '1ms' )
        jitter: jitter (e.g. '1ms')
        loss: loss (e.g. '1%' )
        gro: enable GRO (False)
        speedup: experimental switch-side bw option
        use_hfsc: use HFSC scheduling
        use_tbf: use TBF scheduling
        latency_ms: TBF latency parameter
        enable_ecn: enable ECN (False)
        enable_red: enable RED (False)
        max_queue_size: queue limit parameter for netem"""
        # Support old names for parameters
        gro = not params.pop('disable_gro', not gro)
        result = IntfSixLoWPAN.config(self, **params)
        def on(isOn):
            "Helper method: bool -> 'on'/'off'"
            return 'on' if isOn else 'off'
        # Set offload parameters with ethool
        self.cmd('ethtool -K', self,
                 'gro', on(gro))
        # Optimization: return if nothing else to configure
        # Question: what happens if we want to reset things?
        if (bw is None and not delay and not loss
                and max_queue_size is None):
            return
        # Clear existing configuration
        # The '%s' placeholders are filled in by TCIntf.tc (tc binary + intf).
        tcoutput = self.tc('%s qdisc show dev %s')
        if "priomap" not in tcoutput and "noqueue" not in tcoutput \
                and "fq_codel" not in tcoutput and "qdisc fq" not in tcoutput:
            cmds = [ '%s qdisc del dev %s root' ]
        else:
            cmds = []
        # Bandwidth limits via various methods
        bwcmds, parent = self.bwCmds(bw=bw, speedup=speedup,
                                     use_hfsc=use_hfsc, use_tbf=use_tbf,
                                     latency_ms=latency_ms,
                                     enable_ecn=enable_ecn,
                                     enable_red=enable_red)
        cmds += bwcmds
        # Delay/jitter/loss/max_queue_size using netem
        delaycmds, parent = self.delayCmds(delay=delay, jitter=jitter,
                                           loss=loss,
                                           max_queue_size=max_queue_size,
                                           parent=parent)
        cmds += delaycmds
        # Execute all the commands in our node
        debug("at map stage w/cmds: %s\n" % cmds)
        tcoutputs = [ self.tc(cmd) for cmd in cmds ]
        # Any non-empty tc output indicates an error for that command.
        for output in tcoutputs:
            if output != '':
                error("*** Error: %s" % output)
        debug("cmds:", cmds, '\n')
        debug("outputs:", tcoutputs, '\n')
        result[ 'tcoutputs'] = tcoutputs
        result[ 'parent' ] = parent
        return result
class LoWPAN(Link, IntfSixLoWPAN):

    def __init__(self, node1, node2, **params):
        """Create a bidirectional 6LoWPAN edge between *node1* and *node2*
        by registering both directions with wpan-hwsim (output discarded)."""
        for src, dst in ((node1, node2), (node2, node1)):
            os.system('wpan-hwsim edge add {} {} >/dev/null 2>&1'.format(src.id, dst.id))
class LowPANLink(Link, IntfSixLoWPAN):
    def __init__(self, node, wpan, port=None, addr=None,
                 cls=IntfSixLoWPAN, **params):
        """Create 6LoWPAN link to another node.
        node: node
        wpan: index of the node's 802.15.4 radio to use
        port/addr/cls: interface port, MAC address and interface class
        other arguments are passed to the interface constructor"""
        # The lowpan netdev is named '<node>-pan<idx>'.
        self.pan = '{}-pan{}'.format(node.name, wpan)
        self.name = self.pan
        self.node = node
        node.addWAttr(self, port=wpan)
        # Default radio/energy parameters; may be overridden by node params
        # in set_attr() below.
        self.range = 50
        self.voltage = 10.0
        self.consumption = 0.0
        self.ip6 = None
        self.set_attr(node, wpan)
        # From here on, configure the underlying '<node>-wpan<idx>' device:
        # bring it down, set its PAN id, stack a lowpan iface on top, then
        # bring both devices up.  self.name temporarily aliases the wpan
        # device so ipLink()/set_pan_id() target it.
        wpan = '{}-wpan{}'.format(node.name, wpan)
        self.name = wpan
        self.ipLink('down')
        self.set_pan_id(wpan, '0xbeef')
        self.add_lowpan_iface(wpan)
        self.ipLink('up')
        self.name = self.pan
        self.ipLink('up')
        # NOTE(review): params comes from **params and is always a dict, so
        # this None check is dead code — kept for safety.
        if params is None:
            params = {}
        if port is not None:
            params['port'] = port
        if 'port' not in params:
            params['port'] = node.newPort()
        if not self.name:
            ifacename = 'pan%s' % wpan
            self.name = self.wpanName(node, ifacename, node.newPort())
        if not cls:
            cls = IntfSixLoWPAN
        if 'ip6' in node.params:
            params['ip6'] = node.params['ip6']
        intf1 = cls(name=self.name, node=node, mac=addr,
                    link=self, **params)
        # All we are is dust in the wind, and our two interfaces
        self.intf1, self.intf2 = intf1, '6lowpan'
    def add_lowpan_iface(self, wpan):
        # Adjacent string literals concatenate, so this runs a single
        # 'ip link add link <wpan> name <pan> type lowpan' command.
        return self.cmd('ip link add link {} name {} '
                        'type lowpan'.format(wpan, self.pan))
    def set_pan_id(self, wpan, pan_id):
        # Assign the IEEE 802.15.4 PAN identifier via iwpan.
        return self.cmd('iwpan dev %s set pan_id "%s"' % (wpan, pan_id))
    def set_attr(self, node, wpan):
        # Override instance attributes from node.params; list-valued params
        # are indexed by the radio number *wpan*.
        for key in self.__dict__.keys():
            if key in node.params:
                if isinstance(node.params[key], list):
                    value = node.params[key][wpan]
                    setattr(self, key, value)
                else:
                    setattr(self, key, node.params[key])
    def wpanName(self, node, ifacename, n):
        "Construct a canonical interface name node-ethN for interface n."
        # Leave this as an instance method for now
        # NOTE(review): *n* is unused — the trailing repr(n) is commented out,
        # so the returned name has no numeric suffix.
        assert self
        return node.name + '-' + ifacename # + repr(n)
    def delete(self):
        "Delete this link"
        self.intf1.delete()
        self.intf1 = None
    def status(self):
        "Return link status as a string"
        return "(%s %s)" % (self.intf1.status(), self.intf2)
| [
"ramonreisfontes@gmail.com"
] | ramonreisfontes@gmail.com |
c9d1dacded44e996624f8b185466fb68157b8d0a | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_74/1139.py | c78596798869e06ec2874789c7afc38cee242367 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,618 | py | data = open('data(2).in', 'r')
testNum = int(data.readline())
for x in range(testNum):
input = data.readline().split()
heldTurns = 0
orange = 1
blue = 1
last = False #false = orange || true = blue
totalSteps = 0
turns = int(input[0])
input = input[1:]
for point in range(turns):
bot = input[point*2]
button = int(input[(point*2)+1])
if point == 0:
stepsTaken = 0
if bot == 'O':
stepsTaken = abs(button-orange)
orange = button
last = False
elif bot == 'B':
stepsTaken = abs(button-blue)
blue = button
last = True
heldTurns = stepsTaken + 1
totalSteps += heldTurns
else:
stepsTaken = 0
curr = False
if bot == 'O':
stepsTaken = abs(button-orange)
orange = button
curr = False
elif bot == 'B':
stepsTaken = abs(button-blue)
blue = button
curr = True
if bool(curr) ^ bool(last):
if stepsTaken != 0:
if heldTurns < stepsTaken:
stepsTaken -= heldTurns
else:
stepsTaken = 0
heldTurns = 0
heldTurns += stepsTaken +1
totalSteps += stepsTaken +1
last = curr
print('Case #' + str(x+1) + ': ' + str(totalSteps))
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
08cc0a8d706f7860b4a1caba53e29db55b105154 | e57a122cba8d00aac9d014a45e815063cb9f0359 | /imagepy/core/engine/report.py | 86fdd6bf79d9af6773d6ab2ed74ea11eb2e35f06 | [
"BSD-2-Clause"
] | permissive | WeisongZhao/imagepy | 9d66664578c77eb2d463de922c8d06af4f8af35a | 43cd5c4dcb9d6fefdcf11b8b9e9c0d56e11fab1e | refs/heads/master | 2020-04-25T23:44:31.304590 | 2020-01-29T06:10:26 | 2020-01-29T06:10:26 | 173,155,520 | 1 | 0 | NOASSERTION | 2019-02-28T17:21:56 | 2019-02-28T17:21:55 | null | UTF-8 | Python | false | false | 2,151 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 29 01:48:23 2016
@author: yxl
"""
import wx
from imagepy import IPy
from imagepy.core.manager import WidgetsManager, TaskManager, ImageManager
from imagepy.core.manager import ReaderManager, ViewerManager, TableManager
from imagepy.ui.propertygrid import GridDialog
from imagepy.core.util import xlreport
from time import time
import openpyxl as pyxl
class Report:
def __init__(self, title, cont):
self.title = title
self.cont = cont
def __call__(self): return self
def runasyn(self, wb, info, key, para = None, callback = None):
TaskManager.add(self)
for i in para:
if i in key and key[i][0] == 'img':
ips = ImageManager.get(para[i])
para[i] = ips if ips is None else ips.img
if i in key and key[i][0] == 'tab':
tps = TableManager.get(para[i])
para[i] = tps if tps is None else tps.data
start = time()
xlreport.fill_value(wb, info, para)
wb.save(para['path'])
IPy.set_info('%s: cost %.3fs'%(self.title, time()-start))
TaskManager.remove(self)
if callback!=None:callback()
def start(self, para=None, callafter=None):
wb = pyxl.load_workbook(self.cont)
xlreport.repair(wb)
info, key = xlreport.parse(wb)
if para is not None:
return self.runasyn(wb, info, para, callafter)
dialog = GridDialog(IPy.curapp, self.title, info, key)
rst = dialog.ShowModal()
para = dialog.GetValue()
dialog.Destroy()
if rst != 5100: return
filt = '|'.join(['%s files (*.%s)|*.%s'%('XLSX', 'xlsx', 'xlsx')])
if not IPy.getpath('Save..', filt, 'save', para): return
win = WidgetsManager.getref('Macros Recorder')
if win!=None: win.write('{}>{}'.format(self.title, para))
self.runasyn(wb, info, key, para, callafter)
def show_rpt(data, title):
wx.CallAfter(Report(title, data).start)
ViewerManager.add('rpt', show_rpt)
def read_rpt(path): return path
ReaderManager.add('rpt', read_rpt, tag='rpt') | [
"imagepy@sina.com"
] | imagepy@sina.com |
725dea3840c0a4ed9891327e89a1d202b33e6b90 | bf939848c38f0e42c163d030f273c64ba545306e | /source/migrations/0005_auto_20160531_2028.py | 4e2f2f5d3e88747507b822caf8ec691750100343 | [] | no_license | security-force-monitor/sfm-cms | 17e25f6990066ae1bf1f14783a3ad5ca176f4384 | 057039ad854a3a6703e373f7d2ec84be6af03565 | refs/heads/master | 2023-02-14T02:22:40.600997 | 2023-01-25T19:48:25 | 2023-01-25T19:48:25 | 57,247,158 | 9 | 4 | null | 2022-11-17T16:26:59 | 2016-04-27T21:00:34 | Python | UTF-8 | Python | false | false | 774 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('source', '0004_auto_20160513_2044'),
]
operations = [
migrations.RemoveField(
model_name='publication',
name='country',
),
migrations.AddField(
model_name='publication',
name='country_iso',
field=models.CharField(max_length=2, default=''),
preserve_default=False,
),
migrations.AddField(
model_name='publication',
name='country_name',
field=models.CharField(max_length=2, default=''),
preserve_default=False,
),
]
| [
"eric.vanzanten@gmail.com"
] | eric.vanzanten@gmail.com |
5c767b52310fbce16a1ce44060ce7e3208459470 | ea262de505a1dd5ae1c7b546b85184309c3fdd35 | /src/models/modules/aspp.py | 1ef76d63d0bc32feb48961866576035219c35dac | [
"MIT"
] | permissive | Runki2018/CvPytorch | 306ff578c5f8d3d196d0834e5cad5adba7a89676 | 1e1c468e5971c1c2b037334f7911ae0a5087050f | refs/heads/master | 2023-08-25T09:48:48.764117 | 2021-10-15T05:11:21 | 2021-10-15T05:11:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,064 | py | # !/usr/bin/env python
# -- coding: utf-8 --
# @Time : 2020/9/15 14:27
# @Author : liumin
# @File : aspp.py
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class _ASPPModule(nn.Module):
def __init__(self, inplanes, planes, kernel_size, padding=0, dilation=1):
super(_ASPPModule, self).__init__()
self.atrous_conv = nn.Conv2d(inplanes, planes, kernel_size=kernel_size,
stride=1, padding=padding, dilation=dilation, bias=False)
self.bn = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self._init_weight()
def forward(self, x):
x = self.atrous_conv(x)
x = self.bn(x)
return self.relu(x)
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
class ASPP(nn.Module):
def __init__(self, inplanes=2048, dilations = [6, 12, 18], drop_rate=0.1):
super(ASPP, self).__init__()
mid_channels = 256
self.aspp1 = _ASPPModule(inplanes, mid_channels, 1)
self.aspp2 = _ASPPModule(inplanes, mid_channels, 3, padding=dilations[0], dilation=dilations[0])
self.aspp3 = _ASPPModule(inplanes, mid_channels, 3, padding=dilations[1], dilation=dilations[1])
self.aspp4 = _ASPPModule(inplanes, mid_channels, 3, padding=dilations[2], dilation=dilations[2])
self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d(1),
nn.Conv2d(inplanes, mid_channels, 1, bias=False),
nn.BatchNorm2d(mid_channels),
nn.ReLU(inplace=True))
self.conv_bn_relu = nn.Sequential(nn.Conv2d(mid_channels*5, mid_channels, 1, bias=False) ,
nn.BatchNorm2d(mid_channels),
nn.ReLU(inplace=True),
nn.Dropout(p=drop_rate))
self._init_weight()
def forward(self, x):
x1 = self.aspp1(x)
x2 = self.aspp2(x)
x3 = self.aspp3(x)
x4 = self.aspp4(x)
x5 = self.global_avg_pool(x)
x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)
x = torch.cat((x1, x2, x3, x4, x5), dim=1)
x = self.conv_bn_relu(x)
return x
def _init_weight(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
torch.nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.Linear):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
| [
"569793357@qq.com"
] | 569793357@qq.com |
595d0c4eb7364999f7be3eef043687131a208ba3 | 27317b3adb1ccd99afa86cb931d2d14e23b9b175 | /bcs-app/backend/apps/configuration/models/base.py | 254f7e997e26384e25f1311248aedd20fa4b4394 | [
"BSD-3-Clause",
"LicenseRef-scancode-unicode",
"ICU",
"LicenseRef-scancode-unknown-license-reference",
"Artistic-2.0",
"Zlib",
"LicenseRef-scancode-openssl",
"NAIST-2003",
"ISC",
"NTP",
"BSL-1.0",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"MIT"
] | permissive | freyzheng/bk-bcs-saas | cf5a6c4ab1c20959bda1362bc31de7884451acd7 | 96373cda9d87038aceb0b4858ce89e7873c8e149 | refs/heads/master | 2021-07-05T04:11:08.555930 | 2020-09-22T12:26:37 | 2020-09-22T12:26:37 | 201,279,048 | 0 | 1 | NOASSERTION | 2020-09-16T03:07:16 | 2019-08-08T14:48:27 | Python | UTF-8 | Python | false | false | 1,844 | py | # -*- coding: utf-8 -*-
#
# Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
# Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
import logging
from django.db import models
from django.utils import timezone
COPY_TEMPLATE = "-copy"
logger = logging.getLogger(__name__)
# 所有包含 pod 的资源
# TODO mark refactor 移到constants文件中
POD_RES_LIST = ['K8sDeployment', 'K8sDaemonSet', 'K8sJob', 'K8sStatefulSet']
def get_default_version():
"""版本号:默认为时间戳
"""
return timezone.localtime().strftime('%Y%m%d-%H%M%S')
class BaseModel(models.Model):
"""Model with 'created' and 'updated' fields.
"""
creator = models.CharField("创建者", max_length=32)
updator = models.CharField("更新者", max_length=32)
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(auto_now=True)
is_deleted = models.BooleanField(default=False)
deleted_time = models.DateTimeField(null=True, blank=True)
class Meta:
abstract = True
def delete(self, *args, **kwargs):
self.is_deleted = True
self.deleted_time = timezone.now()
self.save(update_fields=['is_deleted', 'deleted_time'])
| [
"gejun.coolfriend@gmail.com"
] | gejun.coolfriend@gmail.com |
f2e6c14e48abbbab4103a628056f283f5f408a1c | 04ce34e125fd9957b51196f5e31c6651ae8cb0ae | /Day-2/itertools/groupby.py | 8fc08a1564dbb0e1962fdd69fc71bfdba9e59d88 | [] | no_license | morshedmasud/100_days_with_python | 403e1d34cb6cebf983295281613045c60737d8f8 | 1c90d6222835f3b105a9ba26f98e3eb7bab40b30 | refs/heads/master | 2020-03-11T19:53:33.082287 | 2018-07-03T18:41:47 | 2018-07-03T18:41:47 | 130,220,708 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py | # Go to the link to know about the problem
# https://www.hackerrank.com/challenges/compress-the-string/problem
from itertools import groupby
s = input()
for k, g in groupby(s):
print("({}, {})".format(len(list(g)), k), end=" ")
| [
"masudraj6@gmail.com"
] | masudraj6@gmail.com |
65da5795fa137a569d17f96a32989d23ac977d85 | bdb1c323968cd9d5441a187a29ed7e25a2e4f07e | /cp0/metrix/urls.py | 8a45beca0a3e6a133e38cbc258df9fb61e4a7845 | [] | no_license | liangzhaowang/automation_system | beee351dd9f09a51e2b81617ac5bee63023ea9b8 | f77ef433c2366253dc9d9fdb7c54911cb38ed3e8 | refs/heads/master | 2022-02-19T11:07:44.047000 | 2019-09-23T02:16:00 | 2019-09-23T02:16:00 | 209,732,359 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | from django.conf.urls import url
import views as metrix_views
urlpatterns = [
# Metrix
url(r'^metrix/$', metrix_views.metrix_index),
url(r'^metrix/data/$', metrix_views.metrix_data),
url(r'^metrix/examp/(.+)/$', metrix_views.examp),
url(r'^metrix/jira_raw_data/(.+)/$', metrix_views.get_jira_raw_data),
url(r'^metrix/examp_templates/(.+).html$', metrix_views.examp_templates),
url(r'^metrix/get_dedualt_config/$',metrix_views.Metrix_Defualt_Config),
] | [
"zhaowangx.liang@intel.com"
] | zhaowangx.liang@intel.com |
406e86df76bdd822048227349e4e85b27e833f25 | bb2e9401499974a3ba6a2c083eba54af301736f9 | /lectures/110opengl/gmtutorials/010triangle/gm013.py | de6246611d9ffda296a5d450738ada5d91dfa700 | [] | no_license | geofmatthews/csci480 | 69188cd6ffc5d5ed3e9d9ea9d3c98deb6988e3f2 | fc21f487b57629ecaa042447d05f83b0837edf16 | refs/heads/master | 2021-01-10T11:18:53.451056 | 2015-11-30T23:10:22 | 2015-11-30T23:10:22 | 43,301,371 | 2 | 6 | null | 2016-04-27T19:28:24 | 2015-09-28T13:06:20 | Python | UTF-8 | Python | false | false | 5,086 | py | # Add a rotation matrix
from ctypes import c_void_p
from OpenGL.GL import *
from OpenGL.GL.shaders import compileShader, compileProgram
import pygame
from pygame.locals import *
import numpy as N
# We will be sending C objects to the graphics card, so we need
# some help making sure python sends the right thing.
# The ctypes module provides a way to create a null pointer:
null = c_void_p(0)
# Sizes in OpenGL are bytes. Since we're going to use 32 bit
# floats, provided by numpy, we'll need to tell OpenGL how big
# they are:
sizeOfFloat = 4
# The vertex shader has one input, the position of the vertex.
# It needs to have one output, also the position of the vertex.
# This is a simple passthrough shader.
strVertexShader = """
#version 330
in vec4 position;
uniform mat4 rotation;
void main()
{
gl_Position = rotation * position;
}
"""
# The fragment shader needs to have one output, the color
# of the fragment. Here we set it to white.
strFragmentShader = """
#version 330
out vec4 outputColor;
void main()
{
outputColor = vec4(1.0f, 1.0f, 1.0f, 1.0f);
}
"""
# Use PyOpenLG's compile shader programs, which simplify this task.
# Assign the compiled program to theShaders.
def initializeShaders():
global theShaders, positionAttrib, rotationAttrib
theShaders = compileProgram(
compileShader(strVertexShader, GL_VERTEX_SHADER),
compileShader(strFragmentShader, GL_FRAGMENT_SHADER)
)
positionAttrib = glGetAttribLocation(theShaders, "position")
rotationAttrib = glGetUniformLocation(theShaders, "rotation")
print "Attribs:", positionAttrib, rotationAttrib
# Vertex Data
#
# Three vertices of a triangle, with an x,y,z & w for each.
vertexPositions = N.array([
0.0, 0.75, 0.0, 1.0,
-0.75, -0.75, 0.0, 1.0,
0.75, -0.75, 0.0, 1.0], dtype=N.float32)
# number of components per point, 4 since we're using homogeneous
# 3d points. This helps figuring out how many triangles we're
# drawing, with len(vertexPositions)/vertexComponents
vertexComponents = 4
def initializeVertexBuffer():
global positionBufferObject
positionBufferObject = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, positionBufferObject)
glBufferData(GL_ARRAY_BUFFER, vertexPositions, GL_STATIC_DRAW)
glBindBuffer(GL_ARRAY_BUFFER, 0)
# Ask the graphics card to create a VAO object.
# A VAO object stores one or more vertex buffer objects.
def initializeVAO():
n = 1
vaoArray = N.zeros(n, dtype=N.uint)
vaoArray = glGenVertexArrays(n)
glBindVertexArray( vaoArray )
# Called once at application start-up.
# Must be called after we have an OpenGL context, i.e. after the pygame
# window is created
def init():
initializeShaders()
initializeVertexBuffer()
initializeVAO()
# Called to redraw the contents of the window
def display(time):
# Clear the display
glClearColor(0.0, 0.0, 0.0, 0.0)
glClear(GL_COLOR_BUFFER_BIT)
# Set the shader program
glUseProgram(theShaders)
# compute rotation matrix:
s = N.sin(time)
c = N.cos(time)
rot = N.array(((c,-s,0,0),
(s,c,0,0),
(0,0,1,0),
(0,0,0,1)), dtype=N.float32)
# send rotation matrix to the shader program
glUniformMatrix4fv(rotationAttrib, # attrib location
1, # how many are we sending?
GL_TRUE, # row-major?
rot # the array
)
# Use the buffered data
glBindBuffer(GL_ARRAY_BUFFER, positionBufferObject)
# Tell the shader program which attribute to use for this buffer
glEnableVertexAttribArray(positionAttrib)
# Tell the shader program what the data in the buffer look like
glVertexAttribPointer(positionAttrib, # attrib location
vertexComponents, # elements per vertex
GL_FLOAT, # type of element
GL_FALSE, # normalize 0-256 to 0.0-1.0?
0, # stride
c_void_p(0) # offset
)
# Use that data to draw triangles
glDrawArrays(GL_TRIANGLES, 0, len(vertexPositions) / vertexComponents)
# Stop using that buffered data
glDisableVertexAttribArray(0)
# Stop using the shader program
glUseProgram(0)
def main():
global screen
pygame.init()
screen = pygame.display.set_mode((512,512), OPENGL|DOUBLEBUF)
clock = pygame.time.Clock()
init()
time = 0.0
while True:
clock.tick(30)
time += 0.01
for event in pygame.event.get():
if event.type == QUIT:
return
if event.type == KEYUP and event.key == K_ESCAPE:
return
display(time)
pygame.display.flip()
if __name__ == '__main__':
try:
main()
except RuntimeError, err:
for s in err:
print s
raise RuntimeError(err)
finally:
pygame.quit()
| [
"geoffrey.matthews@wwu.edu"
] | geoffrey.matthews@wwu.edu |
c647eb211f1f09affb1fb04662ac52f8ad38bc12 | a518141ca3ba2b6fa63a7961b51936d9438ff022 | /11172 - Relational Operator.py | d328188ace2103f64e0214f15f2353ea201ea847 | [] | no_license | jlhung/UVA-Python | ec93b2c98e04c753e8356f3e4825584fae4a8663 | 7a0db4fecffd7ac4f377f93da41291a8e998ee9b | refs/heads/master | 2022-11-28T04:47:49.270187 | 2020-08-10T13:19:58 | 2020-08-10T13:19:58 | 116,969,745 | 19 | 9 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | '''
20180114 jlhung v1.0
'''
n = int(input())
for i in range(n):
x, y = map(int, input().split())
if x > y:
print(">")
elif x < y:
print("<")
else:
print("=") | [
"35291112+jlhung@users.noreply.github.com"
] | 35291112+jlhung@users.noreply.github.com |
300ac82a1c4baf849884d7d2fe7cd07efe1e0f2d | bbf744bfbfd9a935bd98c7cf54152a5d41194161 | /chapter_05/e5-10_ordinal_numbers.py | cf9d01ad4c3aee9140eec1916f92707ff314803c | [] | no_license | terranigmark/python-crash-course-projects | 65a7863be2d26fe8b91ac452b12203386eb0259a | 79ed9ed8e6a1bf015990a9556689379274231d13 | refs/heads/master | 2022-12-05T21:59:00.352140 | 2020-08-21T04:59:50 | 2020-08-21T04:59:50 | 266,263,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py |
def main():
numbers = [number for number in range(1, 10)]
print(numbers)
for number in numbers:
if number == 1:
print(f"{number}st")
elif number == 2:
print(f"{number}nd")
elif number == 3:
print(f"{number}rd")
else:
print(f"{number}th")
if __name__ == "__main__":
main() | [
"linnk99@gmail.com"
] | linnk99@gmail.com |
70de302fb8ee0861e18fba202c456c1afe87f9f9 | e90a772733e73e45b4cdbb5f240ef3b4a9e71de1 | /346. Moving Average from Data Stream.py | 7d66cf03d18e6f3b8d76865f2b9725ca038acae7 | [] | no_license | jiewu-stanford/leetcode | 102829fcbcace17909e4de49c01c3d705b6e6e3a | cbd47f713d3307f900daf55c8f27301c70542fc4 | refs/heads/master | 2022-05-28T18:25:00.885047 | 2022-05-18T05:16:22 | 2022-05-18T05:16:22 | 214,486,622 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | '''
Title : 346. Moving Average from Data Stream ($$$)
Problem : https://leetcode.com/problems/moving-average-from-data-stream/
: https://www.lintcode.com/problem/moving-average-from-data-stream/description
'''
class MovingAverage:
def __init__(self, size):
self._size = size
self._array = []
self._sum = 0
def next(self, val):
self._sum += val
self._array.append(val)
if len(self._array) > self._size:
self._sum -= self._array.pop(0)
return self._sum/len(self._array)
# Your MovingAverage object will be instantiated and called as such:
# obj = MovingAverage(size)
# param = obj.next(val) | [
"bayernscience@hotmail.com"
] | bayernscience@hotmail.com |
3b2c4f1223e39221895f4054e2206db3288db630 | 741c5c70bf4a0adb05db6b0777c8d07e28eb9cf6 | /lib/python3.4/site-packages/IPython/nbconvert/preprocessors/tests/test_extractoutput.py | 4b2c515324994f3df53c0b8285135c1fe45333fe | [] | no_license | andybp85/hyLittleSchemer | e686d2dc0f9067562367ea1173f275e8e2d2cb85 | af5cb6adf6a196cc346aa7d14d7f9509e084c414 | refs/heads/master | 2021-01-19T07:48:31.309949 | 2015-01-04T00:57:30 | 2015-01-04T00:57:30 | 28,496,304 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,601 | py | """
Module with tests for the extractoutput preprocessor
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, the IPython Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from .base import PreprocessorTestsBase
from ..extractoutput import ExtractOutputPreprocessor
#-----------------------------------------------------------------------------
# Class
#-----------------------------------------------------------------------------
class TestExtractOutput(PreprocessorTestsBase):
"""Contains test functions for extractoutput.py"""
def build_preprocessor(self):
"""Make an instance of a preprocessor"""
preprocessor = ExtractOutputPreprocessor()
preprocessor.extract_output_types = {'text', 'png', 'application/pdf'}
preprocessor.enabled = True
return preprocessor
def test_constructor(self):
"""Can a ExtractOutputPreprocessor be constructed?"""
self.build_preprocessor()
def test_output(self):
"""Test the output of the ExtractOutputPreprocessor"""
nb = self.build_notebook()
res = self.build_resources()
preprocessor = self.build_preprocessor()
nb, res = preprocessor(nb, res)
# Check if text was extracted.
output = nb.worksheets[0].cells[0].outputs[1]
assert 'text_filename' in output
text_filename = output['text_filename']
# Check if png was extracted.
output = nb.worksheets[0].cells[0].outputs[6]
assert 'png_filename' in output
png_filename = output['png_filename']
# Check that pdf was extracted
output = nb.worksheets[0].cells[0].outputs[7]
assert 'application/pdf_filename' in output
pdf_filename = output['application/pdf_filename']
# Verify text output
assert text_filename in res['outputs']
self.assertEqual(res['outputs'][text_filename], b'b')
# Verify png output
assert png_filename in res['outputs']
self.assertEqual(res['outputs'][png_filename], b'g')
# Verify pdf output
assert pdf_filename in res['outputs']
self.assertEqual(res['outputs'][pdf_filename], b'h')
| [
"andy@youshallthrive.com"
] | andy@youshallthrive.com |
f099090fe0b4ffbb9c84bebdf4e60b95afc751e5 | 86b6385bc0a0cc471e4a7e288bcdbe1f287adf52 | /utils/response_object.py | 62ac58196901874f16e12c99d13a39d96e34c88d | [] | no_license | AidarTaziev/EXAMPLE_PERSONAL_SERVICES | b688340c502380971798921caa6adc432eefb807 | 509ca43f254eb1f2626374fb5e429e32a0372015 | refs/heads/main | 2023-01-13T16:50:37.286108 | 2020-11-16T08:22:48 | 2020-11-16T08:22:48 | 313,234,087 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | from django.http import JsonResponse
def create_response_object(error, message):
return JsonResponse({'error': error, 'data': message}) | [
"you@example.com"
] | you@example.com |
e1bacd8d697526fcd796c413b95da822d193d3a9 | 11ef4bbb8086ba3b9678a2037d0c28baaf8c010e | /Source Code/server/binaries/chromium/pyproto/components/gcm_driver/crypto/proto/gcm_encryption_data_pb2.py | 320d5e4043ed8b7fe0408cfcfca9734732f3f583 | [] | no_license | lineCode/wasmview.github.io | 8f845ec6ba8a1ec85272d734efc80d2416a6e15b | eac4c69ea1cf0e9af9da5a500219236470541f9b | refs/heads/master | 2020-09-22T21:05:53.766548 | 2019-08-24T05:34:04 | 2019-08-24T05:34:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | true | 6,313 | py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: gcm_encryption_data.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='gcm_encryption_data.proto',
package='gcm',
syntax='proto2',
serialized_options=_b('H\003'),
serialized_pb=_b('\n\x19gcm_encryption_data.proto\x12\x03gcm\"v\n\x07KeyPair\x12\"\n\x04type\x18\x01 \x02(\x0e\x32\x14.gcm.KeyPair.KeyType\x12\x13\n\x0bprivate_key\x18\x02 \x01(\x0c\x12\x12\n\npublic_key\x18\x04 \x01(\x0c\"\x18\n\x07KeyType\x12\r\n\tECDH_P256\x10\x00J\x04\x08\x03\x10\x04\"\x81\x01\n\x0e\x45ncryptionData\x12\x0e\n\x06\x61pp_id\x18\x01 \x02(\t\x12\x19\n\x11\x61uthorized_entity\x18\x04 \x01(\t\x12\x1a\n\x04keys\x18\x02 \x03(\x0b\x32\x0c.gcm.KeyPair\x12\x13\n\x0bprivate_key\x18\x05 \x01(\t\x12\x13\n\x0b\x61uth_secret\x18\x03 \x01(\x0c\x42\x02H\x03')
)
_KEYPAIR_KEYTYPE = _descriptor.EnumDescriptor(
name='KeyType',
full_name='gcm.KeyPair.KeyType',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ECDH_P256', index=0, number=0,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=122,
serialized_end=146,
)
_sym_db.RegisterEnumDescriptor(_KEYPAIR_KEYTYPE)
_KEYPAIR = _descriptor.Descriptor(
name='KeyPair',
full_name='gcm.KeyPair',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='gcm.KeyPair.type', index=0,
number=1, type=14, cpp_type=8, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='private_key', full_name='gcm.KeyPair.private_key', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='public_key', full_name='gcm.KeyPair.public_key', index=2,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
_KEYPAIR_KEYTYPE,
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=34,
serialized_end=152,
)
_ENCRYPTIONDATA = _descriptor.Descriptor(
name='EncryptionData',
full_name='gcm.EncryptionData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='app_id', full_name='gcm.EncryptionData.app_id', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='authorized_entity', full_name='gcm.EncryptionData.authorized_entity', index=1,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='keys', full_name='gcm.EncryptionData.keys', index=2,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='private_key', full_name='gcm.EncryptionData.private_key', index=3,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='auth_secret', full_name='gcm.EncryptionData.auth_secret', index=4,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=155,
serialized_end=284,
)
_KEYPAIR.fields_by_name['type'].enum_type = _KEYPAIR_KEYTYPE
_KEYPAIR_KEYTYPE.containing_type = _KEYPAIR
_ENCRYPTIONDATA.fields_by_name['keys'].message_type = _KEYPAIR
DESCRIPTOR.message_types_by_name['KeyPair'] = _KEYPAIR
DESCRIPTOR.message_types_by_name['EncryptionData'] = _ENCRYPTIONDATA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
KeyPair = _reflection.GeneratedProtocolMessageType('KeyPair', (_message.Message,), dict(
DESCRIPTOR = _KEYPAIR,
__module__ = 'gcm_encryption_data_pb2'
# @@protoc_insertion_point(class_scope:gcm.KeyPair)
))
_sym_db.RegisterMessage(KeyPair)
EncryptionData = _reflection.GeneratedProtocolMessageType('EncryptionData', (_message.Message,), dict(
DESCRIPTOR = _ENCRYPTIONDATA,
__module__ = 'gcm_encryption_data_pb2'
# @@protoc_insertion_point(class_scope:gcm.EncryptionData)
))
_sym_db.RegisterMessage(EncryptionData)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"wasmview@gmail.com"
] | wasmview@gmail.com |
721879e136706a48003650b8fbd0d1770c84fa4a | b5c17b494204ed215ecfdc65932b2c960fa9e121 | /test/functional/create_cache.py | 8cac7417df6be7b4791ae67c8af9e25fabf8b543 | [
"MIT"
] | permissive | syglee7/zenacoin-ver2 | 9c8943c84b8eefad4ce3fee6ac15a9878b87f1df | 90079b95bdf0ea2b7fce644c56d2a9626526e5e4 | refs/heads/master | 2023-03-10T07:29:47.772820 | 2021-02-21T13:57:41 | 2021-02-21T13:57:41 | 340,617,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | #!/usr/bin/env python3
# Copyright (c) 2016-2019 The Zenacoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Create a blockchain cache.
Creating a cache of the blockchain speeds up test execution when running
multiple functional tests. This helper script is executed by test_runner when multiple
tests are being run in parallel.
"""
from test_framework.test_framework import ZenacoinTestFramework
class CreateCache(ZenacoinTestFramework):
# Test network and test nodes are not required:
def set_test_params(self):
self.num_nodes = 0
def setup_network(self):
pass
def run_test(self):
pass
if __name__ == '__main__':
CreateCache().main()
| [
"syglee7@gmail.com"
] | syglee7@gmail.com |
50b67108d8f94ed9c10e2666d6a23633853c4494 | 216a9037982a23a2acd5ccc80d512fa6fc41cfc3 | /basic/febo.py | 9b63d2de86d2d0e38e302b463364d2f24be4fcad | [] | no_license | PritamKrishnaMali/python | c6d7355c60392c3a1114c7a96cc2195df7b39594 | aa9a95dec41596662bd0218140d179c80b7de222 | refs/heads/master | 2021-06-13T10:10:01.031848 | 2017-03-11T03:09:39 | 2017-03-11T03:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | a = -1
b = 1
c = 0
i = 0
number = input("Enter the Number")
while i <= number:
c = a + b
print c
a = b
b = c
i = i+1
| [
"you@example.com"
] | you@example.com |
6db0e717a75b8c2e97b0faa41d5ff9342ac4dd59 | f84a247c47fbc44d2327c5fba9b66af9cbb6904d | /Code/DeNovoCompeting/applyExtreme.py | 46e1be8a3a335fec2220c7b694c54218169e4ed4 | [] | no_license | eggduzao/Costa_TfbsPrediction | e530b2e090d0a1382693a340d581c6f4aa5e441c | c47b9f8e38e3d414f06905f12aeb2c77a75ae55f | refs/heads/master | 2020-12-02T08:08:59.185158 | 2019-08-13T23:56:57 | 2019-08-13T23:56:57 | 96,719,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,410 | py | #################################################################################################
# Runs the pipeline for the EXTREME algorithm.
#
# The following are arguments for GappedKmerSearch.py, the word searching algorithm for the seeding:
# -l HALFLENGTH. The number of exact letters on each side of the word (default 4).
# -ming MINGAP. The minimum number of universal wildcard letters in the middle (default 0).
# -maxg MAXGAP. The maximum number of universal wildcard letters in the middle (default 10).
# -minsites MINSITES. Minimum number of sites a word should have to be included (default 10).
# -zthresh ZTHRESHOLD. Minimum normalized z-score for a word to be saved. A lower threshold increases
# the number of words saved (default 5).
#
# The following are arguments for run_consensus_clusering_using_wm.pl, the hierarchical clustering algorithm for the seeding:
# THRESHOLD. The threshold for the clustering. Has values between 0 and 1. A value closer to 1
# decreases the number of clusters, while a value closer to 0 increases the number
# of clusters. Recommended value is 0.3.
#
# The following are arguments for EXTREME.py, the EXTREME algorithm:
# -t TRIES. The number of different bias factors to try before giving up on the current seed.
# -s SEED. Random seed for shuffling sequences and dataset positions.
# -p PSEUDOCOUNTS. Uniform pseudo counts to add to initial PFM guess (default 0.0).
# -q INITALSTEP. The initial step size for the online EM algorithm. A VERY sensitive parameter.
# I get best success for ChIP size data (about 100,000 to 1,000,000 bps) with a step
# size of 0.05. For DNase footprinting, which usually has >5,000,000 bps, I find 0.02
# works best (default 0.05).
# -minsites MINSITES. Minimum number of sites the motif should have (default 10).
# -maxsites MAXSITES. Minimum number of sites the motif should have. If not specified, it is set to
# five times the number of predicted motif sites based on the initial PFM guess
# -saveseqs SAVESEQS. A switch. If used, the positive and negative sequence set will be saved to
# Positive_seq.fa and Negative_seq.fa, respectively, with instances of the
# discovered motif replaced with capital Ns.
#################################################################################################
params = []
params.append("###")
params.append("Input: ")
params.append(" 1. bedFileName = Name of the input bed file.")
params.append(" 2. genomeFileName = Genome to extract the sequences.")
params.append(" 3. outputLocation = Location of the output and temporary files.")
params.append("###")
params.append("Output: ")
params.append(" 1. EXTREME output.")
params.append("###")
#################################################################################################
# Import
import os
import sys
import glob
import re
lib_path = os.path.abspath("/".join(os.path.realpath(__file__).split("/")[:-2]))
sys.path.append(lib_path)
from util import *
if(len(sys.argv) <= 1):
for e in params: print e
sys.exit(0)
def natural_sort(l):
convert = lambda text: int(text) if text.isdigit() else text.lower()
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key)
###################################################################################################
# INPUT
###################################################################################################
# Reading input
bedFileName = sys.argv[1]
genomeFileName = sys.argv[2]
outputLocation = sys.argv[3]
if(outputLocation[-1] != "/"): outputLocation+="/"
# Parameters for GappedKmerSearch
halfSiteLength = "8"
ming = "0"
maxg = "10"
minsites = "5"
# Parameters for run_consensus_clusering_using_wm
clusterThresh = "0.3"
# Creating fasta file
inName = ".".join(bedFileName.split("/")[-1].split(".")[:-1])
fastaFileName = outputLocation+inName+".fasta"
toRemove = [fastaFileName]
os.system("fastaFromBed -fi "+genomeFileName+" -bed "+bedFileName+" -fo "+fastaFileName)
###################################################################################################
# EXTREME
###################################################################################################
loc = "/home/egg/Desktop/footprint_motifmatch/Extreme/EXTREME-2.0.0/src/"
# 1. Generates a dinucleotide shuffled version of the positive sequence set to serve as a negative sequence set
shuffledFileName = outputLocation+inName+"_shuffled.fasta"
toRemove.append(shuffledFileName)
os.system("python "+loc+"fasta-dinucleotide-shuffle.py -f "+fastaFileName+" > "+shuffledFileName)
# 2. Finds gapped words with two half-sites of length "-l", between "-ming" and "-maxg" universal wildcard gap letters, and at least "-minsites" occurrences in the positive sequence set
wordsFileName = outputLocation+inName+".words"
toRemove.append(wordsFileName)
os.system("python "+loc+"GappedKmerSearch.py -l "+halfSiteLength+" -ming "+ming+" -maxg "+maxg+" -minsites "+minsites+" "+fastaFileName+" "+shuffledFileName+" "+wordsFileName)
# 3. Clusters the words and outputs the results
clusterFileName = wordsFileName+".cluster.aln"
toRemove.append(clusterFileName)
os.system("perl "+loc+"run_consensus_clusering_using_wm.pl "+wordsFileName+" "+clusterThresh)
# 4. Converts the clusters into PFMs which can be used as seeds for the online EM algorithm
wmFileName = outputLocation+inName+".wm"
toRemove.append(wmFileName)
os.system("python "+loc+"Consensus2PWM.py "+clusterFileName+" "+wmFileName)
nbClusters = 0
wmFile = open(wmFileName,"r")
for line in wmFile:
if(line[0] == ">"): nbClusters += 1
wmFile.close()
# 5. EM algorithm
memePfmFileName = outputLocation+"all_pwms.meme"
memePfmFile = open(memePfmFileName,"w")
memePfmFile.write("MEME version 4.9.0\n\nALPHABET= ACGT\n\nstrands: + -\n\nBackground letter frequencies (from uniform background):\nA 0.25000 C 0.25000 G 0.25000 T 0.25000\n\n")
htmlFileName = outputLocation+"all_motifs.html"
htmlFile = open(htmlFileName,"w")
htmlFile.write("<html>\n<head></head>\n<body>\n")
for i in range(1,nbClusters+1):
os.system("python "+loc+"EXTREME.py "+fastaFileName+" "+shuffledFileName+" "+wmFileName+" "+str(i))
memeFile = open("./cluster"+str(i)+"/MEMEoutput.meme","r")
flagStart = False
for line in memeFile:
if(len(line) > 5 and line[:5] == "MOTIF"): flagStart = True
if(flagStart): memePfmFile.write(line)
htmlFile.write("<h1>cluster"+str(i)+"</h1>\n")
htmlFile.write("<table cellpadding=\"4\" style=\"border: 1px solid #000000; border-collapse: collapse;\" border=\"1\">\n")
pngList = natural_sort(glob.glob(outputLocation+"cluster"+str(i)+"/Motif*.png"))
for pngFileName in pngList:
mname = ".".join(pngFileName.split("/")[-1].split(".")[:-1])
htmlFile.write("<tr><td>"+mname+"</td><td><img src=\""+pngFileName+"\"></td></tr>\n")
htmlFile.write("</table>\n")
memeFile.close()
memePfmFile.close()
htmlFile.close()
###################################################################################################
# OUTPUT
###################################################################################################
# Remove temporary files
for e in toRemove: os.system("rm "+e)
| [
"eggduzao@gmail.com"
] | eggduzao@gmail.com |
2877643adfbcb52d6cc723fbb5d42cda73d5fde7 | 910eb6e2905fd10337b67f6265d80fa975c63768 | /freenasUI/plugins/migrations/0012_auto__del_field_configuration_collectionurl__add_field_configuration_r.py | abf536b3fea706cef2d8c8abc0f79516e739887a | [] | no_license | truenas/freenas-migrate93 | b6e7a32615137677dbb20f002f0455f5ca3cf5a2 | 9cd6170fc7bf636d55b0513af794f955fb7bfbce | refs/heads/master | 2023-08-18T20:24:45.513212 | 2023-02-07T12:58:01 | 2023-02-07T12:58:01 | 77,483,696 | 1 | 0 | null | 2023-04-27T10:41:55 | 2016-12-27T21:28:51 | Python | UTF-8 | Python | false | false | 2,999 | py | # -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
from freenasUI.plugins.plugin import PLUGINS_REPO
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Configuration.collectionurl'
db.delete_column(u'plugins_configuration', 'collectionurl')
# Adding field 'Configuration.repourl'
db.add_column(u'plugins_configuration', 'repourl',
self.gf('django.db.models.fields.CharField')(default=PLUGINS_REPO, max_length=255, blank=True),
keep_default=False)
def backwards(self, orm):
# Adding field 'Configuration.collectionurl'
db.add_column(u'plugins_configuration', 'collectionurl',
self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True),
keep_default=False)
# Deleting field 'Configuration.repourl'
db.delete_column(u'plugins_configuration', 'repourl')
models = {
u'plugins.configuration': {
'Meta': {'object_name': 'Configuration'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'repourl': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
u'plugins.plugins': {
'Meta': {'object_name': 'Plugins'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'plugin_api_version': ('django.db.models.fields.CharField', [], {'default': "'1'", 'max_length': '20'}),
'plugin_arch': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'plugin_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'plugin_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'plugin_jail': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'plugin_name': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'plugin_path': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'plugin_pbiname': ('django.db.models.fields.CharField', [], {'max_length': '120'}),
'plugin_port': ('django.db.models.fields.IntegerField', [], {'max_length': '120'}),
'plugin_secret': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['services.RPCToken']", 'on_delete': 'models.PROTECT'}),
'plugin_version': ('django.db.models.fields.CharField', [], {'max_length': '120'})
},
u'services.rpctoken': {
'Meta': {'object_name': 'RPCToken'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '1024'})
}
}
complete_apps = ['plugins']
| [
"suraj@ixsystems.com"
] | suraj@ixsystems.com |
c85075fd007feb5b4fdc25b58ff728536d490e0f | 4074db4436d5fc5fa5395de072557def620f993e | /0x05-python-exceptions/2-safe_print_list_integers.py~ | 45b48e50405d79479ecfaf25f5542b82b3a9cb0b | [] | no_license | Hunt66/holbertonschool-higher_level_programming | 49b4a93a8b565cdd588e26e6348bed5d3e9d6953 | b26f42c1d41bb24842d77bf5cf86c441bd8fcf51 | refs/heads/master | 2020-03-28T11:11:52.204554 | 2019-03-25T19:45:28 | 2019-03-25T19:45:28 | 148,187,536 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 390 | #!/usr/bin/python3
def safe_print_list_integers(my_list=[], x=0):
try:
for i in range(0, x):
print("{:d}".format(my_list[i]), end='')
print('')
return i
except ValueError:
return 0
except IndexError:
for i in my_list:
print("{:d}".format(i), end='')
print('')
return i
except:
return 0
| [
"489@holbertonschool.com"
] | 489@holbertonschool.com | |
33d87a72c879d79bfd1ceb6f6d0e13ea24c6d194 | 44dea7bd1f7fc93ce5fa3ca14c949120024ef554 | /exercise/bubble.py | 50d760b4c94e3ff3e62b4a1102edb1ad23a72292 | [] | no_license | WillGhost/op | 09b5f94401453d7abdcdf1319d7a753288b4f083 | 1685f18ba3949a23295d408bc781f07f87a1009d | refs/heads/master | 2023-08-28T05:38:59.105302 | 2023-08-24T08:56:36 | 2023-08-24T08:56:36 | 14,950,011 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | #!/usr/bin/env python3
import sys
inp = sys.argv[1:]
for i in range(len(inp)):
inp[i] = int(inp[i])
def bubble(xx):
length = len(xx)
for i in range(length-1):
for j in range(length-1-i):
if xx[j] > xx[j+1]:
xx[j],xx[j+1] = xx[j+1],xx[j]
return xx
print(inp)
print(bubble(inp))
| [
"you@example.com"
] | you@example.com |
7df9d108f3898a03159219c602db43624a790af0 | 256f817910dd698970fab89871c6ce66a3c416e7 | /1. solvedProblems/1239. Maximum Length of a Concatenated String with Unique Characters/tempCodeRunnerFile.py | 13d8f02fb2762e4ce4564f4df6039ae66a6109ce | [] | no_license | tgaochn/leetcode | 5926c71c1555d2659f7db4eff9e8cb9054ea9b60 | 29f1bd681ae823ec6fe755c8f91bfe1ca80b6367 | refs/heads/master | 2023-02-25T16:12:42.724889 | 2021-02-04T21:05:34 | 2021-02-04T21:05:34 | 319,225,860 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29 | py |
def updateCharSet(ch | [
"tgaochn@gmail.com"
] | tgaochn@gmail.com |
c104b0cb17230d79f8f8a4956851bd07e3ee11e2 | 9d81fb4bfac02630c0ecf36c35cc63515470961e | /demo/calculated_flow.py | f2f685fa5c857feaf9ee78b842e750e61cefb0fd | [] | no_license | lechat/jenkinsflow | d36cc20af9f03dbbc1a82af5ce1c83f6bc365f2d | 87396069dda4f0681829e5d4e264e4f09ae34131 | refs/heads/master | 2020-04-05T04:35:39.314623 | 2016-08-26T22:18:59 | 2016-08-26T22:18:59 | 10,026,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,959 | py | #!/usr/bin/env python
# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
# Demonstrate that the flow can be dynamically calculated by the script
from __future__ import print_function
import sys, os, tempfile
import logging
from collections import OrderedDict
import demo_setup
demo_setup.sys_path()
from jenkinsflow.flow import serial
from jenkinsflow.unbuffered import UnBuffered
# Unbuffered output does not work well in Jenkins/Hudson, so in case
# this is run from a jenkins/hudson job, we want unbuffered output
sys.stdout = UnBuffered(sys.stdout)
import demo_security as security
def main(api):
logging.basicConfig()
logging.getLogger("").setLevel(logging.WARNING)
print("Doing stuff before flow ...")
demo_name = 'jenkinsflow_demo__calculated_flow'
hudson = os.environ.get('HUDSON_URL')
graph_output_dir = os.path.join(tempfile.gettempdir(), demo_name + ('_hudson' if hudson else '_jenkins'))
if not os.path.exists(graph_output_dir):
os.makedirs(graph_output_dir)
g1_components = range(1)
g2_components = range(2)
g3_components = range(2)
component_groups = OrderedDict((('g1', g1_components), ('g2', g2_components), ('g3', g3_components)))
# Flow
with serial(api, timeout=70, securitytoken=security.securitytoken, job_name_prefix=demo_name + '__', report_interval=3,
# Write json flow graph to display in browser, see INSTALL.md
json_dir=graph_output_dir, json_indent=4) as ctrl1:
ctrl1.invoke('prepare')
with ctrl1.parallel(timeout=0, report_interval=3) as ctrl2:
for gname, group in component_groups.items():
with ctrl2.serial(timeout=0, report_interval=3) as ctrl3:
for component in group:
ctrl3.invoke('deploy_component_' + gname + '_' + str(component))
with ctrl1.parallel(timeout=0, report_interval=3) as ctrl2:
ctrl2.invoke('report_deploy')
ctrl2.invoke('prepare_tests')
with ctrl1.parallel(timeout=40, report_interval=3) as ctrl2:
with ctrl2.serial(timeout=40, report_interval=3) as ctrl3:
ctrl3.invoke('test_ui')
with ctrl3.parallel(timeout=0, report_interval=3) as ctrl4:
for gname, group in component_groups.items():
for component in group:
ctrl4.invoke('test_component_' + gname + '_' + str(component))
ctrl2.invoke('test_x')
ctrl1.invoke('report', password='Y', s1='tst_regression', c1='complete')
ctrl1.invoke('promote')
print("Doing stuff after flow ...")
if __name__ == '__main__':
from jenkinsflow.jenkins_api import Jenkins
jenkins = Jenkins(os.environ.get('JENKINS_URL') or os.environ.get('HUDSON_URL') or "http://localhost:8080")
main(jenkins)
| [
"lhn@hupfeldtit.dk"
] | lhn@hupfeldtit.dk |
2d407ed2a59369382421c860ca69164825530a1b | ccddff63d10b9bf857ff5907ed14b1be85975a47 | /test/luna/run_api_tests.py | 608e3b5b61bfe8a2b08c3aacf560e0526a124536 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | webosose/avoutputd | 4d9aff450acce8c62faad22c1fe10d5984993ca1 | 32fa2fc02e76be75b0943a13e8c97d43fb71abdb | refs/heads/master | 2021-03-31T01:04:56.961145 | 2018-07-09T04:49:30 | 2018-08-01T05:29:15 | 125,002,453 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 244 | py | #!/usr/bin/python2
import unittest
import os
if __name__ == '__main__':
testdir = os.path.dirname(os.path.realpath(__file__))
testsuite = unittest.TestLoader().discover(testdir)
unittest.TextTestRunner(verbosity=1).run(testsuite)
| [
"changhyeok.bae@lge.com"
] | changhyeok.bae@lge.com |
5aa885bbab92bb6d71a9a139107820fa31c1f3b6 | 5bfca95abf14f7bb0ff29b58b018fc9062d3f837 | /apps/first_draft/migrations/0041_auto_20171120_2256.py | 34592e123aa65322113d68d5e838de3b53506734 | [] | no_license | wdudek82/django-ogame-clone | 621afb20ea2dd3c0f2e4b93dfdd604e0628bd7b8 | 472971da826d078176a5d619b3b5cad89e3d1c5c | refs/heads/master | 2021-09-08T15:19:49.407650 | 2017-11-26T19:14:48 | 2017-11-26T19:14:48 | 124,670,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 619 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-20 21:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('first_draft', '0040_auto_20171120_2250'),
]
operations = [
migrations.RenameField(
model_name='resource',
old_name='new_amount',
new_name='additional_amount',
),
migrations.AlterField(
model_name='resource',
name='amount',
field=models.PositiveIntegerField(default=0),
),
]
| [
"wdudek82@gmail.com"
] | wdudek82@gmail.com |
68fc75e9c08edbc00449bd58859742b88ae934ec | 5dfbfa153f22b3f58f8138f62edaeef30bad46d3 | /old_ws/build/baxter_simulator/baxter_gazebo/catkin_generated/pkg.develspace.context.pc.py | 0164fbae495acd3846f2e807fea2c9e97795f147 | [] | no_license | adubredu/rascapp_robot | f09e67626bd5a617a569c9a049504285cecdee98 | 29ace46657dd3a0a6736e086ff09daa29e9cf10f | refs/heads/master | 2022-01-19T07:52:58.511741 | 2019-04-01T19:22:48 | 2019-04-01T19:22:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "baxter_core_msgs;gazebo_ros_control;roscpp;controller_manager_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "baxter_gazebo"
PROJECT_SPACE_DIR = "/home/bill/ros_ws/devel"
PROJECT_VERSION = "1.2.12"
| [
"alphonsusbq436@gmail.com"
] | alphonsusbq436@gmail.com |
ae114df5ae9c386214f76e7cd76204f262611484 | ee3e1092daa3b8140ceea4bfef623b54302dfb6c | /core/views.py | 35017542527561aebd701de68bb91d62a9ae7655 | [] | no_license | safwanvk/craigslist | fa68a13b179f9b65ec76119448c03f93bfc845bb | 2ad79c2e6d418b5d7809871a928a24ae10e705ab | refs/heads/master | 2022-11-30T06:10:05.310661 | 2020-08-21T04:23:23 | 2020-08-21T04:23:23 | 289,067,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,602 | py | from urllib.parse import quote_plus
import requests
from django.shortcuts import render
from bs4 import BeautifulSoup
# Create your views here.
from . models import Search
BASE_CRAIGSLIST_URL = 'https://losangeles.craigslist.org/search/?query={}'
BASE_IMAGE_URL = 'https://images.craigslist.org/{}_300x300.jpg'
def home(request):
return render(request, 'core/base.html')
def new_search(request):
search = request.POST.get('search')
Search.objects.create(search=search)
final_url = BASE_CRAIGSLIST_URL.format(quote_plus(search))
response = requests.get(final_url)
data = response.text
soup = BeautifulSoup(data, features='html.parser')
post_listings = soup.find_all('li', {'class': 'result-row'})
final_postings = []
for post in post_listings:
post_title = post.find(class_='result-title').text
post_url = post.find('a').get('href')
if post.find(class_='result-price'):
post_price = post.find(class_='result-price').text
else:
post_price = 'N/A'
if post.find(class_='result-image').get('data-ids'):
post_image_id = post.find(class_='result-image').get('data-ids').split(',')[0].split(':')[1]
post_image_url = BASE_IMAGE_URL.format(post_image_id)
else:
post_image_url = 'https://craigslist.org/images/peace.jpg'
final_postings.append((post_title, post_url, post_price, post_image_url))
context = {
'search': search,
'final_postings': final_postings
}
return render(request, 'core/new_search.html', context)
| [
"safwanvalakundil@gmail.com"
] | safwanvalakundil@gmail.com |
293e1a3742444a723d6ba6b124a56450cbe4848f | 377fc6e13101a2a45826cd118110c790f396a805 | /abc098-b.py | 18748153555b09fd04b0e3e02a01bd011f6989da | [] | no_license | number09/atcoder | 4076e7223f424b9923754e73992d6442e0bb0de7 | f521ca1205b254d99744abaf6a7a5bfe69845fe0 | refs/heads/master | 2021-06-04T23:16:39.021645 | 2021-01-19T08:30:39 | 2021-01-19T08:30:39 | 132,128,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | int_n = int(input())
str_s = input()
result_ar = []
for i in range(len(str_s)):
# print(str_s[:i], str_s[i:])
result_ar.append(len(set(str_s[:i]) & set(str_s[i:])))
print(max(result_ar))
| [
"cielo.abierto09@gmail.com"
] | cielo.abierto09@gmail.com |
aa52b2b896281df2c1629a0c22b6384ce12f94ca | 7a704e838d89f942a1099fec141f1fbe9828e528 | /third/slim/nets/mobilenet_v1_test.py | b8c403698d9c55e6d4e4022e2ca4e8bcdb8aea87 | [
"Apache-2.0"
] | permissive | cap-ntu/Video-to-Retail-Platform | 3ee00d22b7fd94925adac08c5ea733ee647f4574 | 757c68d9de0778e3da8bbfa678d89251a6955573 | refs/heads/hysia_v2 | 2023-02-14T05:22:16.792928 | 2021-01-10T02:31:43 | 2021-01-10T02:31:43 | 212,741,650 | 63 | 20 | Apache-2.0 | 2021-01-10T02:32:00 | 2019-10-04T05:22:08 | Python | UTF-8 | Python | false | false | 26,250 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for MobileNet v1."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from nets import mobilenet_v1
slim = tf.contrib.slim
class MobilenetV1Test(tf.test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(logits.op.name.startswith(
'MobilenetV1/Logits/SpatialSqueeze'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildPreLogitsNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = None
inputs = tf.random_uniform((batch_size, height, width, 3))
net, end_points = mobilenet_v1.mobilenet_v1(inputs, num_classes)
self.assertTrue(net.op.name.startswith('MobilenetV1/Logits/AvgPool'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1, 1, 1024])
self.assertFalse('Logits' in end_points)
self.assertFalse('Predictions' in end_points)
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
net, end_points = mobilenet_v1.mobilenet_v1_base(inputs)
self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_13'))
self.assertListEqual(net.get_shape().as_list(),
[batch_size, 7, 7, 1024])
expected_endpoints = ['Conv2d_0',
'Conv2d_1_depthwise', 'Conv2d_1_pointwise',
'Conv2d_2_depthwise', 'Conv2d_2_pointwise',
'Conv2d_3_depthwise', 'Conv2d_3_pointwise',
'Conv2d_4_depthwise', 'Conv2d_4_pointwise',
'Conv2d_5_depthwise', 'Conv2d_5_pointwise',
'Conv2d_6_depthwise', 'Conv2d_6_pointwise',
'Conv2d_7_depthwise', 'Conv2d_7_pointwise',
'Conv2d_8_depthwise', 'Conv2d_8_pointwise',
'Conv2d_9_depthwise', 'Conv2d_9_pointwise',
'Conv2d_10_depthwise', 'Conv2d_10_pointwise',
'Conv2d_11_depthwise', 'Conv2d_11_pointwise',
'Conv2d_12_depthwise', 'Conv2d_12_pointwise',
'Conv2d_13_depthwise', 'Conv2d_13_pointwise']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 224, 224
endpoints = ['Conv2d_0',
'Conv2d_1_depthwise', 'Conv2d_1_pointwise',
'Conv2d_2_depthwise', 'Conv2d_2_pointwise',
'Conv2d_3_depthwise', 'Conv2d_3_pointwise',
'Conv2d_4_depthwise', 'Conv2d_4_pointwise',
'Conv2d_5_depthwise', 'Conv2d_5_pointwise',
'Conv2d_6_depthwise', 'Conv2d_6_pointwise',
'Conv2d_7_depthwise', 'Conv2d_7_pointwise',
'Conv2d_8_depthwise', 'Conv2d_8_pointwise',
'Conv2d_9_depthwise', 'Conv2d_9_pointwise',
'Conv2d_10_depthwise', 'Conv2d_10_pointwise',
'Conv2d_11_depthwise', 'Conv2d_11_pointwise',
'Conv2d_12_depthwise', 'Conv2d_12_pointwise',
'Conv2d_13_depthwise', 'Conv2d_13_pointwise']
for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default():
inputs = tf.random_uniform((batch_size, height, width, 3))
out_tensor, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'MobilenetV1/' + endpoint))
self.assertItemsEqual(endpoints[:index+1], end_points.keys())
def testBuildCustomNetworkUsingConvDefs(self):
batch_size = 5
height, width = 224, 224
conv_defs = [
mobilenet_v1.Conv(kernel=[3, 3], stride=2, depth=32),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=64),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=128),
mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=512)
]
inputs = tf.random_uniform((batch_size, height, width, 3))
net, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint='Conv2d_3_pointwise', conv_defs=conv_defs)
self.assertTrue(net.op.name.startswith('MobilenetV1/Conv2d_3'))
self.assertListEqual(net.get_shape().as_list(),
[batch_size, 56, 56, 512])
expected_endpoints = ['Conv2d_0',
'Conv2d_1_depthwise', 'Conv2d_1_pointwise',
'Conv2d_2_depthwise', 'Conv2d_2_pointwise',
'Conv2d_3_depthwise', 'Conv2d_3_pointwise']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildAndCheckAllEndPointsUptoConv2d_13(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm):
_, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint='Conv2d_13_pointwise')
_, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(
inputs, final_endpoint='Conv2d_13_pointwise',
use_explicit_padding=True)
endpoints_shapes = {'Conv2d_0': [batch_size, 112, 112, 32],
'Conv2d_1_depthwise': [batch_size, 112, 112, 32],
'Conv2d_1_pointwise': [batch_size, 112, 112, 64],
'Conv2d_2_depthwise': [batch_size, 56, 56, 64],
'Conv2d_2_pointwise': [batch_size, 56, 56, 128],
'Conv2d_3_depthwise': [batch_size, 56, 56, 128],
'Conv2d_3_pointwise': [batch_size, 56, 56, 128],
'Conv2d_4_depthwise': [batch_size, 28, 28, 128],
'Conv2d_4_pointwise': [batch_size, 28, 28, 256],
'Conv2d_5_depthwise': [batch_size, 28, 28, 256],
'Conv2d_5_pointwise': [batch_size, 28, 28, 256],
'Conv2d_6_depthwise': [batch_size, 14, 14, 256],
'Conv2d_6_pointwise': [batch_size, 14, 14, 512],
'Conv2d_7_depthwise': [batch_size, 14, 14, 512],
'Conv2d_7_pointwise': [batch_size, 14, 14, 512],
'Conv2d_8_depthwise': [batch_size, 14, 14, 512],
'Conv2d_8_pointwise': [batch_size, 14, 14, 512],
'Conv2d_9_depthwise': [batch_size, 14, 14, 512],
'Conv2d_9_pointwise': [batch_size, 14, 14, 512],
'Conv2d_10_depthwise': [batch_size, 14, 14, 512],
'Conv2d_10_pointwise': [batch_size, 14, 14, 512],
'Conv2d_11_depthwise': [batch_size, 14, 14, 512],
'Conv2d_11_pointwise': [batch_size, 14, 14, 512],
'Conv2d_12_depthwise': [batch_size, 7, 7, 512],
'Conv2d_12_pointwise': [batch_size, 7, 7, 1024],
'Conv2d_13_depthwise': [batch_size, 7, 7, 1024],
'Conv2d_13_pointwise': [batch_size, 7, 7, 1024]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
self.assertItemsEqual(endpoints_shapes.keys(),
explicit_padding_end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in explicit_padding_end_points)
self.assertListEqual(
explicit_padding_end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testOutputStride16BuildAndCheckAllEndPointsUptoConv2d_13(self):
batch_size = 5
height, width = 224, 224
output_stride = 16
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
normalizer_fn=slim.batch_norm):
_, end_points = mobilenet_v1.mobilenet_v1_base(
inputs, output_stride=output_stride,
final_endpoint='Conv2d_13_pointwise')
_, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(
inputs, output_stride=output_stride,
final_endpoint='Conv2d_13_pointwise', use_explicit_padding=True)
endpoints_shapes = {'Conv2d_0': [batch_size, 112, 112, 32],
'Conv2d_1_depthwise': [batch_size, 112, 112, 32],
'Conv2d_1_pointwise': [batch_size, 112, 112, 64],
'Conv2d_2_depthwise': [batch_size, 56, 56, 64],
'Conv2d_2_pointwise': [batch_size, 56, 56, 128],
'Conv2d_3_depthwise': [batch_size, 56, 56, 128],
'Conv2d_3_pointwise': [batch_size, 56, 56, 128],
'Conv2d_4_depthwise': [batch_size, 28, 28, 128],
'Conv2d_4_pointwise': [batch_size, 28, 28, 256],
'Conv2d_5_depthwise': [batch_size, 28, 28, 256],
'Conv2d_5_pointwise': [batch_size, 28, 28, 256],
'Conv2d_6_depthwise': [batch_size, 14, 14, 256],
'Conv2d_6_pointwise': [batch_size, 14, 14, 512],
'Conv2d_7_depthwise': [batch_size, 14, 14, 512],
'Conv2d_7_pointwise': [batch_size, 14, 14, 512],
'Conv2d_8_depthwise': [batch_size, 14, 14, 512],
'Conv2d_8_pointwise': [batch_size, 14, 14, 512],
'Conv2d_9_depthwise': [batch_size, 14, 14, 512],
'Conv2d_9_pointwise': [batch_size, 14, 14, 512],
'Conv2d_10_depthwise': [batch_size, 14, 14, 512],
'Conv2d_10_pointwise': [batch_size, 14, 14, 512],
'Conv2d_11_depthwise': [batch_size, 14, 14, 512],
'Conv2d_11_pointwise': [batch_size, 14, 14, 512],
'Conv2d_12_depthwise': [batch_size, 14, 14, 512],
'Conv2d_12_pointwise': [batch_size, 14, 14, 1024],
'Conv2d_13_depthwise': [batch_size, 14, 14, 1024],
'Conv2d_13_pointwise': [batch_size, 14, 14, 1024]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
self.assertItemsEqual(endpoints_shapes.keys(),
explicit_padding_end_points.keys())
for endpoint_name, expected_shape in endpoints_shapes.items():
self.assertTrue(endpoint_name in explicit_padding_end_points)
self.assertListEqual(
explicit_padding_end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testOutputStride8BuildAndCheckAllEndPointsUptoConv2d_13(self):
  """Builds the MobilenetV1 base with output_stride=8 and checks all shapes.

  With output_stride=8 the spatial resolution stops shrinking once
  224 / 8 = 28 is reached, so every endpoint from Conv2d_4 onward stays at
  28x28 (later "strided" layers use atrous convolution instead).  The
  explicit-padding build must produce exactly the same endpoint shapes.
  """
  batch_size = 5
  height, width = 224, 224
  output_stride = 8
  inputs = tf.random_uniform((batch_size, height, width, 3))
  with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                      normalizer_fn=slim.batch_norm):
    # Default ('SAME') padding variant.
    _, end_points = mobilenet_v1.mobilenet_v1_base(
        inputs, output_stride=output_stride,
        final_endpoint='Conv2d_13_pointwise')
    # Explicit-padding variant; must be shape-identical to the default.
    _, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(
        inputs, output_stride=output_stride,
        final_endpoint='Conv2d_13_pointwise', use_explicit_padding=True)
  # Expected static [N, H, W, C] shape for every endpoint tensor.
  endpoints_shapes = {'Conv2d_0': [batch_size, 112, 112, 32],
                      'Conv2d_1_depthwise': [batch_size, 112, 112, 32],
                      'Conv2d_1_pointwise': [batch_size, 112, 112, 64],
                      'Conv2d_2_depthwise': [batch_size, 56, 56, 64],
                      'Conv2d_2_pointwise': [batch_size, 56, 56, 128],
                      'Conv2d_3_depthwise': [batch_size, 56, 56, 128],
                      'Conv2d_3_pointwise': [batch_size, 56, 56, 128],
                      'Conv2d_4_depthwise': [batch_size, 28, 28, 128],
                      'Conv2d_4_pointwise': [batch_size, 28, 28, 256],
                      'Conv2d_5_depthwise': [batch_size, 28, 28, 256],
                      'Conv2d_5_pointwise': [batch_size, 28, 28, 256],
                      'Conv2d_6_depthwise': [batch_size, 28, 28, 256],
                      'Conv2d_6_pointwise': [batch_size, 28, 28, 512],
                      'Conv2d_7_depthwise': [batch_size, 28, 28, 512],
                      'Conv2d_7_pointwise': [batch_size, 28, 28, 512],
                      'Conv2d_8_depthwise': [batch_size, 28, 28, 512],
                      'Conv2d_8_pointwise': [batch_size, 28, 28, 512],
                      'Conv2d_9_depthwise': [batch_size, 28, 28, 512],
                      'Conv2d_9_pointwise': [batch_size, 28, 28, 512],
                      'Conv2d_10_depthwise': [batch_size, 28, 28, 512],
                      'Conv2d_10_pointwise': [batch_size, 28, 28, 512],
                      'Conv2d_11_depthwise': [batch_size, 28, 28, 512],
                      'Conv2d_11_pointwise': [batch_size, 28, 28, 512],
                      'Conv2d_12_depthwise': [batch_size, 28, 28, 512],
                      'Conv2d_12_pointwise': [batch_size, 28, 28, 1024],
                      'Conv2d_13_depthwise': [batch_size, 28, 28, 1024],
                      'Conv2d_13_pointwise': [batch_size, 28, 28, 1024]}
  # Same set of endpoints, and every tensor has the expected static shape.
  self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
  for endpoint_name, expected_shape in endpoints_shapes.items():
    self.assertTrue(endpoint_name in end_points)
    self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
                         expected_shape)
  # The explicit-padding build must also expose the identical endpoint set.
  self.assertItemsEqual(endpoints_shapes.keys(),
                        explicit_padding_end_points.keys())
  for endpoint_name, expected_shape in endpoints_shapes.items():
    self.assertTrue(endpoint_name in explicit_padding_end_points)
    self.assertListEqual(
        explicit_padding_end_points[endpoint_name].get_shape().as_list(),
        expected_shape)
def testBuildAndCheckAllEndPointsApproximateFaceNet(self):
  """Builds a 0.75-depth MobilenetV1 on 128x128 inputs (FaceNet-like config).

  With depth_multiplier=0.75 every channel count is 0.75x the standard
  width (e.g. 32 -> 24, 64 -> 48, 1024 -> 768).  Both the default and the
  explicit-padding builds must expose identical endpoint shapes.
  """
  batch_size = 5
  height, width = 128, 128
  inputs = tf.random_uniform((batch_size, height, width, 3))
  with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
                      normalizer_fn=slim.batch_norm):
    _, end_points = mobilenet_v1.mobilenet_v1_base(
        inputs, final_endpoint='Conv2d_13_pointwise', depth_multiplier=0.75)
    _, explicit_padding_end_points = mobilenet_v1.mobilenet_v1_base(
        inputs, final_endpoint='Conv2d_13_pointwise', depth_multiplier=0.75,
        use_explicit_padding=True)
  # For the Conv2d_0 layer FaceNet has depth=16
  # (here: 32 * 0.75 = 24, since this test only approximates FaceNet).
  endpoints_shapes = {'Conv2d_0': [batch_size, 64, 64, 24],
                      'Conv2d_1_depthwise': [batch_size, 64, 64, 24],
                      'Conv2d_1_pointwise': [batch_size, 64, 64, 48],
                      'Conv2d_2_depthwise': [batch_size, 32, 32, 48],
                      'Conv2d_2_pointwise': [batch_size, 32, 32, 96],
                      'Conv2d_3_depthwise': [batch_size, 32, 32, 96],
                      'Conv2d_3_pointwise': [batch_size, 32, 32, 96],
                      'Conv2d_4_depthwise': [batch_size, 16, 16, 96],
                      'Conv2d_4_pointwise': [batch_size, 16, 16, 192],
                      'Conv2d_5_depthwise': [batch_size, 16, 16, 192],
                      'Conv2d_5_pointwise': [batch_size, 16, 16, 192],
                      'Conv2d_6_depthwise': [batch_size, 8, 8, 192],
                      'Conv2d_6_pointwise': [batch_size, 8, 8, 384],
                      'Conv2d_7_depthwise': [batch_size, 8, 8, 384],
                      'Conv2d_7_pointwise': [batch_size, 8, 8, 384],
                      'Conv2d_8_depthwise': [batch_size, 8, 8, 384],
                      'Conv2d_8_pointwise': [batch_size, 8, 8, 384],
                      'Conv2d_9_depthwise': [batch_size, 8, 8, 384],
                      'Conv2d_9_pointwise': [batch_size, 8, 8, 384],
                      'Conv2d_10_depthwise': [batch_size, 8, 8, 384],
                      'Conv2d_10_pointwise': [batch_size, 8, 8, 384],
                      'Conv2d_11_depthwise': [batch_size, 8, 8, 384],
                      'Conv2d_11_pointwise': [batch_size, 8, 8, 384],
                      'Conv2d_12_depthwise': [batch_size, 4, 4, 384],
                      'Conv2d_12_pointwise': [batch_size, 4, 4, 768],
                      'Conv2d_13_depthwise': [batch_size, 4, 4, 768],
                      'Conv2d_13_pointwise': [batch_size, 4, 4, 768]}
  # Same endpoint set and exact static shapes for the default build...
  self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
  for endpoint_name, expected_shape in endpoints_shapes.items():
    self.assertTrue(endpoint_name in end_points)
    self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
                         expected_shape)
  # ...and for the explicit-padding build.
  self.assertItemsEqual(endpoints_shapes.keys(),
                        explicit_padding_end_points.keys())
  for endpoint_name, expected_shape in endpoints_shapes.items():
    self.assertTrue(endpoint_name in explicit_padding_end_points)
    self.assertListEqual(
        explicit_padding_end_points[endpoint_name].get_shape().as_list(),
        expected_shape)
def testModelHasExpectedNumberOfParameters(self):
  """The base network must hold exactly 3,217,920 model parameters."""
  images = tf.random_uniform((5, 224, 224, 3))
  scoped_ops = [slim.conv2d, slim.separable_conv2d]
  with slim.arg_scope(scoped_ops, normalizer_fn=slim.batch_norm):
    mobilenet_v1.mobilenet_v1_base(images)
    model_vars = slim.get_model_variables()
    param_count, _ = slim.model_analyzer.analyze_vars(model_vars)
    self.assertAlmostEqual(3217920, param_count)
def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
  """A 0.5 depth multiplier must halve every Conv endpoint's channel count."""
  num_classes = 1000
  images = tf.random_uniform((5, 224, 224, 3))
  _, base_points = mobilenet_v1.mobilenet_v1(images, num_classes)
  conv_keys = [k for k in base_points.keys() if k.startswith('Conv')]
  _, halved_points = mobilenet_v1.mobilenet_v1(
      images, num_classes, scope='depth_multiplied_net',
      depth_multiplier=0.5)
  for k in conv_keys:
    base_depth = base_points[k].get_shape().as_list()[3]
    halved_depth = halved_points[k].get_shape().as_list()[3]
    self.assertEqual(0.5 * base_depth, halved_depth)
def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
  """A 2.0 depth multiplier must double every Conv/Mixed endpoint depth."""
  num_classes = 1000
  images = tf.random_uniform((5, 224, 224, 3))
  _, base_points = mobilenet_v1.mobilenet_v1(images, num_classes)
  tracked_keys = [k for k in base_points.keys()
                  if k.startswith('Mixed') or k.startswith('Conv')]
  _, doubled_points = mobilenet_v1.mobilenet_v1(
      images, num_classes, scope='depth_multiplied_net',
      depth_multiplier=2.0)
  for k in tracked_keys:
    base_depth = base_points[k].get_shape().as_list()[3]
    doubled_depth = doubled_points[k].get_shape().as_list()[3]
    self.assertEqual(2.0 * base_depth, doubled_depth)
def testRaiseValueErrorWithInvalidDepthMultiplier(self):
  """Non-positive depth multipliers (-0.1, 0.0) must raise ValueError."""
  num_classes = 1000
  images = tf.random_uniform((5, 224, 224, 3))
  for bad_multiplier in (-0.1, 0.0):
    with self.assertRaises(ValueError):
      _ = mobilenet_v1.mobilenet_v1(
          images, num_classes, depth_multiplier=bad_multiplier)
def testHalfSizeImages(self):
  """112x112 inputs must yield a 4x4x1024 pre-pool map and valid logits."""
  batch_size, num_classes = 5, 1000
  images = tf.random_uniform((batch_size, 112, 112, 3))
  logits, end_points = mobilenet_v1.mobilenet_v1(images, num_classes)
  self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
  self.assertListEqual(logits.get_shape().as_list(),
                       [batch_size, num_classes])
  final_features = end_points['Conv2d_13_pointwise']
  self.assertListEqual(final_features.get_shape().as_list(),
                       [batch_size, 4, 4, 1024])
def testUnknownImageShape(self):
  """Statically-unknown H/W still produce a 7x7x1024 feature map at runtime."""
  tf.reset_default_graph()
  batch_size, num_classes = 2, 1000
  image_values = np.random.uniform(0, 1, (batch_size, 224, 224, 3))
  with self.test_session() as sess:
    images = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
    logits, end_points = mobilenet_v1.mobilenet_v1(images, num_classes)
    self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [batch_size, num_classes])
    final_features = end_points['Conv2d_13_pointwise']
    tf.global_variables_initializer().run()
    features_out = sess.run(final_features,
                            feed_dict={images: image_values})
    self.assertListEqual(list(features_out.shape),
                         [batch_size, 7, 7, 1024])
def testGlobalPoolUnknownImageShape(self):
  """global_pool accepts arbitrary sizes; 250x300 input -> 8x10 features."""
  tf.reset_default_graph()
  batch_size, num_classes = 1, 1000
  image_values = np.random.uniform(0, 1, (batch_size, 250, 300, 3))
  with self.test_session() as sess:
    images = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
    logits, end_points = mobilenet_v1.mobilenet_v1(images, num_classes,
                                                   global_pool=True)
    self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [batch_size, num_classes])
    final_features = end_points['Conv2d_13_pointwise']
    tf.global_variables_initializer().run()
    features_out = sess.run(final_features,
                            feed_dict={images: image_values})
    self.assertListEqual(list(features_out.shape),
                         [batch_size, 8, 10, 1024])
def testUnknowBatchSize(self):
  """A None batch dimension builds correctly and evaluates for batch=1.

  Fix: `assertEquals` is a deprecated unittest alias (removed in Python
  3.12); use `assertEqual` instead.
  """
  batch_size = 1
  height, width = 224, 224
  num_classes = 1000
  # Batch dimension left unknown at graph-construction time.
  inputs = tf.placeholder(tf.float32, (None, height, width, 3))
  logits, _ = mobilenet_v1.mobilenet_v1(inputs, num_classes)
  self.assertTrue(logits.op.name.startswith('MobilenetV1/Logits'))
  self.assertListEqual(logits.get_shape().as_list(),
                       [None, num_classes])
  images = tf.random_uniform((batch_size, height, width, 3))
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    output = sess.run(logits, {inputs: images.eval()})
    self.assertEqual(output.shape, (batch_size, num_classes))
def testEvaluation(self):
  """Inference mode (is_training=False) predictions have shape [batch].

  Fix: `assertEquals` is a deprecated unittest alias (removed in Python
  3.12); use `assertEqual` instead.
  """
  batch_size = 2
  height, width = 224, 224
  num_classes = 1000
  eval_inputs = tf.random_uniform((batch_size, height, width, 3))
  logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes,
                                        is_training=False)
  predictions = tf.argmax(logits, 1)
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    output = sess.run(predictions)
    self.assertEqual(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
  """An eval graph can reuse the training graph's variables (reuse=True).

  Fix: `assertEquals` is a deprecated unittest alias (removed in Python
  3.12); use `assertEqual` instead.
  """
  train_batch_size = 5
  eval_batch_size = 2
  height, width = 150, 150
  num_classes = 1000
  train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
  mobilenet_v1.mobilenet_v1(train_inputs, num_classes)
  eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
  logits, _ = mobilenet_v1.mobilenet_v1(eval_inputs, num_classes,
                                        reuse=True)
  predictions = tf.argmax(logits, 1)
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    output = sess.run(predictions)
    self.assertEqual(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
  """spatial_squeeze=False keeps logits at shape [1, 1, 1, num_classes]."""
  num_classes = 25
  images = tf.random_uniform([1, 224, 224, 3])
  logits, _ = mobilenet_v1.mobilenet_v1(
      images, num_classes=num_classes, spatial_squeeze=False)
  with self.test_session() as sess:
    tf.global_variables_initializer().run()
    logits_out = sess.run(logits)
    self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
# Standard TensorFlow test entry point: runs all test cases in this module.
if __name__ == '__main__':
  tf.test.main()
| [
"huaizhen001@e.ntu.edu.sg"
] | huaizhen001@e.ntu.edu.sg |
a869f7fe8be2069a8b166e8e4e6df23fef8201c7 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/84/usersdata/162/53871/submittedfiles/lista1.py | 1b054bc98171dfca2ac844d491d8d9aa57da965c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | n=int(input('Digite o tamanho da lista:'))
# Read the n integers announced above, then print: sum of odd values,
# sum of even values, count of odd values, count of even values, and
# finally the whole list.
lista = []
for _ in range(n):
    numero = int(input('Digite o número:'))
    lista.append(numero)

soma = 0    # sum of odd values
soma2 = 0   # sum of even values
cont = 0    # how many odd values
cont2 = 0   # how many even values
for valor in lista:
    if valor % 2 != 0:
        cont += 1
        soma += valor
    else:
        soma2 += valor
        cont2 += 1

print(soma)
print(soma2)
print(cont)
print(cont2)
print(lista)
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
158882e1a339bf6f2984af63cfcedd9e9c4c750e | 482b28d5680768c2e95d214a9c8b97e5c6e93191 | /project1/core python startingclasses/21.07.2018.2.py | 668431893f450270a62ad22fe48366da27a046f3 | [] | no_license | prasadnaidu1/projects | 2bf3cd0fa3b93e549775e928077e1848ac05955b | e41182a9f3c9ea46a2ac0352eac1c81fab79c08a | refs/heads/master | 2020-04-12T19:33:04.834208 | 2018-12-21T12:32:37 | 2018-12-21T12:32:37 | 155,504,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | def calculate_find(enternumberofadults ,enternumberofchilds):
# Fixed package prices for one adult plus one child.
# NOTE(review): the enclosing function's two count parameters
# (enternumberofadults, enternumberofchilds) are never used here --
# confirm whether the totals should scale with them.
aduldsprice = 37555.0
childsprice = 12518.3
cost = aduldsprice + childsprice
# 7% tax on the base cost, then a 10% discount applied to (cost + tax).
tax = (cost * 7) / 100
discount = (cost+tax)*10 / 100
# Final price: base cost plus tax, minus the discount.
d = (aduldsprice+childsprice+tax)-discount
return d
# Prompt the operator and echo the raw text back unchanged; note that
# calculate_find is never invoked by this visible code.
x=input('enteradults,enter chids:')
print(x)
"you@example.com"
] | you@example.com |
c5f19460363ba45b81ea544fda6aa629abf232d1 | 98afa7053fc691d833e8981f59aa210c198cb72f | /python_isz_test/iszErpRequest/auditHouseContract.py | 00b589c420f4cbfdfc80a63e7d4164a4ecd55ed6 | [] | no_license | zhonglinglong/pyqt5_demo | 4abd1618f38f32875c52426e1af97b9e935ca7b4 | b4f109a5a025f70940c3707a1e16881ef72c4b41 | refs/heads/master | 2020-04-19T02:38:19.270131 | 2019-01-28T06:27:58 | 2019-01-28T06:27:58 | 167,909,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48,492 | py | # -*- coding: utf-8 -*-
# @Time : 2019/1/17 13:27
# @Author : linglong
# @File : auditHouseContract.py
import json
import os
import subprocess
import sys
from datetime import time
from PyQt5 import QtCore, QtGui, QtWidgets
import pymongo
import requests
from PyQt5.QtGui import QFont
from PyQt5.QtWidgets import QApplication, QMainWindow, QLineEdit, QMessageBox
import execjs
# Fetch the login verification code (cookie) — helpers below build the device fingerprint.
class NewMongDB(object):
    """Basic MongoDB helper: fetch the newest login-safety SMS for a phone.

    Fixes: the module imports ``from datetime import time``, which shadows
    the ``time`` module, so the original ``time.sleep(1)`` raised
    AttributeError; a function-local ``import time`` restores the intended
    behavior.  The bare ``except`` is narrowed to the lookup errors the
    body can actually raise, and the fall-through return is made explicit.
    """

    def __init__(self, phone):
        # NOTE(review): credentials are hard-coded for the test environment.
        self.mgConn = pymongo.MongoClient(
            'mongodb://root:Ishangzu_mongodb@192.168.0.200:27020/')
        self.phone = phone

    def db_find(self):
        """Return the 4-char code from the newest login_safety_system SMS.

        Polls the collection up to 60 times, sleeping 1s after a failed
        content lookup; returns None when no code could be extracted.
        """
        import time as _time  # module-level `time` is datetime.time (shadowed)
        self.mgDB = self.mgConn.sms.smsMtHis  # database `sms`, collection `smsMtHis`
        for _ in range(60):
            cursor = self.mgDB.find(
                {"destPhone": self.phone, "template_key": "login_safety_system"}
            ).sort([("create_time", -1)]).limit(1)
            for doc in cursor:
                try:
                    # The code occupies characters 24-27 of the SMS body.
                    return doc['content'][24:28]
                except (KeyError, TypeError, IndexError):
                    _time.sleep(1)
        return None
def getCpuID():
    """Return this machine's CPU ProcessorID (Windows-only, via `wmic`).

    Fix: the original parsed ``str(p)`` -- the *repr* of the bytes object --
    and split on the literal characters ``\\r\\r\\n``, which is fragile and
    crashes with IndexError when the output layout differs.  Decode the
    bytes properly and take the first non-empty line after the header.
    :return: ProcessorID string, or '' when it cannot be parsed.
    """
    out = subprocess.check_output('wmic CPU get ProcessorID')
    lines = [ln.strip()
             for ln in out.decode(errors='ignore').splitlines()
             if ln.strip()]
    # lines[0] is the 'ProcessorId' column header; lines[1] is the value.
    return lines[1] if len(lines) > 1 else ''
def getMAC():
    """Return the first MAC address (XX-XX-XX-XX-XX-XX) reported by `getmac`.

    Windows-only.  Fix: the original sliced the decoded console output at
    the fixed offsets ``[154:171]``, which breaks whenever the console
    locale, language, or column layout differs; a regex finds the address
    regardless of layout.
    :return: MAC address string, or '' when none is found.
    """
    import re  # local import keeps this fix self-contained
    out = subprocess.check_output('getmac')
    text = out.decode('gbk', errors='ignore')
    match = re.search(r'[0-9A-Fa-f]{2}(?:-[0-9A-Fa-f]{2}){5}', text)
    return match.group(0) if match else ''
def getAuthKey():
"""
获取ERP客户端验证码
:return: 客户端登录校验码
"""
os.environ["NODE_PATH"] = os.getcwd() + "\\node_modules"
parser = execjs.compile("""
function strEnc(data,firstKey,secondKey,thirdKey){
var leng = data.length;
var encData = "";
var firstKeyBt,secondKeyBt,thirdKeyBt,firstLength,secondLength,thirdLength;
if(firstKey != null && firstKey != ""){
firstKeyBt = getKeyBytes(firstKey);
firstLength = firstKeyBt.length;
}
if(secondKey != null && secondKey != ""){
secondKeyBt = getKeyBytes(secondKey);
secondLength = secondKeyBt.length;
}
if(thirdKey != null && thirdKey != ""){
thirdKeyBt = getKeyBytes(thirdKey);
thirdLength = thirdKeyBt.length;
}
if(leng > 0){
if(leng < 4){
var bt = strToBt(data);
var encByte ;
if(firstKey != null && firstKey !="" && secondKey != null && secondKey != "" && thirdKey != null && thirdKey != ""){
var tempBt;
var x,y,z;
tempBt = bt;
for(x = 0;x < firstLength ;x ++){
tempBt = enc(tempBt,firstKeyBt[x]);
}
for(y = 0;y < secondLength ;y ++){
tempBt = enc(tempBt,secondKeyBt[y]);
}
for(z = 0;z < thirdLength ;z ++){
tempBt = enc(tempBt,thirdKeyBt[z]);
}
encByte = tempBt;
}else{
if(firstKey != null && firstKey !="" && secondKey != null && secondKey != ""){
var tempBt;
var x,y;
tempBt = bt;
for(x = 0;x < firstLength ;x ++){
tempBt = enc(tempBt,firstKeyBt[x]);
}
for(y = 0;y < secondLength ;y ++){
tempBt = enc(tempBt,secondKeyBt[y]);
}
encByte = tempBt;
}else{
if(firstKey != null && firstKey !=""){
var tempBt;
var x = 0;
tempBt = bt;
for(x = 0;x < firstLength ;x ++){
tempBt = enc(tempBt,firstKeyBt[x]);
}
encByte = tempBt;
}
}
}
encData = bt64ToHex(encByte);
}else{
var iterator = parseInt(leng/4);
var remainder = leng%4;
var i=0;
for(i = 0;i < iterator;i++){
var tempData = data.substring(i*4+0,i*4+4);
var tempByte = strToBt(tempData);
var encByte ;
if(firstKey != null && firstKey !="" && secondKey != null && secondKey != "" && thirdKey != null && thirdKey != ""){
var tempBt;
var x,y,z;
tempBt = tempByte;
for(x = 0;x < firstLength ;x ++){
tempBt = enc(tempBt,firstKeyBt[x]);
}
for(y = 0;y < secondLength ;y ++){
tempBt = enc(tempBt,secondKeyBt[y]);
}
for(z = 0;z < thirdLength ;z ++){
tempBt = enc(tempBt,thirdKeyBt[z]);
}
encByte = tempBt;
}else{
if(firstKey != null && firstKey !="" && secondKey != null && secondKey != ""){
var tempBt;
var x,y;
tempBt = tempByte;
for(x = 0;x < firstLength ;x ++){
tempBt = enc(tempBt,firstKeyBt[x]);
}
for(y = 0;y < secondLength ;y ++){
tempBt = enc(tempBt,secondKeyBt[y]);
}
encByte = tempBt;
}else{
if(firstKey != null && firstKey !=""){
var tempBt;
var x;
tempBt = tempByte;
for(x = 0;x < firstLength ;x ++){
tempBt = enc(tempBt,firstKeyBt[x]);
}
encByte = tempBt;
}
}
}
encData += bt64ToHex(encByte);
}
if(remainder > 0){
var remainderData = data.substring(iterator*4+0,leng);
var tempByte = strToBt(remainderData);
var encByte ;
if(firstKey != null && firstKey !="" && secondKey != null && secondKey != "" && thirdKey != null && thirdKey != ""){
var tempBt;
var x,y,z;
tempBt = tempByte;
for(x = 0;x < firstLength ;x ++){
tempBt = enc(tempBt,firstKeyBt[x]);
}
for(y = 0;y < secondLength ;y ++){
tempBt = enc(tempBt,secondKeyBt[y]);
}
for(z = 0;z < thirdLength ;z ++){
tempBt = enc(tempBt,thirdKeyBt[z]);
}
encByte = tempBt;
}else{
if(firstKey != null && firstKey !="" && secondKey != null && secondKey != ""){
var tempBt;
var x,y;
tempBt = tempByte;
for(x = 0;x < firstLength ;x ++){
tempBt = enc(tempBt,firstKeyBt[x]);
}
for(y = 0;y < secondLength ;y ++){
tempBt = enc(tempBt,secondKeyBt[y]);
}
encByte = tempBt;
}else{
if(firstKey != null && firstKey !=""){
var tempBt;
var x;
tempBt = tempByte;
for(x = 0;x < firstLength ;x ++){
tempBt = enc(tempBt,firstKeyBt[x]);
}
encByte = tempBt;
}
}
}
encData += bt64ToHex(encByte);
}
}
}
return encData;
}
/*
* decrypt the encrypted string to the original string
*
* return the original string
*/
function strDec(data,firstKey,secondKey,thirdKey){
var leng = data.length;
var decStr = "";
var firstKeyBt,secondKeyBt,thirdKeyBt,firstLength,secondLength,thirdLength;
if(firstKey != null && firstKey != ""){
firstKeyBt = getKeyBytes(firstKey);
firstLength = firstKeyBt.length;
}
if(secondKey != null && secondKey != ""){
secondKeyBt = getKeyBytes(secondKey);
secondLength = secondKeyBt.length;
}
if(thirdKey != null && thirdKey != ""){
thirdKeyBt = getKeyBytes(thirdKey);
thirdLength = thirdKeyBt.length;
}
var iterator = parseInt(leng/16);
var i=0;
for(i = 0;i < iterator;i++){
var tempData = data.substring(i*16+0,i*16+16);
var strByte = hexToBt64(tempData);
var intByte = new Array(64);
var j = 0;
for(j = 0;j < 64; j++){
intByte[j] = parseInt(strByte.substring(j,j+1));
}
var decByte;
if(firstKey != null && firstKey !="" && secondKey != null && secondKey != "" && thirdKey != null && thirdKey != ""){
var tempBt;
var x,y,z;
tempBt = intByte;
for(x = thirdLength - 1;x >= 0;x --){
tempBt = dec(tempBt,thirdKeyBt[x]);
}
for(y = secondLength - 1;y >= 0;y --){
tempBt = dec(tempBt,secondKeyBt[y]);
}
for(z = firstLength - 1;z >= 0 ;z --){
tempBt = dec(tempBt,firstKeyBt[z]);
}
decByte = tempBt;
}else{
if(firstKey != null && firstKey !="" && secondKey != null && secondKey != ""){
var tempBt;
var x,y,z;
tempBt = intByte;
for(x = secondLength - 1;x >= 0 ;x --){
tempBt = dec(tempBt,secondKeyBt[x]);
}
for(y = firstLength - 1;y >= 0 ;y --){
tempBt = dec(tempBt,firstKeyBt[y]);
}
decByte = tempBt;
}else{
if(firstKey != null && firstKey !=""){
var tempBt;
var x,y,z;
tempBt = intByte;
for(x = firstLength - 1;x >= 0 ;x --){
tempBt = dec(tempBt,firstKeyBt[x]);
}
decByte = tempBt;
}
}
}
decStr += byteToString(decByte);
}
return decStr;
}
/*
* chang the string into the bit array
*
* return bit array(it's length % 64 = 0)
*/
function getKeyBytes(key){
var keyBytes = new Array();
var leng = key.length;
var iterator = parseInt(leng/4);
var remainder = leng%4;
var i = 0;
for(i = 0;i < iterator; i ++){
keyBytes[i] = strToBt(key.substring(i*4+0,i*4+4));
}
if(remainder > 0){
keyBytes[i] = strToBt(key.substring(i*4+0,leng));
}
return keyBytes;
}
/*
* chang the string(it's length <= 4) into the bit array
*
* return bit array(it's length = 64)
*/
function strToBt(str){
var leng = str.length;
var bt = new Array(64);
if(leng < 4){
var i=0,j=0,p=0,q=0;
for(i = 0;i<leng;i++){
var k = str.charCodeAt(i);
for(j=0;j<16;j++){
var pow=1,m=0;
for(m=15;m>j;m--){
pow *= 2;
}
bt[16*i+j]=parseInt(k/pow)%2;
}
}
for(p = leng;p<4;p++){
var k = 0;
for(q=0;q<16;q++){
var pow=1,m=0;
for(m=15;m>q;m--){
pow *= 2;
}
bt[16*p+q]=parseInt(k/pow)%2;
}
}
}else{
for(i = 0;i<4;i++){
var k = str.charCodeAt(i);
for(j=0;j<16;j++){
var pow=1;
for(m=15;m>j;m--){
pow *= 2;
}
bt[16*i+j]=parseInt(k/pow)%2;
}
}
}
return bt;
}
/*
* chang the bit(it's length = 4) into the hex
*
* return hex
*/
function bt4ToHex(binary) {
var hex;
switch (binary) {
case "0000" : hex = "0"; break;
case "0001" : hex = "1"; break;
case "0010" : hex = "2"; break;
case "0011" : hex = "3"; break;
case "0100" : hex = "4"; break;
case "0101" : hex = "5"; break;
case "0110" : hex = "6"; break;
case "0111" : hex = "7"; break;
case "1000" : hex = "8"; break;
case "1001" : hex = "9"; break;
case "1010" : hex = "A"; break;
case "1011" : hex = "B"; break;
case "1100" : hex = "C"; break;
case "1101" : hex = "D"; break;
case "1110" : hex = "E"; break;
case "1111" : hex = "F"; break;
}
return hex;
}
/*
* chang the hex into the bit(it's length = 4)
*
* return the bit(it's length = 4)
*/
function hexToBt4(hex) {
var binary;
switch (hex) {
case "0" : binary = "0000"; break;
case "1" : binary = "0001"; break;
case "2" : binary = "0010"; break;
case "3" : binary = "0011"; break;
case "4" : binary = "0100"; break;
case "5" : binary = "0101"; break;
case "6" : binary = "0110"; break;
case "7" : binary = "0111"; break;
case "8" : binary = "1000"; break;
case "9" : binary = "1001"; break;
case "A" : binary = "1010"; break;
case "B" : binary = "1011"; break;
case "C" : binary = "1100"; break;
case "D" : binary = "1101"; break;
case "E" : binary = "1110"; break;
case "F" : binary = "1111"; break;
}
return binary;
}
/*
* chang the bit(it's length = 64) into the string
*
* return string
*/
function byteToString(byteData){
var str="";
for(i = 0;i<4;i++){
var count=0;
for(j=0;j<16;j++){
var pow=1;
for(m=15;m>j;m--){
pow*=2;
}
count+=byteData[16*i+j]*pow;
}
if(count != 0){
str+=String.fromCharCode(count);
}
}
return str;
}
function bt64ToHex(byteData){
var hex = "";
for(i = 0;i<16;i++){
var bt = "";
for(j=0;j<4;j++){
bt += byteData[i*4+j];
}
hex+=bt4ToHex(bt);
}
return hex;
}
function hexToBt64(hex){
var binary = "";
for(i = 0;i<16;i++){
binary+=hexToBt4(hex.substring(i,i+1));
}
return binary;
}
/*
* the 64 bit des core arithmetic
*/
function enc(dataByte,keyByte){
var keys = generateKeys(keyByte);
var ipByte = initPermute(dataByte);
var ipLeft = new Array(32);
var ipRight = new Array(32);
var tempLeft = new Array(32);
var i = 0,j = 0,k = 0,m = 0, n = 0;
for(k = 0;k < 32;k ++){
ipLeft[k] = ipByte[k];
ipRight[k] = ipByte[32+k];
}
for(i = 0;i < 16;i ++){
for(j = 0;j < 32;j ++){
tempLeft[j] = ipLeft[j];
ipLeft[j] = ipRight[j];
}
var key = new Array(48);
for(m = 0;m < 48;m ++){
key[m] = keys[i][m];
}
var tempRight = xor(pPermute(sBoxPermute(xor(expandPermute(ipRight),key))), tempLeft);
for(n = 0;n < 32;n ++){
ipRight[n] = tempRight[n];
}
}
var finalData =new Array(64);
for(i = 0;i < 32;i ++){
finalData[i] = ipRight[i];
finalData[32+i] = ipLeft[i];
}
return finallyPermute(finalData);
}
function dec(dataByte,keyByte){
var keys = generateKeys(keyByte);
var ipByte = initPermute(dataByte);
var ipLeft = new Array(32);
var ipRight = new Array(32);
var tempLeft = new Array(32);
var i = 0,j = 0,k = 0,m = 0, n = 0;
for(k = 0;k < 32;k ++){
ipLeft[k] = ipByte[k];
ipRight[k] = ipByte[32+k];
}
for(i = 15;i >= 0;i --){
for(j = 0;j < 32;j ++){
tempLeft[j] = ipLeft[j];
ipLeft[j] = ipRight[j];
}
var key = new Array(48);
for(m = 0;m < 48;m ++){
key[m] = keys[i][m];
}
var tempRight = xor(pPermute(sBoxPermute(xor(expandPermute(ipRight),key))), tempLeft);
for(n = 0;n < 32;n ++){
ipRight[n] = tempRight[n];
}
}
var finalData =new Array(64);
for(i = 0;i < 32;i ++){
finalData[i] = ipRight[i];
finalData[32+i] = ipLeft[i];
}
return finallyPermute(finalData);
}
function initPermute(originalData){
var ipByte = new Array(64);
for (i = 0, m = 1, n = 0; i < 4; i++, m += 2, n += 2) {
for (j = 7, k = 0; j >= 0; j--, k++) {
ipByte[i * 8 + k] = originalData[j * 8 + m];
ipByte[i * 8 + k + 32] = originalData[j * 8 + n];
}
}
return ipByte;
}
function expandPermute(rightData){
var epByte = new Array(48);
for (i = 0; i < 8; i++) {
if (i == 0) {
epByte[i * 6 + 0] = rightData[31];
} else {
epByte[i * 6 + 0] = rightData[i * 4 - 1];
}
epByte[i * 6 + 1] = rightData[i * 4 + 0];
epByte[i * 6 + 2] = rightData[i * 4 + 1];
epByte[i * 6 + 3] = rightData[i * 4 + 2];
epByte[i * 6 + 4] = rightData[i * 4 + 3];
if (i == 7) {
epByte[i * 6 + 5] = rightData[0];
} else {
epByte[i * 6 + 5] = rightData[i * 4 + 4];
}
}
return epByte;
}
function xor(byteOne,byteTwo){
var xorByte = new Array(byteOne.length);
for(i = 0;i < byteOne.length; i ++){
xorByte[i] = byteOne[i] ^ byteTwo[i];
}
return xorByte;
}
function sBoxPermute(expandByte){
var sBoxByte = new Array(32);
var binary = "";
var s1 = [
[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7],
[0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8],
[4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0],
[15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13 ]];
/* Table - s2 */
var s2 = [
[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10],
[3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5],
[0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15],
[13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9 ]];
/* Table - s3 */
var s3= [
[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8],
[13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1],
[13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7],
[1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12 ]];
/* Table - s4 */
var s4 = [
[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15],
[13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9],
[10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4],
[3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14 ]];
/* Table - s5 */
var s5 = [
[2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9],
[14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6],
[4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14],
[11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3 ]];
/* Table - s6 */
var s6 = [
[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11],
[10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8],
[9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6],
[4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13 ]];
/* Table - s7 */
var s7 = [
[4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1],
[13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6],
[1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2],
[6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12]];
/* Table - s8 */
var s8 = [
[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7],
[1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2],
[7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8],
[2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11]];
for(m=0;m<8;m++){
var i=0,j=0;
i = expandByte[m*6+0]*2+expandByte[m*6+5];
j = expandByte[m * 6 + 1] * 2 * 2 * 2
+ expandByte[m * 6 + 2] * 2* 2
+ expandByte[m * 6 + 3] * 2
+ expandByte[m * 6 + 4];
switch (m) {
case 0 :
binary = getBoxBinary(s1[i][j]);
break;
case 1 :
binary = getBoxBinary(s2[i][j]);
break;
case 2 :
binary = getBoxBinary(s3[i][j]);
break;
case 3 :
binary = getBoxBinary(s4[i][j]);
break;
case 4 :
binary = getBoxBinary(s5[i][j]);
break;
case 5 :
binary = getBoxBinary(s6[i][j]);
break;
case 6 :
binary = getBoxBinary(s7[i][j]);
break;
case 7 :
binary = getBoxBinary(s8[i][j]);
break;
}
sBoxByte[m*4+0] = parseInt(binary.substring(0,1));
sBoxByte[m*4+1] = parseInt(binary.substring(1,2));
sBoxByte[m*4+2] = parseInt(binary.substring(2,3));
sBoxByte[m*4+3] = parseInt(binary.substring(3,4));
}
return sBoxByte;
}
function pPermute(sBoxByte){
var pBoxPermute = new Array(32);
pBoxPermute[ 0] = sBoxByte[15];
pBoxPermute[ 1] = sBoxByte[ 6];
pBoxPermute[ 2] = sBoxByte[19];
pBoxPermute[ 3] = sBoxByte[20];
pBoxPermute[ 4] = sBoxByte[28];
pBoxPermute[ 5] = sBoxByte[11];
pBoxPermute[ 6] = sBoxByte[27];
pBoxPermute[ 7] = sBoxByte[16];
pBoxPermute[ 8] = sBoxByte[ 0];
pBoxPermute[ 9] = sBoxByte[14];
pBoxPermute[10] = sBoxByte[22];
pBoxPermute[11] = sBoxByte[25];
pBoxPermute[12] = sBoxByte[ 4];
pBoxPermute[13] = sBoxByte[17];
pBoxPermute[14] = sBoxByte[30];
pBoxPermute[15] = sBoxByte[ 9];
pBoxPermute[16] = sBoxByte[ 1];
pBoxPermute[17] = sBoxByte[ 7];
pBoxPermute[18] = sBoxByte[23];
pBoxPermute[19] = sBoxByte[13];
pBoxPermute[20] = sBoxByte[31];
pBoxPermute[21] = sBoxByte[26];
pBoxPermute[22] = sBoxByte[ 2];
pBoxPermute[23] = sBoxByte[ 8];
pBoxPermute[24] = sBoxByte[18];
pBoxPermute[25] = sBoxByte[12];
pBoxPermute[26] = sBoxByte[29];
pBoxPermute[27] = sBoxByte[ 5];
pBoxPermute[28] = sBoxByte[21];
pBoxPermute[29] = sBoxByte[10];
pBoxPermute[30] = sBoxByte[ 3];
pBoxPermute[31] = sBoxByte[24];
return pBoxPermute;
}
function finallyPermute(endByte){
var fpByte = new Array(64);
fpByte[ 0] = endByte[39];
fpByte[ 1] = endByte[ 7];
fpByte[ 2] = endByte[47];
fpByte[ 3] = endByte[15];
fpByte[ 4] = endByte[55];
fpByte[ 5] = endByte[23];
fpByte[ 6] = endByte[63];
fpByte[ 7] = endByte[31];
fpByte[ 8] = endByte[38];
fpByte[ 9] = endByte[ 6];
fpByte[10] = endByte[46];
fpByte[11] = endByte[14];
fpByte[12] = endByte[54];
fpByte[13] = endByte[22];
fpByte[14] = endByte[62];
fpByte[15] = endByte[30];
fpByte[16] = endByte[37];
fpByte[17] = endByte[ 5];
fpByte[18] = endByte[45];
fpByte[19] = endByte[13];
fpByte[20] = endByte[53];
fpByte[21] = endByte[21];
fpByte[22] = endByte[61];
fpByte[23] = endByte[29];
fpByte[24] = endByte[36];
fpByte[25] = endByte[ 4];
fpByte[26] = endByte[44];
fpByte[27] = endByte[12];
fpByte[28] = endByte[52];
fpByte[29] = endByte[20];
fpByte[30] = endByte[60];
fpByte[31] = endByte[28];
fpByte[32] = endByte[35];
fpByte[33] = endByte[ 3];
fpByte[34] = endByte[43];
fpByte[35] = endByte[11];
fpByte[36] = endByte[51];
fpByte[37] = endByte[19];
fpByte[38] = endByte[59];
fpByte[39] = endByte[27];
fpByte[40] = endByte[34];
fpByte[41] = endByte[ 2];
fpByte[42] = endByte[42];
fpByte[43] = endByte[10];
fpByte[44] = endByte[50];
fpByte[45] = endByte[18];
fpByte[46] = endByte[58];
fpByte[47] = endByte[26];
fpByte[48] = endByte[33];
fpByte[49] = endByte[ 1];
fpByte[50] = endByte[41];
fpByte[51] = endByte[ 9];
fpByte[52] = endByte[49];
fpByte[53] = endByte[17];
fpByte[54] = endByte[57];
fpByte[55] = endByte[25];
fpByte[56] = endByte[32];
fpByte[57] = endByte[ 0];
fpByte[58] = endByte[40];
fpByte[59] = endByte[ 8];
fpByte[60] = endByte[48];
fpByte[61] = endByte[16];
fpByte[62] = endByte[56];
fpByte[63] = endByte[24];
return fpByte;
}
function getBoxBinary(i) {
var binary = "";
switch (i) {
case 0 :binary = "0000";break;
case 1 :binary = "0001";break;
case 2 :binary = "0010";break;
case 3 :binary = "0011";break;
case 4 :binary = "0100";break;
case 5 :binary = "0101";break;
case 6 :binary = "0110";break;
case 7 :binary = "0111";break;
case 8 :binary = "1000";break;
case 9 :binary = "1001";break;
case 10 :binary = "1010";break;
case 11 :binary = "1011";break;
case 12 :binary = "1100";break;
case 13 :binary = "1101";break;
case 14 :binary = "1110";break;
case 15 :binary = "1111";break;
}
return binary;
}
/*
* generate 16 keys for xor
*
*/
function generateKeys(keyByte){
var key = new Array(56);
var keys = new Array();
keys[ 0] = new Array();
keys[ 1] = new Array();
keys[ 2] = new Array();
keys[ 3] = new Array();
keys[ 4] = new Array();
keys[ 5] = new Array();
keys[ 6] = new Array();
keys[ 7] = new Array();
keys[ 8] = new Array();
keys[ 9] = new Array();
keys[10] = new Array();
keys[11] = new Array();
keys[12] = new Array();
keys[13] = new Array();
keys[14] = new Array();
keys[15] = new Array();
var loop = [1,1,2,2,2,2,2,2,1,2,2,2,2,2,2,1];
for(i=0;i<7;i++){
for(j=0,k=7;j<8;j++,k--){
key[i*8+j]=keyByte[8*k+i];
}
}
var i = 0;
for(i = 0;i < 16;i ++){
var tempLeft=0;
var tempRight=0;
for(j = 0; j < loop[i];j ++){
tempLeft = key[0];
tempRight = key[28];
for(k = 0;k < 27 ;k ++){
key[k] = key[k+1];
key[28+k] = key[29+k];
}
key[27]=tempLeft;
key[55]=tempRight;
}
var tempKey = new Array(48);
tempKey[ 0] = key[13];
tempKey[ 1] = key[16];
tempKey[ 2] = key[10];
tempKey[ 3] = key[23];
tempKey[ 4] = key[ 0];
tempKey[ 5] = key[ 4];
tempKey[ 6] = key[ 2];
tempKey[ 7] = key[27];
tempKey[ 8] = key[14];
tempKey[ 9] = key[ 5];
tempKey[10] = key[20];
tempKey[11] = key[ 9];
tempKey[12] = key[22];
tempKey[13] = key[18];
tempKey[14] = key[11];
tempKey[15] = key[ 3];
tempKey[16] = key[25];
tempKey[17] = key[ 7];
tempKey[18] = key[15];
tempKey[19] = key[ 6];
tempKey[20] = key[26];
tempKey[21] = key[19];
tempKey[22] = key[12];
tempKey[23] = key[ 1];
tempKey[24] = key[40];
tempKey[25] = key[51];
tempKey[26] = key[30];
tempKey[27] = key[36];
tempKey[28] = key[46];
tempKey[29] = key[54];
tempKey[30] = key[29];
tempKey[31] = key[39];
tempKey[32] = key[50];
tempKey[33] = key[44];
tempKey[34] = key[32];
tempKey[35] = key[47];
tempKey[36] = key[43];
tempKey[37] = key[48];
tempKey[38] = key[38];
tempKey[39] = key[55];
tempKey[40] = key[33];
tempKey[41] = key[52];
tempKey[42] = key[45];
tempKey[43] = key[41];
tempKey[44] = key[49];
tempKey[45] = key[35];
tempKey[46] = key[28];
tempKey[47] = key[31];
switch(i){
case 0: for(m=0;m < 48 ;m++){ keys[ 0][m] = tempKey[m]; } break;
case 1: for(m=0;m < 48 ;m++){ keys[ 1][m] = tempKey[m]; } break;
case 2: for(m=0;m < 48 ;m++){ keys[ 2][m] = tempKey[m]; } break;
case 3: for(m=0;m < 48 ;m++){ keys[ 3][m] = tempKey[m]; } break;
case 4: for(m=0;m < 48 ;m++){ keys[ 4][m] = tempKey[m]; } break;
case 5: for(m=0;m < 48 ;m++){ keys[ 5][m] = tempKey[m]; } break;
case 6: for(m=0;m < 48 ;m++){ keys[ 6][m] = tempKey[m]; } break;
case 7: for(m=0;m < 48 ;m++){ keys[ 7][m] = tempKey[m]; } break;
case 8: for(m=0;m < 48 ;m++){ keys[ 8][m] = tempKey[m]; } break;
case 9: for(m=0;m < 48 ;m++){ keys[ 9][m] = tempKey[m]; } break;
case 10: for(m=0;m < 48 ;m++){ keys[10][m] = tempKey[m]; } break;
case 11: for(m=0;m < 48 ;m++){ keys[11][m] = tempKey[m]; } break;
case 12: for(m=0;m < 48 ;m++){ keys[12][m] = tempKey[m]; } break;
case 13: for(m=0;m < 48 ;m++){ keys[13][m] = tempKey[m]; } break;
case 14: for(m=0;m < 48 ;m++){ keys[14][m] = tempKey[m]; } break;
case 15: for(m=0;m < 48 ;m++){ keys[15][m] = tempKey[m]; } break;
}
}
return keys;
}
/*
 * Build the client auth key: encrypt the machine fingerprint (MAC + CPU id),
 * append the current timestamp, then encrypt the combination again.
 */
function test(mac, cpuid) {
    var fingerprint = strEnc(mac + cpuid, 'isz', 'isz', 'isz');
    var stamped = fingerprint + '*' + new Date().getTime();
    return strEnc(stamped, 'ishangzu', 'ishangzu', 'ishangzu');
}
""")
return parser.call('test', getMAC(), getCpuID())
def get_cookie(user, pwd):
    """Log in to the ERP backend the way the desktop client does and return its cookies.

    Walks the four-step client login flow: machine authorization check,
    username/password check, SMS verification-code request, and SMS code
    verification.  On success returns ``{'cookies': <dict>}``; on failure
    returns a Chinese error string (or the raw server response) describing
    the failed step, so callers must check the return type.

    :param user: ERP account phone number.
    :param pwd: ERP account password.
    :return: dict with a ``cookies`` key, or an error string / raw response.
    """
    headers = {
        'content-type': 'application/json',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36'
    }
    # Impersonate the desktop client: obtain the machine-bound auth key first.
    auth_key = getAuthKey()
    print('客户端校验登录码:'+auth_key)
    # Step 1: check that this machine's auth key is authorized.
    url = 'http://sys.ishangzu.com/isz_base/LoginAuthController/checkLoginAuth.action'
    data = {'auth_key': auth_key}
    results = requests.post(url, data=json.dumps(data), headers=headers)
    result = json.loads(results.text)
    print('授权结果:'+str(result))
    try:
        if '授权成功' in result['msg']:
            authTag = result['obj']['authTag']
        else:
            return '客户端登录第一步:检查授权失败'
    except:
        # Unexpected response shape: hand the raw payload back to the caller.
        return result
    # Step 2: verify the username/password pair.
    url = 'http://sys.ishangzu.com/isz_base/LoginController/checkUserPassWord.action'
    data = {
        'auth_code': auth_key,
        'authTag': authTag,
        'user_phone': user, 'user_pwd': pwd
    }
    results = requests.post(url, data=json.dumps(data), headers=headers)
    result = json.loads(results.text)
    print('用户账号密码校验:'+str(result))
    if '用户名密码正确' not in result['msg']:
        return '客户端登录第二步:检查用户名密码失败'
    # Step 3: ask the server to send an SMS verification code.  A
    # "sent too frequently" reply is tolerated because a still-valid code
    # from an earlier request can be reused.
    url = 'http://sys.ishangzu.com/isz_base/LoginController/getVerificationCode.action'
    data = {
        'authTag': authTag,
        'mobile': user
    }
    results = requests.post(url, data=json.dumps(data), headers=headers)
    result = json.loads(results.text)
    print('短信获取结果:'+str(result))
    if result['msg'] != 'ok' and u'验证码发送过于频繁' not in result['msg']:
        return '客户端登录第三步:获取短信验证码失败'
    # Step 4: log in with the SMS code fetched via NewMongDB(user).db_find()
    # (presumably the test environment stores the codes in MongoDB - confirm)
    # and capture the session cookies from the response.
    url = 'http://isz.ishangzu.com/isz_base/LoginController/checkVerificationCode.action'
    data = {
        'auth_code': auth_key,
        'authTag': authTag,
        'user_phone': user,
        'user_pwd': pwd,
        'verificationCode': NewMongDB(user).db_find()
    }
    response = requests.post(url, data=json.dumps(data), headers=headers)
    result = json.loads(response.text)
    print('获取的cookies结果:'+str(result))
    if result['msg'] == 'ok':
        cookies = requests.utils.dict_from_cookiejar(response.cookies)
        cookie = {}
        cookie['cookies'] = cookies
        return cookie
    else:
        return '客户端登录第四步:验证码登录失败'
# Thin HTTP helper used by the audit workflow below.
class NewRequest(object):
    """Small wrapper around ``requests`` that sends and receives JSON.

    Every call method attaches the shared headers and the session cookie,
    serializes ``data`` when it is a dict, and returns the decoded JSON
    response body.
    """

    def __init__(self, url, data=None, cookie=None):
        self.headers = {
            'content-type': 'application/json',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.110 Safari/537.36'
        }
        self.url = url
        self.data = data
        self.cookie = cookie

    def _payload(self):
        """Request body: dicts are JSON-encoded, anything else passes through."""
        return json.dumps(self.data) if isinstance(self.data, dict) else self.data

    def post(self):
        """POST ``data`` to ``url`` and return the parsed JSON response."""
        text = requests.post(
            url=self.url, headers=self.headers, cookies=self.cookie,
            data=self._payload()
        ).text
        return json.loads(text)

    def get(self):
        """GET ``url`` and return the parsed JSON response."""
        text = requests.get(
            url=self.url, headers=self.headers, cookies=self.cookie).text
        return json.loads(text)

    def put(self):
        """PUT ``data`` to ``url`` and return the parsed JSON response."""
        text = requests.put(
            url=self.url, headers=self.headers, cookies=self.cookie,
            data=self._payload()
        ).text
        return json.loads(text)
# Contract-review driver (审核合同).
class AuditHouseContract():
    """Drive the full review workflow for one entrusted-house contract.

    Resolves the contract by contract number and house code, audits the
    four detail pages with a PASS verdict, then submits the final
    re-review sign-off.
    """

    def __init__(self, contract_num, house_code, cookie):
        """Remember the lookup keys and the authenticated session cookie.

        :param contract_num: entrusted contract number to audit.
        :param house_code: house/residential code the contract belongs to.
        :param cookie: cookie dict obtained from a successful client login.
        """
        self.contract_num = contract_num
        self.house_code = house_code
        self.cookie = cookie

    def _audit_page(self, url, save_part, extra, contract_id):
        """Audit one detail page (save_part ONE..FOUR) with a PASS verdict.

        ``extra`` holds the page-specific payload keys merged into the
        common audit form.  Returns the parsed server response.
        """
        data = {
            "auditForm": {
                "audit_status": "PASS",
                "content": "同意!"
            },
            "action_type": "AUDIT",
            "save_part": save_part,
            "contract_id": contract_id
        }
        data.update(extra)
        return NewRequest(url, data, self.cookie).post()

    def audit(self):
        """Run the whole audit; return None on success, an error message otherwise."""
        # Resolve contract_id: the search must yield exactly one row.
        url = "http://erp.ishangzu.com/isz_housecontract/houseContractController/searchHouseContractListByEs"
        data = {"contract_num": self.contract_num, "residential_name": self.house_code}
        try:
            rows = NewRequest(url, data, self.cookie).post()['obj']['rows']
            contract_info = rows[0]
            contract_id = contract_info['contract_id']
        except BaseException as e:
            print('根据合同和房源编号查的委托合同数据一条都没。请重新输入')
            return (str(e)+'根据合同和房源编号查的委托合同数据一条都没')
        # The old code issued a second identical request just to probe rows[1];
        # checking the length of the already-fetched result avoids the extra
        # round trip (and the race between the two responses).
        if len(rows) > 1:
            print('根据提供的房源编号和委托合同号查出的数据至少有2条,请重新输入')
            return "根据提供的房源编号和委托合同号查出的数据至少有2条,请重新输入"
        # Fetch the full contract detail that gets echoed back on each page.
        url = "http://erp.ishangzu.com/isz_housecontract/houseContractController/searchHouseContractInfo/" + contract_id
        details = NewRequest(url, cookie=self.cookie).get()
        print('获取的委托合同详情:'+str(details))
        if details['code'] != 0:
            return details['msg']
        details = details["obj"]
        # Audit detail pages ONE..FOUR, stopping at the first server error.
        url = "http://erp.ishangzu.com/isz_housecontract/houseContractController/saveOrUpdateHouseContractDetailByPart"
        page_one = {
            "houseContractFrist": details["houseContractFrist"],
            "entrust_type": contract_info['entrust_type'],
            "house_id": contract_info['house_id']
        }
        pages = [
            ("ONE", page_one, '审核第一个页面返回结果:'),
            ("TWO", {"houseContractSecond": details["houseContractSecond"]}, '审核第二个页面返回结果:'),
            ("THREE", {"houseContractThird": details["houseContractThird"]}, '审核第三个页面返回结果:'),
            ("FOUR", {"houseContractFour": details["houseContractFour"]}, '审核第四个页面返回结果:'),
        ]
        for save_part, extra, label in pages:
            result = self._audit_page(url, save_part, extra, contract_id)
            print(label + str(result))
            # The old code tested pages TWO and FOUR with a bare truthiness
            # check; use the explicit `!= 0` everywhere for consistency.
            if result['code'] != 0:
                return result['msg']
        # Final re-review sign-off.
        url = "http://erp.ishangzu.com/isz_housecontract/houseContractController/houseContractAudit"
        data = {"audit_status": "PASS", "content": "合同内容、资料、备件无误,正常审核通过。同意!", "is_normal_approved": "0",
                "contract_id": contract_id}
        result = NewRequest(url, data, self.cookie).put()
        print('审核第五个页面返回结果:'+str(result))
        if result['code'] != 0:
            return result['msg']
        print('审核委托合同成功!')
        return
# Desktop client window layout (客户端页面).
class Ui_Form(object):
    """Static layout for the contract re-review window.

    Builds two input fields (house code, contract number) and the audit
    button.  Call :meth:`setupUi` with the widget that should host them.
    """

    def setupUi(self, Form):
        """Create and place all widgets on ``Form`` and freeze its size.

        Cleanup vs. the original: the dead commented-out login inputs and
        the duplicated ``setFont``/``resize`` calls were removed; geometry
        and styling are unchanged.
        """
        Form.setObjectName("Form")
        Form.resize(388, 181)
        # NOTE(review): reads self.width()/self.height(), so this only works
        # when Form is self (as SysUi does) - confirm before reusing setupUi
        # with a separate Form widget.
        Form.setFixedSize(self.width(), self.height())
        Form.setStyleSheet("background-color:rgb(234,248,249)")
        # House-code input.
        self.lineEdit_3 = QtWidgets.QLineEdit(Form)
        self.lineEdit_3.setGeometry(QtCore.QRect(10, 30, 113, 20))
        self.lineEdit_3.setObjectName("lineEdit_3")
        self.lineEdit_3.setPlaceholderText('房源编号')
        self.lineEdit_3.setFont(QFont("Timers", 10))
        self.lineEdit_3.resize(170, 30)
        self.lineEdit_3.setStyleSheet("color:rgb(66,66,240)")
        # Contract-number input.
        self.lineEdit_4 = QtWidgets.QLineEdit(Form)
        self.lineEdit_4.setGeometry(QtCore.QRect(200, 30, 113, 20))
        self.lineEdit_4.setObjectName("lineEdit_4")
        self.lineEdit_4.setPlaceholderText('委托合同号')
        self.lineEdit_4.resize(170, 30)
        self.lineEdit_4.setStyleSheet("color:rgb(66,66,240)")
        self.lineEdit_4.setFont(QFont("Timers", 10))
        # Re-review trigger button.
        self.pushButton = QtWidgets.QPushButton(Form)
        self.pushButton.setGeometry(QtCore.QRect(280, 130, 75, 23))
        self.pushButton.setObjectName("pushButton")
        self.pushButton.setStyleSheet("background-color:rgb(227,69,31)")
        self.pushButton.setFont(QFont("Timers", 10))
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Install the translated window title and button caption."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "委托合同审核"))
        self.pushButton.setText(_translate("Form", "复审"))
class SysUi(Ui_Form, QMainWindow):
    """Main window wiring the Ui_Form layout to the audit workflow."""

    def __init__(self):
        try:
            super(SysUi, self).__init__()
            self.setupUi(self)
            self.pushButton.clicked.connect(self.get)
            self.show()
        except BaseException as e:
            QMessageBox.about(self, "客户端初始化报错:", str(e))

    def get(self):
        """Button handler: validate the inputs, log in, run the audit."""
        house_code = QLineEdit.displayText(self.lineEdit_3)
        contract_num = QLineEdit.displayText(self.lineEdit_4)
        if house_code == '' or contract_num == '':
            print("您有部分数据为空,请输入完整信息之后再点击审核!")
            return
        # SECURITY(review): credentials are hard-coded; move them to
        # configuration or a login dialog.
        result = get_cookie('18279881085', 'a123456789')
        # get_cookie returns an error *string* on failure; the old code
        # indexed it unconditionally and crashed with a TypeError.
        if not isinstance(result, dict):
            print(result)
            return
        cookes = result['cookies']
        # Run the contract audit with the authenticated session.
        result = AuditHouseContract(contract_num, house_code, cookes).audit()
        if result != None:
            print(result)
            return
        else:
            return
if __name__ == '__main__':
    # Build the Qt application and enter its event loop.  SysUi.__init__
    # already calls show(), so the extra show() here is redundant but harmless.
    app = QApplication(sys.argv)
    myWin = SysUi()
    myWin.show()
    sys.exit(app.exec_())
| [
"125772195@qq.com"
] | 125772195@qq.com |
6822c50c813c29ba240b591f977d8bd81ba30f4c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02411/s645571797.py | e8d470430047756d2fbf592005537cd9683254d5 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | while 1:
a, b, c = map(int, input().split())
if a == b == c == -1:
break
if a == -1 or b == -1 or a + b < 30:print('F')
elif a + b >= 80:print('A')
elif a + b >= 65: print('B')
elif a + b >= 50:print('C')
elif a + b >= 30 and c >= 50:print('C')
else:print('D')
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
f6363026bf6685e36bc6dcd9bde036110c010a38 | cdb7bb6215cc2f362f2e93a040c7d8c5efe97fde | /C/CountPairsWithXORinaRange.py | bf199e04ea01b2a524ac620f74ff4ac3d4592251 | [] | no_license | bssrdf/pyleet | 8861bbac06dfe0f0f06f6ad1010d99f8def19b27 | 810575368ecffa97677bdb51744d1f716140bbb1 | refs/heads/master | 2023-08-20T05:44:30.130517 | 2023-08-19T21:54:34 | 2023-08-19T21:54:34 | 91,913,009 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,091 | py | '''
-Hard-
*Trie*
Given a (0-indexed) integer array nums and two integers low and high, return the number of nice pairs.
A nice pair is a pair (i, j) where 0 <= i < j < nums.length and low <= (nums[i] XOR nums[j]) <= high.
Example 1:
Input: nums = [1,4,2,7], low = 2, high = 6
Output: 6
Explanation: All nice pairs (i, j) are as follows:
- (0, 1): nums[0] XOR nums[1] = 5
- (0, 2): nums[0] XOR nums[2] = 3
- (0, 3): nums[0] XOR nums[3] = 6
- (1, 2): nums[1] XOR nums[2] = 6
- (1, 3): nums[1] XOR nums[3] = 3
- (2, 3): nums[2] XOR nums[3] = 5
Example 2:
Input: nums = [9,8,4,2,1], low = 5, high = 14
Output: 8
Explanation: All nice pairs (i, j) are as follows:
- (0, 2): nums[0] XOR nums[2] = 13
- (0, 3): nums[0] XOR nums[3] = 11
- (0, 4): nums[0] XOR nums[4] = 8
- (1, 2): nums[1] XOR nums[2] = 12
- (1, 3): nums[1] XOR nums[3] = 10
- (1, 4): nums[1] XOR nums[4] = 9
- (2, 3): nums[2] XOR nums[3] = 6
- (2, 4): nums[2] XOR nums[4] = 5
Constraints:
1 <= nums.length <= 2 * 104
1 <= nums[i] <= 2 * 104
1 <= low <= high <= 2 * 104
'''
from typing import List
class TrieNode:
    """Node of a 16-level binary trie over integers.

    ``go`` counts how many inserted values pass through the node, which
    lets ``find`` answer "how many stored values have XOR < k" queries.
    """

    def __init__(self):
        self.child = {}  # bit (0/1) -> TrieNode
        self.go = 0      # number of stored values whose bit path crosses here

    def increase(self, number, d):
        """Add ``d`` to the pass-through count along ``number``'s bit path."""
        node = self
        for shift in reversed(range(16)):
            bit = (number >> shift) & 1
            node = node.child.setdefault(bit, TrieNode())
            node.go += d

    def find(self, number, k):
        """Return how many stored values v satisfy (v XOR number) < k."""
        node = self
        count = 0
        for shift in reversed(range(16)):
            if node is None:
                break
            num_bit = (number >> shift) & 1
            if (k >> shift) & 1:
                # k has a 1 here: every value under the branch matching
                # num_bit makes this xor digit 0, hence is guaranteed < k.
                same_branch = node.child.get(num_bit)
                if same_branch is not None:
                    count += same_branch.go
                # Continue where the xor digit is 1, still tied with k.
                node = node.child.get(1 - num_bit)
            else:
                # k has a 0 here: the xor digit must be 0 as well, so only
                # the matching branch can still stay below k.
                node = node.child.get(num_bit)
        return count
class Solution:
    def countPairs(self, nums: List[int], low: int, high: int) -> int:
        """Count index pairs i < j with low <= nums[i] ^ nums[j] <= high.

        Streams through ``nums`` while keeping a bitwise trie of every
        value seen so far; for each x the number of earlier values y with
        x ^ y in range is count(xor < high + 1) - count(xor < low).
        Runs in O(16 * n).
        """
        trie = TrieNode()
        ans = 0
        for x in nums:
            # (A stray per-element debug print was removed here; it
            # polluted stdout once per input value.)
            ans += trie.find(x, high + 1) - trie.find(x, low)
            trie.increase(x, 1)
        return ans
if __name__ == "__main__":
print(Solution().countPairs(nums = [1,4,2,7], low = 2, high = 6)) | [
"merlintiger@hotmail.com"
] | merlintiger@hotmail.com |
1c6d5333b3330a919f394f03370a64f6476f1f2a | 8a2657f276e3b9c63c8bd0ce612237d9d66d80ae | /updater/update/ratelimit.py | f724af0aaa79792c4cd22f016025f032f8255a1c | [
"BSD-3-Clause"
] | permissive | codl/status.chitter.xyz | 25d46d5f3eb1904c25492f5374d6d0aa9ad2f3d6 | 034d04df05e2741507c329f36d427b0e4e4c7585 | refs/heads/master | 2023-05-14T21:17:49.797516 | 2022-02-03T14:38:17 | 2022-02-03T14:41:38 | 131,777,102 | 1 | 0 | BSD-3-Clause | 2023-05-01T20:20:43 | 2018-05-02T00:23:44 | JavaScript | UTF-8 | Python | false | false | 1,504 | py | from redis import StrictRedis as Redis
from pathlib import Path
import hashlib
import time
# Load the companion Lua script once at import time; RateLimit.__init__
# registers it with Redis via register_script.
lua_script_path = Path(__file__).parent / 'ratelimit.lua'
with open(lua_script_path) as f:
    LUA_SCRIPT = f.read()
del lua_script_path  # don't want it polluting the module
class RateLimit(object):
    """Token-bucket rate limiter backed by Redis and a server-side Lua script.

    Each identifier gets its own bucket of ``bucket_size`` tokens refilled
    over ``bucket_period`` seconds; the bucket state lives in Redis, so the
    limit is shared by every process using the same Redis instance.
    """

    def __init__(self,
                 redis_url='redis://',
                 redis_key_prefix='ratelimit',
                 bucket_size=50,
                 bucket_period=30):
        # register_script uploads the Lua script once and returns a callable.
        self.redis = Redis.from_url(redis_url)
        self.script = self.redis.register_script(LUA_SCRIPT)
        self.redis_key_prefix = redis_key_prefix
        self.bucket_size = bucket_size
        self.bucket_period = bucket_period

    def _exec(self, identifier, clear=False):
        """Run the Lua script for ``identifier``; ``clear`` resets the bucket.

        The identifier is hashed (blake2s, 6 bytes) so arbitrary strings
        yield short, Redis-safe key names; collisions are considered
        acceptable at this digest size.
        """
        identifier_h = hashlib.blake2s(
            identifier.encode('utf-8'), digest_size=6).hexdigest()
        token_count_key = "{}:{}:count".format(self.redis_key_prefix,
                                               identifier_h)
        token_last_add_key = "{}:{}:last-add".format(self.redis_key_prefix,
                                                     identifier_h)
        keys = [token_count_key, token_last_add_key]
        argv = [self.bucket_size, self.bucket_period, int(time.time())]
        if clear:
            argv += [True]
        return self.script(keys, argv)

    def hit(self, identifier):
        """Consume a token for ``identifier`` and return the script's integer
        result (remaining tokens or an allow flag, presumably - see
        ratelimit.lua)."""
        return int(self._exec(identifier))

    def clear(self, identifier):
        """Reset ``identifier``'s bucket via the script's clear flag."""
        self._exec(identifier, clear=True)
| [
"codl@codl.fr"
] | codl@codl.fr |
d909f630eb3400d202f3924c5cbbe3919adb6f2c | ae65873c3584cef7139066b224daad04410af6d2 | /CodeTest.py | 2237c834df11974a47ef8e96969b150e0100c6cb | [] | no_license | rajatkashyap/Python | 2240c7472d07803c460c7a55d570e20694b694f9 | f74c85c65b0e209a5f7ab25b653d42835222faaf | refs/heads/master | 2022-06-25T19:20:52.847498 | 2022-06-08T14:40:45 | 2022-06-08T14:40:45 | 145,714,257 | 0 | 0 | null | 2022-04-25T00:18:37 | 2018-08-22T13:39:14 | Python | UTF-8 | Python | false | false | 620 | py | ## input no of elements
# input elements
# input threshold
#3 4 5 6 t -10
# Python 2 script: for every starting index, greedily sum consecutive
# elements while the running total stays below threshold t, and report the
# largest such run total.
n=raw_input("Enter no of elements:")  # NOTE(review): n is read but never used
a=raw_input("Enter elements:")
t=int(raw_input("Enter threshold:"))
elements=a.split()
print elements
sum=0  # NOTE(review): shadows the built-in sum()
maxlist=[]
for x in range(0,len(elements)):
    # Try every starting index x and extend the run to the right.
    sum=0
    for y in range(x, len(elements)):
        if (sum + int(elements[y]) < t):
            sum+=int(elements[y])
            if (y==len(elements)-1):
                # Reached the end of the list: record the running total.
                maxlist.append(sum)
        else:
            # Adding the next element would reach t: record and stop this run.
            maxlist.append(sum)
            break
if(len(maxlist)==0): print 0
else: print "Max is:",max(maxlist)
| [
"rajatkashyap@Rajats-MBP.T-mobile.com"
] | rajatkashyap@Rajats-MBP.T-mobile.com |
667813317af2bd29df762c292eef9962fdee11a2 | d7d4df5f6db160cb7d090dd761aa43363eeb3351 | /Example/main_ev_nissan_leaf.py | 362df677c199363423a467061810958bc216b547 | [] | no_license | ximitiejiang/EVCable | 4fd9bb0a45e02f22faea80a54e09e2c21656f8c4 | 19959ea0eb2293107edc1953797bebe5c91bea1c | refs/heads/master | 2020-04-14T19:15:45.621049 | 2019-01-06T05:39:48 | 2019-01-06T05:39:48 | 164,051,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,763 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 4 22:58:22 2019
@author: suliang
"""
import os,sys
from EVCable import models
from importlib import import_module
from addict import Dict
class ConfigDict(Dict):  # convenient attribute-style access, works for nested dicts too
    def __init__(self, cfg_dict):
        super().__init__()
        self.cfg_dict = cfg_dict  # keep the raw configuration dict around

    # def __getattr__(self, name):
    #     value = super().__getattr__(name)
    #     return value
class Config():
    """Empty placeholder; presumably a future config wrapper class."""
    pass
def main():
    """Demo: instantiate a model by name and prototype config-file loading."""
    obj_type = 'Motor'
    obj_type = getattr(models, obj_type)  # resolve the class object by name
    motor = obj_type()  # NOTE(review): created but never used afterwards
    # Prototype of the Config-loading feature.
    path = '../EVCable/configs/cfg_ev_nissan_leaf.py'
    path = os.path.abspath(path)
    # Put the config's directory on sys.path (import_module needs a bare
    # module name - no directory, no '.py' suffix).
    sys.path.insert(0, os.path.dirname(path))
    # Import the config module by its stem name.
    cfg = import_module(os.path.basename(path)[:-3])
    # Drop the directory from sys.path again.
    sys.path.pop(0)
    print(cfg.run_config)
    print(cfg.vehicle)
    # print(cfg.battery)  # battery is a nested inner dict, not recognized
    # cfg is attribute-accessible now, but still inconvenient: nested dicts
    # don't resolve, and it supports neither iteration nor slicing.
    # The fix is to wrap the data in addict's Dict class.
    _cfg_dict = {
        name: value
        for name, value in cfg.__dict__.items()  # __dict__ holds the module attributes
        if not name.startswith('__')  # keep only user-defined config entries
    }
    cfg_dict = Dict(_cfg_dict)  # plain dict data -> attribute-accessible Dict
    print(cfg_dict.vehicle.type)  # goes through Dict's __getattr__()
    print(cfg_dict.vehicle.battery)


if __name__=='__main__':
    main()
"ximitiejiang@163.com"
] | ximitiejiang@163.com |
1ac29adf7a5a87fb932f4279d3b9685c0d7af77a | e70374ec1a1cb7a3a117780fabfbb6c16a111f33 | /setup.py | c2ed70ad479a1c296224ae1072382aedfaa89bad | [
"MIT"
] | permissive | smaggs/silica | 89bddd6b56a10fe6d6ea01bdf70d0faf29208e5f | f7f0a4cdcea1cd4c3c6b58e85aa26bc8152c5960 | refs/heads/master | 2021-01-17T21:46:38.955961 | 2016-06-10T01:26:07 | 2016-06-10T01:26:07 | 61,233,431 | 0 | 0 | null | 2016-06-15T19:02:41 | 2016-06-15T19:02:40 | null | UTF-8 | Python | false | false | 2,157 | py | """
The MIT License (MIT)
Copyright (c) 2016 Zagaran, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@author: Zags (Benjamin Zagorsky)
"""
import sys
from setuptools import setup, find_packages
if sys.version < '2.5':
print "ERROR: python version 2.5 or higher is required"
sys.exit(1)
setup(
name = "silica",
version = "0.0.1",
packages = find_packages(),
zip_safe = False,
include_package_data = True,
author = "Zagaran, Inc.",
author_email = "zags at zagaran.com",
description = "An automation layer between frontend and backend code. Currently supports Django and Angular.",
license = "MIT",
keywords = "django angular interface",
url = "https://zagaran.com",
install_requires = ["django >= 1.9"],
classifiers = [
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Programming Language :: Python",
"Topic :: Database",
],
)
| [
"iamzags@gmail.com"
] | iamzags@gmail.com |
bba29333945223175b7565aeacc4cf5303e67b15 | ff81a9d7880f1b85a1dc19d5eba5ac72d7179c86 | /pychron/core/helpers/formatting.py | c75df90da927cdb7c8941b04ce060cc06cb1483d | [
"Apache-2.0"
] | permissive | UManPychron/pychron | 2fb7e479a9f492423c0f458c70102c499e1062c4 | b84c9fd70072f9cbda30abe2c471e64fe3dd75d8 | refs/heads/develop | 2022-12-03T23:32:45.579326 | 2020-01-29T19:02:20 | 2020-01-29T19:02:20 | 36,100,637 | 0 | 0 | null | 2015-05-23T00:10:06 | 2015-05-23T00:10:05 | null | UTF-8 | Python | false | false | 3,047 | py | # ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
import math
from functools import partial
# ============= local library imports ==========================
def uformat_percent_error(u, *args, **kw):
    """Percent-error string for an ``uncertainties`` value (nominal +/- std dev)."""
    from uncertainties import nominal_value, std_dev
    nominal = nominal_value(u)
    error = std_dev(u)
    return format_percent_error(nominal, error, *args, **kw)
def format_percent_error(v, e, n=2, include_percent_sign=False):
    """Format the percent error of v +/- e with ``n`` decimals.

    Returns 'NaN' when the percent error is undefined (v is zero or not
    a number); optionally appends a '%' sign.
    """
    p = calc_percent_error(v, e)
    if p == 'NaN':
        return 'NaN'
    formatted = format(p, '0.{}f'.format(n))
    if include_percent_sign:
        formatted = '{}%'.format(formatted)
    return formatted
def calc_percent_error(v, e, scale=100):
    """Return ``abs(e / v * scale)``, or the string 'NaN' when undefined
    (v is zero, or either operand is not a number)."""
    try:
        pct = abs(e / v * scale)
    except (ZeroDivisionError, TypeError):
        pct = 'NaN'
    return pct
def errorfmt(v, e):
    """Render an uncertainty as "<error> (<percent-error>%)"."""
    return '{} ({}%)'.format(floatfmt(e), format_percent_error(v, e))
def standard_sigfigsfmt(v, e):
    """Format value and error with decimals chosen from the error magnitude.

    Errors >= 1 are shown with no decimals; smaller errors get
    ceil(|log10(e)|) decimal places.  Returns a (value_str, error_str) pair.
    """
    decimals = 0
    if abs(e) < 1:
        decimals = math.ceil((abs(math.log10(e))))
    spec = '0.{}f'.format(decimals)
    return format(v, spec), format(e, spec)
def floatfmt(f, n=4, s=4, max_width=None, default='NaN', use_scientific=False):
    """Format ``f`` with ``n`` decimal places.

    Strings pass through unchanged and None becomes ``default``.  When the
    magnitude falls below 10**-n or above 10**(s+1), either scientific
    notation (``use_scientific``) is used, or - for small magnitudes - the
    decimal count is widened to the order of magnitude so the value does
    not collapse to zeros.  The result is truncated to ``max_width``
    characters when that limit is set (truthy).
    """
    if isinstance(f, str):
        return f
    if f is None:
        return default
    absf = abs(f)
    if absf < 1e-20:
        text = '0.0'
    else:
        if absf < math.pow(10, -n) or absf > math.pow(10, s + 1):
            if use_scientific:
                text = format(f, '0.{}E'.format(s))
            else:
                if absf < math.pow(10, s + 1):
                    # Small magnitude: widen precision to the exponent so
                    # significant digits survive.
                    n = int(round(abs(math.log10(absf))))
                text = format(f, '0.{}f'.format(n))
        else:
            text = format(f, '0.{}f'.format(n))
    if max_width:
        if len(text) > max_width:
            text = text[:max_width]
    return text
def pfloatfmt(**kw):
    """Return a ``floatfmt`` partial with the given keyword defaults baked in."""
    return partial(floatfmt, **kw)
# ============= EOF =============================================
| [
"jirhiker@gmail.com"
] | jirhiker@gmail.com |
68e01c21b8b43dc3c2a111b20661ff326b70db45 | d16fd8e182fb2e0997166c47432ff6dbefde3079 | /ibm_mq/tests/test_ibm_mq.py | 393f6354a99ec5de42a809556a9778a62c2afae4 | [] | permissive | CIBailey/integrations-core | 501345e4c9c164413929f61eeb27aaf948c9e43a | 2fe90fb044ff3b5db149407b5682cf2b5e0774bf | refs/heads/master | 2020-04-30T19:20:04.294666 | 2019-03-21T18:31:34 | 2019-03-21T18:31:34 | 177,035,275 | 1 | 0 | BSD-3-Clause | 2019-03-21T22:50:12 | 2019-03-21T22:50:12 | null | UTF-8 | Python | false | false | 2,774 | py | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import logging
import pytest
from datadog_checks.ibm_mq import IbmMqCheck
log = logging.getLogger(__file__)

# Metrics the check must always emit for the seeded test queues.
METRICS = [
    'ibm_mq.queue.service_interval',
    'ibm_mq.queue.inhibit_put',
    'ibm_mq.queue.depth_low_limit',
    'ibm_mq.queue.inhibit_get',
    'ibm_mq.queue.harden_get_backout',
    'ibm_mq.queue.service_interval_event',
    'ibm_mq.queue.trigger_control',
    'ibm_mq.queue.usage',
    'ibm_mq.queue.scope',
    'ibm_mq.queue.type',
    'ibm_mq.queue.depth_max',
    'ibm_mq.queue.backout_threshold',
    'ibm_mq.queue.depth_high_event',
    'ibm_mq.queue.depth_low_event',
    'ibm_mq.queue.trigger_message_priority',
    'ibm_mq.queue.depth_current',
    'ibm_mq.queue.depth_max_event',
    'ibm_mq.queue.open_input_count',
    'ibm_mq.queue.persistence',
    'ibm_mq.queue.trigger_depth',
    'ibm_mq.queue.max_message_length',
    'ibm_mq.queue.depth_high_limit',
    'ibm_mq.queue.priority',
    'ibm_mq.queue.input_open_option',
    'ibm_mq.queue.message_delivery_sequence',
    'ibm_mq.queue.retention_interval',
    'ibm_mq.queue.open_output_count',
    'ibm_mq.queue.trigger_type',
    'ibm_mq.queue.depth_percent',
    'ibm_mq.queue_manager.dist_lists',
    'ibm_mq.queue_manager.max_msg_list',
]

# Metrics that may or may not appear (broker version/configuration
# dependent, presumably) - asserted with at_least=0 below.
OPTIONAL_METRICS = [
    'ibm_mq.queue.max_channels',
    'ibm_mq.channel.batch_size',
    'ibm_mq.channel.batch_interval',
    'ibm_mq.channel.long_retry_count',
    'ibm_mq.channel.long_retry_interval',
    'ibm_mq.channel.max_message_length',
    'ibm_mq.channel.short_retry_count',
]
@pytest.mark.usefixtures("dd_environment")
def test_check(aggregator, instance, seed_data):
    """Run the check against a single configured queue and verify metrics."""
    check = IbmMqCheck('ibm_mq', {}, {})
    check.check(instance)
    _assert_metrics(aggregator)


@pytest.mark.usefixtures("dd_environment")
def test_check_pattern(aggregator, instance_pattern, seed_data):
    """Run the check with queues selected by a name pattern."""
    check = IbmMqCheck('ibm_mq', {}, {})
    check.check(instance_pattern)
    _assert_metrics(aggregator)


@pytest.mark.usefixtures("dd_environment")
def test_check_all(aggregator, instance_collect_all, seed_data):
    """Run the check with collection of all queues enabled."""
    check = IbmMqCheck('ibm_mq', {}, {})
    check.check(instance_collect_all)
    _assert_metrics(aggregator)


def _assert_metrics(aggregator):
    """Shared assertions (previously triplicated in every test): required
    METRICS must be present, OPTIONAL_METRICS are tolerated, and nothing
    else may have been emitted."""
    for metric in METRICS:
        aggregator.assert_metric(metric)
    for metric in OPTIONAL_METRICS:
        aggregator.assert_metric(metric, at_least=0)
    aggregator.assert_all_metrics_covered()
| [
"noreply@github.com"
] | CIBailey.noreply@github.com |
14805452716dfbf1e0d78d9c5e03cc883ea00a45 | e4610216140a5646e5ac6bb0b4931c198e6bbdd1 | /Day1/ex03.py | 9e64a62158eac2300b715961b2d054af5ce4e4ae | [] | no_license | kayartaya-vinod/2020_SEP_ABB_ADV_PYTHON | ce40e9e6d8664369e4e0540a4809c9579286ca09 | 79a1108ca5b1432df45f7193ea5bc878d93d1d46 | refs/heads/master | 2022-12-27T10:05:53.904914 | 2020-09-30T11:54:17 | 2020-09-30T11:54:17 | 299,226,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | from ex00 import Person
class Employee(Person):
# the inherited __init__ is hidden or overridden by the newly defined __init__
def __init__(self, **kwargs):
# in other OO languages, like c++/Java/C# automatic execution of the
# super/base class constructor takes place.
# in Python, this constructor must make an explicit call to base/super constructor
super().__init__(**kwargs)
# Person.__init__(self, **kwargs)
self.salary = kwargs.get('salary', 35000) # executing the salary.setter function
def __str__(self):
return f'Employee ({super().__str__()}, Salary={self.salary})'
@property
def salary(self): return self.__salary
@salary.setter
def salary(self, value): self.__salary = value # TBD: validation of value
def main():
e1 = Employee(name='John', age=22)
print(f'attributes in e1 are {dir(e1)}')
print(e1) # e1.__str__()
e1.__init__(name='scott', salary=22200)
print(e1)
if __name__ == '__main__':
main()
| [
"kayartaya.vinod@gmail.com"
] | kayartaya.vinod@gmail.com |
54f211040364e5c9d83d6df4722c2fd8d7fc5ab8 | 2e7aaf5367b9555554ae22338b9184c616e3f69e | /fair_coin_toss_experiment.py | c4c8e300762b05824f528c63fb1d4f43293c856d | [
"MIT"
] | permissive | noelevans/sandpit | 413ec8586811dfdb8f31f5a50ba1f72fe4a79307 | e97bd599249be3ed7c9958328e3ded0d51fa1859 | refs/heads/master | 2023-03-10T08:59:27.178057 | 2023-03-06T21:12:54 | 2023-03-06T21:12:54 | 34,741,371 | 1 | 0 | MIT | 2021-03-20T01:25:00 | 2015-04-28T16:12:01 | HTML | UTF-8 | Python | false | false | 859 | py | from __future__ import division
import numpy as np
""" Implementing a slide from a PyCon talk, Statistics for Hackers.
https://speakerdeck.com/pycon2016/jake-vanderplas-statistics-for-hackers
The question posed:
"You toss a coin 30 times and see 22 heads. Is it a fair coin"
The key line of interest is the the if statement. Notice that we count the
test as an example of the criteria if 22 heads *or more* are rolled. Not
just 22 exactly.
"""
def main():
twenty_twos = 0
tests = 100000
for _ in range(tests):
run = np.random.randint(1+1, size=30)
if run.sum() >= 22: # 22
twenty_twos += 1
print(twenty_twos / tests)
print('Reject the null hypothesis; is the coin biased?')
print('Yes!' if twenty_twos / tests < 0.05 else 'No!')
if __name__ == '__main__':
main()
| [
"noelevans@gmail.com"
] | noelevans@gmail.com |
7dd335f4f20890e829af77798c5e171300f810bd | 3c3d9cbad17627316c18b28d894f4225b17e7cf4 | /tools/builtins_templates/render.tmpl.pyi | cfa8c7e31b467ca94df42fb4c5742c30a515d587 | [
"MIT",
"CC-BY-3.0"
] | permissive | WilliamTambellini/godot-python | 77ce74c0366b7ceaa8fd0aa4e0716238cf1ae59b | 74ba482c6f06e6b8eae15386e6e21c703acdf102 | refs/heads/master | 2022-11-23T15:02:05.310388 | 2020-07-25T18:47:18 | 2020-07-25T18:47:18 | 282,775,578 | 0 | 0 | NOASSERTION | 2020-07-27T02:31:24 | 2020-07-27T02:31:23 | null | UTF-8 | Python | false | false | 1,429 | pyi | {#- `render_target` must be defined by calling context -#}
{% set py_type = render_target_to_py_type(render_target) %}
{% set gd_type = py_to_gd_type(py_type) %}
{#- Define rendering macros -#}
{% macro render_method(pyname, return_type=None, args=(), gdname=None, gdapi="10") %}
{% set gdname = gdname or pyname %}
{% set return_type = cook_return_type(return_type) %}
{% set args = cook_args(args) %}
def {{ pyname }}(self{%- if args -%},{%- endif -%}
{%- for arg in args %}
{{ arg["name"] }}: {{ arg["py_type"] }}
{%- if not arg["is_base_type"] and arg["gd_type"] != "godot_variant" %}
not None
{%- endif -%}
,
{%- endfor -%}
) -> {{ return_type["signature_type"] }}: ...
{% endmacro %}
{% macro render_operator_eq() %}
def __eq__(self, other) -> bool: ...
{% endmacro %}
{% macro render_operator_ne() %}
def __ne__(self, other) -> bool: ...
{% endmacro %}
{% macro render_operator_lt() %}
def __lt__(self, other) -> bool: ...
{% endmacro %}
{% macro render_property(pyname, type, gdname_getter, gdname_setter=None) %}
{{ pyname }}: {{ type }}
{% endmacro %}
{#- Overwrite blocks to be ignored -#}
{% block python_defs %}
pass
{% endblock %}
{% block pxd_header %}{% endblock %}
{% block pyx_header %}{% endblock %}
{% block python_consts %}{% endblock %}
{% block cdef_attributes %}{% endblock %}
{#- Now the template will be generated with the context -#}
{% extends render_target_to_template(render_target) %}
| [
"emmanuel.leblond@gmail.com"
] | emmanuel.leblond@gmail.com |
365807961d027fee5bce6f43d0db5c9f20b67479 | 4ccff4c78aac4ad934fce42637f24829bdf26be7 | /icml/master_tree.py | 83207f7cb09efe97bf1acaa9c2c4dd49d94f5703 | [
"MIT"
] | permissive | islamazhar/trees | 185ddc67f1935149414d6ae12b82205213659ad6 | 502565c5bf02503c7bece09cddd93f9368da02c3 | refs/heads/master | 2022-11-16T12:03:55.299111 | 2020-07-19T13:45:10 | 2020-07-19T13:45:10 | 268,391,749 | 0 | 0 | MIT | 2020-06-01T00:51:28 | 2020-06-01T00:51:27 | null | UTF-8 | Python | false | false | 977 | py | import numpy as np
from trees import Tree, TreeNode, TreeLeaf
import cPickle as pickle
def make_master(X, y, name):
if name == 'mnist' or name == 'iris' or name == '20news':
return make_class_tree(X, y)
if name == 'zoo':
with open('data/zoo/zoo2.tree', 'rb') as fp:
tree = pickle.load(fp)
return tree
def make_class_tree(X, y):
tree = Tree()
tree.root = TreeNode()
C = np.unique(y)
nodes = {c: TreeNode() for c in C}
for i, c in enumerate(y):
node = nodes[c]
leaf = TreeLeaf(i)
node.add_child(leaf)
for node in nodes.values():
tree.root.add_child(node)
return tree
if __name__ == "__main__":
from trees.data import load
from util import plot_tree
mnist = load('mnist')
X, y = mnist.X, mnist.y.astype(np.int)
idx = np.random.permutation(xrange(X.shape[0]))[:100]
X = X[idx]
y = y[idx]
tree = make_master(X, y, 'mnist')
plot_tree(tree)
| [
"sharad.vikram@gmail.com"
] | sharad.vikram@gmail.com |
75dce560bda8c932b19b6dee9dc2bb62e480d742 | ca42e62ce157095ace5fbaec0bf261a4fb13aa6a | /edivorce/apps/core/views/pdf.py | e5942b9fac102fc6004dcfef11301e48b7edcd0b | [
"Apache-2.0"
] | permissive | ronald-rgr/ai-chatbot-smartguide | 58f1e7c76b00248923f5fe85f87c318b45e38836 | c9c830feb6b66c2e362f8fb5d147ef0c4f4a08cf | refs/heads/master | 2021-04-18T03:15:23.720397 | 2020-03-23T17:55:47 | 2020-03-23T17:55:47 | 249,500,344 | 0 | 0 | Apache-2.0 | 2021-04-16T20:45:28 | 2020-03-23T17:35:37 | Python | UTF-8 | Python | false | false | 3,392 | py | """ Views for generated forms """
import json
from django.conf import settings
from django.http import HttpResponse
from django.template.loader import render_to_string
import requests
from ..decorators import bceid_required
from ..utils.derived import get_derived_data
from ..utils.user_response import get_responses_from_db
EXHIBITS = list('ABCDEFGHIJKLMNOPQRSTUVWXYZ'[::-1])
@bceid_required
def form(request, form_number):
""" View for rendering PDF's and previews """
responses = get_responses_from_db(request.user)
if (form_number == '1' or form_number.startswith('37') or
form_number.startswith('38')):
# Add an array of children that includes blanks for possible children
under = int(responses.get('number_children_under_19') or 0)
over = int(responses.get('number_children_under_19') or 0)
actual = json.loads(responses.get('claimant_children', '[]'))
total = len(actual)
responses['children'] = [actual[i] if i < total else {}
for i in range(0, max(under + over, total))]
if form_number == "37":
responses["which_claimant"] = 'both'
elif form_number == "37_claimant1":
form_number = "37"
responses = __add_claimant_info(responses, '_you')
responses['which_claimant'] = 'Claimant 1'
elif form_number == '37_claimant2':
form_number = '37'
responses = __add_claimant_info(responses, '_spouse')
responses['which_claimant'] = 'Claimant 2'
if form_number == "38":
responses["which_claimant"] = 'both'
elif form_number == '38_claimant1':
form_number = '38'
responses = __add_claimant_info(responses, '_you')
responses['which_claimant'] = 'Claimant 1'
elif form_number == '38_claimant2':
form_number = '38'
responses = __add_claimant_info(responses, '_spouse')
responses['which_claimant'] = 'Claimant 2'
return __render_form(request, 'form%s' % form_number, {
'css_root': settings.WEASYPRINT_CSS_LOOPBACK,
'responses': responses,
'derived': get_derived_data(responses),
'exhibits': EXHIBITS[:],
})
def __render_form(request, form_name, context):
output_as_html = request.GET.get('html', None) is not None
if output_as_html:
context['css_root'] = settings.FORCE_SCRIPT_NAME[:-1]
# render to form as HTML
rendered_html = render_to_string('pdf/' + form_name + '.html',
context=context, request=request)
# if '?html' is in the querystring, then return the plain html
if output_as_html:
return HttpResponse(rendered_html)
# post the html to the weasyprint microservice
url = settings.WEASYPRINT_URL + '/pdf?filename=' + form_name + '.pdf'
pdf = requests.post(url, data=rendered_html.encode('utf-8'))
# return the response as a pdf
response = HttpResponse(pdf, content_type='application/pdf')
response['Content-Disposition'] = 'inline;filename=' + form_name + '.pdf'
return response
def __add_claimant_info(responses, claimant):
claimant_info = {}
for key in responses:
if key.endswith(claimant):
claimant_key = key.replace(claimant, '_claimant')
claimant_info[claimant_key] = responses[key]
responses.update(claimant_info)
return responses
| [
"ronald.garcia@gmail.com"
] | ronald.garcia@gmail.com |
131d6d0f90195feb8309fe3d5371985ceac32b40 | 31e141e8929a52ae248eb020f5a769b73d6f36d8 | /lino/modlib/checkdata/fixtures/checkdata.py | 825dc20c61e076973facbd427bdb8038062b21d8 | [
"BSD-2-Clause"
] | permissive | NewRGB/lino | e07e1bb9a6c25103ac9eda9b09de36afd4e6ecbc | 43799e42107169ff173d3b8bc0324d5773471499 | refs/heads/master | 2020-09-09T18:59:15.996197 | 2019-11-12T04:50:52 | 2019-11-12T04:50:52 | 221,534,233 | 1 | 0 | BSD-2-Clause | 2019-11-13T19:20:02 | 2019-11-13T19:20:01 | null | UTF-8 | Python | false | false | 299 | py | # -*- coding: UTF-8 -*-
# Copyright 2015 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""Runs the :manage:`checkdata` management command with `--fix`
option.
"""
from django.core.management import call_command
def objects():
call_command('checkdata', fix=True)
return []
| [
"luc.saffre@gmail.com"
] | luc.saffre@gmail.com |
3283d2ba7e9fbf4a897ed18e7e8bc8ec4d73b48f | 2dde58651cf409ad28d1b9b9218a53a17fcacb26 | /convokit/model/utterance.py | 0180e3aca18bab705234622b958c0ba68692ee2a | [
"MIT"
] | permissive | KatyBlumer/Cornell-Conversational-Analysis-Toolkit | 34bae043c4a5680d508af69ce16801a2143a81f1 | 086a6ea25ada45ad70c43e01ec9f82e8dbfa16e7 | refs/heads/master | 2020-07-22T11:05:29.358854 | 2020-02-14T03:41:18 | 2020-02-14T03:41:18 | 207,177,918 | 0 | 0 | MIT | 2019-09-08T21:49:09 | 2019-09-08T21:49:09 | null | UTF-8 | Python | false | false | 2,653 | py | from typing import Dict, List, Collection, Hashable, Callable, Set, Generator, Tuple, Optional, ValuesView
from .user import User
class Utterance:
"""Represents a single utterance in the dataset.
:param id: the unique id of the utterance. Can be any hashable type.
:param user: the user giving the utterance.
:param root: the id of the root utterance of the conversation.
:param reply_to: id of the utterance this was a reply to.
:param timestamp: timestamp of the utterance. Can be any
comparable type.
:param text: text of the utterance.
:type text: str
:ivar id: the unique id of the utterance.
:ivar user: the user giving the utterance.
:ivar root: the id of the root utterance of the conversation.
:ivar reply_to: id of the utterance this was a reply to.
:ivar timestamp: timestamp of the utterance.
:ivar text: text of the utterance.
"""
def __init__(self, id: Optional[Hashable]=None, user: Optional[User]=None,
root: Optional[Hashable]=None, reply_to: Optional[Hashable]=None,
timestamp: Optional[int]=None, text: Optional[str]=None,
meta: Optional[Dict]=None):
self.id = id
self.user = user
self.root = root
self.reply_to = reply_to
self.timestamp = timestamp
self.text = text
self.meta = meta if meta is not None else {}
def get(self, key: str):
if key == "id":
return self.id
elif key == "user":
return self.user
elif key == "root":
return self.root
elif key == "reply_to":
return self.reply_to
elif key == "timestamp":
return self.timestamp
elif key == "text":
return self.text
elif key == "meta":
return self.meta
# def copy(self):
# """
# :return: A duplicate of this Utterance with the same data and metadata
# """
# return Utterance(id=self.id,
# user=self.user,
# root=self.root,
# reply_to=self.reply_to,
# timestamp=self.timestamp,
# text=self.text,
# meta=self.meta.copy())
def add_meta(self, key: Hashable, value) -> None:
"""
Add a key-value pair to the Utterance metadata
:return: None
"""
self.meta[key] = value
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return "Utterance(" + str(self.__dict__) + ")"
| [
"calebchiam@gmail.com"
] | calebchiam@gmail.com |
99dfcd1c549667547af246b081b2db3b89f1c7d7 | b120f8ec437709571dd363b5750835db6e6d4e32 | /twitter/__init__.py | 4f098cc9981f61bdabf54b21e136a9698206e5f7 | [] | no_license | pipoteam/pipobot-modules | 2b3e61542e95c0144118e56a4a6817e48d0c1822 | dcc6ec037723633f8fc314c6b7e314185e29860a | refs/heads/master | 2022-04-27T00:50:56.805541 | 2022-03-13T19:34:53 | 2022-03-13T19:34:53 | 4,283,568 | 3 | 6 | null | 2017-05-15T09:47:49 | 2012-05-10T11:22:35 | Python | UTF-8 | Python | false | false | 3,988 | py | # -*- coding: utf-8 -*-
from pipobot.lib.modules import AsyncModule, Pasteque
from pipobot.lib.utils import unescape
from twython import Twython, TwythonError
from .model import LastTweets, Tweets
URL = 'https://twitter.com'
RT = 'retweeted_status'
REPLY_NAME = 'in_reply_to_screen_name'
REPLY_TWEET = 'in_reply_to_status_id_str'
def user_url(user):
return '<a href="%s/%s">%s</a>' % (URL, user, user)
class Twitter(AsyncModule):
"""A module to follow tweets form some users"""
_config = (("users", list, []), ("app_key", str, ""), ("app_secret", str, ""),
("avoid_rt", bool, True), ("shy_start", bool, True), ("max_errors", int, 3))
def __init__(self, bot):
AsyncModule.__init__(self,
bot,
name="twitter",
desc="Displays tweets",
delay=60)
for user in self.users:
last = self.bot.session.query(LastTweets).order_by(LastTweets.last.desc()).first()
last_id = last.last if last is not None else 0
if not self.bot.session.query(LastTweets).filter(LastTweets.user == user).first():
self.bot.session.add(LastTweets(user=user, last=last_id))
self.bot.session.commit()
token = Twython(self.app_key, self.app_secret, oauth_version=2).obtain_access_token()
self.twitter = Twython(self.app_key, access_token=token)
self.err = 0
if self.shy_start:
self.action(say=False)
def action(self, say=True):
tweets = set()
def already_said(id):
if id in tweets:
return True
tweets.add(id)
q = self.bot.session.query(Tweets).filter(Tweets.id == id)
return self.bot.session.query(q.exists()).scalar()
for user in self.users:
last_tweet = self.bot.session.query(LastTweets).filter(LastTweets.user == user).first()
try:
timeline = self.twitter.get_user_timeline(screen_name=user)
except TwythonError as err:
if self.err >= self.max_errors:
raise Pasteque("TWITTER IS DOWN OMG OMG OMG\n%s" % err)
self.err += 1
return
self.err = 0
for tweet in timeline:
if tweet['id'] <= last_tweet.last:
break
if say and not (self.avoid_rt and RT in tweet and already_said(tweet[RT]['id'])):
text = tweet['text']
if RT in tweet:
fmt = u'Tweet de %s retweeté par %s : '
initial = tweet[RT][u'user'][u'screen_name']
fmt_text = fmt % (initial, user)
fmt_html = fmt % (user_url(initial), user_url(user))
text = tweet[RT]['text']
elif REPLY_NAME in tweet and tweet[REPLY_NAME] is not None and tweet[REPLY_TWEET] is not None:
fmt = u'Tweet de %s en réponse à %s : '
url_text = '%s/%s/status/%s' % (URL, tweet[REPLY_NAME], tweet[REPLY_TWEET])
url_html = '<a href="%s">%s</a>' % (url_text, tweet[REPLY_NAME])
fmt_text = fmt % (user, url_text)
fmt_html = fmt % (user_url(user), url_html)
else:
fmt = u'Tweet de %s : '
fmt_text = fmt % user
fmt_html = fmt % user_url(user)
self.bot.say({'text': fmt_text + unescape(text),
'xhtml': fmt_html + Twython.html_for_tweet(tweet)})
tweets.add(tweet['id'])
if timeline:
last_tweet.last = timeline[0]['id']
for tweet in tweets:
self.bot.session.merge(Tweets(id=tweet))
self.bot.session.commit()
| [
"guilhem.saurel@gmail.com"
] | guilhem.saurel@gmail.com |
57f671e63cea9812f85acdc16e062023c06efa55 | 10ca532599d96032aba24cfb2cf542407b0d2389 | /pylearn2/scripts/icml_2013_wrepl/black_box/learn_zca.py | 8f5aceaee06bd48f2ff2c7aba91f2c0ee7cfd1e1 | [
"BSD-3-Clause"
] | permissive | Coderx7/pylearn2 | a748a522e115c86611ab8a476075f4a9c47c5fac | 96edb376ced1b828962c749240059903686da549 | refs/heads/master | 2020-05-29T11:37:45.469205 | 2017-02-14T19:22:22 | 2017-02-14T19:22:22 | 66,489,783 | 1 | 0 | BSD-3-Clause | 2020-02-03T05:18:32 | 2016-08-24T18:34:44 | Python | UTF-8 | Python | false | false | 237 | py | from pylearn2.datasets.preprocessing import ZCA
from pylearn2.utils import serial
from black_box_dataset import BlackBoxDataset
extra = BlackBoxDataset('extra')
zca = ZCA(filter_bias=.1)
zca.fit(extra.X)
serial.save('zca.pkl', zca)
| [
"goodfellow.ian@gmail.com"
] | goodfellow.ian@gmail.com |
856a9964bbccb127acdd0865de509dd26a2b556a | 4a76ac7ad1aaeec44729ab6d5b121b1cae0d910c | /Week 1/SalesByMatch.py | 4b2ca73d4288dcf2642a84b22826b3fbd29d0e14 | [] | no_license | kalmad99/CompetitiveProgramming | 2d825e839faa9e13ef43dbb45498bd3eef6723ab | 6cbb1f12f7670d0016fa2af8f2dd597d9123070d | refs/heads/main | 2023-03-25T20:18:23.389396 | 2021-03-24T21:36:52 | 2021-03-24T21:36:52 | 325,816,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | # Complete the sockMerchant function below.
# 9
# 10 20 20 10 10 30 50 10 20
import os
def sockMerchant(n, ar):
emptyl = []
result = 0
count = 0
for i in ar:
if i in emptyl:
continue
else:
emptyl.append(i)
for j in emptyl:
count = ar.count(j)
if count%2==0:
result += count/2
else:
result += count//2
return int(result)
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input())
ar = list(map(int, input().rstrip().split()))
result = sockMerchant(n, ar)
fptr.write(str(result) + '\n')
fptr.close()
| [
"kalemesfin12@gmail.com"
] | kalemesfin12@gmail.com |
166c74052f4fdefdbd33eb3e69f815e776e5218b | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20180510/CreateCdnDeliverTaskRequest.py | c1172c6f8e8f3ba180a7ac69b9972bc493a9d39a | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 2,142 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcdn.endpoint import endpoint_data
class CreateCdnDeliverTaskRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cdn', '2018-05-10', 'CreateCdnDeliverTask')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Reports(self): # String
return self.get_body_params().get('Reports')
def set_Reports(self, Reports): # String
self.add_body_params('Reports', Reports)
def get_Schedule(self): # String
return self.get_body_params().get('Schedule')
def set_Schedule(self, Schedule): # String
self.add_body_params('Schedule', Schedule)
def get_Name(self): # String
return self.get_body_params().get('Name')
def set_Name(self, Name): # String
self.add_body_params('Name', Name)
def get_DomainName(self): # String
return self.get_body_params().get('DomainName')
def set_DomainName(self, DomainName): # String
self.add_body_params('DomainName', DomainName)
def get_Deliver(self): # String
return self.get_body_params().get('Deliver')
def set_Deliver(self, Deliver): # String
self.add_body_params('Deliver', Deliver)
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
3e2965e09f1f4be90046e654e074f3b621f3af66 | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/keyvault/azure-mgmt-keyvault/azure/mgmt/keyvault/aio/_key_vault_management_client_async.py | f8518d26e4589fdf7dcba814153a62ec3da38be8 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 9,092 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Serializer, Deserializer
from azure.profiles import KnownProfiles, ProfileDefinition
from azure.profiles.multiapiclient import MultiApiClientMixin
from ._configuration_async import KeyVaultManagementClientConfiguration
class _SDKClient(object):
def __init__(self, *args, **kwargs):
"""This is a fake class to support current implemetation of MultiApiClientMixin."
Will be removed in final version of multiapi azure-core based client
"""
pass
class KeyVaultManagementClient(MultiApiClientMixin, _SDKClient):
"""The Azure management API provides a RESTful set of web services that interact with Azure Key Vault.
This ready contains multiple API versions, to help you deal with all of the Azure clouds
(Azure Stack, Azure Government, Azure China, etc.).
By default, it uses the latest API version available on public Azure.
For production, you should stick to a particular api-version and/or profile.
The profile sets a mapping between an operation group and its API version.
The api-version parameter sets the default API version if the operation
group is not described in the profile.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param str api_version: API version to use if no profile is provided, or if
missing in profile.
:param str base_url: Service URL
:param profile: A profile definition, from KnownProfiles to dict.
:type profile: azure.profiles.KnownProfiles
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
DEFAULT_API_VERSION = '2019-09-01'
_PROFILE_TAG = "azure.mgmt.keyvault.KeyVaultManagementClient"
LATEST_PROFILE = ProfileDefinition({
_PROFILE_TAG: {
None: DEFAULT_API_VERSION,
}},
_PROFILE_TAG + " latest"
)
def __init__(
self,
credential, # type: "AsyncTokenCredential"
subscription_id, # type: str
api_version=None,
base_url=None,
profile=KnownProfiles.default,
**kwargs # type: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = KeyVaultManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
super(KeyVaultManagementClient, self).__init__(
credential,
self._config,
api_version=api_version,
profile=profile
)
@classmethod
def _models_dict(cls, api_version):
return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)}
@classmethod
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 2016-10-01: :mod:`v2016_10_01.models<azure.mgmt.keyvault.v2016_10_01.models>`
* 2018-02-14: :mod:`v2018_02_14.models<azure.mgmt.keyvault.v2018_02_14.models>`
* 2019-09-01: :mod:`v2019_09_01.models<azure.mgmt.keyvault.v2019_09_01.models>`
"""
if api_version == '2016-10-01':
from ..v2016_10_01 import models
return models
elif api_version == '2018-02-14':
from ..v2018_02_14 import models
return models
elif api_version == '2019-09-01':
from ..v2019_09_01 import models
return models
raise NotImplementedError("APIVersion {} is not available".format(api_version))
@property
def operations(self):
"""Instance depends on the API version:
* 2016-10-01: :class:`Operations<azure.mgmt.keyvault.v2016_10_01.aio.operations_async.Operations>`
* 2018-02-14: :class:`Operations<azure.mgmt.keyvault.v2018_02_14.aio.operations_async.Operations>`
* 2019-09-01: :class:`Operations<azure.mgmt.keyvault.v2019_09_01.aio.operations_async.Operations>`
"""
api_version = self._get_api_version('operations')
if api_version == '2016-10-01':
from ..v2016_10_01.aio.operations_async import Operations as OperationClass
elif api_version == '2018-02-14':
from ..v2018_02_14.aio.operations_async import Operations as OperationClass
elif api_version == '2019-09-01':
from ..v2019_09_01.aio.operations_async import Operations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def private_endpoint_connections(self):
"""Instance depends on the API version:
* 2018-02-14: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.keyvault.v2018_02_14.aio.operations_async.PrivateEndpointConnectionsOperations>`
* 2019-09-01: :class:`PrivateEndpointConnectionsOperations<azure.mgmt.keyvault.v2019_09_01.aio.operations_async.PrivateEndpointConnectionsOperations>`
"""
api_version = self._get_api_version('private_endpoint_connections')
if api_version == '2018-02-14':
from ..v2018_02_14.aio.operations_async import PrivateEndpointConnectionsOperations as OperationClass
elif api_version == '2019-09-01':
from ..v2019_09_01.aio.operations_async import PrivateEndpointConnectionsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def private_link_resources(self):
"""Instance depends on the API version:
* 2018-02-14: :class:`PrivateLinkResourcesOperations<azure.mgmt.keyvault.v2018_02_14.aio.operations_async.PrivateLinkResourcesOperations>`
* 2019-09-01: :class:`PrivateLinkResourcesOperations<azure.mgmt.keyvault.v2019_09_01.aio.operations_async.PrivateLinkResourcesOperations>`
"""
api_version = self._get_api_version('private_link_resources')
if api_version == '2018-02-14':
from ..v2018_02_14.aio.operations_async import PrivateLinkResourcesOperations as OperationClass
elif api_version == '2019-09-01':
from ..v2019_09_01.aio.operations_async import PrivateLinkResourcesOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
@property
def vaults(self):
"""Instance depends on the API version:
* 2016-10-01: :class:`VaultsOperations<azure.mgmt.keyvault.v2016_10_01.aio.operations_async.VaultsOperations>`
* 2018-02-14: :class:`VaultsOperations<azure.mgmt.keyvault.v2018_02_14.aio.operations_async.VaultsOperations>`
* 2019-09-01: :class:`VaultsOperations<azure.mgmt.keyvault.v2019_09_01.aio.operations_async.VaultsOperations>`
"""
api_version = self._get_api_version('vaults')
if api_version == '2016-10-01':
from ..v2016_10_01.aio.operations_async import VaultsOperations as OperationClass
elif api_version == '2018-02-14':
from ..v2018_02_14.aio.operations_async import VaultsOperations as OperationClass
elif api_version == '2019-09-01':
from ..v2019_09_01.aio.operations_async import VaultsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
async def close(self):
await self._client.close()
async def __aenter__(self):
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details):
await self._client.__aexit__(*exc_details)
| [
"noreply@github.com"
] | YijunXieMS.noreply@github.com |
99adeed36d9009c0fdcbc3c4d52457f5f704a9a0 | e42ab858ae8161aa2a0f691d03878e8f674f4ad6 | /svca_limix/limix/hcache/__init__.py | 98cdc3661c4fea8396ea3f878dfbae6e4ba648fa | [
"Apache-2.0"
] | permissive | damienArnol/svca | 3d597a55d7cf92caae0338ca074fd128ab3a97a4 | 1454c908b360a6118c2384c675aecdb9de7fca2e | refs/heads/master | 2021-04-28T06:47:33.904261 | 2019-01-16T00:29:44 | 2019-01-16T00:29:44 | 122,208,856 | 15 | 10 | Apache-2.0 | 2018-11-07T14:42:31 | 2018-02-20T14:28:29 | C++ | UTF-8 | Python | false | false | 56 | py | from ._hcache import Cached
from ._hcache import cached
| [
"damien.arnol@gmail.com"
] | damien.arnol@gmail.com |
84b81d5d7f7cb356e4f0b46b571e0a5e4fe4eea9 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2114/61053/281233.py | 919219fd9ce722f2f5cba1d774979a7bc09062df | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 368 | py | def minSum(n):
lst = []
lst.append(0)
for i in range(1,n+1):
for j in range(1,i+1):
if i - j**2 >= 0:
if j == 1:
lst.append(lst[i-1]+1)
else:
lst[i] = min(lst[i],lst[i-j**2]+1)
return lst[n]
if __name__ == "__main__":
n = int(input())
print(minSum(n)) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
fb566f65414eca423c9aa729951f0a5a47e2a081 | 72167bc16ffc678e5270dc83000013158d69aaf1 | /Chapter05/5.4.3.py | b2c941ecbc32e30f8281eeceff08a270f27d430f | [] | no_license | OSYouth/PythonCrashCourse | 6e37b53b5a83705db3366543b9ab80ef5dd4e3b7 | bbb88a087f489baa535d2b1477c08d023ae5b913 | refs/heads/master | 2021-08-22T17:35:27.702205 | 2021-04-16T13:01:16 | 2021-04-16T13:01:16 | 249,334,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 428 | py | available_toppings = ['mushrooms', 'olives', 'green peppers', 'pepperoni', 'pineapple', 'extra cheese']
requested_toppings = ['mushrooms', 'french fries', 'extra cheese']
for requested_topping in requested_toppings:
if requested_topping in available_toppings:
print("Adding " + requested_topping + ".")
else:
print("Sorry, we don't have " + requested_topping + ".")
print("\nFinished making your pizza!") | [
"benleolee@163.com"
] | benleolee@163.com |
7ce555ca57ff028ac036d84ed81059373b3fe342 | 07a0eac02cc8ecc079f3527c4615a8e57d565d61 | /SimPy/InOutFunctions.py | d252a7bdd8f1f35d78798149da1337455f873f37 | [] | no_license | yaesoubilab/SimPy | 688933070f25f487586c8af13fb8af2fcc209189 | 2c456d92741ba751af6bc90c49608b49f524182d | refs/heads/main | 2022-06-13T02:43:30.330488 | 2022-04-27T00:20:10 | 2022-04-27T00:20:10 | 117,696,850 | 6 | 7 | null | null | null | null | UTF-8 | Python | false | false | 8,466 | py | import csv
import os
import numpy as numpy
def make_directory(filename):
"""
creates a directory to save the file for which the filename is provided
:param filename: (string) name of the file for which the directory should be created for
"""
# get directory
directory_path = os.path.dirname(filename)
# create the directory if does not exist
if directory_path != '':
if not os.path.exists(directory_path):
os.makedirs(directory_path)
def write_csv(rows, file_name='csvfile.csv', delimiter=',', directory='', delete_existing_files=False):
    """ write a list to a csv file
    :param rows: list of lists to be imported to the csv file
    :param file_name: the file name to be given to the csv file
    :param delimiter: to separate by comma, use ',' and by tab, use '\t'
    :param directory: directory (relative to the current root) where the files should be located
        for example use 'Example' to create and save the csv file under the folder Example
    :param delete_existing_files: set to True to delete the existing csv files in the specified directory
    """
    # paths are interpreted relative to the current root, so drop a leading '/'
    if file_name[0] == '/':
        file_name = file_name[1:]
    if len(directory) > 0 and directory[0] == '/':
        directory = directory[1:]

    file_name = os.path.join(directory, file_name)
    directory_path = os.path.dirname(file_name)

    # delete existing csv files first if requested
    if delete_existing_files:
        delete_files(extension='.csv', path=os.getcwd() + '/' + directory)

    # create the directory if it does not exist
    if directory_path != '':
        if not os.path.exists(directory_path):
            os.makedirs(directory_path)

    # the 'with' block closes the file automatically; the explicit
    # file.close() in the original was redundant and has been removed
    with open(file_name, "w", newline='') as file:
        csv_file = csv.writer(file, delimiter=delimiter)
        for row in rows:
            csv_file.writerow(row)
def write_columns_to_csv(cols, file_name='csvfile.csv', delimiter=',', directory='', delete_existing_files=False):
    """ write a list of columns to a csv file
    :param cols: list of columns to be imported to the csv file
    :param file_name: the file name to be given to the csv file
    :param delimiter: to separate by comma, use ',' and by tab, use '\t'
    :param directory: directory (relative to the current root) where the files should be located
        for example use 'Example' to create and save the csv file under the folder Example
    :param delete_existing_files: set to True to delete the existing trace files in the specified directory
    """
    # transpose the columns into rows and delegate to write_csv
    rows = _cols_to_rows(cols=cols)
    write_csv(rows=rows,
              file_name=file_name,
              delimiter=delimiter,
              directory=directory,
              delete_existing_files=delete_existing_files)
def write_dictionary_to_csv(dictionary, file_name='csvfile.csv', directory=''):
    """
    :param dictionary: a dictionary of lists with keys to be csv headers
    :param file_name: filename to save the dictionary as
    :param directory: directory (relative to the current root) for the file
    """
    # each key becomes a column header followed by that key's values
    cols = [[key] + list(values) for key, values in dictionary.items()]
    write_columns_to_csv(cols=cols, file_name=file_name, directory=directory)
def read_csv_rows(file_name, if_ignore_first_row, delimiter=',', if_convert_float=False):
    """ reads the rows of a csv file
    :param file_name: the csv file name
    :param if_ignore_first_row: set true to ignore the first row
    :param delimiter: to separate by comma, use ',' and by tab, use '\t'
    :param if_convert_float: set true to convert row values to numbers (otherwise, the values are stored as string)
    :returns a list containing the rows of the csv file
    """
    # the 'with' statement closes the file; the original's extra
    # file.close() call after the block was redundant and removed
    with open(file_name, "r") as file:
        csv_file = csv.reader(file, delimiter=delimiter)
        rows = _csv_file_to_rows(csv_file=csv_file,
                                 if_del_first_row=if_ignore_first_row)

        # convert row values to float if needed
        if if_convert_float:
            for i in range(len(rows)):
                rows[i] = _convert_to_float(rows[i])

    return rows
def read_csv_cols(file_name, if_ignore_first_row, n_cols=None, delimiter=',', if_convert_float=False):
    """ reads the columns of a csv file
    :param file_name: the csv file name
    :param if_ignore_first_row: set True to ignore the first row
    :param n_cols: number of columns in the csv file (defaults to the length of the first row)
    :param delimiter: to separate by comma, use ',' and by tab, use '\t'
    :param if_convert_float: set True to convert column values to numbers
    :returns a list containing the columns of the csv file
    """
    # the 'with' statement closes the file; the original's extra
    # file.close() call after the block was redundant and removed
    with open(file_name, "r", encoding='utf-8', errors='ignore') as file:
        csv_file = csv.reader(file, delimiter=delimiter)
        rows = _csv_file_to_rows(csv_file=csv_file,
                                 if_del_first_row=if_ignore_first_row)

        if n_cols is None:
            n_cols = len(rows[0])
        cols = _rows_to_cols(rows=rows, n_cols=n_cols)

        # convert column values to float if needed
        if if_convert_float:
            for j in range(n_cols):
                cols[j] = _convert_to_float(cols[j])

    return cols
def read_csv_cols_to_dictionary(file_name, delimiter=',', if_convert_float=False):
    """ reads a csv file into a dictionary mapping each column header to the
    list of that column's values
    :param file_name: the csv file name
    :param delimiter: to separate by comma, use ',' and by tab, use '\t'
    :param if_convert_float: set True to convert column values to numbers
    :raises ValueError: if two columns share the same header
    :returns dict mapping header -> list of column values
    """
    # read the header row inside a 'with' block so the handle is closed even
    # on error (the original opened/closed it manually and leaked on failure)
    with open(file_name, "r", encoding='utf-8', errors='ignore') as csv_file:
        col_headers = next(csv.reader(csv_file, delimiter=delimiter))
    n_cols = len(col_headers)

    cols = read_csv_cols(
        file_name,
        n_cols=n_cols,
        if_ignore_first_row=True,
        delimiter=delimiter,
        if_convert_float=if_convert_float)

    # add columns to the dictionary
    dict_of_columns = dict()
    for j, col in enumerate(cols):
        if col_headers[j] in dict_of_columns:
            raise ValueError("Key '{}' already exists in the dictionary of parameters.".format(col_headers[j]))
        dict_of_columns[col_headers[j]] = col

    return dict_of_columns
def delete_file(file_name):
    """Remove the file at the given path (raises OSError if it is missing)."""
    os.remove(file_name)
def delete_files(extension='.txt', path='..'):
    """ delete every file with the specified extension inside the directory
    :param extension: (string) extension of the files to be removed
    :param path: (string) path (relative to the current root) where the files are located
        (the folder should already exist) use os.getcwd() to get the working directory
    """
    try:
        for entry in os.listdir(path):
            if entry.endswith(extension):
                os.remove(os.path.join(path, entry))
    except FileNotFoundError:
        # a missing directory simply means there is nothing to delete
        pass
def _csv_file_to_rows(csv_file, if_del_first_row):
# read rows
rows = []
for i, row in enumerate(csv_file):
rows.append(row)
# delete the first row if needed
if if_del_first_row:
del rows[0]
return rows
def _rows_to_cols(rows, n_cols):
# initialize the list to store column values
cols = []
for j in range(0, n_cols):
cols.append([])
# read columns
for row in rows:
if len(row) != n_cols:
raise ValueError('All rows should have the same length.')
for j in range(0, n_cols):
cols[j].append(row[j])
return cols
def _cols_to_rows(cols):
# find the size of the largest column
size_of_largest_column = 0
for col in cols:
size = len(col)
if size > size_of_largest_column:
size_of_largest_column = size
# add None values to the columns smaller than the largest column
for col in cols:
if size_of_largest_column > len(col):
col.extend([None]*(size_of_largest_column-len(col)))
# initialize rows
rows = []
for i in range(0, size_of_largest_column):
rows.append([])
# populate rows
for col in cols:
for i, val in enumerate(col):
rows[i].append(val)
return rows
def _convert_to_float(list_of_objs):
try:
results = numpy.array(list_of_objs).astype(float)
except:
results = []
for i in range(len(list_of_objs)):
try:
x = float(list_of_objs[i])
except:
if list_of_objs[i] in ('N/A', 'None', 'none', ''):
x = None
else:
x = list_of_objs[i]
results.append(x)
return results
| [
"reza.yaesoubi@yale.edu"
] | reza.yaesoubi@yale.edu |
07eae20cb6635e30f8d0e94a525a5b351d928f79 | ee8c4c954b7c1711899b6d2527bdb12b5c79c9be | /assessment2/amazon/run/core/controllers/call.py | e0f9b7714730874a42c41006d2e199a47dc7723c | [] | no_license | sqlconsult/byte | 02ac9899aebea4475614969b594bfe2992ffe29a | 548f6cb5038e927b54adca29caf02c981fdcecfc | refs/heads/master | 2021-01-25T14:45:42.120220 | 2018-08-11T23:45:31 | 2018-08-11T23:45:31 | 117,135,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | #!/usr/bin/env python3
from flask import Blueprint, Flask, render_template, request, url_for
# Blueprint mounted under /call; routes are registered on `controller`.
controller = Blueprint('call', __name__, url_prefix='/call')

# TODO(review): dead scaffolding for a /call/<title> route below — either
# implement lookup() or delete this commented-out block.
# @controller.route('/<string:title>', methods=['GET'])
# def lookup(title):
#     if title == 'Republic': # TODO 2
#         return render_template('republic.html') # TODO 2
#     else:
#         pass
| [
"sqlconsult@hotmail.com"
] | sqlconsult@hotmail.com |
542bf395f97e2cd0e31c965ff605355d11844823 | 5a9d8c64c6478f3816b63f59f1cdaca73c0848eb | /Base/ex10/zy2_myfac.py | 657a9b0e2b55ce1ba2bb48adc87931f48d69de85 | [] | no_license | wangredfei/nt_py | f68134977e6d1e05cf17cec727644509f084c462 | fedf03c0d52565f588e9b342d1c51df0b6dc2681 | refs/heads/master | 2020-04-08T07:55:08.302589 | 2018-11-23T09:53:48 | 2018-11-23T09:53:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py |
# 2. 给出一个数n,写一个函数myfac来计算n!(n的阶乘)
# n! = 1 * 2 * 3 * ... * n
# print(myfac(5)) # 120
def myfac(n):
    """Return n! (the factorial of n), computed iteratively; 0! == 1."""
    result = 1
    # multiplying from 2 upward; 1 is the multiplicative identity
    for factor in range(2, n + 1):
        result *= factor
    return result
print(myfac(5)) | [
"289498360@qq.com"
] | 289498360@qq.com |
3fdf966b91d3d8fcc8fcb14fcfcbf40710556b46 | 1958631675a1eda1c2c5a018b636cb1117d90d9e | /0x0A-python-inheritance/10-square.py | 8788e4d9e7980b491126277e76078d9cf55b34e0 | [] | no_license | Valinor13/holbertonschool-higher_level_programming | f32d6507546584c3af59e3e8ece345af70a698d6 | 5fad6ea9f28f845820b5a893feb20e83ed3fe7b4 | refs/heads/main | 2023-06-05T21:15:29.774676 | 2021-06-27T23:08:54 | 2021-06-27T23:08:54 | 361,808,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | #!/usr/bin/python3
"""A module containing the square class inherited from Rectangle"""
Rectangle = __import__('9-rectangle').Rectangle
class Square(Rectangle):
    """A class inherited from rectangle that deals with squares"""

    def __init__(self, size):
        """Validate size, store it privately and initialize the base.

        Args:
            size (int): length of each side; validated by the parent's
                integer_validator before any state is stored.
        """
        super().integer_validator("size", size)
        self.__size = size
        super().__init__(size, size)

    def area(self):
        """Return the area of the square (size * size)."""
        return self.__size * self.__size
| [
"jwcalhoun2@gmail.com"
] | jwcalhoun2@gmail.com |
785bc2ace46161ddefd564e79c2b6b21fc34a99e | c6ad3f65da16e714619bb62b27565efc00364c1c | /work-packages/f5500/python/f5500/CiscoIOSXRCryptoMibsIpsecflowmonCfg_ns.py | 881f7c8bb924a008aa386b5a48312eecf1fe3276 | [] | no_license | janlindblad/eantc20 | c0c74a1e6476ca5e5a2906f18472259d8a515b21 | e9051c77de16578c97dcf1f7ecb0a5ef30ad2e68 | refs/heads/master | 2020-12-21T19:09:36.076797 | 2020-03-17T18:17:04 | 2020-03-17T18:17:04 | 236,523,910 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | #
# BEWARE BEWARE BEWARE BEWARE BEWARE BEWARE BEWARE BEWARE BEWARE
# This file has been auto-generated by the confdc compiler.
# Source: ../load-dir/Cisco-IOS-XR-crypto-mibs-ipsecflowmon-cfg.fxs
# BEWARE BEWARE BEWARE BEWARE BEWARE BEWARE BEWARE BEWARE BEWARE
#
# Autogenerated namespace class for YANG module Cisco-IOS-XR-crypto-mibs-ipsecflowmon-cfg.yang
class ns(object):
    """Auto-generated namespace bindings for the
    Cisco-IOS-XR-crypto-mibs-ipsecflowmon-cfg YANG module.

    Do not hand-edit; regenerate with confdc from the .fxs source.
    Each node is exposed as a hash constant plus (suffixed with '_')
    its YANG node name.
    """
    hash = 1114965550
    id = "_f5500-nc-1.0:f5500-nc-1.0#http://cisco.com/ns/yang/Cisco-IOS-XR-crypto-mibs-ipsecflowmon-cfg"
    uri = "_f5500-nc-1.0:f5500-nc-1.0#http://cisco.com/ns/yang/Cisco-IOS-XR-crypto-mibs-ipsecflowmon-cfg"
    prefix = "Cisco-IOS-XR-crypto-mibs-ipsecflowmon-cfg"
    Cisco_IOS_XR_crypto_mibs_ipsecflowmon_cfg_ip_sec = 1865369357
    Cisco_IOS_XR_crypto_mibs_ipsecflowmon_cfg_ip_sec_ = "ip-sec"
    Cisco_IOS_XR_crypto_mibs_ipsecflowmon_cfg_tunnel_stop = 516899736
    Cisco_IOS_XR_crypto_mibs_ipsecflowmon_cfg_tunnel_stop_ = "tunnel-stop"
    Cisco_IOS_XR_crypto_mibs_ipsecflowmon_cfg_tunnel_start = 1619857664
    Cisco_IOS_XR_crypto_mibs_ipsecflowmon_cfg_tunnel_start_ = "tunnel-start"
    Cisco_IOS_XR_crypto_mibs_ipsecflowmon_cfg_isakmp = 1297549514
    Cisco_IOS_XR_crypto_mibs_ipsecflowmon_cfg_isakmp_ = "isakmp"
| [
"jlindbla@cisco.com"
] | jlindbla@cisco.com |
d10e8caa2292cc529759d48213936b44634fb90e | abcf63644ff1f4767f81261f8bc32efb91767771 | /zuoye3/ZUOYE.PY | 8c88abc6faf459d15562fa92e6d67032322f9774 | [] | no_license | Ran-oops/python | cf296711a01c1a01e2794e2310447a5684181f53 | 5ba09234120084f4ea7b13a4945935d38f356dea | refs/heads/master | 2020-11-29T21:18:58.391800 | 2019-12-27T10:52:17 | 2019-12-27T10:52:17 | 230,217,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | import os
# Interactive exercise: create / remove / rename directories in the working
# directory.  Prompts and messages are Chinese and kept verbatim.
# BUG FIXES: two bare getcwd() calls (in the 'rm' and 'n' branches) raised
# NameError at runtime — they are now os.getcwd().  The local variable 're'
# shadowed the stdlib module name and was renamed to new_name.
os.chdir(r'C:\Users\Acer\Desktop\zuoye3')
print(os.getcwd())
list1 = os.listdir(os.getcwd())
print(list1)
while True:
    handle_file = input('请输入接下来的操作:m:创建文件;rm:删除文件;n:修改文件名;e:退出\n')
    if handle_file == 'm':
        mk_file = input('请输入需要创建的文件夹名称\n')
        os.mkdir(mk_file)
        if mk_file in os.listdir(os.getcwd()):
            print('创建成功')
    elif handle_file == 'rm':
        rm_file = input('请输入您要删除的文件夹名称:\n')
        if rm_file in os.listdir(os.getcwd()):
            os.rmdir(rm_file)
            # was: os.listdir(getcwd()) -> NameError
            if rm_file not in os.listdir(os.getcwd()):
                print('恭喜您,删除成功!')
        else:
            print('您输入的文件夹不存在!')
    elif handle_file == 'n':
        res = input('请输入需要修改的文件:\n')
        # was: os.listdir(getcwd()) -> NameError
        if res in os.listdir(os.getcwd()):
            new_name = input('请输入需要改成的名字!\n')
            os.rename(res, new_name)
            if new_name in os.listdir(os.getcwd()):
                print('修改成功!')
        else:
            print('您输入的文件不存在!')
    elif handle_file == 'e':
        exit()
    else:
        print('输入有误,请按提示输入内容,谢谢!')
| [
"18291893776@139.com"
] | 18291893776@139.com |
c689ff58ee57c2c81dfde575173f89051306f798 | 3ae62276c9aad8b9612d3073679b5cf3cb695e38 | /easyleetcode/leetcodes/Leetcode_005_Longest_Palindromic_Substring.py | dd29056683ec824c6e618d3d9a6846c6283a2d06 | [
"Apache-2.0"
] | permissive | gongtian1234/easy_leetcode | bc0b33c3c4f61d58a6111d76707903efe0510cb4 | d2b8eb5d2cafc71ee1ca633ce489c1a52bcc39ce | refs/heads/master | 2022-11-16T17:48:33.596752 | 2020-07-13T02:55:03 | 2020-07-13T02:55:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,499 | py | # 1穷举
class Solution:
    """Longest palindromic substring by brute-force enumeration (O(n^3))."""

    def str(self, s):
        """Return the longest palindromic substring of s ('' for empty input)."""
        if not s:
            return ''
        best = ''
        n = len(s)
        # enumerate every substring, keeping the first strictly-longest palindrome
        for start in range(n):
            for stop in range(start + 1, n + 1):
                candidate = s[start:stop]
                if len(candidate) > len(best) and self.isPalindrome(candidate):
                    best = candidate
        return best

    def isPalindrome(self, s):
        """Return True when s is a non-empty palindrome."""
        if not s:
            return False
        return s == s[::-1]
class Solution2:
    """Longest palindromic substring via expand-around-center (O(n^2))."""

    def str(self, s):
        """Return the longest palindromic substring of s (s itself if None/empty)."""
        if s is None or len(s) == 0:
            return s
        best = ''
        for center in range(len(s)):
            # odd-length candidate centered at `center`, then even-length
            # candidate centered between `center` and `center + 1`
            for candidate in (self.pali(s, center, center),
                              self.pali(s, center, center + 1)):
                if len(candidate) > len(best):
                    best = candidate
        return best

    def pali(self, s, l, r):
        """Expand outward from s[l..r] while the span stays a palindrome."""
        lo, hi = l, r
        while lo >= 0 and hi < len(s) and s[lo] == s[hi]:
            lo -= 1
            hi += 1
        # the last expansion step overshot by one on each side
        return s[lo + 1:hi]
print('<iframe src="//player.bilibili.com/player.html?aid=968696647&bvid=BV1Rp4y1U7ag&cid=208409065&page=1" scrolling="no" border="0" frameborder="no" framespacing="0" allowfullscreen="true"> </iframe>')
if __name__ == '__main__':
    # Manual smoke test; the brute-force Solution should print 'cdzdc'.
    s = Solution()
    # s2 = Solution2()
    print(s.str('abcdzdcab'))
    # print(s2.str('abcdzdcab111'))
| [
"425776024@qq.com"
] | 425776024@qq.com |
a3435a2e629040cfcb20a64c892e352dade4cde3 | a6dd8fd9b2e991fc11215941bc72b172f8521528 | /src/pythonbrasil2014/config.py | 4cca6178da1fea70ba4bf66803f5105b56a48fed | [] | no_license | pythonbrasil/pythonbrasil2014 | e36da54c91bf3ece2ff6fc4de566d7a8ab1f83cc | 050acc931470f29781ed2c0be7277faab1224b54 | refs/heads/master | 2021-01-23T20:50:00.500865 | 2013-11-12T15:48:18 | 2013-11-12T15:48:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 57 | py | # -*- coding:utf-8 -*-
# NOTE(review): package identifier constant; presumably used when registering
# this package's install profiles — confirm against the package's ZCML/setup.
PROJECTNAME = 'pythonbrasil2014'
| [
"erico@simplesconsultoria.com.br"
] | erico@simplesconsultoria.com.br |
dc3ce13c9c1b5aa4cbcaf2e9b393ddfacadc689b | dd80a584130ef1a0333429ba76c1cee0eb40df73 | /external/chromium_org/ppapi/native_client/src/untrusted/pnacl_support_extension/pnacl_component_crx_gen.py | 3d35b3f8fbdfedaec21585f80e1c716db66bd3a1 | [
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-khronos"
] | permissive | karunmatharu/Android-4.4-Pay-by-Data | 466f4e169ede13c5835424c78e8c30ce58f885c1 | fcb778e92d4aad525ef7a995660580f948d40bc9 | refs/heads/master | 2021-03-24T13:33:01.721868 | 2017-02-18T17:48:49 | 2017-02-18T17:48:49 | 81,847,777 | 0 | 2 | MIT | 2020-03-09T00:02:12 | 2017-02-13T16:47:00 | null | UTF-8 | Python | false | false | 13,105 | py | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script lays out the PNaCl translator files for a
normal Chrome installer, for one platform. Once run num-of-arches times,
the result can then be packed into a multi-CRX zip file.
This script depends on and pulls in the translator nexes and libraries
from the toolchain directory (so that must be downloaded first) and
it depends on the pnacl_irt_shim.
"""
import json
import logging
import optparse
import os
import platform
import re
import shutil
import sys
J = os.path.join
######################################################################
# Target arch and build arch junk to convert between all the
# silly conventions between SCons, Chrome and PNaCl.
# The version of the arch used by NaCl manifest files.
# This is based on the machine "building" this extension.
# We also used this to identify the arch-specific different versions of
# this extension.
def CanonicalArch(arch):
  """Normalize the many arch spellings (SCons/Chrome/PNaCl) to the manifest
  form: 'x86-64', 'x86-32' or 'arm'.  Returns None when unrecognized."""
  if arch in ('x86_64', 'x86-64', 'x64', 'amd64'):
    return 'x86-64'
  # TODO(jvoung): be more specific about the arm architecture version?
  if arch in ('arm', 'armv7'):
    return 'arm'
  if arch in ('x86_32', 'x86-32', 'ia32', 'x86') or re.match('^i.86$', arch):
    return 'x86-32'
  return None
def GetBuildArch():
  """Return the canonical arch of the machine running this script."""
  return CanonicalArch(platform.machine())
BUILD_ARCH = GetBuildArch()
ARCHES = ['x86-32', 'x86-64', 'arm']
def IsValidArch(arch):
  """Return True when `arch` is one of the canonical names in ARCHES."""
  return arch in ARCHES
# The version of the arch used by configure and pnacl's build.sh.
def StandardArch(arch):
  """Translate a canonical arch name into the spelling used by configure
  and pnacl's build.sh (raises KeyError for unknown arches)."""
  mapping = {'x86-32': 'i686',
             'x86-64': 'x86_64',
             'arm': 'armv7'}
  return mapping[arch]
######################################################################
def GetNaClRoot():
  """ Find the native_client path, relative to this script.
      This script is in ppapi/... and native_client is a sibling of ppapi.
      Walks up from this file's path; raises Exception when no 'ppapi'
      ancestor with a 'native_client' sibling exists.
  """
  script_file = os.path.abspath(__file__)
  path = script_file
  while True:
    if path.endswith('ppapi'):
      sibling = os.path.join(os.path.dirname(path), 'native_client')
      if not os.path.isdir(sibling):
        raise Exception('Could not find native_client relative to %s' %
                        script_file)
      return sibling
    parent = os.path.dirname(path)
    # dirname() is a fixed point at the filesystem root -> give up
    if parent == path:
      raise Exception('Could not find native_client relative to %s' %
                      script_file)
    path = parent
NACL_ROOT = GetNaClRoot()
######################################################################
# Normalize the platform name to be the way SCons finds chrome binaries.
# This is based on the platform "building" the extension.
def GetBuildPlatform():
  """Map sys.platform to the platform name SCons uses for chrome binaries:
  'mac', 'linux' or 'windows'.  Raises Exception for anything else."""
  if sys.platform == 'darwin':
    return 'mac'
  if sys.platform.startswith('linux'):
    return 'linux'
  if sys.platform in ('cygwin', 'win32'):
    return 'windows'
  raise Exception('Unknown platform: %s' % sys.platform)
BUILD_PLATFORM = GetBuildPlatform()
def DetermineInstallerArches(target_arch):
  """Map a requested target_arch onto the list of arches to package.

  On Windows both x86 flavors ship together (assuming non-windows RT);
  elsewhere a single arch.  Raises Exception for unknown arches.
  """
  arch = CanonicalArch(target_arch)
  if not IsValidArch(arch):
    raise Exception('Unknown target_arch %s' % target_arch)
  if BUILD_PLATFORM != 'windows':
    return [arch]
  if arch.startswith('x86'):
    return ['x86-32', 'x86-64']
  raise Exception('Unknown target_arch on windows w/ target_arch == %s' %
                  target_arch)
######################################################################
class PnaclPackaging(object):
  """Knows where the pnacl.json template and TOOL_REVISIONS file live and
  how to emit the per-arch pnacl.json manifest."""

  package_base = os.path.dirname(__file__)

  # File paths that are set from the command line.
  pnacl_template = None
  tool_revisions = None

  # Agreed-upon name for pnacl-specific info.
  pnacl_json = 'pnacl.json'

  @staticmethod
  def SetPnaclInfoTemplatePath(path):
    """Record the location of the pnacl.json template file."""
    PnaclPackaging.pnacl_template = path

  @staticmethod
  def SetToolsRevisionPath(path):
    """Record the location of NaCl's TOOL_REVISIONS file."""
    PnaclPackaging.tool_revisions = path

  @staticmethod
  def PnaclToolsRevision():
    """Derive a CWS-style version quad '0.1.<hi>.<lo>' from PNACL_VERSION.

    CWS happens to use version quads, but each component is limited to
    64K, so the revision is split across the last two components.
    """
    with open(PnaclPackaging.tool_revisions, 'r') as f:
      for line in f.read().splitlines():
        if not line.startswith('PNACL_VERSION'):
          continue
        _, value = line.split('=')
        max_version = 2 ** 16
        version = int(value)
        return '0.1.%d.%d' % (version // max_version, version % max_version)
    raise Exception('Cannot find PNACL_VERSION in TOOL_REVISIONS file: %s' %
                    PnaclPackaging.tool_revisions)

  @staticmethod
  def GeneratePnaclInfo(target_dir, abi_version, arch):
    """Write the pnacl.json manifest (whitelisted filename) into target_dir.

    Note: pnacl-version tracks the translator build revision, while
    abi_version is bumped when the NaCl sandbox ABI actually changes.
    """
    pnacl_version = PnaclPackaging.PnaclToolsRevision()
    with open(PnaclPackaging.pnacl_template, 'r') as template_fd:
      info = json.load(template_fd)
    out_name = J(target_dir, UseWhitelistedChars(PnaclPackaging.pnacl_json,
                                                 None))
    with open(out_name, 'w') as output_fd:
      info['pnacl-arch'] = arch
      info['pnacl-version'] = pnacl_version
      json.dump(info, output_fd, sort_keys=True, indent=4)
######################################################################
class PnaclDirs(object):
  """Central knowledge of toolchain input paths and packaging output paths."""
  toolchain_dir = J(NACL_ROOT, 'toolchain')
  output_dir = J(toolchain_dir, 'pnacl-package')

  @staticmethod
  def TranslatorRoot():
    """Root of the downloaded pnacl_translator toolchain tarball."""
    return J(PnaclDirs.toolchain_dir, 'pnacl_translator')

  @staticmethod
  def LibDir(target_arch):
    """Native libraries shipped with the translator for target_arch."""
    return J(PnaclDirs.TranslatorRoot(), 'lib-%s' % target_arch)

  @staticmethod
  def SandboxedCompilerDir(target_arch):
    """bin/ directory holding the sandboxed llc/ld nexes for target_arch."""
    return J(PnaclDirs.toolchain_dir,
             'pnacl_translator', StandardArch(target_arch), 'bin')

  @staticmethod
  def SetOutputDir(d):
    """Override the packaging output root (from the --dest flag)."""
    PnaclDirs.output_dir = d

  @staticmethod
  def OutputDir():
    return PnaclDirs.output_dir

  @staticmethod
  def OutputAllDir(version_quad):
    """Output dir for the universal (all-arch) bundle of a given version."""
    return J(PnaclDirs.OutputDir(), version_quad)

  @staticmethod
  def OutputArchBase(arch):
    return '%s' % arch

  @staticmethod
  def OutputArchDir(arch):
    """Return (parent_dir, nested_dir) for one arch's output."""
    # Nest this in another directory so that the layout will be the same
    # as the "all"/universal version.
    parent_dir = J(PnaclDirs.OutputDir(), PnaclDirs.OutputArchBase(arch))
    return (parent_dir, J(parent_dir, PnaclDirs.OutputArchBase(arch)))
######################################################################
def StepBanner(short_desc, long_desc):
  """Log a prominent '**** STEP<tab>details' banner for a packaging step."""
  logging.info("**** %s\t%s", short_desc, long_desc)
def Clean():
  """Delete the previous packaging output directory, if there is one."""
  out_dir = PnaclDirs.OutputDir()
  StepBanner('CLEAN', 'Cleaning out old packaging: %s' % out_dir)
  if not os.path.isdir(out_dir):
    logging.info('Clean skipped -- no previous output directory!')
    return
  shutil.rmtree(out_dir)
######################################################################
def UseWhitelistedChars(orig_basename, arch):
  """ Make the filename match the pattern expected by nacl_file_host.
  Currently, this assumes there is prefix "pnacl_public_" and
  that the allowed chars are in the set [a-zA-Z0-9_].
  """
  if arch:
    tagged = 'pnacl_public_%s_%s' % (arch, orig_basename)
  else:
    tagged = 'pnacl_public_%s' % orig_basename
  # squash every disallowed character down to '_'
  sanitized = re.sub(r'[^a-zA-Z0-9_]', '_', tagged)
  logging.info('UseWhitelistedChars using: %s' % sanitized)
  return sanitized
def CopyFlattenDirsAndPrefix(src_dir, arch, dest_dir):
  """ Copy files from src_dir to dest_dir.
  When copying, also rename the files such that they match the white-listing
  pattern in chrome/browser/nacl_host/nacl_file_host.cc.
  """
  for root, _dirs, files in os.walk(src_dir, followlinks=True):
    for basename in files:
      # Assume a flat directory.
      assert (basename == os.path.basename(basename))
      shutil.copy(J(root, basename),
                  J(dest_dir, UseWhitelistedChars(basename, arch)))
def BuildArchForInstaller(version_quad, arch, lib_overrides):
  """ Build an architecture specific version for the chrome installer.
  Copies the sandboxed compiler nexes, the native libraries and any
  command-line override libraries into the output dir, all renamed to
  the whitelisted pnacl_public_* pattern.
  """
  target_dir = PnaclDirs.OutputDir()
  StepBanner('BUILD INSTALLER',
             'Packaging for arch %s in %s' % (arch, target_dir))

  # llc.nexe / ld.nexe, then the native libraries, flattened and renamed.
  for src_dir in (PnaclDirs.SandboxedCompilerDir(arch),
                  PnaclDirs.LibDir(arch)):
    CopyFlattenDirsAndPrefix(src_dir, arch, target_dir)

  # Overrides also get the arch tagged onto the name, like the other files.
  for override in lib_overrides.get(arch, []):
    target_name = UseWhitelistedChars(os.path.basename(override), arch)
    shutil.copy(override, J(target_dir, target_name))
def BuildInstallerStyle(version_quad, lib_overrides, arches):
  """ Package the pnacl component for use within the chrome installer
  infrastructure.  These files need to be named in a special way
  so that white-listing of files is easy.
  """
  StepBanner("BUILD_ALL", "Packaging installer for version: %s" % version_quad)
  for arch in arches:
    BuildArchForInstaller(version_quad, arch, lib_overrides)
  # pnacl.json wants a single arch string when there is only one arch
  # (hack around the fact that Windows packages two arches together).
  arch_label = arches[0] if len(arches) == 1 else arches
  PnaclPackaging.GeneratePnaclInfo(PnaclDirs.OutputDir(), version_quad,
                                   arch_label)
######################################################################
def Main():
  """Parse command-line options and lay out the per-arch PNaCl package."""
  usage = 'usage: %prog [options] version_arg'
  parser = optparse.OptionParser(usage)
  # We may want to accept a target directory to dump it in the usual
  # output directory (e.g., scons-out).
  parser.add_option('-c', '--clean', dest='clean',
                    action='store_true', default=False,
                    help='Clean out destination directory first.')
  parser.add_option('-d', '--dest', dest='dest',
                    help='The destination root for laying out the extension')
  parser.add_option('-L', '--lib_override',
                    dest='lib_overrides', action='append', default=[],
                    help='Specify path to a fresher native library ' +
                    'that overrides the tarball library with ' +
                    '(arch:libfile) tuple.')
  parser.add_option('-t', '--target_arch',
                    dest='target_arch', default=None,
                    help='Only generate the chrome installer version for arch')
  parser.add_option('--info_template_path',
                    dest='info_template_path', default=None,
                    help='Path of the info template file')
  parser.add_option('--tool_revisions_path', dest='tool_revisions_path',
                    default=None, help='Location of NaCl TOOL_REVISIONS file.')
  parser.add_option('-v', '--verbose', dest='verbose', default=False,
                    action='store_true',
                    help='Print verbose debug messages.')

  (options, args) = parser.parse_args()
  if options.verbose:
    logging.getLogger().setLevel(logging.DEBUG)
  else:
    logging.getLogger().setLevel(logging.ERROR)
  logging.info('pnacl_component_crx_gen w/ options %s and args %s\n'
               % (options, args))

  # Set destination directory before doing any cleaning, etc.
  if options.dest:
    PnaclDirs.SetOutputDir(options.dest)

  if options.clean:
    Clean()

  if options.info_template_path:
    PnaclPackaging.SetPnaclInfoTemplatePath(options.info_template_path)

  if options.tool_revisions_path:
    PnaclPackaging.SetToolsRevisionPath(options.tool_revisions_path)

  # Parse "-L arch,libfile" overrides into {arch: [libfile, ...]}.
  lib_overrides = {}
  for o in options.lib_overrides:
    arch, override_lib = o.split(',')
    arch = CanonicalArch(arch)
    if not IsValidArch(arch):
      raise Exception('Unknown arch for -L: %s (from %s)' % (arch, o))
    if not os.path.isfile(override_lib):
      raise Exception('Override native lib not a file for -L: %s (from %s)' %
                      (override_lib, o))
    override_list = lib_overrides.get(arch, [])
    override_list.append(override_lib)
    lib_overrides[arch] = override_list

  if len(args) != 1:
    parser.print_help()
    parser.error('Incorrect number of arguments')

  # The positional arg is the sandbox ABI version (an int), not a quad.
  abi_version = int(args[0])

  arches = DetermineInstallerArches(options.target_arch)
  BuildInstallerStyle(abi_version, lib_overrides, arches)
  return 0
return 0
if __name__ == '__main__':
sys.exit(Main())
| [
"karun.matharu@gmail.com"
] | karun.matharu@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.