blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
864ceef09f60b62a0a11649678e819700f532179
|
535fe1b2b746e096c2a39c61792dffe702024841
|
/ch5_client/4_10mTestWithThread.py
|
09750dedd80a510c622f3c7f13e932617d0013b9
|
[] |
no_license
|
AstinCHOI/book_thisIsRedis
|
bad890a7570767da3661069aba55b604a2c1284f
|
9ec10df7a757e05e7459f003fadfcc4eab892a3b
|
refs/heads/master
| 2020-03-11T18:16:06.826665
| 2018-05-22T03:00:02
| 2018-05-22T03:00:02
| 130,172,385
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 769
|
py
|
import time
import threading
import redis

# Total number of SET operations, divided evenly across THREAD workers.
TOTAL_OP = 10000000
THREAD = 5


def redis_thread(pool, idx):
    """Issue this worker's share of SET commands through the shared pool.

    Worker *idx* handles exactly the operations i with i % THREAD == idx,
    expressed here as a strided range (identical set of indices).
    """
    client = redis.Redis(connection_pool=pool)
    for op in range(idx, TOTAL_OP, THREAD):
        # Key and value are identical, e.g. "key100000003".
        key = value = "key" + str(100000000 + op)
        client.set(key, value)


# One shared blocking pool so worker threads reuse connections.
pool = redis.BlockingConnectionPool(host='localhost', port=6379, db=0,
                                    max_connections=500, decode_responses=True)

workers = []
start = int(time.time())
for idx in range(THREAD):
    worker = threading.Thread(target=redis_thread, args=(pool, idx))
    workers.append(worker)
    worker.start()
for worker in workers:
    worker.join()
pool.disconnect()

elapsed = int(time.time()) - start
print("requests per second : {}".format(TOTAL_OP / elapsed))
print("time : {}s".format(elapsed))
|
[
"asciineo@gmail.com"
] |
asciineo@gmail.com
|
891c2662d78dca4bb7636a77a59d30e31e8a9460
|
6ace7e15e3191d1b8228ad7922a8552ca84f84e7
|
/.history/image_detector_20200614200237.py
|
41ec32060b2f3b5048ad6b999ba4dbbdd054c16f
|
[] |
no_license
|
mehmetaliarican/Similar-Image-Finder
|
f72e95be50c51aa03fc64954a03124b199ca64b1
|
a9e0015c443b4a73394099cccf60329cfc4c7cef
|
refs/heads/master
| 2022-10-27T00:57:43.173993
| 2020-06-14T18:02:16
| 2020-06-14T18:02:16
| 272,256,295
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,028
|
py
|
from skimage.metrics import structural_similarity as ssim
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import cv2
import glob
import os
import argparse
parser = argparse.ArgumentParser()
# Similarity threshold above which an image pair is reported and displayed.
parser.add_argument("-t", "--threshold", type=float, default=0.9,
                    help="threshold")
# Directory scanned (recursively) for input images.
parser.add_argument("-d", "--dataset", required=True,
                    help="path to input dataset")
# Parsed CLI options as a plain dict; read by compare_images below.
args = vars(parser.parse_args())
def mse(imageA, imageB):
    """Mean Squared Error between two equally-sized images.

    The lower the error, the more "similar" the two images are; 0 means
    pixel-identical.  NOTE: both images must have the same dimensions.
    """
    diff = imageA.astype("float") - imageB.astype("float")
    total_sq_error = np.sum(diff ** 2)
    # Normalize by the pixel count (rows * cols).
    return total_sq_error / float(imageA.shape[0] * imageA.shape[1])
def compare_images(path, imageA, imageB):
    """Compute MSE and SSIM for an image pair; when SSIM reaches the CLI
    threshold, print a report line and display the pair side by side."""
    error = mse(imageA, imageB)
    similarity = ssim(imageA, imageB)
    threshold = args['threshold']
    if similarity < threshold:
        return
    # Report line: path____mse____threshold____ssim
    print("____".join([str(path), str(error), str(threshold), str(similarity)]))
    side_by_side = np.hstack([imageA, imageB])
    cv2.imshow('', side_by_side)
    cv2.waitKey(0)
imagePaths = list(paths.list_images(args['dataset']))
companies = ['dhl', 'paypal', 'wellsfargo']

# Index every image whose path mentions one of the known companies.
all_data = []
for image_path in imagePaths:
    company = ''
    for name in companies:
        if name in image_path:
            company = name
            all_data.append({'comp': name, 'path': image_path})

# Compare every indexed image against every other one.  Note each pair is
# visited in both directions (A,B) and (B,A), matching the original scan.
for image in all_data:
    try:
        base = cv2.imread(image['path'])
        base = cv2.resize(base, (300, 300))
        base = cv2.cvtColor(base, cv2.COLOR_BGR2GRAY)
        for other in all_data:
            if other['path'] != image['path']:
                candidate = cv2.imread(other['path'])
                candidate = cv2.resize(candidate, (300, 300))
                candidate = cv2.cvtColor(candidate, cv2.COLOR_BGR2GRAY)
                compare_images(image['path'], base, candidate)
    except Exception as e:
        # Best-effort: skip unreadable/corrupt images instead of aborting.
        print(str(e))
|
[
"m.ali.arican@gmail.com"
] |
m.ali.arican@gmail.com
|
90fce63be2d2ea67ff71cb83ce336991d16670c6
|
94f8d393536a38136420b299555a47989cb95e06
|
/tengxunzhaopin123/tengxunzhaopin123/middlewares.py
|
a406fda992e2fc64e8c8f5e26f48c0662c6ef797
|
[] |
no_license
|
nolan0536/weizhiBigDataPython
|
9164ddc50cd0b850ec7536270d690dd0848b9f06
|
ef4ab9d749159166fcfe48883d680ac058b12425
|
refs/heads/main
| 2023-04-21T21:15:11.235258
| 2021-05-08T01:28:51
| 2021-05-08T01:28:51
| 361,971,771
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,670
|
py
|
# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
class Tengxunzhaopin123SpiderMiddleware:
    """Spider middleware for the tengxunzhaopin123 project.

    Every hook is optional: any method left undefined is treated by Scrapy
    as a no-op that passes objects through unchanged.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: build the middleware and subscribe to the
        # spider_opened signal so the spider start gets logged.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_spider_input(self, response, spider):
        # Called for each response entering the spider.  Returning None lets
        # processing continue normally.
        return None

    def process_spider_output(self, response, result, spider):
        # Forward every Request/item produced by the spider unchanged.
        yield from result

    def process_spider_exception(self, response, exception, spider):
        # No special handling: defer to other middlewares / defaults.
        pass

    def process_start_requests(self, start_requests, spider):
        # Start requests pass through unmodified (must yield only requests,
        # never items).
        yield from start_requests

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class Tengxunzhaopin123DownloaderMiddleware:
    """Downloader middleware for the tengxunzhaopin123 project.

    Every hook is optional: any method left undefined is treated by Scrapy
    as a no-op that passes requests/responses through unchanged.
    """

    @classmethod
    def from_crawler(cls, crawler):
        # Scrapy entry point: build the middleware and subscribe to the
        # spider_opened signal so the spider start gets logged.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened, signal=signals.spider_opened)
        return middleware

    def process_request(self, request, spider):
        # Returning None continues normal processing.  Other valid results:
        # a Response, a Request, or raising IgnoreRequest (which routes to
        # the installed process_exception() hooks).
        return None

    def process_response(self, request, response, spider):
        # Pass the downloaded response through unchanged.  Must return a
        # Response, a Request, or raise IgnoreRequest.
        return response

    def process_exception(self, request, exception, spider):
        # Returning None continues the exception chain; a Response or
        # Request would stop it.  Nothing special to do here.
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
|
[
"1552696515@qq.com"
] |
1552696515@qq.com
|
a1ccfcb82ab8dd810185bd40e65365e3fa67a304
|
e5a511e346f5be8a82fe9cb2edf457aa7e82859c
|
/Python/cppsecrets.com/program 14.py
|
7072f9e819a92b61e186cb3ff5e5ff835dad44e7
|
[] |
no_license
|
nekapoor7/Python-and-Django
|
8397561c78e599abc8755887cbed39ebef8d27dc
|
8fa4d15f4fa964634ad6a89bd4d8588aa045e24f
|
refs/heads/master
| 2022-10-10T20:23:02.673600
| 2020-06-11T09:06:42
| 2020-06-11T09:06:42
| 257,163,996
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
"""Python Program to Count the Occurrences of Each Word in a Given String Sentence"""
from collections import Counter
text = input()
occur = Counter(text)
print(occur)
|
[
"neha.kapoor070789@gmail.com"
] |
neha.kapoor070789@gmail.com
|
3701c020b39c94cf68bd19271c90cc4e2b9b1579
|
bfe9c678726a53421f26a7cfdc1447681624c4f2
|
/bast/graphics/mesh/sub_mesh.py
|
6bc0a14c7930c25239866280f4636279921bb430
|
[] |
no_license
|
adamlwgriffiths/bast
|
df983cf0322b320efdc8ef4ba0207214ebd31ef6
|
a78186e9d111a799581bd604b4985467638b0b10
|
refs/heads/master
| 2021-01-19T20:18:27.558273
| 2015-05-02T03:41:22
| 2015-05-02T03:41:22
| 31,202,746
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,756
|
py
|
from __future__ import absolute_import
from OpenGL import GL
from ...common.object import DescriptorMixin
from ..buffer.vertex_array import VertexArray
from ..buffer.buffer_pointer import BufferPointer
class SubMesh(DescriptorMixin):
    """A drawable portion of a mesh: named buffer pointers bound to a vertex
    array and rendered with a single material.
    """

    def __init__(self, material, indices=None, primitive=GL.GL_TRIANGLES, **pointers):
        # pointers maps attribute name -> BufferPointer; validated below.
        self._pointers = pointers
        self._material = material
        self.primitive = primitive
        # indices is optional; when None the mesh is rendered unindexed.
        self.indices = indices
        for pointer in pointers.values():
            if not isinstance(pointer, BufferPointer):
                raise ValueError('Must be of type BufferPointer')
        self._vertex_array = VertexArray()
        self._bind_pointers()

    def _bind_pointers(self):
        # TODO: make this more efficient, don't just clear all pointers
        self._vertex_array.clear()
        # assign our pointers to the vertex array: match each pointer name to
        # an attribute location in the material's shader program.  Names with
        # no matching attribute are silently skipped.
        for name, pointer in self._pointers.items():
            if not isinstance(pointer, BufferPointer):
                raise ValueError('Must be a buffer pointer')
            attribute = self._material.program.attributes.get(name)
            if attribute:
                self._vertex_array[attribute.location] = pointer

    def render(self, **uniforms):
        # set our uniforms
        self._material.set_uniforms(**uniforms)
        # render with the material bound; indexed draw when indices are set.
        with self._material:
            if self.indices is not None:
                self._vertex_array.render_indices(self.indices, self.primitive)
            else:
                self._vertex_array.render(self.primitive)

    @property
    def material(self):
        # Exposed as a property so assignment re-binds pointers against the
        # new material's shader attributes (see setter).
        return self._material

    @material.setter
    def material(self, material):
        self._material = material
        self._bind_pointers()
|
[
"adam.lw.griffiths@gmail.com"
] |
adam.lw.griffiths@gmail.com
|
42a947cc4062cb659223ccf9afbd8090f7fdc4aa
|
74060c5771ae3904e99cda84ef3d1ead58940917
|
/app.py
|
0dd8b44dd6721d70dd36f820ac10c849c3f26655
|
[] |
no_license
|
claraj/river-level
|
423f34027287f03b0b10a79bddc6cce17d1c4226
|
8a8aed77382337de58af6b694b01c210ea3d6a72
|
refs/heads/main
| 2023-06-03T07:47:46.025175
| 2021-06-11T02:10:57
| 2021-06-11T02:10:57
| 375,766,339
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,273
|
py
|
from flask import Flask, abort
from flask.json import jsonify
import requests
from datetime import datetime
app = Flask(__name__)
@app.errorhandler(400)
def bad_request(e):
    """Return a JSON body for 400 Bad Request errors.

    Bug fixes: the handler previously returned HTTP status 404 for a 400
    error, and it reused the name `not_found`, colliding with the 404
    handler defined below in this module.
    """
    return jsonify({'Error': 'Bad request. Ensure you use a valid river code, and the number of days must be between 1 and 365.'}), 400
@app.errorhandler(404)
def not_found(e):
    """Return a JSON body for 404 Not Found errors."""
    payload = {'Error': 'Not found'}
    return jsonify(payload), 404
@app.errorhandler(500)
def problem(e):
    """Return a JSON body for 500 Internal Server Error responses."""
    payload = {'Error': 'There was an error. Please report this to Clara.'}
    return jsonify(payload), 500
@app.route('/')
def homepage():
    """Plain-text landing page."""
    message = 'This is the home page.'
    return message
@app.route('/api/river/<site_id>/<days>')
def river_info(site_id, days):
    """Return simplified USGS river gauge data for *site_id* over *days* days.

    Responds with JSON shaped like
    {'data': {<parameter name>: {'values': [...], 'times': [...],
    'timestamps': [...]}}, 'location': <site name>}.
    Aborts with 400 for an invalid `days` or unrecognized site, and with
    404 when the site returns no time series.
    """
    url = 'https://waterservices.usgs.gov/nwis/iv'
    parameter_code_map = {
        # '00011': 'Water temperature, fahrenheit',
        # '00060': 'Flow, cubic feet per second',
        '00065': 'Gauge height, feet',
    }
    parameter_codes = ','.join(parameter_code_map.keys())
    # Validate days: must parse as an int in [1, 365].  Bug fix: the old
    # bare `except:` also caught the HTTPException raised by abort() inside
    # the same try block, so a range failure aborted twice; catch only the
    # conversion errors and range-check outside the try.
    try:
        days = int(days)
    except (TypeError, ValueError):
        abort(400, 'Days must be an integer between 1 and 365')
    if days < 1 or days > 365:
        abort(400, 'Days must be an integer between 1 and 365')
    params = {
        'format': 'json',
        'site': site_id,
        'parameterCd': parameter_codes,
        'siteStatus': 'all',
        'period': f'P{days}D'
    }
    response = requests.get(url, params=params)
    if response.status_code == 400:  # Bad request, often unrecognized site number
        app.logger.error(f'Bad request for site {site_id} because {response.text}')
        abort(400)
    response.raise_for_status()
    # Extract site name, parameter values, and measurement times.
    river_data = response.json()
    time_series = river_data['value']['timeSeries']
    if not time_series:
        # No data, or the site number was not found.
        app.logger.error(f'No series of data for site {site_id}')
        abort(404)
    simplified_data = {'data': {}}
    site_name_title = None
    for series in time_series:
        code = series['variable']['variableCode'][0]['value']
        simple_name = parameter_code_map[code]
        values = series['values'][0]['value']
        values_list = []
        times_list = []
        timestamp_list = []
        for value_dict in values:
            data_point = value_dict['value']
            date_str = value_dict['dateTime']
            # Unix timestamps are provided alongside the ISO strings for
            # easy client-side plotting.
            date_time = datetime.fromisoformat(date_str)
            timestamp_list.append(date_time.timestamp())
            values_list.append(data_point)
            times_list.append(date_str)
        site_name = series['sourceInfo']['siteName']
        site_name_title = site_name.title()
        simplified_data['data'][simple_name] = {
            'values': values_list,
            'times': times_list,
            'timestamps': timestamp_list
        }
    simplified_data['location'] = site_name_title
    return jsonify(simplified_data)
|
[
"10088152+claraj@users.noreply.github.com"
] |
10088152+claraj@users.noreply.github.com
|
acd5eaa9f9e459be6493fb13f20f229d7bb22132
|
a94c446a0d9ce77df965674f63be54d54b2be577
|
/raspy/invalid_operation_exception.py
|
619a77c7c483963df689e25c8c88f7cc472685e5
|
[
"MIT"
] |
permissive
|
cyrusbuilt/RasPy
|
3434e02c2bff09ef9f3ff4995bda14edc781c14b
|
1e34840cc90ea7f19317e881162209d3d819eb09
|
refs/heads/master
| 2020-03-18T20:19:27.426002
| 2018-08-03T17:07:25
| 2018-08-03T17:07:25
| 135,207,376
| 0
| 0
|
MIT
| 2018-08-03T17:07:26
| 2018-05-28T20:42:17
|
Python
|
UTF-8
|
Python
| false
| false
| 297
|
py
|
"""This module contains the InvalidOperationException exception class."""
class InvalidOperationException(Exception):
    """Invalid operation exception.

    The exception that is thrown when an operation is attempted on an object
    whose current state does not support it.
    """
|
[
"cyrusbuilt@gmail.com"
] |
cyrusbuilt@gmail.com
|
5a691fd91db8702bd66b8e9e3d63005f7c6f009d
|
489814a9008e482eb1098c3c97aac23ff037b3cf
|
/www/rabota/context_processors.py
|
7227e85af53f954205996ae8e27a574bcda14bb5
|
[] |
no_license
|
boogiiieee/Delo70
|
f70fcb92c91f96348513d415b120aad3b4507721
|
5c48371a513b4b1bdd6068c90895a9bda126d88c
|
refs/heads/master
| 2021-09-04T03:10:13.362897
| 2018-01-15T03:57:48
| 2018-01-15T03:57:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 624
|
py
|
# -*- coding: utf-8 -*-
from rabota.forms import SearchMinForm
from geo.models import CustomLocation
##################################################################################################
##################################################################################################
def custom_proc(request):
    """Template context processor: expose the quick-search form and the
    Tomsk location record to every rendered template."""
    context = {
        'search': SearchMinForm(),
        'city': CustomLocation.objects.get(slug=u'tomsk'),
    }
    return context
##################################################################################################
##################################################################################################
|
[
"shalyapinalexander@gmail.com"
] |
shalyapinalexander@gmail.com
|
c755e9e0d94bb5396ef7140478ebd01d23417626
|
a566cb316ab93aeadd366b148f5110c327c7eb2b
|
/chp3/test.py
|
12ac2e4c8da6cf7e5fd81b79f352b6ddc085af59
|
[] |
no_license
|
piochelepiotr/crackingTheCode
|
4aeaffd2c46b2761b2f9642107292d0932731489
|
163ff60f723869a7096b330965d90dc1443d7199
|
refs/heads/master
| 2021-06-20T21:30:56.033989
| 2021-01-13T08:44:57
| 2021-01-13T08:44:57
| 172,414,034
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,050
|
py
|
import unittest
import ex2
import ex3
import ex4
import ex5
import ex6
import stack
class Testing(unittest.TestCase):
    """Unit tests for the chapter-3 stack and queue exercises."""

    def test_s(self):
        # Basic stack obeys LIFO order.
        basic = stack.Stack()
        basic.push(3)
        basic.push(4)
        self.assertEqual(4, basic.pop())
        self.assertEqual(3, basic.pop())

    def test_min_s(self):
        # MinStack tracks the running minimum across adds and pops.
        min_stack = ex2.MinStack()
        min_stack.add(3)
        min_stack.add(4)
        min_stack.add(2)
        self.assertEqual(2, min_stack.min())
        self.assertEqual(2, min_stack.pop())
        self.assertEqual(3, min_stack.min())
        self.assertEqual(4, min_stack.pop())

    def test_set_of_ss(self):
        # SetOfStacks (capacity 2) behaves like one stack, plus pop_at.
        stacks = ex3.SetOfStacks(2)
        for value in (1, 2, 3):
            stacks.push(value)
        self.assertEqual(3, stacks.pop())
        self.assertEqual(2, stacks.pop())
        self.assertEqual(1, stacks.pop())
        for value in (1, 2, 3):
            stacks.push(value)
        self.assertEqual(2, stacks.pop_at(0))
        self.assertEqual(3, stacks.pop())
        self.assertEqual(1, stacks.pop())

    def test_queue(self):
        # Two-stack queue preserves FIFO order.
        queue = ex4.MyQueue()
        for value in (1, 2, 3):
            queue.push(value)
        self.assertEqual(1, queue.pull())
        self.assertEqual(2, queue.pull())
        self.assertEqual(3, queue.pull())

    def test_sort_stack(self):
        # sort_stack yields a stack that pops in descending order.
        unsorted = stack.Stack()
        for value in (2, 1, 3, 5):
            unsorted.push(value)
        sorted_stack = ex5.sort_stack(unsorted)
        self.assertEqual(4, sorted_stack.size())
        for expected in (5, 3, 2, 1):
            self.assertEqual(expected, sorted_stack.pop())

    def test_shelter(self):
        # The shelter dequeues the oldest animal of the requested species.
        shelter = ex6.Shelter()
        shelter.enqueue(ex6.Cat('Garfield'))
        shelter.enqueue(ex6.Dog('Sirius'))
        shelter.enqueue(ex6.Dog('Rantanplan'))
        shelter.enqueue(ex6.Cat('Crookshanks'))
        self.assertEqual('Sirius', shelter.dequeue_dog().name)
        self.assertEqual('Garfield', shelter.dequeue_any().name)
        self.assertEqual('Crookshanks', shelter.dequeue_cat().name)


if __name__ == "__main__":
    unittest.main()
|
[
"piotr.wolski@telecom-paristech.fr"
] |
piotr.wolski@telecom-paristech.fr
|
421a92abcac080c140990d2ba04f70ef50b6473d
|
de3b77cb0927f28cbd85e9142c2dfd7c8be7c27e
|
/tests/migrations/024_add_updated_at_to_endpoint_params_down.py
|
293af995a4fa50583d125154ddd7dc3ad812b296
|
[
"MIT"
] |
permissive
|
LoansBot/database
|
f3dcbccde59fdb80c876d2612f250662946588e6
|
eeaed26c2dcfdf0f9637b47ebe15cd1e000d8cc4
|
refs/heads/master
| 2021-07-02T22:07:18.683278
| 2021-06-02T04:09:38
| 2021-06-02T04:09:38
| 239,400,935
| 0
| 1
|
MIT
| 2021-06-02T04:14:31
| 2020-02-10T01:06:53
|
Python
|
UTF-8
|
Python
| false
| false
| 690
|
py
|
import unittest
import helper
class DownTest(unittest.TestCase):
    """Verify migration 024 (down) removed endpoint_params.updated_at."""

    @classmethod
    def setUpClass(cls):
        # One shared connection/cursor for the whole test case.
        cls.connection = helper.setup_connection()
        cls.cursor = cls.connection.cursor()

    @classmethod
    def tearDownClass(cls):
        cls.cursor.close()
        cls.connection.rollback()
        helper.teardown_connection(cls.connection)

    def tearDown(self):
        # Discard anything a test changed so cases stay independent.
        self.connection.rollback()

    def test_updated_at_dne(self):
        # After the down migration the updated_at column must not exist.
        column_exists = helper.check_if_column_exist(
            self.cursor, 'endpoint_params', 'updated_at'
        )
        self.assertFalse(column_exists)


if __name__ == '__main__':
    unittest.main()
|
[
"noreply@github.com"
] |
LoansBot.noreply@github.com
|
e98a5646e95ea0a4833b1b0150b72feea5dc1830
|
c9aca558963537ae10e87b791cc878f8f6a33d77
|
/Chapter02/Simple_linear_regression.py
|
bff0e3c4dfc7ca91098e85038dc360cdf9cb04ec
|
[
"MIT"
] |
permissive
|
PacktPublishing/TensorFlow-1x-Deep-Learning-Cookbook
|
d1f8fe311fa127346122aee1a8cc12a85ef4cc8a
|
9e23044b0c43e2f6b9ad40a82023f7935757d3d0
|
refs/heads/master
| 2023-02-05T09:27:15.951141
| 2023-01-30T09:49:25
| 2023-01-30T09:49:25
| 114,516,232
| 91
| 84
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,767
|
py
|
"""
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
def normalize(X):
    """Standardize X to zero mean and unit variance (z-score)."""
    mean = np.mean(X)
    std = np.std(X)
    X = (X - mean)/std
    return X

# Data: Boston housing.  Column 5 of the feature matrix is used as the
# single input feature; the target is the house price.
boston = tf.contrib.learn.datasets.load_dataset('boston')
X_train, Y_train = boston.data[:,5], boston.target
#X_train = normalize(X_train)
n_samples = len(X_train)
#print(X_train)

# Placeholder for the Training Data (one scalar sample fed per step below).
X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')

# Variables for coefficients initialized to 0
b = tf.Variable(0.0)
w = tf.Variable(0.0)

# The Linear Regression Model
Y_hat = X * w + b

# Loss function: squared error of the single fed sample.
loss = tf.square(Y - Y_hat, name='loss')

# Gradient Descent with learning rate of 0.01 to minimize loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01).minimize(loss)

# Initializing Variables
init_op = tf.global_variables_initializer()
total = []

# Computation Graph
with tf.Session() as sess:
    # Initialize variables
    sess.run(init_op)
    writer = tf.summary.FileWriter('graphs', sess.graph)
    # Train the model for 100 epochs, one sample at a time (SGD).
    for i in range(100):
        total_loss = 0
        for x,y in zip(X_train,Y_train):
            _, l = sess.run ([optimizer, loss], feed_dict={X:x, Y:y})
            total_loss += l
        # Track the mean per-sample loss for the post-training loss plot.
        total.append(total_loss / n_samples)
        print('Epoch {0}: Loss {1}'.format(i, total_loss/n_samples))
    writer.close()
    b_value, w_value = sess.run([b, w])

# Predictions from the learned coefficients (NumPy, outside the session).
Y_pred = X_train * w_value + b_value
print('Done')

# Plot the result: data vs. fitted line, then the loss curve.
plt.plot(X_train, Y_train, 'bo', label='Real Data')
plt.plot(X_train,Y_pred, 'r', label='Predicted Data')
plt.legend()
plt.show()
plt.plot(total)
plt.show()
|
[
"noreply@github.com"
] |
PacktPublishing.noreply@github.com
|
88e1bce19a600e0c2c679c6e5cda236a8c2c4e07
|
cd4d0df26a8cd40b01872e892dca7204aa66aa1e
|
/storescraper/bin/celeryconfig/defaults.py
|
31d6f1ec02656232b0614147fc23ef99adf4d7f3
|
[] |
no_license
|
SoloTodo/storescraper
|
f3486782c37f48d1b8aac4dc5fa6fa993711382e
|
b04490f5f3db21a92e9ad7cb67c4030a69e51434
|
refs/heads/develop
| 2023-08-30T21:43:16.725320
| 2023-08-30T19:36:27
| 2023-08-30T19:36:27
| 95,259,334
| 47
| 23
| null | 2023-08-03T15:34:41
| 2017-06-23T21:56:48
|
Python
|
UTF-8
|
Python
| false
| false
| 174
|
py
|
import sys

# Make the project root importable when the worker starts from this folder.
sys.path.append('../..')

# Celery connection settings for the local RabbitMQ broker.
broker_url = 'amqp://storescraper:storescraper@localhost/storescraper'
result_backend = 'rpc://'

# Modules to import when the worker starts.  Bug fix: the trailing comma is
# required — without it the parentheses made this a plain string
# ('storescraper.store'), not the one-element tuple Celery expects for the
# `imports` setting.
imports = (
    'storescraper.store',
)
|
[
"vkhemlan@gmail.com"
] |
vkhemlan@gmail.com
|
c44c0c145082573453dda43fc2c47dbb33847365
|
9955a91c6193f28bc2a47fb0ab3955a0a0f525f8
|
/model/medical_inpatient_medication_log.py
|
90147c432e8c7b46743c6e85b82bd58456195df1
|
[] |
no_license
|
asop-source/Klinik-
|
77372049fe6cdf2b2c922f093464980fea01bfc5
|
a7ab1ca80c73e62178e577a685be888ff6c370da
|
refs/heads/master
| 2020-12-11T06:38:11.765622
| 2020-01-14T08:14:51
| 2020-01-14T08:14:51
| 233,790,453
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 813
|
py
|
# -*- coding: utf-8 -*-
# Part of BrowseInfo. See LICENSE file for full copyright and licensing details.
from odoo import models, fields, api, _
from datetime import date,datetime
class medical_inpatient_medication_log(models.Model):
    # Odoo model: one administration entry in an inpatient's medication log.
    _name = 'medical.inpatient.medication.log'

    # Timestamp of the administration; system-filled, hence readonly.
    admin_time = fields.Datetime(string='Date',readonly=True)
    # Administered quantity, in the unit given by medical_dose_unit_id.
    dose = fields.Float(string='Dose')
    remarks = fields.Text(string='Remarks')
    # Health professional who administered the dose.
    medical_inpatient_medication_log_id = fields.Many2one('medical.physician',string='Health Professional',readonly=True)
    # NOTE(review): the label "Dose Unt" looks like a typo for "Dose Unit";
    # left unchanged here because it is a user-visible runtime string.
    medical_dose_unit_id = fields.Many2one('medical.dose.unit',string='Dose Unt')
    # Parent inpatient medication record this log entry belongs to.
    medical_inaptient_log_medicament_id = fields.Many2one('medical.inpatient.medication',string='Log History')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:s
|
[
"asopkarawang@gmail.com"
] |
asopkarawang@gmail.com
|
72f0fc5d92d6a4e25fec8d241b1e74159b598da8
|
cced1f1ad18c6d9c3b96b2ae53cac8e86846f1f5
|
/Blog/comment/views.py
|
7d2183dd9347af2c83f7990348ea803cb88433de
|
[] |
no_license
|
sug5806/portfolio
|
a3904be506a3746e16da57bba5926c38743783ad
|
b943955a52c622094a58fb9124323298261ae80a
|
refs/heads/master
| 2022-12-10T06:23:38.472893
| 2019-07-05T04:56:59
| 2019-07-05T04:56:59
| 190,156,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,313
|
py
|
from django.shortcuts import render
from .forms import CommentForm
from .models import Comment
from django.shortcuts import redirect
from django.urls import resolve
from urllib.parse import urlparse
from django.contrib import messages
def add_comment(request):
    """Create a comment from POST data for the logged-in user, then redirect
    back to the page the request came from.

    Flash messages (Korean, matching the rest of the project) report
    success, invalid form data, or the login requirement.
    """
    if not request.user.is_anonymous:
        comment_form = CommentForm(request.POST)
        # Attach the author before validation so the saved comment is owned
        # by the requesting user.
        comment_form.instance.author_id = request.user.id
        if comment_form.is_valid():
            comment_form.save()
            messages.add_message(request, messages.SUCCESS, "댓글을 작성하였습니다.")
        else:
            messages.add_message(request, messages.WARNING, "Comment Invalid")
    else:
        messages.add_message(request, messages.WARNING, "댓글은 로그인 사용자만 남길 수 있습니다.")
    # Robustness fix: a request without a Referer header used to raise
    # KeyError (HTTP 500); fall back to the site root instead.
    referer = request.META.get('HTTP_REFERER', '/')
    return redirect(referer)
def delete_comment(request, pk):
    """Delete comment *pk* when it exists and belongs to the requesting
    user, then redirect back to the referring page.

    Flash messages report success or refusal (missing comment / not owner).
    """
    comment = Comment.objects.filter(pk=pk)
    if comment.exists() and comment[0].author == request.user:
        comment.delete()
        messages.add_message(request, messages.SUCCESS, "댓글을 삭제하였습니다.")
    else:
        messages.add_message(request, messages.WARNING, "댓글을 삭제할 수 없습니다.")
    # Robustness fix: a request without a Referer header used to raise
    # KeyError (HTTP 500); fall back to the site root instead.
    referer = request.META.get('HTTP_REFERER', '/')
    return redirect(referer)
|
[
"sug5806@gmail.com"
] |
sug5806@gmail.com
|
660031d63690e79aa8df1ed183d32e387b29d77b
|
5f5256284d4aa1c3d88dd99301024ba8fa04955e
|
/weis/multifidelity/test/test_trust_region.py
|
3fb88e17c210bb3135a5e410645a1c3a2b75c1d9
|
[
"Apache-2.0"
] |
permissive
|
dousuguang/WEIS
|
15fbff42dc4298d7592871b961c0f43fcd24feb7
|
1e4dbf6728050f75cee08cd483fe57c5614488fe
|
refs/heads/master
| 2023-07-08T15:46:45.508489
| 2021-05-18T22:46:55
| 2021-05-18T22:46:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,413
|
py
|
import unittest
import numpy as np
from weis.multifidelity.models.testbed_components import (
simple_2D_high_model,
simple_2D_low_model,
simple_1D_high_model,
simple_1D_low_model,
)
from weis.multifidelity.methods.trust_region import SimpleTrustRegion
class Test(unittest.TestCase):
    """End-to-end checks of SimpleTrustRegion on the toy testbed models."""

    def test_optimization(self):
        # Unconstrained 2D problem; the optimum is known to be (0, 1/3).
        np.random.seed(13)
        bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
        desvars = {"x": np.array([0.0, 0.25])}
        low_fidelity = simple_2D_low_model(desvars)
        high_fidelity = simple_2D_high_model(desvars)
        optimizer = SimpleTrustRegion(low_fidelity, high_fidelity, bounds, disp=False)
        optimizer.add_objective("y")
        results = optimizer.optimize()
        np.testing.assert_allclose(results["optimal_design"], [0.0, 0.333], atol=1e-3)

    def test_constrained_optimization(self):
        # 2D problem with the equality constraint con == 0.
        np.random.seed(13)
        bounds = {"x": np.array([[0.0, 1.0], [0.0, 1.0]])}
        desvars = {"x": np.array([0.0, 0.25])}
        low_fidelity = simple_2D_low_model(desvars)
        high_fidelity = simple_2D_high_model(desvars)
        optimizer = SimpleTrustRegion(
            low_fidelity, high_fidelity, bounds, num_initial_points=10, disp=False
        )
        optimizer.add_objective("y")
        optimizer.add_constraint("con", equals=0.0)
        results = optimizer.optimize(plot=False, num_iterations=10)
        np.testing.assert_allclose(results["optimal_design"], [0.0, 0.10987], atol=1e-3)
        np.testing.assert_allclose(results["outputs"]["con"], 0.0, atol=1e-5)

    def test_1d_constrained_optimization(self):
        # 1D problem with con == 0.25; optimum near 1/sqrt(2).
        np.random.seed(13)
        bounds = {"x": np.array([[0.0, 1.0]])}
        desvars = {"x": np.array([0.25])}
        low_fidelity = simple_1D_low_model(desvars)
        high_fidelity = simple_1D_high_model(desvars)
        optimizer = SimpleTrustRegion(
            low_fidelity, high_fidelity, bounds, num_initial_points=10, disp=False
        )
        optimizer.add_objective("y")
        optimizer.add_constraint("con", equals=0.25)
        results = optimizer.optimize(plot=False, num_iterations=10)
        np.testing.assert_allclose(results["optimal_design"], 0.707105, atol=1e-3)
        np.testing.assert_allclose(results["outputs"]["con"], 0.25, atol=1e-5)


if __name__ == "__main__":
    unittest.main()
|
[
"johnjasa11@gmail.com"
] |
johnjasa11@gmail.com
|
a2eb28539aed3f9f4c023b85fe772c3742d174f8
|
f569978afb27e72bf6a88438aa622b8c50cbc61b
|
/douyin_open/Oauth2UserToken/__init__.py
|
57234d8fe6e43dd02aeb32d856adc28f5132d285
|
[] |
no_license
|
strangebank/swagger-petstore-perl
|
4834409d6225b8a09b8195128d74a9b10ef1484a
|
49dfc229e2e897cdb15cbf969121713162154f28
|
refs/heads/master
| 2023-01-05T10:21:33.518937
| 2020-11-05T04:33:16
| 2020-11-05T04:33:16
| 310,189,316
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 986
|
py
|
# coding: utf-8
# flake8: noqa
"""
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import apis into sdk package
from douyin_open.Oauth2UserToken.api.access_token_api import AccessTokenApi
from douyin_open.Oauth2UserToken.api.oauth_code_api import OauthCodeApi
# import ApiClient
from douyin_open.Oauth2UserToken.api_client import ApiClient
from douyin_open.Oauth2UserToken.configuration import Configuration
# import models into sdk package
from douyin_open.Oauth2UserToken.models.description import Description
from douyin_open.Oauth2UserToken.models.error_code import ErrorCode
from douyin_open.Oauth2UserToken.models.inline_response200 import InlineResponse200
from douyin_open.Oauth2UserToken.models.inline_response200_data import InlineResponse200Data
|
[
"strangebank@gmail.com"
] |
strangebank@gmail.com
|
24a5685809e52808904bdcc90e982bc708d4cf22
|
6ff318a9f67a3191b2a9f1d365b275c2d0e5794f
|
/python/day26/复习面向对象.py
|
528ff46caa2096b7158566b26e0dedf26620f292
|
[] |
no_license
|
lvhanzhi/Python
|
c1846cb83660d60a55b0f1d2ed299bc0632af4ba
|
c89f882f601898b5caab25855ffa7d7a1794f9ab
|
refs/heads/master
| 2020-03-25T23:34:00.919197
| 2018-09-13T12:19:51
| 2018-09-13T12:19:51
| 144,281,084
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,391
|
py
|
# class OldboyStudent:
# school='Oldboy'
# def choose_course(self):
# print('is choosing course')
# print(OldboyStudent.__dict__)
# print(OldboyStudent.__dict__['school'])
# print(OldboyStudent.school)
# print(OldboyStudent.choose_course)
# OldboyStudent.choose_course(1)
# OldboyStudent.country='English'
# print(OldboyStudent.country)
# del OldboyStudent.school
# class Foo:
# pass
# class Foo2(Foo):
# pass
# f=Foo2()
# # print(Foo)
# # obj=Foo()
# # print(type(obj))
# print(Foo.__name__)
# print(Foo.__dict__)
# print(dir(Foo))
# print(Foo.__module__)
# print(f.__class__.__name__)
# print(isinstance(f,Foo2))
# class People:
# count=0
# def __init__(self,name):
# self.name=name
# People.count+=1
# egon=People('egon')
# print(egon.count)
# alex=People('alex')
# print(alex.count)
# class Person:
# def __init__(self,name,attack,life_value):
# self.name=name
# self.attack=attack
# self.life_value=life_value
# def attacking(self):
# dog.life_value=dog.life_value-self.attack
# class Dog:
# def __init__(self,name,attack,life_value):
# self.name=name
# self.attack=attack
# self.life_value=life_value
# def attacking(self):
# egon.life_value=egon.life_value-self.attack
# egon=Person('egon',20,100)
# dog=Dog('dog',10,100)
# print('egon的生命',egon.life_value)
# print('dog的生命',dog.life_value)
# egon.attacking()
# print('egon的生命',egon.life_value)
# print('dog的生命',dog.life_value)
# class Birthday:
# def __init__(self,year,month,day):
# self.year=year
# self.month=month
# self.day=day
# class Course:
# def __init__(self,name,price,period):
# self.name=name
# self.price=price
# self.period=period
# class Teacher:
# def __init__(self,name,year,month,day,price,period,salary):
# self.name=name
# self.salary=salary
# self.birthday=Birthday(year,month,day)
# self.course=Course(name,price,period)
# egon=Teacher('egon',1998,5,5,19800,5.5,2000)
# class Birthday:
# def __init__(self,year,mothday,day):
# self.year=year
# self.mothday=mothday
# self.day=day
# class Course:
# def __init__(self,name,period,price):
# self.name=name
# self.period=period
# self.price=price
# class Teacher:
# def __init__(self,name,age,sex):
# self.name=name
# self.age=age
# self.sex=sex
# egg=Teacher('egon',28,'male')
# egg.birthday=Birthday(2018,8,14)
# print(egg.birthday.year)
# egg.course=Course('python',5.5,158000)
# print(egg.course.name)
# class A:
# def test(self):
# print('a')
# class B(A):
# def test(self):
# print('b')
# obj=B()
# print(B.mro())
# class People:
# def __init__(self,name,age,sex):
# self.name=name
# self.age=age
# self.sex=sex
# class Student(People):
# def __init__(self,name,age,sex):
# People.__init__(self,name,age,sex)
# stu=Student('tom',18,'male')
# print(stu.__dict__)
# class People:
# def __init__(self,name,age,sex):
# self.name=name
# self.age=age
# self.sex=sex
# class Teacher(People):
# def __init__(self,name,age,sex):
# super(Teacher,self).__init__(name,age,sex)
# tea=Teacher('egon',18,'male')
# print(tea.__dict__)
|
[
"1541860665@qq.com"
] |
1541860665@qq.com
|
d864a4a18f26361ad7c9a9e508e92e54f8250bc2
|
d8b5aba2a1f53fbf3fcfc388c26e547afa76b13f
|
/modules/andForensics/modules/utils/android_sqlite3.py
|
455f93e4b7c5cde1330befb1977acbfd3297ff38
|
[
"GPL-3.0-only",
"Apache-2.0"
] |
permissive
|
dfrc-korea/carpe
|
e88b4e3bcb536355e2a64d00e807bccd631f8c93
|
f9299b8ad0cb2a6bbbd5e65f01d2ba06406c70ac
|
refs/heads/master
| 2023-04-28T01:12:49.138443
| 2023-04-18T07:37:39
| 2023-04-18T07:37:39
| 169,518,336
| 75
| 38
|
Apache-2.0
| 2023-02-08T00:42:41
| 2019-02-07T04:21:23
|
Python
|
UTF-8
|
Python
| false
| false
| 2,933
|
py
|
#-*- coding: utf-8 -*-
import sqlite3
import logging
import sys
logger = logging.getLogger('andForensics')


class SQLite3(object):
    """Fire-and-forget helpers for querying SQLite database files.

    Every helper opens a fresh connection to *db*, executes, closes the
    connection and returns the result.  On any SQLite error the helper logs
    the problem and returns ``False`` instead of raising, so callers must
    check the return value explicitly.  All helpers are static and should be
    called on the class.
    """

    @staticmethod
    def execute_fetch_query_multi_values_order(query, query2, db):
        """Fetch all rows for *query*; if it fails, fall back to *query2*.

        Returns a list of row tuples, or ``False`` on error.
        """
        try:
            con = sqlite3.connect(db)
        except sqlite3.Error:
            logger.error("SQLite open error. it is an invalid file: %s" % db)
            return False
        cursor = con.cursor()
        try:
            cursor.execute(query)
        except sqlite3.Error:
            # Primary query failed (e.g. an older schema variant) -- retry
            # with the alternate query before giving up.
            try:
                cursor.execute(query2)
            except sqlite3.Error:
                logger.error("SQLite query execution error. query: %s, db: %s" % (query2, db))
                con.close()  # do not leak the connection on the error path
                return False
        try:
            ret = cursor.fetchall()
        except sqlite3.Error:
            logger.error("SQLite query execution error. query: %s, db: %s" % (query, db))
            con.close()
            return False
        con.close()
        return ret

    @staticmethod
    def execute_fetch_query_multi_values(query, db):
        """Fetch all rows for *query*.

        Returns a list of row tuples, or ``False`` on error.
        """
        try:
            con = sqlite3.connect(db)
        except sqlite3.Error:
            logger.error("SQLite open error. it is an invalid file: %s" % db)
            return False
        cursor = con.cursor()
        try:
            cursor.execute(query)
        except sqlite3.Error:
            logger.error("SQLite query execution error. query: %s, db: %s" % (query, db))
            con.close()
            return False
        try:
            ret = cursor.fetchall()
        except sqlite3.Error:
            logger.error("SQLite query execution error. query: %s, db: %s" % (query, db))
            con.close()
            return False
        con.close()
        return ret

    @staticmethod
    def execute_fetch_query(query, db):
        """Fetch the first row for *query*.

        Returns a row tuple (or ``None`` for an empty result set), or
        ``False`` on error.
        """
        try:
            con = sqlite3.connect(db)
        except sqlite3.Error:
            logger.error("SQLite open error. it is an invalid file: %s" % db)
            return False
        cursor = con.cursor()
        try:
            cursor.execute(query)
        except sqlite3.Error:
            logger.error("SQLite query execution error. query: %s" % query)
            con.close()
            return False
        try:
            ret = cursor.fetchone()
        except sqlite3.Error:
            logger.error("SQLite query execution error. query: %s" % query)
            con.close()
            return False
        con.close()
        return ret

    @staticmethod
    def execute_commit_query(queries, db):
        """Execute one statement (str) or several (list of str) and commit.

        Returns ``None`` on success (kept for backward compatibility with
        existing callers) and ``False`` on error or when *queries* is
        neither a str nor a list.
        """
        try:
            con = sqlite3.connect(db)
        except sqlite3.Error:
            logger.error("SQLite open error. it is an invalid file: %s" % db)
            return False
        cursor = con.cursor()
        if isinstance(queries, list):
            for query in queries:
                try:
                    cursor.execute(query)
                except sqlite3.Error:
                    logger.error("SQLite query execution error. query: %s" % query)
                    con.close()
                    return False
        elif isinstance(queries, str):
            try:
                cursor.execute(queries)
            except sqlite3.Error:
                logger.error("SQLite query execution error. query: %s" % queries)
                con.close()
                return False
        else:
            # Previously an unsupported type was printed and the commit ran
            # anyway; treat it as an error instead.
            logger.error("Unsupported queries type: %s" % type(queries))
            con.close()
            return False
        con.commit()
        con.close()
        return
|
[
"jbc0729@gmail.com"
] |
jbc0729@gmail.com
|
487ea60364bea7fbadc50bc1b2fd1c36512a6d7c
|
e9c8094407c351919cc765990dc2b4907d7dc986
|
/CRC/check_district_functionality.py
|
ad1b478d278355540dd1826145d71c1e27efd42a
|
[] |
no_license
|
chetandg123/System_Test
|
343991e37d90c9ae25dbdd9ea06944483e071f33
|
5c8875e298f31dd3feb0726d3967bca7a7daea0a
|
refs/heads/master
| 2022-10-11T13:42:32.537274
| 2020-06-04T16:45:12
| 2020-06-04T16:45:12
| 269,412,983
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 229
|
py
|
import time
import unittest
class CRC(unittest.TestCase):
    """Smoke test that the 'District Functionality' option is selected."""

    def setUp(self):
        # Fixed wait before each test -- presumably gives the system under
        # test time to settle; TODO confirm and replace with an explicit wait.
        time.sleep(15)

    def test_query(self):
        print("District Functionality is selected")

    def tearDown(self):
        # Fixed wait after each test, mirroring setUp.
        time.sleep(15)
|
[
"sdevaraja85@gmail.com"
] |
sdevaraja85@gmail.com
|
31c69697e3c23566e5008b2f757956ca5be41372
|
d725d4909a144f3218067c78e0339df781ba8145
|
/src/plot_utils.py
|
cadda8ff609970ad7cbbf8f12682808aac707c01
|
[
"Apache-2.0"
] |
permissive
|
dhermes/phd-thesis
|
05b101aa93c9d8aa72cc069d29ba3b9d3f2384dc
|
732c75b4258e6f41b2dafb2929f0e3dbd380239b
|
refs/heads/master
| 2021-06-13T04:22:37.265874
| 2019-11-16T16:35:22
| 2019-11-16T16:35:22
| 139,187,875
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,463
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared utilities and settings for plotting."""
import fractions
import math
import os
import seaborn
# As of ``0.9.0``, this palette has (BLUE, ORANGE, GREEN, RED, PURPLE, BROWN).
_COLORS = seaborn.color_palette(palette="deep", n_colors=6)
BLUE = _COLORS[0]
GREEN = _COLORS[2]
RED = _COLORS[3]
PURPLE = _COLORS[4]
del _COLORS  # only the named colors above are part of the module API
TEXT_SIZE = 10  # NOTE: Thesis text uses 12 point.
TICK_SIZE = 7
def set_styles():
    """Set the styles used for plotting (seaborn 'white' background)."""
    seaborn.set(style="white")
def get_path(*parts):
    """Get a file path in the ``images/`` directory.

    This assumes the script is currently in the ``src/``
    directory.
    """
    src_dir = os.path.dirname(os.path.abspath(__file__))
    repo_root = os.path.dirname(src_dir)
    return os.path.join(repo_root, "images", *parts)
def binomial(n, k):
    """Return ``n choose k`` as a float, requiring an exact representation."""
    exact = fractions.Fraction(
        math.factorial(n), math.factorial(k) * math.factorial(n - k)
    )
    as_float = float(exact)
    # Reject coefficients that do not round-trip through double precision.
    if as_float != exact:
        raise ValueError("Cannot be represented exactly")
    return as_float
def next_float(value, greater=True):
    """Gets the next (or previous) floating point value."""
    frac, exponent = math.frexp(value)
    # The spacing of floats halves at an exact power of two, so the ULP to
    # add depends on whether ``frac`` sits on that boundary for the chosen
    # direction.
    if greater:
        ulp = 0.5 ** 54 if frac == -0.5 else 0.5 ** 53
    else:
        ulp = -(0.5 ** 54) if frac == 0.5 else -(0.5 ** 53)
    return (frac + ulp) * 2.0 ** exponent
def to_float(v):
    """Converts an MPF (``mpmath`` float) to a ``float``."""
    rounded = float(v)
    if rounded == v:
        return rounded
    # ``v`` falls strictly between two adjacent doubles; bracket it and
    # pick the closer neighbour (ties go to the upper one).
    if rounded < v:
        below, above = rounded, next_float(rounded, greater=True)
    else:
        below, above = next_float(rounded, greater=False), rounded
    if v - below < above - v:
        return below
    return above
|
[
"daniel.j.hermes@gmail.com"
] |
daniel.j.hermes@gmail.com
|
568db6eb13884c292c10227362bb121ed53af89d
|
4e4c22dfabb1a0fa89f0f51f58737273412a30e0
|
/audit/backend/ssh_interactive.py
|
41119aecffd21653ed1bf7b9824da53cc1eeb199
|
[] |
no_license
|
shaoqianliang/fort_machine
|
4cb271d5ef29c924c09172ff397e2af8562ee4ba
|
cf7e3d4c6682831ce04bcde478930ab7e85abb01
|
refs/heads/master
| 2020-04-28T15:24:02.056674
| 2019-04-12T23:50:35
| 2019-04-12T23:50:35
| 175,372,042
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,469
|
py
|
#!/usr/bin/env python
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
import base64
from binascii import hexlify
import getpass
import os
import select
import socket
import sys
import time
import traceback
from paramiko.py3compat import input
from audit import models
import paramiko
try:
import interactive
except ImportError:
from . import interactive
def ssh_session(bind_host_user, user_obj):
    """Run an audited interactive SSH session for *user_obj* on the host
    described by *bind_host_user*.

    Flow: raw TCP connect -> paramiko transport negotiation -> host-key
    verification against the local known_hosts file -> authentication
    (delegated to ``manual_auth`` when the transport is not yet
    authenticated) -> interactive PTY shell, with the traffic recorded
    against a new ``SessionLog`` row for auditing.

    NOTE(review): every failure path calls ``sys.exit(1)``, terminating the
    whole process rather than raising -- confirm that is intended when this
    runs inside a long-lived service.
    """
    # now connect
    hostname = bind_host_user.host.ip_addr
    port = bind_host_user.host.port
    username = bind_host_user.host_user.username
    password = bind_host_user.host_user.password
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((hostname, port))
    except Exception as e:
        print('*** Connect failed: ' + str(e))
        traceback.print_exc()
        sys.exit(1)
    try:
        t = paramiko.Transport(sock)
        try:
            t.start_client()
        except paramiko.SSHException:
            print('*** SSH negotiation failed.')
            sys.exit(1)
        # Load the user's known_hosts so the server key can be verified;
        # fall back to an empty mapping when no file is readable.
        try:
            keys = paramiko.util.load_host_keys(os.path.expanduser('~/.ssh/known_hosts'))
        except IOError:
            try:
                keys = paramiko.util.load_host_keys(os.path.expanduser('~/ssh/known_hosts'))
            except IOError:
                print('*** Unable to open host keys file')
                keys = {}
        # check server's host key -- this is important.
        key = t.get_remote_server_key()
        if hostname not in keys:
            print('*** WARNING: Unknown host key!')
        elif key.get_name() not in keys[hostname]:
            print('*** WARNING: Unknown host key!')
        elif keys[hostname][key.get_name()] != key:
            # A changed key may indicate a man-in-the-middle: abort.
            print('*** WARNING: Host key has changed!!!')
            sys.exit(1)
        else:
            print('*** Host key OK.')
        if not t.is_authenticated():
            manual_auth(t, username, password)
        if not t.is_authenticated():
            print('*** Authentication failed. :(')
            t.close()
            sys.exit(1)
        chan = t.open_session()
        chan.get_pty()  # terminal
        chan.invoke_shell()
        print('*** Here we go!\n')
        # Record the session so the interactive traffic can be audited later.
        session_obj = models.SessionLog.objects.create(account=user_obj.account,
                                                       host_user_bind=bind_host_user)
        interactive.interactive_shell(chan, session_obj)
        chan.close()
        t.close()
    except Exception as e:
        print('*** Caught exception: ' + str(e.__class__) + ': ' + str(e))
        traceback.print_exc()
        try:
            t.close()
        except:
            pass
        sys.exit(1)
|
[
"1132424753@qq.com"
] |
1132424753@qq.com
|
6b886ff3e8ceaf9a22c3d83ecd4644649ed20e2b
|
d6411d6d766adf97490b5229780952a23a3ec93e
|
/exportToBox2D.py
|
520e5c8b2251d0a95da09748a244b69b76da98c3
|
[] |
no_license
|
NCCA/Box2DExport
|
22db2dfa4d934a3cb19cb8ca3dc6e98d4c8f5f70
|
a81d2a6c02aae0cc45ff573fb8ab625c9cd5454d
|
refs/heads/main
| 2022-11-18T20:23:52.118572
| 2022-11-16T17:03:02
| 2022-11-16T17:03:02
| 24,465,700
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,426
|
py
|
import maya.OpenMaya as OM
import maya.OpenMayaAnim as OMA
import maya.OpenMayaMPx as OMX
import maya.cmds as cmds
import sys, math
structure = """
typedef struct
{
std::string name;
float tx;
float ty;
float width;
float height;
b2BodyType type;
}Body; \n
"""


def exportBox2D():
    """Export every mesh transform in the current Maya scene to a C header
    describing Box2D bodies.

    The output file contains the ``Body`` struct definition (the module
    level ``structure`` string) followed by a ``bodies[]`` initialiser with
    one (name, tx, ty, width, height, type) entry per mesh transform; the
    type is read from a custom ``Box2D`` attribute on each transform.
    """
    # basicFilter = "*.b2d"
    # file = cmds.fileDialog2(caption="Please select file to save",
    #                         fileFilter=basicFilter, dialogStyle=2)
    # fileDialog2 returns a *list* of paths, so the hard-coded fallback must
    # be a list as well: the original bare string made ``file[0]`` evaluate
    # to "t", silently writing the export to a file named "t".
    file = ["test.bd2"]
    if file:
        dagIt = OM.MItDag(OM.MItDag.kDepthFirst, OM.MFn.kTransform)
        ofile = open(file[0], 'w')
        ofile.write(structure)
        ofile.write('\n\nBody bodies[]={\n')
        numBodies = 0
        while not dagIt.isDone():
            node = dagIt.currentItem()
            if node.apiTypeStr() == "kTransform":
                fn = OM.MFnTransform(node)
                child = fn.child(0)
                # Only transforms whose first child is a mesh are exported.
                if child.apiTypeStr() == "kMesh":
                    name = fn.name()
                    ofile.write('\t{ "%s",' % (name))
                    x = cmds.getAttr("%s.translateX" % (name))
                    ofile.write('%sf,' % (x))
                    y = cmds.getAttr("%s.translateY" % (name))
                    ofile.write('%sf,' % (y))
                    width = cmds.getAttr("%s.scaleX" % (name))
                    ofile.write('%sf,' % (width))
                    height = cmds.getAttr("%s.scaleY" % (name))
                    ofile.write('%sf,' % (height))
                    bodyType = cmds.getAttr("%s.Box2D" % (name))
                    ofile.write('%s },\n' % (bodyType))
                    numBodies = numBodies + 1
            dagIt.next()
        ofile.write("};\n")
        ofile.close()


exportBox2D()
|
[
"jmacey@bournemouth.ac.uk"
] |
jmacey@bournemouth.ac.uk
|
7c579b4d5629b63b895546455d32a4745a7ba2ee
|
35351364eef7f058b358141aca6f3b74717841b8
|
/src/taxpasta/infrastructure/application/kraken2/kraken2_profile_standardisation_service.py
|
3d1268f8a406dc1afb07d7f990d8d53b7e29b3f8
|
[
"Apache-2.0"
] |
permissive
|
taxprofiler/taxpasta
|
75f59c4bb234be9e93418d7eeaadfd73865e0df3
|
98713deaeec2e92b2f020860d264bccc9a25dbd1
|
refs/heads/dev
| 2023-08-31T15:04:17.971556
| 2023-08-24T20:01:50
| 2023-08-24T20:01:50
| 499,589,621
| 21
| 8
|
Apache-2.0
| 2023-09-10T07:49:26
| 2022-06-03T17:06:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,871
|
py
|
# Copyright (c) 2022 Moritz E. Beber
# Copyright (c) 2022 Maxime Borry
# Copyright (c) 2022 James A. Fellows Yates
# Copyright (c) 2022 Sofia Stamouli.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide a standardisation service for kraken2 profiles."""
import pandera as pa
from pandera.typing import DataFrame
from taxpasta.application.service import ProfileStandardisationService
from taxpasta.domain.model import StandardProfile
from .kraken2_profile import Kraken2Profile
class Kraken2ProfileStandardisationService(ProfileStandardisationService):
    """Define a standardisation service for kraken2 profiles."""

    @classmethod
    @pa.check_types(lazy=True)  # validate input/output schemas, collecting all errors
    def transform(
        cls, profile: DataFrame[Kraken2Profile]
    ) -> DataFrame[StandardProfile]:
        """
        Tidy up and standardize a given kraken2 profile.

        Args:
            profile: A taxonomic profile generated by kraken2.

        Returns:
            A standardized profile.

        """
        # Keep only the taxonomy identifier and the directly assigned read
        # count, then rename both columns to the standard schema.  The
        # ``.copy()`` detaches the two-column selection from the original
        # frame before the rename.
        return (
            profile[[Kraken2Profile.taxonomy_id, Kraken2Profile.direct_assigned_reads]]
            .copy()
            .rename(
                columns={
                    Kraken2Profile.taxonomy_id: StandardProfile.taxonomy_id,
                    Kraken2Profile.direct_assigned_reads: StandardProfile.count,
                }
            )
        )
|
[
"midnighter@posteo.net"
] |
midnighter@posteo.net
|
0275e902be4106106025a6572c63ae75e2419353
|
21b5ad37b812ed78799d4efc1649579cc83d32fb
|
/career_advice/migrations/0005_merge_20200412_0918.py
|
da2433e841795a5ffac4c98c6ade0117d9040f76
|
[] |
no_license
|
SaifulAbir/django-js-api
|
b6f18c319f8109884e71095ad49e08e50485bb25
|
fbf174b9cde2e7d25b4898f511df9c6f96d406cf
|
refs/heads/master
| 2023-02-12T16:09:21.508702
| 2021-01-14T09:05:15
| 2021-01-14T09:05:15
| 329,713,528
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
# Generated by Django 3.0.3 on 2020-04-12 09:18
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration reconciling the two divergent ``0004`` branches of the
    ``career_advice`` app; it intentionally contains no operations."""

    dependencies = [
        ('career_advice', '0004_auto_20200406_0429'),
        ('career_advice', '0004_auto_20200408_0830'),
    ]

    operations = [
    ]
|
[
"rashed@ishraak.com"
] |
rashed@ishraak.com
|
5f5e3209d2fceeaecaa1b2c52f3cb5efe5bf924d
|
cdd33a31d5b57a4a02803dded5e96a815fbb06d7
|
/examples/dagster_examples_tests/test_examples.py
|
502cc68abe72b5555628b27e6a1738166a9790cd
|
[
"Apache-2.0"
] |
permissive
|
david-alexander-white/dagster
|
4f177c167150316a5056901aa2522ab778d1d163
|
1c341500bb2380e14873b59b7e25503270188bda
|
refs/heads/master
| 2020-12-07T04:40:02.676080
| 2020-01-06T17:37:40
| 2020-01-07T22:19:01
| 232,633,648
| 1
| 0
|
Apache-2.0
| 2020-01-08T18:42:28
| 2020-01-08T18:42:27
| null |
UTF-8
|
Python
| false
| false
| 679
|
py
|
from __future__ import print_function
from click.testing import CliRunner
from dagster.cli.pipeline import execute_list_command, pipeline_list_command
from dagster.utils import script_relative_path
def no_print(_):
    """Printer callback that accepts one argument and prints nothing."""
    return None
def test_list_command():
    """Smoke-test pipeline listing via both the Python API and the CLI."""
    runner = CliRunner()
    # Direct API call; output is discarded through the no_print callback.
    execute_list_command(
        {
            'repository_yaml': script_relative_path('../repository.yaml'),
            'python_file': None,
            'module_name': None,
            'fn_name': None,
        },
        no_print,
    )
    # Same listing through the click CLI entry point; only the exit code is
    # asserted.
    result = runner.invoke(
        pipeline_list_command, ['-y', script_relative_path('../repository.yaml')]
    )
    assert result.exit_code == 0
|
[
"s@thupukari.com"
] |
s@thupukari.com
|
40816b4509834099df0a36e835300937d0875954
|
63e0b2a87237df482f559e428c068fb0bdae3786
|
/python/tts_aksk_demo.py
|
49a804c8c915b5d56eab9d7353895a66a8047692
|
[
"Apache-2.0"
] |
permissive
|
liwenxiang/ais-sdk
|
b547a16de630073e7552aad7425405d1b91d1a7e
|
76240abc49795e914988f3cafb6d08f60dbdcb4c
|
refs/heads/master
| 2020-05-13T21:54:56.771333
| 2019-04-04T10:06:58
| 2019-04-04T10:06:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 813
|
py
|
# -*- coding:utf-8 -*-
from ais_sdk.utils import decode_to_wave_file
from ais_sdk.tts import tts_aksk
import json
if __name__ == '__main__':
    #
    # access text to speech, post data by token
    #
    # Placeholder credentials -- replace with a real AK/SK pair before
    # running.
    app_key = '*************'
    app_secret = '************'
    # call interface use the default config
    result = tts_aksk(app_key, app_secret, '语音合成为你的业务增加交互的能力.')
    result_obj = json.loads(result)
    # The response carries the synthesized audio in result.data; decode it
    # to a .wav file on disk.
    decode_to_wave_file(result_obj['result']['data'], 'data/tts_use_aksk_default_config.wav')
    # call interface use the specific config -- extra args look like voice
    # name, a property flag and the sample rate; TODO confirm against the
    # ais_sdk documentation.
    result = tts_aksk(app_key, app_secret, '这里是语音合成的测试。', 'xiaoyu', '0', '16k')
    result_obj = json.loads(result)
    decode_to_wave_file(result_obj['result']['data'], 'data/tts_use_aksk_specific_config.wav')
|
[
"17091412@qq.com"
] |
17091412@qq.com
|
82c96800c7360a392e36d9f829b797033956880b
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/MEDIUM/YW_ZXBMM_SZXJ_097.py
|
3725e08c3c5e6aebcbf3e46bfbad955ec13ddc7d
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,073
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_ZXBMM_SZXJ_097(xtp_test_case):
    # Case YW_ZXBMM_SZXJ_097: Shenzhen A-share limit-price sell order on a
    # trading day with an invalid quantity (> 1,000,000 shares); the order
    # is expected to be rejected as invalid with error 11000107.
    def test_YW_ZXBMM_SZXJ_097(self):
        title = '深圳A股股票交易日限价委托卖-错误的数量(数量>100万)'
        # Expected values for this test case.
        # Possible order states: initial, unfilled, partially filled, fully
        # filled, partial-cancel reported, partially cancelled, cancel
        # pending, cancelled, invalid, cancel-invalid, internal cancel.
        # xtp_ID and cancel_xtpID default to 0 and need not be changed.
        case_goal = {
            '期望状态': '废单',
            'errorID': 11000107,
            'errorMSG': queryOrderErrorMsg(11000107),
            '是否生成报单': '是',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Build the order parameters ------------------------------------
        # Args: ticker, market, security type, security status, trading
        # status, side (B buy / S sell), expected state, Api.
        stkparm = QueryStkPriceQty('999999', '2', '1', '2', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters failed, the case fails outright.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            self.assertEqual(rs['用例测试结果'], True)
        else:
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id': 2,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
                'price': stkparm['随机中间价'],
                # Deliberately above the 1,000,000-share limit under test.
                'quantity': 1000100,
                'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
            self.assertEqual(rs['用例测试结果'], True)  # 0


if __name__ == '__main__':
    unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
51f1669223491d79b767c54eee33c490e7696ab8
|
a06b1f68a43622c21b1dbdd8680f21d588a45219
|
/theory/espim/2D/plate/FSDT/transverse_shear_edge_based/stiffmatrices.py
|
560948f4d2b1b61c7f3a8349af531c9f91bf3667
|
[
"BSD-2-Clause"
] |
permissive
|
i5misswrong/meshless
|
6eac7e7ddbe51160ee37358ce36525b26b6c6843
|
27f9729050cedec2d7c1a716104d068608827c0f
|
refs/heads/master
| 2021-01-15T22:51:33.502229
| 2017-07-05T13:59:17
| 2017-07-05T13:59:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,528
|
py
|
import sympy
from sympy import Matrix
from meshless.sympytools import print_as_sparse, print_as_array, print_as_full
# Symbolic derivation (sympy) of smoothed stiffness matrices for an FSDT
# plate in the edge-based smoothed method, emitted as code via
# print_as_full.  DOF order per node is (u, v, w, phix, phiy).  Judging
# from how they enter the matrices below: fij are shape-function values,
# (nxi, nyi) edge normals, lei edge lengths and Ac the smoothing-cell
# area -- TODO confirm against the meshless package's formulation.
sympy.var('nx1, ny1')
sympy.var('nx2, ny2')
sympy.var('nx3, ny3')
sympy.var('nx4, ny4')
sympy.var('f11, f12, f13, f14')
sympy.var('f21, f22, f23, f24')
sympy.var('f31, f32, f33, f34')
sympy.var('f41, f42, f43, f44')
sympy.var('A11, A12, A16, A22, A26, A66')
sympy.var('B11, B12, B16, B22, B26, B66')
sympy.var('D11, D12, D16, D22, D26, D66')
sympy.var('E44, E45, E55')
sympy.var('le1, le2, le3, le4, Ac')

# Row vectors picking out one DOF of the 4 nodes, weighted by the shape
# functions evaluated at point i (fi1..fi4); 20 columns = 4 nodes x 5 DOF.
su1 = Matrix([[f11, 0, 0, 0, 0, f12, 0, 0, 0, 0, f13, 0, 0, 0, 0, f14, 0, 0, 0, 0]])
sv1 = Matrix([[0, f11, 0, 0, 0, 0, f12, 0, 0, 0, 0, f13, 0, 0, 0, 0, f14, 0, 0, 0]])
sw1 = Matrix([[0, 0, f11, 0, 0, 0, 0, f12, 0, 0, 0, 0, f13, 0, 0, 0, 0, f14, 0, 0]])
sphix1 = Matrix([[0, 0, 0, f11, 0, 0, 0, 0, f12, 0, 0, 0, 0, f13, 0, 0, 0, 0, f14, 0]])
sphiy1 = Matrix([[0, 0, 0, 0, f11, 0, 0, 0, 0, f12, 0, 0, 0, 0, f13, 0, 0, 0, 0, f14]])
su2 = Matrix([[f21, 0, 0, 0, 0, f22, 0, 0, 0, 0, f23, 0, 0, 0, 0, f24, 0, 0, 0, 0]])
sv2 = Matrix([[0, f21, 0, 0, 0, 0, f22, 0, 0, 0, 0, f23, 0, 0, 0, 0, f24, 0, 0, 0]])
sw2 = Matrix([[0, 0, f21, 0, 0, 0, 0, f22, 0, 0, 0, 0, f23, 0, 0, 0, 0, f24, 0, 0]])
sphix2 = Matrix([[0, 0, 0, f21, 0, 0, 0, 0, f22, 0, 0, 0, 0, f23, 0, 0, 0, 0, f24, 0]])
sphiy2 = Matrix([[0, 0, 0, 0, f21, 0, 0, 0, 0, f22, 0, 0, 0, 0, f23, 0, 0, 0, 0, f24]])
su3 = Matrix([[f31, 0, 0, 0, 0, f32, 0, 0, 0, 0, f33, 0, 0, 0, 0, f34, 0, 0, 0, 0]])
sv3 = Matrix([[0, f31, 0, 0, 0, 0, f32, 0, 0, 0, 0, f33, 0, 0, 0, 0, f34, 0, 0, 0]])
sw3 = Matrix([[0, 0, f31, 0, 0, 0, 0, f32, 0, 0, 0, 0, f33, 0, 0, 0, 0, f34, 0, 0]])
sphix3 = Matrix([[0, 0, 0, f31, 0, 0, 0, 0, f32, 0, 0, 0, 0, f33, 0, 0, 0, 0, f34, 0]])
sphiy3 = Matrix([[0, 0, 0, 0, f31, 0, 0, 0, 0, f32, 0, 0, 0, 0, f33, 0, 0, 0, 0, f34]])
su4 = Matrix([[f41, 0, 0, 0, 0, f42, 0, 0, 0, 0, f43, 0, 0, 0, 0, f44, 0, 0, 0, 0]])
sv4 = Matrix([[0, f41, 0, 0, 0, 0, f42, 0, 0, 0, 0, f43, 0, 0, 0, 0, f44, 0, 0, 0]])
sw4 = Matrix([[0, 0, f41, 0, 0, 0, 0, f42, 0, 0, 0, 0, f43, 0, 0, 0, 0, f44, 0, 0]])
sphix4 = Matrix([[0, 0, 0, f41, 0, 0, 0, 0, f42, 0, 0, 0, 0, f43, 0, 0, 0, 0, f44, 0]])
sphiy4 = Matrix([[0, 0, 0, 0, f41, 0, 0, 0, 0, f42, 0, 0, 0, 0, f43, 0, 0, 0, 0, f44]])

# Laminate constitutive matrices: membrane (A), membrane-bending coupling
# (B), bending (D) and transverse shear (E).
A = Matrix([[A11, A12, A16],
            [A12, A22, A26],
            [A16, A26, A66]])
B = Matrix([[B11, B12, B16],
            [B12, B22, B26],
            [B16, B26, B66]])
D = Matrix([[D11, D12, D16],
            [D12, D22, D26],
            [D16, D26, D66]])
E = Matrix([[E44, E45],
            [E45, E55]])

# membrane: smoothed strain-displacement matrix, assembled as 1/Ac times
# the sum of edge-length-weighted, normal-projected shape-function rows.
Bm = 1/Ac * (
    le1*Matrix([nx1*su1,
                ny1*sv1,
                ny1*su1 + nx1*sv1])
    + le2*Matrix([nx2*su2,
                  ny2*sv2,
                  ny2*su2 + nx2*sv2])
    + le3*Matrix([nx3*su3,
                  ny3*sv3,
                  ny3*su3 + nx3*sv3])
    + le4*Matrix([nx4*su4,
                  ny4*sv4,
                  ny4*su4 + nx4*sv4])
)
# bending: same edge-based smoothing applied to the rotation DOFs.
Bb = 1/Ac * (
    le1*Matrix([nx1*sphix1,
                ny1*sphiy1,
                ny1*sphix1 + nx1*sphiy1])
    + le2*Matrix([nx2*sphix2,
                  ny2*sphiy2,
                  ny2*sphix2 + nx2*sphiy2])
    + le3*Matrix([nx3*sphix3,
                  ny3*sphiy3,
                  ny3*sphix3 + nx3*sphiy3])
    + le4*Matrix([nx4*sphix4,
                  ny4*sphiy4,
                  ny4*sphix4 + nx4*sphiy4])
)
# Membrane + coupling + bending stiffness integrated over the cell area.
K = Ac*(Bm.transpose() * A * Bm
        + Bm.transpose() * B * Bb
        + Bb.transpose() * B * Bm
        + Bb.transpose() * D * Bb)

print_as_full(K, 'k0', dofpernode=5)

# transverse shear terms: each smoothing cell is split into sub-triangles
# (a/b/c/d, Ac are per-triangle geometric quantities).
sympy.var('a1, b1, c1, d1, Ac1')
sympy.var('a2, b2, c2, d2, Ac2')
# Tria1: mid1 -> node1 -> node2
# Tria2: node1 -> mid2 -> node2
#mid 1
Tria1Bs1 = 1/(2*Ac1) * Matrix([
    [0, 0, b1-d1, Ac1, 0],
    [0, 0, c1-a1, 0, Ac1]])
#node 1
Tria1Bs2 = 1/(2*Ac1) * Matrix([
    [0, 0, d1, a1*d1/2, b1*d1/2],
    [0, 0, -c1, -a1*c1/2, -b1*c1/2]])
#node 2
Tria1Bs3 = 1/(2*Ac1) * Matrix([
    [0, 0, -b1, -b1*c1/2, -b1*d1/2],
    [0, 0, a1, a1*c1/2, a1*d1/2]])
#node 1
Tria2Bs1 = 1/(2*Ac2) * Matrix([
    [0, 0, b2-d2, Ac2, 0],
    [0, 0, c2-a2, 0, Ac2]])
#mid 2
Tria2Bs2 = 1/(2*Ac2) * Matrix([
    [0, 0, d2, a2*d2/2, b2*d2/2],
    [0, 0, -c2, -a2*c2/2, -b2*c2/2]])
#node 2
Tria2Bs3 = 1/(2*Ac2) * Matrix([
    [0, 0, -b2, -b2*c2/2, -b2*d2/2],
    [0, 0, a2, a2*c2/2, a2*d2/2]])
ZERO = Tria1Bs1*0
# Column layout: node 1, node 2, other 1, other 2.  The mid-point
# contribution is distributed equally (1/3) to the triangle's nodes.
BsTria1 = Matrix([Tria1Bs2.T + 1/3*Tria1Bs1.T, Tria1Bs3.T + 1/3*Tria1Bs1.T, 1/3*Tria1Bs1.T, ZERO.T ]).T
BsTria2 = Matrix([Tria2Bs1.T + 1/3*Tria2Bs2.T, Tria2Bs3.T + 1/3*Tria2Bs2.T, ZERO.T , 1/3*Tria2Bs2.T]).T
# Area-weighted average of the two sub-triangle shear B-matrices.
Bs = 1/Ac*(Ac1*BsTria1 + Ac2*BsTria2)
K = Ac*Bs.transpose()*E*Bs
print_as_full(K, 'k0s_interior_edge', dofpernode=5)

# Boundary edge: only one sub-triangle exists, and the cell area Ac is the
# triangle area.
#mid 1
Tria1Bs1 = 1/(2*Ac) * Matrix([
    [0, 0, b1-d1, Ac, 0],
    [0, 0, c1-a1, 0, Ac]])
#node 1
Tria1Bs2 = 1/(2*Ac) * Matrix([
    [0, 0, d1, a1*d1/2, b1*d1/2],
    [0, 0, -c1, -a1*c1/2, -b1*c1/2]])
#node 2
Tria1Bs3 = 1/(2*Ac) * Matrix([
    [0, 0, -b1, -b1*c1/2, -b1*d1/2],
    [0, 0, a1, a1*c1/2, a1*d1/2]])
# Column layout: node 1, node 2, other 1.
BsTria1 = Matrix([Tria1Bs2.T + 1/3*Tria1Bs1.T, Tria1Bs3.T + 1/3*Tria1Bs1.T, 1/3*Tria1Bs1.T]).T
Bs = BsTria1
K = Ac*Bs.transpose()*E*Bs
print_as_full(K, 'k0s_boundary_edge', dofpernode=5)
|
[
"saullogiovani@gmail.com"
] |
saullogiovani@gmail.com
|
afdbe16747b31d11aa6ffd23da8bdb456945b5a7
|
c36b028acbcb8c7416c13e011dd1f6fef3825a6b
|
/treehand.py
|
12c71618d86a403dd284c85d8930b837919e52bd
|
[] |
no_license
|
pglen/dbgui
|
32184e1bd27dad001e52d93bb5a3feb088168921
|
790a9dd3fe9d30399550faef246d2e83467636bd
|
refs/heads/master
| 2023-01-21T08:46:05.598868
| 2023-01-10T15:21:34
| 2023-01-10T15:21:34
| 153,726,394
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,690
|
py
|
#!/usr/bin/env python
from __future__ import print_function
import os, sys, getopt, signal
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GObject
from gi.repository import GLib
class TreeHand():
    """Wraps a single-column Gtk.TreeView backed by a Gtk.TreeStore inside a
    scrolled window, with helpers to (re)populate the tree."""

    def __init__(self, tree_sel_row):
        # tree_sel_row: callback wired to the view's "cursor-changed"
        # signal (fired whenever the selected row changes).
        self.treestore = None
        # NOTE(review): create_tree's `match` parameter receives `self`
        # here but is never used inside the method.
        self.tree = self.create_tree(self)
        self.tree.set_headers_visible(False)
        self.tree.get_selection().set_mode(Gtk.SelectionMode.MULTIPLE)
        self.stree = Gtk.ScrolledWindow()
        self.stree.add(self.tree)
        self.tree.connect("cursor-changed", tree_sel_row)

    # Tree handlers
    def start_tree(self):
        """Reset the store (creating it on first use) to a 'Loading ..'
        placeholder row with a 'None ..' child."""
        if not self.treestore:
            self.treestore = Gtk.TreeStore(str)
        # Delete previous contents: remove(None) raises once the store is
        # empty, which terminates the loop.
        try:
            while True:
                root = self.treestore.get_iter_first()
                self.treestore.remove(root)
        except:
            #print( sys.exc_info())
            pass
        piter = self.treestore.append(None, ["Loading .."])
        self.treestore.append(piter, ["None .."])

    # -------------------------------------------------------------------------
    def create_tree(self, match, text = None):
        """Build the TreeView with one text column over the (re)started
        store; `match` and `text` are currently unused."""
        self.start_tree()
        tv = Gtk.TreeView(self.treestore)
        tv.set_enable_search(True)
        cell = Gtk.CellRendererText()
        tvcolumn = Gtk.TreeViewColumn()
        tvcolumn.pack_start(cell, True)
        tvcolumn.add_attribute(cell, 'text', 0)
        tv.append_column(tvcolumn)
        return tv

    def update_treestore(self, text):
        """Replace the tree contents with one top-level row per item of
        *text*; shows 'No Match' when *text* is empty, then moves the
        cursor to the top row."""
        #print( "was", was)
        # Delete previous contents (same remove-until-exception idiom as
        # in start_tree).
        try:
            while True:
                root = self.treestore.get_iter_first()
                self.treestore.remove(root)
        except:
            pass
            #print( sys.exc_info() )
        if not text:
            self.treestore.append(None, ["No Match",])
            return
        cnt = 0; piter2 = None; next = False
        # NOTE(review): `next` shadows the builtin, and with the
        # `cnt == was` check commented out piter2 can never be set, so the
        # else branch below (cursor to first row) is always taken.
        try:
            for line in text:
                piter = self.treestore.append(None, [line])
                if next:
                    next = False; piter2 = piter
                #if cnt == was:
                #    next = True
                cnt += 1
        except:
            pass
            #print( sys.exc_info())
        if piter2:
            self.tree.set_cursor(self.treestore.get_path(piter2))
        else:
            root = self.treestore.get_iter_first()
            self.tree.set_cursor(self.treestore.get_path(root))

    def append_treestore(self, text):
        """Append *text* as a new top-level row."""
        piter = self.treestore.append(None, [text])
# EOF
|
[
"peterglen99@gmail.com"
] |
peterglen99@gmail.com
|
b2eec97a9e55d61e6b24331daca7712bdc299e93
|
b15d2787a1eeb56dfa700480364337216d2b1eb9
|
/accelbyte_py_sdk/api/ugc/operations/public_channel/public_create_channel.py
|
ede3403280ff18ce2acd0694c96810b63b78a3ae
|
[
"MIT"
] |
permissive
|
AccelByte/accelbyte-python-sdk
|
dedf3b8a592beef5fcf86b4245678ee3277f953d
|
539c617c7e6938892fa49f95585b2a45c97a59e0
|
refs/heads/main
| 2023-08-24T14:38:04.370340
| 2023-08-22T01:08:03
| 2023-08-22T01:08:03
| 410,735,805
| 2
| 1
|
MIT
| 2022-08-02T03:54:11
| 2021-09-27T04:00:10
|
Python
|
UTF-8
|
Python
| false
| false
| 8,118
|
py
|
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: ags_py_codegen
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
# AccelByte Gaming Services Ugc Service (2.11.3)
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HeaderStr
from .....core import HttpResponse
from ...models import ModelsChannelResponse
from ...models import ModelsPublicChannelRequest
from ...models import ResponseError
class PublicCreateChannel(Operation):
"""Create Channel (PublicCreateChannel)
Required permission NAMESPACE:{namespace}:USER:{userId}:CHANNEL [CREATE]
Required Permission(s):
- NAMESPACE:{namespace}:USER:{userId}:CHANNEL [CREATE]
Properties:
url: /ugc/v1/public/namespaces/{namespace}/users/{userId}/channels
method: POST
tags: ["Public Channel"]
consumes: ["application/json", "application/octet-stream"]
produces: ["application/json"]
securities: [BEARER_AUTH]
body: (body) REQUIRED ModelsPublicChannelRequest in body
namespace: (namespace) REQUIRED str in path
user_id: (userId) REQUIRED str in path
Responses:
201: Created - ModelsChannelResponse (Created)
400: Bad Request - ResponseError (Bad Request)
401: Unauthorized - ResponseError (Unauthorized)
500: Internal Server Error - ResponseError (Internal Server Error)
"""
# region fields
_url: str = "/ugc/v1/public/namespaces/{namespace}/users/{userId}/channels"
_method: str = "POST"
_consumes: List[str] = ["application/json", "application/octet-stream"]
_produces: List[str] = ["application/json"]
_securities: List[List[str]] = [["BEARER_AUTH"]]
_location_query: str = None
body: ModelsPublicChannelRequest # REQUIRED in [body]
namespace: str # REQUIRED in [path]
user_id: str # REQUIRED in [path]
# endregion fields
# region properties
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def securities(self) -> List[List[str]]:
return self._securities
@property
def location_query(self) -> str:
return self._location_query
# endregion properties
# region get methods
# endregion get methods
# region get_x_params methods
def get_all_params(self) -> dict:
return {
"body": self.get_body_params(),
"path": self.get_path_params(),
}
def get_body_params(self) -> Any:
if not hasattr(self, "body") or self.body is None:
return None
return self.body.to_dict()
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
if hasattr(self, "user_id"):
result["userId"] = self.user_id
return result
# endregion get_x_params methods
# region is/has methods
# endregion is/has methods
# region with_x methods
def with_body(self, value: ModelsPublicChannelRequest) -> PublicCreateChannel:
self.body = value
return self
def with_namespace(self, value: str) -> PublicCreateChannel:
self.namespace = value
return self
def with_user_id(self, value: str) -> PublicCreateChannel:
self.user_id = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "body") and self.body:
result["body"] = self.body.to_dict(include_empty=include_empty)
elif include_empty:
result["body"] = ModelsPublicChannelRequest()
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = ""
if hasattr(self, "user_id") and self.user_id:
result["userId"] = str(self.user_id)
elif include_empty:
result["userId"] = ""
return result
# endregion to methods
# region response methods
# noinspection PyMethodMayBeStatic
def parse_response(
self, code: int, content_type: str, content: Any
) -> Tuple[
Union[None, ModelsChannelResponse], Union[None, HttpResponse, ResponseError]
]:
"""Parse the given response.
201: Created - ModelsChannelResponse (Created)
400: Bad Request - ResponseError (Bad Request)
401: Unauthorized - ResponseError (Unauthorized)
500: Internal Server Error - ResponseError (Internal Server Error)
---: HttpResponse (Undocumented Response)
---: HttpResponse (Unexpected Content-Type Error)
---: HttpResponse (Unhandled Error)
"""
pre_processed_response, error = self.pre_process_response(
code=code, content_type=content_type, content=content
)
if error is not None:
return None, None if error.is_no_content() else error
code, content_type, content = pre_processed_response
if code == 201:
return ModelsChannelResponse.create_from_dict(content), None
if code == 400:
return None, ResponseError.create_from_dict(content)
if code == 401:
return None, ResponseError.create_from_dict(content)
if code == 500:
return None, ResponseError.create_from_dict(content)
return self.handle_undocumented_response(
code=code, content_type=content_type, content=content
)
# endregion response methods
# region static methods
@classmethod
def create(
cls, body: ModelsPublicChannelRequest, namespace: str, user_id: str, **kwargs
) -> PublicCreateChannel:
instance = cls()
instance.body = body
instance.namespace = namespace
instance.user_id = user_id
return instance
@classmethod
def create_from_dict(
cls, dict_: dict, include_empty: bool = False
) -> PublicCreateChannel:
instance = cls()
if "body" in dict_ and dict_["body"] is not None:
instance.body = ModelsPublicChannelRequest.create_from_dict(
dict_["body"], include_empty=include_empty
)
elif include_empty:
instance.body = ModelsPublicChannelRequest()
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = ""
if "userId" in dict_ and dict_["userId"] is not None:
instance.user_id = str(dict_["userId"])
elif include_empty:
instance.user_id = ""
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"body": "body",
"namespace": "namespace",
"userId": "user_id",
}
@staticmethod
def get_required_map() -> Dict[str, bool]:
return {
"body": True,
"namespace": True,
"userId": True,
}
# endregion static methods
|
[
"elmernocon@gmail.com"
] |
elmernocon@gmail.com
|
81cdfcc0b542f443cf22c5b6650f06961619aecb
|
4979df3343d7b99a9a826bd1cb946ae79fac260c
|
/tests/core/test_compiler.py
|
4c31895384c81437789919e69b48a45af242ea1f
|
[
"BSD-3-Clause"
] |
permissive
|
e-calder/enaml
|
753ff329fb8a2192bddbe7166581ed530fb270be
|
8f02a3c1a80c0a6930508551c7de1d345095173d
|
refs/heads/master
| 2021-07-30T01:18:29.222672
| 2021-07-27T08:51:50
| 2021-07-27T08:51:50
| 206,089,494
| 0
| 0
|
NOASSERTION
| 2019-09-03T13:52:44
| 2019-09-03T13:52:44
| null |
UTF-8
|
Python
| false
| false
| 1,467
|
py
|
#------------------------------------------------------------------------------
# Copyright (c) 2020, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
import traceback as tb
from textwrap import dedent
import pytest
from utils import compile_source
def test_validate_declarative_1():
""" Test that we reject children that are not type in enamldef.
This also serves to test the good working of try_squash_raise.
"""
source = dedent("""\
from enaml.widgets.api import *
a = 1
enamldef Main(Window):
a:
pass
""")
with pytest.raises(TypeError) as exc:
Main = compile_source(source, 'Main')
ftb = "\n".join(tb.format_tb(exc.tb))
assert " validate_declarative" not in ftb
def test_validate_declarative_2():
""" Test that we reject children that are not declarative in enamldef.
This also serves to test the good working of try_squash_raise.
"""
source = dedent("""\
from enaml.widgets.api import *
class A:
pass
enamldef Main(Window):
A:
pass
""")
with pytest.raises(TypeError) as exc:
Main = compile_source(source, 'Main')
ftb = "\n".join(tb.format_tb(exc.tb))
assert " validate_declarative" not in ftb
|
[
"marul@laposte.net"
] |
marul@laposte.net
|
4aae37809f53da42d03a976e5a6283a33bae61c8
|
334d0a4652c44d0c313e11b6dcf8fb89829c6dbe
|
/checkov/terraform/checks/resource/azure/AzureDefenderOnContainerRegistry.py
|
4a4d616fcf8b8cf44c547f290b395ec7d758874c
|
[
"Apache-2.0"
] |
permissive
|
schosterbarak/checkov
|
4131e03b88ae91d82b2fa211f17e370a6f881157
|
ea6d697de4de2083c8f6a7aa9ceceffd6b621b58
|
refs/heads/master
| 2022-05-22T18:12:40.994315
| 2022-04-28T07:44:05
| 2022-04-28T07:59:17
| 233,451,426
| 0
| 0
|
Apache-2.0
| 2020-03-23T12:12:23
| 2020-01-12T20:07:15
|
Python
|
UTF-8
|
Python
| false
| false
| 994
|
py
|
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
from typing import List
class AzureDefenderOnContainerRegistry(BaseResourceCheck):
def __init__(self):
name = "Ensure that Azure Defender is set to On for Container Registries"
id = "CKV_AZURE_86"
supported_resources = ['azurerm_security_center_subscription_pricing']
categories = [CheckCategories.GENERAL_SECURITY]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf):
return CheckResult.PASSED if conf.get('resource_type', [None])[0] != 'ContainerRegistry' \
or conf.get('tier', [None])[0] == 'Standard' else CheckResult.FAILED
def get_evaluated_keys(self) -> List[str]:
return ['resource_type', 'tier']
check = AzureDefenderOnContainerRegistry()
|
[
"noreply@github.com"
] |
schosterbarak.noreply@github.com
|
54a8323ed3240105110fca78f0cb928d7777c030
|
2234300b2316bc0e7b9fcc28de567c62c98a55b5
|
/setup.py
|
b91c43d560eae77a6cfb609534438a62739b253a
|
[
"MIT"
] |
permissive
|
PierrePaul/ABToast
|
9f96249c1f3987421c1a68c81af084bd1a914d85
|
edf65f0e86aace18a33a624c13c8ce936c5940eb
|
refs/heads/master
| 2020-07-21T03:08:14.356334
| 2016-11-14T19:00:44
| 2016-11-14T19:00:44
| 73,740,655
| 0
| 0
| null | 2016-11-14T19:41:40
| 2016-11-14T19:41:40
| null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
# _*_ coding: utf-8 _*_
from distutils.core import setup
from setuptools import find_packages
setup(
name='django-abtoast',
version='1.0.3',
author='Hiten Sharma',
author_email='sharmahiten3@gmail.com',
packages=find_packages(),
url='https://github.com/htadg/ABToast',
license='MIT License',
description='ABToast is an A/B Testing app that is developed in django.',
long_description=open('README.md').read(),
zip_safe=False,
)
|
[
"sharmahiten3@gmail.com"
] |
sharmahiten3@gmail.com
|
894cf0ded218d4c3ec26f8fe45a2c3b61bbc23e9
|
27cd4886e5d08cca23bf36e24339ff1155b7db10
|
/generators/splash/BagModules/adc_sar_templates_fdsoi/capdac_7b.py
|
ff152c65df1d3c87f87af13dd87f33d543d0924c
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
ucb-art/laygo
|
8539accac6e9888122e8e0afd160d294ffb56bfc
|
8f62ec1971480cb27cb592421fd97f590379cff9
|
refs/heads/master
| 2021-01-11T08:49:24.306674
| 2020-06-18T15:01:50
| 2020-06-18T15:01:50
| 194,750,788
| 24
| 9
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,759
|
py
|
# -*- coding: utf-8 -*-
########################################################################################################################
#
# Copyright (c) 2014, Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
########################################################################################################################
import os
import pkg_resources
from bag.design import Module
yaml_file = pkg_resources.resource_filename(__name__, os.path.join('netlist_info', 'capdac_7b.yaml'))
class adc_sar_templates__capdac_7b(Module):
"""Module for library adc_sar_templates cell capdac_7b.
Fill in high level description here.
"""
def __init__(self, bag_config, parent=None, prj=None, **kwargs):
Module.__init__(self, bag_config, yaml_file, parent=parent, prj=prj, **kwargs)
def design(self):
"""To be overridden by subclasses to design this module.
This method should fill in values for all parameters in
self.parameters. To design instances of this module, you can
call their design() method or any other ways you coded.
To modify schematic structure, call:
rename_pin()
delete_instance()
replace_instance_master()
reconnect_instance_terminal()
restore_instance()
array_instance()
"""
pass
def get_layout_params(self, **kwargs):
"""Returns a dictionary with layout parameters.
This method computes the layout parameters used to generate implementation's
layout. Subclasses should override this method if you need to run post-extraction
layout.
Parameters
----------
kwargs :
any extra parameters you need to generate the layout parameters dictionary.
Usually you specify layout-specific parameters here, like metal layers of
input/output, customizable wire sizes, and so on.
Returns
-------
params : dict[str, any]
the layout parameters dictionary.
"""
return {}
def get_layout_pin_mapping(self):
"""Returns the layout pin mapping dictionary.
This method returns a dictionary used to rename the layout pins, in case they are different
than the schematic pins.
Returns
-------
pin_mapping : dict[str, str]
a dictionary from layout pin names to schematic pin names.
"""
return {}
|
[
"richards@eecs.berkeley.edu"
] |
richards@eecs.berkeley.edu
|
b30ddcdc45696a58718f254483b7b0596091e699
|
334bb5c9d948287d8746e81f0438ac5f3ef4c7c8
|
/examples/full-screen/simple-demos/colorcolumn.py
|
b81bcabe0e43a48b686372e51b64a9271c64eedf
|
[
"BSD-3-Clause",
"Python-2.0"
] |
permissive
|
davidtavarez/python-prompt-toolkit
|
fc6629694cfdaa227c5c908f7cdb0b73b9eedd1a
|
ceeed2bb4cb8467cefc112987121b3afd37d773a
|
refs/heads/master
| 2020-04-02T23:39:50.099817
| 2018-10-25T20:50:02
| 2018-10-25T20:50:02
| 154,874,871
| 1
| 0
|
BSD-3-Clause
| 2018-10-26T18:08:46
| 2018-10-26T18:08:46
| null |
UTF-8
|
Python
| false
| false
| 1,906
|
py
|
#!/usr/bin/env python
"""
Colorcolumn example.
"""
from __future__ import unicode_literals
from prompt_toolkit.application import Application
from prompt_toolkit.buffer import Buffer
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.layout.containers import HSplit, Window, ColorColumn
from prompt_toolkit.layout.controls import FormattedTextControl, BufferControl
from prompt_toolkit.layout.layout import Layout
LIPSUM = """
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Maecenas
quis interdum enim. Nam viverra, mauris et blandit malesuada, ante est bibendum
mauris, ac dignissim dui tellus quis ligula. Aenean condimentum leo at
dignissim placerat. In vel dictum ex, vulputate accumsan mi. Donec ut quam
placerat massa tempor elementum. Sed tristique mauris ac suscipit euismod. Ut
tempus vehicula augue non venenatis. Mauris aliquam velit turpis, nec congue
risus aliquam sit amet. Pellentesque blandit scelerisque felis, faucibus
consequat ante. Curabitur tempor tortor a imperdiet tincidunt. Nam sed justo
sit amet odio bibendum congue. Quisque varius ligula nec ligula gravida, sed
convallis augue faucibus. Nunc ornare pharetra bibendum. Praesent blandit ex
quis sodales maximus."""
# Create text buffers.
buff = Buffer()
buff.text = LIPSUM
# 1. The layout
color_columns = [
ColorColumn(50),
ColorColumn(80, style='bg:#ff0000'),
ColorColumn(10, style='bg:#ff0000'),
]
body = HSplit([
Window(FormattedTextControl('Press "q" to quit.'), height=1, style='reverse'),
Window(BufferControl(buffer=buff), colorcolumns=color_columns),
])
# 2. Key bindings
kb = KeyBindings()
@kb.add('q')
def _(event):
" Quit application. "
event.app.exit()
# 3. The `Application`
application = Application(
layout=Layout(body),
key_bindings=kb,
full_screen=True)
def run():
application.run()
if __name__ == '__main__':
run()
|
[
"jonathan@slenders.be"
] |
jonathan@slenders.be
|
aa7a716385760f45fec52d6192485a83cb9b1531
|
f9369134d8d12e4b542e5529d4283abbda76c07e
|
/BSTconstruction.py
|
d890ae7ee8b26bdb2eaca0fc823aa50b9a8cac78
|
[] |
no_license
|
yash921/leetcode-questions
|
10fdaae874075ddb20331ccbf39dd82d10a9bb11
|
f3fa3d3b5843a21bb86f91711105ae2751373e9c
|
refs/heads/main
| 2023-03-27T01:28:13.986142
| 2021-03-20T06:23:40
| 2021-03-20T06:23:40
| 349,645,174
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,017
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 6 18:50:47 2020
@author: yash
"""
import pygame
class BST:
def __init__(self, value):
self.value = value
self.left = None
self.right = None
# Average: O(log(n)) time | O(1) space
# Worst: O(n) time | O(1) space
def insert(self, value):
currentNode = self
while True:
if value < currentNode.value:
if currentNode.left is None:
currentNode.left = BST(value)
break
else:
currentNode = currentNode.left
else:
if currentNode.right is None:
currentNode.right = BST(value)
break
else:
currentNode = currentNode.right
return self
# Average: O(log(n)) time | O(1) space
# Worst: O(n) time | O(1) space
def contains(self, value):
currentNode = self
while currentNode is not None:
if value < currentNode.value:
currentNode = currentNode.left
elif value > currentNode.value:
currentNode = currentNode.right
else:
return True
return False
def findClosestValue(tree, target):
return findClosestValueInBstHelper(tree, target, float("inf"))
def findClosestValueInBstHelper(tree, target, closest):
if tree is None:
return closest
if abs(target - closest) > abs(target - tree.value):
closest = tree.value
if target < tree.value:
return findClosestValueInBstHelper(tree.left, target, closest)
elif target > tree.value:
return findClosestValueInBstHelper(tree.right, target, closest)
else:
return closest
myTree = BST(1)
myTree.left = BST(2)
myTree.right = BST(3)
# myTree.insert(2)
# myTree.insert(3)
print(findClosestValue(myTree,4))
|
[
"yashdixit921@gmail.com"
] |
yashdixit921@gmail.com
|
25e6bd086f9b240c5225f9b752696fa71aab17ae
|
5a281cb78335e06c631181720546f6876005d4e5
|
/cloudkitty-9.0.0/cloudkitty/rating/noop.py
|
bcd23ab7f2e824e0c756b0c453b0e82d1fcdd194
|
[
"Apache-2.0"
] |
permissive
|
scottwedge/OpenStack-Stein
|
d25b2a5bb54a714fc23f0ff0c11fb1fdacad85e8
|
7077d1f602031dace92916f14e36b124f474de15
|
refs/heads/master
| 2021-03-22T16:07:19.561504
| 2020-03-15T01:31:10
| 2020-03-15T01:31:10
| 247,380,811
| 0
| 0
|
Apache-2.0
| 2020-03-15T01:24:15
| 2020-03-15T01:24:15
| null |
UTF-8
|
Python
| false
| false
| 1,398
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2014 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Stéphane Albert
#
import decimal
from cloudkitty import rating
class Noop(rating.RatingProcessorBase):
module_name = "noop"
description = 'Dummy test module.'
@property
def enabled(self):
"""Check if the module is enabled
:returns: bool if module is enabled
"""
return True
@property
def priority(self):
return 1
def reload_config(self):
pass
def process(self, data):
for cur_data in data:
cur_usage = cur_data['usage']
for service in cur_usage:
for entry in cur_usage[service]:
if 'rating' not in entry:
entry['rating'] = {'price': decimal.Decimal(0)}
return data
|
[
"Wayne Gong@minbgong-winvm.cisco.com"
] |
Wayne Gong@minbgong-winvm.cisco.com
|
7acbd3f06c296e7b9c504e668609619e1dc8f852
|
1aed14713ddc1a3cea120cb4c8d0d9f79cf62a77
|
/test_classification.py
|
2ecaf428745589ce698cc0d20d2a4930f9687c77
|
[
"MIT"
] |
permissive
|
dazzag24/ResumeParser
|
16aa577548f6b300eab758ffb94aacce920b9566
|
06a105f587dd20dd47a9ac81c37e5f4b47d83d9f
|
refs/heads/master
| 2020-04-12T06:55:17.931441
| 2018-12-19T14:21:10
| 2018-12-19T14:21:10
| 162,351,604
| 0
| 0
|
MIT
| 2018-12-18T22:17:24
| 2018-12-18T22:17:24
| null |
UTF-8
|
Python
| false
| false
| 7,218
|
py
|
# from glob import glob
# import os
# import pandas as pd
# import matplotlib.pyplot as plt
# import numpy as np
# import json
# from collections import defaultdict
# base_json = 'dataset/resume_dataset.json'
# def pop_annot(raw_line):
# in_line = defaultdict(list, **raw_line)
# if 'annotation' in in_line:
# labels = in_line['annotation']
# for c_lab in labels:
# if len(c_lab['label'])>0:
# in_line[c_lab['label'][0]] += c_lab['points']
# return in_line
# with open(base_json, 'r') as f:
# # data is jsonl and so we parse it line-by-line
# resume_data = [json.loads(f_line) for f_line in f.readlines()]
# resume_df = pd.DataFrame([pop_annot(line) for line in resume_data])
# resume_df['length'] = resume_df['content'].map(len)
# # resume_df['length'].hist()
# # print(resume_df.sample(3))
# def extract_higlights(raw_line):
# in_line = defaultdict(list, **raw_line)
# if 'annotation' in in_line:
# labels = in_line['annotation']
# for c_lab in labels:
# if len(c_lab['label'])>0:
# in_line['highlight'] += [dict(category = c_lab['label'][0], **cpts) for cpts in c_lab['points']]
# return in_line
# resume_hl_df = pd.DataFrame([extract_higlights(line) for line in resume_data])
# resume_hl_df['length'] = resume_hl_df['content'].map(len)
# # resume_hl_df['length'].hist()
# # resume_hl_df.sample(3)
# from string import ascii_lowercase, digits
# valid_chars = ascii_lowercase+digits+'@., '
# focus_col = 'highlight'
# focus_df = resume_hl_df[['content', focus_col, 'length']].copy().dropna()
# # clean up the text but maintain the length
# focus_df['kosher_content'] = resume_df['content'].str.lower().map(lambda c_text: ''.join([c if c in valid_chars else ' ' for c in c_text]))
# # print(focus_col, 'with', focus_df.shape[0], 'complete results')
# # print('First result')
# for _, c_row in focus_df.query('length<2000').sample(1, random_state = 20).iterrows():
# # print(len(c_row['content']))
# for yoe in c_row[focus_col]:
# s,e = yoe['start'], yoe['end']
# print(yoe)
# # print(c_row['content'][s:e+1])
############################################ NOTE ########################################################
#
# Creates NER training data in Spacy format from JSON downloaded from Dataturks.
#
# Outputs the Spacy training data which can be used for Spacy training.
#
############################################################################################################
import json
import random
import logging
from sklearn.metrics import classification_report
from sklearn.metrics import precision_recall_fscore_support
from spacy.gold import GoldParse
from spacy.scorer import Scorer
from sklearn.metrics import accuracy_score
def convert_dataturks_to_spacy(dataturks_JSON_FilePath):
try:
training_data = []
lines=[]
with open(dataturks_JSON_FilePath, 'r') as f:
lines = f.readlines()
for line in lines:
data = json.loads(line)
text = data['content']
entities = []
for annotation in data['annotation']:
#only a single point in text annotation.
point = annotation['points'][0]
labels = annotation['label']
# handle both list of labels or a single label.
if not isinstance(labels, list):
labels = [labels]
for label in labels:
#dataturks indices are both inclusive [start, end] but spacy is not [start, end)
entities.append((point['start'], point['end'] + 1 ,label))
training_data.append((text, {"entities" : entities}))
return training_data
except Exception as e:
logging.exception("Unable to process " + dataturks_JSON_FilePath + "\n" + "error = " + str(e))
return None
import spacy
################### Train Spacy NER.###########
def train_spacy():
TRAIN_DATA = convert_dataturks_to_spacy("dataset/resume_dataset.json")
nlp = spacy.blank('en') # create blank Language class
# create the built-in pipeline components and add them to the pipeline
# nlp.create_pipe works for built-ins that are registered with spaCy
if 'ner' not in nlp.pipe_names:
ner = nlp.create_pipe('ner')
nlp.add_pipe(ner, last=True)
# add labels
for _, annotations in TRAIN_DATA:
for ent in annotations.get('entities'):
ner.add_label(ent[2])
# get names of other pipes to disable them during training
other_pipes = [pipe for pipe in nlp.pipe_names if pipe != 'ner']
with nlp.disable_pipes(*other_pipes): # only train NER
optimizer = nlp.begin_training()
for itn in range(10):
print("Statring iteration " + str(itn))
random.shuffle(TRAIN_DATA)
losses = {}
for text, annotations in TRAIN_DATA:
nlp.update(
[text], # batch of texts
[annotations], # batch of annotations
drop=0.2, # dropout - make it harder to memorise data
sgd=optimizer, # callable to update weights
losses=losses)
print(losses)
#test the model and evaluate it
examples = convert_dataturks_to_spacy("dataset/resume_dataset_test.json")
tp=0
tr=0
tf=0
ta=0
c=0
for text,annot in examples:
f=open("resume"+str(c)+".txt","w")
doc_to_test=nlp(text)
d={}
for ent in doc_to_test.ents:
d[ent.label_]=[]
for ent in doc_to_test.ents:
d[ent.label_].append(ent.text)
for i in set(d.keys()):
f.write("\n\n")
f.write(i +":"+"\n")
for j in set(d[i]):
f.write(j.replace('\n','')+"\n")
d={}
for ent in doc_to_test.ents:
d[ent.label_]=[0,0,0,0,0,0]
for ent in doc_to_test.ents:
doc_gold_text= nlp.make_doc(text)
gold = GoldParse(doc_gold_text, entities=annot.get("entities"))
y_true = [ent.label_ if ent.label_ in x else 'Not '+ent.label_ for x in gold.ner]
y_pred = [x.ent_type_ if x.ent_type_ ==ent.label_ else 'Not '+ent.label_ for x in doc_to_test]
if(d[ent.label_][0]==0):
#f.write("For Entity "+ent.label_+"\n")
#f.write(classification_report(y_true, y_pred)+"\n")
(p,r,f,s)= precision_recall_fscore_support(y_true,y_pred,average='weighted')
a=accuracy_score(y_true,y_pred)
d[ent.label_][0]=1
d[ent.label_][1]+=p
d[ent.label_][2]+=r
d[ent.label_][3]+=f
d[ent.label_][4]+=a
d[ent.label_][5]+=1
c+=1
for i in d:
print("\n For Entity "+i+"\n")
print("Accuracy : "+str((d[i][4]/d[i][5])*100)+"%")
print("Precision : "+str(d[i][1]/d[i][5]))
print("Recall : "+str(d[i][2]/d[i][5]))
print("F-score : "+str(d[i][3]/d[i][5]))
train_spacy()
|
[
"omkarpathak27@gmail.com"
] |
omkarpathak27@gmail.com
|
d3551bbf0e9586456be42b46aba252c03480d773
|
39b0d9c6df77671f540c619aff170441f953202a
|
/PYTHON LIBRARY/SUB_3/linecache_getline.py
|
a046ab4287da9bd8bc46a9515c5c9027ab555e45
|
[] |
no_license
|
yeboahd24/Python201
|
e7d65333f343d9978efff6bf86ce0447d3a40d70
|
484e66a52d4e706b8478473347732e23998c93c5
|
refs/heads/main
| 2023-02-06T10:24:25.429718
| 2020-12-26T01:08:04
| 2020-12-26T01:08:04
| 306,487,550
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
import linecache
from linecache_data import *
filename = make_tempfile()
# Pick out the same line from source and cache.
# (Notice that linecache counts from 1.)
print('SOURCE:')
print('{!r}'.format(lorem.split('\n')[4]))
print()
print('CACHE:')
print('{!r}'.format(linecache.getline(filename, 5)))
cleanup(filename)
|
[
"noreply@github.com"
] |
yeboahd24.noreply@github.com
|
986ea4fb769b25b6b526c57c670df265c47eca64
|
7172ed9a83a2d3d9a61918bbb9db89a4641f862a
|
/tests/test_resource_list.py
|
89ea147c8912fc597230c055b7c325d2c07f16f3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
giorgiobasile/resync
|
e7bb49661b32a7789248eabf3a640c327c37b343
|
c4734648a6ca93e985164450b85f387a349adee2
|
refs/heads/master
| 2021-01-13T13:39:49.865944
| 2016-05-06T22:33:37
| 2016-05-06T22:33:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,643
|
py
|
import unittest
try: #python2
# Must try this first as io also exists in python2
# but in the wrong one!
import StringIO as io
except ImportError: #python3
import io
import re
from resync.resource import Resource
from resync.resource_list import ResourceList, ResourceListDupeError
from resync.sitemap import SitemapParseError
class TestResourceList(unittest.TestCase):
def test01_same(self):
src = ResourceList()
src.add( Resource('a',timestamp=1) )
src.add( Resource('b',timestamp=2) )
dst = ResourceList()
dst.add( Resource('a',timestamp=1) )
dst.add( Resource('b',timestamp=2) )
( same, changed, deleted, added ) = dst.compare(src)
self.assertEqual( len(same), 2, "2 things unchanged" )
i = iter(same)
self.assertEqual( next(i).uri, 'a', "first was a" )
self.assertEqual( next(i).uri, 'b', "second was b" )
self.assertEqual( len(changed), 0, "nothing changed" )
self.assertEqual( len(deleted), 0, "nothing deleted" )
self.assertEqual( len(added), 0, "nothing added" )
def test02_changed(self):
src = ResourceList()
src.add( Resource('a',timestamp=1) )
src.add( Resource('b',timestamp=2) )
dst = ResourceList()
dst.add( Resource('a',timestamp=3) )
dst.add( Resource('b',timestamp=4) )
( same, changed, deleted, added ) = dst.compare(src)
self.assertEqual( len(same), 0, "0 things unchanged" )
self.assertEqual( len(changed), 2, "2 things changed" )
i = iter(changed)
self.assertEqual( next(i).uri, 'a', "first was a" )
self.assertEqual( next(i).uri, 'b', "second was b" )
self.assertEqual( len(deleted), 0, "nothing deleted" )
self.assertEqual( len(added), 0, "nothing added" )
def test03_deleted(self):
src = ResourceList()
src.add( Resource('a',timestamp=1) )
src.add( Resource('b',timestamp=2) )
dst = ResourceList()
dst.add( Resource('a',timestamp=1) )
dst.add( Resource('b',timestamp=2) )
dst.add( Resource('c',timestamp=3) )
dst.add( Resource('d',timestamp=4) )
( same, changed, deleted, added ) = dst.compare(src)
self.assertEqual( len(same), 2, "2 things unchanged" )
self.assertEqual( len(changed), 0, "nothing changed" )
self.assertEqual( len(deleted), 2, "c and d deleted" )
i = iter(deleted)
self.assertEqual( next(i).uri, 'c', "first was c" )
self.assertEqual( next(i).uri, 'd', "second was d" )
self.assertEqual( len(added), 0, "nothing added" )
def test04_added(self):
src = ResourceList()
src.add( Resource('a',timestamp=1) )
src.add( Resource('b',timestamp=2) )
src.add( Resource('c',timestamp=3) )
src.add( Resource('d',timestamp=4) )
dst = ResourceList()
dst.add( Resource('a',timestamp=1) )
dst.add( Resource('c',timestamp=3) )
( same, changed, deleted, added ) = dst.compare(src)
self.assertEqual( len(same), 2, "2 things unchanged" )
self.assertEqual( len(changed), 0, "nothing changed" )
self.assertEqual( len(deleted), 0, "nothing deleted" )
self.assertEqual( len(added), 2, "b and d added" )
i = iter(added)
self.assertEqual( next(i).uri, 'b', "first was b" )
self.assertEqual( next(i).uri, 'd', "second was d" )
def test05_add(self):
r1 = Resource(uri='a',length=1)
r2 = Resource(uri='b',length=2)
i = ResourceList()
i.add(r1)
self.assertRaises( ResourceListDupeError, i.add, r1)
i.add(r2)
self.assertRaises( ResourceListDupeError, i.add, r2)
# allow dupes
r1d = Resource(uri='a',length=10)
i.add(r1d,replace=True)
self.assertEqual( len(i), 2 )
self.assertEqual( i.resources['a'].length, 10 )
def test06_add_iterable(self):
r1 = Resource(uri='a',length=1)
r2 = Resource(uri='b',length=2)
i = ResourceList()
i.add( [r1,r2] )
self.assertRaises( ResourceListDupeError, i.add, r1)
self.assertRaises( ResourceListDupeError, i.add, r2)
# allow dupes
r1d = Resource(uri='a',length=10)
i.add( [r1d] ,replace=True)
self.assertEqual( len(i), 2 )
self.assertEqual( i.resources['a'].length, 10 )
def test07_has_md5(self):
r1 = Resource(uri='a')
r2 = Resource(uri='b')
i = ResourceList()
self.assertFalse( i.has_md5() )
i.add(r1)
i.add(r2)
self.assertFalse( i.has_md5() )
r1.md5="aabbcc"
self.assertTrue( i.has_md5() )
def test08_iter(self):
i = ResourceList()
i.add( Resource('a',timestamp=1) )
i.add( Resource('b',timestamp=2) )
i.add( Resource('c',timestamp=3) )
i.add( Resource('d',timestamp=4) )
resources=[]
for r in i:
resources.append(r)
self.assertEqual(len(resources), 4)
self.assertEqual( resources[0].uri, 'a')
self.assertEqual( resources[3].uri, 'd')
def test20_as_xml(self):
rl = ResourceList()
rl.add( Resource('a',timestamp=1) )
rl.add( Resource('b',timestamp=2) )
xml = rl.as_xml()
self.assertTrue( re.search(r'<rs:md .*capability="resourcelist"', xml), 'XML has capability' )
self.assertTrue( re.search(r'<url><loc>a</loc><lastmod>1970-01-01T00:00:01Z</lastmod></url>', xml), 'XML has resource a' )
def test30_parse(self):
xml='<?xml version=\'1.0\' encoding=\'UTF-8\'?>\n\
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:rs="http://www.openarchives.org/rs/terms/">\
<rs:md at="2013-08-07" capability="resourcelist" completed="2013-08-08" />\
<url><loc>/tmp/rs_test/src/file_a</loc><lastmod>2012-03-14T18:37:36Z</lastmod><rs:md change="updated" length="12" /></url>\
<url><loc>/tmp/rs_test/src/file_b</loc><lastmod>2012-03-14T18:37:36Z</lastmod><rs:md length="32" /></url>\
</urlset>'
rl=ResourceList()
rl.parse(fh=io.StringIO(xml))
self.assertEqual( len(rl.resources), 2, 'got 2 resources')
self.assertEqual( rl.md['capability'], 'resourcelist', 'capability set' )
self.assertEqual( rl.md_at, '2013-08-07' )
self.assertEqual( rl.md_completed, '2013-08-08' )
def test31_parse_no_capability(self):
xml='<?xml version=\'1.0\' encoding=\'UTF-8\'?>\n\
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">\
<url><loc>http://example.com/res1</loc><lastmod>2012-03-14T18:37:36Z</lastmod></url>\
</urlset>'
rl=ResourceList()
rl.parse(fh=io.StringIO(xml))
self.assertEqual( len(rl.resources), 1, 'got 1 resource')
self.assertEqual( rl.md['capability'], 'resourcelist', 'capability set by reading routine' )
self.assertFalse( 'from' in rl.md )
def test32_parse_bad_capability(self):
# the <rs:md capability="bad_capability".. should give error
xml='<?xml version=\'1.0\' encoding=\'UTF-8\'?>\n\
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:rs="http://www.openarchives.org/rs/terms/">\
<rs:md capability="bad_capability" from="2013-01-01"/>\
<url><loc>http://example.com/bad_res_1</loc><lastmod>2012-03-14T18:37:36Z</lastmod></url>\
</urlset>'
rl=ResourceList()
self.assertRaises( SitemapParseError, rl.parse, fh=io.StringIO(xml) )
if __name__ == '__main__':
suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestResourceList)
unittest.TextTestRunner().run(suite)
|
[
"simeon.warner@cornell.edu"
] |
simeon.warner@cornell.edu
|
ba713802b829240da77702cbc404256e8622c024
|
aa1972e6978d5f983c48578bdf3b51e311cb4396
|
/mas_nitro-python-1.0/sample/system_version.py
|
f30d7ea2bffb3a8239bf8a2d6c1e2d8bf5d973eb
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
MayankTahil/nitro-ide
|
3d7ddfd13ff6510d6709bdeaef37c187b9f22f38
|
50054929214a35a7bb19ed10c4905fffa37c3451
|
refs/heads/master
| 2020-12-03T02:27:03.672953
| 2017-07-05T18:09:09
| 2017-07-05T18:09:09
| 95,933,896
| 2
| 5
| null | 2017-07-05T16:51:29
| 2017-07-01T01:03:20
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,023
|
py
|
#!/usr/bin/env python
'''
* Copyright (c) 2008-2015 Citrix Systems, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
'''
import sys
from massrc.com.citrix.mas.nitro.exception.nitro_exception import nitro_exception
from massrc.com.citrix.mas.nitro.resource.config.mps.mps import mps
from massrc.com.citrix.mas.nitro.service.nitro_service import nitro_service
class system_version :
def __init__(self):
ipaddress=""
username=""
password=""
@staticmethod
def main(cls, args_):
if(len(args_) < 3):
print("Usage: run.bat <ip> <username> <password>")
return
config = system_version()
config.ip = args_[1]
config.username = args_[2]
config.password = args_[3]
try :
client = nitro_service(config.ip,"http","v1")
client.set_credential(config.username,config.password)
client.timeout = 1800
client.login()
config.run_sample(client)
client.logout()
except nitro_exception as e:
print("Exception::errorcode="+str(e.errorcode)+",message="+ e.message)
except Exception as e:
print("Exception::message="+str(e.args))
return
def run_sample(self, client) :
self.get_mps(client)
def get_mps(self,client) :
try:
result = mps()
simplelist = mps.get(client,result)
print "--------------"
print "Response Came :"
print "--------------"
for item in simplelist :
print "Product : "+ item.product+ " | Session Build : " +item.build_number
except nitro_exception as e :
print "--------------"
print "Exception :"
print "--------------"
print "ErrorCode : "+ str(e.errorcode)
print "Message : " +e.message
except Exception as e:
raise e
#
# Main thread of execution
#
if __name__ == '__main__':
try:
print len(sys.argv)
if len(sys.argv) < 3:
sys.exit()
else:
ipaddress=sys.argv[1]
username=sys.argv[2]
password=sys.argv[3]
system_version().main(system_version(),sys.argv)
except SystemExit:
print("Exception::Usage: Sample.py <directory path of Nitro.py> <nsip> <username> <password>")
|
[
"Mayank@Mandelbrot.local"
] |
Mayank@Mandelbrot.local
|
3e8bd385f8e17649c7021b1e65085eb5bb3cf686
|
d41c15b9c68ab2ee70740044d25d620e6b90a09e
|
/app/mod_cmd/commands/login.py
|
c37dcd065386da1d5e8778f595de2e190332b205
|
[
"Apache-2.0"
] |
permissive
|
jaycode/Arthur.workspace
|
9093b54cda983d2e8b6745b894403b5fa1282b56
|
7a581104141ee5f556e058b1276b4087a2921dfc
|
refs/heads/master
| 2021-01-10T10:36:35.599700
| 2016-03-21T19:37:49
| 2016-03-21T19:37:49
| 55,436,635
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 198
|
py
|
"""Login
"""
def run(project = None, args = [], **kwargs):
"""(todo) Login
login [username]
Args:
username: Do I need to explain this?
"""
return [project, instruction]
|
[
"teguhwpurwanto@gmail.com"
] |
teguhwpurwanto@gmail.com
|
40f6e5873c826d97e8e486bb641cd1a516f4eb39
|
8b1c47d1ee06bfd2642305bc8c6723ccbe7f9f0d
|
/pdot.py
|
8daa9401b64244dc5827d08408e8b9371a91b5da
|
[] |
no_license
|
tribeiro/DRATools
|
6e594e3e0a497be9aa439c0cc76aa4923c7a5def
|
8b73bd50e8cde0ab4225df495970c4199f5f1f1b
|
refs/heads/master
| 2016-08-03T17:44:20.505742
| 2016-02-05T15:44:20
| 2016-02-05T15:44:20
| 6,463,425
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,562
|
py
|
#! /usr/bin/env python
'''
Calculates Pdot for a O-C time of eclipses, considering Pdot constant over time.
'''
import sys,os
import numpy as np
import pylab as py
######################################################################
def main(argv):
'''
Main function. Reads input parameters and run iteractive procedure.
Run with -h to get help on input parameters.
Defaults to CAL87 data on ribeiro & lopes de oliveira (2014)
'''
from optparse import OptionParser
parser = OptionParser()
parser.add_option( '--HJD0',
help='Reference time of first eclipse in\
HJD. Ephemeris.',
type='float',
default=2450111.5144)
parser.add_option( '--P0',
help='Reference orbital period in days.',
type='float',
default=0.44267714)
parser.add_option( '--DT0',
help='Measured difference between observed\
and calculated eclipse time.',
type='float',
default=0.)
parser.add_option( '--E',
help='Number of observed cycle, with \
respect to HJD0, the reference first\
eclipse.',
type='float',
default=5997)
parser.add_option( '--sigmaDT0',
help='Uncertainty in the determination\
of DT0.',
type='float',
default=0.)
opt,args = parser.parse_args(argv)
print '''
PDOT.PY - calculates Pdot for given O-C between eclipses,
considers Pdot is constant.
c - Tiago Ribeiro - UFS - 2013
'''
print 'HJD0 = %f'%opt.HJD0
print 'P0 = %f'%opt.P0
print 'DT0 = %f'%opt.DT0
print 'E = %f'%opt.E
# First iteration. Considers T0' = T0 to obtain DT
DT = opt.P0 * opt.E # Calculated time of eclipse
Pdot = opt.DT0 / DT / opt.E # calculated Pdot
print 'Pdot = %e'%(Pdot)
difPdot = 1.0
print '---------------------------------------------------'
print '|Pdot | difPdot | relDif |'
while (difPdot/Pdot > 1e-10):
DT = opt.P0 * opt.E + Pdot * DT * opt.E # Calculated time of eclipse
oldPdot = Pdot
Pdot = opt.DT0 / DT / opt.E # calculated Pdot
difPdot = np.abs(Pdot - oldPdot)
print '|%14.8e|%18.10e|%15.8e|'%(Pdot,difPdot,difPdot/Pdot)
print '---------------------------------------------------'
if opt.sigmaDT0 > 0:
sDT0 = opt.sigmaDT0
sigmaPdot = np.sqrt( (sDT0 / opt.E / DT)**2. + (sDT0 * opt.DT0 / opt.E / DT**2.)**2. )
print 'Pdot = %e +/- %e'%(Pdot,sigmaPdot)
else:
print 'Pdot = %e'%(Pdot)
######################################################################
if __name__ == '__main__':
main(sys.argv)
######################################################################
|
[
"tiago.astro@gmail.com"
] |
tiago.astro@gmail.com
|
568d1d5d130d876efb8e9236f37533fd74439534
|
feba3c32aac7f17d8fbaf6ef7bb4d229844f8247
|
/machine_learning/clustering/spectral_clustering/SpectralClustering/main.py
|
4478a211b938139541a188e35d40ad5479bc2d81
|
[] |
no_license
|
lisunshine1234/mlp-algorithm-python
|
d48aa1336ae7c4925a0e30f4f09fa6de21f83d0e
|
898359a10f65f16e94f3bb27cc61f3837806ca68
|
refs/heads/master
| 2023-05-01T11:11:47.465491
| 2021-05-24T13:53:40
| 2021-05-24T13:53:40
| 291,934,886
| 0
| 0
| null | 2021-05-24T13:42:15
| 2020-09-01T08:00:17
|
Python
|
UTF-8
|
Python
| false
| false
| 5,872
|
py
|
import numpy as np
import run as r
'''
[id]
144
[name]
SpectralClustering
[input]
x_train 训练集 训练集标签数据集 二维数组 必须 定数
y_train 测试集 测试集数据集 二维数组 必须 定数
n_clusters 簇数 默认为8,投影子空间的尺寸,可选整数,整数 字符串 不必须 定数
eigen_solver eigen_solver 默认为None,使用特征值分解策略。 AMG需要安装pyamg。在非常大且稀疏的问题上,它可能会更快,但也可能导致不稳定,可选'lobpcg','amg','arpack' 字符串 不必须 定数
n_components 组件数 默认为None,用于频谱嵌入的本征向量数,可选整数,整数 字符串 不必须 定数
random_state 随机种子 默认为None,伪随机数生成器,用于在'eigen_solver=' amg'时通过K-Means初始化来分解lobpcg本征向量。使用int可以确定随机性,可选整数 整数 不必须 定数
n_init 随机初始化数量 默认为10,k均值算法将在不同质心种子下运行的次数。就惯性而言,最终结果将是n_init个连续运行的最佳输出,可选整数 整数 不必须 定数
gamma gamma 默认为1.,rbf,poly,Sigmoid,laplacian和chi2内核的内核系数。忽略了'affinity=' nearest_neighbors,可选浮点数 浮点数 不必须 定数
affinity 亲和力 默认为rbf,如何构造亲和力矩阵。-'nearest_neighbors':通过计算最近邻居的图来构造亲和矩阵。-'rbf':使用径向基函数(RBF)内核构造亲和矩阵。-'precomputed':将'X'解释为预先计算的亲和力矩阵。 -'precomputed_nearest_neighbors':将'X'解释为预先计算的最近邻居的稀疏图,并通过选择'n_neighbors'最近邻居构建亲和力矩阵,可选,'rbf' 字符串 不必须 定数
n_neighbors 邻居数量 默认为10,使用最近邻居方法构造亲和力矩阵时要使用的邻居数量。忽略了'affinity=' rbf',可选整数,整数 字符串 不必须 定数
eigen_tol eigen_tol 默认为0.0,当“ arpack”时,拉普拉斯矩阵特征分解的停止准则,可选浮点数 浮点数 不必须 定数
assign_labels 分配标签策略 默认为kmeans,用于在嵌入空间中分配标签的策略。拉普拉斯嵌入后,有两种分配标签的方法。可以应用k均值,它是一种流行的选择。但是它也可能对初始化敏感。离散化是另一种对随机初始化不太敏感的方法,可选'kmeans','discretize' 字符串 不必须 定数
degree 度 默认为3,多项式内核的度。被其他内核忽略,可选浮点数 浮点数 不必须 定数
coef0 coef0 默认为1,多项式和S形核的系数为零。被其他内核忽略,可选浮点数 浮点数 不必须 定数
kernel_params kernel参数 默认为None,作为可调用对象传递的内核的参数(关键字参数)和值。被其他内核忽略,可选字符串,字符串,字典 字符串 不必须 定数
n_jobs CPU数量 默认为None,要运行的并行作业数。 'None'表示1,可选整数 整数 不必须 定数
[output]
affinity_matrix_ 亲和矩阵 用于聚类的亲和矩阵 二维数组
labels_ labels_ 每个点的标签 一维数组
[outline]
将聚类应用于规范化拉普拉斯算子的投影。
[describe]
将聚类应用于规范化拉普拉斯算子的投影。
在实践中,当各个群集的结构高度不凸,或更普遍地说,当群集的中心和散布的度量值不适合完整群集时,频谱群集非常有用。
例如,当簇在2D平面上嵌套圆时。
如果亲和力是图的邻接矩阵,则可以使用此方法查找归一化图割。
当调用'fit'时,将使用任一核函数构造亲和矩阵,例如距离为'd(X,X)'的欧几里德的高斯(aka RBF)核:: np.exp(-gamma * d( X,X)** 2)或k最近邻居连接矩阵。
或者,使用'预先计算',可以使用用户提供的亲和力矩阵。
'''
def main(x_train, y_train,
n_clusters=8, eigen_solver=None, n_components=None, random_state=None, n_init=10, gamma=1., affinity='rbf', n_neighbors=10, eigen_tol=0.0,
assign_labels='kmeans', degree=3, coef0=1, kernel_params=None, n_jobs=None
):
if type(x_train) is str:
x_train = eval(x_train)
if type(y_train) is str:
y_train = eval(y_train)
if type(n_clusters) is str:
n_clusters = eval(n_clusters)
if type(n_components) is str:
n_components = eval(n_components)
if type(random_state) is str:
random_state = eval(random_state)
if type(n_init) is str:
n_init = eval(n_init)
if type(gamma) is str:
gamma = eval(gamma)
if type(n_neighbors) is str:
n_neighbors = eval(n_neighbors)
if type(eigen_tol) is str:
eigen_tol = eval(eigen_tol)
if type(degree) is str:
degree = eval(degree)
if type(coef0) is str:
coef0 = eval(coef0)
if type(kernel_params) is str:
kernel_params = eval(kernel_params)
if type(n_jobs) is str:
n_jobs = eval(n_jobs)
return r.run(x_train=x_train, y_train=y_train, n_clusters=n_clusters,
eigen_solver=eigen_solver,
n_components=n_components,
random_state=random_state,
n_init=n_init,
gamma=gamma,
affinity=affinity,
n_neighbors=n_neighbors,
eigen_tol=eigen_tol,
assign_labels=assign_labels,
degree=degree,
coef0=coef0,
kernel_params=kernel_params,
n_jobs=n_jobs)
if __name__ == '__main__':
import numpy as np
import json
array = np.loadtxt('D:\\123_2.csv', delimiter=',')
array = array[0:20, :]
y = array[:, -1].tolist()
x = np.delete(array, -1, axis=1).tolist()
array = array.tolist()
back = main(x, y)
print(back)
for i in back:
print(i + ":" + str(back[i]))
json.dumps(back)
|
[
"178513111@qq.com"
] |
178513111@qq.com
|
b5e44acdf849d67dd76df15fee9528740e2d4810
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-cloudide/huaweicloudsdkcloudide/v2/model/show_price_response.py
|
319556bbd2c00ca7611e5913f4d058e565a6f5bc
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,007
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ShowPriceResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'prices': 'list[ResourcePrice]',
'status': 'str'
}
attribute_map = {
'prices': 'prices',
'status': 'status'
}
def __init__(self, prices=None, status=None):
"""ShowPriceResponse
The model defined in huaweicloud sdk
:param prices: 技术栈价格列表
:type prices: list[:class:`huaweicloudsdkcloudide.v2.ResourcePrice`]
:param status: 状态
:type status: str
"""
super(ShowPriceResponse, self).__init__()
self._prices = None
self._status = None
self.discriminator = None
if prices is not None:
self.prices = prices
if status is not None:
self.status = status
@property
def prices(self):
"""Gets the prices of this ShowPriceResponse.
技术栈价格列表
:return: The prices of this ShowPriceResponse.
:rtype: list[:class:`huaweicloudsdkcloudide.v2.ResourcePrice`]
"""
return self._prices
@prices.setter
def prices(self, prices):
"""Sets the prices of this ShowPriceResponse.
技术栈价格列表
:param prices: The prices of this ShowPriceResponse.
:type prices: list[:class:`huaweicloudsdkcloudide.v2.ResourcePrice`]
"""
self._prices = prices
@property
def status(self):
"""Gets the status of this ShowPriceResponse.
状态
:return: The status of this ShowPriceResponse.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ShowPriceResponse.
状态
:param status: The status of this ShowPriceResponse.
:type status: str
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ShowPriceResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
ba15ea6b6a8de11aca7e0fee71fc5594b4862c2b
|
c4a0669126f2fbf757ac3b33a8279ef32305bbd7
|
/Python Crash Course/Chapter 13/13.1 Stars/main.py
|
d13a05bea7ad0b6f82c5294c2a451c874c0f822f
|
[] |
no_license
|
ezeutno/PycharmProject
|
822b5a7da05729c5241a03b7413548a34b12e4a5
|
bdb87599885287d2d7cd5cd703b62197563722b8
|
refs/heads/master
| 2021-07-18T20:55:08.605486
| 2017-10-24T03:14:10
| 2017-10-24T03:14:10
| 105,782,136
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 470
|
py
|
import pygame
from settings import Settings
from pygame.sprite import Group
import game_fuction as gf
def main():
pygame.init()
ai_settings = Settings()
screen = pygame.display.set_mode((ai_settings.screen_width, ai_settings.screen_height))
pygame.display.set_caption('Stars')
stars = Group()
gf.create_multilayer(ai_settings, screen, stars)
while True:
gf.check_events()
gf.update_screen(ai_settings, screen, stars)
main()
|
[
"ivan.suratno@gmail.com"
] |
ivan.suratno@gmail.com
|
e0493a95982145e381c01ff67d16292c6a0349f0
|
d191a04a3ded41175ea84ae88ebddb4f262b7fb1
|
/Company test/nvidia.py
|
c4986c711e53efb20d3e34f2e58950269a5d26df
|
[] |
no_license
|
YLyeliang/now_leet_code_practice
|
ae4aea945bae72ec08b11e57a8f8a3e81e704a54
|
204d770e095aec43800a9771fe88dd553463d2f7
|
refs/heads/master
| 2022-06-13T20:22:51.266813
| 2022-05-24T05:29:32
| 2022-05-24T05:29:32
| 205,753,056
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
x=int(input())
y=int(input())
z=int(input())
A=[]
B=[]
for _ in range(x):
A.append(list(map(int,input().split())))
for _ in range(y):
B.append(list(map(int,input().split())))
C=[['']*z for _ in range(x)]
for i in range(x):
for j in range(z):
C[i][j]=str(sum([A[i][k]*B[k][j] for k in range(y)]))
for l in range(x):
print(" ".join(C[l]))
|
[
"k87974@163.com"
] |
k87974@163.com
|
8e5a1d8fbec942a0e35a39675415f7e7b2f42cd4
|
3a18b8ba06a58231f4ecb2c1a231722cdf862e6b
|
/python_code/dillonsCode/websites_folder/todo_tutorial/venv/lib/python3.8/site-packages/werkzeug/middleware/proxy_fix.py
|
2a7af0cdfe43370cd52976eac8ac734c1585167c
|
[] |
no_license
|
dillonallen92/codeStorage
|
98dd7f5a8ecb062e37313a1323aacd362ffc44c7
|
23351e0b3348de922283f6494762db9f291579d6
|
refs/heads/master
| 2023-07-07T10:32:40.428607
| 2023-06-26T23:26:47
| 2023-06-26T23:28:13
| 141,781,205
| 0
| 1
| null | 2023-02-12T00:21:58
| 2018-07-21T04:30:51
|
Mathematica
|
UTF-8
|
Python
| false
| false
| 7,161
|
py
|
"""
X-Forwarded-For Proxy Fix
=========================
This module provides a middleware that adjusts the WSGI environ based on
``X-Forwarded-`` headers that proxies in front of an application may
set.
When an application is running behind a proxy server, WSGI may see the
request as coming from that server rather than the real client. Proxies
set various headers to track where the request actually came from.
This middleware should only be used if the application is actually
behind such a proxy, and should be configured with the number of proxies
that are chained in front of it. Not all proxies set all the headers.
Since incoming headers can be faked, you must set how many proxies are
setting each header so the middleware knows what to trust.
.. autoclass:: ProxyFix
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import typing as t
from ..http import parse_list_header
if t.TYPE_CHECKING:
from _typeshed.wsgi import StartResponse
from _typeshed.wsgi import WSGIApplication
from _typeshed.wsgi import WSGIEnvironment
class ProxyFix:
"""Adjust the WSGI environ based on ``X-Forwarded-`` that proxies in
front of the application may set.
- ``X-Forwarded-For`` sets ``REMOTE_ADDR``.
- ``X-Forwarded-Proto`` sets ``wsgi.url_scheme``.
- ``X-Forwarded-Host`` sets ``HTTP_HOST``, ``SERVER_NAME``, and
``SERVER_PORT``.
- ``X-Forwarded-Port`` sets ``HTTP_HOST`` and ``SERVER_PORT``.
- ``X-Forwarded-Prefix`` sets ``SCRIPT_NAME``.
You must tell the middleware how many proxies set each header so it
knows what values to trust. It is a security issue to trust values
that came from the client rather than a proxy.
The original values of the headers are stored in the WSGI
environ as ``werkzeug.proxy_fix.orig``, a dict.
:param app: The WSGI application to wrap.
:param x_for: Number of values to trust for ``X-Forwarded-For``.
:param x_proto: Number of values to trust for ``X-Forwarded-Proto``.
:param x_host: Number of values to trust for ``X-Forwarded-Host``.
:param x_port: Number of values to trust for ``X-Forwarded-Port``.
:param x_prefix: Number of values to trust for
``X-Forwarded-Prefix``.
.. code-block:: python
from werkzeug.middleware.proxy_fix import ProxyFix
# App is behind one proxy that sets the -For and -Host headers.
app = ProxyFix(app, x_for=1, x_host=1)
.. versionchanged:: 1.0
Deprecated code has been removed:
* The ``num_proxies`` argument and attribute.
* The ``get_remote_addr`` method.
* The environ keys ``orig_remote_addr``,
``orig_wsgi_url_scheme``, and ``orig_http_host``.
.. versionchanged:: 0.15
All headers support multiple values. The ``num_proxies``
argument is deprecated. Each header is configured with a
separate number of trusted proxies.
.. versionchanged:: 0.15
Original WSGI environ values are stored in the
``werkzeug.proxy_fix.orig`` dict. ``orig_remote_addr``,
``orig_wsgi_url_scheme``, and ``orig_http_host`` are deprecated
and will be removed in 1.0.
.. versionchanged:: 0.15
Support ``X-Forwarded-Port`` and ``X-Forwarded-Prefix``.
.. versionchanged:: 0.15
``X-Forwarded-Host`` and ``X-Forwarded-Port`` modify
``SERVER_NAME`` and ``SERVER_PORT``.
"""
def __init__(
self,
app: "WSGIApplication",
x_for: int = 1,
x_proto: int = 1,
x_host: int = 0,
x_port: int = 0,
x_prefix: int = 0,
) -> None:
self.app = app
self.x_for = x_for
self.x_proto = x_proto
self.x_host = x_host
self.x_port = x_port
self.x_prefix = x_prefix
def _get_real_value(self, trusted: int, value: t.Optional[str]) -> t.Optional[str]:
"""Get the real value from a list header based on the configured
number of trusted proxies.
:param trusted: Number of values to trust in the header.
:param value: Comma separated list header value to parse.
:return: The real value, or ``None`` if there are fewer values
than the number of trusted proxies.
.. versionchanged:: 1.0
Renamed from ``_get_trusted_comma``.
.. versionadded:: 0.15
"""
if not (trusted and value):
return None
values = parse_list_header(value)
if len(values) >= trusted:
return values[-trusted]
return None
def __call__(
self, environ: "WSGIEnvironment", start_response: "StartResponse"
) -> t.Iterable[bytes]:
"""Modify the WSGI environ based on the various ``Forwarded``
headers before calling the wrapped application. Store the
original environ values in ``werkzeug.proxy_fix.orig_{key}``.
"""
environ_get = environ.get
orig_remote_addr = environ_get("REMOTE_ADDR")
orig_wsgi_url_scheme = environ_get("wsgi.url_scheme")
orig_http_host = environ_get("HTTP_HOST")
environ.update(
{
"werkzeug.proxy_fix.orig": {
"REMOTE_ADDR": orig_remote_addr,
"wsgi.url_scheme": orig_wsgi_url_scheme,
"HTTP_HOST": orig_http_host,
"SERVER_NAME": environ_get("SERVER_NAME"),
"SERVER_PORT": environ_get("SERVER_PORT"),
"SCRIPT_NAME": environ_get("SCRIPT_NAME"),
}
}
)
x_for = self._get_real_value(self.x_for, environ_get("HTTP_X_FORWARDED_FOR"))
if x_for:
environ["REMOTE_ADDR"] = x_for
x_proto = self._get_real_value(
self.x_proto, environ_get("HTTP_X_FORWARDED_PROTO")
)
if x_proto:
environ["wsgi.url_scheme"] = x_proto
x_host = self._get_real_value(self.x_host, environ_get("HTTP_X_FORWARDED_HOST"))
if x_host:
environ["HTTP_HOST"] = environ["SERVER_NAME"] = x_host
# "]" to check for IPv6 address without port
if ":" in x_host and not x_host.endswith("]"):
environ["SERVER_NAME"], environ["SERVER_PORT"] = x_host.rsplit(":", 1)
x_port = self._get_real_value(self.x_port, environ_get("HTTP_X_FORWARDED_PORT"))
if x_port:
host = environ.get("HTTP_HOST")
if host:
# "]" to check for IPv6 address without port
if ":" in host and not host.endswith("]"):
host = host.rsplit(":", 1)[0]
environ["HTTP_HOST"] = f"{host}:{x_port}"
environ["SERVER_PORT"] = x_port
x_prefix = self._get_real_value(
self.x_prefix, environ_get("HTTP_X_FORWARDED_PREFIX")
)
if x_prefix:
environ["SCRIPT_NAME"] = x_prefix
return self.app(environ, start_response)
|
[
"dillon.allen.92@gmail.com"
] |
dillon.allen.92@gmail.com
|
d69b9f745e9ad17b53c5384804fdd18190e526a7
|
f82bfba767a44bc15557eb2d2ae2558c83cfb0e1
|
/catkin_ws/src/tracking/src/diagnose.py
|
808ad7f138ea91084f29763cd4647e031bf82b90
|
[] |
no_license
|
championway/argbot
|
4d89a233541a38cd8c8293c55f981b78aad276b6
|
a2c49a4a9df28675063aeb8d8ff6768f424526a1
|
refs/heads/master
| 2020-04-04T14:39:04.156209
| 2020-01-01T17:06:54
| 2020-01-01T17:06:54
| 156,006,637
| 8
| 2
| null | 2019-03-30T12:36:06
| 2018-11-03T17:24:24
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,534
|
py
|
#!/usr/bin/env python
import numpy as np
import cv2
import roslib
import rospy
import tf
import struct
import math
import time
from sensor_msgs.msg import Image, LaserScan
from sensor_msgs.msg import CameraInfo, CompressedImage
from geometry_msgs.msg import PoseArray, Pose, PoseStamped, Point
from visualization_msgs.msg import Marker, MarkerArray
from nav_msgs.msg import OccupancyGrid, MapMetaData, Odometry
import rospkg
from cv_bridge import CvBridge, CvBridgeError
from dynamic_reconfigure.server import Server
from control.cfg import pos_PIDConfig, ang_PIDConfig
from duckiepond_vehicle.msg import UsvDrive
from std_srvs.srv import SetBool, SetBoolResponse
from PID import PID_control
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
from ssd import build_ssd
from matplotlib import pyplot as plt
class Diagnose():
def __init__(self):
self.node_name = rospy.get_name()
rospy.loginfo("[%s] Initializing " %(self.node_name))
self.frame_id = 'odom'
#self.image_sub = rospy.Subscriber("/BRIAN/camera_node/image/compressed", Image, self.img_cb, queue_size=1)
self.image_sub = rospy.Subscriber("/BRIAN/camera_node/image/compressed", CompressedImage, self.img_cb, queue_size=1, buff_size = 2**24)
self.pub_cmd = rospy.Publisher("/MONICA/cmd_drive", UsvDrive, queue_size = 1)
self.pub_goal = rospy.Publisher("/goal_point", Marker, queue_size = 1)
self.image_pub = rospy.Publisher("/predict_img", Image, queue_size = 1)
self.station_keeping_srv = rospy.Service("/station_keeping", SetBool, self.station_keeping_cb)
self.pos_control = PID_control("Position_tracking")
self.ang_control = PID_control("Angular_tracking")
self.ang_station_control = PID_control("Angular_station")
self.pos_station_control = PID_control("Position_station")
self.pos_srv = Server(pos_PIDConfig, self.pos_pid_cb, "Position_tracking")
self.ang_srv = Server(ang_PIDConfig, self.ang_pid_cb, "Angular_tracking")
self.pos_station_srv = Server(pos_PIDConfig, self.pos_station_pid_cb, "Angular_station")
self.ang_station_srv = Server(ang_PIDConfig, self.ang_station_pid_cb, "Position_station")
self.initialize_PID()
def img_cb(self, msg):
try:
np_arr = np.fromstring(msg.data, np.uint8)
cv_image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)
#cv_image = self.bridge.imgmsg_to_cv2(msg, "bgr8")
except CvBridgeError as e:
print(e)
(rows, cols, channels) = cv_image.shape
self.width = cols
self.height = rows
predict = self.predict(cv_image)
if predict is None:
return
angle, dis = predict[0], predict[1]
self.tracking_control(angle, dis)
def tracking_control(self, goal_angle, goal_distance):
if self.is_station_keeping:
rospy.loginfo("Station Keeping")
pos_output, ang_output = self.station_keeping(goal_distance, goal_angle)
else:
pos_output, ang_output = self.control(goal_distance, goal_angle)
cmd_msg = UsvDrive()
cmd_msg.left = self.cmd_constarin(pos_output + ang_output)
cmd_msg.right = self.cmd_constarin(pos_output - ang_output)
self.pub_cmd.publish(cmd_msg)
#self.publish_goal(self.goal)
def predict(self, img):
# Image Preprocessing (vgg use BGR image as training input)
image = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
x = cv2.resize(image, (300, 300)).astype(np.float32)
x -= (104.0, 117.0, 123.0)
x = x.astype(np.float32)
x = x[:, :, ::-1].copy()
x = torch.from_numpy(x).permute(2, 0, 1)
#SSD Prediction
xx = Variable(x.unsqueeze(0))
if torch.cuda.is_available():
xx = xx.cuda()
y = self.net(xx)
scale = torch.Tensor(img.shape[1::-1]).repeat(2)
detections = y.data
max_prob = 0
coords = None
for i in range(self.ROBOT_NUM):
if detections[0, 1, i, 0].numpy() > self.predict_prob and detections[0, 1, i, 0].numpy() > max_prob:
max_prob = detections[0, 1, i, 0].numpy()
score = detections[0, 1, i, 0]
pt = (detections[0, 1, i,1:]*scale).cpu().numpy()
coords = (pt[0], pt[1]), pt[2]-pt[0]+1, pt[3]-pt[1]+1
if coords is None:
return None
angle, dis, center = self.BBx2AngDis(coords)
cv2.circle(img, (int(center[0]), int(center[1])), 10, (0,0,255), -1)
cv2.rectangle(img, (int(coords[0][0]), int(coords[0][1])),\
(int(coords[0][0] + coords[1]), int(coords[0][1] + coords[2])),(0,0,255),5)
try:
img = self.draw_cmd(img, dis, angle)
self.image_pub.publish(self.bridge.cv2_to_imgmsg(img, "bgr8"))
except CvBridgeError as e:
print(e)
if __name__ == '__main__':
rospy.init_node('diagnose')
foo = Diagnose()
rospy.spin()
|
[
"cpwearth.eed03@g2.nctu.edu.tw"
] |
cpwearth.eed03@g2.nctu.edu.tw
|
08c90ebe0b76a7df4692e61b1223d169fa34fbe2
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/095_os_and_sys/_exercises/exercises/Programming_Python/04_File and Directory Tools/04_012_os.open mode flags.py
|
2336e6e88475b4379afd36620f42eff0ea5e9cbc
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,564
|
py
|
# _______ __
# fdfile _ __.o... _'C:\temp\spam.txt', __.O_R.. | __.O_B..
# __.r... ? 20
# # b'Hello stdio file\r\nHe'
# # ######################################################################################################################
#
# __.ls.. ? 0 0 # go back to start of file
# __.r... ? 100 # binary mode retains "\r\n"
# # b'Hello stdio file\r\nHello descriptor file\n'
# # ######################################################################################################################
#
# __.ls.. ?, 0, 0
# __.w.. ?, b'HELLO') # overwrite first 5 bytes
# # 5
# # C:\temp> type spam.txt
# # HELLO stdio file
# # Hello descriptor file
# # ######################################################################################################################
#
# file _ o... _'C:\temp\spam.txt' ___ # same but with open/objects
# ?.r... 20
# # b'HELLO stdio file\r\nHe'
# # ######################################################################################################################
#
# ?.se.. 0
# ?.r... 100
# # b'HELLO stdio file\r\nHello descriptor file\n'
# # ######################################################################################################################
#
# ?.se.. 0
# ?.w.. _'Jello'
# # 5
# # ######################################################################################################################
#
# ?.se.. 0
# ?.r...
# # b'Jello stdio file\r\nHello descriptor file\n'
# # ######################################################################################################################
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
62ab7743a9f1074052811f9d7645e054d1514cc3
|
e3fc83e77e218f7b8df4b14b0753fd65afd4b923
|
/downloaded_kernels/house_sales/converted_notebooks/kernel_121.py
|
36d0e7d411d81763309cb78183e37fd1f9b83211
|
[
"MIT"
] |
permissive
|
jupste/wranglesearch
|
982684fdaa7914af59758880fdc3a4ff3346477f
|
a6978fae73eee8ece6f1db09f2f38cf92f03b3ad
|
refs/heads/master
| 2023-06-18T04:46:34.474046
| 2021-07-15T23:43:24
| 2021-07-15T23:43:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,981
|
py
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
plt.figure(figsize=(20, 5))
from sklearn.model_selection import train_test_split
from sklearn import tree
from sklearn import linear_model
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
import seaborn as sns
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.feature_selection import RFE
def performance_metric(y_true, y_predict, normalize=True):
score = r2_score(y_true, y_predict)
return score
data = pd.read_csv("../input/kc_house_data.csv", encoding = "ISO-8859-1")
Y = data["price"]
X = data[["bedrooms", "bathrooms", "sqft_living", "sqft_lot", "floors", "waterfront", "view", "grade", "sqft_above", "sqft_basement", "yr_built", "yr_renovated", "zipcode", "lat", "long"]]
colnames = X.columns
#ranking columns
ranks = {}
def ranking(ranks, names, order=1):
minmax = MinMaxScaler()
ranks = minmax.fit_transform(order*np.array([ranks]).T).T[0]
ranks = map(lambda x: round(x,2), ranks)
return dict(zip(names, ranks))
for i, col in enumerate(X.columns):
# 3 plots here hence 1, 3
plt.subplot(1, 15, i+1)
x = X[col]
y = Y
plt.plot(x, y, 'o')
# Create regression line
plt.plot(np.unique(x), np.poly1d(np.polyfit(x, y, 1))(np.unique(x)))
plt.title(col)
plt.xlabel(col)
plt.ylabel('prices')
#Splitting the datasets
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2, random_state=10)
#Models
# Each model below is fit on the training split; its feature weights or
# importances are recorded in `ranks` via ranking(), and its test-set
# predictions are kept for the R2 report at the end of the script.
# NOTE(review): ranks, ranking, colnames and the sklearn imports are defined
# earlier in the file (outside this excerpt).
#Decision Tree Regressor
DTR = tree.DecisionTreeRegressor()
DTR = DTR.fit(X_train,y_train)
ranks["DTR"] = ranking(np.abs(DTR.feature_importances_), colnames)
Y_target_DTR = DTR.predict(X_test)
#Decision Tree Classifier
# NOTE(review): the classifiers (DTC, ETC) are fit on the same target the
# rest of the script treats as continuous prices — confirm y_train is
# discrete, otherwise these two fits are questionable.
DTC = DecisionTreeClassifier(max_depth=None, min_samples_split=2, random_state=0)
DTC = DTC.fit(X_train, y_train)
ranks["DTC"] = ranking(np.abs(DTC.feature_importances_), colnames)
Y_target_DTC = DTC.predict(X_test)
#LARS Lasso
LARS_L = linear_model.LassoLars(alpha=.4)
LARS_L = LARS_L.fit(X_train, y_train)
ranks["LARS_L"] = ranking(np.abs(LARS_L.coef_), colnames)
Y_target_lars_l = LARS_L.predict(X_test)
#Bayesian Ridge
BR = linear_model.BayesianRidge()
BR = BR.fit(X_train, y_train)
ranks["BR"] = ranking(np.abs(BR.coef_), colnames)
Y_target_BR = BR.predict(X_test)
#Random Forest Regressor
RFR = RandomForestRegressor(n_jobs=-1, n_estimators=50, verbose=0)
RFR = RFR.fit(X_train,y_train)
ranks["RFR"] = ranking(RFR.feature_importances_, colnames);
#print(ranks["RFR"])
Y_target_RFR = RFR.predict(X_test)
#Recursive Feature Elimination on Random Forest Regressor
RFE_RFR = RFE(RFR, n_features_to_select=10, step = 1)
RFE_RFR.fit(X_train,y_train)
Y_target_RFE_RFR = RFE_RFR.predict(X_test)
#Extra Trees Classifier
ETC = ExtraTreesClassifier(n_estimators=10, max_depth=None, min_samples_split=2, random_state=0)
ETC = ETC.fit(X_train, y_train)
ranks["ETC"] = ranking(np.abs(ETC.feature_importances_), colnames)
Y_target_ETC = ETC.predict(X_test)
#Recursive Feature Elimination on Decision Tree Regressor
# NOTE(review): this rebinds the name RFE from the sklearn class to an
# instance — any later use of the RFE *class* in this module would break.
RFE = RFE(DTR, n_features_to_select=10, step =1 )
RFE.fit(X_train,y_train)
Y_target_RFE = RFE.predict(X_test)
#Ranking inputs
# Average each feature's rank across every fitted model, print a
# tab-separated table (one row per feature, one column per method plus the
# mean), then visualize the means as a horizontal bar chart.
r = {}
for name in colnames:
    r[name] = round(np.mean([ranks[method][name]
                             for method in ranks.keys()]), 2)
methods = sorted(ranks.keys())
ranks["Mean"] = r
methods.append("Mean")
print("\t%s" % "\t".join(methods))
for name in colnames:
    print("%s\t%s" % (name, "\t".join(map(str,
                      [ranks[method][name] for method in methods]))))
#seaborn plot
#create dataframe
meanplot = pd.DataFrame(list(r.items()), columns= ['Feature','Mean Ranking'])
meanplot = meanplot.sort_values('Mean Ranking', ascending=False)
#plot proper
# NOTE(review): sns.factorplot and its `size` argument are deprecated in
# newer seaborn releases (catplot / height) — confirm the pinned version.
sns.factorplot(x="Mean Ranking", y="Feature", data = meanplot, kind="bar",
               size=14, aspect=1.9, palette='coolwarm')
#R2 metrics for each model
# Final report: R^2 of each model's test-set predictions, computed by the
# performance_metric helper defined earlier in the file.
print("\nR2 score, Decision Tree Regressor:")
print(performance_metric(y_test, Y_target_DTR))
print("\nR2 score, Decision Tree Classifier:")
print(performance_metric(y_test, Y_target_DTC))
print("\nR2 score, LARS Lasso:")
print(performance_metric(y_test, Y_target_lars_l))
print("\nR2 score, Bayesian Ridge:")
print(performance_metric(y_test, Y_target_BR))
print("\nR2 score, Random Forest Regressor:")
print(performance_metric(y_test, Y_target_RFR))
print("\nR2 score, Recursive Feature Eliminition on Random Forest Regressor:")
print(performance_metric(y_test, Y_target_RFE_RFR))
print("\nR2 score, Extra Trees Classifier:")
print(performance_metric(y_test, Y_target_ETC))
print("\nR2 score, Recursive Feature Eliminition on Decision Tree Regressor:")
print(performance_metric(y_test, Y_target_RFE))
# In[ ]:
|
[
"jcamsan@rhino.csail.mit.edu"
] |
jcamsan@rhino.csail.mit.edu
|
c2c571d1543df3e9ae04c706f4659fbe4e3352ec
|
9a181799f7b87aace15f0db9afedd861259a48c2
|
/At least 1 edge between any 2 vertexes in directed graph.py
|
65caf712b4e55b9f6fc994149d45f68c162b02bd
|
[] |
no_license
|
D-Katt/Coding-examples
|
77bea4cf1099019b12bbafd967c1c017adf4e9b8
|
81e8b47857513b7961cab4c09b8c27c20b8b8081
|
refs/heads/master
| 2021-12-25T05:01:05.026469
| 2021-12-17T13:43:57
| 2021-12-17T13:43:57
| 226,685,637
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,675
|
py
|
# A directed graph is called semi-complete if there is at least one edge
# between every pair of distinct vertices.  Given a graph as an edge list,
# check whether it is semi-complete.
# Input: n (1 <= n <= 100) — number of vertices, and m (1 <= m <= n(n-1)) —
# number of edges, followed by m pairs of vertex numbers.  Vertices are
# numbered from 0.
# Output: "YES" if the graph is semi-complete, "NO" otherwise.
from itertools import combinations

n, m = (int(s) for s in input().split())
# n - number of vertices, m - number of edges
Graph = [[0] * n for _ in range(n)]  # adjacency-matrix skeleton
for i in range(m):  # read the edges pair by pair
    a, b = (int(s) for s in input().split())
    Graph[a][b] += 1
vertexes = [i for i in range(n)]  # all vertices, for enumerating pairs
pairs = combinations(vertexes, 2)  # every unordered pair of vertices
for a, b in pairs:
    if Graph[a][b] + Graph[b][a] < 1:  # at least one edge in either direction?
        print("NO")  # found a pair with no connecting edge
        exit()
print("YES")
|
[
"noreply@github.com"
] |
D-Katt.noreply@github.com
|
6e0790e32d260b990eddc5aac28a8e17cb474c33
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5630113748090880_0/Python/musicman3320/argus.py
|
5443e0aafb386bdc58fc19821a2d13ddc2e2698f
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
# Code Jam style solution: for each test case, read 2*N-1 lines of heights
# and report every height that occurs an odd number of times, writing one
# "Case #k: ..." line per case to the output file.
fin = open('B-small-attempt0.in', 'r')
fout = open('B-small-attempt0.out','w')
numtests = int(fin.readline().rstrip())
for test in range(numtests):
    N = int(fin.readline().rstrip())
    # Occurrence counter indexed by height; assumes heights <= 2500 — TODO confirm.
    heightCounts = [0]*2501
    for i in range(2*N-1):
        page = [int(h) for h in str(fin.readline().rstrip()).split(" ")]
        for h in page:
            heightCounts[h] = heightCounts[h] + 1
    result = []
    # Heights with odd multiplicity form the answer for this case.
    for h in range(len(heightCounts)):
        if heightCounts[h] % 2 == 1:
            result = result + [str(h)]
    outstr = "Case #" + str(test+1) + ": " + str(' '.join(result)) + "\n"
    # print outstr.rstrip()
    fout.write(outstr)
# NOTE(review): fin/fout are never closed — harmless in a one-shot contest
# script, but `with open(...)` would be tidier.
|
[
"alexandra1.back@gmail.com"
] |
alexandra1.back@gmail.com
|
98ae99df2b35e5c0d48d6310c4502d8027b57ff4
|
4314b77d958174db744ae29cd914d24435246cd0
|
/sparse_ll.py
|
5bf449c07b0c7a2b6a45ecffee23624d0ea43e35
|
[] |
no_license
|
danieljtait/solid-sniffle
|
87aa262a2281be831c4408e6e06871d8affeb65a
|
31abbe1387468100708e7d13baa166f97e094af8
|
refs/heads/master
| 2021-01-11T00:39:22.628372
| 2016-10-17T08:22:00
| 2016-10-17T08:22:00
| 70,501,791
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,648
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from main import init_dbw_example,potential,Hamiltonian,pStationary,HJacobi_integrator
from main import pTransition
from scipy.interpolate import UnivariateSpline
# Observed sample path and the time grid used for the transition densities.
X = np.loadtxt('dbwData1.txt',delimiter=',')
T = 0.5
tt = np.linspace(0.,T,50)
# For now fix diffusion constant
D2 = .5
z = np.sort(X)  # sorted copy, used to pick the anchor points below

def objFunc(par):
    """Negative log-likelihood of the sample path X for parameter *par*.

    Builds the potential/Hamiltonian machinery imported from `main`,
    precomputes two transition densities anchored at z[20] and z[-20], and
    blends them linearly for states between the anchors.  Returns np.inf
    when the integration fails.
    """
    # Set up the necessary functions
    def f(x):
        # Double-well potential (negated): wells at +/- sqrt(par).
        return -(-x**4 + 2*par*x**2)
    def fgrad(x):
        return -(4*x*(par-x**2))
    U = potential(f,fgrad)
    H = Hamiltonian(U,lambda x: D2)
    eps = 0.0
    def func(x,p):
        # Penalty around the separatrix; inert while eps == 0.
        return eps*(p-H.seperatrix(x))**2
    H.set_add_term(func)
    Pst = None
    rootPar = np.sqrt(par)  # NOTE(review): unused
    #xRep = [-0.63,0.63]
    xRep = [z[20],z[-20]]  # anchor points drawn from the sorted sample
    try :
        val = 0.
        J = HJacobi_integrator(H,Pst)
        pT1 = pTransition(J)
        pT1.make(xRep[0],tt)
        pT2 = pTransition(J)
        pT2.make(xRep[1],tt)
        xx = J.xx
        """
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(xx,pT1(xx))
        ax.plot(xx,pT2(xx))
        """
        val = 0.
        # Accumulate log-likelihood of each transition X[i] -> X[i+1].
        for i in range(X.size-1):
            x = X[i]
            xT = X[i+1]
            if x < xRep[0] :
                val += np.log(pT1(xT))
            elif x > xRep[1] :
                val += np.log(pT2(xT))
            else:
                # Linear blend of the two anchored densities.
                # NOTE(review): w grows as x approaches xRep[1] yet weights
                # pT1 (anchored at xRep[0]) — verify the blend isn't inverted.
                w = abs(x-xRep[0])/(xRep[1]-xRep[0])
                val += np.log( w*pT1(xT) + (1-w)*pT2(xT) )
        return -val
    except:
        # NOTE(review): bare except hides real errors; any failure is treated
        # as "integration diverged" and scored infinitely badly.
        return np.inf
# (Python 2 script.)  Coarse grid scan of the objective, then a local
# Nelder-Mead refinement from 0.7.
print z[20],z[-20]
ll = []
pars = np.linspace(0.6,0.81,15)
for p in pars:
    ll.append(objFunc(p))
ll=np.array(ll)
print pars[np.where(ll == ll.min())[0]]  # grid argmin
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(pars,ll)
from scipy.optimize import minimize
res = minimize(objFunc,[0.7],method='Nelder-Mead',options={ 'disp': True , 'xatol' :1e-2})
print res
plt.show()
|
[
"tait.djk@gmail.com"
] |
tait.djk@gmail.com
|
07a3b814581a863498fc66da22f05128fbf8aa7d
|
59b3dce3c770e70b2406cc1dd623a2b1f68b8394
|
/python_3/lessons/Properties/src/test_teacher.py
|
f4f1839acbf9975d7e2d0791f8b5ab9b6c999217
|
[] |
no_license
|
patrickbeeson/python-classes
|
04ed7b54fc4e1152a191eeb35d42adc214b08e39
|
b5041e71badd1ca2c013828e3b2910fb02e9728f
|
refs/heads/master
| 2020-05-20T07:17:36.693960
| 2015-01-23T14:41:46
| 2015-01-23T14:41:46
| 29,736,517
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,391
|
py
|
import unittest
from teacher import Teacher
class TestTeacher(unittest.TestCase):
    """Unit tests for the Teacher class's property behaviour (get/set/delete)."""

    def setUp(self):
        # NOTE(review): age is passed as the string '63' but test_get expects
        # the int 63 — presumably Teacher coerces it; confirm in teacher.py.
        self.teacher = Teacher('steve',
                               'holden',
                               '63',
                               ['Python 3-1','Python 3-2','Python 3-3'],
                               5)

    def test_get(self):
        # Names come back capitalized, age as int, grade as an ordinal word.
        self.assertEqual(self.teacher.first_name, 'Steve')
        self.assertEqual(self.teacher.last_name, 'Holden')
        self.assertEqual(self.teacher.age, 63)
        self.assertEqual(self.teacher.classes, ['Python 3-1','Python 3-2','Python 3-3'])
        self.assertEqual(self.teacher.grade, 'Fifth')
        self.teacher.description = 'curmudgeon'
        self.assertEqual(self.teacher.description, 'curmudgeon')

    def test_set(self):
        self.teacher.age = 21
        # Setter is expected to store into the private backing attribute _age.
        self.assertEqual(self.teacher._age, 21)
        self.assertEqual(self.teacher.age, 21)
        self.assertRaises(ValueError, self.setAgeWrong)

    def setAgeWrong(self):
        # Helper (not a test): a non-numeric age must raise ValueError.
        self.teacher.age = 'twentyone'

    def test_delete(self):
        del self.teacher.grade
        # NOTE(review): deleting grade apparently bumps age 63 -> 64 —
        # confirm this side effect is intentional in Teacher.
        self.assertEqual(self.teacher.age, 64)
        self.assertRaises(AttributeError, self.accessGrade)

    def accessGrade(self):
        # Helper: reading grade after deletion must raise AttributeError.
        return self.teacher.grade

if __name__ == "__main__":
    unittest.main()
|
[
"patrickbeeson@gmail.com"
] |
patrickbeeson@gmail.com
|
6794f14678710d8ace89c78a28304ab8181c1c25
|
d0002c42833f416d13c2452e3aaf31e34e474231
|
/Multibox-FHD-Skin-4ATV/usr/lib/enigma2/python/Components/Renderer/AMBCicontrol.py
|
2e7cb609b6a9bb4740eb067d30931f6eb66d2c69
|
[] |
no_license
|
stein17/Skins-for-openATV
|
b146b9d62a1c3149b02af09253a225db43783768
|
ad67a0336e8cdba54bf6c5fda42cb12e2b820b05
|
refs/heads/master
| 2023-08-14T21:31:18.530737
| 2022-08-29T00:35:44
| 2022-08-29T00:35:44
| 94,653,168
| 12
| 18
| null | 2022-04-26T06:02:50
| 2017-06-17T22:47:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,832
|
py
|
#by Nikolasi and jbleyel
from Components.Renderer.Renderer import Renderer
from enigma import ePixmap, eDVBCI_UI, eDVBCIInterfaces, eEnv
from Tools.Directories import fileExists
from Components.Converter.Poll import Poll
class AMBCicontrol(Renderer, Poll):
    """Enigma2 skin renderer that shows a PNG reflecting the CI module state.

    Polls every second and maps the CI slot state to one of the icons
    "nomodule" / "initmodule" / "ready" found under the skin's picon path.
    """
    # %s is substituted with self.path (the skin-configurable subdirectory).
    searchPaths = [eEnv.resolve('${datadir}/enigma2/Multibox/%s/')]

    def __init__(self):
        Poll.__init__(self)
        Renderer.__init__(self)
        self.path = 'module'      # picon subdirectory, overridable via skin attr
        self.slot = 0             # CI slot index, overridable via skin attr
        self.nameCache = { }      # state text -> resolved picon path
        self.pngname = ""         # currently displayed picon (avoid reloads)

    def applySkin(self, desktop, parent):
        # Consume the custom 'path'/'slot' skin attributes; pass the rest on.
        attribs = []
        for (attrib, value,) in self.skinAttributes:
            if attrib == 'path':
                self.path = value
            elif attrib == 'slot':
                self.slot = int(value)
            else:
                attribs.append((attrib, value))
        self.skinAttributes = attribs
        return Renderer.applySkin(self, desktop, parent)

    GUI_WIDGET = ePixmap

    def changed(self, what):
        # Called on every source change and every poll tick.
        self.poll_interval = 1000
        self.poll_enabled = True
        if self.instance:
            text = "nomodule"
            pngname = ''
            if what[0] != self.CHANGED_CLEAR:
                service = self.source.service
                if service:
                    NUM_CI=eDVBCIInterfaces.getInstance().getNumOfSlots()
                    if NUM_CI > 0:
                        state = eDVBCI_UI.getInstance().getState(self.slot)
                        if state != -1:
                            # 0 = no module, 1 = initialising, 2 = ready
                            if state == 0:
                                text = "nomodule"
                            elif state == 1:
                                text = "initmodule"
                            elif state == 2:
                                text = "ready"
            # NOTE(review): nesting below reconstructed from a flattened dump —
            # lookup/caching is assumed to run whenever an instance exists.
            pngname = self.nameCache.get(text, "")
            if pngname == "":
                pngname = self.findPicon(text)
                if pngname != "":
                    self.nameCache[text] = pngname
                else:
                    return  # no icon available for this state; keep current pixmap
            if self.pngname != pngname:
                self.instance.setPixmapFromFile(pngname)
                self.pngname = pngname

    def findPicon(self, serviceName):
        # Return the first existing "<path>/<serviceName>.png", or "".
        for path in self.searchPaths:
            pngname = (path % self.path) + serviceName + ".png"
            if fileExists(pngname):
                return pngname
        return ""
|
[
"lutz.f.kroll@gmail.com"
] |
lutz.f.kroll@gmail.com
|
f697416ba21ad9c5bf52caaad7472d1afcf3e15f
|
fc678a0a5ede80f593a29ea8f43911236ed1b862
|
/575-DistributeCandies.py
|
4fa220fe6385d42dd5572d2b3e9528ca85753fa5
|
[] |
no_license
|
dq-code/leetcode
|
4be0b1b154f8467aa0c07e08b5e0b6bd93863e62
|
14dcf9029486283b5e4685d95ebfe9979ade03c3
|
refs/heads/master
| 2020-12-13T15:57:30.171516
| 2017-11-07T17:43:19
| 2017-11-07T17:43:19
| 35,846,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
class Solution(object):
    def distributeCandies(self, candies):
        """Maximum number of distinct candy kinds one sibling can receive
        when the candies are split evenly between two people.

        :type candies: List[int]  # candy kind per candy; even length
        :rtype: int               # min(half the candies, number of kinds)
        """
        # Floor division keeps the result an int on Python 3 as well
        # (plain `/` would yield a float there).
        return min(len(candies) // 2, len(set(candies)))
|
[
"dengqianwork@gmail.com"
] |
dengqianwork@gmail.com
|
f0f572dd9a8b601278b6a14b38b8ab2ede39f5d8
|
e60487a8f5aad5aab16e671dcd00f0e64379961b
|
/python_stack/Algos/list_comprehension/interview2.py
|
88cd08b39166faf1fc9acacbb6ec43f53c1757b6
|
[] |
no_license
|
reenadangi/python
|
4fde31737e5745bc5650d015e3fa4354ce9e87a9
|
568221ba417dda3be7f2ef1d2f393a7dea6ccb74
|
refs/heads/master
| 2021-08-18T08:25:40.774877
| 2021-03-27T22:20:17
| 2021-03-27T22:20:17
| 247,536,946
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 685
|
py
|
# LIST COMPREHENSIONS
# 1. return a list of numbers between (1-100)
# 2. List - I want n for each n in nums[1,2,3,4,5,6,7,8,9,10] -(even/odd)
# 3. List of dictionary- return list of brand and models for year>2000
# cars=[
# {'brand':'Ford','model':'Mustang','year':1964},
# {'brand':'Ford','model':'Ranger','year':1960},
# {'brand':'Audi','model':'A8','year':2008},
# {'brand':'BMW','model':'X7','year':2007}
# ]
# 4. Creating a dictionary with list comprehensions
# brands=['Ford','Audi','BMW']
# cars=['Ranger','A8','X7']
# {'Ford':'Ranger','Audi':'A8','BMW':'X7'}
# I want a dict 'brand':'car' for each brand and car
# [expression iteration condition]
|
[
"reena.dangi@gmail.com"
] |
reena.dangi@gmail.com
|
c1f8bb3be62c97ced5dc256c82611d9584b30bc9
|
960b3a17a4011264a001304e64bfb76d669b8ac5
|
/mstrio/modeling/schema/__init__.py
|
01d0f898ac0d5d563da16c9addf1e2a2610c83f7
|
[
"Apache-2.0"
] |
permissive
|
MicroStrategy/mstrio-py
|
012d55df782a56dab3a32e0217b9cbfd0b59b8dd
|
c6cea33b15bcd876ded4de25138b3f5e5165cd6d
|
refs/heads/master
| 2023-08-08T17:12:07.714614
| 2023-08-03T12:30:11
| 2023-08-03T12:30:11
| 138,627,591
| 84
| 60
|
Apache-2.0
| 2023-07-31T06:43:33
| 2018-06-25T17:23:55
|
Python
|
UTF-8
|
Python
| false
| false
| 323
|
py
|
# flake8: noqa
from .attribute import *
from .fact import *
from .helpers import *
from .schema_management import (
SchemaLockStatus,
SchemaLockType,
SchemaManagement,
SchemaTask,
SchemaTaskStatus,
SchemaUpdateType,
)
from .table import *
from .transformation import *
from .user_hierarchy import *
|
[
"noreply@github.com"
] |
MicroStrategy.noreply@github.com
|
a1224e592c6abcd25d41d21b5503c12b326683c4
|
8e24e8bba2dd476f9fe612226d24891ef81429b7
|
/geeksforgeeks/python/python_all/170_8.py
|
8e5e0f599ffbaa4d32d8dfe5afc7b1f9031f152f
|
[] |
no_license
|
qmnguyenw/python_py4e
|
fb56c6dc91c49149031a11ca52c9037dc80d5dcf
|
84f37412bd43a3b357a17df9ff8811eba16bba6e
|
refs/heads/master
| 2023-06-01T07:58:13.996965
| 2021-06-15T08:39:26
| 2021-06-15T08:39:26
| 349,059,725
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,392
|
py
|
Python | Check if list is strictly increasing
The test for monotonic sequence is a utility that has manifold applications in
mathematics and hence every sphere related to mathematics. As mathematics and
Computer Science generally go parallel, mathematical operations such as
checking for strictly increasing sequence can be useful to gather knowledge
of. Same argument can be extended for strictly decreasing lists also. Lets
discuss certain ways to perform this test.
**Method #1 : Using all() + zip()**
The all() generally checks for all the elements fed to it. The task of zip()
is to link list beginning from beginning and list beginning from first
element, so that a check can be performed on all elements.
__
__
__
__
__
__
__
# Python3 code to demonstrate
# to check for strictly increasing list
# using zip() + all()
# initializing list
test_list = [1, 4, 5, 7, 8, 10]
# printing original lists
print ("Original list : " + str(test_list))
# using zip() + all()
# to check for strictly increasing list
res = all(i < j for i, j in zip(test_list,
test_list[1:]))
# printing result
print ("Is list strictly increasing ? : " + str(res))
---
__
__
**Output:**
Original list : [1, 4, 5, 7, 8, 10]
Is list strictly increasing ? : True
**Method #2 : Using reduce() + lambda**
reduce() coupled with lambda can also perform this task of checking for
monotonicity. reduce function is used to cumulate the result as True or False,
lambda function checks for each index value with next index value.
__
__
__
__
__
__
__
# Python3 code to demonstrate
# to check for strictly increasing list
# using reduce() + lambda
# initializing list
test_list = [1, 4, 5, 7, 8, 10]
# printing original lists
print ("Original list : " + str(test_list))
# using reduce() + lambda
# to check for strictly increasing list
from functools import reduce
res = reduce(lambda i, j: j if i < j else 9999,
test_list) != 9999
# printing result
print ("Is list strictly increasing ? : " + str(res))
---
__
__
**Output:**
Original list : [1, 4, 5, 7, 8, 10]
Is list strictly increasing ? : True
|
[
"qmnguyenw@gmail.com"
] |
qmnguyenw@gmail.com
|
55ba0ca91879be3c1ad97a693770b8fd4d88b8bc
|
cf0779621df542169096d73476de493c0eb7eecd
|
/setup.py
|
9454c7f1200e950f028e3ec48532a413397f72cd
|
[
"MIT"
] |
permissive
|
aixpact/microservices-api
|
e16884d204dbcc63e1f49a1feb707e5980bdbeec
|
639725be630f4f049cef9251cb5946dfd846d234
|
refs/heads/master
| 2020-05-20T02:55:32.561419
| 2019-05-08T07:21:47
| 2019-05-08T07:21:47
| 185,343,743
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
from setuptools import setup
setup(
name='HelloApp-CLI',
version='1.0',
packages=['cli', 'cli.commands'],
include_package_data=True,
install_requires=[
'click',
],
entry_points="""
[console_scripts]
hello_app=cli.cli:cli
""",
)
|
[
"frank@aixpact.com"
] |
frank@aixpact.com
|
416b80855f6461627677d64403c3f36b99959cfc
|
9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56
|
/google/ads/googleads/v11/enums/types/conversion_origin.py
|
636cddf2770b73e0085cb89f0705b9b6c383f762
|
[
"Apache-2.0"
] |
permissive
|
GerhardusM/google-ads-python
|
73b275a06e5401e6b951a6cd99af98c247e34aa3
|
676ac5fcb5bec0d9b5897f4c950049dac5647555
|
refs/heads/master
| 2022-07-06T19:05:50.932553
| 2022-06-17T20:41:17
| 2022-06-17T20:41:17
| 207,535,443
| 0
| 0
|
Apache-2.0
| 2019-09-10T10:58:55
| 2019-09-10T10:58:55
| null |
UTF-8
|
Python
| false
| false
| 1,240
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v11.enums",
marshal="google.ads.googleads.v11",
manifest={"ConversionOriginEnum",},
)
class ConversionOriginEnum(proto.Message):
    r"""Container for enum describing possible conversion origins.
    """

    class ConversionOrigin(proto.Enum):
        r"""The possible places where a conversion can occur."""
        # Values mirror the ConversionOrigin protobuf enum; the numeric tags
        # are part of the wire format and must not be changed by hand
        # (this module follows the generated proto-plus layout).
        UNSPECIFIED = 0
        UNKNOWN = 1
        WEBSITE = 2
        GOOGLE_HOSTED = 3
        APP = 4
        CALL_FROM_ADS = 5
        STORE = 6
        YOUTUBE_HOSTED = 7


# Export exactly the names declared in the proto module manifest above.
__all__ = tuple(sorted(__protobuf__.manifest))
|
[
"noreply@github.com"
] |
GerhardusM.noreply@github.com
|
efd46d8641ce515dc35ba0086dd38668077077c3
|
986a8c5de450fc436897de9aaff4c5f737074ee3
|
/剑指offer/字符串/正则表达式匹配.py
|
aebcc76c678aad65e62c9dd65bc9248eb931a8d6
|
[] |
no_license
|
lovehhf/newcoder_py
|
7a0ef03f0ea733ec925a10f06566040f6edafa67
|
f8ae73deef1d9422ca7b0aa9f484dc96db58078c
|
refs/heads/master
| 2020-04-27T18:20:19.082458
| 2019-05-24T15:30:13
| 2019-05-24T15:30:13
| 174,564,930
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,318
|
py
|
# -*- coding:utf-8 -*-
__author__ = 'huanghf'
"""
题目描述
请实现一个函数用来匹配包括'.'和'*'的正则表达式。模式中的字符'.'表示任意一个字符,而'*'表示它前面的字符可以出现任意次(包含0次)。
在本题中,匹配是指字符串的所有字符匹配整个模式。例如,字符串"aaa"与模式"a.a"和"ab*ac*a"匹配,但是与"aa.a"和"ab*a"均不匹配
"""
class Solution:
    def match(self, s, p):
        """Full-string regex match: '.' matches any single character and
        'x*' matches zero or more of the preceding 'x'.

        Returns 1 when s matches p in its entirety, 0 otherwise.
        """
        # Pad both strings so index 0 represents the empty prefix.
        text = ' ' + s
        pat = ' ' + p
        rows, cols = len(text), len(pat)
        # table[i][j]: does text[1..i] match pat[1..j]?
        table = [[0] * cols for _ in range(rows)]
        table[0][0] = 1  # empty matches empty
        for i in range(rows):
            for j in range(1, cols):
                # Direct character (or wildcard) match extends the diagonal.
                if i > 0 and (text[i] == pat[j] or pat[j] == '.'):
                    table[i][j] = table[i - 1][j - 1]
                if pat[j] == '*':
                    # 'x*' matching zero occurrences: drop both pattern chars.
                    table[i][j] |= table[i][j - 2]
                    # 'x*' consuming text[i] when it matches the repeated char.
                    if i > 0 and (pat[j - 1] == '.' or text[i] == pat[j - 1]):
                        table[i][j] |= table[i - 1][j] | table[i - 1][j - 2]
        return table[-1][-1]
# Ad-hoc smoke test: empty string against "." prints 0, because '.' must
# consume exactly one character.
s = ""
p = "."
sol = Solution()
print(sol.match(s, p))
|
[
"853885165@qq.com"
] |
853885165@qq.com
|
72c5c0b01ae2c2fd37c6514e631e20eaf84a2f41
|
0cfd2faf3f52af67888f082bd9dc29f34ffdc810
|
/python/2_0/watchdog.py
|
e434d9b6e239ae27fd1be2db6b4b5b98b0e61458
|
[] |
no_license
|
rsprenkels/kattis
|
28bd078a797a9cfa76509145e39db77fe6a895cd
|
7d51e8afb841c4bd4debaf5aef0c37f6da6f3abd
|
refs/heads/master
| 2023-08-11T00:07:06.308151
| 2023-07-29T22:04:04
| 2023-07-29T22:04:04
| 69,284,864
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,097
|
py
|
import math
from typing import Tuple, Sequence
class Point:
    """A 2D point with +, -, == and a few distance helpers."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __add__(self, other):
        # Component-wise addition.
        return Point(self.x + other.x, self.y + other.y)

    def __sub__(self, other):
        # Component-wise subtraction.
        return Point(self.x - other.x, self.y - other.y)

    def __eq__(self, other):
        """Overrides the default implementation"""
        # Equal only to another Point with identical coordinates.
        if not isinstance(other, Point):
            return False
        return self.x == other.x and self.y == other.y

    def mhdist(self, other) -> int:
        # Manhattan (taxicab) distance.
        return abs(self.x - other.x) + abs(self.y - other.y)

    def dist(self, other) -> float:
        # Euclidean distance.
        dx = abs(self.x - other.x)
        dy = abs(self.y - other.y)
        return math.sqrt(math.pow(dx, 2) + math.pow(dy, 2))

    def length(self):
        # Euclidean distance from the origin.
        return math.sqrt(self.x * self.x + self.y * self.y)

    def __repr__(self):
        return f"{self.x} {self.y}"
def multisort(xs, specs):
    """Stably sort the list *xs* in place by multiple keys and return it.

    specs is a sequence of (index, reverse) pairs in priority order; the
    list is sorted by the last spec first so that, thanks to sort
    stability, the first spec ends up dominating.
    """
    # Fix: itemgetter was referenced without ever being imported in this
    # module (only `math` and `typing` are imported at the top), so calling
    # multisort raised NameError.  Import locally to keep the fix contained.
    from operator import itemgetter
    for key, reverse in reversed(specs):
        xs.sort(key=itemgetter(key), reverse=reverse)
    return xs
def watchdog(roof_size: int, hatches : Sequence[Point]) -> Tuple[bool, Point]:
    """Find an integer grid point for the dog such that a leash long enough
    to reach every hatch still keeps the dog on the roof.

    Scans interior grid points in x-major order; returns (True, position)
    for the first feasible point, or (False, Point(0, 0)) if none exists.
    """
    for x in range(1, roof_size):
        for y in range(1, roof_size):
            candidate = Point(x, y)
            if candidate not in hatches:  # the dog may not stand on a hatch
                # The leash must reach the farthest hatch from this spot.
                longest_leash = max([candidate.dist(h) for h in hatches])
                # The disc of that radius around the candidate must fit
                # inside the [0, roof_size] square.
                if candidate.x >= longest_leash and candidate.x <= roof_size - longest_leash and candidate.y >= longest_leash and candidate.y <= roof_size - longest_leash:
                    return (True, candidate)
    return (False, Point(0,0))
# assert watchdog(10, [Point(6,6), Point(5,4)]) == (True, Point(3, 6))
# assert watchdog(20, [Point(1,1), Point(19,19)]) == (False, Point(0, 0))
# Kattis-style driver: one test case per line group; print the found point
# or 'poodle' when no feasible position exists.
for _ in range(int(input())):
    roof_size, num_hathes = map(int, input().split())
    hatches = []
    for _ in range(num_hathes):
        hatches.append(Point(*map(int, input().split())))
    result = watchdog(roof_size, hatches)
    if result[0]:
        print(result[1])  # Point.__repr__ prints "x y"
    else:
        print('poodle')
# from 370.9 rank 1025 to 372.9 rank 1019
|
[
"ron.sprenkels@gmail.com"
] |
ron.sprenkels@gmail.com
|
697c6ce021892e01d425df78101ae11b6bfd2b4f
|
c418bd9d730bc17653611da7f0642bdd25cba65f
|
/djangosite/users/forms.py
|
55126f872392526a2768044a49fa73651634d1de
|
[] |
no_license
|
ErDeepakSingh/Ajax-State-City
|
ae18a4f4b8ef8e90932d8aed74553897d7ac9b3b
|
72a31424bd9402ef2c76198ee80934ac399fccf9
|
refs/heads/master
| 2020-08-16T02:29:36.081445
| 2019-10-16T02:51:22
| 2019-10-16T02:51:22
| 215,443,208
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserChangeForm
class UserUpdateForm(UserChangeForm):
    """Profile-edit form limited to name and e-mail fields."""

    # Drop the password (hash) field that UserChangeForm includes by default.
    password = None

    class Meta:
        model = User
        fields = ['first_name', 'last_name', 'email']
        # exclude = ['first_name']
|
[
"deepakthakur755@gmail.com"
] |
deepakthakur755@gmail.com
|
4e3fac772aa2d67c2aab6d3965c3ef63863ca614
|
574e874ebd7889c23a6292705dcd89594567bae8
|
/code/Scheduler_Class.py
|
3e9aa38a4a331038c83d92063010e9f28ec5c2e7
|
[] |
no_license
|
hammadhaleem/Switching-Fabric-Simulator
|
6bb9c65b983c7a23b11fd60f5e6472bc269878c9
|
279a22c349d02fd061bc52fd77e4c6be223041b7
|
refs/heads/master
| 2021-01-17T06:28:08.162967
| 2015-05-08T08:52:18
| 2015-05-08T08:52:18
| 34,894,625
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,442
|
py
|
from Queue_Class import *
import time
class StandardScheduler(object):
    """Switching-fabric scheduler: moves packets from a bank of input queues
    to a bank of output queues (both SuperMultiQueue instances).

    (Python 2 module — note the print statements.)
    """

    def __init__(self, number_of_queue,data):
        # data: iterable of packet dicts, pre-loaded into the input queues.
        super(StandardScheduler, self).__init__()
        self.number_of_queue = number_of_queue
        self.input_queue_object = SuperMultiQueue(int(self.number_of_queue))
        self.output_queue_object = SuperMultiQueue(int(self.number_of_queue))
        self.input_queue_object.insert_data_in_queues(data)

    def create_state_variable(self):
        # Build a per-card state record: total queued packet count plus a
        # round-robin pointer slot (initialised to None) and the queue count.
        states = {}
        data =self.Status()
        for cards in data :
            obj = {}
            obj['pointer'] = None
            obj['count'] = 0
            obj['max'] = self.number_of_queue
            for keys in data[cards] :
                obj['count'] = obj['count'] + data[cards][keys]
            states[cards] =obj
        return states

    def Test_Queues(self):
        # Debug dump of both queue banks.
        # NOTE(review): "Qutput" is a typo in the output string; left as-is
        # here since changing runtime strings is out of scope for doc edits.
        print "Input Queues"
        self.input_queue_object.debug()
        print "\nQutput Queues"
        self.output_queue_object.debug()

    def Get_Output(self):
        return self.output_queue_object.get_data_stream()

    # ((input port, output) , output_Card)
    # Returns true if the exchange was success.
    def Packet_Exchange(self,set_inp,set_out):
        # Move one packet from input queue set_inp to output queue set_out,
        # stamping its new source/outport and dequeue time.
        try:
            data = self.input_queue_object.pop_from_queue(set_inp)
            fake_list = [data]
            out = data['source']
            data['source']= set_out
            data['outport'] = set_out
            data['time_out'] =time.time()
            self.output_queue_object.insert_data_in_queues(fake_list)
            return True
        except:
            # NOTE(review): bare except — any failure (empty queue, bad key)
            # is silently reported as "exchange failed".
            pass
        return False

    def Status(self):
        # Per-card occupancy snapshot of the input queues.
        return self.input_queue_object.generate_input_status()
|
[
"hammadhaleem@gmail.com"
] |
hammadhaleem@gmail.com
|
6d6487a513d54348666f0ce244513f8c0b0773b1
|
58f095f52d58afa9e8041c69fa903c5a9e4fa424
|
/examples_UQ/TEST2.py
|
3519916d32cf9db375c1fa916e9fb27bb71fdba2
|
[
"BSD-3-Clause"
] |
permissive
|
cdeil/mystic
|
e41b397e9113aee1843bc78b5b4ca30bd0168114
|
bb30994987f36168b8f09431cb9c3823afd892cd
|
refs/heads/master
| 2020-12-25T23:18:52.086894
| 2014-08-13T14:36:09
| 2014-08-13T14:36:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,723
|
py
|
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 2009-2014 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
#######################################################################
# scaling and mpi info; also optimizer configuration parameters
# hard-wired: use DE solver, don't use mpi, F-F' calculation
# (similar to concentration.in)
#######################################################################
scale = 1.0
#XXX: <mpi config goes here>
npop = 20
maxiter = 1000
maxfun = 1e+6
convergence_tol = 1e-4
crossover = 0.9
percent_change = 0.9
#######################################################################
# the model function
# (similar to Simulation.cpp)
#######################################################################
def function(x):
    """a simple model function

    f = (x1*x2*x3)**(1/3)

    Input:
      - x -- 1-d array of coefficients [x1,x2,x3]

    Output:
      - f -- function result
    """
    # Geometric mean of the three coefficients: cube root of their product.
    product = x[0] * x[1] * x[2]
    return product ** (1.0 / 3.0)
#######################################################################
# the subdiameter calculation
# (similar to driver.sh)
#######################################################################
def costFactory(i):
    """a cost factory for the cost function"""
    # Returns a closure measuring the (negated, scaled) squared difference
    # of the model response when parameter i is replaced by the perturbed
    # copy stored in the last slot of rv.  Uses the module-level `scale`
    # and `function` defined earlier in this file.
    def cost(rv):
        """compute the diameter as a calculation of cost

        Input:
          - rv -- 1-d array of model parameters

        Output:
          - diameter -- scale * | F(x) - F(x')|**2
        """
        # prepare x and xprime
        params = rv[:-1] #XXX: assumes Xi' is at rv[-1]
        params_prime = rv[:i]+rv[-1:]+rv[i+1:-1] #XXX: assumes Xi' is at rv[-1]
        # get the F(x) response
        Fx = function(params)
        # get the F(x') response
        Fxp = function(params_prime)
        # compute diameter (negated so that minimizing it maximizes |F-F'|^2)
        return -scale * (Fx - Fxp)**2
    return cost
#######################################################################
# the differential evolution optimizer
# (replaces the call to dakota)
#######################################################################
def dakota(cost,lb,ub):
    """Minimize *cost* within box bounds [lb, ub] via differential evolution
    (mystic), standing in for a call to the DAKOTA toolkit.

    Returns (diameter, func_evals): the un-negated, un-scaled best value
    and the number of cost evaluations used.  (Python 2 module.)
    """
    from mystic.solvers import DifferentialEvolutionSolver2
    from mystic.termination import CandidateRelativeTolerance as CRT
    from mystic.strategy import Best1Exp
    from mystic.monitors import VerboseMonitor, Monitor
    from mystic.tools import getch, random_seed
    random_seed(123)  # fixed seed for reproducible optimizer runs
    #stepmon = VerboseMonitor(100)
    stepmon = Monitor()
    evalmon = Monitor()
    ndim = len(lb) # [(1 + RVend) - RVstart] + 1
    # Solver configuration comes from the module-level constants npop,
    # maxiter, maxfun, convergence_tol, crossover, percent_change.
    solver = DifferentialEvolutionSolver2(ndim,npop)
    solver.SetRandomInitialPoints(min=lb,max=ub)
    solver.SetStrictRanges(min=lb,max=ub)
    solver.SetEvaluationLimits(maxiter,maxfun)
    solver.SetEvaluationMonitor(evalmon)
    solver.SetGenerationMonitor(stepmon)
    tol = convergence_tol
    solver.Solve(cost,termination=CRT(tol,tol),strategy=Best1Exp, \
                 CrossProbability=crossover,ScalingFactor=percent_change)
    print solver.bestSolution
    # Undo the -scale applied inside the cost closure.
    diameter = -solver.bestEnergy / scale
    func_evals = solver.evaluations
    return diameter, func_evals
#######################################################################
# loop over model parameters to calculate concentration of measure
# (similar to main.cc)
#######################################################################
def UQ(start,end,lower,upper):
    """Concentration-of-measure loop: compute one subdiameter per model
    parameter index in [start, end] and report their sum.  (Python 2.)
    """
    diameters = []
    function_evaluations = []
    total_func_evals = 0
    total_diameter = 0.0
    for i in range(start,end+1):
        # Bounds for the full parameter vector plus the perturbed copy of
        # parameter i appended in the last slot (matches costFactory).
        lb = lower[start:end+1] + [lower[i]]
        ub = upper[start:end+1] + [upper[i]]
        #construct cost function and run optimizer
        cost = costFactory(i)
        subdiameter, func_evals = dakota(cost,lb,ub) #XXX: no initial conditions
        function_evaluations.append(func_evals)
        diameters.append(subdiameter)
        total_func_evals += function_evaluations[-1]
        total_diameter += diameters[-1]
    print "subdiameters (squared): %s" % diameters
    print "diameter (squared): %s" % total_diameter
    print "func_evals: %s => %s" % (function_evaluations, total_func_evals)
    return
#######################################################################
# rank, bounds, and restart information
# (similar to concentration.variables)
#######################################################################
if __name__ == '__main__':
    # Parameter indices and box bounds for the three model coefficients
    # (similar to concentration.variables in the original workflow).
    RVstart = 0; RVend = 2
    lower_bounds = [3.0,4.0,1.0]
    upper_bounds = [5.0,10.0,10.0]
    print " function: f = (x1*x2*x3)**(1/3)"
    print " parameters: ['x1', 'x2', 'x3']"
    print " lower bounds: %s" % lower_bounds
    print " upper bounds: %s" % upper_bounds
    print " ..."
    UQ(RVstart,RVend,lower_bounds,upper_bounds)
|
[
"mmckerns@968178ea-60bd-409e-af13-df8a517b6005"
] |
mmckerns@968178ea-60bd-409e-af13-df8a517b6005
|
5fdfc19ffacd51059a30914512e970f4bc922a3d
|
d1742451b25705fc128acc245524659628ab3e7d
|
/Codeforces with Python/1144B. Parity Alternated Deletions.py
|
928f29fb4554e6e84745a2c4f5cd70b8baf7e179
|
[] |
no_license
|
Shovon588/Programming
|
ebab793a3c97aedddfcad5ea06e7e22f5c54a86e
|
e4922c9138998358eed09a1be7598f9b060c685f
|
refs/heads/master
| 2022-12-23T18:29:10.141117
| 2020-10-04T17:29:32
| 2020-10-04T17:29:32
| 256,915,133
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 460
|
py
|
# Codeforces 1144B-style: elements are deleted alternating parity, so at
# most |#even - #odd| - 1 elements of the majority parity must remain
# undeleted; to minimize the leftover sum, keep the smallest of them.
n=int(input())
a=list(map(int,input().split()))
codd=0;ceven=0;odd=[];even=[]  # parity counters and per-parity buckets
for i in a:
    if i%2==0:
        ceven+=1
        even.append(i)
    else:
        codd+=1
        odd.append(i)
# Sort descending so the smallest values sit at the tail of each bucket.
even.sort(reverse=True)
odd.sort(reverse=True)
if abs(codd-ceven)<=1:
    # Counts (nearly) balanced: everything can be deleted, remainder sum 0.
    print(0)
else:
    if ceven>codd:
        temp=ceven-codd-1              # evens that must survive
        print(sum(even[ceven-temp:]))  # sum of the `temp` smallest evens
    else:
        temp=codd-ceven-1              # odds that must survive
        print(sum(odd[codd-temp:]))    # sum of the `temp` smallest odds
|
[
"mainulislam588@gmail.com"
] |
mainulislam588@gmail.com
|
62d61a289e5ed0d3b6b96274a6af852f57c89682
|
6b29d66ba7927129b68bc00db769f0edf1babaea
|
/SoftLayer/CLI/image/list.py
|
74de2d02dfd63f8749ad77dcae506a180c1ccca1
|
[
"MIT"
] |
permissive
|
tdurden82/softlayer-python
|
65f42923c347a164995dfc267829721032de261d
|
0eed20fa4adedd3228d91d929bb8befb1e445e49
|
refs/heads/master
| 2021-01-17T10:01:48.087450
| 2015-10-19T18:38:53
| 2015-10-19T18:38:53
| 46,301,339
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,731
|
py
|
"""List images."""
# :license: MIT, see LICENSE for more details.
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer.CLI import image as image_mod
from SoftLayer import utils
import click
@click.command()
@click.option('--public/--private',
              is_flag=True,
              default=None,
              help='Display only public or private images')
@environment.pass_env
def cli(env, public):
    """List images."""
    # public is tri-state: True (public only), False (private only),
    # None (flag omitted -> list both).
    image_mgr = SoftLayer.ImageManager(env.client)
    images = []
    if public in [False, None]:
        for image in image_mgr.list_private_images(mask=image_mod.MASK):
            images.append(image)
    if public in [True, None]:
        for image in image_mgr.list_public_images(mask=image_mod.MASK):
            images.append(image)
    table = formatting.Table(['guid',
                              'name',
                              'type',
                              'visibility',
                              'account'])
    # Keep only top-level images (child images carry a non-empty parentId).
    images = [image for image in images if image['parentId'] == '']
    for image in images:
        visibility = (image_mod.PUBLIC_TYPE if image['publicFlag']
                      else image_mod.PRIVATE_TYPE)
        table.add_row([
            image.get('globalIdentifier', formatting.blank()),
            # Wrap long names at 50 columns for terminal display.
            formatting.FormattedItem(image['name'],
                                     click.wrap_text(image['name'], width=50)),
            formatting.FormattedItem(
                utils.lookup(image, 'imageType', 'keyName'),
                utils.lookup(image, 'imageType', 'name')),
            visibility,
            image.get('accountId', formatting.blank()),
        ])
    env.fout(table)
|
[
"k3vinmcdonald@gmail.com"
] |
k3vinmcdonald@gmail.com
|
93d181edc316819ffb8293d1f14bd6c16374f7a0
|
549270020f6c8724e2ef1b12e38d11b025579f8d
|
/recipes/rectanglebinpack/all/conanfile.py
|
f77182eff1815c5c7427bd8ec64b4c03ec271b88
|
[
"MIT"
] |
permissive
|
conan-io/conan-center-index
|
1bcec065ccd65aa38b1fed93fbd94d9d5fe6bc43
|
3b17e69bb4e5601a850b6e006e44775e690bac33
|
refs/heads/master
| 2023-08-31T11:34:45.403978
| 2023-08-31T11:13:23
| 2023-08-31T11:13:23
| 204,671,232
| 844
| 1,820
|
MIT
| 2023-09-14T21:22:42
| 2019-08-27T09:43:58
|
Python
|
UTF-8
|
Python
| false
| false
| 3,295
|
py
|
import os
from conan import ConanFile
from conan.tools.build import check_min_cppstd
from conan.tools.cmake import CMake, CMakeToolchain, cmake_layout
from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, load, save
required_conan_version = ">=1.53.0"
class RectangleBinPackConan(ConanFile):
    # Conan recipe for juj/RectangleBinPack (2D rectangle bin-packing algorithms).
    name = "rectanglebinpack"
    description = "The code can be used to solve the problem of packing a set of 2D rectangles into a larger bin."
    license = "LicenseRef-rectanglebinpack-public-domain"
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://github.com/juj/RectangleBinPack"
    topics = ("rectangle", "packing", "bin")

    package_type = "library"
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
    }

    def export_sources(self):
        # Ship the patches listed in conandata.yml along with the recipe.
        export_conandata_patches(self)

    def config_options(self):
        # fPIC has no meaning on Windows.
        if self.settings.os == "Windows":
            del self.options.fPIC

    def configure(self):
        # Shared builds are position-independent anyway; drop the redundant option.
        if self.options.shared:
            self.options.rm_safe("fPIC")

    def layout(self):
        cmake_layout(self, src_folder="src")

    def validate(self):
        # Upstream code requires C++11.
        if self.settings.compiler.get_safe("cppstd"):
            check_min_cppstd(self, 11)

    def source(self):
        get(self, **self.conan_data["sources"][self.version][0], strip_root=True)

    def generate(self):
        tc = CMakeToolchain(self)
        # Upstream has no explicit symbol exports; auto-export everything for DLLs.
        tc.variables["CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS"] = self.options.shared
        tc.generate()

    def build(self):
        apply_conandata_patches(self)
        cmake = CMake(self)
        cmake.configure()
        cmake.build()

    def _extract_license(self):
        # The license text lives in the last four lines of upstream's Readme.txt.
        readme_content = load(self, os.path.join(self.source_folder, "Readme.txt"), encoding="latin-1")
        license_content = "\n".join(readme_content.splitlines()[-4:])
        save(self, os.path.join(self.package_folder, "licenses", "LICENSE"), license_content)

    def package(self):
        self._extract_license()
        # Headers, excluding the legacy "old/" tree.
        copy(self, "*.h",
             dst=os.path.join(self.package_folder, "include", self.name),
             src=self.source_folder,
             excludes="old/**")
        copy(self, "*.dll",
             dst=os.path.join(self.package_folder, "bin"),
             src=self.build_folder,
             keep_path=False)
        for pattern in ["*.lib", "*.so", "*.dylib", "*.a"]:
            copy(self, pattern,
                 dst=os.path.join(self.package_folder, "lib"),
                 src=self.build_folder,
                 keep_path=False)

    def package_info(self):
        self.cpp_info.libs = ["RectangleBinPack"]
        self.cpp_info.set_property("cmake_file_name", "RectangleBinPack")
        self.cpp_info.set_property("cmake_target_name", "RectangleBinPack::RectangleBinPack")
        if self.settings.os in ["Linux", "FreeBSD"]:
            self.cpp_info.system_libs = ["m"]

        # TODO: to remove in conan v2 once cmake_find_package_* generators removed
        self.cpp_info.names["cmake_find_package"] = "RectangleBinPack"
        self.cpp_info.names["cmake_find_package_multi"] = "RectangleBinPack"
|
[
"noreply@github.com"
] |
conan-io.noreply@github.com
|
aaf71114a0ea633a8470abae97853ce02b109f69
|
365c85a280596d88082c1f150436453f96e18c15
|
/Python/Interview/电梯与爬楼.py
|
40bfb9e1b5e9d4195c9faf6bda94d32585345960
|
[] |
no_license
|
Crisescode/leetcode
|
0177c1ebd47b0a63476706562bcf898f35f1c4f2
|
c3a60010e016995f06ad4145e174ae19668e15af
|
refs/heads/master
| 2023-06-01T06:29:41.992368
| 2023-05-16T12:32:10
| 2023-05-16T12:32:10
| 243,040,322
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 489
|
py
|
import sys
class Solution:
    def find_floor(self, floor, nums):
        """Pick the floor minimizing total walking distance to all stops.

        Considers floors 0 .. floor-1; the cost of floor i is
        sum(|i - stop| for stop in nums). Prints the minimum cost and its
        floor index (first minimum wins on ties, as before) and now ALSO
        returns them, so callers can use the result programmatically.

        :param floor: number of candidate floors (0-based, exclusive bound)
        :param nums: list of stop floors people travel to/from
        :return: (min_cost, best_floor) tuple
        """
        min_res, min_index = sys.maxsize, 0
        for i in range(floor):
            # Cost of placing everyone at floor i; no per-floor list needed.
            cost = sum(abs(i - stop) for stop in nums)
            if cost < min_res:
                min_res, min_index = cost, i
        print(min_res)
        print(min_index)
        return min_res, min_index
if __name__ == "__main__":
    # Demo: 10 candidate floors, stops at the listed floors.
    Solution().find_floor(10, [1, 3, 8, 10, 9])
|
[
"zhaopanp2018@outlook.com"
] |
zhaopanp2018@outlook.com
|
56087d81dafd62bf5c993992aa00023609074dce
|
acc7137e34fdc950fbb2593b2c4b0355c975faa3
|
/diffy_api/schemas.py
|
4efb43587c24a360186b0e9ac3a5e8e8c37271bb
|
[
"Apache-2.0"
] |
permissive
|
kevgliss/diffy
|
ba8b01b2c0daa81faa39d69e1380eea16b84d732
|
681d5edd4a72e47a924d4b1b1136d40efa52b631
|
refs/heads/master
| 2020-03-14T20:10:21.797782
| 2018-05-01T22:09:16
| 2018-05-01T22:09:16
| 131,772,394
| 0
| 0
| null | 2018-05-01T23:10:15
| 2018-05-01T23:10:14
| null |
UTF-8
|
Python
| false
| false
| 1,166
|
py
|
"""
.. module: diffy.schemas
:platform: Unix
:copyright: (c) 2018 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
"""
from marshmallow import fields
from diffy.schema import (
TargetPluginSchema,
PersistencePluginSchema,
CollectionPluginSchema,
PayloadPluginSchema,
AnalysisPluginSchema
)
from diffy_api.common.schema import DiffyInputSchema
class BaselineSchema(DiffyInputSchema):
    # Input schema for baseline requests. Plugin fields default to {} when
    # omitted so server-side plugin defaults apply.
    target_key = fields.String(required=True)
    incident_id = fields.String(required=True)
    target_plugin = fields.Nested(TargetPluginSchema, missing={})
    persistence_plugin = fields.Nested(PersistencePluginSchema, missing={})
    collection_plugin = fields.Nested(CollectionPluginSchema, missing={})
    payload_plugin = fields.Nested(PayloadPluginSchema, missing={})
class AnalysisSchema(BaselineSchema):
    # Extends the baseline input with an analysis-plugin selector.
    analysis_plugin = fields.Nested(AnalysisPluginSchema, missing={})
# Module-level schema singletons shared by the API handlers.
baseline_input_schema = BaselineSchema()
baseline_output_schema = BaselineSchema()
analysis_input_schema = AnalysisSchema()
analysis_output_schema = AnalysisSchema()
|
[
"kevgliss@gmail.com"
] |
kevgliss@gmail.com
|
697bac709aa09a2bdbb3d97f1417cfb4bbcc306d
|
f4b011992dd468290d319d078cbae4c015d18338
|
/Array/Container_With_most_Water.py
|
1c1adf9cf2aa2c41e70c6b93616bcd5d35dbfaf5
|
[] |
no_license
|
Neeraj-kaushik/Geeksforgeeks
|
deca074ca3b37dcb32c0136b96f67beb049f9592
|
c56de368db5a6613d59d9534de749a70b9530f4c
|
refs/heads/master
| 2023-08-06T05:00:43.469480
| 2021-10-07T13:37:33
| 2021-10-07T13:37:33
| 363,420,292
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 309
|
py
|
def container(li):
    """Maximum water container (LeetCode 11), two-pointer O(n).

    Prints the maximum area (as before) and now also returns it.
    The original brute force was O(n^2) and shadowed the builtin `max`.

    :param li: list of line heights
    :return: maximum area formed by any two lines (0 for fewer than 2 lines)
    """
    best = 0
    lo, hi = 0, len(li) - 1
    while lo < hi:
        area = (hi - lo) * min(li[lo], li[hi])
        if area > best:
            best = area
        # Move the shorter line inward: keeping it can never improve the area.
        if li[lo] < li[hi]:
            lo += 1
        else:
            hi -= 1
    print(best)
    return best
# Read the (unused) count and the heights, then report the maximum area.
n = int(input())
li = [int(x) for x in input().split()]
container(li)
|
[
"nkthecoder@gmail.com"
] |
nkthecoder@gmail.com
|
f78f2203b461289608061f69e6fbe921db6e52b3
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/GIT-USERS/TOM-Lambda/CSEU2-Graphs-gp/src/challenge2/isl.py
|
ac24bdb92ca489c6b326e7896af3ef9e3e78d855
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751
| 2022-08-11T03:18:22
| 2022-08-11T03:18:22
| 382,628,698
| 10
| 12
|
MIT
| 2022-10-10T14:13:54
| 2021-07-03T13:58:52
| null |
UTF-8
|
Python
| false
| false
| 2,689
|
py
|
from util import Queue
# island count problem
#
# FIX: this region contained an unresolved git merge conflict
# (<<<<<<< HEAD / ======= / >>>>>>>) with two near-identical copies of the
# same code, which made the module a SyntaxError. Resolved by keeping the
# reformatted version; both sides were functionally identical.
def island_counter(arr):
    """Count 4-directionally connected groups of 1s in a 2D grid.

    NOTE: mutates `arr` in place — visited cells are zeroed.
    """
    rows = len(arr)
    cols = len(arr[0])
    count = 0
    for i in range(rows):
        for j in range(cols):
            if arr[i][j] == 1:
                deleteOnes(arr, i, j, rows, cols)
                count += 1
    return count


def deleteOnes(grid, i, j, rows, cols):
    """BFS from (i, j), zeroing every cell of the island it belongs to."""
    q = Queue()
    q.enqueue([i, j])
    grid[i][j] = 0
    while q.size() > 0:
        node = q.dequeue()
        row = node[0]
        col = node[1]
        # Visit the four orthogonal neighbours that are in bounds and unvisited.
        for row2, col2 in (
            (row + 1, col),
            (row - 1, col),
            (row, col + 1),
            (row, col - 1),
        ):
            if 0 <= row2 < rows and 0 <= col2 < cols and grid[row2][col2] != 0:
                grid[row2][col2] = 0
                q.enqueue([row2, col2])


islands = [
    [0, 1, 0, 1, 0],
    [1, 1, 0, 1, 1],
    [0, 0, 1, 0, 0],
    [1, 0, 1, 0, 0],
    [1, 1, 0, 0, 0],
]

island_counter(islands)  # 4

islands = [
    [1, 0, 0, 1, 1, 0, 1, 1, 0, 1],
    [0, 0, 1, 1, 0, 1, 0, 0, 0, 0],
    [0, 1, 1, 1, 0, 0, 0, 1, 0, 1],
    [0, 0, 1, 0, 0, 1, 0, 0, 1, 1],
    [0, 0, 1, 1, 0, 1, 0, 1, 1, 0],
    [0, 1, 0, 1, 1, 1, 0, 1, 0, 0],
    [0, 0, 1, 0, 0, 1, 1, 0, 0, 0],
    [1, 0, 1, 1, 0, 0, 0, 1, 1, 0],
    [0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
    [0, 0, 1, 1, 0, 1, 0, 0, 1, 0],
]

island_counter(islands)  # 13
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
97b2ab61542ae094603a2691a04ef0fffc95cf21
|
8a25ada37271acd5ea96d4a4e4e57f81bec221ac
|
/home/pi/GrovePi/Software/Python/others/temboo/Library/eBay/Trading/EndItem.py
|
87aa0de2ea3a105cb636525193a819e0e0d694df
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
lupyuen/RaspberryPiImage
|
65cebead6a480c772ed7f0c4d0d4e08572860f08
|
664e8a74b4628d710feab5582ef59b344b9ffddd
|
refs/heads/master
| 2021-01-20T02:12:27.897902
| 2016-11-17T17:32:30
| 2016-11-17T17:32:30
| 42,438,362
| 7
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,780
|
py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# EndItem
# Ends the specified item listing before the date and time that it is scheduled to end per the listing duration.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class EndItem(Choreography):
    """Choreo wrapper for the eBay Trading EndItem operation (generated-style Temboo binding)."""

    def __init__(self, temboo_session):
        """
        Create a new instance of the EndItem Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(EndItem, self).__init__(temboo_session, '/Library/eBay/Trading/EndItem')

    def new_input_set(self):
        # Factory for the choreo-specific input set.
        return EndItemInputSet()

    def _make_result_set(self, result, path):
        # Factory for the choreo-specific result set.
        return EndItemResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Factory for the choreo-specific execution handle.
        return EndItemChoreographyExecution(session, exec_id, path)
class EndItemInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the EndItem
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    # Each setter forwards to InputSet._set_input; the docstrings describe the
    # eBay Trading API parameter semantics.
    def set_EndingReason(self, value):
        """
        Set the value of the EndingReason input for this Choreo. ((required, string) The reason the listing is ending early. Valid values are: LostOrBroken, NotAvailable, Incorrect, OtherListingError, CustomCode, SellToHighBidder, or Sold.)
        """
        super(EndItemInputSet, self)._set_input('EndingReason', value)
    def set_ItemID(self, value):
        """
        Set the value of the ItemID input for this Choreo. ((required, string) The ID of the item to end.)
        """
        super(EndItemInputSet, self)._set_input('ItemID', value)
    def set_ResponseFormat(self, value):
        """
        Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and xml.)
        """
        super(EndItemInputSet, self)._set_input('ResponseFormat', value)
    def set_SandboxMode(self, value):
        """
        Set the value of the SandboxMode input for this Choreo. ((conditional, boolean) Indicates that the request should be made to the sandbox endpoint instead of the production endpoint. Set to 1 to enable sandbox mode.)
        """
        super(EndItemInputSet, self)._set_input('SandboxMode', value)
    def set_SellerInventoryID(self, value):
        """
        Set the value of the SellerInventoryID input for this Choreo. ((optional, string) Unique identifier that the seller specified when they listed the Half.com item. This paramater only applies to Half.com.)
        """
        super(EndItemInputSet, self)._set_input('SellerInventoryID', value)
    def set_SiteID(self, value):
        """
        Set the value of the SiteID input for this Choreo. ((optional, string) The eBay site ID that you want to access. Defaults to 0 indicating the US site.)
        """
        super(EndItemInputSet, self)._set_input('SiteID', value)
    def set_UserToken(self, value):
        """
        Set the value of the UserToken input for this Choreo. ((required, string) A valid eBay Auth Token.)
        """
        super(EndItemInputSet, self)._set_input('UserToken', value)
class EndItemResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the EndItem Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # NOTE(review): parameter shadows the builtin `str`; kept as-is for API compatibility.
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. (The response from eBay.)
        """
        return self._output.get('Response', None)
class EndItemChoreographyExecution(ChoreographyExecution):
    # Binds asynchronous execution results to the EndItem-specific result set.
    def _make_result_set(self, response, path):
        return EndItemResultSet(response, path)
|
[
"lupyuen@gmail.com"
] |
lupyuen@gmail.com
|
65261f757b8d466feca4479d5346beec5a78e31b
|
e6d862a9df10dccfa88856cf16951de8e0eeff2b
|
/Core/worker/python-aiohttp/api_server/models/worker_performance_event_duty_details.py
|
f29762066c70ea15c67959153241105705c85d5b
|
[] |
no_license
|
AllocateSoftware/API-Stubs
|
c3de123626f831b2bd37aba25050c01746f5e560
|
f19d153f8e9a37c7fb1474a63c92f67fc6c8bdf0
|
refs/heads/master
| 2022-06-01T07:26:53.264948
| 2020-01-09T13:44:41
| 2020-01-09T13:44:41
| 232,816,845
| 0
| 0
| null | 2022-05-20T21:23:09
| 2020-01-09T13:34:35
|
C#
|
UTF-8
|
Python
| false
| false
| 3,121
|
py
|
# coding: utf-8
from datetime import date, datetime
from typing import List, Dict, Type
from api_server.models.base_model_ import Model
from api_server import util
class WorkerPerformanceEventDutyDetails(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """

    def __init__(self, duty_id: str=None, _from: datetime=None, to: datetime=None):
        """WorkerPerformanceEventDutyDetails - a model defined in OpenAPI

        :param duty_id: The duty_id of this WorkerPerformanceEventDutyDetails.
        :param _from: The _from of this WorkerPerformanceEventDutyDetails.
        :param to: The to of this WorkerPerformanceEventDutyDetails.
        """
        self.openapi_types = {
            'duty_id': str,
            '_from': datetime,
            'to': datetime
        }

        # The wire-format field is 'from' — a Python keyword — hence `_from`.
        self.attribute_map = {
            'duty_id': 'dutyId',
            '_from': 'from',
            'to': 'to'
        }

        self._duty_id = duty_id
        # `self.__from` is name-mangled to `_WorkerPerformanceEventDutyDetails__from`,
        # keeping the backing field distinct from the `_from` property below.
        self.__from = _from
        self._to = to

    @classmethod
    def from_dict(cls, dikt: dict) -> 'WorkerPerformanceEventDutyDetails':
        """Returns the dict as a model

        :param dikt: A dict.
        :return: The WorkerPerformanceEvent_dutyDetails of this WorkerPerformanceEventDutyDetails.
        """
        return util.deserialize_model(dikt, cls)

    @property
    def duty_id(self):
        """Gets the duty_id of this WorkerPerformanceEventDutyDetails.

        ID of the duty within the allocate system

        :return: The duty_id of this WorkerPerformanceEventDutyDetails.
        :rtype: str
        """
        return self._duty_id

    @duty_id.setter
    def duty_id(self, duty_id):
        """Sets the duty_id of this WorkerPerformanceEventDutyDetails.

        ID of the duty within the allocate system

        :param duty_id: The duty_id of this WorkerPerformanceEventDutyDetails.
        :type duty_id: str
        """
        self._duty_id = duty_id

    @property
    def _from(self):
        """Gets the _from of this WorkerPerformanceEventDutyDetails.

        When the duty started

        :return: The _from of this WorkerPerformanceEventDutyDetails.
        :rtype: datetime
        """
        return self.__from

    @_from.setter
    def _from(self, _from):
        """Sets the _from of this WorkerPerformanceEventDutyDetails.

        When the duty started

        :param _from: The _from of this WorkerPerformanceEventDutyDetails.
        :type _from: datetime
        """
        self.__from = _from

    @property
    def to(self):
        """Gets the to of this WorkerPerformanceEventDutyDetails.

        When the duty ended

        :return: The to of this WorkerPerformanceEventDutyDetails.
        :rtype: datetime
        """
        return self._to

    @to.setter
    def to(self, to):
        """Sets the to of this WorkerPerformanceEventDutyDetails.

        When the duty ended

        :param to: The to of this WorkerPerformanceEventDutyDetails.
        :type to: datetime
        """
        self._to = to
|
[
"nigel.magnay@gmail.com"
] |
nigel.magnay@gmail.com
|
0f79ee95751b41818a702247f7f40d6f88f71c80
|
7950c4faf15ec1dc217391d839ddc21efd174ede
|
/leetcode-cn/1220.0_Count_Vowels_Permutation.py
|
01e3682d75f3280c9a127e70607bc6363f6959ee
|
[] |
no_license
|
lixiang2017/leetcode
|
f462ecd269c7157aa4f5854f8c1da97ca5375e39
|
f93380721b8383817fe2b0d728deca1321c9ef45
|
refs/heads/master
| 2023-08-25T02:56:58.918792
| 2023-08-22T16:43:36
| 2023-08-22T16:43:36
| 153,090,613
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 709
|
py
|
'''
执行用时:120 ms, 在所有 Python3 提交中击败了63.64% 的用户
内存消耗:15.1 MB, 在所有 Python3 提交中击败了59.09% 的用户
通过测试用例:43 / 43
'''
class Solution:
    def countVowelPermutation(self, n: int) -> int:
        """Count length-n vowel strings obeying the succession rules, mod 1e9+7.

        Rules (who may PRECEDE each vowel):
          a <- e, i, u;  e <- a, i;  i <- e, o;  o <- i;  u <- i, o
        Rolling DP over the count of strings ending in each vowel.
        """
        MOD = 10 ** 9 + 7
        ends_a = ends_e = ends_i = ends_o = ends_u = 1  # length-1 strings
        for _ in range(n - 1):
            ends_a, ends_e, ends_i, ends_o, ends_u = (
                (ends_e + ends_i + ends_u) % MOD,
                (ends_a + ends_i) % MOD,
                (ends_e + ends_o) % MOD,
                ends_i % MOD,
                (ends_i + ends_o) % MOD,
            )
        return (ends_a + ends_e + ends_i + ends_o + ends_u) % MOD
|
[
"laoxing201314@outlook.com"
] |
laoxing201314@outlook.com
|
f19f605ddf8db8b480c00c74ed23b523b12ed70d
|
319d3dfc79d6249bf6d6dab1c51a7d5d0af3c860
|
/tests/test_line_set_data.py
|
00aa7bc5bec56fa3340c23db28ff5477fc06cb6e
|
[
"MIT"
] |
permissive
|
jameshensman/matplotlib2tikz
|
1d365b6a9e91453492a17ec28c5eb74f2279e26e
|
450712b4014799ec5f151f234df84335c90f4b9d
|
refs/heads/master
| 2023-01-24T07:59:53.641565
| 2022-02-28T11:27:51
| 2022-02-28T11:27:51
| 169,421,478
| 1
| 0
|
MIT
| 2019-02-06T14:47:09
| 2019-02-06T14:47:08
| null |
UTF-8
|
Python
| false
| false
| 320
|
py
|
# from <https://github.com/nschloe/tikzplotlib/issues/339>
import matplotlib.pyplot as plt
def plot():
    """Regression case for nschloe/tikzplotlib#339: set_data on a plotted marker."""
    fig = plt.figure()
    (marker,) = plt.plot(0, 0, "kx")
    marker.set_data(0, 0)
    return fig
def test():
    # Imported lazily so collecting this module doesn't require the test
    # helper package until the test actually runs.
    from .helpers import assert_equality

    assert_equality(plot, "test_line_set_data_reference.tex")
|
[
"nico.schloemer@gmail.com"
] |
nico.schloemer@gmail.com
|
b56d1a4f34a8e9cc9ae7192fc5b710a3e1a0ee47
|
c3523080a63c7e131d8b6e0994f82a3b9ed901ce
|
/django/hello_world/hello_world_project/my_app/views.py
|
93892aad6dc8f05c979061a01916dbfbfe83c670
|
[] |
no_license
|
johnlawrenceaspden/hobby-code
|
2c77ffdc796e9fe863ae66e84d1e14851bf33d37
|
d411d21aa19fa889add9f32454915d9b68a61c03
|
refs/heads/master
| 2023-08-25T08:41:18.130545
| 2023-08-06T12:27:29
| 2023-08-06T12:27:29
| 377,510
| 6
| 4
| null | 2023-02-22T00:57:49
| 2009-11-18T19:57:01
|
Clojure
|
UTF-8
|
Python
| false
| false
| 655
|
py
|
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
import datetime
def index(request):
    # Landing page: plain HTML with links to the other demo views.
    # Adjacent string literals are concatenated into a single response body.
    return HttpResponse(
        "Hello, World!"
        "<a href='secret'>secrets</a>"
        "<a href='geeks_view'>geeks_view</a>"
        "<a href='template'>template</a>"
    )
def secret(request):
    # Trivial demo view returning a fixed string.
    return HttpResponse("Secrets!")
def geeks_view(request):
    # Render the current (naive local) server time directly as HTML.
    now = datetime.datetime.now()
    html = "Hello, World<br/> time is {} <br/> ".format(now)
    return HttpResponse(html)
def template_view(request):
    # Render through the template engine; template must be on the loader path.
    return render(request, "template_view.html")
# <a href="{% url 'secret' %}">secrets</a>
|
[
"github@aspden.com"
] |
github@aspden.com
|
7b1190e83ad63f84b348c940915312eff30c3b58
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/nlp/gpt2/src/utils/metric_method.py
|
721d3f0619b1251f5d85a89639ac74cfe067a333
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 6,095
|
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""metric method for downstream task"""
import string
import re
from collections import Counter
import numpy as np
from .rouge_score import get_rouge_score
from .bleu import compute_bleu
class LastWordAccuracy():
    """Accuracy metric for the LAMBADA task (predicting a sentence's final word)."""

    def __init__(self):
        self.acc_num = 0    # correctly predicted words so far
        self.total_num = 0  # all words compared so far

    def normalize(self, word):
        """Strip surrounding whitespace, lowercase, and drop punctuation."""
        cleaned = word.strip().lower()
        punctuation = set(string.punctuation)
        return ''.join(ch for ch in cleaned if ch not in punctuation)

    def update(self, predict_label, gold_label):
        """Accumulate accuracy counts; accepts a single pair or parallel lists."""
        if isinstance(predict_label, str) and isinstance(gold_label, str):
            predict_label, gold_label = [predict_label], [gold_label]
        for predicted, gold in zip(predict_label, gold_label):
            self.total_num += 1
            if self.normalize(predicted) == self.normalize(gold):
                self.acc_num += 1
class Accuracy():
    """Running top-1 classification accuracy.

    Fix: corrected the "Preict" typo in the progress log message.
    """

    def __init__(self):
        self.acc_num = 0    # correctly predicted samples so far
        self.total_num = 0  # all samples seen so far

    def update(self, logits, labels):
        """Accumulate accuracy from a batch of logits and integer labels.

        :param logits: array-like, class scores with classes on the last axis
        :param labels: array-like of gold class indices (flattened internally)
        """
        labels = np.reshape(labels, -1)
        logits_id = np.argmax(logits, axis=-1)
        print(" | Predict Label: {} Gold Label: {}".format(logits_id, labels))
        self.acc_num += np.sum(labels == logits_id)
        self.total_num += len(labels)
        print("\n| Accuracy = {} \n".format(self.acc_num / self.total_num))
class F1():
    """Token-level F1 between a predicted answer and a gold answer."""

    def __init__(self):
        self.f1_score = 0.0  # running sum of per-example F1

    def get_normalize_answer_token(self, string_):
        """Lowercase, strip punctuation and articles, collapse whitespace; return tokens."""
        text = string_.lower()
        punctuation = set(string.punctuation)
        text = ''.join(char for char in text if char not in punctuation)
        text = re.sub(re.compile(r'\b(a|an|the)\b', re.UNICODE), ' ', text)
        return ' '.join(text.split()).split()

    def update(self, pred_answer, gold_answer):
        """Add one example's F1 (over token lists) to the running sum."""
        overlap = Counter(pred_answer) & Counter(gold_answer)
        num_same = sum(overlap.values())
        precision = 1.0 * num_same / len(pred_answer) if pred_answer else 0
        recall = 1.0 * num_same / len(gold_answer) if gold_answer else 0
        # Both sides empty counts as a perfect match.
        if ' '.join(pred_answer).strip() == "" and ' '.join(gold_answer).strip() == "":
            self.f1_score += 1
        else:
            denom = precision + recall
            self.f1_score += (2 * precision * recall / float(denom)) if denom != 0 else 0.0
        print('| precision: {}, recall: {}\n'.format(precision, recall))
class BLEU():
    """calculate BLEU score"""
    def __init__(self, tokenizer=None, max_order=4, smooth=True):
        self.bleu = 0.0         # running sum of per-batch average BLEU (scaled x100)
        self.total_num = 0      # number of update() calls accumulated
        self.tokenizer = tokenizer
        self.max_order = max_order
        self.smooth = smooth

    def sum_bleu(self, references, translations, max_order, smooth):
        """calculate the sum of bleu score"""
        # Each pair is scored independently; compute_bleu expects a list of
        # reference lists per translation, hence the [[refer]] nesting.
        all_result = []
        bleu_avg = 0.0
        for refer, trans in zip(references, translations):
            result = compute_bleu([[refer]], [trans], max_order, smooth)
            all_result.append(result)
            bleu_avg += result[0]
        bleu_avg /= len(references)

        return bleu_avg, all_result

    def update(self, hypotheses, references):
        """BLEU update"""
        hypo_l = []
        ref_l = []
        if self.tokenizer is not None:
            # Skip pairs where either side is empty, tokenize the rest.
            for hypo, ref in zip(hypotheses, references):
                if ref.strip() == '':
                    print("Reference is None, skip it !")
                    continue
                if hypo.strip() == '':
                    print("translation is None, skip it !")
                    continue
                hypo_l.append(self.tokenizer.encode(hypo))
                ref_l.append(self.tokenizer.encode(ref))

        # Without a tokenizer the raw inputs are scored as-is.
        if hypo_l and ref_l:
            hypotheses = hypo_l
            references = ref_l

        bleu_avg, _ = self.sum_bleu(references, hypotheses, self.max_order, self.smooth)
        self.bleu += bleu_avg * 100

        self.total_num += 1
        print("============== BLEU: {} ==============".format(float(self.bleu / self.total_num)))
class Rouge():
    '''
    Get Rouge Score
    '''
    def __init__(self):
        self.Rouge1 = 0.0     # running sum of ROUGE-1 F-measure (scaled x100)
        self.Rouge2 = 0.0     # running sum of ROUGE-2 F-measure (scaled x100)
        self.RougeL = 0.0     # running sum of ROUGE-L F-measure (scaled x100)
        self.total_num = 0    # number of update() calls

    def update(self, hypothesis, targets):
        # Accumulate F-measures; the printed value averages the three variants.
        scores = get_rouge_score(hypothesis, targets)
        self.Rouge1 += scores['rouge-1']['f'] * 100
        self.Rouge2 += scores['rouge-2']['f'] * 100
        self.RougeL += scores['rouge-l']['f'] * 100
        self.total_num += 1
        print("=============== ROUGE: {} ===============".format(
            (self.Rouge1 + self.Rouge2 + self.RougeL) / float(3.0 * self.total_num)))
|
[
"chenhaozhe1@huawei.com"
] |
chenhaozhe1@huawei.com
|
8c77d762715a190f6a21873d09291edc7d9199dd
|
491c1e520a64e3ebd5349130f35047aaed1e70ec
|
/stack/monotonic_stack/739 dailyTemperatures.py
|
c8944e497eca5d5c03af42c94c50ed5f70a3e33b
|
[] |
no_license
|
pangyouzhen/data-structure
|
33a7bd7790c8db3e018114d85a137f5f3d6b92f8
|
cd46cf08a580c418cc40a68bf9b32371fc69a803
|
refs/heads/master
| 2023-05-26T12:02:30.800301
| 2023-05-21T08:07:57
| 2023-05-21T08:07:57
| 189,315,047
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,414
|
py
|
from typing import List
class Solution:
    """LeetCode 739 — Daily Temperatures, plus a value-returning variant."""

    # Brute force: for each day, scan right for the first warmer day. O(n^2).
    def dailyTemperatures_(self, temperatures: List[int]) -> List[int]:
        if not temperatures:
            return []
        l = len(temperatures)
        res = [0] * l
        for i in range(l):
            for j in range(i + 1, l):
                if temperatures[j] > temperatures[i]:
                    print(f"{temperatures[j]} > {temperatures[i]}")
                    res[i] = j - i
                    break
        return res

    # Monotonic stack: for each element, distance to the first greater element
    # on its right; 0 when none exists. O(n).
    def dailyTemperatures(self, temperatures: List[int]) -> List[int]:
        l = len(temperatures)
        ans = [0] * l
        stack = []  # indices with strictly decreasing temperatures
        for i in range(l):
            temperature = temperatures[i]
            while stack and temperature > temperatures[stack[-1]]:
                prev_index = stack.pop()
                ans[prev_index] = i - prev_index
            stack.append(i)
        return ans

    # Variant (previously an unimplemented TODO stub): same monotonic stack,
    # but return the VALUE of the next warmer temperature (0 when none).
    def dailyTemperatures_value(self, temperatures: List[int]) -> List[int]:
        ans = [0] * len(temperatures)
        stack = []
        for i, temperature in enumerate(temperatures):
            while stack and temperature > temperatures[stack[-1]]:
                ans[stack.pop()] = temperature
            stack.append(i)
        return ans
if __name__ == '__main__':
    # Demo run; expected output: [1, 1, 4, 2, 1, 1, 0, 0]
    temperatures = [73, 74, 75, 71, 69, 72, 76, 73]
    # temperatures = [30, 40, 50, 60]
    # temperatures = [30, 60, 90]
    func = Solution().dailyTemperatures
    print(func(temperatures))
|
[
"pangyouzhen@live.com"
] |
pangyouzhen@live.com
|
301c10bb286366de50022142a49a5e4c3d4219c9
|
c80b3cc6a8a144e9858f993c10a0e11e633cb348
|
/plugins/gateway-acl/acl/api/group.py
|
a647f836086fc9940457cbd96486ebaae5fd5068
|
[] |
no_license
|
cristidomsa/Ally-Py
|
e08d80b67ea5b39b5504f4ac048108f23445f850
|
e0b3466b34d31548996d57be4a9dac134d904380
|
refs/heads/master
| 2021-01-18T08:41:13.140590
| 2013-11-06T09:51:56
| 2013-11-06T09:51:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,462
|
py
|
'''
Created on Aug 7, 2013
@package: gateway acl
@copyright: 2012 Sourcefabric o.p.s.
@license: http://www.gnu.org/licenses/gpl-3.0.txt
@author: Gabriel Nistor
API specifications for access group.
'''
from .domain_acl import modelACL
from acl.api.acl import IAclPrototype
from acl.api.compensate import ICompensatePrototype
from ally.api.config import service, query
from ally.api.criteria import AsBooleanOrdered
from ally.api.option import SliceAndTotal # @UnusedImport
from ally.support.api.entity_named import Entity, IEntityService, QEntity
# --------------------------------------------------------------------
@modelACL
class Group(Entity):
    '''
    Defines the group of ACL access.

    Name         - the group unique name.
    IsAnonymous  - if true it means that the group should be delivered for anonymous access.
    Description  - a description explaining the group.
    '''
    IsAnonymous = bool  # flag: deliver this group for anonymous access
    Description = str   # free-text explanation of the group
# --------------------------------------------------------------------
@query(Group)
class QGroup(QEntity):
    '''
    Provides the query for group.
    '''
    # Allows filtering/ordering on the IsAnonymous flag.
    isAnonymous = AsBooleanOrdered
# --------------------------------------------------------------------
# Service interface: generic Entity/QEntity parameters are bound to Group/QGroup,
# and the ACL prototype is parameterized with the Group model.
@service((Entity, Group), (QEntity, QGroup), ('ACL', Group))
class IGroupService(IEntityService, IAclPrototype, ICompensatePrototype):
    '''
    The ACL access group service used for allowing accesses based on group.
    '''
|
[
"gabriel.nistor@sourcefabric.org"
] |
gabriel.nistor@sourcefabric.org
|
9377ca1d4e1a8f7e874803665efdc587668509ce
|
4bc19f4dd098ebedcb6ee78af0ae12cb633671fe
|
/rekvizitka/templatetags/signin_tags.py
|
8226953cbaa61707f115f701fe9c2a6a61ba2593
|
[] |
no_license
|
StanislavKraev/rekvizitka
|
958ab0e002335613a724fb14a8e4123f49954446
|
ac1f30e7bb2e987b3b0bda4c2a8feda4d3f5497f
|
refs/heads/master
| 2021-01-01T05:44:56.372748
| 2016-04-27T19:20:26
| 2016-04-27T19:20:26
| 57,240,406
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 613
|
py
|
# -*- coding: utf-8 -*-
from django import template
from rek.rekvizitka.forms import SigninForm, SignupForm
register = template.Library()
@register.inclusion_tag("includes/navigation/index.html", takes_context=True)
def signin_form(context):
    """Build the context for the navigation include, ensuring sign-in/up forms exist."""
    # Flatten the template context stack into one dict (later layers win).
    result = {}
    for d in context:
        result.update(d)
    if 'top_signin_form' not in result:
        result['top_signin_form'] = SigninForm()
    if 'signup_form' not in result:
        result['signup_form'] = SignupForm()
    if 'request' in context:
        # Show the login form when the user was redirected here (?next=...).
        result['show_login_form'] = 'next' in context['request'].GET
    return result
|
[
"kraevst@yandex.ru"
] |
kraevst@yandex.ru
|
430c29e62b60c6a030c6eebfbbf4f5c8806ae29f
|
f48f9798819b12669a8428f1dc0639e589fb1113
|
/office/misc/zekr/actions.py
|
7f611db22f147aa0500f55fdb6215f5233806360
|
[] |
no_license
|
vdemir/PiSiPackages-pardus-2011-devel
|
781aac6caea2af4f9255770e5d9301e499299e28
|
7e1867a7f00ee9033c70cc92dc6700a50025430f
|
refs/heads/master
| 2020-12-30T18:58:18.590419
| 2012-03-12T03:16:34
| 2012-03-12T03:16:34
| 51,609,831
| 1
| 0
| null | 2016-02-12T19:05:41
| 2016-02-12T19:05:40
| null |
UTF-8
|
Python
| false
| false
| 1,131
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2011 TUBITAK/BILGEM
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
WorkDir = "%s/%s" % (get.ARCH(), get.srcNAME())
BASEDIR = "/usr/share/java/zekr"
def setup():
shelltools.system("ant clean")
def build():
shelltools.system("ant")
def install():
pisitools.insinto(BASEDIR, "*")
pisitools.dosym("%s/zekr.sh" % BASEDIR, "/usr/bin/zekr")
pisitools.dodoc("doc/changes.txt", "doc/license/*", "doc/readme.txt")
# Remove redundant files
pisitools.removeDir("%s/build" % BASEDIR)
pisitools.remove("%s/build.xml" % BASEDIR)
pisitools.remove("%s/readme.txt" % BASEDIR)
# Javadoc generation
# shelltools.system("ant javadoc")
# shelltools.copytree("build/docs/javadocs", "%s/%s/%s" %(get.installDIR(), get.docDIR(), get.srcNAME()))
# shelltools.unlinkDir("%s%s/build" % (get.installDIR(), BASEDIR))
|
[
"kaptan@pisipackages.org"
] |
kaptan@pisipackages.org
|
30d47c3b4db546a33d6f8b9cc2e181c424689c59
|
23e0629881270a881e68b2b07c6b8bc8b53c4127
|
/glmtools/test/residual_test_int_scaling_nc.py
|
746dcf06ef19d887e7e25f87429c712f3319eb1e
|
[
"BSD-3-Clause"
] |
permissive
|
fluxtransport/glmtools
|
7a78ed697ef3515869fa5c46afa9cd1b03700514
|
ae17d95b61af011cf966392ba94863c5928053b7
|
refs/heads/master
| 2022-09-12T00:20:31.378392
| 2020-06-05T16:25:48
| 2020-06-05T16:25:48
| 266,206,763
| 1
| 1
|
BSD-3-Clause
| 2020-05-22T21:02:06
| 2020-05-22T21:02:06
| null |
UTF-8
|
Python
| false
| false
| 1,655
|
py
|
"""
This set of tests reads the GLM data two ways, one by applying the unsigned integer conversion
manually, and the other by using the automatic method implemented in the library.
It was used to test PR #658 developed in response to issue #656 on the unidata/netcdf4-python library.
The second, full-auto method should work if the version (>=1.2.8) of netcdf4-python post-dates this PR.
These tests were developed with GLM data dating after 24 April 2017, but may not work with
later production upgrades if the unsigned int encoding method used in the production system changes.
The correct answers are:
-139.505
-43.7424
"""
filename = '/data/LCFA-production/OR_GLM-L2-LCFA_G16_s20171161230400_e20171161231000_c20171161231027.nc'
some_flash = 6359
import netCDF4
nc = netCDF4.Dataset(filename)
event_lons = nc.variables['event_lon']
event_lons.set_auto_scale(False)
scale_factor = event_lons.scale_factor
add_offset = event_lons.add_offset
event_lons = event_lons[:].astype('u2')
event_lons_fixed = (event_lons[:])*scale_factor+add_offset
nc.close()
print("Manual scaling")
print(event_lons_fixed.min())
print(event_lons_fixed.max())
# lon_fov = (-156.06, -22.94)
# dlon_fov = lon_fov[1]-lon_fov[0]
# lat_fov = (-66.56, 66.56)
# scale_factor = 0.00203128 # from file and spec; same for both
# ------
filename = '/data/LCFA-production/OR_GLM-L2-LCFA_G16_s20171161230400_e20171161231000_c20171161231027.nc'
some_flash = 6359
import netCDF4
nc = netCDF4.Dataset(filename)
event_lons = nc.variables['event_lon']
event_lons_fixed = event_lons[:]
nc.close()
print("Auto scaling")
print(event_lons_fixed.min())
print(event_lons_fixed.max())
|
[
"eric.bruning@gmail.com"
] |
eric.bruning@gmail.com
|
165573a4fe8aadb00a0a2ffec9a278040aa43dc8
|
3c8c2af952f2a785ca648c44954961a198c2ac6b
|
/tensorflower/Examples/NeuralNetworkExample/simple_neural_network_with_eager.py
|
a0be16daa338f9e52c6f3ae4318fdfcd600cdaf4
|
[] |
no_license
|
SCismycat/TensorFlowLearning
|
0b8db07ca24f6a7ac75ddc9a91f7a13c71709104
|
e5fe6359e2ae9fdfc6a6824efdfc2660c7a9d061
|
refs/heads/master
| 2020-11-25T18:30:55.859591
| 2020-01-03T12:45:24
| 2020-01-03T12:45:24
| 228,793,975
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,190
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : Leslee
# @Email : leelovesc@gmail.com
# @Time : 2019.11.27 14:05
import tensorflower as tf
tf.enable_eager_execution()
tfe = tf.contrib.eager
# Import MNIST data
from tensorflower.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
# Parameters
learning_rate = 0.001
num_steps = 1000
batch_size = 128
display_step = 100
# Network Parameters
n_hidden_1 = 256 # 1st layer number of neurons
n_hidden_2 = 256 # 2nd layer number of neurons
num_input = 784 # MNIST data input (img shape: 28*28)
num_classes = 10 # MNIST total classes (0-9 digits)
# 先分好数据的batchs
dataset = tf.data.Dataset.from_tensor_slices(
(mnist.train.images,mnist.train.labels))
dateset = dataset.repeat().batch(batch_size).prefetch(batch_size)
dataset_iter = tfe.Iterator(dataset)
# 定义神经网络,使用eager API和tf.layer API
class NeuralNetwork(tfe.Network):
def __init__(self):
# 定义每个层
super(NeuralNetwork, self).__init__()
self.layer1 = self.track_layer(
tf.layers.Dense(n_hidden_1,activation=tf.nn.relu))
self.layer2 = self.track_layer(
tf.layers.Dense(n_hidden_2,activation=tf.nn.relu))
self.out_layer = self.track_layer(tf.layers.Dense(num_classes))
def call(self,x):
x = self.layer1(x)
x = self.layer2(x)
return self.out_layer(x)
neural_network = NeuralNetwork()
# 交叉熵损失函数
def loss_fn(inference_fn,inputs,labels):
return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=inference_fn(inputs),labels=labels))
# 计算 acc
def accuracy_fn(inference_fn,inputs,labels):
prediction = tf.nn.softmax(inference_fn(inputs))
correct_pred = tf.equal(tf.argmax(prediction,1),labels)
return tf.reduce_mean(tf.cast(correct_pred,tf.float32))
optimizer = tf.train.AdamOptimizer(learning_rate)
# 计算梯度
grad = tfe.implicit_gradients(loss_fn)
# 开始训练
average_loss = 0.
average_acc = 0.
for step in range(num_steps):
d = dataset_iter.next()
# Images
x_batch = d[0]
y_batch = tf.cast(d[1],dtype=tf.int64)
# 计算整个batch的loss
batch_loss = loss_fn(neural_network,x_batch,y_batch)
average_loss += batch_loss
# 计算整个batch的accuracy
batch_accuracy = accuracy_fn(neural_network,x_batch,y_batch)
average_acc += batch_accuracy
if step == 0:
# 打印优化前的初始的cost
print("Initial loss= {:.9f}".format(average_loss))
optimizer.apply_gradients(grad(neural_network,x_batch,y_batch))
# 打印细节
if (step+1) % display_step == 0 or step == 0:
if step >0:
average_loss/=display_step
average_acc /= display_step
print("Step:", '%04d' % (step + 1), " loss=",
"{:.9f}".format(average_loss), " accuracy=",
"{:.4f}".format(average_acc))
average_loss = 0.
average_acc = 0.
test_X = mnist.test.images
test_Y = mnist.test.labels
test_acc = accuracy_fn(neural_network,test_X,test_Y)
print("Testset Accuracy: {:.4f}".format(test_acc))
|
[
"leelovesc@gmail.com"
] |
leelovesc@gmail.com
|
65949528935c33a7194c89fc9126372595d6568f
|
1adc05008f0caa9a81cc4fc3a737fcbcebb68995
|
/hardhat/recipes/python/wtforms.py
|
d5f3bc0b4cb6ebf88755c2e29ac9acc1dcd3dcdf
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
stangelandcl/hardhat
|
4aa995518697d19b179c64751108963fa656cfca
|
1ad0c5dec16728c0243023acb9594f435ef18f9c
|
refs/heads/master
| 2021-01-11T17:19:41.988477
| 2019-03-22T22:18:44
| 2019-03-22T22:18:52
| 79,742,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 347
|
py
|
from .base import PipBaseRecipe
class WTFormsRecipe(PipBaseRecipe):
def __init__(self, *args, **kwargs):
super(WTFormsRecipe, self).__init__(*args, **kwargs)
self.sha256 = 'ffdf10bd1fa565b8233380cb77a304cd' \
'36fd55c73023e91d4b803c96bc11d46f'
self.name = 'wtforms'
self.version = '2.1'
|
[
"clayton.stangeland@gmail.com"
] |
clayton.stangeland@gmail.com
|
fb2ae029326cdc5260c2c34c847575975d292b52
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03449/s515079577.py
|
ed708e8b8f9b546f606183d8cdb3a5cc6cd6ae6f
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 929
|
py
|
import math
from math import gcd,pi,sqrt
INF = float("inf")
MOD = 10**9 + 7
import sys
sys.setrecursionlimit(10**6)
import itertools
import bisect
from collections import Counter,deque
def i_input(): return int(input())
def i_map(): return map(int, input().split())
def i_list(): return list(i_map())
def i_row(N): return [i_input() for _ in range(N)]
def i_row_list(N): return [i_list() for _ in range(N)]
def s_input(): return input()
def s_map(): return input().split()
def s_list(): return list(s_map())
def s_row(N): return [s_input for _ in range(N)]
def s_row_str(N): return [s_list() for _ in range(N)]
def s_row_list(N): return [list(s_input()) for _ in range(N)]
def main():
n = i_input()
a = i_list()
a.append(0)
b = i_list()
b.append(0)
m = 0
for i in range(n):
trial = sum(a[:i+1]) + sum(b[i:])
m = max(m, trial)
print(m)
if __name__=="__main__":
main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
3a60fe79b32100607536ae6536cd91c46be9e2ed
|
eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7
|
/google/cloud/mediatranslation/v1beta1/mediatranslation-v1beta1-py/noxfile.py
|
235477334e1e16d28e4cf484fb7e4a2071eaf77b
|
[
"Apache-2.0"
] |
permissive
|
Tryweirder/googleapis-gen
|
2e5daf46574c3af3d448f1177eaebe809100c346
|
45d8e9377379f9d1d4e166e80415a8c1737f284d
|
refs/heads/master
| 2023-04-05T06:30:04.726589
| 2021-04-13T23:35:20
| 2021-04-13T23:35:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,994
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import shutil
import nox # type: ignore
@nox.session(python=['3.6', '3.7', '3.8', '3.9'])
def unit(session):
"""Run the unit test suite."""
session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio')
session.install('-e', '.')
session.run(
'py.test',
'--quiet',
'--cov=google/cloud/mediatranslation_v1beta1/',
'--cov-config=.coveragerc',
'--cov-report=term',
'--cov-report=html',
os.path.join('tests', 'unit', ''.join(session.posargs))
)
@nox.session(python=['3.6', '3.7'])
def mypy(session):
"""Run the type checker."""
session.install('mypy')
session.install('.')
session.run(
'mypy',
'--explicit-package-bases',
'google',
)
@nox.session(python='3.6')
def docs(session):
"""Build the docs for this library."""
session.install("-e", ".")
session.install("sphinx<3.0.0", "alabaster", "recommonmark")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-W", # warnings as errors
"-T", # show full traceback on exception
"-N", # no colors
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
7b2e484e63c5fc45c04b1b476c54ad6f48df39f1
|
a34b9c6cb03e13e0b13f55f3fcd23e974187a19b
|
/ample/utils.py
|
c31b1cbbbeb76b55ddb36918d3661cf0b51c3a1e
|
[
"MIT"
] |
permissive
|
hhcho/ample
|
a9496e18943da1116a804cee27a2759905ce29a1
|
cdb28d1ff1d285d851350e0446d0dc5e48a7a561
|
refs/heads/master
| 2020-04-10T19:08:10.569231
| 2018-12-12T17:42:03
| 2018-12-12T17:42:03
| 161,224,274
| 0
| 0
|
MIT
| 2018-12-12T17:42:04
| 2018-12-10T19:14:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,633
|
py
|
import errno
from fbpca import pca
import datetime
import numpy as np
import os
from sklearn.random_projection import SparseRandomProjection as JLSparse
import sys
# Default parameters.
DIMRED = 100
def log(string):
string = str(string)
sys.stdout.write(str(datetime.datetime.now()) + ' | [ample] ')
sys.stdout.write(string + '\n')
sys.stdout.flush()
def reduce_dimensionality(X, method='svd', dimred=DIMRED, raw=False):
if method == 'svd':
k = min((dimred, X.shape[0], X.shape[1]))
U, s, Vt = pca(X, k=k, raw=raw)
return U[:, range(k)] * s[range(k)]
elif method == 'jl_sparse':
jls = JLSparse(n_components=dimred)
return jls.fit_transform(X).toarray()
elif method == 'hvg':
X = X.tocsc()
disp = dispersion(X)
highest_disp_idx = np.argsort(disp)[::-1][:dimred]
return X[:, highest_disp_idx].toarray()
else:
sys.stderr.write('ERROR: Unknown method {}.'.format(svd))
exit(1)
def dispersion(X, eps=1e-10):
mean = X.mean(0).A1
X_nonzero = X[:, mean > eps]
nonzero_mean = X_nonzero.mean(0).A1
nonzero_var = (X_nonzero.multiply(X_nonzero)).mean(0).A1
del X_nonzero
nonzero_dispersion = (nonzero_var / nonzero_mean)
dispersion = np.zeros(X.shape[1])
dispersion[mean > eps] = nonzero_dispersion
dispersion[mean <= eps] = float('-inf')
return dispersion
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
|
[
"brianhie@mit.edu"
] |
brianhie@mit.edu
|
9206f7deeac420809be8a7ba2e64e36170b26ce7
|
254f1c347c1c9412b8e7d2c41d4b53eae57e8ead
|
/analysis/hchii_candidates.py
|
e69ab1aa3cec535cb56308252e05a014f131f73c
|
[] |
no_license
|
keflavich/MGPS
|
16d9b2343e4e78609d77c9138341c04273d62b10
|
a9f9dbaead132c42dd74de9915d2996c1fb0cf02
|
refs/heads/master
| 2021-06-03T06:33:49.335347
| 2020-11-04T01:21:45
| 2020-11-04T01:21:45
| 136,971,627
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,125
|
py
|
from dendrocat.aperture import Circle, Annulus
from astropy import wcs
from astropy.io import fits
from astropy.stats import mad_std
from astropy.convolution import convolve_fft, Gaussian2DKernel
from astropy import units as u
from astropy import coordinates
from astropy.table import Column, Table
import regions
import pylab as pl
from paths import catalog_figure_path, catalog_path, overview_figure_path
from files import files
from constants import mustang_central_frequency, mustang_beam_fwhm
from astropy.visualization import (MinMaxInterval, AsinhStretch,
PercentileInterval,
ImageNormalize)
reglist = regions.io.read_ds9('cutout_regions.reg')
cutout_regions = {reg.meta['label']: reg for reg in reglist}
for regname,fn in files.items():
for threshold,min_npix in ((4, 100),): # (6, 15), (8, 15), (10, 15)):
for min_delta in (1, ):
print(f"{regname}, {fn}")
catalog = Table.read(f'{catalog_path}/{regname}_dend_contour_thr{threshold}_minn{min_npix}_mind{min_delta}_crossmatch.ipac', format='ascii.ipac')
|
[
"keflavich@gmail.com"
] |
keflavich@gmail.com
|
3bcaece774afadaf9337a2966e2f51dc0850ba20
|
cb25407fc1480f771391bb09e36dad123ec9fca2
|
/bin/backupz.py
|
48644d780bc7d83d269ca55a769e30ee3e83314d
|
[] |
no_license
|
prataprc/zeta
|
f68925c9dfbf70331eae59ff5cf173956d249696
|
9c3bc88c56c67d0fff5c0790d768ad6cac79642f
|
refs/heads/master
| 2021-01-20T01:51:20.194893
| 2017-04-25T08:07:41
| 2017-04-25T08:07:41
| 89,334,343
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,182
|
py
|
#! /usr/bin/env python
# This file is subject to the terms and conditions defined in
# file 'LICENSE', which is part of this source code package.
# Copyright (c) 2009 SKR Farms (P) LTD.
# Gotcha :
# Notes :
# * The back-up direcotry structure,
# <bkpdir>
# |---<name>
# |---<name>-bkp-<timestamp> (backup directory)
# | |----<name> (hard-link to deployed dir)
# | |----<name>-sql-<timestamp> (sqldumpfile)
# |
# |---<name>-bkp-<timestamp>.tar.gz
# |---backupz-log-<timestamp>
import sys
import getopt
from optparse import OptionParser
import os
from os.path import basename, abspath, dirname, isdir, isfile, join
import shutil as sh
import time
progname = basename( __file__ )
usage = "usage: %prog [options] name deploydir bkpdir"
pyver = "%s.%s" % sys.version_info[:2]
python = 'python%s' % pyver
timest = time.localtime()
timestr = '%s.%s.%s.%s' % timest[:4]
options = None
def _cmdexecute( cmd, log=True ) :
if log :
print >> options.logfd, " %s" % cmd
rc = os.system( cmd )
if rc != 0 :
print >> options.logfd, "Command failed `%s`" % cmd
sys.exit(1)
def cmdoptions() :
op = OptionParser( usage=usage )
#op.add_option( "--eggs", dest="fetcheggs", default="",
# help="Fetch all the egg files to the <fetcheggs> directory" )
#op.add_option( "-H", dest="noindex", action="store_true", default=False,
# help="Do not look up into python package index" )
options, args = op.parse_args()
return op, options, args
def backupsql( name, destfile ) :
cmd = 'mysqldump %s -u %s --password=%s#321 > %s' % (
name, name, name, destfile )
_cmdexecute( cmd )
if __name__ == '__main__' :
op, options, args = cmdoptions()
if len(args) == 3 :
options.name = args[0]
options.deploydir= abspath(args[1])
options.bkpdir = join( abspath(args[2]), options.name,
'%s-bkp-%s' % (options.name, timestr) )
options.sqldump = join( options.bkpdir,
'%s-sql-%s' % (options.name, timestr) )
options.logfile = join( dirname(options.bkpdir),
'backupz-log-%s' % timestr )
options.targz = join( dirname(options.bkpdir),
'%s-bkp-%s.tar.gz' % (options.name, timestr) )
os.makedirs( options.bkpdir )
options.logfd = open( options.logfile, 'w' )
# Symbolically link deployed directory for backup
os.symlink( options.deploydir, join( options.bkpdir, options.name ) )
# SQL dump
backupsql( options.name,
join( options.bkpdir, '%s-%s.sql' % (options.name, timestr) )
)
# Tar and gzip
cmd = 'tar cfhlz %s %s' % ( options.targz, options.bkpdir )
_cmdexecute( cmd )
# Remove the original tar tree
sh.rmtree( options.bkpdir )
else :
op.print_help()
|
[
"prataprc@gmail.com"
] |
prataprc@gmail.com
|
963002037c164929ea7727c36d3bf9cd05df3dd9
|
bfc25f1ad7bfe061b57cfab82aba9d0af1453491
|
/data/external/repositories/126714/kaggle-avazu-master/script/rare.py
|
43114a58f77b0a7a3318461e834e50e3edd2f92b
|
[
"MIT"
] |
permissive
|
Keesiu/meta-kaggle
|
77d134620ebce530d183467202cf45639d9c6ff2
|
87de739aba2399fd31072ee81b391f9b7a63f540
|
refs/heads/master
| 2020-03-28T00:23:10.584151
| 2018-12-20T19:09:50
| 2018-12-20T19:09:50
| 147,406,338
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 870
|
py
|
import marshal
def stat(input,isTest):
f = open(input)
line = f.readline()
count = 0
while True:
line = f.readline()
if not line:
break
count += 1
if count % 100000 == 0:
print count
lis = line.split(",")
index = 11
if isTest:
index = 10
id = "i_"+lis[index]
ip = "j_" + lis[index+1]
iid = "v_" + lis[len(lis)-7]
if id in d:
d[id] += 1
else:
d[id] = 1
if ip in d:
d[ip] += 1
else:
d[ip] = 1
if iid in d:
d[iid] += 1
else:
d[iid] = 1
f.close()
d = {}
stat("../train_c",False)
stat("../test_c",True)
rare_d = {}
for k in d:
if d[k] <=10:
rare_d[k] = d[k]
marshal.dump(rare_d,open("../rare_d","w"))
|
[
"keesiu.wong@gmail.com"
] |
keesiu.wong@gmail.com
|
ffc88f94db44f760091df1a143e9f2971d8b52db
|
3700369b3c560e47dbc27c8b059b6f000a361f83
|
/webapp/models.py
|
9dc630d78b90b422702a3c3562405d4fb3819577
|
[] |
no_license
|
Aitmatow/instagram
|
0a44bc05db6308ccb4648d55932613d1377915d1
|
2b8e19e899316720d1f0626a7587f1b895c77a6f
|
refs/heads/master
| 2022-11-23T00:14:05.189119
| 2019-12-16T08:47:42
| 2019-12-16T08:47:42
| 227,972,959
| 1
| 0
| null | 2022-11-22T04:54:20
| 2019-12-14T05:46:03
|
Python
|
UTF-8
|
Python
| false
| false
| 929
|
py
|
from django.db import models
QUOTE_NEW = 'new'
QUOTE_APPROVED = 'approved'
QUOTE_STATUS_CHOICES = (
(QUOTE_NEW, 'Новая'),
(QUOTE_APPROVED, 'Подтверждена')
)
# Create your models here.
class Quote(models.Model):
text = models.TextField(max_length=2000, verbose_name='Текст цитаты')
created_at = models.DateTimeField(auto_now=True, verbose_name='Дата добавления')
status = models.CharField(max_length=20, choices=QUOTE_STATUS_CHOICES, default=QUOTE_NEW, verbose_name='Статус')
author_name = models.CharField(max_length=50, verbose_name='Кто добавил')
author_email = models.EmailField(verbose_name='Email')
rating = models.IntegerField(default=0, verbose_name='Рейтинг')
def __str__(self):
return self.text[:20] + '....'
class Meta:
verbose_name = 'Цитата'
verbose_name_plural = 'Цитаты'
|
[
"aitmarowd@gmail.com"
] |
aitmarowd@gmail.com
|
cd94f45eb0dc8a9695b7edda556ed08c23785c4f
|
663d89c6d26b66673d2df136366dab6f36f17ee9
|
/audiovisual/indico_audiovisual/blueprint.py
|
f281f0bd19160c861bf5b607ed314e0150b5c730
|
[
"MIT"
] |
permissive
|
rama270677/indico-plugins-cern
|
1a0a421bd45ce3f8bcea60d04ab4edca92fc5421
|
4ab66be5d633f31922be1ee8fd9d3a0905610924
|
refs/heads/master
| 2022-12-01T04:44:28.861197
| 2020-08-21T14:35:13
| 2020-08-21T14:35:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
py
|
# This file is part of the CERN Indico plugins.
# Copyright (C) 2014 - 2020 CERN
#
# The CERN Indico plugins are free software; you can redistribute
# them and/or modify them under the terms of the MIT License; see
# the LICENSE file for more details.
from __future__ import unicode_literals
from indico.core.plugins import IndicoPluginBlueprint
from indico_audiovisual.controllers import RHRequestList
blueprint = IndicoPluginBlueprint('audiovisual', __name__, url_prefix='/service/audiovisual')
blueprint.add_url_rule('/', 'request_list', RHRequestList)
|
[
"adrian.moennich@cern.ch"
] |
adrian.moennich@cern.ch
|
34f23e5d5803c1e8ef372ec6d8a00f6416b33083
|
d5ba475a6a782b0eed5d134b66eb8c601c41421c
|
/terrascript/data/docker.py
|
e4f799274d97d85094ebb96f999803db056d4c25
|
[
"BSD-2-Clause",
"Python-2.0"
] |
permissive
|
amlodzianowski/python-terrascript
|
ab42a06a5167e53ad8093b656a9bf14a03cb031d
|
142b1a4d1164d1012ac8865d12fdcc72f1e7ae75
|
refs/heads/master
| 2021-05-19T11:59:47.584554
| 2020-03-26T07:13:47
| 2020-03-26T07:13:47
| 251,688,045
| 0
| 0
|
BSD-2-Clause
| 2020-03-31T18:00:22
| 2020-03-31T18:00:22
| null |
UTF-8
|
Python
| false
| false
| 225
|
py
|
# terrascript/data/docker.py
import terrascript
class docker_registry_image(terrascript.Data):
pass
class docker_network(terrascript.Data):
pass
__all__ = [
"docker_registry_image",
"docker_network",
]
|
[
"markus@juenemann.net"
] |
markus@juenemann.net
|
f5ebb98bb66da12d55b669a91f4934411791b362
|
629f2bcdfb0902e013c16792184d4d809e40b775
|
/notebooks/tests/group_images_by_cycle_for_panoramic_stitching/test_group_images_by_cycle.py
|
f438b251676b15c959701135a2187adec3264157
|
[
"BSD-3-Clause"
] |
permissive
|
neutronimaging/python_notebooks
|
7d7a1df33300c4b952873efdfb358098a658896d
|
70a43a76eaf08f4ac63db3df7fbfb2e5cdb1216e
|
refs/heads/next
| 2023-08-30T20:05:20.225198
| 2023-07-05T16:38:10
| 2023-07-05T16:38:10
| 99,945,953
| 8
| 7
|
BSD-3-Clause
| 2022-11-03T12:03:30
| 2017-08-10T16:56:26
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 4,106
|
py
|
from pathlib import Path
from notebooks.__code.group_images_by_cycle_for_panoramic_stitching.group_images_by_cycle import GroupImagesByCycle
import glob
class TestGroupImagesByCycle:
def setup_method(self):
data_path = Path(__file__).parent.parent
self.data_path = str(data_path)
tiff_path = Path(data_path) / 'data' / 'images' / 'tiff'
list_of_files = glob.glob(str(tiff_path) + '/*.tif')
list_of_files.sort()
self.list_of_files = list_of_files
full_tiff_path = Path(data_path) / 'data' / 'images' / 'data_with_acquisition_cycle'
full_list_of_files = glob.glob(str(full_tiff_path) + '/*.tif')
full_list_of_files.sort()
self.full_list_of_files = full_list_of_files
self.list_of_metadata_key = [65045, 65041]
def test_create_master_dictionary(self):
o_group = GroupImagesByCycle(list_of_files=self.list_of_files,
list_of_metadata_key=self.list_of_metadata_key)
o_group.create_master_dictionary()
dict_expected = {self.data_path + '/data/images/tiff/image001.tif': {
'MotLongAxis': '170.000000',
'MotLiftTable': '115.000000'},
self.data_path + '/data/images/tiff/image002.tif': {
'MotLongAxis': '135.000000',
'MotLiftTable': '115.000000'},
self.data_path + '/data/images/tiff/image003.tif': {
'MotLongAxis': '100.000000',
'MotLiftTable': '115.000000'},
self.data_path + '/data/images/tiff/image004.tif': {
'MotLongAxis': '100.000000',
'MotLiftTable': '70.000000'},
self.data_path + '/data/images/tiff/image005.tif': {
'MotLongAxis': '100.000000',
'MotLiftTable': '30.000000'},
self.data_path + '/data/images/tiff/image006.tif': {
'MotLongAxis': '135.000000',
'MotLiftTable': '30.000000'},
self.data_path + '/data/images/tiff/image007.tif': {
'MotLongAxis': '170.000000',
'MotLiftTable': '30.000000'},
self.data_path + '/data/images/tiff/image008.tif': {
'MotLongAxis': '170.000000',
'MotLiftTable': '70.000000'},
self.data_path + '/data/images/tiff/image009.tif': {
'MotLongAxis': '135.000000',
'MotLiftTable': '70.000000'},
}
dict_returned = o_group.master_dictionary
for _file in dict_expected.keys():
_expected = dict_expected[_file]
_returned = dict_returned[_file]
for _key in _expected.keys():
assert _expected[_key] == _returned[_key]
def test_group_dictionary(self):
o_group = GroupImagesByCycle(list_of_files=self.full_list_of_files,
list_of_metadata_key=self.list_of_metadata_key)
o_group.create_master_dictionary()
o_group.group()
assert len(o_group.dictionary_of_groups.keys()) == 3
expected_list_group0 = self.full_list_of_files[:9]
assert len(o_group.dictionary_of_groups[0]) == len(expected_list_group0)
for _file_returned, _file_expected in zip(o_group.dictionary_of_groups[0], expected_list_group0):
assert _file_expected == _file_returned
expected_list_group1 = self.full_list_of_files[9:18]
assert len(o_group.dictionary_of_groups[1]) == len(expected_list_group1)
for _file_returned, _file_expected in zip(o_group.dictionary_of_groups[1], expected_list_group1):
assert _file_expected == _file_returned
|
[
"bilheuxjm@ornl.gov"
] |
bilheuxjm@ornl.gov
|
294de16b645cac37a71d2da6cad69031b535576e
|
274563cbc93b8dfb93eb574babc4ab5109a20de2
|
/basic/simple_draw_text.py
|
b0220f9f159a97fc9bc3f69992c39cab7a2cb8b9
|
[] |
no_license
|
land-pack/opencv-example
|
ed349cc4196b017ecfadc20d3419542dbb580111
|
bef764c31e7fb3aaaa91fdceddc4617d5c9baedd
|
refs/heads/master
| 2021-09-12T19:42:41.570268
| 2018-04-20T07:01:55
| 2018-04-20T07:01:55
| 109,978,004
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 255
|
py
|
import numpy as np
import cv2
img = np.zeros((512, 512, 3), np.uint8)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, 'Frank AK', (10, 500), font, 4, (255, 255, 255), 2, cv2.LINE_AA)
cv2.imshow('Text', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"landpack@sina.com"
] |
landpack@sina.com
|
bc9e7e89d2e6baf54b81ba09c4a086e283f0e331
|
d67bd00f8fe819bd3011ce154c19cbc765d59f1d
|
/branches/4.0_buildout/openlegis/sagl/skins/cadastros/auxiliares/tipo_norma_juridica/titulo_salvar_pysc.py
|
74f5c0d14c877874fc08a3e7eefbbd31d9f42463
|
[] |
no_license
|
openlegis-br/sagl
|
90f87bdbbaa8a6efe0ccb5691ea8424575288c46
|
eabf7529eefe13a53ed088250d179a92218af1ed
|
refs/heads/master
| 2023-08-31T12:29:39.382474
| 2023-08-29T16:12:01
| 2023-08-29T16:12:01
| 32,593,838
| 17
| 1
| null | 2023-08-29T06:16:55
| 2015-03-20T16:11:04
|
Python
|
UTF-8
|
Python
| false
| false
| 376
|
py
|
## Script (Python) "titulo_salvar_proc"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=id, title
##title=
##
if hasattr(context.sapl_documentos.modelo.norma,id):
arquivo = getattr(context.sapl_documentos.modelo.norma,id)
arquivo.manage_changeProperties(title=title)
return title
|
[
"contato@openlegis.com.br"
] |
contato@openlegis.com.br
|
89659d4b65962b9ea76b4d78d503da8bc52d4d1e
|
9fa490196c2f7b2e102ed1b3c512403a9a5655e3
|
/src/examples/lookup_example.py
|
518da3720badec9cef9a519de4d0c031e807af3f
|
[] |
no_license
|
TRomijn/EMAworkbench
|
742d29d997e05d8dce4150dc09207d2b1fe10e95
|
02a211f95c1e0a634aba1d1cadadbeba33b1e27e
|
refs/heads/master
| 2021-01-18T20:49:47.501239
| 2017-03-10T13:31:00
| 2017-03-10T13:31:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,657
|
py
|
'''
Created on Oct 1, 2012
This is a simple example of the lookup uncertainty provided for
use in conjuction with vensim models. This example is largely based on
`Eker et al. (2014) <http://onlinelibrary.wiley.com/doi/10.1002/sdr.1518/suppinfo>`_
@author: sibeleker
@author: jhkwakkel
'''
from __future__ import (division, unicode_literals, print_function,
absolute_import)
import matplotlib.pyplot as plt
from ema_workbench.em_framework import Outcome, ModelEnsemble
from ema_workbench.util import ema_logging
from ema_workbench.connectors.vensim import (LookupUncertainty,
VensimModel)
from ema_workbench.analysis.plotting import lines
from ema_workbench.analysis.plotting_util import BOXPLOT
class Burnout(VensimModel):
model_file = r'\BURNOUT.vpm'
outcomes = [Outcome('Accomplishments to Date', time=True),
Outcome('Energy Level', time=True),
Outcome('Hours Worked Per Week', time=True),
Outcome('accomplishments per hour', time=True)]
def __init__(self, working_directory, name):
super(Burnout, self).__init__(working_directory, name)
self.uncertainties = [LookupUncertainty('hearne2',[(-1, 3), (-2, 1), (0, 0.9), (0.1, 1), (0.99, 1.01), (0.99, 1.01)],
"accomplishments per hour lookup", self, 0, 1),
LookupUncertainty('hearne2', [(-0.75, 0.75), (-0.75, 0.75), (0, 1.5), (0.1, 1.6), (-0.3, 1.5), (0.25, 2.5)],
"fractional change in expectations from perceived adequacy lookup", self, -1, 1),
LookupUncertainty('hearne2', [(-2, 2), (-1, 2), (0, 1.5), (0.1, 1.6), (0.5, 2), (0.5, 2)],
"effect of perceived adequacy on energy drain lookup", self, 0, 10),
LookupUncertainty('hearne2', [(-2, 2), (-1, 2), (0, 1.5), (0.1, 1.6), (0.5, 1.5), (0.1, 2)],
"effect of perceived adequacy of hours worked lookup", self, 0, 2.5),
LookupUncertainty('hearne2', [(-1, 1), (-1, 1), (0, 0.9), (0.1, 1), (0.5, 1.5), (1, 1.5)],
"effect of energy levels on hours worked lookup", self, 0, 1.5),
LookupUncertainty('hearne2', [(-1, 1), (-1, 1), (0, 0.9), (0.1, 1), (0.5, 1.5), (1, 1.5)],
"effect of high energy on further recovery lookup", self, 0, 1.25),
LookupUncertainty('hearne2', [(-2, 2), (-1, 1), (0, 100), (20, 120), (0.5, 1.5), (0.5, 2)],
"effect of hours worked on energy recovery lookup", self, 0, 1.5),
LookupUncertainty('approximation', [(-0.5, 0.35), (3, 5), (1, 10), (0.2, 0.4), (0, 120)],
"effect of hours worked on energy drain lookup", self, 0, 3),
LookupUncertainty('hearne1', [(0, 1), (0, 0.15), (1, 1.5), (0.75, 1.25)],
"effect of low energy on further depletion lookup", self, 0, 1)]
self._delete_lookup_uncertainties()
if __name__ == "__main__":
ema_logging.log_to_stderr(ema_logging.INFO)
model = Burnout(r'./models/burnout', "burnout")
ensemble = ModelEnsemble()
ensemble.model_structures = model
#run policy with old cases
results = ensemble.perform_experiments(100)
lines(results, 'Energy Level', density=BOXPLOT)
plt.show()
|
[
"j.h.kwakkel@tudelft.nl"
] |
j.h.kwakkel@tudelft.nl
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.