import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.optimizers import Adam
# Download the Fashion MNIST dataset from Keras
fashion_mnist = tf.keras.datasets.fashion_mnist
# Split the dataset into a training set and a test set
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# Names for labels 0 through 9
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# train_images.shape  # shape of the training data
# len(train_labels)  # total length of the training data
# The 60,000 training samples are each labeled with a number from 0 to 9
# train_labels  # 60,000 in total
# The test dataset has 10,000 samples
# test_images.shape  # same shape as the training data
# len(test_labels)  # the training set has 60,000 samples, but the test set used for evaluation has 10,000
plt.figure()
# Show the first image of the training dataset
plt.imshow(train_images[0])
# Display a color bar next to the image
plt.colorbar()
plt.grid(False)
plt.show()
# Normalize the image data: scale the 0-255 pixel values to the 0-1 range
# Normalization makes the data easier to train on
train_images = train_images / 255.0
test_images = test_images / 255.0
# Display the 25 images from index 0 to 24 in a 5 x 5 grid
# to check that the data has been prepared correctly
# plt.figure(figsize=(10,10))
# for i in range(25):
# plt.subplot(5,5,i+1)
# plt.xticks([])
# plt.yticks([])
# plt.grid(False)
# plt.imshow(train_images[i], cmap=plt.cm.binary)
# plt.xlabel(class_names[train_labels[i]])
# plt.show()
# Build the model
model = Sequential()
# The dense classification layers expect 1D input,
# so convert the 28 x 28 2D shape into a 1D vector
model.add(Flatten(input_shape=(28, 28)))
model.add(Dense(128, activation="relu"))
# The softmax activation outputs 10 class probabilities that sum to 1
model.add(Dense(10, activation="softmax"))
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.summary()
model.fit(train_images, train_labels, epochs=5)
# Feed the test set to the trained model to obtain the loss and accuracy
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
# Use the trained model to predict the multi-class results for the test set
predictions = model.predict(test_images)
# The entire test set was fed to the model, so predictions holds 10,000 prediction vectors
# The prediction for the first test image out of those 10,000
predictions[0]
# The label with the highest probability in the first prediction
np.argmax(predictions[0])
# Helper function to visualize the trained model's predictions
def plot_image(i, predictions_array, true_label, img):
    # Prediction results, true label, and image for the i-th test sample
predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
    # Predicted label for this test sample
predicted_label = np.argmax(predictions_array)
    # Blue if the predicted label matches the true label, red otherwise
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
    # Show the predicted label name, its probability, and the true label as the x-label (blue if correct, red if wrong, per the if statement above)
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label], 100*np.max(predictions_array), class_names[true_label]), color=color)
# Helper function to visualize the predicted class probabilities
def plot_value_array(i, predictions_array, true_label):
    # Prediction results and true label for the i-th test sample
predictions_array, true_label = predictions_array[i], true_label[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
    # Draw a gray bar chart of the softmax probabilities for classes 0-9
thisplot = plt.bar(range(10), predictions_array, color="#777777")
    # Softmax outputs sum to 1, so each probability lies in the 0-1 range
plt.ylim([0, 1])
    # The class with the highest probability (the model's actual prediction)
predicted_label = np.argmax(predictions_array)
    # The model's predicted class is colored red and the true class blue; if the prediction is correct
    # the blue bar overwrites the red one, otherwise both red and blue bars appear, making errors easy to spot
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
# Index choosing which test sample's prediction to display, since the whole test set was fed to the model
i = 4
plt.figure(figsize=(6,3))
# Two plots side by side; this is the first one
plt.subplot(1,2,1)
# Call plot_image to show the i-th image along with its predicted label, correctness, and probability
plot_image(i, predictions, test_labels, test_images)
# Two plots side by side; this is the second one
plt.subplot(1,2,2)
# Call plot_value_array to visualize how the predicted probability for the i-th image compares with the other classes
plot_value_array(i, predictions, test_labels)
plt.show()
|
from django.db import models
class ModelWithBasicField(models.Model):
name = models.CharField(max_length=32)
class Meta:
app_label = 'testapp'
def __unicode__(self):
return u'%s' % self.pk
class ModelWithParentModel(ModelWithBasicField):
foo = models.BooleanField(default=False)
class Meta:
app_label = 'testapp'
|
"""
i18n regions resource implementation.
"""
from typing import Optional, Union
from pyyoutube.resources.base_resource import Resource
from pyyoutube.models import I18nRegionListResponse
from pyyoutube.utils.params_checker import enf_parts
class I18nRegionsResource(Resource):
"""An i18nRegion resource identifies a geographic area that a YouTube user can select as
the preferred content region.
References: https://developers.google.com/youtube/v3/docs/i18nRegions
"""
def list(
self,
parts: Optional[Union[str, list, tuple, set]] = None,
hl: Optional[str] = None,
return_json: bool = False,
**kwargs: Optional[dict],
) -> Union[dict, I18nRegionListResponse]:
"""Returns a list of content regions that the YouTube website supports.
Args:
parts:
Comma-separated list of one or more i18n regions resource properties.
Accepted values: snippet.
hl:
Specifies the language that should be used for text values in the API response.
The default value is en_US.
return_json:
Type for returned data. If you set True JSON data will be returned.
**kwargs:
Additional parameters for system parameters.
Refer: https://cloud.google.com/apis/docs/system-parameters.
Returns:
i18n regions data.
"""
params = {
"part": enf_parts(resource="i18nRegions", value=parts),
"hl": hl,
**kwargs,
}
response = self._client.request(path="i18nRegions", params=params)
data = self._client.parse_response(response=response)
return data if return_json else I18nRegionListResponse.from_dict(data)
|
#!/usr/bin/env python
import sys
import simplep2p as s
peer_count = s.get_peer_count()
if peer_count == 0:
print "FAIL: no peers"
sys.exit(1)
response_count = s.send_message_to_peers("hello_there")
if response_count == peer_count:
print "PASS"
else:
print "FAIL"
sys.exit(1)
|
import unittest
from employee import Employee
from unittest.mock import patch  # we can use patch as a decorator or as a context manager
# patch allows us to mock an object during a test and then that object is automatically restored after the test is run.
class TestEmployee(unittest.TestCase):
# class methods allow us to work with the class itself, rather than the instance of the class
    # class methods are denoted with the @classmethod decorator before the method definition
@classmethod
def setUpClass(cls):
print("setUpClass")
@classmethod
def tearDownClass(cls):
print("tearDownClass")
# using the setUp method to keep code DRY
# The setUp() method will run its code BEFORE every single test
    # In order to access these instances throughout our script, we will have to
    # set these variables as instance attributes by putting self.variable
    # since those are now instance attributes, because they have self. in front of them,
    # we have to add self to the beginning when referring to them
def setUp(self):
# These two employee instance attributes are created before every single one of our tests
# They will be created to be used as testing examples
print("setUp")
self.emp_1 = Employee("Corey", "Schafer", 50_000)
self.emp_2 = Employee("Sue", "Smith", 60_000)
# use the tearDown() method to keep code DRY
# the tearDown() method will run its code AFTER every single test
def tearDown(self):
print("tearDown\n")
pass
def test_email(self):
print("test_email")
# we test if those assertions on those new instances hold true
self.assertEqual(self.emp_1.email, "Corey.Schafer@email.com")
self.assertEqual(self.emp_2.email, "Sue.Smith@email.com")
# we change the values for the names
self.emp_1.first = "John"
self.emp_2.first = "Jane"
# we check if given the assertions that the values still hold true
self.assertEqual(self.emp_1.email, "John.Schafer@email.com")
self.assertEqual(self.emp_2.email, "Jane.Smith@email.com")
def test_fullname(self):
print("test_fullname")
self.assertEqual(self.emp_1.fullname, "Corey Schafer")
self.assertEqual(self.emp_2.fullname, "Sue Smith")
self.emp_1.first = "John"
self.emp_2.first = "Jane"
self.assertEqual(self.emp_1.fullname, "John Schafer")
self.assertEqual(self.emp_2.fullname, "Jane Smith")
def test_apply_raise(self):
print("test_apply_raise")
self.emp_1.apply_raise()
self.emp_2.apply_raise()
self.assertEqual(self.emp_1.pay, 52_500)
self.assertEqual(self.emp_2.pay, 63_500)
def test_monthly_schedule(self):
'''here we use patch a context manager for mocking
mocking allows us to still test our code even if an external variable, like the
operation of an external website is down without our test returning a Failed result
because of an external event
What we pass to patch is what we want to mock, is requests.get from the employee module, and setting
that equal to mocked_get. We didn't just import it straight out but we want to
mock these objects where they are actually being used in the script.
'''
with patch("employee.requests.get") as mocked_get:
# Testing a passing value
mocked_get.return_value.ok = True
mocked_get.return_value.text = "Success"
# within our context, we want to run our method monthly_schedule method
# just like we are testing it.
schedule = self.emp_1.monthly_schedule("May")
mocked_get.assert_called_with("http://company.com/Schafer/May")
self.assertEqual(schedule, "Success")
# Testing a failed response
mocked_get.return_value.ok = False
# within our context, we want to run our method monthly_schedule method
# just like we are testing it.
schedule = self.emp_2.monthly_schedule("June")
mocked_get.assert_called_with("http://company.com/Smith/May")
self.assertEqual(schedule, "Bad Response!")
'''
as programmers, we often try to keep our code DRY
DRY stands for: Don't Repeat Yourself
When we see a lot of similar code appear, it means that if we make updates
it will be a pain to have to fix those things in all those spots.
So why not put all of that setup code in one place and re-use it for every test?
That is what the setUp() and tearDown() methods are for.
You can do this by creating two new methods at the top of our TestClass.
The print statements indicate that the tests are run as such:
-setUp
-test_
-tearDown
We also notice that the tests are not necessarily run in the order they appear in the script (i.e., from top to bottom).
That's why we need to keep our tests isolated from one another.
And that's why it's useful to have some code run at the very beginning of the test file,
and then have some cleanup code that runs after all the tests have been run.
So, unlike setUp and tearDown, which run before and after every single test, it would be nice
if we had something that would run once before everything and once after everything.
We can actually do this with two class methods called setUpClass and tearDownClass.
Remember, you can create class methods with the @classmethod decorator above the method.
Classmethods allow us to work with the class itself, rather than the instance of the class.
Notice how setUpClass is run at the start of all the testing
and tearDownClass is run after all the testing is done.
setUpClass and tearDownClass are useful if you want to run something once that is too costly to do before each test.
Also, last notes:
Tests should be isolated. Basically, this just means that your tests should run without affecting other tests.
In this video, Corey was adding tests to existing code.
You might have heard of something called
"Test Driven Development". Basically what test-driven development means is that you write the test BEFORE
you even write the code. Sometimes this can be useful. This is not always followed in practice but it is nice.
The concept is: you should think about what you want your code to do and write a test implementing that behaviour
and then watch the test fail since it doesn't have any code to run against, and then write the code in a way
that gets the test to pass.
Simple testing is better than no testing. Don't feel like you have to be an expert at writing mocks
and things like that. Even if you just write some basic assertions, that's better than not writing anything.
There is also another test framework out there called pytest that a lot of people like to use instead of this built-in unittest module.
'''
if __name__ == "__main__":
unittest.main()
|
import argparse
import copy
import json
import os
import platform
import socket
import time
import traceback
from queue import Queue
from base64 import b64encode, b64decode
import requests
class OperationLoop:
def __init__(self, server, es_host='http://127.0.0.1:9200', index_pattern='*',
result_size=10, group='blue', minutes_since=60, sleep=60):
self.es_host = es_host
self.index_pattern = index_pattern
self.result_size = result_size
self.minutes_since = minutes_since
self.sleep = sleep
self.instruction_queue = Queue()
self._profile = dict(
server=server,
host=socket.gethostname(),
platform=platform.system().lower(),
executors=['elasticsearch'],
pid=os.getpid(),
group=group
)
def get_profile(self):
return copy.copy(self._profile)
@property
def server(self):
return self._profile['server']
@property
def paw(self):
return self._profile.get('paw', 'unknown')
def execute_lucene_query(self, lucene_query_string):
query_string = 'event.created:[now-%im TO now] AND %s' % (self.minutes_since, lucene_query_string)
body = dict(query=dict(query_string=dict(query=query_string)))
resp = requests.post('%s/%s/_search' % (self.es_host, self.index_pattern),
params=dict(size=self.result_size),
json=body)
resp.raise_for_status()
return resp.json().get('hits', {}).get('hits', [])
def start(self):
while True:
try:
print('[*] Sending beacon for %s' % (self.paw,))
self._send_beacon()
self._handle_instructions()
time.sleep(self.sleep)
except Exception as e:
print('[-] Operation loop error: %s' % e)
traceback.print_exc()
time.sleep(30)
""" PRIVATE """
def _handle_instructions(self):
while not self.instruction_queue.empty():
i = self.instruction_queue.get()
result, seconds = self._execute_instruction(json.loads(i))
self._send_beacon(results=[result])
time.sleep(seconds)
else:
self._send_beacon()
def _next_instructions(self, beacon):
return json.loads(self._decode_bytes(beacon['instructions']))
def _send_beacon(self, results=None, enqueue_instructions=True):
results = results or []
beacon = self.get_profile()
beacon['results'] = results
body = self._encode_string(json.dumps(beacon))
resp = requests.post('%s/beacon' % (self.server,), data=body)
resp.raise_for_status()
beacon_resp = json.loads(self._decode_bytes(resp.text))
self._profile['paw'] = beacon_resp['paw']
self.sleep = beacon_resp['sleep']
if enqueue_instructions:
            for instruction in json.loads(beacon_resp.get('instructions', '[]')):
self.instruction_queue.put(instruction)
return beacon_resp
def _execute_instruction(self, i):
print('[+] Running instruction: %s' % i['id'])
query = self._decode_bytes(i['command'])
results = self.execute_lucene_query(query)
return dict(output=self._encode_string(json.dumps(results)), pid=os.getpid(), status=0, id=i['id']), i['sleep']
@staticmethod
def _decode_bytes(s):
return b64decode(s).decode('utf-8', errors='ignore').replace('\n', '')
@staticmethod
def _encode_string(s):
return str(b64encode(s.encode()), 'utf-8')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--server', default='http://127.0.0.1:8888', help='Base URL Caldera server.')
parser.add_argument('--es-host', default='http://127.0.0.1:9200', dest='es_host',
help='Base URL of ElasticSearch.')
parser.add_argument('--index', default='*', help='ElasticSearch index pattern to search over.')
parser.add_argument('--group', default='blue')
parser.add_argument('--minutes-since', dest='minutes_since', default=60, type=int,
help='How many minutes back to search for events.')
parser.add_argument('--sleep', default=60, type=int,
help='Number of seconds to wait to check for new commands.')
args = parser.parse_args()
try:
OperationLoop(args.server, es_host=args.es_host, index_pattern=args.index, group=args.group,
minutes_since=args.minutes_since, sleep=args.sleep).start()
except Exception as e:
        print('[-] Caldera server may not be accessible, or: %s' % e)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 15 12:11:43 2020
@author: adeela
"""
'''
## Given data of intervals, find overlapping intervals
## x1, 9:30
## x1, 10:00
## x2, 11:00
## x2, 12:00
## y1, 11:30
## y1, 01:00
In this data, x2 and y1 overlap:
x1(9:30, 10:00), x2(11:00, 12:00),
y1(11:30, 01:00)
'''
import pandas as pd
import numpy as np
df = pd.DataFrame({
    # Data taken from the example in the docstring above;
    # '01:00' for y1 is taken to mean 13:00 (1 PM) so that y1 overlaps x2
    'interval': ['x1', 'x1', 'x2', 'x2', 'y1', 'y1'],
    'time': ['09:30', '10:00', '11:00', '12:00', '11:30', '13:00'],
})
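# A minimal sketch (added; not part of the original file) of one way to find the
# overlapping intervals in df: collapse each interval to a (start, end) pair and
# compare every pair. Assumes zero-padded same-day HH:MM strings, so plain string
# comparison matches chronological order.
intervals = df.groupby('interval')['time'].agg(['min', 'max']).rename(
    columns={'min': 'start', 'max': 'end'})
names = intervals.index.tolist()
for i in range(len(names)):
    for j in range(i + 1, len(names)):
        a, b = intervals.loc[names[i]], intervals.loc[names[j]]
        # two intervals overlap when each one starts before the other ends
        if a['start'] < b['end'] and b['start'] < a['end']:
            print(names[i], 'overlaps', names[j])  # expected output: x2 overlaps y1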
|
import os
def isValidPath(filePath):
if os.path.exists(filePath):
pass
elif os.access(os.path.dirname(filePath), os.W_OK):
pass
else:
return False
return True
|
import autodisc as ad
def get_system_parameters():
system_parameters = ad.systems.Lenia.default_system_parameters()
system_parameters.size_y = 256
system_parameters.size_x = 256
return system_parameters
def get_explorer_config():
explorer_config = ad.explorers.OnlineLearningGoalExplorer.default_config()
explorer_config.seed = 5
explorer_config.num_of_random_initialization = 1000
explorer_config.run_parameters = []
# Parameter 1: init state
parameter = ad.Config()
parameter.name = 'init_state'
parameter.type = 'cppn_evolution'
parameter.init = ad.cppn.TwoDMatrixCCPNNEATEvolution.default_config()
parameter.init.neat_config_file = 'neat_config.cfg'
parameter.init.n_generations = 1
parameter.init.best_genome_of_last_generation = True
parameter.mutate = ad.cppn.TwoDMatrixCCPNNEATEvolution.default_config()
parameter.mutate.neat_config_file = 'neat_config.cfg'
parameter.mutate.n_generations = 2
parameter.mutate.best_genome_of_last_generation = True
explorer_config.run_parameters.append(parameter)
# Parameter 2: R
parameter = ad.Config()
parameter.name = 'R'
parameter.type = 'sampling'
parameter.init = ('discrete', 2, 20)
parameter.mutate = {'type': 'discrete', 'distribution': 'gauss', 'sigma': 0.5, 'min': 2, 'max': 20}
explorer_config.run_parameters.append(parameter)
# Parameter 3: T
parameter = ad.Config()
parameter.name = 'T'
parameter.type = 'sampling'
parameter.init = ('discrete', 1, 20)
parameter.mutate = {'type': 'discrete', 'distribution': 'gauss', 'sigma': 0.5, 'min': 1, 'max': 20}
explorer_config.run_parameters.append(parameter)
# Parameter 4: b
parameter = ad.Config()
parameter.name = 'b'
parameter.type = 'sampling'
parameter.init = ('function', ad.helper.sampling.sample_vector, (('discrete', 1, 3), (0, 1)))
parameter.mutate = {'type': 'continuous', 'distribution': 'gauss', 'sigma': 0.05, 'min': 0, 'max': 1}
explorer_config.run_parameters.append(parameter)
# Parameter 5: m
parameter = ad.Config()
parameter.name = 'm'
parameter.type = 'sampling'
parameter.init = ('continuous', 0, 1)
parameter.mutate = {'type': 'continuous', 'distribution': 'gauss', 'sigma': 0.05, 'min': 0, 'max': 1}
explorer_config.run_parameters.append(parameter)
# Parameter 6: s
parameter = ad.Config()
parameter.name = 's'
parameter.type = 'sampling'
parameter.init = ('continuous', 0.001, 0.3)
parameter.mutate = {'type': 'continuous', 'distribution': 'gauss', 'sigma': 0.01, 'min': 0.001, 'max': 0.3}
explorer_config.run_parameters.append(parameter)
# which statistics are used as a goal space
explorer_config.goal_space_representation.type = 'pytorchnnrepresentation'
explorer_config.goal_space_representation.config = ad.representations.static.PytorchNNRepresentation.default_config()
explorer_config.goal_space_representation.config.initialization = ad.Config()
explorer_config.goal_space_representation.config.initialization.type = 'random_weight'
explorer_config.goal_space_representation.config.initialization.load_from_model_path = ''
explorer_config.goal_space_representation.config.initialization.model_type = 'BetaVAE'
system_params = get_system_parameters()
explorer_config.goal_space_representation.config.initialization.model_init_params = {'n_latents': 8, 'beta': 1.0, 'add_var_to_KLD_loss': False, 'input_size': (system_params.size_y, system_params.size_x), 'num_classes': 3}
#online training parameters
explorer_config.online_training.output_representation_folder = './trained_representation'
explorer_config.online_training.n_runs_between_train_steps = 100
explorer_config.online_training.n_epochs_per_train_steps = 40
explorer_config.online_training.train_batch_size = 64
explorer_config.online_training.importance_sampling_new_vs_old = 0.5
explorer_config.online_training.dataset_constraints = [dict( active = True, filter = ('statistics.is_dead', '==', False))]
# how are goals sampled
explorer_config.goal_selection.type = 'random'
explorer_config.goal_selection.sampling = [(-3,3)] * explorer_config.goal_space_representation.config.initialization.model_init_params['n_latents']
    # how the source policies for a mutation are selected
explorer_config.source_policy_selection.type = 'optimal'
return explorer_config
def get_number_of_explorations():
return 5000
|
# coding:utf-8
# Unless explicitly specified, np.array() tries to infer a suitable data type for the new array.
# The data type is stored in a special dtype object.
# 1. Converting a list
import numpy as np
import matplotlib.pyplot as plt
data = [1, 2, 3, 4, 5, 6]
arr1 = np.array(data)
print(arr1)
# shape: describes the size of each dimension; dtype: object describing the array's data type
print("arr1.shape:{0},\narr1.dtype:{1}".format(arr1.shape, arr1.dtype))
# 2. Creating multi-dimensional arrays: nested sequences are converted into a multi-dimensional array
data2 = [[1, 2, 3], [4, 5, 6]]
arr2 = np.array(data2)
# astype() can be used to explicitly change an array's dtype
arr2 = arr2.astype(np.float64)
print(arr2)
print("arr2.shape:{0},\narr2.dtype:{1}".format(arr2.shape, arr2.dtype))
# 3. Operations between arrays and scalars
print("========================= Operations between arrays and scalars =========================")
# Arithmetic between equal-sized arrays is applied element-wise
arr3 = np.array([[1, 2, 3], [4, 5, 6]])
arr4 = arr3 * arr3
print(arr4)
arr5 = arr3 - arr3
print(arr5)
# Operations between an array and a scalar broadcast the scalar to every element
arr6 = 1 + arr3
print(arr6)
arr7 = arr3 * 0.5
arr7.astype(np.float64)
print(arr7)
# 4. Indexing and slicing
arr8 = np.arange(10)
print(arr8)
# [0 1 2 3 4 5 6 7 8 9]
# 4.1 Slicing and indexing one-dimensional arrays
print("====================== Slicing and indexing one-dimensional arrays ======================")
print("arr8[4]:{0}".format(arr8[4]))
arr8_slice = arr8[3:7]
# [3 4 5 6]
print(arr8_slice)
# When a scalar is assigned to a slice, the value is broadcast to the entire selection.
arr8[3:7] = 12
print(arr8)
# 4.2 Slicing and indexing higher-dimensional arrays
# In a two-dimensional array, the element at each index is no longer a scalar but a one-dimensional array
# Elements can be accessed recursively, or a comma-separated index tuple can be passed to select a single element
print("====================== Slicing and indexing higher-dimensional arrays ======================")
arr9 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
print(arr9)
print(arr9[1])
print(arr9[1, 2])
# Fancy indexing uses integer arrays for indexing
# empty() creates a new array, allocating memory without filling in any values
print("====================== Fancy indexing ======================")
arr10 = np.empty((8, 4))
print(arr10)
for i in range(8):
arr10[i] = i
print(arr10)
print("======================以特定的顺序选取行子集===================================")
# 为了以特定的顺序选取行子集,只需要传入一个用于指定顺序的整数列表或ndarray即可
arr10_chid = arr10[[4, 3, 0, 6]]
print(arr10_chid)
print("======================将一维数组改为二维数组===================================")
# 当一次传入多个数组时,它返回的是一个一维数组,其中的索引对应各个索引元组
arr11 = np.arange(32)
print(arr11)
# Reshape the 1D array into a 2D array
arr12 = arr11.reshape((8, 4))
print(arr12)
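# Added illustration (not in the original): the comment above describes passing
# multiple index arrays at once; the line below selects elements of arr12 by
# (row, column) pairs and returns a one-dimensional array.
print(arr12[[1, 5, 7, 2], [0, 3, 1, 2]])
# -> [ 4 23 29 10]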
# 5. Transposing arrays and swapping axes
print("====================== Transposing arrays and swapping axes ======================")
arr13 = np.arange(15).reshape(5, 3)
print(arr13)
# Transposing: arrays have a transpose() method as well as the special T attribute.
print(np.transpose(arr13))
print(arr13.T)
print("======================高维数组:数组转置和轴对称==================================")
arr15 = np.arange(16).reshape((2, 2, 4))
print(arr15)
# For higher-dimensional arrays, transpose() takes a tuple of axis numbers
arr16 = np.transpose(arr15, (1, 0, 2))
print(arr16)
# 6. Data processing with arrays
print("====================== Data processing with arrays ======================")
# NumPy arrays let you express many data-processing tasks as concise array expressions (which would otherwise require loops)
# np.meshgrid() takes two 1D arrays and produces two 2D matrices corresponding to all (x, y) pairs from the two arrays
points = np.arange(-5, 5, 0.01)  # 1000 equally spaced points
xs, ys = np.meshgrid(points, points)
print(ys)
print("======================公式:sqrt(x^2 + y^2)================================")
# 根据网络对函数求值的结果
z = np.sqrt(xs ** 2 + ys ** 2)
print(z)
plt.imshow(z, cmap=plt.cm.gray)
plt.colorbar()
plt.title(r'Image plot of $\sqrt{x^2 + y^2}$ for a grid of values')
# 7. Mathematical and statistical methods
# A set of mathematical functions on arrays computes statistics over the whole array or along a given axis
print("====================== Mathematical and statistical methods ======================")
arr18 = np.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
print(arr18.cumprod(1))
|
import xmltodict
with open('sample.xml', 'r', encoding='utf-8') as fp:
root = xmltodict.parse(fp.read(), dict_constructor=dict)
sample = root['root']
sample['items']['item'][0]['amount'] = 200
if not sample['items']['item'][0]['owner']:
sample['items']['item'][0]['owner'] = 'alice'
sample['items']['item'][0]['update_at'] = '2019-04-13T14:23:53.193Z'
sample['items']['item'] = list(filter(lambda x: x, sample['items']['item']))
sample['items']['item'].append({"name": "pen", "price": 1.2})
print(xmltodict.unparse(root))
|
import cx_Oracle
import threading
from urllib import urlopen
#subclass of threading.Thread
class AsyncBlobInsert(threading.Thread):
def __init__(self, cur, input):
threading.Thread.__init__(self)
self.cur = cur
self.input = input
def run(self):
blobdoc = self.input.read()
self.cur.execute("INSERT INTO blob_tab (ID, BLOBDOC) VALUES(blob_seq.NEXTVAL, :blobdoc)", {'blobdoc':blobdoc})
self.input.close()
self.cur.close()
#main thread starts here
inputs = []
inputs.append(open('/tmp/figure1.bmp', 'rb'))
inputs.append(urlopen('http://localhost/_figure2.bmp'))
dbconn = cx_Oracle.connect('usr', 'pswd', '127.0.0.1/XE',threaded=True)
dbconn.autocommit = True
for input in inputs:
cur = dbconn.cursor()
cur.setinputsizes(blobdoc=cx_Oracle.BLOB)
th = AsyncBlobInsert(cur, input)
th.start()
|
#!/usr/bin/python3
#
# Python script that regenerates the README.md from the embedded template. Uses
# ./generate_table.awk to regenerate the ASCII tables from the various *.txt
# files.
from subprocess import check_output
nano_results = check_output(
"./generate_table.awk < nano.txt", shell=True, text=True)
micro_results = check_output(
"./generate_table.awk < micro.txt", shell=True, text=True)
samd21_results = check_output(
"./generate_table.awk < samd21.txt", shell=True, text=True)
stm32_results = check_output(
"./generate_table.awk < stm32.txt", shell=True, text=True)
samd51_results = check_output(
"./generate_table.awk < samd51.txt", shell=True, text=True)
esp8266_results = check_output(
"./generate_table.awk < esp8266.txt", shell=True, text=True)
esp32_results = check_output(
"./generate_table.awk < esp32.txt", shell=True, text=True)
print(f"""\
# Auto Benchmark
Here are the results from `AutoBenchmark.ino` for various boards.
These results show that integer division and modulus operations are incredibly
slow on 8-bit AVR processors.
**Version**: AceTime v2.3.0
**NOTE**: This file was auto-generated using `make README.md`. DO NOT EDIT.
## Dependencies
This program depends on the following libraries:
* [AceTime](https://github.com/bxparks/AceTime)
* [AceRoutine](https://github.com/bxparks/AceRoutine)
## How to Generate
This requires the [AUniter](https://github.com/bxparks/AUniter) script
to execute the Arduino IDE programmatically.
The `Makefile` has rules to generate the `*.txt` results file for several
microcontrollers that I usually support, but the `$ make benchmarks` command
does not work very well because the USB port of the microcontroller is a
dynamically changing parameter. I created a semi-automated way of collecting the
`*.txt` files:
1. Connect the microcontroller to the serial port. I usually do this through a
USB hub with individually controlled switches.
2. Type `$ auniter ports` to determine its `/dev/ttyXXX` port number (e.g.
`/dev/ttyUSB0` or `/dev/ttyACM0`).
3. If the port is `USB0` or `ACM0`, type `$ make nano.txt`, etc.
4. Switch off the old microcontroller.
5. Go to Step 1 and repeat for each microcontroller.
The `generate_table.awk` program reads one of the `*.txt` files and prints out an
ASCII table that can be directly embedded into this README.md file. For example
the following command produces the table in the Nano section below:
```
$ ./generate_table.awk < nano.txt
```
Fortunately, we no longer need to run `generate_table.awk` for each `*.txt`
file. The process has been automated using the `generate_readme.py` script which
will be invoked by the following command:
```
$ make README.md
```
The CPU times below are given in microseconds.
## CPU Time Changes
**v0.8 to v1.4:**
* The CPU time did not change much from v0.8 to v1.4.
**v1.5:**
* No significant changes to CPU time.
* Zone registries (kZoneRegistry, kZoneAndLinkRegistry) are now sorted by zoneId
instead of zoneName, and the `ZoneManager::createForZoneId()` will use a
binary search, instead of a linear search. This makes it 10-15X faster for
~266 entries.
* The `ZoneManager::createForZoneName()` also converts to a zoneId, then
performs a binary search, instead of doing a binary search on the zoneName
directly. Even with the extra level of indirection, the `createForZoneName()`
is between 1.5-2X faster than the previous version.
**v1.6:**
* BasicZoneManager and ExtendedZoneManager can take an optional
LinkRegistry which will be searched if a zoneId is not found. The
`BasicZoneManager::createForZoneId(link)` benchmark shows that if the zoneId
is not found, the total search time is roughly double, because the
LinkRegistry must be searched as a fallback.
* On some compilers, the `BasicZoneManager::createForZoneName(binary)` became
slightly slower (~10%?) because the algorithm was moved into the
`ace_common::binarySearchByKey()` template function, and the compiler is not
able to optimize the resulting function as well as the hand-rolled version.
The slight decrease in speed seemed an acceptable cost to reduce duplicate code
maintenance.
**v1.7.2:**
* `SystemClock::clockMillis()` became non-virtual after incorporating
AceRoutine v1.3. The sizeof `SystemClockLoop` and `SystemClockCoroutine`
decreases by 4 bytes on AVR, and 4-8 bytes on 32-bit processors. No significant
changes in CPU time.
**v1.7.5:**
* significant changes to size of `ExtendedZoneProcessor`
* 8-bit processors
* increases by 24 bytes on AVR, due to adding 1 pointer and 2
`uint16_t` to MatchingEra
* decreases by 48 bytes on AVR, by disabling
`originalTransitionTime` unless
`ACE_TIME_EXTENDED_ZONE_PROCESSOR_DEBUG` is enabled.
* 32-bit processors
* increases by 32 bytes on 32-bit processors due to adding
a pointer and 2 `uint16_t` to MatchingEra
* decreases by 32 bytes on 32-bit processors due to disabling
`originalTransitionTime` in Transition
* Upgrade ESP8266 Core from 2.7.4 to 3.0.2.
* AutoBenchmark indicates that things are a few percent faster.
**v1.8.0:**
* Remove `sizeof()` Clock classes which were moved to AceTimeClock library.
* No significant changes to execution times of various benchmarks.
**v1.9.0:**
* Extract `BasicZoneProcessorCache<SIZE>` and `ExtendedZoneProcessorCache<SIZE>`
from `BasicZoneManager` and `ExtendedZoneManager`. Remove all pure `virtual`
methods from `ZoneManager`, making ZoneManager hierarchy non-polymorphic.
* Saves 1100-1300 of flash on AVR.
* No significant changes to CPU performance.
**v1.10.0:**
* Remove support for SAMD21 boards.
* Arduino IDE 1.8.19 with SparkFun SAMD 1.8.6 can no longer upload binaries
to these boards. Something about bossac 1.7.0 not found.
* Upgrade tool chain:
* Arduino IDE from 1.8.13 to 1.8.19
* Arduino AVR from 1.8.3 to 1.8.4
* STM32duino from 2.0.0 to 2.2.0
* ESP32 from 1.0.6 to 2.0.2
* Teensyduino from 1.55 to 1.56
* Add benchmarks for `ZonedDateTime::forComponents()`.
* Add support for `fold` parameter in `LocalDateTime`, `OffsetDateTime`,
`ZonedDateTime`, and `ExtendedZoneProcessor`.
* The `ZonedDateTime::forComponents()` can be made much faster using 'fold'.
* We know exactly when we must normalize and when we can avoid
normalization.
* 5X faster on AVR processors when cached, and
* 1.5-3X faster on 32-bit processors.
**v1.11.0:**
* Upgrade ZoneInfo database so that Links are symbolic links to Zones, instead
of hard links to Zones.
* No significant changes to CPU benchmarks.
**v1.11.5**
* Upgrade tool chain
* Arduino CLI from 0.20.2 to 0.27.1
* Arduino AVR Core from 1.8.4 to 1.8.5
* STM32duino from 2.2.0 to 2.3.0
* ESP32 Core from 2.0.2 to 2.0.5
* Teensyduino from 1.56 to 1.57
* Upgrade TZDB from 2022b to 2022d
**v2.0**
* Use `int16_t` year fields.
* Implement adjustable epoch year.
* Upgrade to TZDB 2022f.
* AVR:
* sizeof(LocalDate) increases from 3 to 4
* sizeof(BasicZoneProcessor) increases from 116 to 122
* sizeof(ExtendedZoneProcessor) increases from 436 to 468
* sizeof(TransitionStorage) increases from 340 to 364
* ZonedDateTime::forEpochSeconds() slower by 5-10%
* ESP8266
* sizeof(LocalDate) increases from 3 to 4
* sizeof(BasicZoneProcessor) remains at 164
* sizeof(ExtendedZoneProcessor) increases from 540 to 588
* sizeof(TransitionStorage) increases from 420 to 452
* ZonedDateTime::forEpochSeconds() slower by 0-10%
**v2.1.1**
* Upgrade to TZDB 2022g.
* Add `ZonedExtra`.
* Unify fat and symbolic links.
* Not much difference in execution times, except:
* `ZonedDateTime::forComponents()` using the `BasicZoneProcessor`
becomes ~50% slower due to the extra work needed to resolve gaps and
overlaps.
* `ZonedDateTime::forEpochSeconds()` using `BasicZoneProcessors` remains
unchanged.
* `ExtendedZoneProcessor` is substantially faster on AVR processors.
Maybe it should be recommended over `BasicZoneProcessor` even on AVR.
**v2.2.0**
* Upgrade tool chain
* Arduino AVR from 1.8.5 to 1.8.6
* STM32duino from 2.3.0 to 2.4.0
* ESP8266 from 3.0.2 to 3.1.2 failed, reverted back to 3.0.2
* ESP32 from 2.0.5 to 2.0.7
* Add support for Seeed XIAO SAMD21
* Seeeduino 1.8.3
* Upgrade to TZDB 2023b
**v2.2.2**
* Upgrade to TZDB 2023c
**v2.2.3**
* Add support for Adafruit ItsyBitsy M4
* Using Adafruit SAMD Boards 1.7.11
* Remove Teensy 3.2
* Nearing end of life. Moved to Tier 2 (should work).
* Upgrade tool chain
* Seeeduino SAMD Boards 1.8.4
* STM32duino Boards 2.5.0
* ESP32 Boards 2.0.9
**v2.3.0**
* Add benchmarks for `CompleteZoneProcessor` and related classes
* Replace labels of `BasicZoneManager::createForXxx()` with
`BasicZoneRegistrar::findIndexForXxx()`, because those are the methods which
are actually being tested.
## Arduino Nano
* 16MHz ATmega328P
* Arduino IDE 1.8.19, Arduino CLI 0.33.0
* Arduino AVR Boards 1.8.6
```
{nano_results}
```
## Sparkfun Pro Micro
* 16 MHz ATmega32U4
* Arduino IDE 1.8.19, Arduino CLI 0.33.0
* SparkFun AVR Boards 1.1.13
```
{micro_results}
```
## Seeed Studio XIAO SAMD21
* SAMD21, 48 MHz ARM Cortex-M0+
* Arduino IDE 1.8.19, Arduino CLI 0.33.0
* Seeeduino 1.8.4
```
{samd21_results}
```
## STM32 Blue Pill
* STM32F103C8, 72 MHz ARM Cortex-M3
* Arduino IDE 1.8.19, Arduino CLI 0.33.0
* STM32duino 2.5.0
```
{stm32_results}
```
## Adafruit ItsyBitsy M4 SAMD51
* SAMD51, 120 MHz ARM Cortex-M4
* Arduino IDE 1.8.19, Arduino CLI 0.33.0
* Adafruit SAMD 1.7.11
```
{samd51_results}
```
## ESP8266
* NodeMCU 1.0 clone, 80MHz ESP8266
* Arduino IDE 1.8.19, Arduino CLI 0.33.0
* ESP8266 Boards 3.0.2
```
{esp8266_results}
```
## ESP32
* ESP32-01 Dev Board, 240 MHz Tensilica LX6
* Arduino IDE 1.8.19, Arduino CLI 0.33.0
* ESP32 Boards 2.0.9
```
{esp32_results}
```
Note: Once the benchmark of the function under test becomes smaller than the
duration of an empty loop, the numbers become unreliable.
""")
|
import pdb
import scipy
import numpy
import pygame
from pygame import display
from pygame.draw import *
from pygame import Color
import math
def barGraph(data):
"""
drawing contains (x, y, width, height)
"""
def f(surface, rectangle):
x0, y0, W, H = rectangle
try:
l = len(data)
except:
pdb.set_trace()
w = W / l
try:
for i in range(0, l):
h = data[i]
c = Color(0, 0, 0, 0)
c.hsva = (0, 100, 100, 0)
x = x0 + i * w
y = y0 + H * (1 - h)
rect(surface, c, \
(x, y, 0.9 * w, h * H))
except:
pdb.set_trace()
return f
def boopGraph(data):
def f(surface, rectangle):
x0, y0, W, H = rectangle
try:
l = len(data)
except:
pdb.set_trace()
dx = W / l
try:
for i in range(0, l):
d = data[i]
a = dx * d
x = (dx - a) / 2 + i * dx + x0
y = (H - dx) / 2 + (dx - a) / 2 + y0
c = Color(255, 255, 255, 255)
rect(surface, c, \
(x, y, a, a))
except:
pdb.set_trace()
return f
def circleRays(surface, center, data, transform=lambda y: scipy.log(y + 1)):
x0, y0 = center
total = math.radians(360)
l = len(data)
m = transform(max(data))
part = total/l
for i in range(0, l):
if m > 0:
p = transform(data[i])
h = p * 5
hue = p / m
c = Color(0, 0, 0, 0)
c.hsva = ((1-hue) * 360, 100, 100, 0)
x = x0 + (m*2+h)*math.cos(part * i)
y = y0 + (m*2+h)*math.sin(part*i)
line(surface, c,
(x0,y0),(x,y),1)
circle(surface,c, center,int(m*2),0)
def graphsGraphs(graphs, direction=0):
def f(surface, bigRect):
x0, y0, W, H = bigRect
h = H / len(graphs)
for graph in graphs:
graph(surface, (x0, y0, W, h))
y0 += h
return f
|
#I pledge my Honor I have abided by the Stevens Honor System. Elaine Kooiker
#Problem One; The Body Mass Index (BMI) is calculated as a person’s weight (in pounds) times 720,
#divided by the square of the person’s height (in inches).
#A BMI in the range of 19 to 25 (inclusive) is considered healthy.
#Write a program which calculates a person’s BMI and prints a message stating whether the person is above, within, or below the healthy range.
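# Worked example (added for illustration): a person weighing 150 pounds with a height
# of 65 inches has BMI = (150 * 720) / 65**2 = 108000 / 4225 ≈ 25.56,
# which falls just above the healthy 19-25 range.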
import math
def get_BMI(weight, height):
BMI=(weight*720)/(height**2)
return BMI
def define_BMI(BMI):
if (19<=BMI<=25):
statement="You are within a healthy BMI range."
elif (BMI>25):
statement="Your BMI is above a healthy range."
    elif (BMI < 19):
statement="Your BMI is below a healthy range."
return statement
def main():
weight=float(input("Enter your weight in pounds: "))
height=float(input("Enter your height in inches: "))
BMI=get_BMI(weight, height)
print(define_BMI(BMI))
main()
|
populationGrowthA = 80000
populationGrowthB = 200000
countYearPopulationGrowthA = 0
countYearPopulationGrowthB = 0
analysisPeriod = 100
#countYearPopulationGrowthA < analysisPeriod
while(populationGrowthB >= populationGrowthA):
populationGrowthA = populationGrowthA + (populationGrowthA * 0.3)
populationGrowthB = populationGrowthB + (populationGrowthB * 0.015)
countYearPopulationGrowthA += 1
countYearPopulationGrowthB += 1
print("A partir de {} ano(s) a população A é maior ou igual que a população B".format(
countYearPopulationGrowthA))
|
from django.contrib import admin
from projects.models import Dependency, Project
class ProjectDependencyInline(admin.TabularInline):
model = Project.dependencies.through
class ProjectAdmin(admin.ModelAdmin):
inlines = [ProjectDependencyInline]
admin.site.register(Dependency)
admin.site.register(Project, ProjectAdmin)
|
from apps.settings.base import *
DEBUG = False
|
import logging
from flask import request
from flask_restx import Resource, marshal
from main.util.decorator import admin_token_required, token_required
from ..util.dto import UserDto, fail_response
from ..service.user_service import UserService
from ..worker.tasks import add as task_add
LOG = logging.getLogger("app")
api = UserDto.api
parser = api.parser()
parser.add_argument(
"Authorization", type=str, location="headers", help="Bearer Access Token", required=True
)
user_service = UserService(api)
@api.route("/")
class UserList(Resource):
@api.doc("list_of_registered_users")
@admin_token_required(api)
@api.marshal_list_with(UserDto.user_response_fields, envelope="data")
def get(self):
"""List all registered users"""
LOG.error("=================---------------------- getting all user..")
LOG.warning("=================---------------------- getting all user..")
call_celery_task()
return user_service.get_all_users()
@api.doc("create a new user")
@api.expect(UserDto.user_register_fields, validate=True)
@api.marshal_with(UserDto.user_response_fields, code=201, description="User created")
    # @api.response(201, "User created", _user) # NOTE: if using marshal_with and this line it will add 2 response codes
# Reference: https://flask-restplus.readthedocs.io/en/0.7.0/documenting.html#documenting-with-the-api-response-decorator
def post(self):
"""Creates a new User"""
LOG.info("=================---------------------- creating user..")
call_celery_task()
data = request.json
return user_service.save_new_user(data=data), 201
@api.route("/<public_id>")
@api.param("public_id", "The User identifier")
@api.response(404, "User not found.")
class User(Resource):
@api.doc("get a user", expect=[parser])
@token_required(api) # NOTE: this line must be above marshal_with below
@api.marshal_with(UserDto.user_response_fields)
def get(self, public_id):
"""get a user by public id"""
LOG.error("=================--------------------- get a user..")
user = user_service.get_a_user(public_id)
if not user:
api.abort(404, "User not found.")
else:
return user
@api.doc(params={"param1": "description of param1", "param2": "description of param2"})
@api.response(200, "Success", fail_response(api))
@api.response(400, "Validation Error", fail_response(api))
@api.route("/publish")
class Publish(Resource):
def get(self):
call_celery_task()
return "this is publish api"
def call_celery_task():
# NOTE: when there is no running worker, comment below line to avoid hang when call api
result = task_add.apply_async((1, 2))
LOG.info("CELERY Task finished? : {}".format(result.ready()))
LOG.info("CELERY Task result: {}".format(result.result))
|
def sec_lar(a, b, c):
if a==b and b==c:
return -1
elif (a==b and b>c) or (a>c and b==c) or (a>c and c>b) or (b>c and c>a):
return c
elif (b==c and c>a) or (b>a and a==c) or (b>a and a>c) or (c>a and a>b):
return a
    elif (a==c and a>b) or (c>b and a==b) or (c>b and b>a) or (a>b and b>c):
return b
else:
return -1
# Taking how many test cases to proceed
print("No of test cases: ", end="")
t = int(input())
for i in range(t):
# taking three numbers as input in one line
x, y, z = [int(n) for n in input("Enter three value (seperated by space): ").split()]
ans = sec_lar(x, y, z)
if ans != -1:
print("Second largest =", ans)
else:
print("There is no second largest number.")
|
#!/usr/bin/env python
import socket
remoteServer = raw_input("Enter a remote host to scan: ")
if socket.has_ipv6:
print "Has IPv6"
if not socket.has_ipv6:
print "Does Not Have IPv6"
|
#I pledge my honor I have abided by the Stevens Honor System - Tyson Werner
weight = float(input("What is your weight in pounds: "))
height = float(input("What is your height in inches: "))
BMI = (weight * 720)/(height*height)
if BMI < 19:
print("below healthy BMI range")
elif 19 <= BMI <=25:
print("within healthy BMI range")
elif BMI > 25:
print("above healthy BMI range")
|
from abc import ABCMeta, abstractmethod

import docker
class AbstractInfrastructureManager(object):
__metaclass__ = ABCMeta
def __init__(self):
"""TODO"""
self.client = docker.from_env()
self.overlay_opt_dict = {}
@abstractmethod
def init_nfvi(self):
"""
:return:
"""
return
@abstractmethod
def get_name(self):
"""
:return:
"""
return
@abstractmethod
def get_type(self):
"""
:return:
"""
return
@abstractmethod
def get_service(self, name):
"""
:param name:
:return:
"""
return
@abstractmethod
def get_network_config(self, name):
"""
:param name:
:return:
"""
return
@abstractmethod
def get_vdu(self, name):
"""
:param name:
:return:
"""
return
@abstractmethod
def get_VDUs_instances(self):
"""
:return:
"""
return
@abstractmethod
def create_network(self, name, protocol_type):
"""
:param name:
:param protocol_type:
:return:
"""
return
@abstractmethod
def deploy_VDU(self, name,
sw_image,
networks,
placement_policy,
mode="replicated",
replicas=1):
"""
:param name:
:param sw_image:
:param networks:
:param placement_policy:
:param mode:
:param replicas:
:return:
"""
return
    #TODO: methods to get information on resources
|
code1, units1, price1 = input().split()
code2, units2, price2 = input().split()
value1 = int(units1)*float(price1)
value2 = int(units2)*float(price2)
print('VALOR A PAGAR: R$ {:.2f}'.format(value1+value2))
|
import numpy as np
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem
from rdkit.Chem.Crippen import MolLogP
from rdkit.Chem.rdMolDescriptors import CalcTPSA
def read_data(filename):
f = open(filename + '.smiles', 'r')
contents = f.readlines()
smiles = []
labels = []
for i in contents:
smi = i.split()[0]
label = int(i.split()[2].strip())
smiles.append(smi)
labels.append(label)
num_total = len(smiles)
rand_int = np.random.randint(num_total, size=(num_total,))
return np.asarray(smiles)[rand_int], np.asarray(labels)[rand_int]
def read_ZINC(num_mol):
f = open('ZINC.smiles', 'r')
contents = f.readlines()
smi = []
fps = []
logP = []
tpsa = []
for i in range(num_mol):
smi = contents[i].strip()
m = Chem.MolFromSmiles(smi)
fp = AllChem.GetMorganFingerprintAsBitVect(m,2)
arr = np.zeros((1,))
DataStructs.ConvertToNumpyArray(fp,arr)
fps.append(arr)
logP.append(MolLogP(m))
tpsa.append(CalcTPSA(m))
fps = np.asarray(fps)
logP = np.asarray(logP)
tpsa = np.asarray(tpsa)
return fps, logP, tpsa
|
#!/usr/bin/python
import os
import sys
import shutil
import tarfile
import StringIO
import unittest
from mic import chroot
TEST_CHROOT_LOC = os.path.join(os.getcwd(), 'chroot_fixtures')
TEST_CHROOT_TAR = os.path.join(TEST_CHROOT_LOC, 'minchroot.tar.gz')
TEST_CHROOT_DIR = os.path.join(TEST_CHROOT_LOC, 'minchroot')
def suite():
return unittest.makeSuite(ChrootTest)
class ChrootTest(unittest.TestCase):
def setUp(self):
tar = tarfile.open(TEST_CHROOT_TAR, "r:gz")
tar.extractall(path=TEST_CHROOT_LOC)
self.chrootdir = TEST_CHROOT_DIR
self.stdout = sys.stdout
self.stderr = sys.stderr
sys.stdout = StringIO.StringIO()
sys.stderr = StringIO.StringIO()
def tearDown(self):
sys.stdout = self.stdout
sys.stderr = self.stderr
shutil.rmtree(TEST_CHROOT_DIR, ignore_errors=True)
def testChroot(self):
try:
chroot.chroot(TEST_CHROOT_DIR, None, 'exit')
except Exception, e:
raise self.failureException(e)
if __name__ == "__main__":
unittest.main()
|
# inspired by https://graph-tool.skewed.de/performance
# pgp.xml from:
# Richters O, Peixoto TP (2011) Trust Transitivity in Social Networks. PLoS ONE 6(4): e18384.
# doi 10.1371/journal.pone.0018384
import benchutil
import os
benchutil.add_external_path("networkx-1.11")
benchutil.add_external_path("decorator-4.0.10/src")
import networkx
def func():
g = networkx.read_graphml(os.path.join(benchutil.thisdir, "pgp.xml"))
networkx.shortest_path_length(g, 'n0')
networkx.pagerank(g, alpha=0.85, tol=1e-3, max_iter=10000000)
networkx.core.core_number(g)
u = g.to_undirected()
networkx.minimum_spanning_tree(u)
if __name__ == '__main__':
benchutil.main(func)
#print "betweenness_centrality"
#networkx.betweenness_centrality(g)
#
#print "edge_betweenness_centrality"
#networkx.edge_betweenness_centrality(g)
|
import requests
import json
url = "http://www.kuaidi.com/index-ajaxselectcourierinfo-1109966897738-ems-UUCAO1608710624586.html"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36"
}
s = requests.session()
r = s.get(url, headers=headers, verify=False)
result = r.json()
data = result["data"]
print(data)
print(data[0])
get_result = data[0]['context']
print(get_result)
if u"已签收" in get_result:
print("快递单已签收成功")
else:
print("未签收")
|
from datetime import date
AnoAtual = date.today().year
MaiorIdade = 0
MenorIdade = 0
for c in range(1,8):
    pessoa = int(input('Enter the year in which person {} was born: '.format(c)))
if AnoAtual - pessoa >= 18:
MaiorIdade += 1
else:
MenorIdade += 1
print('In total there were {} people of legal age (18+)'.format(MaiorIdade))
print('In total there were {} people under 18'.format(MenorIdade))
|
from heapq import heapify, heappush, heappop
from itertools import count
from codes import codes
def huffman(seq, frq):
num = count()
trees = list(zip(frq, num, seq))
heapify(trees)
while len(trees) > 1:
fa, _, a = heappop(trees)
fb, _, b = heappop(trees)
n = next(num)
heappush(trees, (fa + fb, n, [a, b]))
return trees[0][-1]
def main():
seq = "abcdefghi"
frq = [4, 5, 6, 9, 11, 12, 15, 16, 20]
a = huffman(seq, frq)
print(a)
print(list(codes(a)))
if __name__ == "__main__":
main()
|
import caffe
import numpy as np
import argparse, pprint
from multiprocessing import Pool
import scipy.misc as scm
from os import path as osp
import my_pycaffe_io as mpio
import my_pycaffe as mp
from easydict import EasyDict as edict
import time
import glog
import pdb
try:
import cv2
except:
print('OPEN CV not found, resorting to scipy.misc')
IM_DATA = []
def image_reader(args):
imName, imDims, cropSz, imNum, isGray, isMirror = args
x1, y1, x2, y2 = imDims
im = cv2.imread(imName)
im = cv2.resize(im[y1:y2, x1:x2, :],
(cropSz, cropSz))
if isMirror and np.random.random() >= 0.5:
im = im[:,::-1,:]
im = im.transpose((2,0,1))
#glog.info('Processed')
return (im, imNum)
def image_reader_list(args):
outList = []
for ag in args:
imName, imDims, cropSz, imNum, isGray, isMirror = ag
x1, y1, x2, y2 = imDims
im = cv2.imread(imName)
im = cv2.resize(im[y1:y2, x1:x2, :],
(cropSz, cropSz))
if isMirror and np.random.random() >= 0.5:
im = im[:,::-1,:]
outList.append((im.transpose((2,0,1)), imNum))
#glog.info('Processed')
return outList
def image_reader_scm(args):
imName, imDims, cropSz, imNum, isGray, isMirror = args
x1, y1, x2, y2 = imDims
im = scm.imread(imName)
im = scm.imresize(im[y1:y2, x1:x2, :],
(cropSz, cropSz))
if isMirror and np.random.random() >= 0.5:
im = im[:,::-1,:]
im = im[:,:,[2,1,0]].transpose((2,0,1))
#glog.info('Processed')
return (im, imNum)
class PythonWindowDataLayer(caffe.Layer):
@classmethod
def parse_args(cls, argsStr):
parser = argparse.ArgumentParser(description='Python Window Data Layer')
parser.add_argument('--source', default='', type=str)
parser.add_argument('--root_folder', default='', type=str)
parser.add_argument('--mean_file', default='', type=str)
parser.add_argument('--batch_size', default=128, type=int)
parser.add_argument('--crop_size', default=192, type=int)
parser.add_argument('--is_gray', dest='is_gray', action='store_true')
parser.add_argument('--no-is_gray', dest='is_gray', action='store_false')
parser.add_argument('--resume_iter', default=0, type=int)
args = parser.parse_args(argsStr.split())
print('Using Config:')
pprint.pprint(args)
return args
def load_mean(self):
self.mu_ = None
if len(self.param_.mean_file) > 0:
            #Mean is assumed to be in BGR format
self.mu_ = mp.read_mean(self.param_.mean_file)
self.mu_ = self.mu_.astype(np.float32)
ch, h, w = self.mu_.shape
assert (h >= self.param_.crop_size and w >= self.param_.crop_size)
y1 = int(h/2 - (self.param_.crop_size/2))
x1 = int(w/2 - (self.param_.crop_size/2))
y2 = int(y1 + self.param_.crop_size)
x2 = int(x1 + self.param_.crop_size)
self.mu_ = self.mu_[:,y1:y2,x1:x2]
def setup(self, bottom, top):
self.param_ = PythonWindowDataLayer.parse_args(self.param_str)
self.wfid_ = mpio.GenericWindowReader(self.param_.source)
self.numIm_ = self.wfid_.numIm_
self.lblSz_ = self.wfid_.lblSz_
if self.param_.is_gray:
self.ch_ = 1
else:
self.ch_ = 3
top[0].reshape(self.param_.batch_size, self.numIm_ * self.ch_,
self.param_.crop_size, self.param_.crop_size)
top[1].reshape(self.param_.batch_size, self.lblSz_, 1, 1)
self.load_mean()
#Skip the number of examples so that the same examples
#are not read back
if self.param_.resume_iter > 0:
N = self.param_.resume_iter * self.param_.batch_size
            N = np.mod(N, self.wfid_.num_)
            for n in range(N):
                _, _ = self.wfid_.read_next()
def forward(self, bottom, top):
t1 = time.time()
tIm, tProc = 0, 0
for b in range(self.param_.batch_size):
if self.wfid_.is_eof():
self.wfid_.close()
self.wfid_ = mpio.GenericWindowReader(self.param_.source)
print ('RESTARTING READ WINDOW FILE')
imNames, lbls = self.wfid_.read_next()
#Read images
for n in range(self.numIm_):
#Load images
imName, ch, h, w, x1, y1, x2, y2 = imNames[n].strip().split()
imName = osp.join(self.param_.root_folder, imName)
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
tImSt = time.time()
                im, _ = image_reader((imName, (x1, y1, x2, y2), self.param_.crop_size, 0, self.param_.is_gray, False))
tImEn = time.time()
tIm += (tImEn - tImSt)
#Process the image
if self.mu_ is not None:
im = im - self.mu_
#Feed the image
cSt = n * self.ch_
cEn = cSt + self.ch_
top[0].data[b,cSt:cEn, :, :] = im.astype(np.float32)
tEn = time.time()
tProc += (tEn - tImEn)
#Read the labels
top[1].data[b,:,:,:] = lbls.reshape(self.lblSz_,1,1).astype(np.float32)
t2 = time.time()
print ('Forward: %fs, Reading: %fs, Processing: %fs' % (t2-t1, tIm, tProc))
def backward(self, top, propagate_down, bottom):
""" This layer has no backward """
pass
def reshape(self, bottom, top):
""" This layer has no reshape """
pass
class WindowLoader(object):
def __init__(self, root_folder, batch_size, channels,
crop_size, mu=None, poolsz=None):
self.root_folder = root_folder
self.batch_size = batch_size
self.ch = channels
self.crop_size = crop_size
self.mu = mu
self.pool_ = poolsz
def load_images(self, imNames, jobid):
imData = np.zeros((self.batch_size, self.ch,
self.crop_size, self.crop_size), np.float32)
for b in range(self.batch_size):
#Load images
imName, ch, h, w, x1, y1, x2, y2 = imNames[b].strip().split()
imName = osp.join(self.root_folder, imName)
#Gives BGR
im = cv2.imread(imName)
#Process the image
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
im = cv2.resize(im[y1:y2, x1:x2, :],
(self.crop_size, self.crop_size))
im = im.transpose((2,0,1))
imData[b,:, :, :] = im
#Subtract the mean if needed
if self.mu is not None:
imData = imData - self.mu
imData = imData.astype(np.float32)
return jobid, imData
def _load_images(args):
self, imNames, jobId = args
return self.load_images(imNames, jobId)
##
#Parallel version
class PythonWindowDataParallelLayer(caffe.Layer):
@classmethod
def parse_args(cls, argsStr):
parser = argparse.ArgumentParser(description='PythonWindowDataParallel Layer')
parser.add_argument('--source', default='', type=str)
parser.add_argument('--root_folder', default='', type=str)
parser.add_argument('--mean_file', default='', type=str)
parser.add_argument('--batch_size', default=128, type=int)
parser.add_argument('--crop_size', default=192, type=int)
parser.add_argument('--is_gray', dest='is_gray', action='store_true')
parser.add_argument('--no-is_gray', dest='is_gray', action='store_false')
parser.add_argument('--is_mirror', dest='is_mirror', action='store_true', default=False)
parser.add_argument('--resume_iter', default=0, type=int)
parser.add_argument('--jitter_pct', default=0, type=float)
parser.add_argument('--jitter_amt', default=0, type=int)
parser.add_argument('--ncpu', default=2, type=int)
args = parser.parse_args(argsStr.split())
print('Using Config:')
pprint.pprint(args)
return args
def __del__(self):
self.wfid_.close()
        for n in range(self.numIm_):
            self.pool_[n].terminate()
def load_mean(self):
self.mu_ = None
if len(self.param_.mean_file) > 0:
            #Mean is assumed to be in BGR format
self.mu_ = mp.read_mean(self.param_.mean_file)
self.mu_ = self.mu_.astype(np.float32)
ch, h, w = self.mu_.shape
assert (h >= self.param_.crop_size and w >= self.param_.crop_size)
y1 = int(h/2 - (self.param_.crop_size/2))
x1 = int(w/2 - (self.param_.crop_size/2))
y2 = int(y1 + self.param_.crop_size)
x2 = int(x1 + self.param_.crop_size)
self.mu_ = self.mu_[:,y1:y2,x1:x2]
def setup(self, bottom, top):
self.param_ = PythonWindowDataParallelLayer.parse_args(self.param_str)
self.wfid_ = mpio.GenericWindowReader(self.param_.source)
self.numIm_ = self.wfid_.numIm_
self.lblSz_ = self.wfid_.lblSz_
self.isV2 = False
if self.param_.is_gray:
self.ch_ = 1
else:
self.ch_ = 3
top[0].reshape(self.param_.batch_size, self.numIm_ * self.ch_,
self.param_.crop_size, self.param_.crop_size)
top[1].reshape(self.param_.batch_size, self.lblSz_, 1, 1)
#Load the mean
self.load_mean()
#If needed to resume
if self.param_.resume_iter > 0:
N = self.param_.resume_iter * self.param_.batch_size
N = np.mod(N, self.wfid_.num_)
print ('SKIPPING AHEAD BY %d out of %d examples, BECAUSE resume_iter is NOT 0'\
% (N, self.wfid_.num_))
for n in range(N):
_, _ = self.wfid_.read_next()
#Create the pool
self.pool_, self.jobs_ = [], []
for n in range(self.numIm_):
self.pool_.append(Pool(processes=self.param_.ncpu))
self.jobs_.append([])
self.imData_ = np.zeros((self.param_.batch_size, self.numIm_ * self.ch_,
self.param_.crop_size, self.param_.crop_size), np.float32)
if 'cv2' in globals():
print('OPEN CV FOUND')
if self.isV2:
self.readfn_ = image_reader_list
else:
self.readfn_ = image_reader
else:
print('OPEN CV NOT FOUND, USING SCM')
self.readfn_ = image_reader_scm
#Launch the prefetching
self.launch_jobs()
self.t_ = time.time()
def get_jitter(self, coords):
dx, dy = 0, 0
if self.param_.jitter_amt > 0:
rx, ry = np.random.random(), np.random.random()
dx, dy = rx * self.param_.jitter_amt, ry * self.param_.jitter_amt
if np.random.random() > 0.5:
dx = - dx
if np.random.random() > 0.5:
dy = -dy
if self.param_.jitter_pct > 0:
h, w = [], []
for n in range(len(coords)):
x1, y1, x2, y2 = coords[n]
h.append(y2 - y1)
w.append(x2 - x1)
mnH, mnW = min(h), min(w)
rx, ry = np.random.random(), np.random.random()
dx, dy = rx * mnW * self.param_.jitter_pct, ry * mnH * self.param_.jitter_pct
if np.random.random() > 0.5:
dx = - dx
if np.random.random() > 0.5:
dy = -dy
return int(dx), int(dy)
def launch_jobs(self):
argList = []
for n in range(self.numIm_):
argList.append([])
self.labels_ = np.zeros((self.param_.batch_size, self.lblSz_,1,1),np.float32)
#Form the list of images and labels
for b in range(self.param_.batch_size):
if self.wfid_.is_eof():
self.wfid_.close()
self.wfid_ = mpio.GenericWindowReader(self.param_.source)
glog.info('RESTARTING READ WINDOW FILE')
imNames, lbls = self.wfid_.read_next()
self.labels_[b,:,:,:] = lbls.reshape(self.lblSz_,1,1).astype(np.float32)
#Read images
fNames, coords = [], []
for n in range(self.numIm_):
fName, ch, h, w, x1, y1, x2, y2 = imNames[n].strip().split()
fNames.append(osp.join(self.param_.root_folder, fName))
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
h, w = int(h), int(w) #needed as ints for the min() clamps when jittering the box below
coords.append((x1, y1, x2, y2))
#Computing jittering if required
dx, dy = self.get_jitter(coords)
for n in range(self.numIm_):
fName = fNames[n]
x1, y1, x2, y2 = coords[n]
#Jitter the box
x1 = max(0, x1 + dx)
y1 = max(0, y1 + dy)
x2 = min(w, x2 + dx)
y2 = min(h, y2 + dy)
#glog.info('%d, %d, %d, %d' % (x1, y1, x2, y2))
argList[n].append([fName, (x1,y1,x2,y2), self.param_.crop_size,
b, self.param_.is_gray, self.param_.is_mirror])
#Launch the jobs
for n in range(self.numIm_):
try:
#print (argList[n])
self.jobs_[n] = self.pool_[n].map_async(self.readfn_, argList[n])
except KeyboardInterrupt:
print('Keyboard Interrupt received - terminating in launch jobs')
self.pool_[n].terminate()
def get_prefetch_data(self):
for n in range(self.numIm_):
cSt = n * self.ch_
cEn = cSt + self.ch_
t1 = time.time()
try:
imRes = self.jobs_[n].get()
except:
print('Keyboard Interrupt received - terminating')
self.pool_[n].terminate()
#pdb.set_trace()
raise Exception('Error/Interrupt Encountered')
t2= time.time()
tFetch = t2 - t1
for res in imRes:
if self.mu_ is not None:
self.imData_[res[1],cSt:cEn,:,:] = res[0] - self.mu_
else:
self.imData_[res[1],cSt:cEn,:,:] = res[0]
#print ('%d, Fetching: %f, Copying: %f' % (n, tFetch, time.time()-t2))
#glog.info('%d, Fetching: %f, Copying: %f' % (n, tFetch, time.time()-t2))
def forward(self, bottom, top):
t1 = time.time()
tDiff = t1 - self.t_
#Load the images
self.get_prefetch_data()
top[0].data[...] = self.imData_
t2 = time.time()
tFetch = t2-t1
#Read the labels
top[1].data[:,:,:,:] = self.labels_
self.launch_jobs()
t2 = time.time()
#print ('Forward took %fs in PythonWindowDataParallelLayer' % (t2-t1))
glog.info('Prev: %f, fetch: %f forward: %f' % (tDiff,tFetch, t2-t1))
self.t_ = time.time()
def backward(self, top, propagate_down, bottom):
""" This layer has no backward """
pass
def reshape(self, bottom, top):
""" This layer has no reshape """
pass
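#A minimal, self-contained sketch of the prefetch pattern used above (standard
#multiprocessing API, builtin abs() as a stand-in worker): map_async launches the
#next batch in the background, and .get() blocks only when the data is needed.
if __name__ == '__main__':
from multiprocessing import Pool
pool = Pool(processes=2)
job = pool.map_async(abs, range(-4, 4)) #like launch_jobs()
print(job.get()) #like get_prefetch_data()
pool.close()
pool.join()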
|
#-coding:utf8-
#https://www.zhihu.com/question/25949022
# Normality test and similar checks; for the KS test the null hypothesis H0 is that the data are normally distributed
import numpy as np
from scipy.stats import kstest
x = np.linspace(-15, 15, 3)
print(kstest(x, 'norm'))
#In the returned result, the second value p-value=0.76584491300591395 is larger than the chosen significance level (say 5%), so we cannot reject the hypothesis that x follows a normal distribution.
#If p < 0.05, the hypothesis can be rejected.
#Question: how should the first value D be read, and what is it used for?
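#The first returned value D answers the question above: it is the Kolmogorov-Smirnov
#statistic, the largest absolute gap between the sample's empirical CDF and the
#reference normal CDF; a larger D (and hence smaller p) means a worse fit to the normal.
#A minimal sketch with simulated data (uses only numpy/scipy as imported above):
print(kstest(np.random.normal(size=1000), 'norm')) #small D, large p -> cannot reject normality
print(kstest(np.random.uniform(size=1000), 'norm')) #large D, tiny p -> reject normality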
|
# this file also contains 2 functions that belong to another project (the writing to txt files)
import xml.etree.ElementTree as ET
from Product import Product
class MakeData():
def __init__(self):
self.data_shufersal = []
self.data_market = []
self.data_ramilevi = []
self.file = open('C:\\Users\\yuval\\Desktop\\items.txt','w') # rami levi
self.filecode = open('C:\\Users\\yuval\\Desktop\\itemscode.txt', 'w') # rami levi
self.fileS = open('C:\\Users\\yuval\\Desktop\\itemss.txt','w') #shufersal
self.filecodeS = open('C:\\Users\\yuval\\Desktop\\itemscodes.txt', 'w') #shufersal
self.tree_shufersal = ET.parse('C:\\Users\\yuval\\Desktop\\stores\\shufersalNesher.txt')
self.root_shufersal = self.tree_shufersal.getroot()
self.tree_market = ET.parse('C:\\Users\\yuval\\Desktop\\stores\\market.xml')
self.root_market = self.tree_market.getroot()
self.tree_ramilevi = ET.parse('C:\\Users\\yuval\\Desktop\\stores\\ramilevi.xml')
self.root_ramilevi = self.tree_ramilevi.getroot()
def makeAll(self):
for i in self.root_shufersal[5]:
product = Product("shufersal", i[3].text, str(i[12].text), str(i[4].text), i[1].text)
self.data_shufersal.append(product)
for i in self.root_market[4]:
product = Product("market Store", i[3].text, i[12].text, str(i[4].text),i[1].text)
self.data_market.append(product)
for i in self.root_ramilevi[6]:
product = Product("rami levi Store", i[3].text, i[12].text, i[4].text, i[1].text)
self.data_ramilevi.append(product)
def writeToTxt(self): # rami levi
for i in range(len(self.data_ramilevi)):
pro_name = self.data_ramilevi[i].product_name
pro_code = self.data_ramilevi[i].item_code
self.file.write(pro_name +"\n")
self.filecode.write(pro_code + "\n")
self.filecode.close()
self.file.close()
def writeToTxtSufersal(self): # Shufersal
for i in range(len(self.data_shufersal)):
pro_name = self.data_shufersal[i].product_name
pro_code = self.data_shufersal[i].item_code
self.fileS.write(pro_name +"\n")
self.filecodeS.write(pro_code + "\n")
self.filecodeS.close()
self.fileS.close()
m = MakeData()
m.makeAll()
m.writeToTxtSufersal()
|
__author__ = "Narwhale"
# class Dog(object):
#
# def __init__(self,name):
# self.name = name
#
# def eat(self):
# print('%s is eating ....'%self.name)
#
# d = Dog('xiaohei')    # instantiate the object
# choice = input('>>>>:').strip()    # strip() removes leading/trailing whitespace
#
# if hasattr(d,choice):    # check whether instance d has an attribute named choice
# func = getattr(d,choice)    # getattr returns the attribute of d named choice
# func()    # call it
#
class Foo(object):
def __init__(self):
self.name = 'wupeiqi'
def func(self):
return 'func'
obj = Foo()
# #### Check whether members exist ####
hasattr(obj, 'name')
hasattr(obj, 'func')
# #### Get members ####
getattr(obj, 'name')
getattr(obj, 'func')
# #### Set members ####
setattr(obj, 'age', 18)
setattr(obj, 'show', lambda num: num + 1)
# #### Delete members ####
delattr(obj, 'name')
delattr(Foo, 'func')  # 'func' is defined on the class, so it is deleted from Foo; delattr(obj, 'func') would raise AttributeError
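# A minimal usage sketch of the members set above: the lambda attached with
# setattr is looked up by name via getattr and called like a normal method.
show = getattr(obj, 'show')
print(show(5))  # 6
print(getattr(obj, 'age'))  # 18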
|
### 3-1. multi-class classification ###
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy import io
import matplotlib.cm as cm
import random
import scipy.misc
## 1.1. Dataset ##
all_data = io.loadmat("d:/data/ex03/ex3data1.mat")
x_data = all_data['X']
y_data = all_data['y']
y_data = np.array(y_data).reshape(len(y_data), 1)
## 1.2 Visualizaing the data ##
def pixel(row):
width, height = 20, 20
square = row.reshape(width, height)
return square.T
def displayData(indices_to_display = None):
"""
Function that picks 100 random rows from X, creates a 20x20 image from each,
then stitches them together into a 10x10 grid of images, and shows it.
"""
width, height = 20, 20
nrows, ncols = 10, 10
if not indices_to_display:
indices_to_display = random.sample(range(x_data.shape[0]), nrows*ncols)
big_picture = np.zeros((height*nrows,width*ncols))
irow, icol = 0, 0
for idx in indices_to_display:
if icol == ncols:
irow += 1
icol = 0
iimg = pixel(x_data[idx])
big_picture[irow*height:irow*height+iimg.shape[0],icol*width:icol*width+iimg.shape[1]] = iimg
icol += 1
fig = plt.figure(figsize=(6,6))
img = scipy.misc.toimage( big_picture )
plt.imshow(img,cmap = cm.Greys_r)
displayData()
## 1.3 Vectorizing Logistic Regression ##
X = tf.placeholder(dtype=tf.float32, shape=[5000, 400])
Y = tf.placeholder(dtype=tf.float32, shape=[5000, 1]) # not one-hot --> 1~10
Y_one_hot = tf.one_hot(y_data, 10, dtype=tf.float32) # one hot method
Y_one_hot = tf.reshape(Y_one_hot, [-1, 10]) # collapse the extra one-hot dimension
W = tf.Variable(tf.random_normal([400, 10]), name='weight')
b = tf.Variable(tf.random_normal([10]), name='bias')
ld = 0.001 # lambda value for L2 regularization
# 1.3.2 Vectorizing the gradient #
logit = tf.matmul(X, W) + b
hypothesis = tf.nn.softmax(logit)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logit, labels=Y_one_hot))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.9)
#regularizer = tf.nn.l2_loss(W)
#cost_reg = cost + ld + regularizer
train = optimizer.minimize(cost)
#hypothesis = tf.sigmoid(tf.matmul(X, W) + b)
#cost = -tf.reduce_mean(tf.log(hypothesis) * tf.transpose(Y) - (1 - Y) * tf.log(1 - hypothesis))
#regularizer = tf.nn.l2_loss(W)
#cost_reg = cost + ld + regularizer
#optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.00001)
#train = optimizer.minimize(cost_reg)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
prediction = tf.argmax(hypothesis, axis=1)
correct_prediction = tf.equal(prediction, tf.argmax(Y_one_hot, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
for i in range(8000):
y_hat, _ , cost_val, w_val, acc = sess.run([hypothesis, train, cost, W, accuracy], feed_dict={X: x_data, Y: y_data})
if i % 300 == 0:
print("Step: {:5}\tLoss: {:.3f}\tAcc: {:.2%}".format(i, cost_val, acc))
sess.close()
|
"""
"""
__all__ = [ "base", "simulator" ]
|
import numpy as np
from sklearn import svm
from sklearn.metrics import classification_report
import csv
import talib as ta
import math
import pymysql
import operator
from functools import reduce
def train_test_split(array,ratio=0.8):
train=array[0:int(ratio*len(array))]
test=array[int(ratio*len(array)):]
return train,test
def get_data(filename):
with open(filename,'r') as csvfile:
reader = csv.reader(csvfile)
close= [row[3]for row in reader]
close.pop(0)
close=list(map(float, close))
close=np.array(close)
with open(filename,'r') as csvfile:
reader = csv.reader(csvfile)
high= [row[2]for row in reader]
high.pop(0)
high=list(map(float, high))
high=np.array(high)
with open(filename,'r') as csvfile:
reader = csv.reader(csvfile)
low= [row[4]for row in reader]
low.pop(0)
low=list(map(float, low))
low=np.array(low)
with open(filename,'r') as csvfile:
reader = csv.reader(csvfile)
vol= [row[5]for row in reader]
vol.pop(0)
vol=list(map(float, vol))
vol=np.array(vol)
with open(filename,'r') as csvfile:
reader = csv.reader(csvfile)
dopen= [row[1]for row in reader]
dopen.pop(0)
dopen=list(map(float, dopen))
dopen=np.array(dopen)
return high,low,dopen,close,vol
def normalization(array):
return (array-np.mean(array))/np.std(array)
def get_data_fromDB(tablename):
db = pymysql.connect(host='127.0.0.1', user='root', passwd='ZXY2001zxy*', db='stock', charset='utf8')
cursor = db.cursor()
sql_high="""select high from """+tablename+""";"""
cursor.execute(sql_high) #execute the SQL statement
high=cursor.fetchall() #fetch the data
#print(high)
high=np.array(high) #type conversion: tuple --> array (both are n-dimensional here)
#print(high)
sql_low="""select low from """+tablename+""";"""
cursor.execute(sql_low) #execute the SQL statement
low=cursor.fetchall()
low=np.array(low)
sql_dopen="""select open_ from """+tablename+""";"""
cursor.execute(sql_dopen)
dopen=cursor.fetchall()
dopen=np.array(dopen)
sql_close="""select close from """+tablename+""";"""
cursor.execute(sql_close)
close=cursor.fetchall()
close=np.array(close)
sql_vol="""select volume from """+tablename+""";"""
cursor.execute(sql_vol)
vol=cursor.fetchall()
vol=np.array(vol)
#flatten the multi-dimensional ndarrays to 1-D
high=np.ravel(high)
low=np.ravel(low)
dopen=np.ravel(dopen)
close=np.ravel(close)
vol=np.ravel(vol)
return high,low,dopen,close,vol
|
# This is a program to test the importing of modules
def greetings():
print ("Hello world!")
|
import re
from collections import defaultdict
import pandas as pd
from scipy import stats
import numpy as np
from rdflib import Namespace, Literal
from brickschema.namespaces import BRICK, A, OWL
# from brickschema.inference import BrickInferenceSession
from brickschema.inference import OWLRLAllegroInferenceSession
from brickschema.graph import Graph
import resolve_ui as ui
import distance
import recordlinkage
from recordlinkage.base import BaseCompareFeature
def graph_from_triples(triples):
g = Graph(load_brick=True)
# g.load_file("ttl/owl.ttl")
g.add(*triples)
sess = OWLRLAllegroInferenceSession()
return sess.expand(g)
def tokenize_string(s):
s = s.lower()
s = re.split(r'-| |_|#|/|:', s)
return s
def compatible_classes(graph, c1, c2):
"""
Returns true if the two classes are compatible (equal, or one is a subclass
of another), false otherwise
"""
q1 = f"ASK {{ <{c1}> owl:equivalentClass?/rdfs:subClassOf*/owl:equivalentClass? <{c2}> }}"
q2 = f"ASK {{ <{c2}> owl:equivalentClass?/rdfs:subClassOf*/owl:equivalentClass? <{c1}> }}"
return graph.query(q1)[0] or graph.query(q2)[0]
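# For example (hypothetical classes): compatible_classes(g, BRICK.Supply_Fan, BRICK.Fan)
# would be true because Supply_Fan subclasses Fan, while Supply_Fan vs. Room would be false.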
def trim_prefix_tokenized(names):
if len(names) <= 1:
return names
max_length = max(map(len, names))
pfx_size = 1
# increase pfx_size until it doesn't match, then reduce by 1 and trim
while pfx_size <= max_length:
pfx = names[0][:pfx_size]
if not all(map(lambda x: x[:pfx_size] == pfx, names[1:])):
pfx_size -= 1
return list([x[pfx_size:] for x in names])
pfx_size += 1
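# A minimal sanity-check sketch with hypothetical labels: the shared ['bldg', '1']
# prefix is trimmed, keeping only the tokens that distinguish the two entities.
assert trim_prefix_tokenized(
[['bldg', '1', 'rtu', '1'], ['bldg', '1', 'fan', '2']]
) == [['rtu', '1'], ['fan', '2']]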
# def trim_common_prefix(names):
# if len(names) <= 1:
# return names
# max_length = max(map(len, names))
# min_length = min(map(len, names))
# pfx_size = max_length
# while True:
# pfx = names[0][:pfx_size]
# # if prefix is common, we return
# if not all(map(lambda x: x[:pfx_size] == pfx, names[1:])):
# pfx_size = int(pfx_size / 2) + 1
#
# return list([x[:pfx_size] for x in names])
# ignore_brick_classes = [BRICK.Sensor, BRICK
class VectorJaccardCompare(BaseCompareFeature):
def _compute_vectorized(self, s1, s2):
s1 = list(s1)
s2 = list(s2)
sim = np.array([1-distance.jaccard(s1[i], s2[i])
for i in range(len(s1))])
return sim
class MaxLevenshteinMatch(BaseCompareFeature):
def _compute_vectorized(self, s1, s2):
# calculate pair-wise levenshtein
s1 = list(s1)
s2 = list(s2)
sim = np.array([distance.jaccard(s1[i], s2[i])
for i in range(len(s1))])
min_dist = np.min(sim)
sim = np.array([1 if x == min_dist and x > .8 else 0 for x in sim])
return sim
def cluster_on_labels(graphs):
# populates the following list; contains lists of URIs that are linked to
# be the same entity
clusters = []
# list of clustered entities
clustered = set()
datasets = []
for source, graph in graphs.items():
df = pd.DataFrame(columns=['label', 'uris'])
print(f"{'-'*5} {source} {'-'*5}")
res = graph.query("SELECT ?ent ?lab WHERE { \
{ ?ent rdf:type/rdfs:subClassOf* brick:Equipment } \
UNION \
{ ?ent rdf:type/rdfs:subClassOf* brick:Point } \
UNION \
{ ?ent rdf:type/rdfs:subClassOf* brick:Location } \
?ent brick:sourcelabel ?lab }")
# TODO: remove common prefix from labels?
labels = [tokenize_string(str(row[1])) for row in res
if isinstance(row[1], Literal)]
# labels = [l for l in labels if l != ["unknown"]]
labels = trim_prefix_tokenized(labels)
uris = [row[0] for row in res if isinstance(row[1], Literal)]
df['label'] = labels
df['uris'] = uris
datasets.append(df)
print("lengths", [len(df) for df in datasets])
if len(datasets) <= 1:
return clusters, clustered
indexer = recordlinkage.Index()
indexer.full()
candidate_links = indexer.index(*datasets)
comp = recordlinkage.Compare()
comp.add(VectorJaccardCompare('label', 'label', label='y_label'))
features = comp.compute(candidate_links, *datasets)
# use metric of '>=.9' because there's just one feature for now and it
# scales [0, 1]
matches = features[features.sum(axis=1) >= .9]
for idx_list in matches.index:
pairs = zip(datasets, idx_list)
entities = [ds['uris'].iloc[idx] for ds, idx in pairs]
for ent in entities:
clustered.add(str(ent))
clusters.append(entities)
return clusters, clustered
def cluster_on_type_alignment(graphs, clustered):
clusters = []
counts = defaultdict(lambda: defaultdict(set))
uris = {}
for source, graph in graphs.items():
res = graph.query("SELECT ?ent ?type ?lab WHERE { \
?ent rdf:type ?type .\
{ ?type rdfs:subClassOf+ brick:Equipment } \
UNION \
{ ?type rdfs:subClassOf+ brick:Point } \
UNION \
{ ?type rdfs:subClassOf+ brick:Location } \
?ent brick:sourcelabel ?lab }")
for row in res:
entity, brickclass, label = row
if entity in clustered:
continue
counts[brickclass][source].add(str(label))
uris[str(label)] = entity
for bc, c in counts.items():
mode_count = stats.mode([len(x) for x in c.values()]).mode[0]
candidates = [(src, list(ents)) for src, ents in c.items()
if len(ents) == mode_count]
if len(candidates) <= 1:
continue
print(f"class {bc} has {len(c)} sources with {mode_count} candidates each")
# short-circuit in the common case
if mode_count == 1:
cluster = [uris[ents[0]] for _, ents in candidates]
if cluster not in clusters:
clusters.append(cluster)
continue
datasets = [pd.DataFrame({'label': ents, 'uris': [uris[x] for x in ents]}) for (_, ents) in candidates]
indexer = recordlinkage.Index()
indexer.full()
candidate_links = indexer.index(*datasets)
comp = recordlinkage.Compare()
comp.add(MaxLevenshteinMatch('label', 'label', label='y_label'))
features = comp.compute(candidate_links, *datasets)
matches = features[features.sum(axis=1) == 1]
for idx_list in matches.index:
pairs = zip(datasets, idx_list)
entities = [ds['uris'].iloc[idx] for ds, idx in pairs]
for ent in entities:
clustered.add(str(ent))
if entities in clusters:
continue
clusters.append(entities)
return clusters, clustered
def merge_triples(triples, clusters):
# choose arbitrary entity as the canonical name
canonical = [([str(u) for u in c], list(c)[0]) for c in clusters]
def fix_triple(t):
for cluster, ent in canonical:
if t[0] in cluster:
return (ent, t[1], t[2])
return t
def cluster_for_entity(ent):
candidates = [c for c in clusters if ent in c]
# ent should be in at most one
if len(candidates) > 0:
return candidates[0]
return None
# replace = lambda x: [ent for (cluster, ent) in canonical if x in cluster][0]
triples = list(map(fix_triple, triples))
graph = graph_from_triples(triples)
print(len(graph))
for cluster in clusters:
pairs = zip(cluster[:-1], cluster[1:])
triples = [(a, OWL.sameAs, b) for (a, b) in pairs]
graph.add(*triples)
graph.g.serialize("before_inference.ttl", format="ttl")
sess = OWLRLAllegroInferenceSession()
graph = sess.expand(graph)
# check the inferred classes
# TODO: forward reasonable errors up to Python
res = graph.query("SELECT ?ent ?type WHERE { \
?ent rdf:type ?type .\
{ ?type rdfs:subClassOf+ brick:Equipment } \
UNION \
{ ?type rdfs:subClassOf+ brick:Point } \
UNION \
{ ?type rdfs:subClassOf+ brick:Location } \
?ent brick:sourcelabel ?lab }")
dis = ui.UserDisambiguation(graph)
entity_types = defaultdict(set)
for row in res:
ent, brickclass = row[0], row[1]
entity_types[ent].add(brickclass)
# TODO: handle this
"""
How to identify "bad" clusters:
1) if the types are incompatible
How to fix bad clusters:
- easy way: ask user to manually partition the bad clusters and recall the
merge_triples method (this method) with the new clusters
"""
# TODO: remove quantity kinds from this??
redo_clusters = []
for ent, classlist in entity_types.items():
classlist = list(classlist)
for (c1, c2) in zip(classlist[:-1], classlist[1:]):
if c1 == c2:
continue
if not compatible_classes(graph, c1, c2):
badcluster = cluster_for_entity(ent)
if badcluster is not None and dis.do_recluster(badcluster):
print("bad cluster", badcluster)
new_clusters = dis.recluster(badcluster)
redo_clusters.extend(new_clusters)
else:
# print("INCOMPATIBLE BUT NO CLUSTER?", ent, c1, c2)
# choose class and remove old triple
chosen = dis.ask([c1, c2], ent)
for c in [c1, c2]:
graph.g.remove((ent, A, c))
graph.g.add((ent, A, chosen))
break
# TODO: if any exception is thrown, need to recluster
if len(redo_clusters) > 0:
new_graph, new_canonical = merge_triples(triples, redo_clusters)
return new_graph + graph.g, new_canonical + canonical
else:
return graph.g, canonical
def resolve(records):
"""
Records with format {srcname: [list of triples], ...}
"""
graphs = {source: graph_from_triples(triples) \
for source, triples in records.items()}
clusters = []
clustered = set()
# TODO: due to limitation of the recordlinkage, can only compare *pairs*
# of graphs. Need to address this
# if len(graphs) == 2:
# new_clusters, clustered = cluster_on_labels(graphs)
# clusters.extend(new_clusters)
# for c in new_clusters:
# print(f"Label cluster: {c}")
new_clusters, clustered = cluster_on_type_alignment(graphs, clustered)
clusters.extend(new_clusters)
for c in new_clusters:
print(f"Type cluster: {c}")
# for cluster in clusters:
# print([str(x) for x in cluster])
all_triples = [t for triples in records.values() for t in triples]
# for t in all_triples:
# print(t)
# return
# graph, canonical
return merge_triples(all_triples, clusters)
if __name__ == '__main__':
BLDG = Namespace("http://building#")
records = {
'haystack': [
(BLDG['hay-rtu1'], A, BRICK['Rooftop_Unit']),
(BLDG['hay-rtu1'], BRICK.sourcelabel, Literal("my-cool-building RTU 1")),
(BLDG['hay-rtu2'], A, BRICK['Rooftop_Unit']),
(BLDG['hay-rtu2'], BRICK.sourcelabel, Literal("my-cool-building RTU 2")),
(BLDG['hay-rtu2-fan'], A, BRICK['Supply_Fan']),
(BLDG['hay-rtu2-fan'], BRICK.sourcelabel, Literal("my-cool-building RTU 2 Fan")),
(BLDG['hay-site'], A, BRICK['Site']),
(BLDG['hay-site'], BRICK.sourcelabel, Literal("my-cool-building")),
],
'buildingsync': [
(BLDG['bsync-ahu1'], A, BRICK['Air_Handler_Unit']),
(BLDG['bsync-ahu1'], BRICK.sourcelabel, Literal("AHU-1")),
(BLDG['bsync-ahu2'], A, BRICK['Air_Handler_Unit']),
(BLDG['bsync-ahu2'], BRICK.sourcelabel, Literal("AHU-2")),
(BLDG['bsync-site'], A, BRICK['Site']),
(BLDG['bsync-site'], BRICK.sourcelabel, Literal("my-cool-building")),
# (BLDG['BADENT'], A, BRICK['Room']),
# (BLDG['BADENT'], BRICK.sourcelabel, Literal("my-cool-building RTU 2 Fan")),
],
}
graph = resolve(records)
print(len(graph))
|
import os
from easydict import EasyDict as edict
cfg = edict()
cfg.PATH = edict()
cfg.PATH.DATA = ['/home/liuhaiyang/dataset/CUB_200_2011/images.txt',
'/home/liuhaiyang/dataset/CUB_200_2011/train_test_split.txt',
'/home/liuhaiyang/dataset/CUB_200_2011/images/']
cfg.PATH.LABEL = '/home/liuhaiyang/dataset/CUB_200_2011/image_class_labels.txt'
cfg.PATH.EVAL = ['/home/liuhaiyang/dataset/CUB_200_2011/images.txt',
'/home/liuhaiyang/dataset/CUB_200_2011/train_test_split.txt',
'/home/liuhaiyang/dataset/CUB_200_2011/images/']
cfg.PATH.TEST = '/home/liuhaiyang/liu_kaggle/cifar/dataset/cifar-10-batches-py/data_batch_1'
cfg.PATH.RES_TEST = './res_imgs/'
cfg.PATH.EXPS = './exps/'
cfg.PATH.NAME = 'reg32_cub_v1_cos'
cfg.PATH.MODEL = '/model.pth'
cfg.PATH.BESTMODEL = '/bestmodel.pth'
cfg.PATH.LOG = '/log.txt'
cfg.PATH.RESULTS = '/results/'
cfg.DETERMINISTIC = edict()
cfg.DETERMINISTIC.SEED = 60
cfg.DETERMINISTIC.CUDNN = True
cfg.TRAIN = edict()
cfg.TRAIN.EPOCHS = 60
cfg.TRAIN.BATCHSIZE = 8
cfg.TRAIN.L1SCALING = 100
cfg.TRAIN.TYPE = 'sgd'
cfg.TRAIN.LR = 1e-3
cfg.TRAIN.BETA1 = 0.9
cfg.TRAIN.BETA2 = 0.999
cfg.TRAIN.LR_TYPE = 'cos'
cfg.TRAIN.LR_REDUCE = [26,36]
cfg.TRAIN.LR_FACTOR = 0.1
cfg.TRAIN.WEIGHT_DECAY = 1e-4
cfg.TRAIN.NUM_WORKERS = 16
cfg.TRAIN.WARMUP = 0
cfg.TRAIN.LR_WARM = 1e-7
#-------- data aug --------#
cfg.TRAIN.USE_AUG = True
cfg.TRAIN.CROP = 224
cfg.TRAIN.PAD = 0
cfg.TRAIN.RESIZE = 300
cfg.TRAIN.ROATION = 30
cfg.MODEL = edict()
cfg.MODEL.NAME = 'regnet'
cfg.MODEL.IN_DIM = 3
cfg.MODEL.CLASS_NUM = 200
cfg.MODEL.USE_FC = True
cfg.MODEL.PRETRAIN = 'RegNetY-32GF'
cfg.MODEL.PRETRAIN_PATH = './exps/pretrain/'
cfg.MODEL.DROPOUT = 0
cfg.MODEL.LOSS = 'bce_only_g'
#-------- for resnet --------#
cfg.MODEL.BLOCK = 'bottleneck'
cfg.MODEL.BLOCK_LIST = [3,4,6,3]
cfg.MODEL.CONV1 = (7,2,3)
cfg.MODEL.OPERATION = 'B'
cfg.MODEL.STRIDE1 = 1
cfg.MODEL.MAX_POOL = True
cfg.MODEL.BASE = 64
#-------- for regnet --------#
cfg.MODEL.REGNET = edict()
cfg.MODEL.REGNET.STEM_TYPE = "simple_stem_in"
cfg.MODEL.REGNET.STEM_W = 32
cfg.MODEL.REGNET.BLOCK_TYPE = "res_bottleneck_block"
cfg.MODEL.REGNET.STRIDE = 2
cfg.MODEL.REGNET.SE_ON = True
cfg.MODEL.REGNET.SE_R = 0.25
cfg.MODEL.REGNET.BOT_MUL = 1.0
cfg.MODEL.REGNET.DEPTH = 20
cfg.MODEL.REGNET.W0 = 232
cfg.MODEL.REGNET.WA = 115.89
cfg.MODEL.REGNET.WM = 2.53
cfg.MODEL.REGNET.GROUP_W = 232
#-------- for anynet -------#
cfg.MODEL.ANYNET = edict()
cfg.MODEL.ANYNET.STEM_TYPE = "res_stem_in"
cfg.MODEL.ANYNET.STEM_W = 64
cfg.MODEL.ANYNET.BLOCK_TYPE = "res_bottleneck_block"
cfg.MODEL.ANYNET.STRIDES = [1,2,2,2]
cfg.MODEL.ANYNET.SE_ON = False
cfg.MODEL.ANYNET.SE_R = 0.25
cfg.MODEL.ANYNET.BOT_MULS = [0.5,0.5,0.5,0.5]
cfg.MODEL.ANYNET.DEPTHS = [3,4,6,3]
cfg.MODEL.ANYNET.GROUP_WS = [4,8,16,32]
cfg.MODEL.ANYNET.WIDTHS = [256,512,1024,2048]
#-------- for effnet --------#
cfg.MODEL.EFFNET = edict()
cfg.MODEL.EFFNET.STEM_W = 32
cfg.MODEL.EFFNET.EXP_RATIOS = [1,6,6,6,6,6,6]
cfg.MODEL.EFFNET.KERNELS = [3,3,5,3,5,5,3]
cfg.MODEL.EFFNET.HEAD_W = 1408
cfg.MODEL.EFFNET.DC_RATIO = 0.0
cfg.MODEL.EFFNET.STRIDES = [1,2,2,2,1,2,1]
cfg.MODEL.EFFNET.SE_R = 0.25
cfg.MODEL.EFFNET.DEPTHS = [2, 3, 3, 4, 4, 5, 2]
cfg.MODEL.EFFNET.GROUP_WS = [4,8,16,32]
cfg.MODEL.EFFNET.WIDTHS = [16,24,48,88,120,208,352]
cfg.GPUS = [0]
cfg.PRINT_FRE = 300
cfg.DATASET_TRPE = 'cub200_2011'
cfg.SHORT_TEST = False
if __name__ == "__main__":
from utils import load_cfg
logger = load_cfg(cfg)
print(cfg)
|
import unittest
from katas.kyu_6.drunk_friend import decode
class DrunkFriendTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(decode('yvvi'), 'beer')
def test_equals_2(self):
self.assertEqual(decode('Blf zoivzwb szw 10 yvvih'),
'You already had 10 beers')
def test_equals_3(self):
self.assertEqual(decode('Ovg\'h hdrn rm gsv ulfmgzrm!'),
'Let\'s swim in the fountain!')
def test_equals_4(self):
self.assertEqual(decode('Tl slnv, blf\'iv wifmp'),
'Go home, you\'re drunk')
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from ast import literal_eval
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics.pairwise import linear_kernel, cosine_similarity
from nltk.stem.snowball import SnowballStemmer
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.corpus import wordnet
from surprise import Reader, Dataset, SVD
from surprise.model_selection import cross_validate
import logging
import warnings; warnings.simplefilter('ignore')
logging.basicConfig(filename="../log/result_hybrid.txt",
filemode='a',
format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
datefmt='%H:%M:%S',
level=logging.DEBUG)
def convert_int(x):
"""
This function for: convert x to int
"""
try:
return int(x)
except:
return np.nan
def load_data(dir_links_small='../input/links_small.csv', dir_metadata='../input/movies_metadata.csv'):
"""
This function for: Load the Dataset and preprocessing data
Args:
dir_links_small: path to the links_small csv file
dir_metadata : path to the movies metadata csv file
Return:
links_small , md : pandas frame
"""
links_small = pd.read_csv(dir_links_small)
links_small = links_small[links_small['tmdbId'].notnull()]['tmdbId'].astype('int')
md = pd.read_csv(dir_metadata)
md['genres'] = md['genres'].fillna('[]').apply(literal_eval).apply(lambda x: [i['name'] for i in x] if isinstance(x, list) else [])
return links_small, md
def get_small_movies_metatdata(md, links_small):
"""
This function for:
get small movies meta data.
Args:
md : meta data, a pandas frame
Return:
smd: small meta data.
"""
md['year'] = pd.to_datetime(md['release_date'], errors='coerce').apply(lambda x: str(x).split('-')[0] if x != np.nan else np.nan)
md = md.drop([19730, 29503, 35587]) # these numbers are the row indices of rows that contain badly formatted data;
# just try
# md['id'] = md['id'].astype(int)
# you will get an error indicating it cannot convert '1975-xx-xx'.
md['id'] = md['id'].astype('int')
smd = md[md['id'].isin(links_small)]
return smd
def get_quantitative_matrix(smd):
"""
This function for:
get quantitative_matrix
Args:
smd : small meta data, a pandas frame
Return:
smd: small meta data.
tfidf_matrix: quantitative matrix.
"""
smd['tagline'] = smd['tagline'].fillna('')
smd['description'] = smd['overview'] + smd['tagline']
smd['description'] = smd['description'].fillna('')
tf = TfidfVectorizer(analyzer='word',ngram_range=(1, 2),min_df=0, stop_words='english')
tfidf_matrix = tf.fit_transform(smd['description'])
logging.info(f"tfidf_matrix shape: {tfidf_matrix.shape}")
return smd, tfidf_matrix
def get_movieID_indMovie(convert_int, smd, filename='../input/links_small.csv'):
"""
This function for:
get movies id, index of movies
Args:
convert_int: function
smd: small metadata
filename: path to csv file
Return:
id_map: movies id
indices_map: index of movies
"""
id_map = pd.read_csv(filename)[['movieId', 'tmdbId']]
id_map['tmdbId'] = id_map['tmdbId'].apply(convert_int)
id_map.columns = ['movieId', 'id']
id_map = id_map.merge(smd[['title', 'id']], on='id').set_index('title')
indices_map = id_map.set_index('id')
return id_map, indices_map
def read_and_train_svd(filename='../input/ratings_small.csv'):
"""
This function for:
read data and train SVD alg to fit data.
Args:
filename: path to data.
Return:
svd : SVD alg after training with the dataset.
"""
reader = Reader()
ratings = pd.read_csv(filename)
ratings.head()
data = Dataset.load_from_df(ratings[['userId', 'movieId', 'rating']], reader)
svd = SVD()
# Run 5-fold cross-validation and then print results
cross_validate(svd, data, measures=['RMSE', 'MAE'], cv=5, verbose=True)
trainset = data.build_full_trainset()
svd.fit(trainset)
return svd
def hybrid(userId, title, indices, id_map, cosine_sim, smd, indices_map):
"""
This function for:
build a simple hybrid recommender that brings together techniques
we have implemented in the content based and collaborative filter based engines.
Args:
userId: User ID
title: the Title of a Movie
indices : a list of movie indices
id_map: movies id
cosine_sim : similarity between 2 movies
smd : small meta data
indices_map: a list of movie indices
Return:
10 similar movies sorted on the basis of expected ratings by that particular user.
"""
idx = indices[title]
tmdbId = id_map.loc[title]['id']
#print(idx)
movie_id = id_map.loc[title]['movieId']
sim_scores = list(enumerate(cosine_sim[int(idx)]))
sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
sim_scores = sim_scores[1:26]
movie_indices = [i[0] for i in sim_scores]
movies = smd.iloc[movie_indices][['title', 'vote_count', 'vote_average', 'year', 'id']]
movies['est'] = movies['id'].apply(lambda x: svd.predict(userId, indices_map.loc[x]['movieId']).est)
movies = movies.sort_values('est', ascending=False)
return movies.head(10)
def get_similarity_between2movies(tfidf_matrix):
"""
This function for:
get similarity between 2 movies
Args:
tfidf_matrix: quantitative matrix.
Return:
cosine_sim : similarity between 2 movies
"""
cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)
cosine_sim[0]
return cosine_sim
if __name__ == "__main__":
# Load all dataset
links_small, md = load_data()
# Load small dataset
smd = get_small_movies_metatdata(md, links_small)
# Calculate quantitative matrix
smd,tfidf_matrix = get_quantitative_matrix(smd)
smd = smd.reset_index()
indices = pd.Series(smd.index, index=smd['title'])
# Get similarity between two movies
cosine_sim = get_similarity_between2movies(tfidf_matrix)
# Get movie ID, movie indices
id_map, indices_map = get_movieID_indMovie(convert_int, smd)
# Training SVD algorithm with small data.
svd = read_and_train_svd()
logging.info(f"Top 10 movies for person with id 1, movie: Avatar:\n{hybrid(1, 'Avatar', indices, id_map, cosine_sim, smd, indices_map)}")
logging.info(f"Top 10 movies for person with id 500, movie: Avatar:\n{hybrid(500, 'Avatar', indices, id_map, cosine_sim, smd, indices_map)}")
|
age=50
name="John"
print(name)
print(name[0:2])
my_list = []
my_list.append(1)
print(my_list[0])
for x in my_list:
print(x)
if (x == 1):
print(x)
def my_function():
print(5)
my_function()
class MyClass:
variable = "John"
def function(self):
print("wesh wesh")
myObject = MyClass()
myObject.variable
|
from controller import *
import os,re,time,random,math
from PIL import Image # assumed source of the Image class used by get_image() below; install Pillow if missing
class EpuckFunctions (DifferentialWheels):
max_wheel_speed = 1000
num_dist_sensors = 8
encoder_resolution = 159.23 # for wheel encoders
tempo = 0.5 # Upper velocity bound = Fraction of the robot's maximum velocity = 1000 = 1 wheel revolution/sec
wheel_diameter = 4.1 # centimeters
axle_length = 5.3 # centimeters
timestep_duration = 200/1000 # Real-time seconds per timestep
def basic_setup(self, timestep = 200, tempo = 1.0):
self.timestep = timestep
self.tempo = tempo
self.enableEncoders(self.timestep)
self.camera = self.getCamera('camera')
self.camera.enable(4*self.timestep)
self.dist_sensor_values = [0 for i in range(self.num_dist_sensors)]
self.dist_sensors = [self.getDistanceSensor('ps'+str(x)) for x in range(self.num_dist_sensors)] # distance sensors
self.camera_sample_size = [8,8] # x,y sample size in pixels
self.camera_sample_array_length = self.camera_sample_size[0] * self.camera_sample_size [1] * 3
for s in self.dist_sensors: # Enable all distance sensors (explicit loop; map() would be lazy under Python 3)
s.enable(self.timestep)
self.update_proximities()
self.step(self.timestep)
self.emitter = self.getEmitter("emitter");
self.receiver = self.getReceiver("receiver");
self.gps = self.getGPS("gps");
self.gps.enable(timestep)
self.compass = self.getCompass("compass");
self.compass.enable(timestep)
self.receiver.enable(self.timestep)
self.receiver.setChannel(1)
self.emitter.setChannel(2)
self.current_x = None
self.current_y = None
def move_wheels(self, speeds = [1,1], wheels=["L","R"]):
left = None
right = None
for i in range(0,len(speeds)):
if wheels[i] == "L":
left = speeds[i]
else:
right = speeds[i]
if left is None:
left = self.getLeftSpeed()/self.max_wheel_speed
if right is None:
right = self.getRightSpeed()/self.max_wheel_speed
# if left >= 1.0:
# left = 0.9
# if right >= 1.0:
# right = 0.9
self.setSpeed(int(left*self.max_wheel_speed),int(right*self.max_wheel_speed))
def get_coordinates(self):
gps_sensors = self.gps.getValues();
return [gps_sensors[0], gps_sensors[2]]
def get_compass_heading_in_grad(self):
compass_values = self.compass.getValues();
# subtract math.pi/2 (90) so that the heading is 0 facing 'north' (given x going from left to right)
rad = math.atan2(compass_values[0], compass_values[2]) - (math.pi / 2);
if (rad < -math.pi):
rad = rad + (2 * math.pi)
return rad / math.pi * 180.0
def get_random_wheel(self):
return random.choice(["L","R"])
def update_current_coordinates(self,d_str):
r = re.match('(.*),(.*)',d_str)
self.current_x = float(r.group(1))
self.current_y = float(r.group(2))
def update_proximities(self):
for i in range(self.num_dist_sensors):
self.dist_sensor_values[i] = self.normalise_value(self.dist_sensors[i].getValue())
return self.dist_sensor_values
def normalise_value(self,value):
if value > 250:
value = 250
return value
def get_random_proximity_sensor(self):
return random.randint(0,len(self.dist_sensors)-1)
def get_proximity_sensor_value(self,s):
value = self.dist_sensor_values[s]
if value > 1000:
value = 1000.0
if value < 50:
value = 0
return value / 1000.0
def get_multi_sensor_value(self,s):
if s < len(self.dist_sensor_values):
return self.get_proximity_sensor_value(s)
elif s >= len(self.dist_sensor_values) and s < (len(self.dist_sensors)+self.camera_sample_array_length):
# normalised by /255
return self.current_snapshot[s-len(self.dist_sensor_values)]/255.0
elif s == (len(self.dist_sensors)+self.camera_sample_array_length):
return self.current_x
elif s == (len(self.dist_sensors)+self.camera_sample_array_length) + 1:
return self.current_y
def get_random_multi_sensor(self):
if random.random() < 0.25:
return random.randint(0,(len(self.dist_sensors)-1))
else:
return random.randint(len(self.dist_sensors),len(self.dist_sensors)+self.camera_sample_array_length-1)
def snapshot(self, show = False, sampled= True):
if sampled:
im = self.get_sampled_image()
else:
im = self.get_image()
if show:
im.save('test.bmp')
self.current_snapshot = im
return im
def get_image(self):
strImage=self.camera.getImageArray()
im = Image.new('RGB', (self.camera.getWidth(), self.camera.getHeight()))
c = []
for i,a in enumerate(strImage):
for j,b in enumerate(a):
im.putpixel((i,j),(b[0],b[1],b[2]))
return im
def get_sampled_image(self):
strImage=self.camera.getImageArray()
im = []
for columns in [0,7,14,21,29,37,44,51]:
for rows in [0,5,10,16,22,28,33,38]:
for colour in [0,1,2]:
im.append(strImage[columns][rows][colour])
return im
def avg_rgb(self,image):
x,y = (len(image),len(image[0]))
total = float(x*y)
sum_r, sum_g, sum_b = 0.0, 0.0, 0.0
for i in range(x):
for j in range(y):
# print "image[i][j]:",image[i][j]
r,g,b = image[i][j][0],image[i][j][1],image[i][j][2]
sum_r, sum_g, sum_b = sum_r + r, sum_g + g, sum_b + b
return [sum_r/total, sum_g/total, sum_b/total]
def get_average_intensity_in_grids(self):
image = self.camera.getImageArray()
for x in range(0,len(image)):
image[x].append(image[x][-1])
grids = self.get_five_by_five_grids(image)
avrgd = []
for i,g in enumerate(grids):
intensities = self.avg_rgb(g)
avrgd.append(sum(intensities)/len(intensities))
return avrgd
def get_five_by_five_grids(self,data,patch_size=8):
patches = []
for i in range(5):
for j in range(5):
patch = []
for col in range(i*patch_size,i*patch_size+patch_size):
whole_row = []
for row in range(j*patch_size,j*patch_size+patch_size):
whole_row.append(data[col][row])
patch.append(whole_row)
patches.append(patch)
return patches
def get_camera_image(self):
"used to pass images to atoms"
return self.current_snapshot
def get_sampled_camera_image(self):
"used to pass images to atoms"
return self.current_snapshot
def do_actual_spin(self,speed=1.0,direction = "cw"):
if direction == "cw":
self.setSpeed(speed*self.max_wheel_speed,
-speed*self.max_wheel_speed)
else:
self.setSpeed(-speed*self.max_wheel_speed,
speed*self.max_wheel_speed)
def do_actual_move(self,speed_left=1.0,speed_right=None):
if speed_right is None:
speed_right = speed_left
#Drive both wheels; speed_right defaults to speed_left
self.setSpeed(speed_left*self.max_wheel_speed,speed_right*self.max_wheel_speed)
def stop_moving(self):
self.setSpeed(0,0)
|
d = 2
st = 1
backwards = False
while st < 6:
if backwards == False:
print(" " * d + "*" * st + " " * d)
d -= 1
st += 2
if st == 5:
backwards = True
if backwards == True:
print(" " * d + "*" * st + " " * d)
d += 1
st -= 2
if st == 1:
break
print(" " * 2 + "*" + " " * 2)
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import logging
metrics_logger = logging.getLogger(__name__)
class MetricsRecord(object):
def __init__(self, fields):
self._fields = OrderedDict()
for field in fields:
self._fields[field.key] = field
def record_metrics(self, **kwargs):
for field_key, addend in list(kwargs.items()):
if field_key in self._fields:
self._fields[field_key].add(addend)
def print_metrics(self, logger=None, include_zero_values=False):
if not logger:
logger = metrics_logger
logging_lines = []
for field in list(self._fields.values()):
value = field.value()
if value > 0 or include_zero_values:
line = "{description}: {value}".format(description=field.description, value=field.value())
logging_lines.append(line)
logger.info('\n'.join(logging_lines))
def write_metrics(self):
raise NotImplementedError
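# A minimal usage sketch (hypothetical CounterField; the real field classes live
# elsewhere in the project): MetricsRecord only needs objects exposing key,
# description, add() and value().
if __name__ == "__main__":
class CounterField(object):
def __init__(self, key, description):
self.key = key
self.description = description
self._total = 0
def add(self, addend):
self._total += addend
def value(self):
return self._total
logging.basicConfig(level=logging.INFO)
record = MetricsRecord([CounterField("rows_read", "Rows read"),
CounterField("rows_skipped", "Rows skipped")])
record.record_metrics(rows_read=10, rows_skipped=2)
record.print_metrics()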
|
from bottle import Bottle, route, run, response, hook, request, static_file
import paste
import bottle
import dbserver
import obtenerDatos
import impactarExtracto
from json import dumps
default = Bottle()
bottle.BaseRequest.MEMFILE_MAX = (1024 * 1024) * 3 #maximum 3 MB
@default.hook('after_request')
def enable_cors():
"""
You need to add some headers to each request.
Don't use the wildcard '*' for Access-Control-Allow-Origin in production.
"""
response.headers['Access-Control-Allow-Origin'] = '*'
response.headers['Access-Control-Allow-Methods'] = 'PUT, GET, POST, DELETE, OPTIONS'
response.headers['Access-Control-Allow-Headers'] = 'Origin, Accept, Content-Type, X-Requested-With, X-CSRF-Token'
@default.route('/<:re:.*>', method='OPTIONS')
def dummy():
return
#example of how to mount modules: ('main route', module)
#default.mount('/modulo',modulo.modulo)
default.mount('/datos',obtenerDatos.Rafam)
@default.route('/extracto/impactar',method='POST')
def impactar():
datos = request.json
entrada = datos['entrada']
desde = datos['desde']
hasta = datos['hasta']
cuentabanco = datos['cuentabancaria']
cuentacontable = datos['cuentacontable']
sucursal = datos['sucursal']
nro = impactarExtracto.guardarExtracto(entrada,desde,hasta,cuentabanco,cuentacontable,sucursal)
return dumps(nro)
@default.route('/static/<tipo>/<modulo>')
def devolverStatic(tipo,modulo):
return static_file(modulo,root="../../build/static/"+tipo)
@default.route('/<modulo>')
def devolverModulo(modulo):
return static_file(modulo,root="../../build/")
@default.route('/')
def devolverPagina():
return static_file("index.html",root="../../build/")
run(default,host = '0.0.0.0', port = 1700)
|
# -*- coding: utf-8 -*-
import numpy as np
def sampler(counts, prior_alpha, repl = 10000):
"""
Executes a Monte Carlo simulation to estimate the distribution
of class probabilities based on the dirichlet-multinomial conjugate
model with non-missing count data.
- counts is a 2-dim array of count values
- prior_alpha is a 1-dim vector of concentration parameters from the dirichlet prior
- repl is an integer of the desired number of mcmc samples
"""
if np.isnan(counts).any():
raise ValueError("Conjugate Monte Carlo sampler may only model fully non-missing count data.")
else:
return np.random.dirichlet(counts.sum(0) + prior_alpha,repl)
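# A minimal usage sketch (hypothetical counts): two observations of 3-class count
# data with a flat Dirichlet(1, 1, 1) prior; the column means of the draws
# approximate the posterior expectation of each class probability.
if __name__ == "__main__":
counts = np.array([[10, 5, 1], [12, 4, 2]])
prior_alpha = np.ones(3)
draws = sampler(counts, prior_alpha, repl=5000)
print(draws.mean(axis=0))  # roughly (counts.sum(0) + 1) / (counts.sum() + 3)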
|
# Generated by Django 2.2.4 on 2019-10-31 06:55
from django.db import migrations
import django.db.models.deletion
import smart_selects.db_fields
class Migration(migrations.Migration):
dependencies = [
('billings', '0007_auto_20191022_0832'),
]
operations = [
migrations.AlterField(
model_name='billsetting',
name='lots',
field=smart_selects.db_fields.ChainedManyToManyField(chained_field='area', chained_model_field='street__area', to='residents.Lot'),
),
migrations.AlterField(
model_name='invoice',
name='lot',
field=smart_selects.db_fields.ChainedForeignKey(chained_field='area', chained_model_field='street__area', on_delete=django.db.models.deletion.CASCADE, to='residents.Lot'),
),
]
|
#DRONE LAUNCHER
#Import modules
from flask import Flask, render_template, request, jsonify
from roboclaw import Roboclaw
import time
import socket
#Open serial port
#Linux comport name
rc = Roboclaw("/dev/ttyACM0",115200)
#Windows comport name
#rc = Roboclaw("COM8",115200)
rc.Open()
#Declare variables
host=(([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")] or [[(s.connect(("8.8.8.8", 53)), s.getsockname()[0], s.close()) for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) + ["no IP found"])[0]
port=5000
address = 0x80 #Controller 1, M1=Pitch, M2=Rotation
address_2 = 0x81 #Controller 2, M1=Lift, M2=Launch
pitch_pulses=355000 #Encoder pulses from the linear actuator
pitch_length=90.0 #Degrees
pitch_speed_pulses=7000 #Pulses per second
pitch_speed_manual=127 #From 0 to 127
pitch_ready=70.0 #Pitch degrees for the launch (temporary)
rotation_pulses=950000 #Encoder pulses from the rotation motor
rotation_length=180.0 #Degrees
rotation_speed_pulses=16000 #Pulses per second
rotation_speed_manual=15 #From 0 to 127
rotation_ready=10.0 #Rotation degrees for the launch (temporary)
lift_pulses=19000 #Encoder pulses from the lifting column
lift_length=130.0 #cm
lift_speed_pulses=420 #Pulses per second
lift_speed_manual=127 #From 0 to 127
lift_ready=lift_length #Lift length for the launch (temporary)
launch_pulses=14800 #Encoder pulses from the launch motor
launch_length=111.0 #cm
launch_speed_pulses=6*13400 #Pulses per second during launch (145000 max) (13400 pulses/m)
launch_speed_pulses_slow=2500 #Pulses per second during preparation
launch_speed_manual=30 #From 0 to 127
launch_acceleration=(launch_speed_pulses**2)/13400 #Acceleration during launch (pulses/second2)
launch_max_speed=10 #Maximum launch speed
launch_min_speed=1 #Minimum launch speed
launch_max_acceleration=48 #Maximum launch acceleration
launch_min_acceleration=1 #Minimum launch acceleration
launch_standby=8000 #Drone position during stand-by
launch_mount=17000 #Drone position during mounting
launch_break=21000 #Belt position during breaking
launch_bottom=0 #Drone position at the back part of the capsule
launch_connect=2190 #Belt position for touching the upper part
encoders_ready = 0 #At the beginning, the encoders are not ready
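#The position handlers below all map a physical target (degrees or cm) onto the
#encoder range proportionally, e.g. int(pitch_pulses/(pitch_length/pitch_position)).
#A minimal helper sketch of that conversion (hypothetical; the routes below keep
#their original inline arithmetic):
def units_to_pulses(target_units, full_range_units, full_range_pulses):
#Convert a physical target (degrees or cm) to encoder pulses
if target_units == 0:
return 0
return int(full_range_pulses/(full_range_units/target_units))
#Example: units_to_pulses(45, pitch_length, pitch_pulses) gives the pulse count for a 45 degree pitch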
#Create an instance of the Flask class for the web app
app = Flask(__name__)
app.debug = True
#Render HTML template
@app.route("/")
def index():
return render_template('dronelauncher_web.html')
#Motor controller functions
#rc.ForwardM2(address, rotation_speed_manual)
#rc.ForwardM2(address,0) #Both commands are used to avoid rotation
@app.route('/app_pitch_up', methods=['POST'])
def function_pitch_up():
rc.BackwardM1(address, pitch_speed_manual)
return (''), 204 #Returns an empty response
@app.route('/app_pitch_down', methods=['POST'])
def function_pitch_down():
rc.ForwardM1(address, pitch_speed_manual)
return (''), 204
@app.route('/app_pitch_position', methods=['POST'])
def function_pitch_position():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
pitch_position = request.form.get('pitch_position', type=int)
if pitch_position > pitch_length or pitch_position < 0:
return (''), 400
elif pitch_position == 0:
pitch_objective = 0
else:
pitch_objective = int(pitch_pulses/(pitch_length/pitch_position))
pitch_actual = rc.ReadEncM1(address)[1]
pitch_increment = pitch_objective-pitch_actual
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
return (''), 204
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
return (''), 204
@app.route('/app_pitch_stop', methods=['POST'])
def function_pitch_stop():
rc.ForwardM1(address,0)
return (''), 204
@app.route('/app_rotation_right', methods=['POST'])
def function_rotation_right():
rc.ForwardM2(address, rotation_speed_manual)
return (''), 204
@app.route('/app_rotation_left', methods=['POST'])
def function_rotation_left():
rc.BackwardM2(address, rotation_speed_manual)
return (''), 204
@app.route('/app_rotation_position', methods=['POST'])
def function_rotation_position():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
rotation_position = request.form.get('rotation_position', type=int)
if rotation_position > rotation_length or rotation_position < -rotation_length:
return (''), 400
elif rotation_position == 0:
rotation_objective = 0
else:
rotation_objective = int((rotation_pulses/(rotation_length/rotation_position))/2)
rotation_actual = rc.ReadEncM2(address)[1]
rotation_increment = rotation_objective-rotation_actual
if rotation_increment >= 0:
rc.SpeedDistanceM2(address,rotation_speed_pulses,rotation_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
return (''), 204
else:
rc.SpeedDistanceM2(address,-rotation_speed_pulses,-rotation_increment,1)
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
return (''), 204
@app.route('/app_rotation_stop', methods=['POST'])
def function_rotation_stop():
rc.ForwardM2(address,0)
return (''), 204
@app.route('/app_lift_up', methods=['POST'])
def function_lift_up():
rc.ForwardM1(address_2, lift_speed_manual)
return (''), 204
@app.route('/app_lift_down', methods=['POST'])
def function_lift_down():
rc.BackwardM1(address_2, lift_speed_manual)
return (''), 204
@app.route('/app_lift_position', methods=['POST'])
def function_lift_position():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
lift_position = request.form.get('lift_position', type=int)
if lift_position > lift_length or lift_position < 0:
return (''), 400
elif lift_position == 0:
lift_objective = 0
else:
lift_objective = int(lift_pulses/(lift_length/lift_position))
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
return (''), 204
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
return (''), 204
@app.route('/app_lift_stop', methods=['POST'])
def function_lift_stop():
rc.ForwardM1(address_2,0)
return (''), 204
@app.route('/app_launch_forwards', methods=['POST'])
def function_launch_forwards():
rc.ForwardM2(address_2, launch_speed_manual)
#rc.SpeedM2(address_2,launch_speed_pulses_slow) #Using the speed control instead of the duty cycle because the friction changes in the tube
return (''), 204
@app.route('/app_launch_backwards', methods=['POST'])
def function_launch_backwards():
rc.BackwardM2(address_2, launch_speed_manual)
#rc.SpeedM2(address_2,-launch_speed_pulses_slow) #Using the speed control instead of the duty cycle because the friction changes in the tube
return (''), 204
@app.route('/app_launch_position', methods=['POST'])
def function_launch_position():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
launch_position = request.form.get('launch_position', type=int)
if launch_position > launch_length or launch_position < 0:
return (''), 400
else:
launch_objective = launch_bottom
launch_actual = rc.ReadEncM2(address_2)[1]
launch_increment = launch_objective-launch_actual
if launch_increment >= 0:
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
buffer_2 = (0,0,0)
while(buffer_2[2]!=0x80): #Loop until all movements are completed
buffer_2 = rc.ReadBuffers(address_2)
if launch_position == 0:
launch_objective = 0
else:
launch_objective = int(launch_pulses/(launch_length/launch_position))
launch_actual = rc.ReadEncM2(address_2)[1]
launch_increment = launch_objective-launch_actual+launch_connect
if launch_increment >= 0:
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
return (''), 204
else:
rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,0)
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
return (''), 204
@app.route('/app_launch_stop', methods=['POST'])
def function_launch_stop():
rc.ForwardM2(address_2,0)
return (''), 204
@app.route('/app_max_pitch', methods=['POST'])
def function_max_pitch():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
pitch_objective = pitch_pulses
pitch_actual = rc.ReadEncM1(address)[1]
pitch_increment = pitch_objective-pitch_actual
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
#return (''), 204
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
#return (''), 204
t = 0
#while t < 30:
# print(rc.ReadEncM1(address))
# time.sleep(1)
return (''), 204
@app.route('/app_min_pitch', methods=['POST'])
def function_min_pitch():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
pitch_objective = 0
pitch_actual = rc.ReadEncM1(address)[1]
print(pitch_actual)
pitch_increment = pitch_objective-pitch_actual
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
return (''), 204
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
return (''), 204
@app.route('/app_max_lift', methods=['POST'])
def function_max_lift():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
lift_objective = lift_pulses
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
print(lift_increment)
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
return (''), 204
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
return (''), 204
@app.route('/app_min_lift', methods=['POST'])
def function_min_lift():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
lift_objective = 0
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
print(lift_increment)
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
return (''), 204
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
return (''), 204
t = 0
# while t < 10:
# print(rc.ReadEncM1(address_2)[1])
# t += 1
# time.sleep(1)
@app.route('/app_home', methods=['POST'])
def function_home():
rc.BackwardM1(address, pitch_speed_manual)
rc.BackwardM1(address_2, lift_speed_manual)
rc.BackwardM2(address_2, launch_speed_manual)
#rc.SpeedM2(address_2,-launch_speed_pulses_slow) #Using the speed control instead of the duty cycle because the friction changes in the tube
#Missing rotation limit switch
return (''), 204
@app.route('/app_reset_encoders', methods=['POST'])
def function_reset_encoders():
rc.ResetEncoders(address)
rc.ResetEncoders(address_2)
global encoders_ready
encoders_ready = 1 #Encoders have been reset
return (''), 204
@app.route('/app_battery_voltage', methods=['POST'])
def function_battery_voltage():
voltage = round(0.1*rc.ReadMainBatteryVoltage(address)[1],2)
return jsonify(voltage=voltage)
@app.route('/app_stop', methods=['POST'])
def function_stop():
rc.ForwardM1(address,0)
rc.ForwardM2(address,0)
rc.ForwardM1(address_2,0)
rc.ForwardM2(address_2,0)
return (''), 204
@app.route('/app_standby', methods=['POST'])
def function_standby():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
pitch_objective = 0
pitch_actual = rc.ReadEncM1(address)[1]
pitch_increment = pitch_objective-pitch_actual
print(pitch_actual)
print(pitch_increment)
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
rotation_objective = 0
rotation_actual = rc.ReadEncM2(address)[1]
rotation_increment = rotation_objective-rotation_actual
print(rotation_increment)
if rotation_increment >= 0:
rc.SpeedDistanceM2(address,rotation_speed_pulses,rotation_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address,-rotation_speed_pulses,-rotation_increment,1)
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
lift_objective = 0
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
print(lift_increment)
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
launch_objective = launch_bottom
launch_actual = rc.ReadEncM2(address_2)[1]
launch_increment = launch_objective-launch_actual
if launch_increment >= 0:
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_standby,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
return (''), 204
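# The routes above and below repeat the same pattern for every axis: read the encoder,
# compute the signed increment to the objective, and issue a SpeedDistance command
# followed by a zero-speed segment. A minimal sketch of how that pattern could be
# factored into one helper is shown here; it is illustrative only and is not called by
# the existing routes. It assumes the rc driver object and command semantics used in
# this file.
def _move_axis_to(read_enc, speed_distance, addr, speed_pulses, objective):
    """Drive one axis to an absolute encoder objective (illustrative sketch)."""
    actual = read_enc(addr)[1]
    increment = objective - actual
    if increment >= 0:
        speed_distance(addr, speed_pulses, increment, 1)
    else:
        speed_distance(addr, -speed_pulses, -increment, 1)
    speed_distance(addr, 0, 0, 0)  # queue a zero-speed segment to avoid deceleration
# Example (hypothetical): _move_axis_to(rc.ReadEncM1, rc.SpeedDistanceM1, address, pitch_speed_pulses, 0)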
@app.route('/app_prepare', methods=['POST'])
def function_prepare():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
if pitch_ready == 0:
pitch_objective = 0
else:
pitch_objective = int(pitch_pulses/(pitch_length/pitch_ready))
pitch_actual = rc.ReadEncM1(address)[1]
pitch_increment = pitch_objective-pitch_actual
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
if rotation_ready == 0:
rotation_objective = 0
else:
rotation_objective = int(rotation_pulses/(rotation_length/rotation_ready))
rotation_actual = rc.ReadEncM2(address)[1]
rotation_increment = rotation_objective-rotation_actual
if rotation_increment >= 0:
rc.SpeedDistanceM2(address,rotation_speed_pulses,rotation_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address,-rotation_speed_pulses,-rotation_increment,1)
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
if lift_ready == 0:
lift_objective = 0
else:
lift_objective = int(lift_pulses/(lift_length/lift_ready))
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
launch_objective = launch_bottom
launch_actual = rc.ReadEncM2(address_2)[1]
launch_increment = launch_objective-launch_actual
if launch_increment >= 0:
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
return (''), 204
@app.route('/app_launch', methods=['POST'])
def function_launch():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
launch_objective = launch_bottom
launch_actual = rc.ReadEncM2(address_2)[1]
launch_increment = launch_objective-launch_actual
if launch_increment >= 0:
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_connect,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
launch_objective = launch_break
launch_actual = launch_connect
launch_increment = launch_objective-launch_actual
rc.SpeedAccelDistanceM2(address_2,launch_acceleration,launch_speed_pulses,launch_increment,0)
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
return (''), 204
@app.route('/app_mount', methods=['POST'])
def function_mount():
if encoders_ready == 0: #Not execute if the encoders are not ready
return (''), 403
pitch_objective = pitch_pulses
pitch_actual = rc.ReadEncM1(address)[1]
pitch_increment = pitch_objective-pitch_actual
if pitch_increment >= 0:
rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
rotation_objective = 0
rotation_actual = rc.ReadEncM2(address)[1]
rotation_increment = rotation_objective-rotation_actual
if rotation_increment >= 0:
rc.SpeedDistanceM2(address,rotation_speed_pulses,rotation_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address,-rotation_speed_pulses,-rotation_increment,1)
rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
lift_objective = 0
lift_actual = rc.ReadEncM1(address_2)[1]
lift_increment = lift_objective-lift_actual
if lift_increment >= 0:
rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
launch_objective = launch_bottom
launch_actual = rc.ReadEncM2(address_2)[1]
launch_increment = launch_objective-launch_actual
if launch_increment >= 0:
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
else:
rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_mount,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
return (''), 204
# Automatic launch works, but it is disabled because the while loop prevents
# the motors from stopping when the Stop button is pressed, making it dangerous
##@app.route('/app_automatic_launch', methods=['POST'])
##def function_automatic_launch():
## if encoders_ready == 0: #Not execute if the encoders are not ready
## return (''), 403
##
## #Prepare
## if pitch_ready == 0:
## pitch_objective = 0
## else:
## pitch_objective = int(pitch_pulses/(pitch_length/pitch_ready))
## pitch_actual = rc.ReadEncM1(address)[1]
## pitch_increment = pitch_objective-pitch_actual
## if pitch_increment >= 0:
## rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
## rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
##
## if rotation_ready == 0:
## rotation_objective = 0
## else:
## rotation_objective = int(rotation_pulses/(rotation_length/rotation_ready))
## rotation_actual = rc.ReadEncM2(address)[1]
## rotation_increment = rotation_objective-rotation_actual
## if rotation_increment >= 0:
## rc.SpeedDistanceM2(address,rotation_speed_pulses,rotation_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM2(address,-rotation_speed_pulses,-rotation_increment,1)
## rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
##
## if lift_ready == 0:
## lift_objective = 0
## else:
## lift_objective = int(lift_pulses/(lift_length/lift_ready))
## lift_actual = rc.ReadEncM1(address_2)[1]
## lift_increment = lift_objective-lift_actual
## if lift_increment >= 0:
## rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
## rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
##
## launch_objective = launch_bottom
## launch_actual = rc.ReadEncM2(address_2)[1]
## launch_increment = launch_objective-launch_actual
## if launch_increment >= 0:
## rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
## rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_connect,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
##
## buffer_1 = (0,0,0)
## buffer_2 = (0,0,0)
## while(buffer_1[1]!=0x80): #Loop until pitch is completed
## buffer_1 = rc.ReadBuffers(address)
## while(buffer_1[2]!=0x80): #Loop until rotation is completed
## buffer_1 = rc.ReadBuffers(address)
## while(buffer_2[1]!=0x80): #Loop until lift is completed
## buffer_2 = rc.ReadBuffers(address_2)
## while(buffer_2[2]!=0x80): #Loop until launch is completed
## buffer_2 = rc.ReadBuffers(address_2)
## #The loop does not work with AND conditions
## time.sleep(2)
##
## #Launch
## launch_objective = launch_break
## launch_actual = rc.ReadEncM2(address_2)[1]
## launch_increment = launch_objective-launch_actual
## rc.SpeedDistanceM2(address_2,launch_speed_pulses,launch_increment,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
##
## while(buffer_2[2]!=0x80): #Loop until launch is completed
## buffer_2 = rc.ReadBuffers(address_2)
## #The loop does not work with AND conditions
## time.sleep(2)
##
## #Standby
## pitch_objective = 0
## pitch_actual = rc.ReadEncM1(address)[1]
## pitch_increment = pitch_objective-pitch_actual
## if pitch_increment >= 0:
## rc.SpeedDistanceM1(address,pitch_speed_pulses,pitch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM1(address,-pitch_speed_pulses,-pitch_increment,1)
## rc.SpeedDistanceM1(address,0,0,0) #To avoid deceleration
##
## rotation_objective = 0
## rotation_actual = rc.ReadEncM2(address)[1]
## rotation_increment = rotation_objective-rotation_actual
## if rotation_increment >= 0:
## rc.SpeedDistanceM2(address,rotation_speed_pulses,rotation_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM2(address,-rotation_speed_pulses,-rotation_increment,1)
## rc.SpeedDistanceM2(address,0,0,0) #To avoid deceleration
##
## lift_objective = 0
## lift_actual = rc.ReadEncM1(address_2)[1]
## lift_increment = lift_objective-lift_actual
## if lift_increment >= 0:
## rc.SpeedDistanceM1(address_2,lift_speed_pulses,lift_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM1(address_2,-lift_speed_pulses,-lift_increment,1)
## rc.SpeedDistanceM1(address_2,0,0,0) #To avoid deceleration
##
## launch_objective = launch_bottom
## launch_actual = rc.ReadEncM2(address_2)[1]
## launch_increment = launch_objective-launch_actual
## if launch_increment >= 0:
## rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_increment,1) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
## else:
## rc.SpeedDistanceM2(address_2,-launch_speed_pulses_slow,-launch_increment,1)
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
## rc.SpeedDistanceM2(address_2,launch_speed_pulses_slow,launch_standby,0) #(address, +-speed, pulses, buffer(0=buffered, 1=Execute immediately))
## rc.SpeedDistanceM2(address_2,0,0,0) #To avoid deceleration
##
## return (''), 204
@app.route('/app_change_pitch', methods=['POST'])
def function_change_pitch():
pitch_position_prepare = request.form.get('pitch_position_prepare', type=int)
if pitch_position_prepare > pitch_length or pitch_position_prepare < 0:
return (''), 400
global pitch_ready
pitch_ready = float(pitch_position_prepare)
return (''), 204
@app.route('/app_change_lift', methods=['POST'])
def function_change_lift():
lift_position_prepare = request.form.get('lift_position_prepare', type=int)
if lift_position_prepare > lift_length or lift_position_prepare < 0:
return (''), 400
global lift_ready
lift_ready = float(lift_position_prepare)
return (''), 204
@app.route('/app_change_rotation', methods=['POST'])
def function_change_rotation():
rotation_position_prepare = request.form.get('rotation_position_prepare', type=int)
if rotation_position_prepare > rotation_length or rotation_position_prepare < 0:
return (''), 400
global rotation_ready
rotation_ready = float(rotation_position_prepare)
return (''), 204
@app.route('/app_change_speed', methods=['POST'])
def function_change_speed():
speed = request.form.get('speed', type=int)
if speed > launch_max_speed or speed < launch_min_speed:
return (''), 400
global launch_speed_pulses
global launch_acceleration
if speed > 7:
launch_speed_pulses = speed*13400
launch_acceleration = 655360 #Maximum value
return (''), 204
else:
launch_speed_pulses = speed*13400
launch_acceleration = (launch_speed_pulses**2)/13400
return (''), 204
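# Note on the acceleration formula above: with launch_acceleration = launch_speed_pulses**2/13400,
# a launch starting from rest reaches full speed after v**2/(2*a) = 6700 encoder pulses,
# i.e. half of the 13400-pulse unit used for the speed scaling. This is an interpretation
# of the constant, not a documented RoboClaw value.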
@app.route('/app_change_acceleration', methods=['POST'])
def function_change_acceleration():
acceleration = request.form.get('acceleration', type=int)
if acceleration > launch_max_acceleration or acceleration < launch_min_acceleration:
return (''), 400
acceleration = acceleration*13400
global launch_acceleration
launch_acceleration = acceleration
return (''), 204
@app.route('/app_disable_buttons', methods=['POST'])
def function_disable_buttons():
return jsonify(encoders_ready=encoders_ready)
#Specify IP address and port for the server
if __name__ == "__main__":
app.run(host=host,port=port)
|
from sim.api import *
from sim.basics import *
'''
Create your distance vector router in this file.
'''
class DVRouter (Entity):
def __init__(self):
# neighbor_list <neighbor, (port , distance)>
self.neighbor_list = {}
# distance_Vector - <(src, des), distance>
self.distance_Vector = {}
# forward_table - < destination, port_number >
self.forward_table = {}
self.livePort = set()
def handle_rx (self, packet, port):
# Add your code here!
if (len(self.neighbor_list) == 0 and len(self.distance_Vector) == 0 and len(self.forward_table) == 0):
self.neighbor_list[self] = (None, 0)
self.distance_Vector[(self,self)] = 0
self.forward_table[self] = None
if type(packet) is DiscoveryPacket:
self.handle_discoveryPacket(packet,port)
elif type(packet) is RoutingUpdate:
self.handle_RoutingUpdatePacket(packet,port)
else:
self.handle_otherPacket(packet,port)
def handle_discoveryPacket (self, packet, port):
me = self
changes = {}
# clean up garbage
if self.neighbor_list.has_key(packet.src) and self.neighbor_list[packet.src][1] == float('inf'):
self.neighbor_list.pop(packet.src)
if packet.is_link_up == True:
self.livePort.add(port)
if self.neighbor_list.has_key(packet.src):
change = packet.latency - self.neighbor_list[packet.src][1]
self.neighbor_list[packet.src] = (port, packet.latency)
for k, v in self.forward_table.items():
if v == port:
self.distance_Vector[(me, k)] += change
if (self.neighbor_list.has_key(k) and ((self.neighbor_list[k][1] < self.distance_Vector[(me, k)]) or (self.neighbor_list[k][1] == self.distance_Vector[(me, k)] and self.neighbor_list[k][0] < self.forward_table[k]))):
self.distance_Vector[(me, k)] = self.neighbor_list[k][1]
self.forward_table[k] = self.neighbor_list[k][0]
changes[k] = (self.forward_table[k], self.distance_Vector[(me, k)])
else:
self.neighbor_list[packet.src] = (port, packet.latency)
if (not self.distance_Vector.has_key((me, packet.src))) or (self.neighbor_list[packet.src][1] < self.distance_Vector[(me, packet.src)]) or (self.neighbor_list[packet.src][1] == self.distance_Vector[(me, packet.src)] and port < self.forward_table[packet.src]):
self.distance_Vector[(me, packet.src)] = self.neighbor_list[packet.src][1]
self.forward_table[packet.src] = port
changes[packet.src] = (self.forward_table[packet.src], self.distance_Vector[(me, packet.src)])
# for first time neighbor
updatePacket = RoutingUpdate()
for k, v in self.forward_table.items():
updatePacket.add_destination(k, self.distance_Vector[(me, k)])
self.send(updatePacket, port, flood=False)
else:
if port in self.livePort:
self.livePort.remove(port)
if self.neighbor_list.has_key(packet.src):
self.neighbor_list[packet.src] = (port, float('inf'))
for k, v in self.forward_table.items():
if v == port:
self.distance_Vector[(me, k)] = float('inf')
if self.neighbor_list.has_key(k):
self.distance_Vector[(me, k)] = self.neighbor_list[k][1]
self.forward_table[k] = self.neighbor_list[k][0]
changes[k] = (self.forward_table[k], self.distance_Vector[(me, k)])
else:
changes[k] = (self.forward_table[k], self.distance_Vector[(me, k)])
changes = dict(changes.items() + self.calculateDV().items())
self.sendRoutingUpdate(changes)
def handle_RoutingUpdatePacket (self, packet, port):
me = self
changes = {}
for dst in packet.all_dests():
self.distance_Vector[(packet.src, dst)] = packet.get_distance(dst)
if self.forward_table.has_key(dst) and port == self.forward_table[dst]:
if self.distance_Vector[(me, dst)] != self.neighbor_list[packet.src][1] + self.distance_Vector[(packet.src, dst)]:
self.distance_Vector[(me, dst)] = self.neighbor_list[packet.src][1] + self.distance_Vector[(packet.src, dst)]
changes[dst] = (self.forward_table[dst], self.distance_Vector[(me, dst)])
if self.neighbor_list.has_key(dst) and self.neighbor_list[dst][1] < self.distance_Vector[(me, dst)]:
self.distance_Vector[(me, dst)] = self.neighbor_list[dst][1]
self.forward_table[dst] = self.neighbor_list[dst][0]
changes[dst] = (self.forward_table[dst], self.distance_Vector[(me, dst)])
changes = dict(changes.items() + self.calculateDV().items())
self.sendRoutingUpdate(changes)
def calculateDV (self):
me = self
# < destination , (distance, port)>
changes = {}
for k in self.distance_Vector.keys():
src = k[0]
dst = k[1]
if src != me:
option = self.neighbor_list[src][1] + self.distance_Vector[(src, dst)]
if ((not self.distance_Vector.has_key((me, dst))) or (option < self.distance_Vector[(me, dst)]) or (option == self.distance_Vector[(me, dst)] and self.neighbor_list[src][0] < self.forward_table[dst])):
self.distance_Vector[(me, dst)] = option
self.forward_table[dst] = self.neighbor_list[src][0]
changes[dst] = (self.forward_table[dst], self.distance_Vector[(me, dst)])
if src == me and self.distance_Vector[(src, dst)] > 50 and self.distance_Vector[(src, dst)] != float('inf'):
self.distance_Vector[(src, dst)] = float('inf')
changes[dst] = (self.forward_table[dst], self.distance_Vector[(me, dst)])
return changes
def sendRoutingUpdate(self, changes):
if len(changes.items()) != 0:
for port in self.livePort:
updatePacket = RoutingUpdate()
for k,v in changes.items():
if port == v[0] and ((self.neighbor_list.has_key(k) and v[0] != self.neighbor_list[k][0]) or (not self.neighbor_list.has_key(k))):
# poisoned reversed
updatePacket.add_destination(k, float('inf'))
else:
updatePacket.add_destination(k, v[1])
self.send(updatePacket, port, flood=False)
def handle_otherPacket (self, packet, port):
if packet.dst != self and self.distance_Vector[(self, packet.dst)] != float('inf'):
self.send(packet, self.forward_table[packet.dst], flood=False)
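# A standalone sketch (not used by DVRouter) of the relaxation step that calculateDV
# performs: for each (src, dst) distance learned from a neighbor, check whether routing
# through that neighbor beats the current estimate. All names and costs below are
# invented for illustration.
def _relaxation_example():
    INF = float('inf')
    neighbor_cost = {'B': 1, 'C': 5}             # cost from this router to each neighbor
    advertised = {('B', 'D'): 2, ('C', 'D'): 1}  # distances the neighbors advertise
    best = {}
    for (src, dst), dist in advertised.items():
        option = neighbor_cost[src] + dist
        if option < best.get(dst, INF):
            best[dst] = option
    return best  # {'D': 3}: going through 'B' (1 + 2) beats going through 'C' (5 + 1)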
|
from flask import Blueprint
from flask import jsonify
from shutil import copyfile, move
from google.cloud import storage
from google.cloud import bigquery
import dataflow_pipeline.ucc.ucc_mensajes_beam as ucc_mensajes_beam
import dataflow_pipeline.ucc.ucc_agentev_beam as ucc_agentev_beam
import dataflow_pipeline.ucc.ucc_campana_beam as ucc_campana_beam
import dataflow_pipeline.ucc.ucc_inscritos_beam as ucc_inscritos_beam
import dataflow_pipeline.ucc.ucc_integracion_beam as ucc_integracion_beam
import os
import socket
import time
universidad_cooperativa_col_api = Blueprint('universidad_cooperativa_col_api', __name__)
# ucc_api = Blueprint('ucc_api', __name__)
fileserver_baseroute = ("//192.168.20.87", "/media")[socket.gethostname()=="contentobi"]
@universidad_cooperativa_col_api.route("/archivos_sms")
def archivos_sms():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Ucc/Mensajes/Resultado/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[8:16]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-ucc')
# Upload the file to Cloud Storage before sending it to Dataflow for processing
blob = bucket.blob('info-sms/' + archivo)
blob.upload_from_filename(local_route + archivo)
# Once the file is in Cloud Storage, remove the matching records from BigQuery
deleteQuery = "DELETE FROM `contento-bi.ucc.sms` WHERE fecha = '" + mifecha + "'"
#First delete every record that carries that date
client = bigquery.Client()
query_job = client.query(deleteQuery)
#result = query_job.result()
query_job.result() # Run the BigQuery delete job
# With the BigQuery cleanup and the Cloud Storage upload done, run the Dataflow job
mensaje = ucc_mensajes_beam.run('gs://ct-ucc/info-sms/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Ucc/Mensajes/Resultado/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo la peticion Full HD"
response["status"] = True
return jsonify(response), response["code"]
# return "Corriendo : " + mensaje
################################################################################################################################################
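# Note on the date extraction used in these routes: slices such as archivo[8:16] assume
# a fixed filename layout in which a YYYYMMDD date starts at a known character offset.
# For example (hypothetical name), "Mensajes20200131.csv" would yield mifecha == "20200131".
# A tiny sketch of that extraction:
def _fecha_desde_nombre(nombre, inicio=8, fin=16):
    """Return the date substring embedded in a report filename (illustrative only)."""
    return nombre[inicio:fin]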
@universidad_cooperativa_col_api.route("/archivos_agentev")
def archivos_agentev():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Ucc/agente_virtual/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[20:28]
lote = archivo[15:19]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-ucc')
# Upload the file to Cloud Storage before sending it to Dataflow for processing
blob = bucket.blob('info-agente_virtual/' + archivo)
blob.upload_from_filename(local_route + archivo)
# Once the file is in Cloud Storage, remove the matching records from BigQuery
deleteQuery = "DELETE FROM `contento-bi.ucc.agente_virtual` WHERE fecha = '" + mifecha + "'"
#First delete every record that carries that date
client = bigquery.Client()
query_job = client.query(deleteQuery)
#result = query_job.result()
query_job.result() # Run the BigQuery delete job
# With the BigQuery cleanup and the Cloud Storage upload done, run the Dataflow job
mensaje = ucc_agentev_beam.run('gs://ct-ucc/info-agente_virtual/' + archivo, mifecha,lote)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Ucc/agente_virtual/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo la peticion Full HD"
response["status"] = True
return jsonify(response), response["code"]
# return "Corriendo : " + mensaje
############################################################################################################################################
@universidad_cooperativa_col_api.route("/archivos_campanas")
def archivos_campanas():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Ucc/Campanas/Resultado/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[13:21]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-ucc')
# Upload the file to Cloud Storage before sending it to Dataflow for processing
blob = bucket.blob('info-campanas/' + archivo)
blob.upload_from_filename(local_route + archivo)
# Once the file is in Cloud Storage, remove the matching records from BigQuery
deleteQuery = "DELETE FROM `contento-bi.ucc.base_campanas` WHERE fecha = '" + mifecha + "'"
#First delete every record that carries that date
client = bigquery.Client()
query_job = client.query(deleteQuery)
#result = query_job.result()
query_job.result() # Run the BigQuery delete job
# With the BigQuery cleanup and the Cloud Storage upload done, run the Dataflow job
mensaje = ucc_campana_beam.run('gs://ct-ucc/info-campanas/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Ucc/Campanas/Resultado/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo la peticion Full HD"
response["status"] = True
return jsonify(response), response["code"]
# return "Corriendo : " + mensaje
############################################################################################################################################
@universidad_cooperativa_col_api.route("/archivos_inscritos")
def archivos_inscritos():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Ucc/Inscritos/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[15:23]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-ucc')
# Upload the file to Cloud Storage before sending it to Dataflow for processing
blob = bucket.blob('info-inscritos/' + archivo)
blob.upload_from_filename(local_route + archivo)
# Once the file is in Cloud Storage, remove the matching records from BigQuery
deleteQuery = "DELETE FROM `contento-bi.ucc.base_inscritos` WHERE fecha = '" + mifecha + "'"
#First delete every record that carries that date
client = bigquery.Client()
query_job = client.query(deleteQuery)
#result = query_job.result()
query_job.result() # Run the BigQuery delete job
# With the BigQuery cleanup and the Cloud Storage upload done, run the Dataflow job
mensaje = ucc_inscritos_beam.run('gs://ct-ucc/info-inscritos/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Ucc/Inscritos/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo la peticion Full HD"
response["status"] = True
return jsonify(response), response["code"]
# return "Corriendo : " + mensaje
############################################################################################################################################
@universidad_cooperativa_col_api.route("/archivos_integracion")
def archivos_integracion():
response = {}
response["code"] = 400
response["description"] = "No se encontraron ficheros"
response["status"] = False
local_route = fileserver_baseroute + "/BI_Archivos/GOOGLE/Ucc/Integracion/"
archivos = os.listdir(local_route)
for archivo in archivos:
if archivo.endswith(".csv"):
mifecha = archivo[17:25]
storage_client = storage.Client()
bucket = storage_client.get_bucket('ct-ucc')
# Upload the file to Cloud Storage before sending it to Dataflow for processing
blob = bucket.blob('info-integracion/' + archivo)
blob.upload_from_filename(local_route + archivo)
# Once the file is in Cloud Storage, remove the matching records from BigQuery
deleteQuery = "DELETE FROM `contento-bi.ucc.base_integracion` WHERE fecha = '" + mifecha + "'"
#First delete every record that carries that date
client = bigquery.Client()
query_job = client.query(deleteQuery)
#result = query_job.result()
query_job.result() # Run the BigQuery delete job
# With the BigQuery cleanup and the Cloud Storage upload done, run the Dataflow job
mensaje = ucc_integracion_beam.run('gs://ct-ucc/info-integracion/' + archivo, mifecha)
if mensaje == "Corrio Full HD":
move(local_route + archivo, fileserver_baseroute + "/BI_Archivos/GOOGLE/Ucc/Integracion/Procesados/"+archivo)
response["code"] = 200
response["description"] = "Se realizo la peticion Full HD"
response["status"] = True
return jsonify(response), response["code"]
# return "Corriendo : " + mensaje
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 20 20:38:15 2019
@author: imad
"""
import numpy as np
class AdalineSGD(object):
"""
A simple adaptive linear neuron with gradient descent classifier
Parameters
-----------
eta : float
Learning rate (between 0.0 and 1.0)
n_iter : int
Number of passes over the training dataset
-----------
Attributes
----------
w_ : 1D array
Values of weights after fitting
cost_ : list
Sum of Squared Errors(SSE) / 2.0 in every epoch
----------
"""
def __init__(self, eta, n_iter = 10):
self.eta = eta
self.n_iter = n_iter
def fit(self, X, y):
"""
Fit training data
Parameters
----------
X : {array-like}, shape = [n_samples, n_features]
Training vectors where n_samples is number of samples and
n_features is number of features
y : {array-like}, shape = [n_samples]
Target classes corresponding to training vector samples
y[0] is the target class of x[0]
----------
Returns
-------
self : object
-------
"""
#w_[0] is the bias (threshold) weight
#w_[1] corresponds to the weight for the 1st feature
self.w_ = np.zeros(1 + X.shape[1])
self.cost_ = []
for _ in range(self.n_iter):
output = self.activation(X)
errors = y.ravel() - output
self.w_[1:] += self.eta * X.T.dot(errors)
self.w_[0] += self.eta * errors.sum()
cost = (errors**2).sum()/2.0
self.cost_.append(cost)
return self
def net_input(self, X):
"""
Calculate net input
"""
return np.dot(X, self.w_[1:]) + self.w_[0]
def predict(self, X):
"""
Returns class label after unit step
"""
#if net input >= 0, return class label as 1 , -1 otherwise
return np.where(self.net_input(X) >= 0.0, 1, -1)
def activation(self, X):
"""
Compute linear activation
"""
return self.net_input(X)
def printVariable(self, selection=3):
"""
Prints the variables
Parameters
----------
selection : int
Default 3 prints weights and cost
1 prints weights
2 prints cost
----------
"""
if selection == 1:
print("Weights:")
print(self.w_)
elif selection == 2:
print("Errors:")
print(self.cost_)
elif selection == 3:
print("Weights:")
print(self.w_)
print("Errors:")
print(self.cost_)
else:
print("Wrong Selection")
|
import json
import twitter
def oauth_login():
# XXX: Go to http://twitter.com/apps/new to create an app and get values
# for these credentials that you'll need to provide in place of these
# empty string values that are defined as placeholders.
# See https://dev.twitter.com/docs/auth/oauth for more information
# on Twitter's OAuth implementation.
CONSUMER_KEY = 'mhF9p0bY1rj3S5YTtkIIoBNEx'
CONSUMER_SECRET = 'YxZGFGIbKLQ5TdzgMTbu7JjYkATCiyrvYQ4EtTO8GDrw5cNRd3'
OAUTH_TOKEN = '2492696552-qplePWNJQ7tERmhU8SbzUFyEbACV5hZRmRBrt2M'
OAUTH_TOKEN_SECRET = 'FwOmtWJS6xxa2w0uuhlob9OjKjgczq38E5sQKIC8QWkha'
auth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET,
CONSUMER_KEY, CONSUMER_SECRET)
twitter_api = twitter.Twitter(auth=auth)
return twitter_api
def twitter_trends(twitter_api, woe_id):
# Prefix ID with the underscore for query string parameterization.
# Without the underscore, the twitter package appends the ID value
# to the URL itself as a special-case keyword argument.
return twitter_api.trends.place(_id=woe_id)
# Sample usage
twitter_api = oauth_login()
# See https://dev.twitter.com/docs/api/1.1/get/trends/place and
# http://developer.yahoo.com/geo/geoplanet/ for details on
# Yahoo! Where On Earth ID
WORLD_WOE_ID = 23424848
world_trends = twitter_trends(twitter_api, WORLD_WOE_ID)
res=json.dumps(world_trends, indent=1)
print (res)
#US_WOE_ID = 23424977
#us_trends = twitter_trends(twitter_api, US_WOE_ID)
#print (json.dumps(us_trends, indent=1))
|
#File: speeding.py
#Author: Joel Okpara
#Date: 2/22/2016
#Section: 04
#E-mail: joelo1@umbc.edu
#Description: This program calculates the fine for driving over
# the speed limit.
def main():
speedLimit = int(input("What was the speed limit?"))
userSpeed = float(input("At how many miles per hour were you driving?"))
overSpeed = userSpeed - speedLimit
print("You were over the speed limit by", overSpeed,"MPH")
if overSpeed < 5:
print ("You receive no ticket... this time.")
elif overSpeed >= 5 and overSpeed < 15:
print ("You receive a ticket for a $100 fine!")
elif overSpeed >= 15 and overSpeed < 30:
print("You receive a ticket for a $200 fine!")
elif overSpeed >= 30:
print("You receive a ticket for a $500 fine, and a mandatory court date!")
main()
|
from common.run_method import RunMethod
import allure
@allure.step("极数据/权限部门查询")
def geekData_areaRelation_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: query-string parameters appended to the URL
:body: request body
:return_json: whether to return the response as JSON (default True)
:header: request headers
:host: target environment/host for the request
:return: JSON response by default; with return_json=False the raw response is returned
'''
name = "极数据/权限部门查询"
url = f"/service-statistics/geekData/areaRelation"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极数据/全部目标方案")
def geekData_allTarget_get(params=None, header=None, return_json=True, **kwargs):
'''
:param: query-string parameters appended to the URL
:body: request body
:return_json: whether to return the response as JSON (default True)
:header: request headers
:host: target environment/host for the request
:return: JSON response by default; with return_json=False the raw response is returned
'''
name = "极数据/全部目标方案"
url = f"/service-statistics/geekData/allTarget"
res = RunMethod.run_request("GET", url, params=params, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极数据/人次/折线图")
def geekData_personTimeRange_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: query-string parameters appended to the URL
:body: request body
:return_json: whether to return the response as JSON (default True)
:header: request headers
:host: target environment/host for the request
:return: JSON response by default; with return_json=False the raw response is returned
'''
name = "极数据/人次/折线图"
url = f"/service-statistics/geekData/personTimeRange"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极数据/人次/明细")
def geekData_personTimeDetail_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: query-string parameters appended to the URL
:body: request body
:return_json: whether to return the response as JSON (default True)
:header: request headers
:host: target environment/host for the request
:return: JSON response by default; with return_json=False the raw response is returned
'''
name = "极数据/人次/明细"
url = f"/service-statistics/geekData/personTimeDetail"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极数据/人次/汇总")
def geekData_personTimeCount_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: query-string parameters appended to the URL
:body: request body
:return_json: whether to return the response as JSON (default True)
:header: request headers
:host: target environment/host for the request
:return: JSON response by default; with return_json=False the raw response is returned
'''
name = "极数据/人次/汇总"
url = f"/service-statistics/geekData/personTimeCount"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极数据/人次/完成率")
def geekData_personTimeAchieveRate_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: query-string parameters appended to the URL
:body: request body
:return_json: whether to return the response as JSON (default True)
:header: request headers
:host: target environment/host for the request
:return: JSON response by default; with return_json=False the raw response is returned
'''
name = "极数据/人次/完成率"
url = f"/service-statistics/geekData/personTimeAchieveRate"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极数据/续报率/数据查询")
def geekData_continueRate_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: query-string parameters appended to the URL
:body: request body
:return_json: whether to return the response as JSON (default True)
:header: request headers
:host: target environment/host for the request
:return: JSON response by default; with return_json=False the raw response is returned
'''
name = "极数据/续报率/数据查询"
url = f"/service-statistics/geekData/continueRate"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极数据/续报率/折线图")
def geekData_continueRateRange_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: query-string parameters appended to the URL
:body: request body
:return_json: whether to return the response as JSON (default True)
:header: request headers
:host: target environment/host for the request
:return: JSON response by default; with return_json=False the raw response is returned
'''
name = "极数据/续报率/折线图"
url = f"/service-statistics/geekData/continueRateRange"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极数据/满班率")
def geekData_fullClassRate_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: query-string parameters appended to the URL
:body: request body
:return_json: whether to return the response as JSON (default True)
:header: request headers
:host: target environment/host for the request
:return: JSON response by default; with return_json=False the raw response is returned
'''
name = "极数据/满班率"
url = f"/service-statistics/geekData/fullClassRate"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极数据/退费率/总经办&区域校长")
def geekData_refund_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: query-string parameters appended to the URL
:body: request body
:return_json: whether to return the response as JSON (default True)
:header: request headers
:host: target environment/host for the request
:return: JSON response by default; with return_json=False the raw response is returned
'''
name = "极数据/退费率/总经办&区域校长"
url = f"/service-statistics/geekData/refund"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极数据/退费率/中心校主管")
def geekData_refundForCenterMaster_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: query-string parameters appended to the URL
:body: request body
:return_json: whether to return the response as JSON (default True)
:header: request headers
:host: target environment/host for the request
:return: JSON response by default; with return_json=False the raw response is returned
'''
name = "极数据/退费率/中心校主管"
url = f"/service-statistics/geekData/refundForCenterMaster"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极运营/极数据/续报率/年级数据")
def geekData_continueRate_grade_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: query-string parameters appended to the URL
:body: request body
:return_json: whether to return the response as JSON (default True)
:header: request headers
:host: target environment/host for the request
:return: JSON response by default; with return_json=False the raw response is returned
'''
name = "极运营/极数据/续报率/年级数据"
url = f"/service-statistics/geekData/continueRate/grade"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极运营/极数据/续报率/明细")
def geekData_continueRate_detail_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: query-string parameters appended to the URL
:body: request body
:return_json: whether to return the response as JSON (default True)
:header: request headers
:host: target environment/host for the request
:return: JSON response by default; with return_json=False the raw response is returned
'''
name = "极运营/极数据/续报率/明细"
url = f"/service-statistics/geekData/continueRate/detail"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极运营/极数据/满班率/明细")
def geekData_fullClassRate_detail_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: query-string parameters appended to the URL
:body: request body
:return_json: whether to return the response as JSON (default True)
:header: request headers
:host: target environment/host for the request
:return: JSON response by default; with return_json=False the raw response is returned
'''
name = "极运营/极数据/满班率/明细"
url = f"/service-statistics/geekData/fullClassRate/detail"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极运营/极数据/退费率/明细")
def geekData_refund_detail_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: query-string parameters appended to the URL
:body: request body
:return_json: whether to return the response as JSON (default True)
:header: request headers
:host: target environment/host for the request
:return: JSON response by default; with return_json=False the raw response is returned
'''
name = "极运营/极数据/退费率/明细"
url = f"/service-statistics/geekData/refund/detail"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极运营/极数据/层级校区")
def geekData_treeSchools_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: query-string parameters appended to the URL
:body: request body
:return_json: whether to return the response as JSON (default True)
:header: request headers
:host: target environment/host for the request
:return: JSON response by default; with return_json=False the raw response is returned
'''
name = "极运营/极数据/层级校区"
url = f"/service-statistics/geekData/treeSchools"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极数据/首页/PK")
def geekData_index_static_pk_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: query-string parameters appended to the URL
:body: request body
:return_json: whether to return the response as JSON (default True)
:header: request headers
:host: target environment/host for the request
:return: JSON response by default; with return_json=False the raw response is returned
'''
name = "极数据/首页/PK"
url = f"/service-statistics/geekData/index/static/pk"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极数据/首页/目标完成榜")
def geekData_index_achieve_rank_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: query-string parameters appended to the URL
:body: request body
:return_json: whether to return the response as JSON (default True)
:header: request headers
:host: target environment/host for the request
:return: JSON response by default; with return_json=False the raw response is returned
'''
name = "极数据/首页/目标完成榜"
url = f"/service-statistics/geekData/index/achieve/rank"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
@allure.step("极数据/首页/入口年级")
def geekData_index_entry_continue_post(params=None, body=None, header=None, return_json=True, **kwargs):
'''
:param: query-string parameters appended to the URL
:body: request body
:return_json: whether to return the response as JSON (default True)
:header: request headers
:host: target environment/host for the request
:return: JSON response by default; with return_json=False the raw response is returned
'''
name = "极数据/首页/入口年级"
url = f"/service-statistics/geekData/index/entry/continue"
res = RunMethod.run_request("POST", url, params=params, body=body, header=header, return_json=return_json, name=name, **kwargs)
return res
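# A minimal usage sketch (illustrative only): how one of the wrappers above could be
# called from a test. The header and params below are placeholders, not real project
# configuration, and assume RunMethod.run_request accepts them as dictionaries.
def example_geek_data_call():
    header = {"Authorization": "token-placeholder"}  # hypothetical header
    return geekData_allTarget_get(params={"schoolId": 1}, header=header)  # hypothetical params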
|
__author__ = 'Vlad Iulian Schnakovszki'
# Using this because: http://stackoverflow.com/questions/1272138/baseexception-message-deprecated-in-python-2-6
class MyException(Exception):
def _get_message(self):
return self._message
def _set_message(self, message):
self._message = message
message = property(_get_message, _set_message)
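# A short usage sketch: the property above keeps code that reads or assigns .message
# working without the BaseException.message deprecation warning.
if __name__ == '__main__':
    exc = MyException('something went wrong')
    exc.message = 'something went wrong (with more detail)'
    print(exc.message)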
|
import os
import win32com.client as win32
from tkinter import *
from tkinter import filedialog
from tkinter import messagebox
import openpyxl
class SampleApp(Tk):
def __init__(self):
Tk.__init__(self)
self._frame = None
self.switch_frame(main_Page)
def switch_frame(self, frame_class):
"""Destroys current frame and replaces it with a new one."""
new_frame = frame_class(self)
if self._frame is not None:
self._frame.destroy()
self._frame = new_frame
self._frame.pack()
# class login_Page(Frame):
# def __init__(self, master):
# Frame.__init__(self, master)
#
# self.login_email_label = Label(self, text='Email address: ')
# self.login_password_label = Label(self, text='Password: ')
# self.login_email = Entry(self, show=None)
# self.login_password = Entry(self, show='*')
#
# self.login_email_label.grid(row=0,column=0)
# self.login_password_label.grid(row=1,column=0)
# self.login_email.grid(row=0,column=1, columnspan=5)
# self.login_password.grid(row=1,column=1, columnspan=5)
#
# self.login_btn = Button(self, text='Login', command=lambda: master.switch_frame(second_Page))
# self.login_btn.grid(row=2, column=8)
class main_Page(Frame):
def __init__(self, master):
Frame.__init__(self, master)
self.title_list = []
self.email_lists = []
self.title_dict = {}
self.file_list = []
self.cc_list = []
self.left_frame = Frame(self, width=400, height=500)
self.middle_frame = Frame(self, width=500, height=500)
self.right_frame = Frame(self, width=600, height=500)
self.bottom_frame = Frame(self, width=1500, height=100)
self.left_frame.grid(row=0, column=0)
self.middle_frame.grid(row=0, column=1)
self.right_frame.grid(row=0, column=2)
self.bottom_frame.grid(row=1, columnspan=3)
# left part
self.files_lists_box = Listbox(self.left_frame, selectmode='multiple', exportselection=0)
self.files_lists_box.place(height=400, width=350, x=25, y=20)
self.open_files_btn = Button(self.left_frame, text='Open files', command=self.open_files)
self.open_files_btn.place(height=40, width=150, x=25, y=435)
self.files_delete_btn = Button(self.left_frame, text='Delete all', command=self.delete_files)
self.files_delete_btn.place(height=40, width=150, x=225, y=435)
# middle part
self.email_title_label = Label(self.middle_frame, text='Title')
self.email_title_label.place(x=20, y=5)
self.email_title_box = Entry(self.middle_frame, show=None)
self.email_title_box.place(height=20, width=450, x=25, y=25)
self.email_content_label = Label(self.middle_frame, text='Content')
self.email_content_label.place(x=20, y=45)
self.email_content_box = Text(self.middle_frame)
self.email_content_box.place(height=410, width=450, x=25, y=65)
# right part
self.email_lists_box = Listbox(self.right_frame)
self.email_lists_box.bind('<Double-Button-1>', self.selected_email_list_title)
self.email_lists_box.place(height=400, width=300, x=25, y=20)
self.open_mail_lists_btn = Button(self.right_frame, text='Open Email Lists', command=self.open_mail_lists)
self.open_mail_lists_btn.place(height=40, width=150, x=25, y=435)
self.cc_list_box = Listbox(self.right_frame, selectmode='multiple')
self.cc_list_box.place(height=400, width=225, x=350, y=20)
self.cc_open_btn = Button(self.right_frame, text='Add cc', command=self.cc_selection_window)
self.cc_open_btn.place(height=40, width=150, x=225, y=435)
self.cc_delete_btn = Button(self.right_frame, text='Delete cc', command=self.cc_delete)
self.cc_delete_btn.place(height=40, width=150, x=425, y=435)
# bottom part
self.generate_btn = Button(self.bottom_frame, text='Generate', command=self.generate_email)
self.generate_btn.place(height=40, width=150, x=575, y=25)
self.send_button = Button(self.bottom_frame, text='Send', command=self.send_email)
self.send_button.place(height=40, width=150, x=775, y=25)
def selected_email_list_title(self, event):
self.email_title_box.delete(0, 'end')
self.email_title_box.insert('end', self.title_dict[self.email_lists_box.selection_get()])
def generate_email(self):
outlook = win32.Dispatch('outlook.application')
try:
mail = outlook.CreateItem(0)
mail.To = self.email_lists_box.selection_get()
mail.CC = ';'.join(self.cc_list)
mail.Subject = self.email_title_box.get()
mail.Body = self.email_content_box.get('1.0', 'end')
# To attach a file to the email (optional):
selected_files = [self.files_lists_box.get(idx) for idx in self.files_lists_box.curselection()]
for file in self.file_list:
for selected_file in selected_files:
if selected_file.replace('\\', '/') in file:
attachment = file
mail.Attachments.Add(attachment)
mail.Display(True)
self.files_lists_box.selection_clear(0, 'end')
except:
messagebox.showerror('Error', 'Something is wrong.')
def send_email(self):
outlook = win32.Dispatch('outlook.application')
try:
mail = outlook.CreateItem(0)
mail.To = self.email_lists_box.selection_get()
mail.CC = ';'.join(self.cc_list)
mail.Subject = self.email_title_box.get()
mail.Body = self.email_content_box.get('1.0', 'end')
# To attach a file to the email (optional):
selected_files = [self.files_lists_box.get(idx) for idx in self.files_lists_box.curselection()]
for file in self.file_list:
for selected_file in selected_files:
if selected_file.replace('\\', '/') in file:
attachment = file
mail.Attachments.Add(attachment)
mail.Send()
messagebox.showinfo('Success', 'Email has sent')
self.files_lists_box.selection_clear(0, 'end')
except:
messagebox.showerror('Error', 'Something is wrong. Your email has not sent')
def open_files(self):
files = filedialog.askopenfilenames(initialdir="C://", title="select files",
filetypes=[('all files', '*.*')])
for file in files:
if file not in self.file_list:
self.file_list.append(file)
self.files_lists_box.insert('end', os.path.join(os.path.basename(os.path.dirname(file)), os.path.basename(file)))
def open_mail_lists(self):
self.title_list = []
self.email_lists = []
self.email_lists_box.delete(0, 'end')
emails_file = filedialog.askopenfilename(initialdir="C://", title="select excel files",
filetypes=[('xlsx files', '*.xlsx')])
wb = openpyxl.load_workbook(emails_file)
ws = wb.active
for row in ws.iter_rows():
for cell in row:
try:
if 'title' in cell.value:
for i in range(ws.max_row-cell.row):
self.title_list.append(cell.offset(row=i+1).value)
if 'email-list' in cell.value:
for i in range(ws.max_row-cell.row):
temp_list = []
for y in range(ws.max_column-cell.column+1):
if cell.offset(row=i+1, column=y).value:
temp_list.append(cell.offset(row=i+1, column=y).value)
if not cell.offset(row=i+1, column=y+1).value:
self.email_lists.append(';'.join(temp_list))
self.email_lists_box.insert('end', ';'.join(temp_list))
except (AttributeError, TypeError):
continue
self.title_dict = dict(zip(self.email_lists, self.title_list))
def delete_files(self):
self.files_lists_box.delete(0, 'end')
self.file_list = []
def address_book(self):
outlook = win32.gencache.EnsureDispatch('Outlook.Application')
onMAPI = outlook.GetNamespace("MAPI")
ofContacts = onMAPI.GetDefaultFolder(win32.constants.olFolderContacts)
for item in ofContacts.Items:
self.cc_list_box.insert('end', item.Email1Address)
def cc_delete(self):
selected_cc = [self.cc_list_box.get(idx) for idx in self.cc_list_box.curselection()]
self.cc_list_box.delete(0, 'end')
for cc in selected_cc:
self.cc_list.remove(cc)
for remain_cc in self.cc_list:
self.cc_list_box.insert(0, remain_cc)
def cc_selection_window(self):
def cc_search():
search_list_box.delete(0, 'end')
outlook = win32.gencache.EnsureDispatch('Outlook.Application')
onMAPI = outlook.GetNamespace("MAPI")
ofContacts = onMAPI.GetDefaultFolder(win32.constants.olFolderContacts)
search_text = search_text_box.get()
for item in ofContacts.Items:
if search_text in item.FullName or search_text.lower() in item.FullName:
search_list_box.insert('end', item.Email1Address)
elif search_text in item.Email1Address:
search_list_box.insert('end', item.Email1Address)
def cc_add():
selected_cc = [search_list_box.get(idx) for idx in search_list_box.curselection()]
for cc in selected_cc:
if cc not in self.cc_list:
self.cc_list.append(cc)
self.cc_list_box.insert(0, cc)
top = Toplevel()
top.title('Select cc from outlook address book')
top.geometry('500x500')
search_list_box = Listbox(top, selectmode='multiple')
search_text_box = Entry(top)
search_btn = Button(top, text='Search', command=cc_search)
add_btn = Button(top, text='Add', command=cc_add)
search_list_box.place(height=370, width=300, relx=0.5, rely=0.05, anchor='n', bordermode=OUTSIDE)
search_text_box.place(height=30, width=300, relx=0.5, rely=0.8, anchor='n', bordermode=OUTSIDE)
search_btn.place(height=30, width=60, relx=0.35, rely=0.88, anchor='n', bordermode=OUTSIDE)
add_btn.place(height=30, width=60, relx=0.65, rely=0.88, anchor='n', bordermode=OUTSIDE)
app = SampleApp()
app.title('Yau Lam very handsome')
app.geometry('1500x600')
app.mainloop()
|
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
fs = 2e6 #sample frequency
rb = 1e3 #bit rate
fc = 0.5e6 #carrier frequency
t_sim = 1e-2 #simulation time
# Random +/-1 source bits; the sample count passed to randint must be an integer
src_bits = 1-2*np.random.randint(0,2,int(t_sim*rb))
class mod_iq:
def __init__(self,fc,fs,mod_type=2,phase=0,alpha=0.5,rep=1):
self.fc = fc
self.fs = fs
self.phase = phase
self.mod_type = mod_type
def mod_gen(self,di,dq):
# Generator that yields one complex carrier sample per iteration (di/dq are not used yet)
i = 0
while True:
d_iq = np.exp(1j*(2*np.pi*(self.fc/self.fs*i+self.phase)))
yield d_iq
i += 1
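# A minimal usage sketch: pull a few complex carrier samples from the generator above.
# di/dq are passed as None because mod_gen does not use them yet.
if __name__ == '__main__':
    carrier = mod_iq(fc, fs).mod_gen(di=None, dq=None)
    samples = np.array([next(carrier) for _ in range(8)])
    print(np.round(samples, 3))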
|
from django.contrib.auth.models import User
from django.db import models
from django.conf import settings
class Story(models.Model):
place = models.CharField(max_length=150)
people_involved = models.TextField(blank=True)
images = models.ManyToManyField('StoryImage', blank=True)
date_posted = models.DateTimeField(auto_now_add=True)
user = models.ForeignKey(User)
class Admin:
pass
class StoryImage(models.Model):
image = models.FileField(upload_to=settings.UPLOADS_DIR)
class Admin:
pass
class TestImage(models.Model):
image = models.FileField(upload_to='test')
class Admin:
pass
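# A minimal usage sketch (assumes a configured Django project and database): create a
# story, attach an uploaded image, and link them through the many-to-many field.
# The field values are placeholders.
def create_story_example(user, uploaded_file):
    image = StoryImage.objects.create(image=uploaded_file)
    story = Story.objects.create(place='Example place', people_involved='Alice, Bob', user=user)
    story.images.add(image)
    return story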
|
# Copyright (c) 2018, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
# This file contains a set of utilities for parsing minidumps.
import ctypes
import mmap
import os
import sys
class Enum(object):
def __init__(self, type, name2value):
self.name2value = name2value
self.value2name = {v: k for k, v in name2value.items()}
self.type = type
def from_raw(self, v):
if v not in self.value2name:
return 'Unknown(' + str(v) + ')'
return self.value2name[v]
def to_raw(self, v):
return self.name2value[v]
class Descriptor(object):
"""A handy wrapper over ctypes.Structure"""
def __init__(self, fields):
self.fields = fields
self.ctype = Descriptor._GetCtype(fields)
self.size = ctypes.sizeof(self.ctype)
def Read(self, address):
return self.ctype.from_address(address)
@staticmethod
def _GetCtype(fields):
raw_fields = []
wrappers = {}
for field in fields:
(name, type) = field
if isinstance(type, Enum):
raw_fields.append(('_raw_' + name, type.type))
wrappers[name] = type
else:
raw_fields.append(field)
class Raw(ctypes.Structure):
_fields_ = raw_fields
_pack_ = 1
def __getattribute__(self, name):
if name in wrappers:
return wrappers[name].from_raw(
getattr(self, '_raw_' + name))
else:
return ctypes.Structure.__getattribute__(self, name)
def __repr__(self):
return '{' + ', '.join(
'%s: %s' % (field, self.__getattribute__(field))
for field, _ in fields) + '}'
return Raw
# Structures below are based on the information in the MSDN pages and
# Breakpad/Crashpad sources.
MINIDUMP_HEADER = Descriptor([('signature', ctypes.c_uint32),
('version', ctypes.c_uint32),
('stream_count', ctypes.c_uint32),
('stream_directories_rva', ctypes.c_uint32),
('checksum', ctypes.c_uint32),
('time_date_stampt', ctypes.c_uint32),
('flags', ctypes.c_uint64)])
MINIDUMP_LOCATION_DESCRIPTOR = Descriptor([('data_size', ctypes.c_uint32),
('rva', ctypes.c_uint32)])
MINIDUMP_STREAM_TYPE = {
'MD_UNUSED_STREAM': 0,
'MD_RESERVED_STREAM_0': 1,
'MD_RESERVED_STREAM_1': 2,
'MD_THREAD_LIST_STREAM': 3,
'MD_MODULE_LIST_STREAM': 4,
'MD_MEMORY_LIST_STREAM': 5,
'MD_EXCEPTION_STREAM': 6,
'MD_SYSTEM_INFO_STREAM': 7,
'MD_THREAD_EX_LIST_STREAM': 8,
'MD_MEMORY_64_LIST_STREAM': 9,
'MD_COMMENT_STREAM_A': 10,
'MD_COMMENT_STREAM_W': 11,
'MD_HANDLE_DATA_STREAM': 12,
'MD_FUNCTION_TABLE_STREAM': 13,
'MD_UNLOADED_MODULE_LIST_STREAM': 14,
'MD_MISC_INFO_STREAM': 15,
'MD_MEMORY_INFO_LIST_STREAM': 16,
'MD_THREAD_INFO_LIST_STREAM': 17,
'MD_HANDLE_OPERATION_LIST_STREAM': 18,
}
MINIDUMP_DIRECTORY = Descriptor([('stream_type',
Enum(ctypes.c_uint32, MINIDUMP_STREAM_TYPE)),
('location',
MINIDUMP_LOCATION_DESCRIPTOR.ctype)])
MINIDUMP_MISC_INFO_2 = Descriptor([
('SizeOfInfo', ctypes.c_uint32),
('Flags1', ctypes.c_uint32),
('ProcessId', ctypes.c_uint32),
('ProcessCreateTime', ctypes.c_uint32),
('ProcessUserTime', ctypes.c_uint32),
('ProcessKernelTime', ctypes.c_uint32),
('ProcessorMaxMhz', ctypes.c_uint32),
('ProcessorCurrentMhz', ctypes.c_uint32),
('ProcessorMhzLimit', ctypes.c_uint32),
('ProcessorMaxIdleState', ctypes.c_uint32),
('ProcessorCurrentIdleState', ctypes.c_uint32),
])
MINIDUMP_MISC1_PROCESS_ID = 0x00000001
# A helper to get a raw address of the memory mapped buffer returned by
# mmap.
def BufferToAddress(buf):
obj = ctypes.py_object(buf)
address = ctypes.c_void_p()
length = ctypes.c_ssize_t()
ctypes.pythonapi.PyObject_AsReadBuffer(obj, ctypes.byref(address),
ctypes.byref(length))
return address.value
class MinidumpFile(object):
"""Class for reading minidump files."""
_HEADER_MAGIC = 0x504d444d
def __init__(self, minidump_name):
self.minidump_name = minidump_name
self.minidump_file = open(minidump_name, 'r')
self.minidump = mmap.mmap(
self.minidump_file.fileno(), 0, access=mmap.ACCESS_READ)
self.minidump_address = BufferToAddress(self.minidump)
self.header = self.Read(MINIDUMP_HEADER, 0)
if self.header.signature != MinidumpFile._HEADER_MAGIC:
raise Exception('Unsupported minidump header magic')
self.directories = []
offset = self.header.stream_directories_rva
for _ in range(self.header.stream_count):
self.directories.append(self.Read(MINIDUMP_DIRECTORY, offset))
offset += MINIDUMP_DIRECTORY.size
def GetProcessId(self):
for dir in self.directories:
if dir.stream_type == 'MD_MISC_INFO_STREAM':
info = self.Read(MINIDUMP_MISC_INFO_2, dir.location.rva)
if info.Flags1 & MINIDUMP_MISC1_PROCESS_ID != 0:
return info.ProcessId
return -1
def Read(self, what, offset):
return what.Read(self.minidump_address + offset)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.minidump.close()
self.minidump_file.close()
# Returns process id of the crashed process recorded in the given minidump.
def GetProcessIdFromDump(path):
try:
with MinidumpFile(path) as f:
return int(f.GetProcessId())
except:
return -1
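# Example usage (illustrative only; the dump path below is hypothetical):
#
#   pid = GetProcessIdFromDump('/tmp/crash.dmp')
#   if pid != -1:
#       print('Crashed process id: %d' % pid)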
|
def d_separated(G, x, y, z): ...
def minimal_d_separator(G, u, v): ...
def is_minimal_d_separator(G, u, v, z): ...
|
number = int(input('Enter number : '))
integer = number
number1 = 1
number2 = 1
count = 0
if integer <= 0:
print("Please enter positive integer")
elif integer == 1:
print("Fibonacci sequence",integer,":")
print(number1)
else:
print("Fibonacci sequence",integer,":")
while count < integer:
print(number1,end=' ')
numbertotal = number1 + number2
number1 = number2
number2 = numbertotal
count +=1
|
# -*- coding: utf-8 -*-
import pymysql.cursors
# The connection parameters below are placeholders - replace them with real
# credentials before running this script.
connection = pymysql.connect(host='hostname',
                             user='username',
                             password='password',
                             db='dbname',
                             charset='utf8',
                             cursorclass=pymysql.cursors.DictCursor)
try:
with connection.cursor() as cursor:
sql= "create table fixed_card_data(fixed_card_id int auto_increment primary key,name varchar(300)," \
"color varchar(50),supertype varchar(30),subtype varchar(50),text varchar(2000),cost varchar(20)," \
"cardtype varchar(50),cmc varchar(10),redirect_name varchar(300),redirect_id int(10) unsigned default 0,"\
"power varchar(10),toughness varchar(10),loyalty varchar(10),index(fixed_card_id))"
cursor.execute(sql)
sql= "create table variables_card_data(variables_card_id int auto_increment primary key,name_id int," \
"col_num varchar(10),rarity varchar(10),illustrator varchar(100),version varchar(10)," \
"language varchar(200),image_uri varchar(100)," \
"foreign key (name_id) references fixed_card_data(fixed_card_id) on delete cascade," \
"index(variables_card_id))"
cursor.execute(sql)
sql = "create table new_variables_card_data (new_variables_card_id int auto_increment primary key , " \
"name_id int , col_num varchar(10) , rarity varchar(20), illustrator varchar(100),version varchar(10)," \
"image_uri varchar(100),redirect_name_id int(10) unsigned default 0,redirect_iamge_uri varchar(100)," \
"lang_en boolean,lang_es boolean,lang_fr boolean,lang_de boolean,lang_it boolean,lang_pt boolean," \
"lang_ja boolean,lang_ko boolean,lang_ru boolean,lang_cs boolean,lang_ct boolean,lang_sa boolean," \
"lang_he boolean,lang_grc boolean,lang_la boolean,lang_ar boolean,lang_px boolean," \
"foreign key (name_id) references fixed_card_data(fixed_card_id) on delete cascade," \
"index(new_variables_card_id));"
cursor.execute(sql)
sql="show tables"
cursor.execute(sql)
dbdata = cursor.fetchall()
for rows in dbdata:
print(rows)
finally:
connection.close()
|
# coding: utf-8
# # Problem 6: Hash Tables
# You've used Python's `dict` data type extensively now. Recall that it maps keys to values. But how is it implemented under-the-hood? One way is via a classic computer science technique known as a **hash table**.
#
# For our purposes, a hash table has two parts:
#
# 1. A list of **buckets**. The buckets are stored in a Python `list` of a certain maximum size. Each bucket has a **bucket ID**, which is its position in the list of buckets. Each bucket is itself _another_ Python list, which will hold the values. But more on that detail later (under **Collisions**, below).
# 2. A **hash function**, which converts any given key into the bucket ID in which it belongs. We'll sometimes refer to the output of the hash function as the **hash value** of the given key.
#
# The hash function is usually decomposed into two steps. The first step converts the key into some non-negative integer, which may be very large. So, we can take this large integer _modulo_ the number of buckets, to get a valid bucket ID. (Recall that $a$ modulo $b$ means the remainder after dividing $a$ by $b$, which in Python you can compute using the `%` operator.)
# In this problem, you will implement a hash table that maps names (the keys, given as strings) to phone numbers (the values). You may find it helpful to keep the following image in mind when reasoning about how a hash table works. First, suppose you have 16 `buckets`:
#
#  Function")
#
# Further suppose that the first step of our hash function calculates the value of 124,722 for the key, `John Smith`. We would then take this value `modulo 16`, the size of `buckets`, and are left with the bucket ID of `124722 % 16 == 2`. We put John's information in this bucket, which is location 2 in our list of buckets.
#
# What is the motivation for this scheme? **If** the hash function does a good job of spreading out keys among the buckets, then the average time it takes to find a value in the hash table will be proportional to the average bucket size, rather than being proportional to the size of the entire list. And if we have enough buckets, then each bucket will (hopefully) be small, so that searching the bucket will be fast even if we use brute force.
#
# > *One detail.* Unlike the image above, in this problem you will store *both* the key and value into the hash table. That is, you will be inserting `(key, value)` pairs into the buckets, instead of just the value as shown in the illustration above.
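# To make the bucket structure concrete, here is a tiny illustrative sketch
# (not part of the graded exercises): a list of 16 empty buckets, and the
# bucket ID that the example key from the figure above lands in.
#
#     buckets = [[] for _ in range(16)]   # 16 empty buckets
#     124722 % 16                         # -> 2, so "John Smith" goes in bucket 2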
# **Exercise 0 (2 points)**: There are many ways to compute a hash value for a string. We want a method that will add an element of uniqueness for each string, so that any two strings have a small likelihood of being hashed to the same value. One way to do that is to calculate a value of this form, for a given string `s`:
#
# $$
# \mbox{HashFunction}(s) = \left(\mbox{InitialValue} \:+ \sum_{i=0}^{\mbox{len}(s)-1} \mbox{Seed}^i * \mbox{charvalue}(s[i])\right) \mbox{ mod } \mbox{NumBuckets}.
# $$
#
# The "InitialValue" and "Seed" are parameters. The function "charvalue`(c)`" converts a single letter into a number. Finally, "NumBuckets" is the number of buckets.
#
# For example, consider the input string `s="midterm"` and suppose there are 10 buckets (NumBuckets=10). Suppose that InitialValue is 37 and Seed is 7. Further suppose that for the letter `a`, charvalue(`'a'`) is 97, and that subsequent letters are numbered consecutively (i.e., `b` is 98, ..., `d` is 100, ..., `i` is 105, `m` is 109, ...). Then HashFunction(`"midterm"`) is
#
# $$
# (37+7^0*109+7^1*105+7^2*100+7^3*116+7^4*101+7^5*114+7^6*109) \mbox{ mod } 10 = (15,027,809 \mbox{ mod } 10) = 9.
# $$
# We will give you a `seed` value and an `initial value`. Create a function that implements the formula for HashFunction(`s`), given the string `s` and number of buckets `num_buckets`, returning the bucket ID.
#
# > Recall that the `a % b` operator implements `a` modulo `b`. To convert an individual letter to an integer (i.e., to implement charvalue(c)), use Python's [`ord`](https://docs.python.org/3/library/functions.html#ord) function.
# In[3]:
SEED = 7
INITIALVALUE = 37
# In[4]:
def hash_function(s, num_buckets):
    assert num_buckets > 0
    # Sum Seed**i * charvalue(s[i]) over the characters of s, add the
    # initial value, and take the result modulo the number of buckets.
    hash_sum = sum(ord(c) * (SEED ** i) for i, c in enumerate(s))
    return (hash_sum + INITIALVALUE) % num_buckets
hash_function('midterm', 10)
hash_function("jack", 20)
# In[5]:
#test_hash (2 points):
assert type(hash_function('midterm', 10)) is int
assert hash_function('midterm', 10) == 9
assert hash_function('problems', 12) == 5
assert hash_function('problem', 1) == 0
print ("\n(Passed!)")
# **Collisions.** Collisions occur when two keys have the same bucket ID. There are many methods to deal with collisions, but you will implement a method known as _separate chaining_.
#
# In separate chaining, each bucket is a list, again implemented as a Python `list`. That way, it can hold multiple items of the same hash value. When adding a new item to the hash table, you can simply append that item onto the bucket.
#
# In other words, the overall hash table is a list of lists: a list of buckets, where each bucket is also a list (of items).
#
# Here is a helpful graphic displaying this concept. Focus on the `keys` for `John Smith` and `Sandra Dee`, supposing that their hash values collide:
#
# 
#
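# For instance, if "John Smith" and "Sandra Dee" hash to the same bucket, both
# (key, value) tuples are simply appended to that bucket's inner list (the
# bucket index and phone numbers below are made up for illustration):
#
#     buckets[152].append(("John Smith", "521-1234"))
#     buckets[152].append(("Sandra Dee", "521-9655"))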
# **Exercise 1 (3 points)**: Create the `hash_insert()` function for your hash table. This function takes three arguments: the string `key`, the string `value`, and `buckets`, a Python `list` that represents the hash table buckets. Your function should add the given `(key, value)` tuple to the appropriate bucket, implementing the separate chaining logic if there is a collision.
#
# And if an identical `key` already exists in the hash table, your function should replace the current value with this new `value`.
#
# A reasonable algorithm for this problem would be the following:
#
# 1. Compute the bucket ID for the given `key`.
# 2. If the bucket is empty, append (`key`, `value`) to that bucket.
# 3. If the bucket is not empty, then there are two cases:
# a. If `key` is already in the bucket, then update the old value with the given `value`.
# b. If `key` is not yet in the bucket, then simply append (`key`, `value`) to the bucket.
#
# You may assume that every element of `buckets` is a valid `list` object. (It might be empty, but it will be a list.)
# In[6]:
def hash_insert(key, value, buckets):
assert len(buckets) > 0
assert all([type(b) is list for b in buckets])
### BEGIN SOLUTION
hash_value = hash_function(key, len(buckets))
bucket = buckets[hash_value]
if len(bucket):
for i, (k, v) in enumerate(bucket):
if k == key:
bucket[i] = (k, value)
return
bucket.append((key, value))
# In[7]:
#test_add (3 points):
table = [[] for i in range(20)]
hash_insert("jack", "8048148198", table)
hash_insert("asdf1", "8048148198", table)
assert type(table[14]) is list
assert len(table[14]) == 2
hash_insert("asdf2", "8048148198", table)
hash_insert("asdf3", "8048148198", table)
assert len(table[15]) == 1
assert len(table[16]) == 1
print ("\n(Passed!)")
# **Exercise 2 (2 points)**. Implement a search operation, `hash_search()`. This operation would be used to implement Python's `dict[key]`.
#
# Your function should implement the following logic. Given a `key` and `buckets`, return the `value` associated with that `key`. If the key does not exist, return `None`.
# In[10]:
def hash_search(key, buckets):
assert len(buckets) > 0 and all([type(b) is list for b in buckets])
### BEGIN SOLUTION
bucket = buckets[hash_function(key, len(buckets))]
for (k, v) in bucket:
if k == key:
return v
# In[11]:
#test_cell
assert(hash_search("evan", table)) is None
assert hash_search("asdf1", table) == '8048148198'
print ("\n(Passed!)")
# **Putting it all together.** You will be supplied with a dataset of 1,000 name and phone number pairs, contained in a list-of-lists named `hash_table_data`. The following code cell will read in these data.
# In[12]:
import csv
hash_table_data = list()
with open("name_phonenumber_map.csv", "r") as f:
reader = csv.reader(f)
for line in f:
hash_table_data.append(line.replace("\r\n", "").split(","))
print("First few entries: {} ...".format(hash_table_data[:5]))
# **Exercise 3 (3 points)**: Use your functions from the first three exercises to create a hash table from the above dataset, `hash_table_data`. Store this table in a variable named `table`.
#
# In other words, iterate through `hash_table_data` and insert each `(name, phone number)` pair into your `table`.
#
# You will have to choose a specific size for your `table`. In practice, the size is often chosen to try to achieve a certain *load factor*, which is defined as
#
# $$
# \mbox{load factor} \equiv \frac{n}{k},
# $$
#
# where $n$ is the number of items (i.e., key-value pairs) you expect to store and $k$ is the number of buckets. Common *load factor* values are 0.5 or 0.75. Remember that there are 1,000 entries in the `hash_table_data` dataset, so choose your number of buckets accordingly.
#
# You will be graded in two ways. The first test cell, worth one point, will test a correct import of the data into your `table`. The next test cell will test how your `hash_insert()` and `hash_search()` functions work with your newly created `table`.
# In[13]:
num_buckets = 2 * len(hash_table_data) # Based on a load factor of 0.5
table = [[] for i in range(num_buckets)]
for l in hash_table_data:
hash_insert(l[0], l[1], table)
# In[15]:
# test_cell_1 (1 point)
assert type(table) is list
for i in range(0,len(hash_table_data)):
assert hash_search(hash_table_data[i][0], table) is not None
print ("\n(Passed!)")
# In[16]:
#test_cell_2 (2 points)
assert (hash_search('Harriott Loan', table) == [s for s in hash_table_data if "Harriott Loan" in s ][0][1])
print ("\n(Passed!)")
# **Fin.** You've reached the end of this problem. Don't forget to restart the
# kernel and run the entire notebook from top-to-bottom to make sure you did
# everything correctly. If that is working, try submitting this problem. (Recall
# that you *must* submit and pass the autograder to get credit for your work.)
|
import time
while True:
    f = open("data.txt", "r")
    a = f.readline()
    f.close()
    print(a)
    time.sleep(1)  # pause between polls instead of busy-looping
|
import string
from abc import ABC, abstractmethod
import os
import pyaes
from io import BytesIO, StringIO
class Cipher(ABC):
def __init__(self) -> None:
super().__init__()
@abstractmethod
def generate_key(self):
pass
@abstractmethod
def encrypt(self, key, message):
pass
@abstractmethod
def decrypt(self, key, ciphertext):
pass
@abstractmethod
def get_key_size(self):
pass
class AesCBC128(Cipher):
def __init__(self) -> None:
super().__init__()
def generate_key(self):
return os.urandom(16)
def generate_iv(self):
return os.urandom(16)
def encrypt(self, key, message):
instream = BytesIO(message)
outstream = BytesIO()
iv = self.generate_iv()
aes = pyaes.AESModeOfOperationCBC(key, iv=iv)
pyaes.encrypt_stream(aes, instream, outstream)
ciphertext = outstream.getvalue()
instream.close()
outstream.close()
return iv+ciphertext
def decrypt(self, key, ciphertext):
iv = ciphertext[:16]
encoded_message = ciphertext[16:]
instream = BytesIO(encoded_message)
outstream = BytesIO()
aes = pyaes.AESModeOfOperationCBC(key, iv)
pyaes.decrypt_stream(aes, instream, outstream)
plaintext = outstream.getvalue()
instream.close()
outstream.close()
return plaintext
def get_key_size(self):
return 16
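# A minimal usage sketch (not part of the original module): round-trip a
# message through AesCBC128. Assumes the pyaes package is installed.
if __name__ == "__main__":
    cipher = AesCBC128()
    key = cipher.generate_key()
    ciphertext = cipher.encrypt(key, b"attack at dawn!!")
    assert cipher.decrypt(key, ciphertext) == b"attack at dawn!!"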
|
#! /usr/local/bin/python
# ! -*- encoding:utf-8 -*-
from pathlib import Path
import pandas as pd
import numpy as np
import random
import os
import argparse
proj_path = Path(__file__).parent.resolve().parent.resolve().parent.resolve()
data_path = proj_path / 'data' / 'PPP'
def read_csv(data_file, dataset):
cci_labels_gt_path = data_path / dataset / 'PPP_gt.csv'
cci_labels_junk_path = data_path / dataset / 'PPP_junk.csv'
edge_list_path = data_path / data_file
df = pd.read_csv(edge_list_path, header=None)
generate_gt(df, dataset)
def generate_gt(df, dataset):
cci_labels_gt_path = data_path / dataset / 'PPP_gt.csv'
cci_labels_junk_path = data_path / dataset / 'PPP_junk.csv'
edge_list = [];
for indexs in df.index:
rowData = df.loc[indexs].values[0:2]
rowData = rowData.tolist()
edge_list.append(rowData)
nodes = [];
for edge in edge_list:
if edge[0] not in nodes:
nodes.append(edge[0])
if edge[1] not in nodes:
nodes.append(edge[1])
nodes.sort()
gene2id = {gene: idx for idx, gene in enumerate(nodes)}
cur_cci_gt = []
cur_cci_junk = []
for indexs in df.index:
rowData = df.loc[indexs].values[0:2]
rowData = rowData.tolist()
p1 = rowData[0]
p2 = rowData[1]
choice = random.randint(0, 1)
cur_cci_gt.append([p1, p2])
if choice:
a, c = find_junk(p1, nodes, edge_list, cur_cci_junk)
else:
a, c = find_junk(p2, nodes, edge_list, cur_cci_junk)
cur_cci_junk.append([a, c])
with open(cci_labels_gt_path, 'w', encoding='utf-8') as f:
print(f"cur cci {len(cur_cci_gt)}")
for cci_label in cur_cci_gt:
f.write(f"{int(cci_label[0])},{int(cci_label[1])}\r\n")
with open(cci_labels_junk_path, 'w', encoding='utf-8') as f:
print(f"cur cci junk {len(cur_cci_junk)}")
for cci_label in cur_cci_junk:
f.write(f"{int(cci_label[0])},{int(cci_label[1])}\r\n")
# with open(cci_labels_junk_path.format(dataset, id2, type1), 'w', encoding='utf-8') as f:
# print(f"cur cci junk {len(cur_cci_junk_b2d)}")
# for cci_label in cur_cci_junk_b2d:
# f.write(f"{int(cci_label[0])},{int(cci_label[1])},{int(cci_label[2])}\r\n")
def find_junk(a, nodes, edge_list, cur_cci_junk):
"""
"""
c = random.choice(nodes)
    while [a, c] in edge_list or [a, c] in cur_cci_junk:
c = random.choice(nodes)
return a, c
def clean_cross_data(df1, df2):
df3 =pd.DataFrame()
nodes = []
for indexs in df1.index:
rowData = df1.loc[indexs].values[0:2]
rowData = rowData.tolist()
nodes.append(rowData[0])
nodes.append(rowData[1])
for indexs in df2.index:
rowData = df2.loc[indexs].values[0:2]
rowData = rowData.tolist()
if (rowData[0] not in nodes) and (rowData[1] not in nodes):
df3=df3.append(df2.loc[indexs])
return df3
if __name__ == "__main__":
import os
# python ./data/PPP/generate.py --dataset dataset_all_cross --data_path PP-Pathways_ppi.csv --cross_data 1 --train_rate 0.01
parser = argparse.ArgumentParser(description='GraphSAGE')
parser.add_argument("--random_seed", type=int, default=10086)
parser.add_argument("--dataset", type=str, default='dataset')
parser.add_argument("--data_path", type=str, default=None)
parser.add_argument("--train_rate", type=float, default=0.1)
parser.add_argument("--cross_data", type=int, default=0)
params = parser.parse_args()
random.seed(params.random_seed)
np.random.seed(params.random_seed)
if params.data_path == None:
# test_dataset
print('begin test generate:')
read_csv("PP-9001~10000.csv", dataset='test_' + params.dataset)
# train_dataset
print('begin train generate:')
read_csv('PP-1~9000.csv', dataset='train_' + params.dataset)
else:
edge_list_path = data_path / params.data_path
df = pd.read_csv(edge_list_path, header=None)
lens = len(df)
train_size = int(lens * params.train_rate)
df1 = df[0:train_size]
df2 = df[train_size:lens]
print('begin test generate:')
generate_gt(df1, dataset='test_' + params.dataset)
if params.cross_data == 1:
print('begin clean_cross_data:')
df2 = clean_cross_data(df1, df2)
print('begin train generate:')
generate_gt(df2, dataset='train_' + params.dataset)
|
from flask import jsonify, make_response, abort
def get_paginated_list(model, schema, url, start, limit):
"""
start - It is the position from which we want the data to be returned.
    schema - It is the marshmallow schema used to serialize model data
limit - It is the max number of items to return from that position.
next - It is the url for the next page of the query assuming current value of limit
previous - It is the url for the previous page of the query assuming current value of limit
count - It is the total count of results available in the dataset. Here as the count is 128,
that means you can go maximum till start=121 keeping limit as 20.
Also when you get the page with start=121 and limit=20, 8 items will be returned.
results - This is the list of results whose position lies within the bounds specified by the request.
"""
# check if page exists
results = model.query.all()
count = len(results)
if (count < start):
return False
# make response
obj = {}
obj['start'] = start
obj['limit'] = limit
obj['count'] = count
# make URLs
# make previous url
if start == 1:
obj['has_previous'] = False
else:
start_copy = max(1, start - limit)
limit_copy = start - 1
obj['previous'] = url + '?start=%d&limit=%d' % (start_copy, limit_copy)
obj['has_previous'] = True
# make next url
if start + limit > count:
obj['has_next'] = False
else:
start_copy = start + limit
obj['has_next'] = True
obj['next'] = url + '?start=%d&limit=%d' % (start_copy, limit)
# finally extract result according to bounds
results_extracted = results[(start - 1):(start - 1 + limit)]
model_schema = schema(many=True)
obj['results'] = model_schema.dump(results_extracted).data
return True, obj
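# A minimal usage sketch (the Item model, ItemSchema and app below are
# hypothetical, not part of this module): a Flask route returning a
# paginated JSON list, or a 404 when the start position is past the end.
#
#   @app.route('/api/items', methods=['GET'])
#   def list_items():
#       result = get_paginated_list(
#           Item, ItemSchema, '/api/items',
#           start=request.args.get('start', 1, type=int),
#           limit=request.args.get('limit', 20, type=int))
#       if result is False:
#           abort(404)
#       return jsonify(result[1])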
|
from django.shortcuts import render
from django.http import HttpResponse
from django.core.mail import EmailMessage
from django.conf import settings
from django.template.loader import render_to_string
# Create your views here.
from .models import Post
def home(request):
posts = Post.objects.filter(active=True, featured=True)[0:3]
context = {'posts': posts}
return render(request, 'base/index.html', context)
def posts(request):
posts = Post.objects.filter(active=True)
context = {'posts': posts}
return render(request, 'base/posts.html', context)
def post(request, slug):
post = Post.objects.get(slug=slug)
context = {'post': post}
return render(request, 'base/post.html', context)
def profile(request):
return render(request, 'base/profile.html')
def sendEmail(request):
if request.method == 'POST':
template = render_to_string('base/email_template.html',{
'name' : request.POST['name'],
'email' : request.POST['email'],
'message' : request.POST['message'],
})
email = EmailMessage(
request.POST['subject'],
template,
settings.EMAIL_HOST_USER,
['moreyragabriel97@gmail.com']
)
        email.send(fail_silently=False)
return render(request, 'base/email_sent.html')
|
import json
none = "d3043820717d74d9a17694c176d39733"
# region EMR
class EMR:
def __init__(
self,
name=none,
description=none,
region=none,
strategy=none,
compute=none,
scaling=none):
"""
:type name: str
        :type description: str
:type region: str
:type strategy: Strategy
        :type compute: Compute
:type scaling: Scaling
"""
self.name = name
self.description = description
self.region = region
self.strategy = strategy
self.compute = compute
self.scaling = scaling
# endregion
# region Strategy
class Strategy:
def __init__(
self,
wrapping=none,
cloning=none,
provisioning_timeout=none):
"""
:type wrapping: Wrapping
:type cloning: Cloning
:type provisioning_timeout: ProvisioningTimeout
"""
self.wrapping = wrapping
self.cloning = cloning
self.provisioning_timeout = provisioning_timeout
class Wrapping:
def __init__(
self,
source_cluster_id):
"""
:type source_cluster_id: str
"""
self.source_cluster_id = source_cluster_id
class Cloning:
def __init__(
self,
origin_cluster_id=none,
include_steps=none):
"""
:type origin_cluster_id: str
:type include_steps: bool
"""
self.origin_cluster_id = origin_cluster_id
self.include_steps = include_steps
class ProvisioningTimeout:
def __init__(
self,
timeout = none,
timeout_action = none):
"""
:type timeout: int
        :type timeout_action: str
"""
self.timeout = timeout
self.timeout_action = timeout_action
# endregion
# region Compute
class Compute:
def __init__(
self,
ebs_root_volume_size=none,
availability_zones=none,
bootstrap_actions=none,
steps=none,
instance_groups=none,
configurations=none):
"""
:type ebs_root_volume_size: int
:type availability_zones: List[AvailabilityZone]
:type bootstrap_actions: BootstrapActions
:type steps: Steps
:type instance_groups: InstanceGroups
:type configurations: Configurations
"""
self.ebs_root_volume_size = ebs_root_volume_size
self.availability_zones = availability_zones
self.bootstrap_actions = bootstrap_actions
self.steps = steps
self.instance_groups = instance_groups
self.configurations = configurations
class AvailabilityZone:
def __init__(
self,
name=none,
subnet=none):
"""
:type name: str
:type subnet: str
"""
self.name = name
self.subnet = subnet
class BootstrapActions:
def __init__(
self,
file=none):
"""
:type file: File
"""
self.file = file
class File:
def __init__(
self,
bucket=none,
key=none):
"""
:type bucket: str
:type key: str
"""
self.bucket = bucket
        self.key = key
class Steps:
def __init__(
self,
file=none):
"""
:type file: File
"""
self.file = file
class InstanceGroups:
def __init__(
self,
master_group=none,
core_group=none,
task_group=none):
"""
:type master_group: MasterGroup
:type core_group: CoreGroup
:type task_group: TaskGroup
"""
self.master_group = master_group
self.core_group = core_group
self.task_group = task_group
class MasterGroup:
def __init__(
self,
instance_types=none,
target=none,
life_cycle=none):
"""
:type instance_types: List[str]
:type target: int
:type life_cycle: str
"""
self.instance_types = instance_types
self.target = target
self.life_cycle = life_cycle
class CoreGroup:
def __init__(
self,
instance_types=none,
target=none,
life_cycle=none,
ebs_configuration=none):
"""
:type instance_types: List[str]
:type target: int
:type life_cycle: str
:type ebs_configuration: EbsConfiguration
"""
self.instance_types = instance_types
self.target = target
self.life_cycle = life_cycle
self.ebs_configuration = ebs_configuration
class TaskGroup:
def __init__(
self,
instance_types=none,
capacity=none,
life_cycle=none,
ebs_configuration=none):
"""
:type instance_types: List[str]
:type capacity: Capacity
:type life_cycle: str
:type ebs_configuration: EbsConfiguration
"""
self.instance_types = instance_types
self.capacity = capacity
self.life_cycle = life_cycle
self.ebs_configuration = ebs_configuration
class Capacity:
def __init__(
self,
target=none,
minimum=none,
maximum=none):
"""
:type target: int
:type minimum: int
:type maximum: int
"""
self.target = target
self.minimum = minimum
self.maximum = maximum
class EbsConfiguration:
def __init__(
self,
ebs_block_device_configs=none,
ebs_optimized=none):
"""
:type ebs_block_device_configs: List[SingleEbsConfig]
:type ebs_optimized: bool
"""
self.ebs_block_device_configs = ebs_block_device_configs
self.ebs_optimized = ebs_optimized
class SingleEbsConfig:
def __init__(
self,
volume_specification=none,
volumes_per_instance=none):
"""
:type volume_specification: VolumeSpecification
:type volumes_per_instance: int
"""
self.volume_specification = volume_specification
self.volumes_per_instance = volumes_per_instance
class VolumeSpecification:
def __init__(
self,
volume_type=none,
size_in_gb=none):
"""
:type volume_type: str
        :type size_in_gb: int
"""
self.volume_type = volume_type
        self.size_in_gb = size_in_gb
class Configurations:
def __init__(
self,
file=none):
"""
:type file: File
"""
self.file = file
# endregion
# region Scaling
class Scaling:
def __init__(
self,
up=none,
down=none):
"""
:type up: List[Metric]
:type down: List[Metric]
"""
self.up = up
self.down = down
class Metric:
def __init__(
self,
metric_name=none,
statistic=none,
unit=none,
threshold=none,
adjustment=none,
namespace=none,
period=none,
evaluation_periods=none,
action=none,
cooldown=none,
dimensions=none,
operator=none):
"""
:type metric_name: str
:type statistic: str
:type unit: str
:type threshold: int
:type adjustment: int
:type namespace: str
:type period: int
:type evaluation_periods: int
:type action: Action
:type cooldown: int
:type dimensions: List[Dimension]
:type operator: str
"""
self.metric_name = metric_name
self.statistic = statistic
self.unit = unit
self.threshold = threshold
self.adjustment = adjustment
self.namespace = namespace
self.period = period
self.evaluation_periods = evaluation_periods
self.action = action
self.cooldown = cooldown
self.dimensions = dimensions
self.operator = operator
class Action:
def __init__(
self,
type=none,
adjustment=none,
min_target_capacity=none,
target=none,
minimum=none,
maximum=none):
"""
:type type: str
:type adjustment: int
:type min_target_capacity: int
:type target: int
:type minimum: int
:type maximum: int
"""
self.type = type
self.adjustment = adjustment
self.min_target_capacity = min_target_capacity
self.target = target
self.minimum = minimum
self.maximum = maximum
class Dimension:
def __init__(
self,
name=none):
"""
:type name: str
"""
self.name = name
#endregion
class EMRCreationRequest:
def __init__(self, mrScaler):
self.mrScaler = mrScaler
def toJSON(self):
return json.dumps(self, default=lambda o: o.__dict__,
sort_keys=True, indent=4)
|
#!/usr/bin/env python
# Dirty bit: if this memory has been Written to (and thus, it must write to disk upon eviction)
# Referenced bit: if this memory has been read or written to (if it's been referenced at all)
# Four algorithms: Optimal, Clock (w/ circular queue enhancement of second chance algo.),
# NotRecentlyUsed (variable of what "recent means" / use D and R bits), Random
# Implement a page table for a 32-bit address, all pages are 4kb's, number of frames are a cmdline param
"""
Program:
1) Get cmd-line args
2) Run through file
3) Display Action taken for each address:
- Hit
- Page Fault (no eviction)
- Page Fault (evict clean page)
- Page Fault (evict dirty page -- write to disk)
4) Print Summary Stats
- Number of frames / total memory accesses / total number of page faults / total writes to disk
"""
# File has memory address followed by (R)ead or (W)rite
# Note: play around with NRU to determine the best refresh rate
import sys
from random import randint
#Object Classes:
class PTEntry:
def __init__(self):
self.d = 0
self.r = 0
self.v = 0
self.pn = 0
self.fn = -1 # 11PM addition
def __repr__(self):
return "(%d%d%d|%s|%d)" % (self.d, self.r, self.v, self.pn, self.fn)
def is_valid(self):
return (self.v != 0 and self.fn != -1) # 11PM addition
def evicted(self): # 11PM addition
self.d = 0
self.r = 0
self.v = 0
self.fn = -1 # 11PM addition
def added(self):
self.r = 1
self.v = 1
def get_frame_number(self):
return self.fn
def get_page_number(self):
return self.pn
def get_dirty_bit(self):
return self.d
def get_ref_bit(self):
return self.r
def get_valid_bit(self):
return self.v
def get_key(self):
return self.pn
def set_dirty_bit(self, bit):
self.d = bit
def set_ref_bit(self, bit):
self.r = bit
def set_valid_bit(self, bit):
self.v = bit
def set_page_num(self, num):
self.pn = num
def set_frame_num(self, num):
self.fn = num
class Ram:
def __init__(self, numframes):
self.nf = numframes
self.array = [PTEntry() for i in range(self.nf)] # This is called a comprehension
self.fc = 0 #frame counter
self.clock_hand = 0 #init
def __repr__(self):
return "RAM(%d): %s FC(%d)" % (self.nf, self.array, self.fc)
def add(self, entry):
self.array[entry.get_frame_number()] = entry
entry.added()
self.fc += 1
def update(self, index, entry):
self.array[index] = entry
#def clear(self):
# self.array = [PTEntry() for i in range(self.nf)]
# self.fc = 0
def is_full(self):
return (int(self.fc) >= int(self.nf));
def evict(self): # 11PM addition
#Get Entry based on ALGO
#RAND
if ("opt" in algorithm): #OPTIMAL
print("Not Yet Implemented!")
elif ("clock" in algorithm): #CLOCK EVICT
replaced = False #init
while(not replaced):
#inspect R bit at hand location
old_entry = self.array[self.clock_hand]
if(old_entry.get_ref_bit() == 0): #If R == 0, place here
#evict entry
evictthis = old_entry.get_frame_number() #return this entry
replaced = True
else: #Else, clear R
old_entry.set_ref_bit(0)
self.clock_hand += 1
self.clock_hand = self.clock_hand % self.nf
#Increment clock hand and try again until replaced
elif ("nru" in algorithm): #NOT RECENTLY USED EVICT
print("Not Yet Implemented!")
else: #RANDOM EVICT
#Evict whatever the FC is at, modulo # of frames
evictthis = randint(0,self.nf-1)
#get whatever was there
old_entry = self.array[evictthis]
#empty it
#self.array[evictthis] = PTEntry() #well... not needed -- it will be overwritten!
#Determine if we need to write the old one to disk:
if(old_entry.get_dirty_bit() == 1):
#print("RAM: Evict Dirty")
global total_writes_to_disk
total_writes_to_disk += 1
#else:
#print("RAM: Evict Clean")
old_entry.evicted() #invalidate and reset bits
return evictthis #return INT -- the frame number to be replaced!
def get_frame_number(self):
return self.fc
def get_entry(self, index):
return self.array[index]
class PageTable:
def __init__(self):
self.pt = {}
def __repr__(self):
return "PT: %s" % (self.pt)
def add(self, entry):
entry.set_valid_bit(1)
self.pt[entry.get_key()] = entry
def update(self, entry):
self.pt[entry.get_key()] = entry
def get_entry(self, key):
return self.pt[key]
def dirty_bit(self, key):
self.pt[key] = "1%s" % (self.pt[key][1:])
#Function Declarations:
#EXIT
def exit():
print("Usage: ./vmsim -n <numframes> -a <opt|clock|nru|rand> [-r <NRUrefresh>] <tracefile>")
sys.exit(-1)
#END EXIT
#SETARGS
def set_args():
#check length
if not((len(sys.argv) == 6) or (len(sys.argv) == 8)):
exit()
#check and set -n -a and -r
if "-n" in sys.argv:
global num_frames
i = sys.argv.index("-n")
i += 1
num_frames = int(sys.argv[i])
else: #if -n is not included in the cmdline args
exit()
if "-a" in sys.argv:
global algorithm
i = sys.argv.index("-a")
i += 1
algorithm = sys.argv[i]
algorithm = algorithm.lower()
else: #if -a is not included in the cmdline args
exit()
if "nru" in sys.argv:
if "-r" in sys.argv:
global nru_refresh
i = sys.argv.index("-r")
i += 1
nru_refresh = sys.argv[i]
else:
exit()
#set filename
global filename
filename = sys.argv[-1]
#END SETARGS
# START MAIN:
# Set the global's from the cmd-line args
set_args()
global total_memory_access
total_memory_access = 0 #init
global total_page_faults
total_page_faults = 0 #init
global total_writes_to_disk
total_writes_to_disk = 0 #init
# Create RAM with user-defined number of frames
# Note: each frame in RAM is initialized to -1
RAM = Ram(num_frames) #Create new ram object!
PT = PageTable() #Create new PageTable Object!
#open file
f = open(filename, "r")
for line in f:
total_memory_access += 1
# TODO: Open file and reading
# DEBUG: Read line from keyboard
#line = raw_input("DEBUG: Enter line from file: ")# DEBUG
# Split line based on whitespace
result = line.split(" ")
# Create the page number and operation
memory_address = result[0] #in hex
page_number = memory_address[:5] #ignore the offset! First 5
operation = result[1].rstrip() #R or W, strips the new line
try: # Check for page number in the page table -- exists in page table
existing_pt_entry = PT.get_entry(page_number)
# Exists in page table!:
# Check for page in RAM
frame_number = existing_pt_entry.get_frame_number()
ram_entry = RAM.get_entry(frame_number)
if(operation.upper() == "W"):
existing_pt_entry.set_dirty_bit(1)
if(ram_entry.is_valid() and page_number == ram_entry.get_page_number()): #HIT! in page table and ram!
"""print("1) Hit!")"""
#set r bit
#existing_pt_entry.set_ref_bit(1)
ram_entry.set_ref_bit(1)
#Rewrite D-bit from RAM -- in case a write happened a while ago -- could be different than what is kept in the PT
#existing_pt_entry.set_dirty_bit(ram_entry.get_dirty_bit())
#Update the existing entry
#PT.update(page_number, existing_pt_entry)
#RAM.update(frame_number, existing_pt_entry)
else: #PAGE FAULT -- RUN EVICTION ALGO! hit in page table but not ram
#TODO -- check if ram is full, then evict if needed
if(RAM.is_full()):
"""print("2) Page Fault: Evict because RAM is full")"""
total_page_faults += 1
existing_pt_entry.set_frame_num(RAM.evict()) #returns an empty frame
#DEBUG EVICT ALL!
#RAM.clear()
#SET THE FRAME NUMBER FROM SOME RETURNING EVICTION FUNCTION!
#ENDDEBUG
#Add to RAM
else: #This type should go away when I implement my algorithms.
#print("3) Page Fault: Compulsory -- must load page into RAM")
print("3) ---- THIS SHOULD NEVER PRINT EVER @(@()(@$*(*$@(*&$%*(&#)%(*&#@jfldsjHF")
total_page_faults += 1
#NO evict -- set frame number
existing_pt_entry.set_frame_num(RAM.get_frame_number())
#Add to RAM
RAM.add(existing_pt_entry)
except KeyError: # ---- Does not exist in page table yet:
#Start building the new entry
new_page_table_entry = PTEntry() #create new object!
new_page_table_entry.set_page_num(page_number)
#Set R and D based on operation:
if (operation.upper() == "R"):
new_page_table_entry.set_ref_bit(1)
if (operation.upper() == "W"):
new_page_table_entry.set_ref_bit(1)
new_page_table_entry.set_dirty_bit(1)
if (RAM.is_full()): #PAGE FAULT -- RUN EVICTION ALGO!
#DO THE eviction ALGORITHM
#DEBUG EVICT ALL!
#RAM.clear()
#GET FRAME NUMBER FROM RETURNING EVICTION ALGO
#ENDDEBUG
"""print("4) Page Fault: Evict because RAM is full")"""
total_page_faults += 1
new_page_table_entry.set_frame_num(RAM.evict()) #evict returns an open frame number!
#Add new
else: #PAGE FAULT - NO EVICTION! this will only happen for the first n frames when ram isnt full
"""print("5) Page Fault: Compulsory -- RAM is not full, must load")"""
total_page_faults += 1
#NO evict -- set frame number
new_page_table_entry.set_frame_num(RAM.get_frame_number())
#Add new
#Create Page Table entry and store in RAM!
PT.add(new_page_table_entry)
RAM.add(new_page_table_entry)
#print("%s" % PT)#DEBUG
#print("%s" % RAM)#DEBUG
#end of file parsing
print("---------------------------------")
print("Number of Frames: %s" % num_frames)
print("Total Memory Accesses: %s" % total_memory_access)
print("Total Page Faults: %s" % total_page_faults)
print("Total Writes to Disk: %s" % total_writes_to_disk)
print("---------------------------------")
# END MAIN
|
import unittest
from app.code.bank.account import Account
from app.code.bank.bank import Bank
class BankTest(unittest.TestCase):
def test_bank_init_empty(self):
bank = Bank()
self.assertEqual({}, bank.accounts)
self.assertEqual(len(bank.accounts), 0)
def test_add_account(self):
bank = Bank()
        account_1 = Account(1, 50)
        account_2 = Account(2, 100)
bank.add_account(account_1)
bank.add_account(account_2)
self.assertEqual(len(bank.accounts), 2)
def test_get_account_balance(self):
bank = Bank()
        account_1 = Account(1, 75)
bank.add_account(account_1)
        self.assertEqual(bank.get_account_balance(1), 75)
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.testutil.pants_integration_test import run_pants, setup_tmpdir
PLUGIN = """
import logging
from pants.engine.goal import GoalSubsystem, Goal
from pants.engine.rules import collect_rules, goal_rule
class LogSubsystem(GoalSubsystem):
name = "logger"
help = "foo"
class LogGoal(Goal):
subsystem_cls = LogSubsystem
environment_behavior = Goal.EnvironmentBehavior.LOCAL_ONLY
@goal_rule
def write_logs() -> LogGoal:
for logger_name in ("globalLevel", "infoOverride", "debugOverride"):
logger = logging.getLogger(f"plugins.logger.{{logger_name}}")
logger.debug("debug log")
logger.info("info log")
logger.warning("warn log")
return LogGoal(exit_code=0)
def rules():
return collect_rules()
"""
REGISTER = """
from plugins import logger
def rules():
return logger.rules()
"""
def test_log_by_level() -> None:
"""Check that overriding log levels works for logs both coming from Rust and from Python.
This also checks that we correctly log `Starting` and `Completed` messages when the dynamic UI
is disabled.
"""
with setup_tmpdir({"plugins/logger.py": PLUGIN, "plugins/register.py": REGISTER}) as tmpdir:
result = run_pants(
[
f"--pythonpath={tmpdir}",
"--backend-packages=plugins",
"--no-dynamic-ui",
"--show-log-target",
"--level=warn",
(
"--log-levels-by-target={"
"'plugins.logger.infoOverride': 'info', "
"'plugins.logger.debugOverride': 'debug', "
"'workunit_store': 'debug'}"
),
"logger",
]
)
global_level = "globalLevel"
info_override = "infoOverride"
debug_override = "debugOverride"
for logger in (global_level, info_override, debug_override):
assert f"[WARN] (plugins.logger.{logger}) warn log" in result.stderr
for logger in (info_override, debug_override):
assert f"[INFO] (plugins.logger.{logger}) info log" in result.stderr
assert "[INFO] (plugins.logger.globalLevel) info log" not in result.stderr
assert "[DEBUG] (plugins.logger.debugOverride) debug log" in result.stderr
for logger in (global_level, info_override):
assert f"[DEBUG] (plugins.logger.{logger} debug log" not in result.stderr
# Check that overriding levels for Rust code works, and also that we log Starting and Completed
# properly.
assert "[DEBUG] (workunit_store) Starting: `logger` goal" in result.stderr
assert "[DEBUG] (workunit_store) Completed: `logger` goal" in result.stderr
|
import io
from typing import Dict, Union, List
from parseridge.utils.logger import LoggerMixin
ID, FORM, LEMMA, UPOS, XPOS, FEATS, HEAD, DEPREL, DEPS, MISC = range(10)
class CoNLLEvaluationScript(LoggerMixin):
"""
Based on v. 1.0 of the CoNLL 2017 UD Parsing evaluation script by
the Institute of Formal and Applied Linguistics (UFAL),
Faculty of Mathematics and Physics, Charles University, Czech Republic.
"""
class UDRepresentation:
def __init__(self):
# Characters of all the tokens in the whole file.
# Whitespace between tokens is not included.
self.characters = []
# List of UDSpan instances with start&end indices into `characters`.
self.tokens = []
# List of UDWord instances.
self.words = []
# List of UDSpan instances with start&end indices into `characters`.
self.sentences = []
class UDSpan:
def __init__(self, start, end):
self.start = start
# Note that end marks the first position **after the end** of span,
# so we can use characters[start:end] or range(start, end).
self.end = end
class UDWord:
def __init__(self, span, columns, is_multiword):
# Span of this word (or MWT, see below) within
# ud_representation.characters.
self.span = span
# 10 columns of the CoNLL-U file: ID, FORM, LEMMA,...
self.columns = columns
# is_multiword==True means that this word is part of a
# multi-word token. In that case, self.span marks the span of the
# whole multi-word token.
self.is_multiword = is_multiword
# Reference to the UDWord instance representing the HEAD
# (or None if root).
self.parent = None
# Let's ignore language-specific deprel subtypes.
self.columns[DEPREL] = columns[DEPREL].split(":")[0]
class UDError(Exception):
pass
class Score:
def __init__(self, gold_total, system_total, correct, aligned_total=None):
self.precision = correct / system_total if system_total else 0.0
self.recall = correct / gold_total if gold_total else 0.0
self.f1 = (
2 * correct / (system_total + gold_total)
if system_total + gold_total
else 0.0
)
self.aligned_accuracy = (
correct / aligned_total if aligned_total else aligned_total
)
def serialize(self):
return {"f1": self.f1, "aligned_accuracy": self.aligned_accuracy}
class AlignmentWord:
def __init__(self, gold_word, system_word):
self.gold_word = gold_word
self.system_word = system_word
self.gold_parent = None
self.system_parent_gold_aligned = None
class Alignment:
def __init__(self, gold_words, system_words):
self.gold_words = gold_words
self.system_words = system_words
self.matched_words = []
self.matched_words_map = {}
def append_aligned_words(self, gold_word, system_word):
self.matched_words.append(
CoNLLEvaluationScript.AlignmentWord(gold_word, system_word)
)
self.matched_words_map[system_word] = gold_word
def fill_parents(self):
"""
We represent root parents in both gold and system data by '0'.
For gold data, we represent non-root parent by corresponding
gold word. For system data, we represent non-root parent by
either gold word aligned to parent system nodes, or by None if
no gold words is aligned to the parent.
"""
for words in self.matched_words:
words.gold_parent = (
words.gold_word.parent if words.gold_word.parent is not None else 0
)
words.system_parent_gold_aligned = (
self.matched_words_map.get(words.system_word.parent, None)
if words.system_word.parent is not None
else 0
)
def evaluate(self, gold_ud, system_ud):
# Check that underlying character sequences do match
if gold_ud.characters != system_ud.characters:
index = 0
while gold_ud.characters[index] == system_ud.characters[index]:
index += 1
raise CoNLLEvaluationScript.UDError(
f"The concatenation of tokens in the gold file and in "
f"th system file differ!\n"
+ f"First 20 differing characters in gold file: "
f"'{''.join(gold_ud.characters[index:index + 20])}' "
f"and system file: "
f"'{''.join(system_ud.characters[index:index + 20])}'"
)
# Align words
alignment = self.align_words(gold_ud.words, system_ud.words)
# Compute the F1-scores
result = {
"Tokens": self.spans_score(gold_ud.tokens, system_ud.tokens),
"Sentences": self.spans_score(gold_ud.sentences, system_ud.sentences),
"Words": self.alignment_score(alignment, None),
"UPOS": self.alignment_score(alignment, lambda w, parent: w.columns[UPOS]),
"XPOS": self.alignment_score(alignment, lambda w, parent: w.columns[XPOS]),
"Feats": self.alignment_score(alignment, lambda w, parent: w.columns[FEATS]),
"AllTags": self.alignment_score(
alignment,
lambda w, parent: (w.columns[UPOS], w.columns[XPOS], w.columns[FEATS]),
),
"Lemmas": self.alignment_score(alignment, lambda w, parent: w.columns[LEMMA]),
"UAS": self.alignment_score(alignment, lambda w, parent: parent),
"LAS": self.alignment_score(
alignment, lambda w, parent: (parent, w.columns[DEPREL])
),
}
return result
@staticmethod
def load_conllu(stream):
ud = CoNLLEvaluationScript.UDRepresentation()
# Load the CoNLL-U file
index, sentence_start = 0, None
line_number = 0
while True:
line = stream.readline()
line_number += 1
if not line:
break
line = line.rstrip("\r\n")
# Handle sentence start boundaries
if sentence_start is None:
# Skip comments
if line.startswith("#"):
continue
# Start a new sentence
ud.sentences.append(CoNLLEvaluationScript.UDSpan(index, 0))
sentence_start = len(ud.words)
if not line:
# Add parent UDWord links and check there are no cycles
def process_word(word):
if word.parent == "remapping":
raise CoNLLEvaluationScript.UDError(
"There is a cycle in a sentence"
)
if word.parent is None:
head = int(word.columns[HEAD])
if head > len(ud.words) - sentence_start:
raise CoNLLEvaluationScript.UDError(
f"HEAD '{word.columns[HEAD]}' "
f"points outside of the sentence"
)
if head:
parent = ud.words[sentence_start + head - 1]
word.parent = "remapping"
process_word(parent)
word.parent = parent
for word in ud.words[sentence_start:]:
process_word(word)
# Check there is a single root node
if (
len([word for word in ud.words[sentence_start:] if word.parent is None])
!= 1
):
raise CoNLLEvaluationScript.UDError(
f"There are multiple roots in a sentence. " f"(Line {line_number})."
)
# End the sentence
ud.sentences[-1].end = index
sentence_start = None
continue
# Read next token/word
columns = line.split("\t")
if len(columns) != 10:
raise CoNLLEvaluationScript.UDError(
f"The CoNLL-U line does not contain "
f"10 tab-separated columns: '{line}'"
)
# Skip empty nodes
if "." in columns[ID]:
continue
# Delete spaces from FORM so gold.characters == system.characters
# even if one of them tokenizes the space.
columns[FORM] = columns[FORM].replace(" ", "")
if not columns[FORM]:
raise CoNLLEvaluationScript.UDError(
"There is an empty FORM in the CoNLL-U file"
)
# Save token
ud.characters.extend(columns[FORM])
ud.tokens.append(
CoNLLEvaluationScript.UDSpan(index, index + len(columns[FORM]))
)
index += len(columns[FORM])
# Handle multi-word tokens to save word(s)
if "-" in columns[ID]:
try:
start, end = map(int, columns[ID].split("-"))
except Exception:
raise CoNLLEvaluationScript.UDError(
"Cannot parse multi-word token ID '{}'".format(columns[ID])
)
for _ in range(start, end + 1):
word_line = stream.readline().rstrip("\r\n")
line_number += 1
word_columns = word_line.split("\t")
if len(word_columns) != 10:
raise CoNLLEvaluationScript.UDError(
f"The CoNLL-U line does not contain "
f"10 tab-separated columns: '{word_line}'"
)
ud.words.append(
CoNLLEvaluationScript.UDWord(
ud.tokens[-1], word_columns, is_multiword=True
)
)
# Basic tokens/words
else:
try:
word_id = int(columns[ID])
except Exception:
raise CoNLLEvaluationScript.UDError(
"Cannot parse word ID '{}'".format(columns[ID])
)
if word_id != len(ud.words) - sentence_start + 1:
raise CoNLLEvaluationScript.UDError(
f"Incorrect word ID '{columns[ID]}' "
f"for word '{columns[FORM]}', "
f"expected '{len(ud.words) - sentence_start + 1}'"
)
try:
head_id = int(columns[HEAD])
except Exception:
raise CoNLLEvaluationScript.UDError(
"Cannot parse HEAD '{}'".format(columns[HEAD])
)
if head_id < 0:
raise CoNLLEvaluationScript.UDError("HEAD cannot be negative")
ud.words.append(
CoNLLEvaluationScript.UDWord(ud.tokens[-1], columns, is_multiword=False)
)
if sentence_start is not None:
raise CoNLLEvaluationScript.UDError(
"The CoNLL-U file does not end with empty line"
)
return ud
@staticmethod
def spans_score(gold_spans, system_spans):
correct, gi, si = 0, 0, 0
while gi < len(gold_spans) and si < len(system_spans):
if system_spans[si].start < gold_spans[gi].start:
si += 1
elif gold_spans[gi].start < system_spans[si].start:
gi += 1
else:
correct += gold_spans[gi].end == system_spans[si].end
si += 1
gi += 1
return CoNLLEvaluationScript.Score(len(gold_spans), len(system_spans), correct)
@staticmethod
def alignment_score(alignment, key_fn, weight_fn=lambda w: 1):
gold, system, aligned, correct = 0, 0, 0, 0
for word in alignment.gold_words:
gold += weight_fn(word)
for word in alignment.system_words:
system += weight_fn(word)
for words in alignment.matched_words:
aligned += weight_fn(words.gold_word)
if key_fn is None:
# Return score for whole aligned words
return CoNLLEvaluationScript.Score(gold, system, aligned)
for words in alignment.matched_words:
if key_fn(words.gold_word, words.gold_parent) == key_fn(
words.system_word, words.system_parent_gold_aligned
):
correct += weight_fn(words.gold_word)
return CoNLLEvaluationScript.Score(gold, system, correct, aligned)
@staticmethod
def beyond_end(words, i, multiword_span_end):
if i >= len(words):
return True
if words[i].is_multiword:
return words[i].span.start >= multiword_span_end
return words[i].span.end > multiword_span_end
@staticmethod
def extend_end(word, multiword_span_end):
if word.is_multiword and word.span.end > multiword_span_end:
return word.span.end
return multiword_span_end
def find_multiword_span(self, gold_words, system_words, gi, si):
"""
We know gold_words[gi].is_multiword or system_words[si].is_multiword.
Find the start of the multiword span (gs, ss), so the multiword span
is minimal.
"""
# Initialize multiword_span_end characters index.
if gold_words[gi].is_multiword:
multiword_span_end = gold_words[gi].span.end
if (
not system_words[si].is_multiword
and system_words[si].span.start < gold_words[gi].span.start
):
si += 1
else: # if system_words[si].is_multiword
multiword_span_end = system_words[si].span.end
if (
not gold_words[gi].is_multiword
and gold_words[gi].span.start < system_words[si].span.start
):
gi += 1
gs, ss = gi, si
# Find the end of the multiword span (so both gi and si are pointing
# to the word following the multiword span end).
while not self.beyond_end(
gold_words, gi, multiword_span_end
) or not self.beyond_end(system_words, si, multiword_span_end):
if gi < len(gold_words) and (
si >= len(system_words)
or gold_words[gi].span.start <= system_words[si].span.start
):
multiword_span_end = self.extend_end(gold_words[gi], multiword_span_end)
gi += 1
else:
multiword_span_end = self.extend_end(system_words[si], multiword_span_end)
si += 1
return gs, ss, gi, si
@staticmethod
def compute_lcs(gold_words, system_words, gi, si, gs, ss):
lcs = [[0] * (si - ss) for i in range(gi - gs)]
for g in reversed(range(gi - gs)):
for s in reversed(range(si - ss)):
if (
gold_words[gs + g].columns[FORM].lower()
== system_words[ss + s].columns[FORM].lower()
):
lcs[g][s] = 1 + (
lcs[g + 1][s + 1] if g + 1 < gi - gs and s + 1 < si - ss else 0
)
lcs[g][s] = max(lcs[g][s], lcs[g + 1][s] if g + 1 < gi - gs else 0)
lcs[g][s] = max(lcs[g][s], lcs[g][s + 1] if s + 1 < si - ss else 0)
return lcs
def align_words(self, gold_words, system_words):
alignment = CoNLLEvaluationScript.Alignment(gold_words, system_words)
gi, si = 0, 0
while gi < len(gold_words) and si < len(system_words):
if gold_words[gi].is_multiword or system_words[si].is_multiword:
gs, ss, gi, si = self.find_multiword_span(gold_words, system_words, gi, si)
if si > ss and gi > gs:
lcs = self.compute_lcs(gold_words, system_words, gi, si, gs, ss)
# Store aligned words
s, g = 0, 0
while g < gi - gs and s < si - ss:
if (
gold_words[gs + g].columns[FORM].lower()
== system_words[ss + s].columns[FORM].lower()
):
alignment.append_aligned_words(
gold_words[gs + g], system_words[ss + s]
)
g += 1
s += 1
elif lcs[g][s] == (lcs[g + 1][s] if g + 1 < gi - gs else 0):
g += 1
else:
s += 1
else:
# B: No multi-word token => align according to spans.
if (gold_words[gi].span.start, gold_words[gi].span.end) == (
system_words[si].span.start,
system_words[si].span.end,
):
alignment.append_aligned_words(gold_words[gi], system_words[si])
gi += 1
si += 1
elif gold_words[gi].span.start <= system_words[si].span.start:
gi += 1
else:
si += 1
alignment.fill_parents()
return alignment
def get_las_score_for_sentences(
self, gold_sentences: List[str], predicted_sentences: List[str]
) -> Dict[str, Union[float, Dict[str, Dict[str, float]]]]:
"""
Takes a list of gold an predicted sentence objects and computes the
F1 LAS score between them.
"""
gold_buffer = io.StringIO("".join(gold_sentences))
pred_buffer = io.StringIO("".join(predicted_sentences))
gold_connl = self.load_conllu(gold_buffer)
pred_connl = self.load_conllu(pred_buffer)
raw_scores = self.evaluate(gold_connl, pred_connl)
scores = {
"las": raw_scores["LAS"].f1 * 100,
"uas": raw_scores["UAS"].f1 * 100,
"all": {k: v.serialize() for k, v in raw_scores.items()},
}
return scores
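# A minimal usage sketch (illustrative only): scoring a system parse against
# a gold parse for a single one-word sentence in CoNLL-U format. Assumes
# CoNLLEvaluationScript can be instantiated without arguments.
#
#   gold = "1\tHello\thello\tINTJ\tUH\t_\t0\troot\t_\t_\n\n"
#   pred = "1\tHello\thello\tINTJ\tUH\t_\t0\troot\t_\t_\n\n"
#   scores = CoNLLEvaluationScript().get_las_score_for_sentences([gold], [pred])
#   print(scores["las"], scores["uas"])  # 100.0 100.0 for identical input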
|
import argparse
def run(var1, var2):
"""
Change made to this file...
"""
var3 = f"{var1}:{var2}"
print(var3)
return var3
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Input sample argparse args")
parser.add_argument('--V1', type=int, help='Enter val for var1')
    parser.add_argument('--V2', type=str, help='Enter val for var2')
args = parser.parse_args()
run(args.V1, args.V2)
|
import os
import json
import csv
with open("social.csv","a",newline="") as f:
csv_write = csv.writer(f, dialect='excel')
title = ["uid",">5person","social times"]
csv_write.writerow(title)
for file in os.listdir("./response/Social"):
dirt = "./response/Social/" + file
with open(dirt,"r") as f:
load_json = json.load(f)
nb = len(load_json)
if nb != 0:
use_data_len = 0 #count all data used
count=0
uid = file[-8:-5]
for i in load_json:
if "null" in i :
continue
use_data_len += 1
if int(i["number"]) >= 2: #social >5person
count += 1
if use_data_len != 0:
percent_5more = count/use_data_len
print(uid,percent_5more)
stu = [uid,percent_5more,use_data_len]
csv_write.writerow(stu)
|
import datetime
from datetime import date
import requests
from flask import Flask
from flask import request
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def home():
stu_id = request.args.get('id', '')
today = date.today()
full_html = ""
for i in range(7):
d1 = today.strftime("%Y-%m-%d")
url = f"http://219.216.96.73/pyxx/App_Ajax/GetkcHandler.ashx?kcdate={d1}&xh={stu_id}"
weekday = today.weekday() +1
head = f"<h3>{d1} 星期{weekday}</h3>"
full_html += head + requests.get(url).text
today += datetime.timedelta(days=1)
return full_html
if __name__ == '__main__':
app.run(host="0.0.0.0", threaded=True, debug=False, port=5000)
|
#!/usr/bin/python
# soapclient.py - SOAP client class, part of BlackStratus.
"""
Provider level access to SOAP service.
"""
import webservice.soapclient
import logging;
logger = logging.getLogger("Notification")
class CaseClient(webservice.soapclient.TrSoapClient):
def __init__(self, wsdl_url, username, password):
self.servicename = "Case"
logger.info("Creating %s instance. url:%s,user:%s" % (self.__class__.__name__,
wsdl_url, username))
webservice.soapclient.TrSoapClient.__init__(self, wsdl_url, username,
password, self.servicename)
def get_case_detail(self, caseid):
logger.info("Fetching case (%d) details from provider" % caseid)
case_response = self.service.getCase(caseid)
if not case_response:
log.error("Failed to get response from webserver.")
return None
logger.info("getCase(%s) Response: Statuscode=%d. Message:%s" %
(caseid, case_response.statusCode, case_response.failureMessage))
if(case_response.statusCode != 0):
logger.error("getCase(%s) Failed: Statuscode=%d. Message:%s" %
(caseid, case_response.statusCode, case_response.failureMessage))
return None
else:
#logger.info(case_response)
return case_response.cases
def update_external_ticket(self, case):
if not case.external_ticket:
logger.error("External ticket not found for case %s", case.case_id)
raise Exception("External ticket not found for case %s" % case.case_id)
logger.info("Updating case %s(%s) with external ticket %s for customer %s." %
(case.name, case.case_id, case.external_ticket, case.customer_name))
caseToSave = {
'caseID' : case.case_id,
'name' : case.name,
'customerName' : case.customer_name,
'externalTicket': case.external_ticket,
'assignedToUser': "admin"
}
caseArray = [caseToSave]
case_response = self.service.saveCases(caseArray)
if not case_response or len(case_response) <=0:
logger.error("Failed to get response from webserver.")
return False
case_response = case_response[0]
logger.info("saveCase(%s) Response: Statuscode=%d. Message:%s" %
(case.case_id, case_response.statusCode, case_response.failureMessage))
if(case_response.statusCode != 0):
logger.error("saveCase(%s) Failed: Statuscode=%d. Message:%s" %
(case.case_id, case_response.statusCode, case_response.failureMessage))
raise Exception("SaveCase Failed: %s" % case_response.failureMessage)
return True
class CustomerClient(webservice.soapclient.TrSoapClient):
def __init__(self, wsdl_url, username, password):
self.servicename = "Customer"
logger.info("Creating %s instance. url:%s,user:%s" % (self.__class__.__name__,
wsdl_url, username))
webservice.soapclient.TrSoapClient.__init__(self, wsdl_url, username,
password, self.servicename)
|
from urllib import request, parse, error
import json
import socket
import re
import time
socket.setdefaulttimeout(5)
class ZabbixApi(object):
def __init__(self, url, username, password):
self.__url = url
self.__username = username
self.__password = password
self.__rpc_version = "2.0"
self.__header = {"Content-Type": "application/json"}
res = self.__auth()
if res[0]:
self.__token = res[1]
else:
self.__token = None
# print(self.__token)
def __http_request(self, method, params, request_id=1, token=None):
"""
Send an API request.
:param method: the API method to call
:param params: the parameters passed to the API method
:param request_id: an arbitrary identifier for the request
:return: the result
"""
# print(params)
request_data = {
"jsonrpc": self.__rpc_version,
"method": method,
"params": params,
"id": request_id,
"auth": token
}
msg = ""
result = None
req = request.Request(self.__url, json.dumps(request_data).encode("utf-8"))
for k, v in self.__header.items():
req.add_header(k, v)
try:
result = request.urlopen(req)
except error.ContentTooShortError:
msg = "content is too short"
except error.URLError:
msg = "url error"
except error.HTTPError:
msg = "http error"
finally:
if result:
result = json.loads(result.read().decode())
if "result" in result:
return True, result.get("result")
else:
return False, result.get("error", {}).get("data")
else:
return False, msg
def __auth(self):
params = {
"user": self.__username,
"password": self.__password
}
request_id = 1
method = "user.login"
res = self.__http_request(method, params, request_id)
# print(res)
return res
def get_hosts(self):
params = {
"output": "extend",
"selectGroups": "extend",
"selectInterfaces": [
"interfaceid",
"ip",
"type"
],
"selectParentTemplates": [
"templateid",
"name"
]
}
request_id = 2
method = "host.get"
res = self.__http_request(method, params, request_id, self.__token)
return res
def get_groups(self):
params = {
"output": "extend"
}
request_id = 3
method = "hostgroup.get"
res = self.__http_request(method, params, request_id, self.__token)
return res
def get_templates(self):
params = {
"output": "extend"
}
request_id = 3
method = "template.get"
res = self.__http_request(method, params, request_id, self.__token)
return res
def create_host_by_agent(self, ip, name, groups, templates):
params = {
"host": name,
"interfaces": [
{
"type": 1,
"main": 1,
"useip": 1,
"ip": ip,
"dns": "",
"port": "10050"
}
],
"groups": groups,
"templates": templates,
}
request_id = 4
method = "host.create"
res = self.__http_request(method, params, request_id, self.__token)
return res
def delete_hosts(self, hostids):
params = hostids
request_id = 5
method = "host.delete"
res = self.__http_request(method, params, request_id, self.__token)
return res
def get_hosts_by_id(self, hostid):
params = {
"hostids": hostid,
"output": "extend",
"selectGroups": "extend",
"selectInterfaces": [
"interfaceid",
"ip",
"type"
],
"selectParentTemplates": [
"templateid",
"name"
]
}
request_id = 6
method = "host.get"
res = self.__http_request(method, params, request_id, self.__token)
# print(res)
return res
def get_hosts_by_name(self, name):
params = {
"output": "extend",
"selectGroups": "extend",
"selectInterfaces": [
"interfaceid",
"ip",
"type"
],
"selectParentTemplates": [
"templateid",
"name"
],
"filter": {
"host": name
}
}
request_id = 7
method = "host.get"
res = self.__http_request(method, params, request_id, self.__token)
# print(res)
return res
def get_graph_by_id(self, hostid):
params = {
"hostids": hostid,
"sortfield": "name"
}
request_id = 8
method = "graph.get"
res = self.__http_request(method, params, request_id, self.__token)
return res
def get_item_by_graph(self, graphid):
params = {
"output": "extend",
"graphids": graphid
}
request_id = 9
method = "graphitem.get"
res = self.__http_request(method, params, request_id, self.__token)
return res
def get_history_by_item(self, itemid, history=3, starttime=None, endtime=None):
nowtime = time.time()
starttime = starttime if starttime else nowtime - 60 * 60
endtime = endtime if endtime else nowtime
params = {
"output": "extend",
"history": history,
"itemids": itemid,
"sortfield": "clock",
"sortorder": "DESC",
"time_from": int(starttime),
"time_till": int(endtime)
}
request_id = 10
method = "history.get"
res = self.__http_request(method, params, request_id, self.__token)
return res
def get_item_by_host(self, hostid):
params = {
"output": "extend",
"hostids": hostid,
"sortfield": "name"
}
request_id = 11
method = "item.get"
res = self.__http_request(method, params, request_id, self.__token)
return res
def get_item_by_id(self, itemid):
params = {
"output": "extend",
"itemids": itemid,
"sortfield": "name",
}
request_id = 12
method = "item.get"
res = self.__http_request(method, params, request_id, self.__token)
return res
def get_last_history_by_item(self, itemid, history=3):
params = {
"output": "extend",
"history": history,
"itemids": itemid,
"sortfield": "clock",
"sortorder": "DESC",
"limit": 1
}
request_id = 13
method = "history.get"
res = self.__http_request(method, params, request_id, self.__token)
return res
if __name__ == "__main__":
url = "http://192.168.95.136/zabbix/api_jsonrpc.php"
username = "Admin"
password = "zabbix"
zabbix_obj = ZabbixApi(url=url, username=username, password=password)
host_info = zabbix_obj.get_hosts_by_name("zabbix-1")
host_info = host_info[1] if host_info[0] else {}
if host_info and host_info[0]:
# print(host_info)
host_id = host_info[0].get("hostid")
# print(host_id)
graphs = zabbix_obj.get_graph_by_id(host_id)
# print(graphs)
graph_list = []
if graphs[0] and graphs[1]:
for graph in graphs[1]:
items = zabbix_obj.get_item_by_graph(graph.get("graphid"))
print("_______________")
print(graph.get("name"))
# print(graph)series
legend = []
series = []
unit = ""
if items[0] and items[1]:
for item in items[1]:
item_type = item.get("type")
item_obj = zabbix_obj.get_item_by_id(item["itemid"])[1][0]
unit = item_obj.get("unit") if item_obj.get("unit") else ""
history = item_obj["value_type"]
item_name = item_obj.get("name")
keys = re.search(r"\[([^\]]*)\]", item_obj.get("key_"))
keys = keys.groups()[0] if keys else ""
for pos, _key in enumerate(keys.split(","), start=1):
    item_name = item_name.replace("$%s" % pos, _key)
item_name = "%s 单位(%s)" % (item_name, item_obj.get("unit")) if item_obj.get("unit") else item_name
legend.append(item_name)
if item_type != 2:
history_data = zabbix_obj.get_history_by_item(item_obj["itemid"], history)[1]
else:
history_data = zabbix_obj.get_last_history_by_item(item_obj["itemid"], history)[1]
series_data = []
for data in history_data:
series_data.append(data.get("value"))
series.append({
"name": item_name,
"tpye": "line",
"data": series_data
})
# print(series)
graph_list.append({
"legend":legend,
"series":series,
"unit":unit
})
# print(legend)
# print(series)
print(json.dumps(graph_list, indent=2))
# print(json.dumps(zabbix_obj.get_hosts()[1], indent=2))
# print(json.dumps(zabbix_obj.get_groups()[1], indent=2))
# print(json.dumps(zabbix_obj.get_templates()[1], indent=2))
# print(json.dumps(zabbix_obj.create_host_by_agent("192.168.95.134", "zabbix-3", [{"groupid":"2"}],[{"templateid":"10001"},])[1], indent=2))
# print(json.dumps(zabbix_obj.delete_hosts(["10107"])[1], indent=2))
# print(json.dumps(zabbix_obj.get_hosts_by_id(["10105","10084"])[1], indent=2))
# print(json.dumps(zabbix_obj.get_hosts_by_name(["zabbix-1","Zabbix server"])[1], indent=2))
# graphs = zabbix_obj.get_graph_by_id("10105")[1]
# items = zabbix_obj.get_item_by_host("10105")[1]
# # print(json.dumps(items, indent=2))
# items = zabbix_obj.get_item_by_id(25400)[1]
# # print(json.dumps(items, indent=2))
# # exit()
# for graph in graphs:
# # print(graph["name"])
# items = zabbix_obj.get_item_by_graph(graph["graphid"])
# print(items[1])
# for item_obj in items[1]:
# item_obj = zabbix_obj.get_item_by_id(item_obj["itemid"])[1][0]
# history = item_obj["value_type"]
# item_name = item_obj.get("name")
#
# print(item_obj)
# # print(item_name)
# # print(item_obj.get("key_"))
# keys = re.search(r"\[([^\]]*)\]", item_obj.get("key_"))
#
# keys = keys.groups()[0] if keys else ""
# # print(keys.split(","))
# for _key in keys.split(","):
# item_name = item_name.replace("$%s" % (keys.index(_key) + 1), _key)
# print(item_name)
# print(item_obj.get("units"))
# # print(item_obj.get("name"),1, item_obj.get("key_"),item_obj["itemid"])
# # print(history[1][0]["value_type"])
# history_data = zabbix_obj.get_history_by_item(item_obj["itemid"], history)[1]
# # print(item_obj)
# print(time.time())
# print(history_data)
# # break
# # break
#
#
# # print(json.dumps(zabbix_obj.get_graph_by_id("10105")[1], indent=2))
# # print(json.dumps(zabbix_obj.get_item_by_graph(549)[1], indent=2))
# # print(json.dumps(zabbix_obj.get_history_by_item("23258")[1], indent=2))
|
import random
from django.apps import apps
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import models
from django.db.models.fields import NOT_PROVIDED
from faker import Factory as FakerFactory
from .compat import get_model_fields, string_types
from .exceptions import ForeignKeyError
from .lazy import Lazy
from .utils import language_to_locale
from .values import Evaluator
user_model = get_user_model()
locale = language_to_locale(settings.LANGUAGE_CODE)
class Factory(object):
def __init__(self, fake=None):
self.fake = fake or FakerFactory.create(locale)
def seed(self, seed, set_global=False):
self.fake.seed(seed)
if set_global:
random.seed(seed)
def build_one(self, model, fields=None, pre_save=None, seed=None, make_fks=False, iteration=None):
if fields is None:
fields = {}
if pre_save is None:
pre_save = []
if seed:
fake = FakerFactory.create(locale)
fake.seed(seed)
else:
fake = self.fake
evaluator = Evaluator(fake, factory=self, iteration=iteration)
if isinstance(model, string_types):
model = apps.get_model(*model.split('.'))
instance = model()
m2ms = []
lazies = []
for field_name, model_field in get_model_fields(model):
if isinstance(model_field, models.AutoField):
continue
if field_name not in fields and (model_field.null or model_field.default != NOT_PROVIDED):
continue
if isinstance(model_field, models.ManyToManyField):
m2ms.append(model_field)
continue
if isinstance(model_field, models.ForeignKey):
if not make_fks:
raise ForeignKeyError('field %s is a ForeignKey' % field_name)
if field_name in fields:
value = evaluator.evaluate(fields[field_name])
else:
value = evaluator.fake_value(model_field)
if model_field.blank:
value = ''
if model_field.choices:
value = fake.random_element(model_field.choices)[0]
if isinstance(value, Lazy):
lazies.append((field_name, value))
continue
if isinstance(model_field, models.ForeignKey):
field_name += '_id'
value = value.pk
# special case for user passwords
if model == user_model and field_name == 'password':
instance.set_password(value)
else:
setattr(instance, field_name, value)
for field_name, lazy in lazies:
value = getattr(instance, lazy.name)
if callable(value):
value = value(*lazy.args, **lazy.kwargs)
setattr(instance, field_name, value)
for func in pre_save:
func(instance)
return instance
def build(self, model, fields=None, pre_save=None, seed=None, quantity=None, make_fks=False):
if fields is None:
fields = {}
if quantity:
return [self.build_one(model, fields, pre_save, seed, make_fks, i) for i in range(quantity)]
else:
return self.build_one(model, fields, pre_save, seed, make_fks)
def make_one(self, model, fields=None, pre_save=None, post_save=None, seed=None, iteration=None):
if fields is None:
fields = {}
if post_save is None:
post_save = []
instance = self.build_one(model, fields, pre_save, seed, make_fks=True, iteration=iteration)
instance.save()
for func in post_save:
func(instance)
return instance
def make(self, model, fields=None, pre_save=None, post_save=None, seed=None, quantity=None):
if fields is None:
fields = {}
if quantity:
return [self.make_one(model, fields, pre_save, post_save, seed, i) for i in range(quantity)]
else:
return self.make_one(model, fields, pre_save, post_save, seed)
factory = Factory()
|
import math
import random
random.seed(28492)
class Synapse:
    def __init__(self, weight):
        self.weight = weight
        self.value = 0
        self.out = 0
    def output(self):
        self.out = self.weight * self.value
class Neuron:
    def __init__(self, value):
        self.value = value
        self.connect_to = []
    def calc(self):
        self.value = self.connect_to[0].out
class AI:
    def __init__(self, Synapse, Neuron):
        #Input
        self.input_neuron = Neuron(0)
        #1st Layer
        self.l1_neuron = Neuron(0)
        self.l1_neuron.connect_to.append(Synapse(random.random()))  # fed by the input neuron's value
        #Output
        self.out_neuron = Neuron(0)
        self.out_neuron.connect_to.append(Synapse(random.random()))  # fed by the layer-1 neuron's value
A1 = AI(Synapse, Neuron)
A1.input_neuron.value = random.randint(1,9)
A1.l1_neuron.connect_to[0].value = A1.input_neuron.value
A1.l1_neuron.connect_to[0].output()
A1.l1_neuron.calc()
A1.out_neuron.connect_to[0].value = A1.l1_neuron.value
A1.out_neuron.connect_to[0].output()
A1.out_neuron.calc()
|
import requests
print(requests.get("http://localhost:5000/test/Shagaev").text)
|
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import sys
import logging
from cement.core import controller
from cement.ext.ext_logging import LoggingLogHandler
from ebcli import __version__
from ..core import io, fileoperations, operations
from ..objects.exceptions import NoEnvironmentForBranchError
from ..resources.strings import strings, flag_text
from ..objects import region
from ..lib import aws
class AbstractBaseController(controller.CementBaseController):
"""
This is an abstract base class that is useless on its own, but used
by other classes to sub-class from and to share common commands and
arguments.
"""
class Meta:
label = 'abstract'
stacked_on = 'base'
stacked_type = 'nested'
arguments = [
(['environment_name'], dict(action='store', nargs='?',
default=[],
help=flag_text['general.env'])),
]
epilog = ''
usage = 'eb {cmd} <environment_name> [options ...]'
def do_command(self):
pass
@controller.expose(hide=True)
def default(self):
"""
This command will be shared within all controllers that sub-class
from here. It can also be overridden in the sub-class
"""
if self.app.pargs.debug:
io.echo('-- EBCLI Version:', __version__)
io.echo('-- Python Version:', sys.version)
if self.app.pargs.verbose:
LoggingLogHandler.set_level(self.app.log, 'INFO')
self.set_profile()
self.do_command()
def get_app_name(self):
app_name = fileoperations.get_application_name()
return app_name
def get_env_name(self, cmd_example=None, noerror=False):
env_name = self.app.pargs.environment_name
if not env_name:
#If env name not provided, grab branch default
env_name = operations. \
get_current_branch_environment()
if not env_name:
# No default env, lets ask for one
if noerror:
return None
if not cmd_example:
message = strings['branch.noenv'].replace('{cmd}',
self.Meta.label)
else:
message = strings['branch.noenv'].replace('eb {cmd}',
cmd_example)
io.log_error(message)
raise NoEnvironmentForBranchError()
return env_name
def get_region(self):
region = self.app.pargs.region
if not region:
region = fileoperations.get_default_region()
return region
def set_profile(self):
profile = self.app.pargs.profile
if profile:
aws.set_profile_override(profile)
else:
profile = fileoperations.get_default_profile()
if profile:
aws.set_profile(profile)
def complete_command(self, commands):
if not self.complete_region(commands):
if len(commands) == 1: # They only have the main command so far
# lets complete for positional args
region = fileoperations.get_default_region()
app_name = fileoperations.get_application_name()
io.echo(*operations.get_env_names(app_name, region))
def complete_region(self, commands):
# we only care about top command
cmd = commands[-1]
if cmd == '-r' or cmd == '--region':
io.echo(*[r.name for r in region.get_all_regions()])
return True
return False
|
T = int(input())
for _ in range(T):
n = int(input())
a = list(map(int, input().split()))
A = []
for i in range(0,n*2,2):
A.append([a[i],a[i+1]])
A = sorted(A, key = lambda i: i[0])
for i in range(n-1):
if A[i][1] >= A[i+1][0]:
A[i][1] = max(A[i][1],A[i+1][1])
A[i+1] = A[i]
A[i] = None
for i in range(n):
if A[i] != None:
print(A[i][0],end=" ")
print(A[i][1],end=" ")
print()
|
'''
# Objective function
def W():
Constraints:
2*x1 + 5*x2 + 2*x3 <= 12
7*x1 + x2 + 2*x3 <= 18
x1, x2, x3 >= 0
return 3*x1 + 4*x2 + 6*x3 -> max
'''
'''
1) Reduce to canonical form: introduce additional non-negative (slack) variables,
with a "+" sign for <= constraints and a "-" sign for >= constraints.
2*x1 + 5*x2 + 2*x3 + x4 = 12
7*x1 + x2 + 2*x3 + x5 = 18
Added x4, x5
x1, x2, x3, x4, x5 >= 0
2) Build the simplex table
x1 x2 x3 x4 x5 free terms
x4 2 5 2 1 0 12
x5 7 1 2 0 1 18
-3 -4 -6 0 0 0 <--- objective-function coefficients with a "-" sign, since the problem is a maximization
3) Choose the column with the smallest estimate (here it is -6, column x3)
4) Based on step 3, pick the pivot element:
min(12/2; 18/2) = 6 (row x4)
5) Then apply the rectangle rule: subtract from the second row the first row multiplied by 2/2=1.
From the third row subtract the first-row element of the same column multiplied by the smallest estimate divided by 2 (-6/2).
Divide the first row by 2.
We obtain a new table, bringing x3 into the basis and removing x4 from it.
x1 x2 x3 x4 x5 free terms
x3 1 2.5 1 0.5 0 6
x5 5 -4 0 -1 1 6
r 3 11 0 3 0
elements of r: 1) -3-2*(-6)/2=3; 2) -4-5*(-6)/2=11;
3) -6-2*(-6)/2=0; 4) 0-1*(-6)/2=3; 5) 0-0*(-6)/2=0.
6) There are no negative elements in the last row, so the problem is solved.
Answer: 0, 0, 6
'''
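# Optional cross-check of the worked example above (an addition, not part of the
# original solution): SciPy's linprog solves the same LP. linprog minimizes, so
# the objective is negated to maximize. Expected optimum: x = (0, 0, 6), W = 36.
try:
    from scipy.optimize import linprog
    _check = linprog(c=[-3, -4, -6],
                     A_ub=[[2, 5, 2], [7, 1, 2]],
                     b_ub=[12, 18],
                     bounds=[(0, None)] * 3)
    print('SciPy cross-check:', _check.x, 'W =', -_check.fun)
except ImportError:
    pass  # SciPy is optional; the hand-rolled simplex below does not need it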
# 1)
# Constraints
cond1 = [2, 5, 2]
cond2 = [7, 1, 2]
cond = [cond1, cond2]
eq_cond1 = 12
eq_cond2 = 18
eq_cond = [eq_cond1, eq_cond2]
sign = '<='
# Objective function
func = [3, 4, 6]
minmax = 'MAX'
# For reduction to canonical form
def kan_form(cond):
n = []
for i in range(len(cond)):
if sign == '<=':
n.append(1)
elif sign == '>=':
n.append(-1)
return n
# 2) Build the simplex table
n = 4
# Create a table of the required size and fill it with zeros
def get_zero_table(cond, n):
table_zero = [[0] * (len(cond) + n) for i in range(len(cond) + 1)]
return table_zero
print('table_zero:', get_zero_table(cond, n))
table = get_zero_table(cond, n)
def simplex_table(cond, eq_cond, minmax, table):
c = 0
# Add the constraints to the table
for i in range(len(table)):
for j in range(len(table[i])):
if i < len(cond) and j < len(cond[i]):
table[i][j] = cond[i][j]
else: # Add the coefficients of the slack variables
if c < len(kan_form(cond)):
table[i][j] = kan_form(cond)[c]
i += 1
c += 1
# Add the free terms
for i in range(len(eq_cond)):
table[i][-1] = eq_cond[i]
# Add the objective-function coefficients
if minmax == 'MAX':
for i in range(len(func)):
table[-1][i] = -func[i]
elif minmax == 'MIN':
for i in range(len(func)):
table[-1][i] = func[i]
return table
print('simplex table:', simplex_table(cond, eq_cond, minmax, table))
# 3) Choose the column with the smallest estimate
def find_min_column():
global min_est, index_min
if minmax == 'MAX':
min_est = min(simplex_table(cond, eq_cond, minmax, table)[-1])
index_min = min(range(len(simplex_table(cond, eq_cond, minmax, table)[-1])), key=simplex_table(cond, eq_cond, minmax, table)[-1].__getitem__)
return min_est, index_min
print('Smallest estimate:', find_min_column()[0], 'Index of the column with the smallest estimate:', find_min_column()[1])
# 4) Based on step 3, pick the pivot element:
def perm_element():
lst_for_find_perm_el = []
current_table = simplex_table(cond, eq_cond, minmax, table)
for i in range(len(current_table) - 1):
lst_for_find_perm_el.append(current_table[i][-1]/current_table[i][find_min_column()[1]])
for_perm_el = min(lst_for_find_perm_el)
for i in range(len(lst_for_find_perm_el)):
if lst_for_find_perm_el[i] == for_perm_el:
ind_perm_el = i
perm_el = simplex_table(cond, eq_cond, minmax, table)[ind_perm_el][find_min_column()[1]]
# find the row index of the pivot element
row_perm_el = min(range(len(lst_for_find_perm_el)),
key=lst_for_find_perm_el.__getitem__)
return perm_el, row_perm_el
print('Pivot element:', perm_element()[0], 'Pivot element row index:', perm_element()[1])
# 5) Recalculate the simplex table and the pivot element
# while the last row still contains negative elements
from math import fabs
def recalculation_simplex_table_for_max():
current_table = simplex_table(cond, eq_cond, minmax, table)
perm_el = perm_element()[0]
row_ind_perm = perm_element()[1]
column_ind_perm = find_min_column()[1]
###
fabs_values_from_table = [fabs(i) for i in current_table[-1]]
###
while max(current_table[-1]) < max(fabs_values_from_table):
for j in range(len(current_table[row_ind_perm])):
current_table[row_ind_perm][j] = current_table[row_ind_perm][j] / perm_el
for i in range(len(current_table)):
for j in range(len(current_table[i])):
current_table[i][j] = current_table[i][j] - (current_table[row_ind_perm][j] * current_table[i][column_ind_perm]) / perm_el
return current_table
print('')
print('Recalculation of the simplex table:')
print(recalculation_simplex_table_for_max())
###############
def old_simplex_table(cond, eq_cond, minmax):
find_n = kan_form(cond)
table = [cond[i] + [0] * i + [find_n[i]] + [eq_cond[i]] for i in range(len(find_n))]
if minmax == 'MAX':
from_func = []
for i in func:
from_func.append(-i)
table.append(from_func)
elif minmax == 'MIN':
table.append(func)
table[-1].append(0)
count = 0
while count != len(find_n):
table[-1].append(0)
count += 1
###
table[0][4] = 0
table[0].append(12)
###
return table
|
# IntelHex library version information
version_info = (2, 1)
version_str = '.'.join([str(i) for i in version_info])
|
#!/usr/bin/env python
"""
pyjld.phidgets.erl_manager.cmd
"""
__author__ = "Jean-Lou Dupont"
__email = "python (at) jldupont.com"
__fileid = "$Id$"
__all__ = ['PhidgetsErlManagerCmd',]
from pyjld.system.command.base import BaseCmd, BaseCmdException
import pyjld.system.daemon as daemon
from erl_manager_daemon import PhidgetsErlManagerDaemon
class PhidgetsErlManagerCmd(BaseCmd):
def __init__(self, logger_factory, messages):
BaseCmd.__init__(self)
self.logger_factory = logger_factory
self.messages = messages
self.app = PhidgetsErlManagerDaemon(self.logger_factory, self.messages)
self.runner = daemon.DaemonRunner( self.app )
self.config_erl_name = None
self.config_erl_port = None
self.config_erl_cookie = None
#================================================
# COMMANDS
#================================================
def cmd_start(self, *pargs):
"""Start the daemon"""
try:
self.setParams()
self.runner.cmd_start()
except Exception,e:
self._handleException(e)
def cmd_stop(self, *pargs):
"""Stop the daemon"""
try:
self.setParams()
self.runner.cmd_stop()
except Exception,e:
self._handleException(e)
e.already_handled=True
def cmd_restart(self, *pargs):
"""Restarts the daemon"""
try:
self.setParams()
self.runner.cmd_restart()
except Exception,e:
self._handleException(e)
e.already_handled=True
#================================================
# PRIVATE
#================================================
def setParams(self):
self.runner.setParams(self.config_erl_name,
self.config_erl_port,
self.config_erl_cookie)
def _getLogger(self, console=False):
return self.logger_factory(console)
def _handleException(self,e):
logger = self._getLogger()
logger.error(e)
|
class Node:
def __init__(self,data):
self.data = data
self.reference = None
node1 = Node(10)
print(node1)
|
from urllib import parse
urls1 = ['province=上海&city=上海市&district=南汇区&all=Y&page=0&_=1509106871111',
'province=上海&city=上海市&district=卢湾区&all=Y&page=0&_=1509106871111',
'province=上海&city=上海市&district=嘉定区&all=Y&page=0&_=1509106871111',
'province=上海&city=上海市&district=奉贤区&all=Y&page=0&_=1509106871111',
'province=上海&city=上海市&district=宝山区&all=Y&page=0&_=1509106871111'
]
# Convert Chinese text to URL encoding
# with open('url.txt','a') as f:
# for url in urls1:
# new = parse.quote(url)
# f.write(new)
# f.write('\n')
# Convert URL encoding back to Chinese text
urls2 = ['province=%E4%B8%8A%E6%B5%B7&city=%E4%B8%8A%E6%B5%B7%E5%B8%82&district=%E5%8D%97%E6%B1%87%E5%8C%BA&all=Y&page=0&_=1509106871111',
'province=%E4%B8%8A%E6%B5%B7&city=%E4%B8%8A%E6%B5%B7%E5%B8%82&district=%E5%8D%97%E6%B1%87%E5%8C%BA&all=Y&page=0&_=1509106871111',
'province=%E4%B8%8A%E6%B5%B7&city=%E4%B8%8A%E6%B5%B7%E5%B8%82&district=%E5%8D%A2%E6%B9%BE%E5%8C%BA&all=Y&page=0&_=1509106871111',
'province=%E4%B8%8A%E6%B5%B7&city=%E4%B8%8A%E6%B5%B7%E5%B8%82&district=%E5%98%89%E5%AE%9A%E5%8C%BA&all=Y&page=0&_=1509106871111',
'province=%E4%B8%8A%E6%B5%B7&city=%E4%B8%8A%E6%B5%B7%E5%B8%82&district=%E5%98%89%E5%AE%9A%E5%8C%BA&all=Y&page=0&_=1509106871111'
]
with open('url.txt','a') as f:
for url in urls2:
new = parse.unquote(url)
f.write(new)
f.write('\n')
|
import django.dispatch
create_message = django.dispatch.Signal(providing_args=["room", "user", "content", "msg_type"])
|
# import socket programming library
#import socket
from socket import *
import os
import sys
# import thread module
from _thread import *
import threading
import json
from datetime import datetime,date,timedelta
import random
blockingdic = {}
blockdic = {}
blocktime = 0
loginuser = []
tempidlist = []
# thread function
def threaded(c):
while True:
global blockingdic
global blockdic
global loginuser
global tempidlist
data = c.recv(1024)
if not data:
print('Someone disconnect')
break
request = json.loads(data.decode('utf-8'))
if request['messagetype'] == 'credentials':
credentials = {}
with open('credentials.txt') as file:
for line in file.readlines():
pairs = line.split(' ')
credentials[pairs[0]] = pairs[1].strip()
if request['username'] in blockingdic:
timenow = datetime.now()
timeblocked = blockingdic[request['username']]
blockedtime = timenow - timeblocked
if blockedtime.total_seconds() < blocktime:
c.send(bytes('B',encoding='utf-8'))
continue
else:
blockingdic.pop(request['username'])
if request['username'] in credentials and request['password'] == credentials[request['username']]:
c.send(bytes('Y',encoding='utf-8'))
loginuser.append(str(request['username']))
username = request['username']
print(f'{username} log in')
else:
if request['username'] not in blockdic:
blockdic[request['username']] = 1
else:
blockdic[request['username']] += 1
if blockdic[request['username']] == 3:
blockdic.pop(request['username'])
blockingdic[request['username']] = datetime.now()
c.send(bytes('NB',encoding='utf-8'))
else:
c.send(bytes('N',encoding='utf-8'))
elif request['messagetype'] == 'logout':
username = request['username']
loginuser.remove(str(request['username']))
print(f'{username} logout')
continue
elif request['messagetype'] == 'downloadtempid':
username = request['username']
tempid = ''.join(["{}".format(random.randint(0, 9)) for num in range(0, 20)])
while tempid in tempidlist:
tempid = ''.join(["{}".format(random.randint(0, 9)) for num in range(0, 20)])
creattime = datetime.now()
expiredtime = creattime + timedelta(minutes = 15)
newline = username + ' ' + tempid + ' ' + creattime.strftime('%d/%m/%Y %H:%M:%S') + ' ' + expiredtime.strftime('%d/%m/%Y %H:%M:%S') + '\n'
newtempidfile = open('tempIDs.txt','a')
newtempidfile.write(newline)
newtempidfile.close()
message = tempid + ',' + creattime.strftime('%d/%m/%Y %H:%M:%S') + ',' + expiredtime.strftime('%d/%m/%Y %H:%M:%S')
print(f'> user: {username}\n> Tempid: \n{tempid}')
c.send(bytes(message, encoding='utf-8'))
elif request['messagetype'] == 'uploadcontactlog':
contactlist = {}
username = request['username']
print(f'>received contact from {username}')
for i in request['log']:
contactid = i
contactcreattime = request['log'][i]['createtime']
contactexpiretime = request['log'][i]['expiredtime']
contactuser = {}
contactuser['createtime'] = contactcreattime
contactuser['expiredtime'] = contactexpiretime
contactlist[contactid] = contactuser
print(f'{contactid},\n{contactcreattime},\n{contactexpiretime};\n')
print('> Contact log checking')
tempiduser = {}
with open('tempIDs.txt') as file:
for line in file.readlines():
#print(line)
data = {}
pairs = line.split(' ')
starttime = pairs[2].strip() + ' ' + pairs[3].strip()
expiredtime = pairs[4].strip() + ' ' + pairs[5].strip()
data['tele'] = pairs[0]
data['createtime'] = starttime
data['expiredtime'] = expiredtime
tempiduser[pairs[1]] = data
#print(tempiduser)
userlist = []
for i in contactlist:
if i in tempiduser and i not in userlist:
tele = tempiduser[i]['tele']
tid = i
Ctime = tempiduser[i]['createtime']
userlist.append(i)
print(f'{tele},\n{Ctime},\n{tid};\n')
# connection closed
c.close()
def start(port):
# get ip from os
#host = 'localhost'
hostname = gethostname()
host = gethostbyname(hostname)
serverPort = port
s = socket(AF_INET, SOCK_STREAM)
# start TCP servers socket
s.bind((host, port))
print(f"Server working on {host}:{serverPort}")
print("socket binded to port", serverPort)
# put the socket into listening mode
s.listen(5)
print("socket is listening")
# clear and create empty tempID.txt
f = open('tempIDs.txt','w')
f.close()
# forever loop until client wants to exit
while True:
# establish connection with client
c, addr = s.accept()
# one client connect in
print('Connected to :', addr[0], ':', addr[1])
# Start a new thread and return its identifier
start_new_thread(threaded, (c,))
s.close()
# Helper function
def setblocktime(time):
global blocktime
blocktime = time
return
if __name__ == '__main__':
if len(sys.argv) != 3:
print('required port and block_duration time\nTry python3 server.py <server_port> <block_duration>')
exit()
port = int (sys.argv[1])
blocktime = int (sys.argv[2])
# set blocking time as global
setblocktime(blocktime)
start(port)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 27 17:25:38 2018
@author: bdus
try to preprocess data
"""
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import random
import scipy.io as sio
from sklearn import preprocessing
from sklearn import datasets, svm, metrics
from sklearn.decomposition import PCA
from sklearn.preprocessing import OneHotEncoder
# =========== get data ===========
pwd = os.path.dirname(__file__)
DATA_PATH = os.path.join(pwd,'..','data')
print('DATA_PATH',DATA_PATH)
raw_dataset = sio.loadmat(os.path.join(DATA_PATH,'Indian_pines_corrected.mat'))['indian_pines_corrected']
raw_labels = sio.loadmat(os.path.join(DATA_PATH,'Indian_pines_gt.mat'))['indian_pines_gt']
num_classes = 16
percentage = 0.1
num_b = 200
# =========== show data ===========
def draw_bar(labels):
"""
@Description:
Draw labels
@input :
labels.shape == (145,145)
@output :
None
"""
labels = np.array(labels)
x = np.arange(np.min(labels),np.max(labels)+1,1)
y = np.array([np.sum(labels==i) for i in x])
print('x: ',x)
plt.stem(x,y,'-')
#plt.bar(x,y,0.4,color="green")
plt.xlabel("X-axis")
plt.ylabel("Y-axis")
plt.show()
if labels.shape == (145,145):
fig = plt.figure()
plt.imshow(labels)
# =========== preprocessing ===========
def scalar(data):
'''
0-1 (min-max) normalization
'''
maxnum = np.max(data)
minnum = np.min(data)
result = np.float32((data - minnum) / (maxnum - minnum))
return result
def z_score(data):
'''
Standardization (z-score)
'''
mean = np.mean(data)
stdnum = np.std(data)
result = np.float32((data - mean) / stdnum)
return result
def scalar_row(data):
'''
Row-wise normalization
'''
sum_row = np.sqrt(np.sum(data**2,1)).reshape([-1,1])
data = data / sum_row
return data
def del_background(dataset, labels, normalization = 4, pca = False):
'''
Normalize the data;
normalization = 1 : 0-1 (min-max) normalization
normalization = 2 : standardization
normalization = 4 : row-wise normalization
# attention: normalization must be done before the training samples are split off
'''
[m,n,b] = np.shape(dataset)
dataset = np.asarray(dataset,dtype = 'float32').reshape([m*n,b,])
labels = np.asarray(labels).reshape([m*n,1,])
if pca:
pca = PCA(n_components =50)
dataset = pca.fit_transform(dataset)
if normalization ==1:
min_max_scaler = preprocessing.MinMaxScaler()
dataset = min_max_scaler.fit_transform(dataset).astype('float32')
elif normalization ==2:
stand_scaler = preprocessing.StandardScaler()
dataset = stand_scaler.fit_transform(dataset).astype('float32')
elif normalization ==3:
stand_scaler = preprocessing.StandardScaler()
dataset = stand_scaler.fit_transform(dataset).astype('float32')
min_max_scaler = preprocessing.MinMaxScaler()
dataset = min_max_scaler.fit_transform(dataset).astype('float32')
elif normalization ==4:
dataset = scalar_row(dataset)
else:
pass
# Remove the background: pixels with label 0 are treated as background and can be ignored
index = np.argwhere(labels[:,-1] == 0).flatten()
dataset = np.delete(dataset, index, axis = 0)
labels = np.delete(labels, index, axis = 0)
# Put the label in the last position of the spectral dimension so labels stay aligned with the data
data_com = np.concatenate((dataset,labels),axis =1 )
return(data_com)
def devided_train(data_com, num_classes = 16, percentage = 0.1):
'''
data_com: 2-D matrix, one row per pixel, with the label as the last element
num_classes: number of land-cover classes
percentage: fraction of samples used for training
'''
# Split into training and test samples:
b = data_com.shape[1]
# Create two empty arrays to concatenate the per-class samples into
train_com = np.empty([1, b])
test_com = np.empty([1, b])
for i in range(1, num_classes + 1):
index_class = np.argwhere(data_com[:,-1] == i).flatten()
data_class = data_com[index_class]
num_class = len(data_class)
# Randomly pick the required number of training samples
if percentage <= 1:
num_train = np.ceil(num_class * percentage).astype('uint8')
else:
num_train = percentage
index_train = random.sample(range(num_class), num_train)
train_class = data_class[index_train]
test_class = np.delete(data_class,index_train, axis = 0)
# Concatenate the per-class samples into the full training and test sets
train_com = np.concatenate((train_com, train_class), axis = 0)
test_com = np.concatenate((test_com,test_class), axis = 0)
# Remove the initial empty row
train_com = np.delete(train_com, 0, axis = 0)
test_com = np.delete(test_com, 0, axis = 0)
return(train_com, test_com)
def preprocess(data_com, shuffle = True ):
# Data preprocessing
# 1. Shuffle the data (training set)
if shuffle:
num_train = data_com.shape[0]
seed = [i for i in range(num_train)]
random.shuffle(seed)
data_com = data_com[seed]
# 2. Separate the data from the labels
label = data_com[:,-1].astype('uint8')
data =np.delete(data_com, -1, axis = 1).astype('float32')
return(data, label)
# ============= get data ============
dataset = raw_dataset.reshape([145,145,200])
labels = raw_labels.reshape([145,145])
data_com = del_background(dataset, labels)
[train_com, test_com] = devided_train(data_com,num_classes = num_classes, percentage = percentage)
[train_data, train_label] = preprocess(train_com, shuffle = False)
[test_data, test_label] = preprocess(test_com, shuffle = False)
def one_hot_encode(num,cla):
tmp = np.zeros(cla)
tmp[num-1]=1
return tmp
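# Minimal usage sketch (an assumption, not part of the original script): the split
# produced above can feed a scikit-learn classifier directly, and one_hot_encode()
# turns a 1-based class index into a one-hot vector of length `cla`:
#
#     clf = svm.SVC(gamma='scale')
#     clf.fit(train_data, train_label)
#     pred = clf.predict(test_data)
#     print(metrics.classification_report(test_label, pred))
#     print(one_hot_encode(3, num_classes))  # 1 at position 2, zeros elsewhere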
|
__author__ = 'sb5518'
"""
This Module contains the grades_by_year_graph_generator(grades_dictionary, graph_name) function which
purpose is to generate the graphs required in question 5. It creates a graph of lines for the total amount of grades by
year. The required input is a dictionary of dictionaries in the form {grade:{year:total_restaurants, ...}, ...}
"""
import matplotlib.pyplot as plt
from matplotlib import patches
def grades_by_year_graph_generator(grades_dictionary, graph_name):
if not isinstance(grades_dictionary, dict):
raise TypeError('Please introduce a valid dictionary of dictionaries for grades and number of distict restaurants by year')
for dictionary in grades_dictionary.values():
if not isinstance(dictionary, dict):
raise TypeError('At least one element in the dictionary is not a dictionary')
if not isinstance(graph_name, str):
raise TypeError('The function did not receive a valid string representation to build the file name')
try:
plt.close()
fig, ax = plt.subplots()
ax.plot(grades_dictionary['A'].keys(), grades_dictionary['A'].values(), color='r')
ax.plot(grades_dictionary['B'].keys(), grades_dictionary['B'].values(), color='b')
ax.plot(grades_dictionary['C'].keys(), grades_dictionary['C'].values(), color='g')
ax.set_xlabel('Year')
ax.set_xticks([2011,2012,2013,2014,2015], minor=False)
ax.set_xticklabels(['2011','2012','2013','2014','2015'])
label_1, label_2, label_3 = patches.Rectangle((0, 0), 1, 1, fc="r"), patches.Rectangle((0, 0), 1, 1, fc="b"), patches.Rectangle((0, 0), 1, 1, fc="g")
ax.legend([label_1,label_2,label_3],['Grade A', 'Grade B', 'Grade C'], loc='best')
ax.set_ylabel('Number of grades by Year')
ax.legend([label_1,label_2,label_3],['Grade A', 'Grade B', 'Grade C'], loc='best')
plt.savefig("grade_improvement_" + graph_name.lower(), format='pdf')
except LookupError:
raise LookupError('Please check that the dictionary of Grades by year has the right format')
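if __name__ == '__main__':
    # Minimal usage sketch with made-up numbers (an assumption, not part of the
    # original module). The expected input is {grade: {year: total_restaurants, ...}, ...}.
    sample = {
        'A': {2011: 100, 2012: 120, 2013: 150, 2014: 170, 2015: 200},
        'B': {2011: 80, 2012: 85, 2013: 90, 2014: 95, 2015: 100},
        'C': {2011: 40, 2012: 38, 2013: 35, 2014: 30, 2015: 28},
    }
    grades_by_year_graph_generator(sample, 'Manhattan')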
|
from django.contrib import admin
from story.models import *
admin.site.register(Story)
admin.site.register(StoryImage)
admin.site.register(TestImage)
|
import os
import numpy as np
import torch
import torch.utils.data.dataset as Dataset
from skimage.transform import resize
import SimpleITK as sitk
import random
import scipy.io as sio
from scipy import ndimage,misc
# from batchgenerators.transforms import MirrorTransform, SpatialTransform
from aug_tool import Crop, MirrorTransform, SpatialTransform
def resize_image(image, old_spacing, new_spacing, order=3):
new_shape = (int(np.round(old_spacing[0] / new_spacing[0] * float(image.shape[0]))),
int(np.round(old_spacing[1] / new_spacing[1] * float(image.shape[1]))),
int(np.round(old_spacing[2] / new_spacing[2] * float(image.shape[2]))))
# int(np.round(old_spacing[2]/new_spacing[2]*float(image.shape[2]))))
return resize(image, new_shape, order=order, mode='edge')
# def read_image(image,spacing,spacing_target):
# image = resize_image(image, spacing, spacing_target, order=1).astype(np.float32)
# edsize = image.shape
# image = resize(image, (edsize[0], 128, 128), order=1, mode='edge').astype(np.float32)
# m = np.zeros((1, 128, 128))
# image = np.append(image, m, axis=0)
# image = np.append(m, image, axis=0)
# return image
def read_image(image,spacing,spacing_target):
image = resize_image(image, spacing, spacing_target, order=1).astype(np.float32)
edsize = image.shape
image = resize(image, (edsize[0], 128, 128), order=1, mode='edge').astype(np.float32)
m = np.zeros((1, 128, 128))
image = np.append(image, m, axis=0)
image = np.append(m, image, axis=0)
return image
def read_label(image,spacing,spacing_target):
edsize = image.shape
tem = convert_to_one_hot(image)
vals = np.unique(image)
result = []
for i in range(len(tem)):
result.append(resize_image(tem[i].astype(np.float32), spacing, spacing_target, order=1)[None])
image = vals[np.vstack(result).argmax(0)]
m = np.zeros((1, 128, 128))
tem = convert_to_one_hot(image)
vals = np.unique(image)
result = []
for i in range(len(tem)):
result.append(resize(tem[i].astype(np.float32), (edsize[0],128,128), order=1, mode='edge')[None])
image = vals[np.vstack(result).argmax(0)]
image = np.append(image, m, axis=0)
image = np.append(m, image, axis=0)
return image
def normor(image):
image -=image.mean()
image /=image.std()
return image
def convert_to_one_hot(seg):
vals = np.unique(seg)
res = np.zeros([len(vals)] + list(seg.shape), seg.dtype)
for c in range(len(vals)):
res[c][seg == c] = 1
return res
Path_mat='/media/cyh/mat/'
Path_4d = './data/4D/'
Path_lab = './data/label/'
Path_test_mat='./data/mat_test/'
# path_test = './data/test/4D/'
# path_test_label = './data/test/label/'
img_4D = []
img_4D_test = []
for root,dirs,files in os.walk(Path_mat):
for file in files:
mat_path = os.path.join(root, file)
img_4D.append(mat_path)
for root,dirs,files in os.walk(Path_test_mat):
for file in files:
mat_test_path = os.path.join(root, file)
img_4D_test.append(mat_test_path)
class Data(Dataset.Dataset):
def __init__(self,im_4D):
self.im_4D = im_4D
self.mirror_transform = MirrorTransform()
self.spatial_transform = SpatialTransform(do_elastic_deform=False,
alpha=(0., 1000.),
sigma=(10., 13.),
do_rotation=True,
angle_x=(0, 2 * np.pi),
angle_y=(0, 0),
angle_z=(0, 0),
do_scale=True,
scale=(0.75, 1.25),
border_mode_data='constant',
border_cval_data=0,
order_data=1,
random_crop=False)
def __len__(self):
return len(self.im_4D)
def __getitem__(self, index):
########### read the .mat file
source=sio.loadmat(self.im_4D[index])['source']
img_ed = sio.loadmat(self.im_4D[index])['moving_vol']
# print(img_ed.shape)
img_es = sio.loadmat(self.im_4D[index])['fixed_vol']
spacing=(1.000,1.000,1.000)
spacing=list(spacing)
############
labeled=sio.loadmat(self.im_4D[index])['moving_gt']
labeles = sio.loadmat(self.im_4D[index])['fixed_gt']
spacing_target = (1.000,1.000,1.000)
spacing_target = list(spacing_target)
"数据增强"
# img_ed = img_ed[np.newaxis, np.newaxis, :, :, :]
# labeled = labeled[np.newaxis, np.newaxis, :, :, :]
# img_es = img_es[np.newaxis, np.newaxis, :, :, :]
# labeles = labeles[np.newaxis, np.newaxis, :, :, :]
# source = source[np.newaxis, np.newaxis, :, :, :]
"镜像"
mirror_code = self.mirror_transform.rand_code()
img_ed = self.mirror_transform.augment_mirroring(img_ed, mirror_code)
labeled = self.mirror_transform.augment_mirroring(labeled, mirror_code)
img_es = self.mirror_transform.augment_mirroring(img_es, mirror_code)
labeles = self.mirror_transform.augment_mirroring(labeles, mirror_code)
source = self.mirror_transform.augment_mirroring(source, mirror_code)
"spatial transform"
coords = self.spatial_transform.rand_coords(img_ed.shape)
# coords = self.spatial_transform.rand_coords(img_ed.shape)
img_ed = self.spatial_transform.augment_spatial(img_ed, coords, is_label=False)
labeled = self.spatial_transform.augment_spatial(labeled, coords, is_label=True)
img_es = self.spatial_transform.augment_spatial(img_es, coords, is_label=False)
labeles = self.spatial_transform.augment_spatial(labeles, coords, is_label=True)
source = self.spatial_transform.augment_spatial(source, coords, is_label=False)
#
#
img_ed = np.transpose(img_ed, (2, 1, 0)) # xyz-zyx
img_es = np.transpose(img_es, (2, 1, 0)) # xyz-zyx
labeled = np.transpose(labeled, (2, 1, 0)) # xyz-zyx
labeles = np.transpose(labeles, (2, 1, 0)) # xyz-zyx
source=np.transpose(source,(2,1,0))
# img_ed = image4[0,:,:,:]
####
source=read_image(source,spacing,spacing_target)
img_ed = read_image(img_ed,spacing,spacing_target)
img_es = read_image(img_es,spacing,spacing_target)
labeled = read_label(labeled, spacing, spacing_target)
labeles = read_label(labeles, spacing, spacing_target)
source=normor(source)
img_ed = normor(img_ed)
img_es = normor(img_es)
# img_mid = normor(img_mid)
# img_pre = normor(img_pre)
# img_aft = normor(img_aft)
labeled = convert_to_one_hot(labeled)
labeles = convert_to_one_hot(labeles)
# print(source.shape)
# print(img_ed.shape)
# print(img_es.shape)
source=source[np.newaxis,:,:,:]
img_ed = img_ed[np.newaxis, :, :, :]
img_es = img_es[np.newaxis, :, :, :]
# print(img_ed.shape)
return source,img_ed,img_es,labeled,labeles
class Data_test(Dataset.Dataset):
def __init__(self,img_4D_test):
self.im_4D = img_4D_test
def __len__(self):
return len(self.im_4D)
def __getitem__(self, index):
#####4D
source = sio.loadmat(self.im_4D[index])['source']
source = np.transpose(source, (2, 1, 0)) # xyz-zxy
img_ed = sio.loadmat(self.im_4D[index])['moving_vol']
img_ed = np.transpose(img_ed, (2, 1, 0)) # xyz-zxy
img_es = sio.loadmat(self.im_4D[index])['fixed_vol']
img_es = np.transpose(img_es, (2, 1, 0)) # xyz-zxy
# spacing = np.array(source.GetSpacing())[[2, 1, 0]] ###[z,x,y]
spacing = (1.000, 1.000, 1.000)
spacing = list(spacing)
############
labeled = sio.loadmat(self.im_4D[index])['moving_gt']
labeled = np.transpose(labeled, (2, 1, 0))
# labeled = sitk.GetArrayFromImage(labed).astype(float)
labeles = sio.loadmat(self.im_4D[index])['fixed_gt']
labeles = np.transpose(labeles, (2, 1, 0))
# labeles = sitk.GetArrayFromImage(labes).astype(float)
spacing_target = (1.000, 1.000, 1.000)
spacing_target = list(spacing_target)
# spacing_target[0] = spacing[0]
# shapeim4 = image4.shape
# loc_mid = int(shapeim4[0]*0.5)
# loc_pre = int(shapeim4[0]*0.25)
# loc_aft = int(shapeim4[0]*0.75)
# img_ed = image4[0,:,:,:]
####
source = read_image(source, spacing, spacing_target)
img_ed = read_image(img_ed, spacing, spacing_target)
img_es = read_image(img_es, spacing, spacing_target)
labeled = read_label(labeled, spacing, spacing_target)
labeles = read_label(labeles, spacing, spacing_target)
source = normor(source)
img_ed = normor(img_ed)
img_es = normor(img_es)
# img_mid = normor(img_mid)
# img_pre = normor(img_pre)
# img_aft = normor(img_aft)
labeled = convert_to_one_hot(labeled)
labeles = convert_to_one_hot(labeles)
source = source[np.newaxis, :, :, :]
img_ed = img_ed[np.newaxis, :, :, :]
img_es = img_es[np.newaxis, :, :, :]
print(source.shape)
return source, img_ed, img_es, labeled, labeles
train_data = Data(img_4D)
test_data = Data_test(img_4D_test)
|
from unittest import result
from django.http import response
from django.test import TestCase
from django.urls import reverse
from alimentos.models import Food
import pytest
class TestAlimentos(TestCase):
def setUp(self):
food = Food.objects.create(name='Banana', brand='Seu Ze', section='Frutas', content='6', price='2.95' )
food.save()
self.response_get = self.client.get('food/')
# Routes tests
def test_route_create(self):
response = self.client.get('/create_food/')
self.assertEqual(response.status_code, 200)
def test_route_read_all(self):
response = self.client.get('/food/')
self.assertEqual(response.status_code, 200)
def test_route_update(self):
result = Food.objects.last()
response = self.client.get(f'/update_food/{result.id}')
self.assertEqual(response.status_code, 200)
def test_route_delete(self):
result = Food.objects.last()
response = self.client.post(f'/delete_food/{result.id}')
self.assertEqual(response.status_code, 302)
#Views tests
def test_template_create(self):
response = self.client.get('/create_food/')
self.assertTemplateUsed(response, 'form_food.html')
def test_template_read_all(self):
response = self.client.get('/food/')
self.assertTemplateUsed(response, 'food.html')
def test_template_update(self):
result = Food.objects.last()
response = self.client.get(f'/update_food/{result.id}')
self.assertTemplateUsed(response, 'form_food.html')
def test_template_delete(self):
result = Food.objects.last()
response = self.client.post(f'/delete_food/{result.id}')
self.assertTemplateNotUsed(response, None)
# Models Test
def test_model(self):
food = Food.objects.get(name='Banana')
self.assertEqual(food.price_food(), 'The Banana cost 2.95')
|
a = int(input('Enter a year: '))
if a % 4 == 0 and a % 100 != 0 or a % 400 == 0:
    print('The year {} IS a leap year'.format(a))
else:
    print('The year {} is NOT a leap year'.format(a))
|
import glob
import pickle
import numpy as np
import pandas as pd
import tensorflow.keras.models as models
from flight_delay_prediction.constant import CONTINUOUS_INPUTS, CATEGORICAL_INPUTS
from flight_delay_prediction.errors import UnknownCategoryError
from flight_delay_prediction.utils import cached_classproperty
class ResourcesAccess:
@classmethod
def preprocess_categorical(cls, key, value):
try:
return Resources.preprocess_objects[key].transform(np.array(value).reshape(-1, 1))
except ValueError:
raise UnknownCategoryError(f'Error for key {key}', f'Could not find value {value}')
@classmethod
def preprocess_continuous(cls, keys_values):
data = np.zeros(len(CONTINUOUS_INPUTS))
for i, keys in enumerate(CONTINUOUS_INPUTS):
data[i] = keys_values[keys]
return Resources.preprocess_objects['transformer'].transform(data.reshape(1, -1))
@classmethod
def predict(cls, keys_values):
# prepare categorical inputs
categorical = []
for key in CATEGORICAL_INPUTS:
categorical.append(cls.preprocess_categorical(key, keys_values[key])[0])
# prepare continuous inputs
cont = cls.preprocess_continuous(keys_values)
model_input = categorical + [cont]
return Resources.model.predict(model_input)
class Resources:
# resources_path = f'{settings.BASE_DIR}\\resources'
resources_path = 'E:\\UBB\\Licenta\\django_rest_api\\resources'
preprocess_path = f'{resources_path}\\preprocessing\\'
model_path = f'{resources_path}\\keras_model'
"""
Get a mapping of preprocessing objects
:return dictionary of keys being the filenames without extension and values the objects
"""
@cached_classproperty
def preprocess_objects(cls) -> dict:
preprocess_objects = {}
for file_path in glob.glob(cls.preprocess_path + '*.pkl'):
file_name = file_path.split('\\')[-1].split('.')[0]
obj = pickle.load(open(file_path, 'rb'))
preprocess_objects[file_name] = obj
print('log: Loaded preprocessing objects from file')
return preprocess_objects
"""
Returns the pre-trained Keras model loaded from model.json and model.h5 files
"""
@cached_classproperty
def model(cls):
# load json and create model
json_file = open(cls.model_path + '\\model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = models.model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights(cls.model_path + "\\model.h5")
print('log: Loaded model from file')
return loaded_model
"""
Returns a mapping of the airport code to its geographical coordinates
:return {'iata_code':{'lat':float, 'long':float}, ...}
"""
@cached_classproperty
def airport_codes(cls):
df = pd.read_csv(cls.resources_path + '\\ourairports.csv')
# df = pd.read_csv(cls.resources_path + '\\usa-airports.csv')
df = df[['iata_code', 'latitude_deg', 'longitude_deg']]
df = df.rename(columns={'latitude_deg': 'lat', 'longitude_deg': 'long'})
df = df.round(3)
df = df.set_index('iata_code')
print('log: Loaded airport codes from file')
return df.to_dict('index')
|
import json
import time
import os, sys
import requests
'''
Given a .json database of anime, picks the corresponding Anilist page and
adds a new key with characters (that were picked up via the
Anilist.co API).
'''
def update_database(database='anime-offline-database.json'):
'''
Searches the entire database for anilist.co links, gets their IDs
and calls finding() to get the data from their API.
The data is then added to the database.
'''
with open(database, 'r+',encoding='utf8') as anime_database:
data = json.load(anime_database) # Import the database
iterations = 0
internet_fails = 0
start_time = time.time()
time.sleep(0.0000000000001) #Avoiding division by 0
while iterations < len(data["data"]): #Checking every source that exists
sources = data["data"][iterations]["sources"]
for url in sources:
if 'anilist.co' in url: #Found anilist url to get the data.
id = url.rsplit('/', 1)[-1] #Last part of url has ID
characters = finding(id) #Get character list.
if characters == 'Connection Error':
time.sleep(10) #Wait a bit and retry
internet_fails += 1
characters = finding(id)
if characters == 'Not found' or characters == [] or characters == 'Connection Error': #no data = no characters
data['data'][iterations]['has_characters'] = False
data["data"][iterations]['downloaded'] = False
data["data"][iterations]['characters'] = []
else:
if characters != 0: # Successfully completed the request
#Updating json database
data["data"][iterations]['downloaded'] = False
data["data"][iterations]['characters'] = characters
data['data'][iterations]['has_characters'] = True
if 'downloaded' not in data["data"][iterations]: # If nothing was found, there is no data from Anilist
data['data'][iterations]['has_characters'] = False
data["data"][iterations]['characters'] = []
data["data"][iterations]['downloaded'] = False
print('''Completed update of %s/%s | Internet fails so far: %s | Velocity: %s entries/second'''
% (str(iterations),
str(len(data["data"])),
internet_fails,
round(float(iterations)/float(time.time() - start_time), 3)
),
end='\r')
iterations += 1
anime_database.seek(0) # rewind
anime_database.write(json.dumps(data, indent=True))
anime_database.truncate()
def clean_database(database='anime-offline-database.json'):
'''
Removes all entries that do not have has_characters == True, or that lack
this key entirely (i.e. there were problems getting the data).
'''
with open(database, 'r',encoding='utf8') as anime_database:
data = json.load(anime_database) # Import the database
iterations = 0
print('Database Loaded...')
while iterations < len(data["data"]) : #Checking every source that exists
print(str(iterations) + '/' + str(len(data["data"])) + ' entried cleaned.', end='\r')
# Start by deleting the keys we don't want:
data["data"][iterations].pop('type', None)
data["data"][iterations].pop('episodes', None)
data["data"][iterations].pop('status', None)
data["data"][iterations].pop('animeSeason', None)
data["data"][iterations].pop('picture', None)
data["data"][iterations].pop('thumbnail', None)
data["data"][iterations].pop('relations', None)
data["data"][iterations].pop('tags', None)
data["data"][iterations].pop('synonyms', None)
if 'has_characters' not in data["data"][iterations]: # Did not get anything from Anilist, because there was no anilist.co link.
del data["data"][iterations]
else:
if (
data["data"][iterations]["has_characters"] == False) or (
data["data"][iterations]["characters"] == []
): # Clean up if there are no characters
del data["data"][iterations]
else:
iterations += 1
with open(database, 'w',encoding='utf8') as anime_database:
anime_database.seek(0) # rewind
anime_database.write(json.dumps(data, indent=True))
anime_database.truncate()
def finding(id):
'''
Returns the JSON of the characters found for the supplied anime id.
'''
query = '''
query ($id: Int) {
Media (id: $id, type: ANIME) {
characters(sort: ID){
nodes {
name {
full
}
}
}
}
}
'''
#Id of the anime
variables = {
'id': id
}
url = 'https://graphql.anilist.co'
# Make the Api request
session = requests.Session()
try:
response = session.post(url, json={'query': query, 'variables': variables})
except Exception:
return 'Connection Error'
try:
return response.json()["data"]["Media"]["characters"]['nodes']
except: #Not found
return 'Not found'
def main():
if len(sys.argv) != 3:
sys.stderr.write("Usage: database_updater.py <database-dir> <update_database or clean_database>\n")
sys.exit(-1)
if not os.path.isfile(sys.argv[1]):
raise RuntimeError("%s: not found" % sys.argv[1])
elif sys.argv[2] == 'update_database':
update_database(sys.argv[1])
elif sys.argv[2] == 'clean_database':
clean_database(sys.argv[1])
else:
sys.stderr.write("Operation could not be perfomed. Usage: database_updater.py <database-dir> <update_database or clean_database>\n")
sys.exit(-1)
if __name__ == '__main__':
main()
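# Example invocations, matching the usage string above (the database path is
# whatever local copy of the .json database you have):
#
#     python database_updater.py anime-offline-database.json update_database
#     python database_updater.py anime-offline-database.json clean_database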
|