text stringlengths 8 6.05M |
|---|
import logging
import pprint
import requests as r
from time import sleep
from os import environ
class Gigas:
    """
    Models a connection to the Gigas API.

    Credentials default to the GIGAS_API_USER / GIGAS_API_PASSWORD
    environment variables when not supplied explicitly.
    """

    def __init__(self, apiuser=None, apipswd=None, api_endpoint="https://api.madrid.gigas.com"):
        """
        Creates the Gigas object given the credentials and requests an
        initial temporary API token.
        """
        self.api_endpoint = api_endpoint
        self.apiuser = apiuser if apiuser else environ.get("GIGAS_API_USER")
        self.apipswd = apipswd if apipswd else environ.get("GIGAS_API_PASSWORD")
        self.token = ""
        self.headers = {
            "Authorization": "",
            "Accept": "application/json"
        }
        # Initialise the retry counter before issuing any request.
        self.auth_retries = 0
        self._update_temporary_token()

    def _wait_for_transaction(self, transaction_id, polling_interval=5, max_retries=24):
        """
        Waits for transaction `transaction_id` to be completed or errored.

        Polls the transaction status endpoint every `polling_interval`
        seconds, at most `max_retries` times.

        Returns one of "Complete", "Not found", "Error" or "Timeout".
        """
        logging.info("Waiting for transaction_id: %s" % str(transaction_id))
        for num_retries in range(max_retries):
            res = r.get(self.api_endpoint + "/transaction/" + str(transaction_id) + "/status",
                        headers=self.headers)
            # Decode the body once instead of re-parsing it on every access.
            body = res.json()
            logging.info("Status: %s" % str(body))
            if "error" in body:
                if body["error"] == "Transaction not found":
                    logging.warning("Transaction %s not found" % str(transaction_id))
                    return "Not found"
                logging.error("Error waiting for transaction %s" % str(transaction_id))
                return "Error"
            if body["status"] == "complete":
                logging.info("Transaction %s complete" % str(transaction_id))
                return "Complete"
            logging.info("Status: %s, retry: %i" % (body["status"], num_retries))
            # BUG FIX: the original looped without ever sleeping, hammering
            # the API; honour the polling interval between attempts.
            sleep(polling_interval)
        logging.warning("Too many retries for transaction %s" % str(transaction_id))
        return "Timeout"

    def _update_temporary_token(self):
        """
        Requests a temporary token and stores it in the auth headers.
        """
        payload = {'login': self.apiuser, 'password': self.apipswd}
        res = r.post(self.api_endpoint + "/token", data=payload)
        self.token = res.json()["token"]
        logging.info("Got token: %s" % self.token)
        self.headers["Authorization"] = "Gigas token=" + self.token

    def create_vm(self, memory, cpus, hostname, label, primary_disk_size, swap_disk_size, template_id):
        """
        Creates the vm with specified values for
        - memory (in mb)
        - cpus
        - label
        - primary_disk_size
        - swap_disk_size
        - template_id

        Returns a GigasVM on success, False if the creation transaction failed.
        Raises requests.HTTPError after repeated 401 responses.
        """
        logging.info("Creating new vm")
        payload = {'memory': memory,
                   'cpus': cpus,
                   'hostname': hostname,
                   'label': label,
                   'primary_disk_size': primary_disk_size,
                   'swap_disk_size': swap_disk_size,
                   'template_id': template_id,
                   }
        res = r.post(self.api_endpoint + "/virtual_machine", data=payload, headers=self.headers)
        logging.info("VM creation result: %s" % res.json())
        if res.status_code == r.codes.unauthorized:
            logging.warning("Unauthorized access to API")
            self.auth_retries += 1
            # Update the token and try again
            if self.auth_retries < 2:
                logging.warning("Requesting a new API token")
                self._update_temporary_token()
                res = r.post(self.api_endpoint + "/virtual_machine", data=payload, headers=self.headers)
            else:
                # Raise a 401
                logging.error("Too many unauthorized access to API")
                res.raise_for_status()
        else:
            # Successful auth: reset the counter so a later expiry can
            # still be retried (the original counter only ever grew).
            self.auth_retries = 0
        transaction_id = res.json()["queue_token"]
        logging.info("Creating vm - queue_token: %s" % transaction_id)
        machine_id = res.json()["resource"]["id"]
        self._wait_for_machine_to_be_built(machine_id)
        transaction_result = self._wait_for_transaction(transaction_id)
        if transaction_result in ("Complete", "Not found"):
            machine_details = self.get_machine_info(machine_id)
            return GigasVM(vm_attributes=machine_details)
        logging.error("Transaction %s errored" % str(transaction_id))
        return False

    def get_machine_info(self, machine_id):
        """
        Returns a dict with the key/value pairs of a vm's attributes,
        including a synthesized "ip_addresses" list.
        """
        res = r.get(self.api_endpoint + "/virtual_machine/" + str(machine_id), headers=self.headers)
        vm_attributes = res.json()
        res = r.get(self.api_endpoint + "/virtual_machine/" + str(machine_id) + "/network_interfaces",
                    headers=self.headers)
        # BUG FIX: this was a generator expression, which exhausts after the
        # first `in` test, so every IP after the first match was dropped.
        # A set gives correct, O(1) repeated membership checks.
        interface_ids = {interface["id"] for interface in res.json()}
        ip_addresses = [
            ip["address"]
            for ip in r.get(self.api_endpoint + "/ip_addresses", headers=self.headers).json()
            if ip["interface_id"] in interface_ids
        ]
        vm_attributes["ip_addresses"] = ip_addresses
        logging.info("Attributes of the VM: %s " % str(vm_attributes))
        return vm_attributes

    def delete_vm(self, vm):
        """
        Deletes an existing GigasVM object's machine.

        Returns the transaction result string from _wait_for_transaction.
        """
        res = r.delete(self.api_endpoint + "/virtual_machine/" + str(vm.id), headers=self.headers)
        transaction_id = res.json()["queue_token"]
        # NOTE: the original ended with `del vm`, which only unbound the
        # local name and had no effect on the caller's object; removed.
        return self._wait_for_transaction(transaction_id)

    def _wait_for_machine_to_be_built(self, machine_id, polling_interval=5, max_retries=48):
        """
        Waits for a vm to be in 'online' state.

        Returns True when the machine comes online, False on timeout.
        """
        for _ in range(max_retries):
            res = r.get(self.api_endpoint + "/virtual_machine/" + str(machine_id), headers=self.headers)
            status = res.json()['status']
            logging.info("Status of the VM: %s " % status)
            if status == 'online':
                return True
            logging.info("Waiting for %i seconds" % polling_interval)
            sleep(polling_interval)
        return False
class GigasVM:
    """
    Models a Virtual Machine in the Gigas environment.

    Attribute names mirror the keys of the attribute dict returned by the
    Gigas API (plus the synthesized "ip_addresses" list).
    """

    def __init__(self, vm_attributes):
        """
        Copies the attributes in the vm_attributes dict onto the instance.
        """
        # BUG FIX: pprint.pprint() prints to stdout and returns None, so the
        # original logged the literal value None; pformat() returns the
        # formatted string for the log record instead.
        logging.info("Creating vm with attributes")
        logging.info(pprint.pformat(vm_attributes))
        for key, value in vm_attributes.items():
            setattr(self, key, value)
if __name__ == '__main__':
    # Smoke test: spin up one small VM using env-var credentials.
    gigas = Gigas()
    vm = gigas.create_vm(
        memory=512,
        cpus=1,
        hostname="test",
        label="test-label",
        primary_disk_size=20,
        swap_disk_size=1,
        template_id=70,
    )
|
# Find and Replace: Print the position of the first instance of the word "day", then create a new string where the word "day" is replaced with the word "month".
words = "It's Thanksgiving day. It's my birthday, too!"
print(words.find("day"))
# BUG FIX: the original did `newWords = words` (no copy happens; strings are
# immutable) and only printed the replacement, so newWords never actually
# held the new string the comment asks for.
newWords = words.replace("day", "month")
print(newWords)
# Min and Max: Print the min and max values of a list.
x = [1, 2, 3, 4, 5, 0]
# BUG FIX: Python-2 print statements are syntax errors under Python 3;
# use the print() function.
print(min(x))
print(max(x))
# First and Last: Print the first and last values in a list. Create a new list containing only the first and last values.
x = [1, 2, 3, 4]
first = x[0]
# Negative indexing is the idiomatic way to take the last element.
last = x[-1]
# BUG FIX: Python-2 print statement converted to the print() function.
print(first, last)
newList = [first, last]
print(newList)
# New List: Start with a list, sort it, then split your list in half. Push the list created from the first half to position 0 of the list created from the second half.
x = [4, 3, 1, 2, 6, 3]
xSorted = sorted(x)
# BUG FIX: `/` is float division in Python 3, so len(...) / 2 raises
# TypeError when used as a slice index; use integer division instead.
mid = len(xSorted) // 2
firstHalf = xSorted[:mid]
secondHalf = xSorted[mid:]
# Insert the whole first-half list as element 0 of the second half.
secondHalf.insert(0, firstHalf)
print(secondHalf)
# Generated by Django 3.1.6 on 2021-02-03 23:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the `build_stat` choice field to the `app` model."""

    # Must run after the migration that created the `app` model.
    dependencies = [
        ('main_app', '0004_app'),
    ]

    operations = [
        migrations.AddField(
            model_name='app',
            name='build_stat',
            # Two-letter coded choices; existing and new rows default to
            # 'NB' (Not Built).
            field=models.CharField(choices=[('B', 'Built'), ('NB', 'Not Built')], default='NB', max_length=2),
        ),
    ]
|
"""You have been given an array of size N consisting of integers.
In addition you have been given an element M you need to find and print the index of the last occurrence
of this element M in the array if it exists in it. Consider this array to be 1 indexed.
Input Format:
The first line consists of 2 integers N and M denoting the size of the array and the element to be searched for in the array respectively. The next line contains N space separated integers denoting the elements of the array.
Output Format
Print a single integer denoting the index of the last occurrence of integer M in the array if it exists, otherwise print -1."""
"""# SAMPLE INPUT
# 5 1
# 1 2 3 4 1
# SAMPLE OUTPUT
# 5"""
#Code
# Read N and M, then the N whitespace-separated array elements.
n, m = map(int, input("Enter the size of array and the target element: ").split())
a = list(map(int, input("Enter the elements of the list: ").split()))
# BUG FIX: the original compared the slice a[i::] (a list) to the int m,
# which is never equal, printed len(a) instead of the answer, found the
# FIRST occurrence rather than the last, and never printed -1 when absent.
# Scan from the end so the first match is the last occurrence (1-indexed).
last_index = -1
for i in range(len(a) - 1, -1, -1):
    if a[i] == m:
        last_index = i + 1
        break
print(last_index)
|
import unittest
from katas.kyu_6.equal_sides_of_an_array import find_even_index
class FindEvenIndexTestCase(unittest.TestCase):
    """
    Tests for find_even_index (Codewars "Equal Sides of an Array" kata).

    Per these cases, the function appears to return the lowest index where
    the sum of elements to its left equals the sum to its right, or -1 when
    no such index exists — confirm against the kata implementation.
    """
    def test_equals(self):
        # left [1,2,3] == right [3,2,1] at index 3
        self.assertEqual(find_even_index([1, 2, 3, 4, 3, 2, 1]), 3)
    def test_equals_2(self):
        self.assertEqual(find_even_index([1, 100, 50, -51, 1, 1]), 1)
    def test_equals_3(self):
        # no balancing index exists
        self.assertEqual(find_even_index([1, 2, 3, 4, 5, 6]), -1)
    def test_equals_4(self):
        self.assertEqual(find_even_index([20, 10, 30, 10, 10, 15, 35]), 3)
    def test_equals_5(self):
        # empty left side counts as sum 0
        self.assertEqual(find_even_index([20, 10, -80, 10, 10, 15, 35]), 0)
    def test_equals_6(self):
        # empty right side counts as sum 0
        self.assertEqual(find_even_index([10, -80, 10, 10, 15, 35, 20]), 6)
    def test_equals_7(self):
        # also accepts any iterable/range input
        self.assertEqual(find_even_index(range(1, 100)), -1)
    def test_equals_8(self):
        self.assertEqual(find_even_index([0, 0, 0, 0, 0]), 0)
    def test_equals_9(self):
        self.assertEqual(find_even_index([-1, -2, -3, -4, -3, -2, -1]), 3)
    def test_equals_10(self):
        self.assertEqual(find_even_index(range(-100, -1)), -1)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
#for pdfs
from reportlab.pdfgen import canvas
from django.http import HttpResponse
from django.shortcuts import render, redirect, HttpResponse
from django.contrib import messages
from django.core.urlresolvers import reverse
import sys, csv, string
from search_address import Places
def home(request):
    """Render the landing page, clearing any previous search results."""
    # Reset stale results from an earlier search session.
    request.session['nearby_locations'] = {}
    return render(request, "housing/home.html")
def search(request):
    """
    Handle an address-search POST: geocode the address with Places, stash
    the results in the session and redirect to the results page.
    """
    if request.method != "POST":
        # Direct GETs have nothing to search; send the user home.
        return redirect(reverse('housing:home'))
    search_address = request.POST['address_string']
    search_address_obj = Places(search_address)
    try:
        request.session['search_address'] = search_address_obj.full_address
        print(request.session['search_address'])
    except Exception:
        # BUG FIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt. When the geocoder cannot resolve a full
        # address, fall back to the raw query string.
        request.session['search_address'] = search_address
    request.session['nearby_locations'] = search_address_obj.restrictive_locations()
    print(request.session['nearby_locations'])
    request.session['valid_address'] = search_address_obj.valid_address
    return redirect(reverse('housing:results'))
def results(request):
    """Render the results page from the data stored by search()."""
    session = request.session
    context = {
        'search_address': session['search_address'],
        'nearby_locations': session['nearby_locations'],
        'valid_address': session['valid_address'],
    }
    return render(request, "housing/results.html", context)
#Quiz 2 Part 2
#By: Jacob Buurman
#November 25, 2020
#I pledge my honor that I have abided by the Stevens Honor System - Jacob Buurman
#I understand that I may access the course textbook and course lecture notes but I
#am not to access any other resource. I also pledge that I worked alone on this exam.
def main():
    """
    Menu-driven demo: arithmetic on two numbers, or string operations
    (vowel count / toy "encryption"), selected via numbered prompts.
    """
    # SECURITY FIX: menu selections used eval(input(...)), which executes
    # arbitrary expressions typed by the user; int() parses the expected
    # numeric choices safely (same result for valid input like "1").
    num_1 = int(input("Enter 1 for mathematical functions\nEnter 2 for string operations\n"))
    if num_1 == 1:
        num_2 = int(input("Enter 1 for addition\nEnter 2 for subtraction\nEnter 3 for multiplication\nEnter 4 for division\n"))
        result = 0
        if num_2 == 1:
            a = float(input("Enter first number to add: "))
            b = float(input("Enter second number to add: "))
            result = round(a + b, 4)
        elif num_2 == 2:
            a = float(input("Enter first number to subtract: "))
            b = float(input("Enter second number to subtract: "))
            result = round(a - b, 4)
        elif num_2 == 3:
            a = float(input("Enter first number to multiply: "))
            b = float(input("Enter second number to multiply: "))
            result = round(a * b, 4)
        elif num_2 == 4:
            a = float(input("Enter first number to divide: "))
            b = float(input("Enter second number to divide: "))
            # NOTE(review): still raises ZeroDivisionError when b == 0, as
            # the original did.
            result = round(a / b, 4)
        else:
            print("ERROR - MUST ENTER 1, 2, 3, OR 4")
        print("The answer is", result)
    elif num_1 == 2:
        num_3 = int(input("Enter 1 to determine the number of vowels in a string\nEnter 2 to encrypt a string\n"))
        vowels = 0
        if num_3 == 1:
            # Renamed from `str` to avoid shadowing the builtin.
            text = input("Enter a string of characters to see how many vowels there are: ")
            for ch in text:
                # Membership test replaces the original 10-way `or` chain.
                if ch in "aeiouAEIOU":
                    vowels = vowels + 1
            print("There are", vowels, "vowels.")
        elif num_3 == 2:
            text = input("Enter a string of characters to encrypt: ")
            for ch in text:
                print("", (ord(ch)) ** 3 * 2, end="")
            print()
        else:
            print("ERROR - MUST ENTER 1 OR 2")
    else:
        print("ERROR - MUST ENTER 1 OR 2")
main() |
from setuptools import setup, find_packages
import os
# Resolve requirements.txt relative to this file so the build works from any CWD.
BASE_DIR = os.path.dirname(__file__)

with open(os.path.join(BASE_DIR, 'requirements.txt')) as _file:
    # BUG FIX: the original kept blank lines from requirements.txt as empty
    # strings in install_requires; filter them out.
    requirements = [line.strip() for line in _file if line.strip()]

setup(
    name='pydicts',
    version='v0.1.1',
    packages=find_packages(),
    install_requires=requirements,
    license='MIT',
    author='Victor Matheus de Castro Geraldo',
    author_email='victormatheuscastro@gmail.com',
    description='Unofficial Python API for Dicio and DicionarioInformal'
)
import logging as logging_
from django.db import transaction
from django.urls import reverse
from django.shortcuts import redirect
from django.views.generic import TemplateView
from django.contrib.auth import login
from django.contrib import messages
from django.http import HttpResponseServerError, Http404
from formtools.wizard.views import NamedUrlSessionWizardView
from utils import get_request_ip
from custom_auth.models import User
from core.models import Address, UseCaseModel
from core.views import DataLayerViewMixin, WizardSegmentMixin
from core.utils import segment_event
from stages import transitions
from fit_quiz.views import FIT_QUIZ_SESSION_DATA_PREFIX
from inquiry.forms import (
InquiryFirstForm, InquiryHomeForm, InquiryHomeownerForm, WizardClientUserCreationForm
)
from inquiry.models import Inquiry
from inquiry.outcomes import (
INQUIRY_OUTCOME_SLUG_MAP, INQUIRY_OUTCOME_CONTEXTS, get_outcome_context,
get_state_zip_code_outcome_key
)
logger = logging_.getLogger('portals.apps.' + __name__)
def _get_inquiry_segment_event_data(client):
    """ Returns dict of inquiry and client data for use in segment events E25 and E69 """
    # Flattens fields from client, client.user, client.inquiry and
    # client.inquiry.address into one analytics payload.
    return {
        'tracking_status': 'investment inquiry submitted',
        'email': client.email,
        'phone': client.phone_number,
        'email_confirmed': client.email_confirmed,  # always False
        'friendly_id': client.friendly_id,
        'first_name': client.user.first_name,
        'last_name': client.user.last_name,
        'use_case_debts': client.inquiry.use_case_debts,
        'use_case_diversify': client.inquiry.use_case_diversify,
        'use_case_renovate': client.inquiry.use_case_renovate,
        'use_case_education': client.inquiry.use_case_education,
        'use_case_buy_home': client.inquiry.use_case_buy_home,
        'use_case_business': client.inquiry.use_case_business,
        'use_case_emergency': client.inquiry.use_case_emergency,
        'use_case_retirement': client.inquiry.use_case_retirement,
        'when_interested': client.inquiry.when_interested,
        'household_debt': client.inquiry.household_debt,
        'referrer_name': client.inquiry.referrer_name,
        'property_type': client.inquiry.property_type,
        'primary_residence': client.inquiry.primary_residence,
        'home_value': client.inquiry.home_value,
        'ten_year_duration_prediction': client.inquiry.ten_year_duration_prediction,
        'street': client.inquiry.address.street,
        'unit': client.inquiry.address.unit,
        'city': client.inquiry.address.city,
        'state': client.inquiry.address.state,
        'zip_code': client.inquiry.address.zip_code,
        # Presence of a related sms_consent object implies opt-in.
        'sms_allowed': True if hasattr(client, 'sms_consent') else False,
    }
class InquiryApplyWizard(WizardSegmentMixin, NamedUrlSessionWizardView):
    """
    Multi-step inquiry wizard: first (address) -> home -> homeowner -> signup.

    On completion, done() creates the Client/User, Address and Inquiry
    records, transitions the client's stage, fires segment events and
    'secretly' logs the new client in pending email confirmation.
    """
    # Use CustomFormToolsSessionStorage to fix bug EN-308 (described further in its docstring)
    storage_name = 'inquiry.utils.CustomFormToolsSessionStorage'
    form_list = [
        # Form for Address obj
        ("first", InquiryFirstForm),
        # Forms for Inquiry obj
        ("home", InquiryHomeForm),
        ("homeowner", InquiryHomeownerForm),
        # Form for Client account creation
        ("signup", WizardClientUserCreationForm),
    ]
    # Templates for Forms. Must have same key as tuple[0] in form_list.
    templates = {
        "first": "inquiry/first.html",
        "home": "inquiry/home.html",
        "homeowner": "inquiry/homeowner.html",
        "signup": "inquiry/signup.html"
    }

    def get_template_names(self):
        """Return the template for the current wizard step."""
        return [self.templates[self.steps.current]]

    def get_step_url(self, step):
        """Build the named URL for the given wizard step."""
        return reverse(self.url_name, kwargs={'step': step})

    @staticmethod
    def fill_form_initial(session, initial):
        """
        fills initial with the items in session starting with FIT_QUIZ_SESSION_DATA_PREFIX
        """
        for key in session.keys():
            if key.startswith(FIT_QUIZ_SESSION_DATA_PREFIX):
                # Strip the prefix so the remainder matches a form field name.
                remaining = key[len(FIT_QUIZ_SESSION_DATA_PREFIX):]
                initial.update({remaining: session.get(key)})

    def get_form_initial(self, step):
        """Return initial data for `step`, pre-populated from fit-quiz session data."""
        # If fit quiz data is in session, use it to pre-populate some inquiry first form fields
        initial = self.initial_dict.get(step, {})
        InquiryApplyWizard.fill_form_initial(self.request.session, initial)
        return initial

    def _get_email(self, form_data, form_current_step):
        """
        returns the email from:
        + the email in form_data if the current step is the first step
        + the email from the first step if this is not the first step
        + the fit quiz if the user filled a fit quiz
        """
        email = ''
        # the signup form also has an email field so make sure this is the first form
        if form_current_step == 'first' and 'email' in form_data:
            # this is the first step
            return form_data['email']
        # if this is not the first step then get the email from the first step because the user
        # may have typed in a different email than that from the fit quiz
        if form_current_step != 'first':
            # self.storage.get_step_data('first') should not be None at this point
            return self.storage.get_step_data('first').getlist('first-email')[0]
        # this is the first step so pre-populate the email from a fit quiz
        initial = self.get_form_initial(form_current_step)
        if 'email' in initial:
            email = initial['email']
        return email

    def get(self, request, *args, **kwargs):
        """Redirect confirmed users away; warn unconfirmed ones, then proceed."""
        if request.user.is_authenticated:
            if request.user.email_confirmed:
                return redirect(request.user.homepage)
            messages.info(
                request, (
                    "It looks like you've already submitted an investment estimate form but have "
                    "not yet confirmed your email address."
                )
            )
        return super().get(request, *args, **kwargs)

    def _create_address(self, address_data):
        """Validate and persist an Address from the first-step form data."""
        address = Address(**address_data)
        address.full_clean()
        address.save()
        return address

    def _create_inquiry(self, inquiry_data, client, ip_address, address):
        """Validate and persist an Inquiry linked to the client and address."""
        inquiry_data['client'] = client
        inquiry_data['ip_address'] = ip_address
        inquiry = Inquiry(**inquiry_data)
        inquiry.address = address
        inquiry.full_clean()
        inquiry.save()
        return inquiry

    def done(self, form_list, form_dict, **kwargs):
        """
        Final wizard step: create the Client, Address and Inquiry, run the
        stage transition, send segment event E69 and log the client in.

        Each later failure manually deletes the earlier objects because the
        whole flow cannot sit in one transaction.atomic block (see comments).
        """
        error_s = (
            'Sorry, there was an error creating your account. Please contact support@hometap.com'
        )
        ip_address = get_request_ip(self.request)
        # separate Inquiry data and Client signup data
        signup_data = form_dict['signup'].cleaned_data
        inquiry_data = {}
        for form_name, form in form_dict.items():
            if form_name == 'home' or form_name == 'homeowner':
                inquiry_data.update(form.cleaned_data)
        # add the email from the first step to signup_data
        first_data = form_dict['first'].cleaned_data
        signup_data['email'] = first_data.pop('email')
        # add the use case from the first step to the inquiry_data
        for field in UseCaseModel._meta.fields:
            inquiry_data[field.name] = first_data.pop(field.name)
        # we can't wrap the creation of all objects in a with transaction.atomic context because
        # create_client() is decorated with transaction.atomic, so create the client first
        try:
            client = User.objects.create_client(
                email=signup_data['email'],
                password=signup_data['password1'],
                first_name=inquiry_data['first_name'],
                last_name=inquiry_data['last_name'],
                phone_number=signup_data['phone_number'],
                state=first_data['state'],
                ip_address=ip_address,
                sms_opt_in=signup_data['sms_opt_in'],
                agree_to_terms=signup_data['agree_to_terms'],
                email_confirmed=False
            )
            user = client.user
        except Exception as e:
            logger.error('User+Client save failed {0}'.format(e))
            return HttpResponseServerError(error_s)
        try:
            with transaction.atomic():
                # create Address obj
                logger_error_s = 'Address save failed'
                address = self._create_address(first_data)
                # create Inquiry obj
                logger_error_s = 'Inquiry save failed'
                inquiry = self._create_inquiry(inquiry_data, client, ip_address, address)
        except Exception as e:
            # Compensate: the client was created outside this transaction.
            user.delete()
            logger.error('{0} {1}'.format(logger_error_s, e))
            return HttpResponseServerError(error_s)
        # can't transition to the stage inside the transaction.atomic context because
        # Transition.execute() has a transaction.atomic decorator
        try:
            logger_error_s = 'Client submit inquiry failed'
            transitions.ClientSubmitInquiry(client=client).execute(inquiry=inquiry)
        except Exception as e:
            # Compensate: roll back everything created above by hand.
            user.delete()
            address.delete()
            inquiry.delete()
            logger.error('{0} {1}'.format(logger_error_s, e))
            return HttpResponseServerError(error_s)
        # Log in the client, but they won't actually be able to do anything (will
        # not pass our custom auth middleware) until their email is confirmed.
        # 'Secretly' logging them in allows them to be 'fully' logged in
        # after clicking a confirmation link if they do it soon after
        # signing up
        login(self.request, user)
        # Segment Event E69 -- Note, we also send a similar datalayer event E25 on page load
        event_d = _get_inquiry_segment_event_data(client)
        segment_event(client.email, 'investment inquiry - created account - server', event_d)
        return redirect(reverse('inquiry:submitted'))

    @staticmethod
    def _get_wizard_step_event_data(cleaned_data, exported_fields, form_current_step, outcome):
        """Build the segment payload for a step: tracking status + whitelisted fields."""
        tracking_status = '{0} screen {1}'.format(form_current_step, outcome)
        segment_data = {'tracking_status': tracking_status}
        for field in exported_fields:
            if field in cleaned_data:
                segment_data.update({field: cleaned_data[field]})
        return segment_data

    def _send_wizard_step_segment_event(self, form_current_step, form, email, outcome):
        """ sends a Segment event corresponding to the given wizard step """
        # Per-step whitelist of form fields to include in the event payload.
        if form_current_step == 'first':
            exported_fields = [
                'street',
                'unit',
                'city',
                'state',
                'zip_code',
                'use_case_debts',
                'use_case_education',
                'use_case_diversify',
                'use_case_buy_home',
                'use_case_renovate',
                'use_case_other',
                'use_case_business',
                'use_case_emergency',
                'use_case_retirement',
                'email',
            ]
        elif form_current_step == 'home':
            exported_fields = [
                'property_type',
                'primary_residence',
                'rent_type',
                'ten_year_duration_prediction',
                'home_value',
                'household_debt',
            ]
        elif form_current_step == 'homeowner':
            exported_fields = ['first_name', 'last_name', 'referrer_name', 'notes']
        elif form_current_step == 'signup':
            exported_fields = ['phone_number', 'sms_opt_in', 'agree_to_terms']
        # Segment Event E22, E23, E74, E77 -- inquiry screen submitted
        cleaned_data = form.cleaned_data
        event_data = self._get_wizard_step_event_data(
            cleaned_data, exported_fields, form_current_step, outcome
        )
        event_name = 'investment inquiry - {0} screen submitted'.format(form_current_step)
        segment_event(email, event_name, event_data)

    def _vet_based_on_form(self, form_current_step, form):
        """
        runs vetting on the given step
        redirects using a slug if the step is vetted

        Returns a (slug, url_name, message) tuple; (None, '', '') means the
        step passed vetting.
        """
        if form_current_step != 'first':
            # currently, we only validate the first step
            return (None, '', '')
        # vet first step if not a good inquiry based on state or zip code
        cleaned_data = form.cleaned_data
        outcome_key, vetted_message = get_state_zip_code_outcome_key(
            cleaned_data['state'], cleaned_data['zip_code']
        )
        if outcome_key is not None:
            return (INQUIRY_OUTCOME_SLUG_MAP[outcome_key], 'inquiry:outcome', vetted_message)
        # passed tests
        return (None, '', '')
class InquiryOutcomeView(TemplateView):
    """Render a vetting-outcome page selected by the `slug` URL kwarg."""
    template_name = 'inquiry/inquiry_outcome.html'

    def dispatch(self, request, *args, **kwargs):
        # 404 for slugs with no registered outcome context.
        if self.kwargs['slug'] not in INQUIRY_OUTCOME_CONTEXTS:
            raise Http404
        return super().dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        """Merge the slug's outcome context into the template context."""
        context = super().get_context_data(**kwargs)
        outcome_context = get_outcome_context(self.kwargs['slug'])
        context.update(outcome_context)
        return context
class InquirySubmitted(DataLayerViewMixin, TemplateView):
    """
    Render the email_confirm_awaiting page, passing in an event dict in context
    for the tracking event E25 for the tag manager javascript code to send.
    Note: If the user refreshes this page, E25 will send again
    """
    template_name = 'custom_auth/email_confirm_awaiting.html'
    event_id = 'E25'
    # TODO(Charlie): restore after completing EN-331
    # send_event_once_per_session = True

    def dispatch(self, request, *args, **kwargs):
        """
        Ensure user is an authenticated client and has not yet confirmed their
        email, else redirect them. This is similar to the functionality of the
        authorization_decorators, but they rely on all client URLs requiring
        the user to have confirmed their email; this view is an exception.
        """
        if not request.user.is_authenticated:
            return redirect(reverse('custom_auth:login_client'))
        if request.user.email_confirmed or not request.user.is_client:
            return redirect(request.user.homepage)
        return super().dispatch(request, *args, **kwargs)

    def get_event(self, **kwargs):
        """Build the E25 analytics payload for the datalayer mixin."""
        # Segment Event E25 -- Note, we also send a similar serverside event E69
        event_d = _get_inquiry_segment_event_data(self.request.user.client)
        event_d['event'] = 'investment inquiry - created account'
        return event_d
|
#fibonacci using generators; this function uses yield instead of return, hence it is generator
def fib(n):
    """
    Generator yielding the first int(n) Fibonacci numbers (0, 1, 1, 2, ...).

    Because this is a generator, bad input is only detected when the first
    value is requested; in that case a message is printed and the generator
    ends, matching the original behavior.
    """
    try:
        count = int(n)
    except (TypeError, ValueError):
        # BUG FIX: was a bare `except` (also caught SystemExit etc.);
        # only conversion failures are expected here. Typo fixed too.
        print('Incorrect type error, Enter integers only')
        return
    a, b = 0, 1
    for _ in range(count):
        yield a
        # Tuple unpacking replaces the original i==0 / i==1 / else branches.
        a, b = b, a + b
if __name__=='__main__':
    # Minimal REPL: read a count per line, print that many Fibonacci
    # numbers; 'quit' exits.
    print("Welcome to Fibonacci series calculator, In the shell below enter the number up to which you want fibonacci series to be generated or enter 'quit' to exit:")
    while True:
        print('>>>',end='')
        n=input()
        if n=='quit':
            break
        else:
            # fib() itself handles non-integer input by printing a message.
            for num in fib(n):
                print(num)
# comment
import datetime
import json
import os
from flask import Flask, render_template, request, redirect, url_for, session, flash, Blueprint
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.dialects.mysql import TINYINT
from flask_navigation import Navigation
from recurrent import RecurringEvent
from wtforms import Form, StringField, PasswordField, validators
from flask_restplus import Api, fields, Resource
from ConfigParser import SafeConfigParser, NoSectionError
from passlib.hash import sha256_crypt
# Serve static assets from ../static relative to this module.
app = Flask(__name__, static_folder='../static')
# Disable browser caching of static files (always re-fetch; useful in dev).
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
# Silence SQLAlchemy's modification-tracking overhead/warning.
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
nav = Navigation(app)
class RegistrationForm(Form):
    """WTForms registration form with length validation and password confirmation."""
    first_name = StringField('First Name', [validators.Length(min=1, max=35)])
    last_name = StringField('Last Name', [validators.Length(min=1, max=35)])
    email = StringField('Email Address', [validators.Length(min=6, max=35)])
    password = PasswordField('New Password', [
        validators.DataRequired(),
        # Must match the `confirm` field below.
        validators.EqualTo('confirm', message='Passwords must match')
    ])
    confirm = PasswordField('Repeat Password')
# dialect+driver://username:password@host:port/database
try:
    """Parse the properties and use SQL Alchemy to connect to DB"""
    # Reads DB credentials from ../properties.ini (kept out of source control).
    parser = SafeConfigParser()
    parser.read('../properties.ini')
    db_host = parser.get('aws-user-pw', 'host')
    db_user = parser.get('aws-user-pw', 'user')
    db_password = parser.get('aws-user-pw', 'password')
    db_port = parser.get('aws-user-pw', 'port')
    db_database = parser.get('aws-user-pw', 'todo-database')
    app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://' + db_user + ':' + db_password + \
                                            '@' + db_host + ':' + db_port + '/' + db_database
except NoSectionError as err:
    # NOTE(review): execution continues after this message with no
    # SQLALCHEMY_DATABASE_URI configured, so SQLAlchemy will fail later.
    print('You need the correct Properties file in your root directory')
db = SQLAlchemy(app)
class User(db.Model):
    """Object mapping of users"""
    id = db.Column(db.Integer, primary_key=True)
    firstName = db.Column(db.String(52), nullable=False)
    lastName = db.Column(db.String(52), nullable=False)
    email = db.Column(db.String(128), nullable=False)
    # Stores a sha256_crypt hash (see register()), never plaintext.
    password = db.Column(db.String(256), nullable=False)
    # NOTE(review): declared NOT NULL but never assigned in __init__ —
    # presumably populated elsewhere; verify inserts don't fail.
    api_key = db.Column(db.String(52), nullable=False)

    def __init__(self, id, first_name, last_name, email, password):
        # Callers pass id=None so the DB assigns the auto-increment key.
        self.id = id
        self.firstName = first_name
        self.lastName = last_name
        self.email = email
        self.password = password
class Todo(db.Model):
    """Object mapping of todos"""
    id = db.Column(db.Integer, primary_key=True)
    dueDate = db.Column(db.TIMESTAMP, nullable=False)
    createdAt = db.Column(db.TIMESTAMP, nullable=False)
    createdBy = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    text = db.Column(db.Text, nullable=True)
    # MySQL TINYINT(1) columns used as 0/1 booleans.
    completed = db.Column(TINYINT(1), nullable=True)
    deleted = db.Column(TINYINT(1), nullable=True)

    def __init__(self, id, due_date, created_at, created_by, text):
        # Callers pass id=None so the DB assigns the auto-increment key.
        self.id = id
        self.dueDate = due_date
        self.createdAt = created_at
        self.createdBy = created_by
        self.text = text
        # New todos start incomplete and not deleted.
        self.completed = 0
        self.deleted = 0
class UserLogActivity(db.Model):
    """Object mapping of User Activity"""
    id = db.Column(db.Integer, primary_key=True)
    userID = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    # e.g. "login", "logout", "add todo", "delete todo", "restore todo".
    activityType = db.Column(db.String(52), nullable=True)
    time = db.Column(db.TIMESTAMP, nullable=False)
    ipAddress = db.Column(db.String(32), nullable=True)
    # Free-form extra info; callers pass e.g. a todo id here.
    details = db.Column(db.Text)

    def __init__(self, id, user_id, activity_type, time, ip_address, details=None):
        # Callers pass id=None so the DB assigns the auto-increment key.
        self.id = id
        self.userID = user_id
        self.activityType = activity_type
        self.time = time
        self.ipAddress = ip_address
        self.details = details
@app.route('/logout', methods=['GET'])
def logout():
    """Logout route for users: log the action and clear the auth cookies."""
    response = redirect(url_for('login'))
    user_id = request.cookies.get('id')
    # BUG FIX: int(None) raised TypeError when the 'id' cookie was missing
    # (e.g. hitting /logout while already logged out); skip logging then.
    if user_id is not None:
        log = UserLogActivity(None, int(user_id), "logout", get_timestamp_sql(), request.remote_addr)
        db.session.add(log)
        db.session.commit()
    response.delete_cookie('email')
    response.delete_cookie('id')
    return response
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Registration route: validate the form and create a new User."""
    form = RegistrationForm(request.form)
    if request.method == 'POST' and form.validate():
        user_first_name = form.first_name.data
        user_last_name = form.last_name.data
        user_email = form.email.data
        user_password = str(form.password.data)
        # Only the sha256_crypt hash is stored, never the plaintext.
        user_password = sha256_crypt.hash(user_password)
        # id=None lets the database assign the auto-increment primary key.
        new_user = User(None, user_first_name, user_last_name, user_email, user_password)
        db.session.add(new_user)
        db.session.commit()
        flash('Thanks for registering ' + user_first_name + '!')
        return redirect(url_for('login'))
    return render_template('register.html', form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Login route for users: verify credentials, log activity, set cookies."""
    error = None
    if request.method == 'POST':
        email = request.form['email']
        pw = request.form['password']
        temp_user = User.query.filter_by(email=email).first()
        # BUG FIX: an unknown email made temp_user None, so accessing
        # temp_user.password raised AttributeError (HTTP 500) instead of
        # showing the invalid-credentials message.
        if temp_user is None or not sha256_crypt.verify(pw, temp_user.password):
            error = 'Invalid Credentials. Please try again.'
        else:
            log = UserLogActivity(None, temp_user.id, "login", get_timestamp_sql(), request.remote_addr)
            db.session.add(log)
            db.session.commit()
            response = redirect(url_for('home'))
            # These cookies are the app's auth mechanism (checked by other routes).
            response.set_cookie('email', email)
            response.set_cookie('id', str(temp_user.id))
            return response
    return render_template('login.html', error=error)
@app.route('/deleted')
@app.route('/restore/<int:todo_id>')
def deleted(todo_id=None):
    """Deleted page for user's todos"""
    # Require a logged-in user (identified by the 'email' cookie).
    cookie = request.cookies.get('email')
    if not cookie:
        return redirect(url_for('login'))
    cur_user = User.query.filter_by(email=request.cookies.get('email')).first()
    # /restore/<id>: log the action and un-delete that todo before rendering.
    if request.path.startswith('/restore/'):
        log = UserLogActivity(None, cur_user.id, "restore todo", get_timestamp_sql(),
                              request.remote_addr, todo_id)
        db.session.add(log)
        restore_todo = Todo.query.filter_by(id=todo_id).first()
        restore_todo.deleted = 0
        db.session.add(restore_todo)
        db.session.commit()
    todos = Todo.query.filter_by(createdBy=cur_user.id, deleted=1).all()
    if todos is not None:
        f = '%A, %b %d %I:%M %p'
        for todo in todos:
            # Attach display-formatted dates and a boolean completed flag
            # for the template.
            todo.dueDateFormat = datetime.datetime.strftime(todo.dueDate, f)
            todo.createdAtFormat = datetime.datetime.strftime(todo.createdAt, f)
            if todo.completed == 0:
                todo.completed = False
            else:
                todo.completed = True
    return render_template(
        'deleted-todos-page.html', todos=todos)
@app.route('/reverse/<order>')
@app.route('/', methods=['POST', 'GET'])
@app.route('/home', methods=['POST', 'GET'])
@app.route('/delete/<int:delete_id>')
@app.route('/check/<int:todo_id>')
def home(delete_id=None, todo_id=None, order="ASC"):
    """Home page for user's todos.

    One handler serves several URLs:
      POST /, /home        -> create a new todo from the submitted form
      /check/<todo_id>     -> toggle a todo's completed flag
      /delete/<delete_id>  -> soft-delete a todo (deleted=1) and log it
      /reverse/<order>     -> flip the current sort order
    Redirects to login when the 'email' cookie is absent.
    """
    cookie = request.cookies.get('email')
    if not cookie:
        return redirect(url_for('login'))
    cur_user = User.query.filter_by(email=request.cookies.get('email')).first()
    first_name = cur_user.firstName
    if request.method == 'POST':
        """Once a todo is added, we process and add it"""
        created_at_time = get_timestamp_sql()
        text = request.form['text']
        raw_due_time = request.form['duedate']
        # Natural language processing of date ("tomorrow 5pm" etc.).
        # NOTE(review): RecurringEvent.parse returns None for unparseable
        # input, which would make strftime below raise -- confirm the form
        # validates upstream.
        r = RecurringEvent(now_date=datetime.datetime.now())
        datetime_due_time = r.parse(raw_due_time)
        sql_time_format = '%Y-%m-%d %H:%M:%S'
        due_time = datetime.datetime.strftime(datetime_due_time, sql_time_format)
        # Creating the to do for the add to db
        new_todo = Todo(None, due_time, created_at_time, cur_user.id, text)
        db.session.add(new_todo)
        log = UserLogActivity(None, cur_user.id, "add todo", created_at_time, request.remote_addr)
        db.session.add(log)
        db.session.commit()
    # check if we switching completed
    if request.path.startswith('/check/'):
        update_todo = Todo.query.filter_by(id=todo_id).first()
        if update_todo.completed == 0:
            update_todo.completed = 1
        else:
            update_todo.completed = 0
        db.session.add(update_todo)
        db.session.commit()
    # check if we deleting (only rows not already soft-deleted)
    if delete_id:
        del_todo = Todo.query.filter_by(id=delete_id, deleted=0).first()
        if del_todo:
            log = UserLogActivity(None, cur_user.id, "delete todo", get_timestamp_sql(),
                                  request.remote_addr, del_todo.id)
            db.session.add(log)
            del_todo.deleted = 1
            db.session.add(del_todo)
            db.session.commit()
    # /reverse/<order> receives the CURRENT order in the URL and flips it.
    if request.path.startswith('/reverse/'):
        if order == "ASC":
            order = "DESC"
        else:
            order = "ASC"
    # The query always sorts ascending by due date; for "ASC" the list is
    # then reversed before display.  NOTE(review): this means "ASC" shows
    # latest-due first -- presumably intentional for the UI; confirm.
    if order == "ASC":
        todos = list(reversed(Todo.query.filter_by(createdBy=cur_user.id, deleted=0).order_by(Todo.dueDate).all()))
    else:
        todos = Todo.query.filter_by(createdBy=cur_user.id, deleted=0).order_by(Todo.dueDate).all()
    if order == "ASC":
        asc_order = True
    else:
        asc_order = False
    if todos is not None:
        # Pretty-print dates and coerce the 0/1 completed column to bool
        # for the template.
        f = '%A, %b %d %I:%M %p'
        for todo in todos:
            todo.dueDateFormat = datetime.datetime.strftime(todo.dueDate, f)
            todo.createdAtFormat = datetime.datetime.strftime(todo.createdAt, f)
            if todo.completed == 0:
                todo.completed = False
            else:
                todo.completed = True
    return render_template(
        'main-page.html', todos=todos, first_name=first_name, asc_order=asc_order)
def get_timestamp_sql():
    """Return the current local time formatted for SQL DATETIME columns."""
    return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# Flask-RESTPlus wiring: versioned API object, a namespace for the todo
# endpoints, and the request/response model shown in the swagger docs.
api = Api(app, version='1.0', title='Todo API',
          description='Proof of Concept API')
todo_ns = api.namespace('todo_api', 'Todo Methods')
todo_model = api.model('Todo', {
    'id': fields.Integer(required=False),
    'dueDate': fields.DateTime(required=True, description='Time this is due'),
    'text': fields.String(required=True, description='Text of the todo')
})
# APIs #
@todo_ns.route('/<string:api_key>')
class TodoApi(Resource):
    @todo_ns.doc('List all todos')
    def get(self, api_key):
        '''List all tasks for the user owning *api_key* as {id: info}.'''
        cur_user = User.query.filter_by(api_key=api_key).first()
        if not cur_user:
            return "No such API key"
        todos = Todo.query.filter_by(createdBy=cur_user.id, deleted=0).order_by(Todo.dueDate).all()
        # Completed flag is serialized as the strings "True"/"False" to
        # keep the existing API contract.
        return {
            todo.id: {
                "text": todo.text,
                "due date": str(todo.dueDate),
                "completed": "True" if todo.completed else "False",
            }
            for todo in todos
        }
# Top navigation bar (flask-nav): a single link back to the home view.
nav.Bar('top', [
    nav.Item('Home', 'home'),
])
if __name__ == "__main__":
    # Random per-process secret key: sessions/flash do not survive restarts.
    app.secret_key = os.urandom(12)
    app.run()
|
import shutil
import os
import time
def backup_file(foutname):
    """Rename *foutname*, if it exists, to a timestamped "_old" backup.

    "dir/name.ext" becomes "dir/name_old_YYYY_MM_DD_HHhMM.ext".  A missing
    file is a silent no-op.

    Raises TypeError when foutname is not a string (an explicit raise
    replaces the original assert, which is stripped under -O).
    """
    if not isinstance(foutname, str):
        raise TypeError("foutname must be a str, got %r" % type(foutname))
    if os.path.isfile(foutname):
        # os.path.splitext splits on the LAST dot, so names with extra dots
        # (or dotted directories) survive, and extension-less names no
        # longer raise IndexError like the original split(".")[1] did.
        root, ext = os.path.splitext(foutname)
        backup_name = root + "_old" + time.strftime("_%Y_%m_%d_%Hh%M") + ext
        shutil.move(foutname, backup_name)
def backup_folder(foldername):
    """Rename *foldername*, if present, to a timestamped "_old" backup."""
    assert(type(foldername) == str)
    if os.path.isdir(foldername):
        stamp = time.strftime("_%Y_%m_%d_%Hh%M")
        shutil.move(foldername, foldername + "_old" + stamp)
def build_file_list(fin_name):
    """Collect the lines of every numbered variant of *fin_name*.

    For "dir/name.ext" this reads "dir/name0.ext", "dir/name1.ext", ...
    in order until the first missing index, and returns a list with one
    list-of-lines per file.

    Raises ValueError (now with the offending name in the message) when
    not even the 0-indexed file exists.
    """
    abs_dir, base = os.path.split(fin_name)
    # splitext keeps dots inside the name intact; the original
    # split(".")[0/1] truncated names like "a.b.c".
    fname, ext = os.path.splitext(base)
    indata_list = []
    iteration = 0
    while True:
        fname_tmp = os.path.join(abs_dir, fname + str(iteration) + ext)
        if not os.path.isfile(fname_tmp):
            break
        with open(fname_tmp) as indata:
            indata_list.append(indata.readlines())
        iteration += 1
    if not indata_list:
        raise ValueError("Invalid name format for file name: %s" % fin_name)
    return indata_list
# def modify_file(fin_tomod, modifs, fout):
# """
# fin_tomod (str): the file as input to modify_file
# modifs (list of strs): the lines to modify in fin_tomod
# fout (str) : the file name to which write the modifications
# """
# with open(fin_tomod, mode="r") as input_tomod:
# # 3.1) Modify the elements in the input_file
# input_tomod_s = input_tomod.read()
# for line in modifs:
# line_rstrip = line.rstrip()
# tmp = line.split(":")
# regx = ""
# for part in tmp[:-1]:
# regx += part + ":"
# regx = re.escape(regx)
# OME_input_s = re.sub(regx, line_strip, OME_input_s)
# if iteration >= 1 and "real frequency grid file" not in " ".join(self.OME_input[iter_OME_input]):
# OME_input_s = re.sub(r"real frequency grid file:", "real frequency grid file:" + self.w_vec_file, OME_input_s)
# #print(OME_input_s)
# with open(fout, mode="w") as OME_output:
# OME_output.write(OME_input_s)
|
# -*- coding: UTF-8 -*-
'''
Created on 20171025
@author: leochechen
@Summary: ctf服务端Socket代码
'''
from __future__ import print_function
import time
import sys
import json
import socket
import struct
import weakref
from exptions import *
from worker import Worker
from ctf_local import lock_self
# Maximum number of queued connections accepted at once (listen backlog).
BACKLOG = 50
(HOST, PORT) = '0.0.0.0', 6777
class ISocketServer(object):
    '''
    Socket server for the ctf test framework.  Listens forever and hands
    each accepted connection to a Worker thread; messages are JSON with a
    4-byte native-int length prefix.  (Python 2 code: file(), the comma
    except-clause and print-to-file syntax are all py2-only.)
    '''
    def __init__(self):
        # Seconds before individual operations are considered timed out.
        self.timeout = 30
        # Pause before rebuilding the listening socket after a failure.
        self.retry_sleep_time = 30
        self.count = 0
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.bind((HOST, PORT))
        self.socket.listen(BACKLOG)
        # Log file lives for the whole server lifetime; never closed.
        self.log_file = file("ctfServer.log", 'w')
        self.thread_pool = []
    def rebuild(self):
        # Recreate the listening socket after accept() failed.
        try:
            self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.socket.bind((HOST, PORT))
            self.socket.listen(BACKLOG)
        except Exception, e:
            self.log_info("rebuild-{0}:{1}".format(type(e), e))
    def log_info(self, info):
        # Mirror every log line to stdout and the log file.
        print (info, file=sys.stdout)
        print (info, file=self.log_file)
        self.log_file.flush()
    def start(self):
        self.log_info("CTF Server start now...")
        self.accept()
    def accept(self):
        '''
        Accept loop: spawn a Worker thread for every incoming connection;
        on failure, sleep and rebuild the listening socket.
        '''
        while True:
            try:
                sock, addr = self.socket.accept()
                # weakref.proxy avoids a strong Worker -> server cycle.
                woker = Worker(weakref.proxy(self), sock, addr)
                woker.start()
            except Exception, e:
                self.log_info("accept-{0}:{1}".format(type(e), e))
                time.sleep(self.retry_sleep_time)
                self.rebuild()
    @lock_self
    def send_data(self, sock, data):
        # Serialize to JSON and send with a 4-byte length prefix.
        try:
            serialized = json.dumps(data)
        except (TypeError, ValueError) as e:
            raise CTFInvaildArg('你只能发送JSON序列化之后的数据')
        try:
            length = len(serialized)
            buff = struct.pack("i", length)
            sock.send(buff)
            sock.sendall(serialized)
        except socket.timeout:
            # Best-effort: drop the connection silently on send failures.
            sock.close()
        except socket.error as e:
            sock.close()
    def recv_data(self, sock):
        # Read the 4-byte length prefix, then the JSON payload in full
        # (recv_into a memoryview avoids per-chunk copies).
        length_buffer = sock.recv(4)
        if length_buffer:
            total = struct.unpack_from("i", length_buffer)[0]
        else:
            raise CTFTestServerError('recv length is None?')
        view = memoryview(bytearray(total))
        next_offset = 0
        while total - next_offset > 0:
            recv_size = sock.recv_into(view[next_offset:], total - next_offset)
            next_offset += recv_size
        try:
            deserialized = json.loads(view.tobytes())
            return deserialized
        except (TypeError, ValueError) as e:
            raise CTFInvaildArg('Data received was not in JSON format')
if __name__ == "__main__":
ISocketServer().start()
|
from enum import Enum
"""
0 = CHAR
1 = SMALLINT
2 = INTEGER
3 = FLOAT
4 = SMALLFLOAT
5 = DECIMAL
6 = SERIAL 1
7 = DATE
8 = MONEY
9 = NULL
10 = DATETIME
11 = BYTE
12 = TEXT
13 = VARCHAR
14 = INTERVAL
15 = NCHAR
16 = NVARCHAR
17 = INT8
18 = SERIAL8 1
19 = SET
20 = MULTISET
21 = LIST
22 = ROW (unnamed)
23 = COLLECTION
40 = LVARCHAR fixed-length opaque types 2
41 = BLOB, BOOLEAN, CLOB variable-length opaque types 2
43 = LVARCHAR (client-side only)
45 = BOOLEAN
52 = BIGINT
53 = BIGSERIAL 1
2061 = IDSSECURITYLABEL 2, 3
4118 = ROW (named)
"""
class InformixTypes(Enum):
    """Maps Informix column type codes to Django model field class names.

    Each member's value is (type_code, django_field_name_or_None).
    Members sharing an identical value tuple (e.g. FLOAT/DOUBLE) become
    enum aliases, which field_map() naturally deduplicates.
    """
    SQL_TYPE_CHAR = 0, 'CharField'
    SQL_TYPE_SMALLINT = 1, 'SmallIntegerField'
    SQL_TYPE_INTEGER = 2, 'IntegerField'
    SQL_TYPE_FLOAT = 3, 'FloatField'
    SQL_TYPE_DOUBLE = 3, 'FloatField'
    SQL_TYPE_REAL = 4, 'FloatField'
    SQL_TYPE_SMFLOAT = 4, 'FloatField'
    SQL_TYPE_DECIMAL = 5, 'DecimalField'
    SQL_TYPE_NUMERIC = 5, 'DecimalField'
    SQL_TYPE_SERIAL = 6, 'AutoField'
    SQL_TYPE_DATE = 7, 'DateField'
    SQL_TYPE_MONEY = 8, 'DecimalField'
    SQL_TYPE_NULL = 9, None
    SQL_TYPE_DATETIME = 10, 'DateTimeField'
    SQL_TYPE_BYTE = 11, 'BinaryField'
    SQL_TYPE_TEXT = 12, 'TextField'
    SQL_TYPE_VARCHAR = 13, 'CharField'
    SQL_TYPE_INTERVAL = 14, 'DurationField'
    SQL_TYPE_NCHAR = 15, 'CharField'
    SQL_TYPE_NVARCHAR = 16, 'CharField'
    SQL_TYPE_INT8 = 17, 'IntegerField'
    SQL_TYPE_SERIAL8 = 18, 'AutoField'
    SQL_TYPE_SET = 19, None
    SQL_TYPE_MASK = 31, None
    SQL_TYPE_UDTVAR = 40, 'CharField'
    SQL_TYPE_UDTFIXED = 41, None
    SQL_TYPE_LVARCHAR = 43, 'CharField'
    # BUG FIX: Django's boolean field is BooleanField; 'BoolField' does
    # not exist and every other entry here is a real Django field name.
    SQL_TYPE_BOOLEAN = 45, 'BooleanField'
    SQL_TYPE_BIGINT = 52, 'BigIntegerField'
    SQL_TYPE_BIG_SERIAL = 53, 'AutoField'

    def __init__(self, num, field=None):
        # num:   Informix type code (see the module-level table above)
        # field: Django field class name, or None when unmapped
        self.num = num
        self.field = field

    @classmethod
    def field_map(cls):
        """Return {type_code: django_field_name} for all canonical members."""
        return {e.num: e.field for e in cls}
|
# Input: two integers a and b on one whitespace-separated line.
entrada = str(input())
e = entrada.split()
if (-1000 <= int(e[0]) < 1000):
    a = int(e[0])
else:
    # NOTE(review): only a message is printed on invalid input; `a` stays
    # unbound and the code below raises NameError.
    print("inteiro invalido, tente de novo")
if (-1000 <= int(e[1]) < 1000 ):
    b = int(e[1])
else:
    # Same hazard for `b`.
    print("inteiro invalido, tente de novo")
if a > 0 and b > 0:
    q = int(a/b)
    r = int(a%b)
# Euclidean division: find remainder r in [0, |b|) with (a - r) divisible
# by b.  NOTE(review): the loop variable deliberately reuses `r`, and the
# loop unconditionally overwrites the q/r computed above.
for r in range(abs(b)):
    if ((a - r) % b) == 0:
        q = int((a - r)/b)
        break
print(q, end=" ")
print(r)
|
from django.shortcuts import render
from django.http import JsonResponse
from django.views import View
from django.db.models import Count
from rest_framework import viewsets, generics
from .models import Prediksi, PrediksiDetail
from ann.models import Training
from datawarehouse.models import Mahasiswa, Angkatan, ProgramStudi, CalculateTemp
from traintest.models import ArtificianNeuralNetworkAlgorithm
from .serializers import PrediksiSerializers, PrediksiDetailSerializers
#authentication
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
# Create your views here.
@method_decorator(login_required, name='get')
@method_decorator(login_required, name='post')
class PrediksiView(View):
    """Create and list graduation predictions.

    GET renders the index page with every Angkatan to choose from; POST
    with action=CREATE builds a new Prediksi (refusing duplicate names).
    """

    def get(self, request):
        list_angkatan = Angkatan.objects.all()
        return render(request, 'prediksi/index.html', {
            'list_angkatan': list_angkatan
        })

    def post(self, request):
        action = request.POST.get('action', False)
        if action == 'CREATE':
            nama = request.POST.get('nama', '')
            prediksi_exists = Prediksi.objects.filter(nama=nama).exists()
            if prediksi_exists:
                return JsonResponse({
                    'status': 'error',
                    'message': 'Neural Network dengan nama yang sama telah tersimpan'
                })
            else:
                prediksi = self.prediksi(request)
                return JsonResponse({
                    'status': 'success',
                    'message': 'Prediksi ' + prediksi.nama + ' berhasil disimpan tersimpan'
                })
        # BUG FIX: any action other than CREATE previously fell off the end
        # and returned None, which Django rejects with a 500.
        return JsonResponse({
            'status': 'error',
            'message': 'Unknown action'
        })

    def prediksi(self, request):
        """Run the selected training's ANN over every relevant student and
        persist a Prediksi with one PrediksiDetail row per student."""
        nama = request.POST.get('nama', '')
        training_id = request.POST.get('training', '')
        angkatan_id = request.POST.get('angkatan')
        training = Training.objects.get(id=training_id)
        angkatan = Angkatan.objects.get(id=angkatan_id)
        neural_network_id = training.neural_network.id
        ANN = ArtificianNeuralNetworkAlgorithm(id=neural_network_id, training=training)
        # Active ('A') students from all cohorts up to the chosen one.
        list_mahasiswa = Mahasiswa.objects.filter(
            status='A', angkatan__tahun__lte=angkatan.tahun).order_by('angkatan__tahun', 'nim')
        jumlah_mahasiswa = (Mahasiswa.objects.filter(
            status='A', angkatan__tahun__lte=angkatan.tahun).aggregate(jumlah=Count('id'))['jumlah'])
        # temp_jumlah_mahasiswa starts at 0 and is finalized only after all
        # detail rows are written, so partial runs are detectable.
        prediksi = Prediksi.objects.create(
            nama=nama, training=training, temp_jumlah_mahasiswa=0, status=False)
        for mahasiswa in list_mahasiswa:
            inputs = CalculateTemp().get_inputs(mahasiswa)
            output_net, prediction, label = ANN.Algorithm.predict(inputs)
            PrediksiDetail.objects.create(prediksi=prediksi, mahasiswa=mahasiswa,
                                          output_net=output_net, label=label)
        Prediksi.objects.filter(id=prediksi.id).update(
            temp_jumlah_mahasiswa=jumlah_mahasiswa)
        return prediksi
@method_decorator(login_required, name='get')
@method_decorator(login_required, name='post')
class PrediksiDetailView(View):
    """Detail page for one Prediksi, plus delete/activate POST actions."""
    def get(self, request, **kwargs):
        prediksi_id = kwargs.get('id')
        prediksi = Prediksi.objects.get(id=prediksi_id)
        training = prediksi.training
        neural_network = training.neural_network
        #prodi
        # Build the <select> markup for the prodi filter.
        # NOTE(review): values are concatenated into HTML without escaping;
        # safe only while `nama`/`tahun` are trusted DB values -- consider
        # django.utils.html.escape or building the select in the template.
        prodi_list = ProgramStudi.objects.all().order_by()
        prodi_select = '<select class="form-control" name="Prodi"><option value=""></option>'
        for prodi in prodi_list:
            prodi_select += '<option value="' + prodi.nama + '">' + prodi.nama + '</option>'
        prodi_select += '</select>'
        #angkatan
        angkatan_list = Angkatan.objects.all().order_by()
        angkatan_select = '<select class="form-control" name="Angkatan"><option value=""></option>'
        for angkatan in angkatan_list:
            angkatan_select += '<option value="' + str(angkatan.tahun) + '">' + str(angkatan.tahun) + '</option>'
        angkatan_select += '</select>'
        return render(request, 'prediksi/detail.html', {
            'prediksi': prediksi,
            'training': training,
            'neural_network': neural_network,
            'angkatan': angkatan_select,
            'prodi': prodi_select
        })
    def post(self, request, **kwargs):
        # action=DELETE removes the prediction; action=CHANGE toggles its
        # active status (only one prediction may be active at a time).
        action = request.POST.get('action')
        prediksi_id = kwargs.get('id')
        if action == 'DELETE':
            prediksi_nama = Prediksi.objects.get(id=prediksi_id).nama
            Prediksi.objects.get(id=prediksi_id).delete()
            return JsonResponse({
                'status': 'success',
                'message': 'Prediksi ' + prediksi_nama +' berhasil disimpan'
            })
        elif action == 'CHANGE':
            # status is the CURRENT state as a string: '1' -> deactivate,
            # '0' -> activate this one and deactivate all others.
            status = request.POST.get('status')
            prediksi_nama = Prediksi.objects.get(id=prediksi_id).nama
            if status == '1':
                Prediksi.objects.filter(id=prediksi_id).update(status=False)
                return JsonResponse({
                    'status': 'success',
                    'message': 'Status prediksi ' + prediksi_nama +' menjadi tidak aktif'
                })
            elif status == '0':
                Prediksi.objects.exclude(id=prediksi_id).update(status=False)
                Prediksi.objects.filter(id=prediksi_id).update(status=True)
                return JsonResponse({
                    'status': 'success',
                    'message': 'Status prediksi ' + prediksi_nama +' menjadi aktif'
                })
        # Fallback for unknown action/status combinations.
        return JsonResponse({
            'status': 'fail',
            'message': 'Terjadi kesalahan'
        })
class PrediksiList(generics.ListAPIView):
    """Read-only API list of Prediksi records, alphabetical by nama."""
    serializer_class = PrediksiSerializers

    def get_queryset(self):
        # order_by on the manager is equivalent to .all().order_by(...).
        return Prediksi.objects.order_by('nama')
class PrediksiDetailList(generics.ListAPIView):
    """Read-only API list of PrediksiDetail rows.

    Filters by ?status=... when given, otherwise by ?prediksi_id=...
    """
    serializer_class = PrediksiDetailSerializers

    def get_queryset(self):
        params = self.request.query_params
        prediksi_status = params.get('status', '')
        if prediksi_status:
            return PrediksiDetail.objects.filter(prediksi__status=prediksi_status)
        return PrediksiDetail.objects.filter(prediksi__id=params.get('prediksi_id', ''))
# Interactive demo: report simple statistics about a full name.
n = str(input('Digite o seu nome completo: ')).strip()
print('Analisando o seu nome...')
print(f'O seu nome em maiúsculas é {n.upper()}')
print(f'O seu nome em minúsculas é {n.lower()}')
print(f'Seu nome tem ao todo {len(n) - n.count(" ")} letras')
# BUG FIX: n.find(" ") returned -1 for single-word names and counted
# wrongly with repeated spaces; measure the first whitespace-separated
# word instead (and tolerate empty input).
partes = n.split()
primeiro = partes[0] if partes else ''
print(f'Seu primeiro nome tem {len(primeiro)} letras')
|
def ovatko_kaikki_alkiot_samoja(lista):
    """Return True when every element of *lista* equals its neighbours.

    Empty and single-element lists count as all-same, matching the
    original pairwise scan (which never found a differing pair there).
    """
    # Compare each adjacent pair; all() short-circuits on the first
    # mismatch, replacing the manual index/counter bookkeeping.
    return all(a == b for a, b in zip(lista, lista[1:]))
|
import numpy as np
import cupy as cp
import matplotlib.pyplot as plt
from sklearn.datasets import make_moons
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import edonet
# Make test dataset.
def make_dataset():
    """Build a standardized two-moons dataset, one-hot encoded, split into
    train/test, and moved to GPU as float32 cupy arrays."""
    x, y = make_moons(n_samples=4000, noise=0.2, random_state=0)
    scaler = StandardScaler()
    scaler.fit(x)
    x = scaler.transform(x)
    # One-hot encode the two class labels.
    encoder = [[1, 0], [0, 1]]
    y = np.array([encoder[i] for i in y])
    x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0)
    # Transfer to GPU memory for edonet.
    x_train = cp.array(x_train, dtype=cp.float32)
    x_test = cp.array(x_test, dtype=cp.float32)
    y_train = cp.array(y_train, dtype=cp.float32)
    y_test = cp.array(y_test, dtype=cp.float32)
    return x_train, x_test, y_train, y_test
# Make grid.
def make_meshgrid(x, y, h=.02):
    """Return a 2-D mesh covering the x/y ranges (padded by 1) at step *h*."""
    pad = 1
    xs = np.arange(x.min() - pad, x.max() + pad, h)
    ys = np.arange(y.min() - pad, y.max() + pad, h)
    grid_x, grid_y = np.meshgrid(xs, ys)
    return grid_x, grid_y
# Plot decision boundary.
def plot_contours(ax, model, xx, yy, **params):
    """Fill *ax* with the model's predicted class over the (xx, yy) grid."""
    # Predict on GPU, then move the labels to the host for matplotlib.
    z = model.predict(cp.c_[xx.ravel(), yy.ravel()]).argmax(axis=1)
    z = cp.asnumpy(z).reshape(xx.shape)
    out = ax.contourf(xx, yy, z, **params)
    return out
# Show decision boundary and scatter dataset.
def show_data_and_decision(model, x, y):
    """Scatter the (host) dataset over the model's decision regions.

    x is an (n, 2) array; y is one-hot, so argmax recovers class labels.
    """
    fig, ax = plt.subplots(figsize=(8, 6))
    x0, x1 = x[:, 0], x[:, 1]
    xx, yy = make_meshgrid(x0, x1)
    plot_contours(ax, model, xx, yy, cmap=plt.cm.coolwarm, alpha=0.8)
    ax.scatter(x0, x1, c=y.argmax(axis=1), cmap=plt.cm.coolwarm, s=20, edgecolors='k')
    plt.show()
# Make and test model.
def main():
    """Train a small dense edonet network on two-moons and plot the result."""
    # Make dataset.
    x_train, x_test, y_train, y_test = make_dataset()
    # Make and train model: three hidden layers with mixed activations,
    # a softmax output and cross-entropy loss.
    model = edonet.NeuralNet(input_size=2,
                             layers=({'type': 'Dense', 'nr_nodes': 8, 'activation': 'relu'},
                                     {'type': 'Dense', 'nr_nodes': 8, 'activation': 'tanh'},
                                     {'type': 'Dense', 'nr_nodes': 8, 'activation': 'sigmoid'},
                                     {'type': 'Dense', 'nr_nodes': 2, 'activation': 'softmax'}),
                             loss='CEL',
                             seed=0)
    model.fit(x_train, y_train, epochs=10, learning_rate=0.01, batch_size=100, optimizer='Adam')
    # Show result on test set (plots need host numpy arrays).
    model.evaluate(x_test, y_test)
    show_data_and_decision(model, cp.asnumpy(x_test), cp.asnumpy(y_test))
if __name__ == "__main__":
    main()
|
#! /usr/bin/python
#-*-coding:utf-8-*-
import dbus
import sys
import json
# D-Bus endpoint for the TEST environment.
SERVICE_NAME='com.testModule.frontServer'
OBJECT_PATH='/com/testModule/frontServer/object'
INTERFACE='com.testModule.frontServerInterface'
# D-Bus endpoint for the PRODUCTION environment (swap in when deploying).
# SERVICE_NAME='com.bmjc.ui'
# OBJECT_PATH='/bmjc/ui'
# INTERFACE='bmjc.ui'
def handleMsgFromUI():
    """Parse the JSON payload in sys.argv[1] and return a fresh message
    dict echoing back the scene and function names (as plain str)."""
    payload = json.loads(sys.argv[1])
    return {
        "scenename": str(payload["scenename"]),
        "functionname": str(payload["functionname"]),
    }
class CommHandler:
    """Builds result messages for the UI and delivers them over D-Bus.

    Every message echoes the scene/function names from sys.argv (via
    handleMsgFromUI) plus a resulttype and result payload.  (Python 2
    code: the print statements below are py2-only.)
    """
    def __init__(self):
        pass
    def orgDataReportMsg(self,dataMsgPara):
        # resulttype "datareport": forward the raw data payload.
        dataReportMsg=handleMsgFromUI()
        dataReportMsg["resulttype"]="datareport"
        dataReportMsg["result"]=dataMsgPara
        return json.dumps(dataReportMsg)
    def orgErrReportMsg(self,errordescritionPara):
        # resulttype "errorreport": wrap the error description string.
        errReportMsg=handleMsgFromUI()
        errReportMsg["resulttype"]="errorreport"
        resultTemp={}
        resultTemp["errordescrition"]=str(errordescritionPara)
        errReportMsg["result"]=resultTemp
        return json.dumps(errReportMsg)
    def orgProgReportMsg(self,currentcompletionPara,currentstatusPara):
        # resulttype "progressreport": completion percentage plus status text.
        progReportMsg=handleMsgFromUI()
        progReportMsg["resulttype"]="progressreport"
        resultTemp={}
        resultTemp["currentcompletion"]=str(currentcompletionPara)
        resultTemp["currentstatus"]=str(currentstatusPara)
        progReportMsg["result"]=resultTemp
        return json.dumps(progReportMsg)
    def sendMsgToUI(self, resultMsgPara):
        # Call the UI's updateFromTool method over the session bus and
        # log both directions of the exchange.
        bus =dbus.SessionBus()
        obj = bus.get_object(SERVICE_NAME,OBJECT_PATH)
        iface =dbus.Interface(obj,INTERFACE)
        print "backends向UI发送信息:"+str(resultMsgPara)
        responseMsgFromUI = iface.updateFromTool(resultMsgPara)
        print "backends收到UI响应信息:"+str(responseMsgFromUI)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import csv
import re
import sqlite3
from datetime import datetime
def print_usage():
    """Print the command-line usage line for migrate.py."""
    usage = ("Usage: migrate.py "
             "<csv-file-expenses> "
             "<csv-file-places> "
             "<sqlite-output-file>")
    print(usage)
def read_csv_table(file_name):
    """Read a CSV file into a list of {column: value} dicts, decoding
    every key and value as UTF-8.

    Python 2 code: 'rb' mode, unicode() and iteritems() are py2-only.
    """
    with open(file_name, 'rb') as csvfile:
        csvreader = csv.DictReader(csvfile)
        # Materialize inside the with-block so the file is still open
        # while DictReader iterates.
        return [{unicode(k, 'utf-8'): unicode(v, 'utf-8')
                 for k, v in row.iteritems()}
                for row in csvreader]
# TODO(c3ee0412-31c7-4d7d-bd86-36ff23045542): move org2sqlite_date
# inside of ExpensesTable
def org2sqlite_date(datestring):
    """Convert an org-mode timestamp like "<2017-03-02 Thu. 18:45>" to a
    datetime.

    The weekday token and the HH:MM part are both optional; a missing
    time defaults to midnight.  A non-matching string raises
    AttributeError (m is None), as before.
    """
    # Raw string fixes the invalid-escape-sequence deprecation for \d/\w.
    m = re.match(r'<(\d{4})-(\d{2})-(\d{2}) ?\w*\.?(?: (\d{2}):(\d{2}))?>',
                 datestring,
                 re.UNICODE)
    (year, month, day, hours, minutes) = m.groups()
    if hours is None:
        hours = 0
    if minutes is None:
        minutes = 0
    return datetime(int(year), int(month), int(day), int(hours), int(minutes))
class ExpensesTable(object):
    """In-memory image of the expenses CSV, ready for sqlite insertion."""

    def __init__(self, csv_table):
        # Amounts are stored as integer cents to avoid float rounding.
        self.records = []
        for row in csv_table:
            self.records.append({
                'date': org2sqlite_date(row['date']),
                'amount': int(float(row['amount']) * 100.00),
                'name': row['name'],
                'category': row['category'],
                'place': row['place'],
            })

    def dump(self, database):
        """Insert every record into Expenses, then commit once."""
        for record in self.records:
            database.insert_into_table('Expenses', record)
        database.commit()
class PlacesTable(object):
    """In-memory image of the places CSV, ready for sqlite insertion."""

    def __init__(self, csv_table):
        self.records = []
        for row in csv_table:
            self.records.append({'codename': row['id'],
                                 'address': row['address']})

    def dump(self, database):
        """Insert every record into Places, then commit once."""
        for record in self.records:
            database.insert_into_table('Places', record)
        database.commit()
class SqliteDatabase(object):
    """Thin wrapper over a sqlite3 connection that owns the app schema."""

    def __init__(self, database_connection):
        """Keep the connection and (idempotently) create both tables."""
        self.database_connection = database_connection
        schema_init_script = ('create table if not exists Places ('
                              '  id integer primary key,'
                              '  codename text not null,'
                              '  address text not null'
                              ');'
                              'create table if not exists Expenses ('
                              '  id integer primary key,'
                              '  date datetime not null,'
                              '  amount integer not null,'
                              '  name text not null,'
                              '  category text not null,'
                              '  place text'
                              ');')
        database_connection.executescript(schema_init_script)
        database_connection.commit()

    def insert_into_table(self, table_name, record):
        """INSERT *record* (a dict) into *table_name* via named parameters.

        Values are always bound (never interpolated); only column names,
        which come from our own code, end up in the SQL text.
        """
        columns = list(record)
        placeholders = ', '.join(':' + name for name in columns)
        query = 'INSERT INTO %s (%s) VALUES (%s)' % (table_name,
                                                     ', '.join(columns),
                                                     placeholders)
        self.database_connection.execute(query, record)

    def commit(self):
        """Flush pending inserts to disk."""
        self.database_connection.commit()
if __name__ == '__main__':
    # Three positional arguments are required (argv[0] is the script name).
    # BUG FIX: the original checked < 3 and then crashed with IndexError on
    # sys.argv[3] when exactly two arguments were supplied.
    if len(sys.argv) < 4:
        print_usage()
        exit(1)
    expenses_csv_table = read_csv_table(sys.argv[1])
    places_csv_table = read_csv_table(sys.argv[2])
    database = SqliteDatabase(sqlite3.connect(sys.argv[3]))
    # Places first: Expenses rows reference place codenames.
    PlacesTable(places_csv_table).dump(database)
    ExpensesTable(expenses_csv_table).dump(database)
|
def renaming_tree(tree, outputtree, mapping, keep=False):
    """Rewrite the newick file *tree* into *outputtree*, renaming leaves.

    mapping maps old leaf names to new ones; with keep=True the old name
    is preserved as an "old|new" compound label.  An empty input file
    yields an output containing only a newline.
    """
    with open(tree) as handle:
        lines = handle.readlines()
    if len(lines) > 0:
        # Single-line newick: drop the trailing newline.
        t = lines[0][:-1]
    else:
        # FIX: py2-only print statement / iteritems replaced with forms
        # valid on both Python 2 and 3.
        print(tree + " is empty for some reason")
        t = ""
    if keep:
        mapping = {k: k + "|" + v for k, v in mapping.items()}
    # Only replace names followed by ':' or ',' to reduce partial-label
    # hits.  NOTE(review): a name that is a suffix of another label could
    # still be mis-replaced -- behavior unchanged here.
    for k in mapping:
        t = t.replace(k + ":", mapping[k] + ":")
        t = t.replace(k + ",", mapping[k] + ",")
    with open(outputtree, "w") as handle:
        handle.writelines(t + "\n")
|
import urllib
from bs4 import BeautifulSoup
# Google My Maps viewer page whose first <div> text is dumped to data.txt.
# Python 2 code: urllib.urlopen and the bare print statement are py2-only.
url = "https://www.google.com/maps/d/u/0/viewer?mid=1Z1dI8hoBZSJNWFx2xr_MMxSxSxY&hl=en_US&ll=50.03793259999999%2C6.491228948437538&z=8"
page = urllib.urlopen(url).read()
content = BeautifulSoup(page,"lxml")
# NOTE(review): the handle is never closed; a with-block would be safer.
data = open('data.txt','w')
i = 0
# Only the FIRST div is written -- the loop breaks after one iteration.
for div in content.findAll('div'):
    print i
    text = u''.join(div.text).encode('utf-8').strip()
    data.write(text)
    break
from dataclasses import dataclass, field
from typing import ClassVar
import datetime
@dataclass
class Employee:
    """Employee record with a derived email and class-wide raise policy.

    email is derived in __post_init__, so it is excluded from the
    generated __init__ (init=False).
    """
    fname: str
    lname: str
    pay: int
    email: str = field(init=False)
    # class variables (ClassVar keeps them out of the generated __init__)
    # NOTE(review): raise_amount of 0.5 HALVES pay in apply_raise --
    # unusual for a "raise"; confirm the intended multiplier.
    raise_amount: ClassVar[float] = 0.5
    num_of_emps: ClassVar[int] = 0

    def __post_init__(self):
        # Derive the email and count every constructed instance.
        self.email = self.fname + self.lname + '@gmail.com'
        Employee.num_of_emps += 1

    def fullname(self):
        """Return "First Last"."""
        return f'{self.fname} {self.lname}'

    def apply_raise(self):
        """Multiply pay by raise_amount, truncating to int."""
        self.pay = int(self.pay * self.raise_amount)

    @classmethod
    def set_raise_amt(cls, amount):
        """Change the raise multiplier for ALL employees."""
        cls.raise_amount = amount

    @classmethod
    def from_string(cls, emp_str):
        """Alternate constructor: parse 'First-Last-Pay'.

        BUG FIX: pay arrives as text from split(); convert it to int so
        the instance honours the `pay: int` annotation (the original
        passed the raw string through).
        """
        fname, lname, pay = emp_str.split('-')
        return cls(fname, lname, int(pay))

    @staticmethod
    def is_workday(day):
        """Return False for Saturday (5) / Sunday (6), else True."""
        if day.weekday() == 5 or day.weekday() == 6:
            return False
        return True
|
import pygame
import os
import global_variables as gv
class Window:
    """Owns the pygame display, the main drawing surface and a frame clock."""
    def __init__(self):
        pygame.init()
        # Repeat held-down keys aggressively (1 ms delay, 1 ms interval).
        pygame.key.set_repeat(1, 1)
        # Must be set BEFORE set_mode to position the OS window.
        os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (gv.WINDOW_X_POS, gv.WINDOW_Y_POS)
        self.__display = pygame.display
        self.__display.set_caption(gv.WINDOW_NAME)
        self.__surface = pygame.display.set_mode(gv.WINDOW_SIZE)
        self.__clock = pygame.time.Clock()
    """ GETTERS """
    @property
    def display(self):
        # The pygame.display module handle (not a Surface).
        return self.__display
    @property
    def surface(self):
        # Main drawing surface returned by set_mode.
        return self.__surface
    @property
    def clock(self):
        # Clock used for frame-rate limiting via tick().
        return self.__clock
|
#!/usr/bin/env python3
# Copyright (c) 2021 Mahdi Biparva, mahdi.biparva@gmail.com
# miTorch: Medical Imaging with PyTorch
# Deep Learning Package for 3D medical imaging in PyTorch
# Implemented by Mahdi Biparva, April 2021
# Brain Imaging Lab, Sunnybrook Research Institute (SRI)
import torch
import torch.nn as nn
from .build import MODEL_REGISTRY
from models.NetABC import NetABC
from utils.models import pad_if_necessary, is_3d
try:
from torch.cuda.amp import autocast
except ImportError:
pass
# Module-level 3-D/2-D switch consumed by is_3d() calls throughout this
# file; flipped at runtime by Unet3D.set_processing_mode.
IS_3D = True
class BasicBlock(nn.Sequential):
    """conv3d -> normalization -> nonlinearity building block.

    normalization: 'batchnorm', 'instancenorm' or None (skip the layer).
    nonlinearity:  'relu', 'leakyrelu' or None (skip the layer).
    """
    def __init__(
        self, in_channels, out_channels, kernel_size=(3, 3, 3), stride=(1, 1, 1), dilation=(1, 1, 1),
        normalization=('batchnorm', 'instancenorm')[1], nonlinearity=('relu', 'leakyrelu')[1]
    ):
        super().__init__(
            self._create_convolution(in_channels, out_channels, kernel_size, stride, dilation),
            self._create_normalization(out_channels, normalization),
            self._create_nonlinearity(nonlinearity),
        )

    @staticmethod
    def _create_convolution(in_channels, out_channels, kernel_size, stride, dilation):
        # "same" padding for odd kernels: dilation * (kernel // 2) per axis.
        padding = tuple((torch.tensor(dilation) * (torch.tensor(kernel_size) // 2)).tolist())  # does the same
        return nn.Conv3d(
            in_channels, out_channels, kernel_size=is_3d(kernel_size, IS_3D), stride=is_3d(stride, IS_3D),
            padding=is_3d(padding, IS_3D, lb=0), dilation=is_3d(dilation, IS_3D), groups=1, bias=False
        )  # TODO check bias=True, most nets use False though because of BN

    @staticmethod
    def _create_normalization(out_channels, normalization):
        # BUG FIX: the original used two independent `if` statements, so
        # normalization=None fell through the elif chain and raised
        # NotImplementedError (and the `list()` it assigned is not a
        # Module, which would crash nn.Sequential anyway).  Chain the
        # branches and return Identity to genuinely disable the layer.
        if normalization is None:
            return nn.Identity()
        if normalization == 'batchnorm':
            return nn.BatchNorm3d(out_channels, momentum=0.1, affine=True, track_running_stats=False, eps=1e-5)
        if normalization == 'instancenorm':
            return nn.InstanceNorm3d(out_channels, momentum=0.1, affine=True, track_running_stats=False, eps=1e-5)
        raise NotImplementedError('only batchnorm, instancenorm, and None are addressed.')

    @staticmethod
    def _create_nonlinearity(nonlinearity):
        # Same fix as _create_normalization: None now yields Identity.
        if nonlinearity is None:
            return nn.Identity()
        if nonlinearity == 'relu':
            return nn.ReLU(inplace=True)
        if nonlinearity == 'leakyrelu':
            return nn.LeakyReLU(inplace=True)
        raise NotImplementedError('only relu, leakyrelu and None are addressed.')
class ContextBlock(nn.Sequential):
    """Two dilated BasicBlocks with 3-D dropout (rate *p*) in between."""
    def __init__(self, in_channels, out_channels, dilation=(3, 3, 3), p=0.3):
        super().__init__(
            BasicBlock(in_channels, out_channels, dilation=dilation),
            nn.Dropout3d(p=p),
            BasicBlock(out_channels, out_channels, dilation=dilation),
        )
class ParamUpSamplingBlock(nn.Sequential):
    """Parameterized upsampling: trilinear interpolation followed by a
    learned BasicBlock that also maps in_channels -> out_channels."""
    def __init__(self, in_channels, out_channels, scale_factor=(2, 2, 2)):
        super().__init__(
            nn.Upsample(scale_factor=is_3d(scale_factor, IS_3D), mode='trilinear', align_corners=False),
            BasicBlock(in_channels, out_channels),
        )
class LocalizationBlock(nn.Sequential):
    """Two stacked BasicBlocks applied after skip-connection concatenation
    in the decoder (first one dilated, second plain 3x3x3)."""
    def __init__(self, in_channels, out_channels, dilation=(2, 2, 2)):
        super().__init__(
            BasicBlock(in_channels, out_channels, dilation=dilation),
            BasicBlock(out_channels, out_channels, kernel_size=(3, 3, 3)),  # network architecture changed
        )
class CompoundBlock(nn.Module):
    """Residual encoder unit: input BasicBlock plus a ContextBlock branch.

    Level 0 (i == 0) keeps resolution and passes *dilation* to the input
    block; deeper levels downsample via *stride* instead.
    """
    def __init__(self, i, in_channels, out_channels, stride, dilation, p):
        super().__init__()
        self._create_net(i, in_channels, out_channels, stride, dilation, p)

    def _create_net(self, i, in_channels, out_channels, stride, dilation, p):
        extra = {'dilation': dilation} if i == 0 else {'stride': stride}
        self.input_layer = BasicBlock(in_channels, out_channels, **extra)
        self.context_layer = ContextBlock(out_channels, out_channels, dilation=dilation, p=p)

    def forward(self, x):
        # Residual sum of the input projection and the context branch.
        skip = self.input_layer(x)
        return skip + self.context_layer(skip)
class Encoder(nn.Module):
    """Stack of ENCO_DEPTH CompoundBlocks; width doubles at every level.

    forward() returns the list of per-level feature maps so the decoder
    can consume them as skip connections.
    """
    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg.clone()
        self.stride = self.cfg.MODEL.SETTINGS.ENCODER_STRIDE
        self.dilation = self.cfg.MODEL.SETTINGS.ENCODER_DILATION
        self.p = self.cfg.MODEL.DROPOUT_RATE
        self._create_net()
    @staticmethod
    def get_layer_name(i):
        # Stable zero-padded child-module names, e.g. "encoder_layer003".
        return 'encoder_layer{:03}'.format(i)
    def _create_net(self):
        in_channels = self.cfg.MODEL.INPUT_CHANNELS
        for i in range(self.cfg.MODEL.ENCO_DEPTH):
            # Channel width doubles per level: N_BASE_FILTERS * 2**i.
            out_channels = self.cfg.MODEL.SETTINGS.N_BASE_FILTERS * 2 ** i
            self.add_module(
                self.get_layer_name(i),
                CompoundBlock(i, in_channels, out_channels, stride=self.stride, dilation=self.dilation, p=self.p),
            )
            in_channels = out_channels
    def forward(self, x):
        # Collect every level's output for the decoder's skips.
        outputs = list()
        for i in range(self.cfg.MODEL.ENCO_DEPTH):
            x = getattr(self, self.get_layer_name(i))(x)
            outputs.append(x)
        return outputs
class Decoder(nn.Module):
    """Upsample-and-localize path mirroring the Encoder.

    Each step: 2x ParamUpSamplingBlock, pad to match the encoder skip if
    shapes drifted, concatenate, then a LocalizationBlock.  Outputs of
    the last NUM_PRED_LEVELS steps are returned for deep supervision.
    """
    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg.clone()
        self.dilation = self.cfg.MODEL.SETTINGS.DECODER_DILATION
        self._create_net()
    @staticmethod
    def get_layer_name(i, postfix):
        return 'decoder_layer{:03}_{}'.format(i, postfix)
    def _create_net(self):
        self.enco_depth = self.cfg.MODEL.ENCO_DEPTH - 1  # the for loop begins from 0 ends at d-1
        in_channels = self.cfg.MODEL.SETTINGS.N_BASE_FILTERS * 2 ** self.enco_depth
        for i in range(self.enco_depth):
            # Mirror encoder widths from deepest back to shallowest.
            i_r = self.enco_depth - 1 - i
            out_channels = self.cfg.MODEL.SETTINGS.N_BASE_FILTERS * 2 ** i_r
            self.add_module(
                self.get_layer_name(i, 'upsampling'),
                ParamUpSamplingBlock(in_channels, out_channels, scale_factor=(2, 2, 2)),
            )
            self.add_module(
                self.get_layer_name(i, 'localization'),
                # 2x channels: upsampled features concatenated with the skip.
                LocalizationBlock(2 * out_channels, out_channels, dilation=self.dilation),
            )
            in_channels = out_channels
    def forward(self, x_input):
        # x_input is the encoder's per-level list; start from the deepest.
        x = x_input[self.enco_depth]
        outputs = list()
        for i in range(self.enco_depth):
            i_r = self.enco_depth - 1 - i
            x = getattr(self, self.get_layer_name(i, 'upsampling'))(x)
            x, x_input[i_r] = pad_if_necessary(x, x_input[i_r])
            x = torch.cat((x, x_input[i_r]), dim=1)
            x = getattr(self, self.get_layer_name(i, 'localization'))(x)
            # Keep only the finest NUM_PRED_LEVELS maps for the SegHead.
            if i_r < self.cfg.MODEL.NUM_PRED_LEVELS:
                outputs.append(x)
        return outputs
class SegHead(nn.Module):
    """Deep-supervision head: a 1x1x1 class conv per prediction level with
    a running upsample-and-sum, so coarse predictions refine fine ones."""
    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg.clone()
        self._create_net()
    @staticmethod
    def get_layer_name(i, postfix):
        return 'seghead_layer{:03}_{}'.format(i, postfix)
    def _create_net(self):
        self.num_pred_levels = self.cfg.MODEL.NUM_PRED_LEVELS
        for i in range(self.num_pred_levels):
            i_r = self.num_pred_levels - 1 - i
            in_channels = self.cfg.MODEL.SETTINGS.N_BASE_FILTERS * 2 ** i_r
            self.add_module(
                self.get_layer_name(i, 'conv'),
                nn.Conv3d(in_channels, self.cfg.MODEL.NUM_CLASSES, kernel_size=is_3d((1, 1, 1), IS_3D)),
            )
            # No upsample after the final (finest-resolution) level.
            if not i == self.num_pred_levels - 1:
                self.add_module(
                    self.get_layer_name(i, 'upsam'),
                    nn.Upsample(scale_factor=is_3d((2, 2, 2), IS_3D), mode='trilinear', align_corners=False),
                )
    def forward(self, x_input):
        # Accumulate class maps coarsest-to-finest: conv, add to the
        # running sum (padding if shapes drifted), then upsample.
        x = 0
        for i in range(self.num_pred_levels):
            x_b = getattr(self, self.get_layer_name(i, 'conv'))(x_input[i])
            if not i == 0:
                x, x_b = pad_if_necessary(x, x_b)
            x = x + x_b
            if not i == self.num_pred_levels - 1:
                x = getattr(self, self.get_layer_name(i, 'upsam'))(x)
        return x
@MODEL_REGISTRY.register()
class Unet3D(NetABC):
    """3-D U-Net: Encoder -> Decoder (with skips) -> deep-supervision SegHead."""
    def __init__(self, cfg):
        super().__init__(cfg)
    def set_processing_mode(self):
        # Flip the module-level 3-D/2-D switch consumed by is_3d() calls.
        global IS_3D
        IS_3D = self.cfg.MODEL.PROCESSING_MODE == '3d'
    def _create_net(self):
        self.Encoder = Encoder(self.cfg)
        self.Decoder = Decoder(self.cfg)
        self.SegHead = SegHead(self.cfg)
    def forward_core(self, x):
        x = self.Encoder(x)
        x = self.Decoder(x)
        x = self.SegHead(x)
        return x
|
import json

# Seed six job postings into config.json.  NOTE: the first dump truncates
# the file ('w'), the rest append ('a'), producing CONCATENATED JSON
# objects rather than one valid document -- original behavior preserved.
_jobs = [
    {'jobDesc': 'Mow Lawn', 'pay': '15.00', 'time': 'ASAP'},
    {'jobDesc': 'Painting Fence', 'pay': '30.00', 'time': '4/9/2017, 12:00'},
    {'jobDesc': 'Weedwhacking', 'pay': '12.00', 'time': '4/20/2017, 16:20'},
    {'jobDesc': 'Car Wash + Wax', 'pay': '20.00', 'time': '4/20/2017, 17:30'},
    {'jobDesc': 'Move Furniture', 'pay': '75.00', 'time': '4/9/2017, 7:00'},
    {'jobDesc': 'Tech Support', 'pay': '20.00', 'time': 'ASAP'},
]
# Keep the historical module-level names around for any external readers.
json_data_1, json_data_2, json_data_3, json_data_4, json_data_5, json_data_6 = (
    [json.dumps(job) for job in _jobs])
for _index, data in enumerate(_jobs):
    mode = 'w' if _index == 0 else 'a'
    with open('config.json', mode) as outputfile:
        json.dump(data, outputfile)
#jsonParentDump = json.dump(json_data_1, json_data_2, json_data_3, json_data_4, json_data_5, json_data_6);
import pandas as pd
import numpy as np
import scipy as sp
import re
from sklearn import ensemble
from sklearn import cross_validation
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
def mungling(data):
    """Clean the Titanic frame in place and add derived features.

    Renames columns to lower case ('passengerid'->'id', 'pclass'->'klass'),
    then adds: title (from the name), cabin_type (first cabin letter, 'n'
    for NaN), alone flag, crew flag, sex_num. Missing ages are imputed by
    (sex, alone, title) group mean, then by the global mean; fares are
    split across multi-cabin tickets and NaNs filled with the mean.
    Returns the same (mutated) DataFrame.
    """
    #data = data.drop(['Ticket', 'Embarked'], axis=1)
    data.rename(columns=lambda v: v.lower(), inplace=True)
    data.rename(columns={'passengerid': 'id', 'pclass': 'klass'}, inplace=True)
    # Extract title ('Mr.', 'Miss.', ...) and drop the trailing dot.
    data['title'] = data.name.map(
        lambda n: re.search('\w+\.', n).group().lower()[:-1])
    # Some cabin names start with "F .." -- strip the deck prefix.
    data.cabin = data.cabin.map(lambda c: str(c).replace('F ', '').lower())
    # Extract cabin type (first letter); NaN cabins become "n" (str(nan)).
    data['cabin_type'] = data.cabin.map(
        lambda c: str(c).split()[0][0])
    # alone == 1 when the passenger has no siblings/spouse/parents/children.
    data['alone'] = data.sibsp + data.parch
    data.alone = data.alone.map(lambda v: 0 if v > 0 else 1)
    # Fill missing age with the mean of the (sex, alone, title) group,
    # then fall back to the global mean for groups that were all-NaN.
    data.age = data.groupby(['sex', 'alone', 'title']).age.transform(
        lambda age: age.fillna(age.mean()))
    data.age.fillna(data.age.mean(), inplace=True)
    # Tickets covering several cabins (families): divide the fare by the
    # number of cabins listed on the ticket.
    nums = data.cabin.map(lambda c: len(str(c).split(' ')))
    data.fare = (data.fare / nums)
    #data.fare = data.groupby(['klass', 'cabin_type', 'age']).fare.transform(
    #    lambda g: g.fare.fillna(g.fare.mean()))
    data.fare.fillna(data.fare.mean(), inplace=True)
    # Crew members did not pay for their ticket -- fare of 0 marks crew.
    data['crew'] = data.fare.map(lambda p: 1 if p == 0 else 0)
    data['sex_num'] = data.sex.replace({'male': 0, 'female': 1})
    return data
def prepare_sets():
    """Load train/test CSVs and return (full munged frame, feature frame,
    train survival labels).

    NOTE(review): DataFrame.append (removed in pandas 2.x) and the
    cross_validation/grid_search modules used below are long-deprecated
    pandas/sklearn APIs -- this script targets their old versions.
    """
    train_data = pd.read_csv('train.csv')
    test_data = pd.read_csv('test.csv')
    # Stack train and test data sets together in order
    # to fill missing values using global averages (age, fare).
    # To do so we need to remove Survived from the train data frame
    # and mark rows in both collections as 'train' / 'test'.
    survived = train_data.Survived
    train_data = train_data.drop(['Survived'], axis=1)
    train_data['type'] = 'train'
    test_data['type'] = 'test'
    data = train_data.append(test_data, ignore_index=True)
    data = mungling(data)
    # One-hot encodings -- currently unused (merges below are commented out).
    sex = pd.get_dummies(data.sex, prefix='sex')
    cabin = pd.get_dummies(data.cabin_type, prefix='cabin')
    klass = pd.get_dummies(data.klass, prefix='klass')
    #sub_data = data[['age', 'klass', 'alone', 'crew', 'parch', 'sibsp']]
    #sub_data = data[['age', 'klass', 'sex_num', 'fare', 'parch', 'sibsp']]
    sub_data = data[['age', 'klass', 'sex_num', 'fare', 'alone', 'crew']]
    #sub_data = pd.merge(sub_data, sex, left_index=True, right_index=True)
    #sub_data = pd.merge(sub_data, cabin, left_index=True, right_index=True)
    #sub_data = pd.merge(sub_data, klass, left_index=True, right_index=True)
    return data, sub_data, survived
if __name__ == '__main__':
    # Train a gradient-boosting classifier (grid-searched) on the Titanic
    # training rows and print a Kaggle-style submission to stdout.
    # NOTE(review): Python 2 print statements and the removed pandas
    # DataFrame.irow API -- runs only on the legacy stack.
    data, sub_data, survived = prepare_sets()
    test_data = sub_data[data.type == 'test']
    train_data = sub_data[data.type == 'train']
    #X_train, X_test, y_train, y_test = cross_validation.train_test_split(
    #    train_data, survived, test_size=0.3, random_state=0)
    #params = [{
    #    'n_estimators': [100, 150, 200, 300],
    #    'min_samples_leaf': [1, 3, 5, 10],
    #    'min_samples_split': [2, 5, 8, 11],
    #    'max_depth': [None, 5, 50],
    #    "bootstrap": [True, False],
    #    #'max_features': [None, 'auto', 'sqrt', 'log2']
    #    }]
    # Grid-search space for the gradient boosting model.
    params = {
        'n_estimators': [200, 250],
        'learning_rate': [0.1, 0.2, 0.3],
        #'min_samples_leaf': [1, 2],
        'min_samples_split': [2, 3],
        'subsample': [1., 0.9, 0.8, 0.7]
    }
    # Base estimator's n_estimators=10 is overridden by the grid above.
    clf = ensemble.GradientBoostingClassifier(n_estimators=10)
    #clf = ensemble.RandomForestClassifier(n_estimators=10, random_state=1)
    clf = GridSearchCV(clf, params, cv=5, n_jobs=2)
    #clf.fit(X_train, y_train)
    #print clf.best_estimator_
    #y_true, y_pred = y_test, clf.predict(X_test)
    #print classification_report(y_true, y_pred)
    #clf = ensemble.RandomForestClassifier(n_estimators=150, min_samples_leaf=3,
    #    min_samples_split=10, random_state=1, bootstrap=True)
    #clf = ensemble.RandomForestClassifier(n_estimators=200, min_samples_leaf=2, min_samples_split=10, random_state=1)
    clf.fit(train_data, survived)
    print clf.best_estimator_
    predictions = clf.predict(test_data)
    # Emit a submission CSV (PassengerId,Survived) to stdout.
    print 'PassengerId,Survived'
    for i in range(test_data.shape[0]):
        print '%s,%s' % (data[data.type=='test'].irow(i)['id'], predictions[i])
    #n_folds = 5
    #predictions = np.zeros((n_folds, test_data.shape[0]))
    #idx = 0
    # for train_idx, test_idx in cross_validation.KFold(
    #         n=train_data.shape[0], n_folds=n_folds):
    #     clf = ensemble.AdaBoostClassifier(n_estimators=100, learning_rate=0.1)
    #     #clf = ensemble.RandomForestClassifier(n_estimators=300, random_state=1)
    #     clf.fit(train_data.ix[train_idx], survived[train_idx])
    #     predictions[idx] = clf.predict(test_data)
    #     idx += 1
    #tdata = data[data.type == 'test']
    #predictions = predictions.T
    #print 'PassengerId,Survived'
    #for i in range(test_data.shape[0]):
    #    v = 1 if sum(predictions[i]) >= int(round(n_folds / 2.)) else 0
    #    #print predictions[i], v
    #    row = tdata.irow(i)
    #    print "%s,%s" % (row['id'], v)
|
# PRIPEL anonymization pipeline: privatized trace-variant query -> trace
# matching -> attribute anonymization, with status written to a SQLite DB.
#
# Fixes:
#  * `sys` and `sqlite3` were imported *inside* the try block, so when the
#    pm4py imports failed the except handler itself crashed with NameError.
#    The argv parsing is hoisted too, so `dbName`/`secure_token` are always
#    bound for the error handler.
#  * epsilon was missing from the output filename ("_epsilon_" was followed
#    directly by "_k...").
#  * the bare `except:` discarded the traceback; it is now logged.
import sys
import sqlite3
import traceback

# CLI: log_path epsilon N k dbName secure_token
log_path = sys.argv[1]
epsilon = float(sys.argv[2])
N = int(sys.argv[3])
k = int(sys.argv[4])
dbName = sys.argv[5]
secure_token = sys.argv[6]

try:
    from pm4py.objects.log.importer.xes import factory as xes_import_factory
    from pm4py.objects.log.exporter.xes import factory as xes_exporter
    from pm4py.objects.log.util import sampling
    import tracematcher
    from attributeAnonymizier import AttributeAnonymizier as AttributeAnonymizier
    from trace_variant_query import privatize_tracevariants
    import datetime
    import pandas as pd
    import os

    def freq(lst):
        """Frequency count of the items in `lst` (value -> occurrences)."""
        d = {}
        for i in lst:
            if d.get(i):
                d[i] += 1
            else:
                d[i] = 1
        return d

    ################################## pripel code
    new_ending = "_epsilon_" + str(epsilon) + "_k" + str(k) + "_anonymizied.xes"
    result_log_path = log_path.replace(".xes", new_ending)
    print("\n output_path pripel: ", result_log_path, "\n")
    starttime = datetime.datetime.now()
    log = xes_import_factory.apply(log_path)

    # Step 1: differentially private trace-variant query.
    starttime_tv_query = datetime.datetime.now()
    tv_query_log = privatize_tracevariants(log, epsilon, k, N)
    print(len(tv_query_log))
    endtime_tv_query = datetime.datetime.now()
    print("Time of TV Query: " + str((endtime_tv_query - starttime_tv_query)))

    # Step 2: match the privatized variants back to original traces.
    # (The print0/print1/print2 debug prints were removed.)
    starttime_trace_matcher = datetime.datetime.now()
    traceMatcher = tracematcher.TraceMatcher(tv_query_log, log)
    matchedLog = traceMatcher.matchQueryToLog()
    print(len(matchedLog))
    endtime_trace_matcher = datetime.datetime.now()
    print("Time of TraceMatcher: " + str((endtime_trace_matcher - starttime_trace_matcher)))

    distributionOfAttributes = traceMatcher.getAttributeDistribution()
    occurredTimestamps, occurredTimestampDifferences = traceMatcher.getTimeStampData()
    print(min(occurredTimestamps))

    # Step 3: anonymize event attributes and timestamps.
    starttime_attribute_anonymizer = datetime.datetime.now()
    attributeAnonymizier = AttributeAnonymizier()
    anonymiziedLog, attritbuteDistribution = attributeAnonymizier.anonymize(
        matchedLog, distributionOfAttributes, epsilon,
        occurredTimestampDifferences, occurredTimestamps)
    endtime_attribute_anonymizer = datetime.datetime.now()
    print("Time of attribute anonymizer: " + str(endtime_attribute_anonymizer - starttime_attribute_anonymizer))

    print(result_log_path)
    # Normalize path separators before exporting.
    result_log_path = result_log_path.replace("\\", os.path.sep)
    xes_exporter.export_log(anonymiziedLog, result_log_path)
    endtime = datetime.datetime.now()
    print("Complete Time: " + str((endtime - starttime)))
    print("Time of TV Query: " + str((endtime_tv_query - starttime_tv_query)))
    print("Time of TraceMatcher: " + str((endtime_trace_matcher - starttime_trace_matcher)))
    print("Time of attribute anonymizer: " + str(endtime_attribute_anonymizer - starttime_attribute_anonymizer))
    print(result_log_path)
    print(freq(attritbuteDistribution))
    ######################################

    # Record success: store the result path relative to the media root.
    puffer, targetFile = result_log_path.split("media" + os.path.sep)
    conn = sqlite3.connect(dbName)
    c = conn.cursor()
    c.execute("UPDATE eventlogUploader_document SET status = ?, docfile = ? WHERE token = ?",
              ("FINISHED", targetFile, secure_token))
    conn.commit()
    conn.close()
    print("Done!")
except Exception:
    # Log the actual failure before recording the ERROR status.
    traceback.print_exc()
    conn = sqlite3.connect(dbName)
    c = conn.cursor()
    c.execute("UPDATE eventlogUploader_document SET status = ? WHERE token = ?", ("ERROR", secure_token))
    conn.commit()
    conn.close()
# Route Google Scholar traffic through the lab proxy (notebook magics).
get_ipython().run_line_magic('env', 'HTTP_PROXY=http://trenco:58471')
get_ipython().run_line_magic('env', 'HTTPS_PROXY=http://trenco:58471')
# Available proxy hosts: palain, lyrane, trenco, (klovia)
#import more_scholarly as scholarly
import scholarly
import pandas as pd

def get_researcher():
    """Return the researcher names from the 'name' column of df100.csv."""
    df = pd.read_csv('df100.csv')
    return [name for name in df.name]

researchers = get_researcher()
print(researchers)
#Get all co-authors from extract of a researcher
def get_coauthor(name, max_pubs=2):
    """Collect co-author names for `name` from Google Scholar.

    Finds the author's profile, fetches the first `max_pubs` publications,
    and gathers each publication's author list.

    Args:
        name: researcher name to search for.
        max_pubs: number of publications to inspect (default 2 -- the
            previously hard-coded limit, kept for backward compatibility).

    Returns:
        De-duplicated list of author-name strings (order not guaranteed).
    """
    search_query1 = scholarly.search_author(name)
    author_info = next(search_query1).fill()
    titles = [pub.bib['title'] for pub in author_info.publications]
    coauthors = []
    # Robustness fix: don't index past the author's publication list.
    for index in range(min(max_pubs, len(titles))):
        search_query = scholarly.search_pubs_query(titles[index])
        pubs = next(search_query).fill()
        # Bug fix: split on the BibTeX separator " and ", not the bare
        # substring "and", which also split names like "Alexander".
        author = pubs.bib['author'].split(" and ")
        coauthors.extend(author)
    print(coauthors)
    coauthor_list = list(set(coauthors))
    return coauthor_list
#Build a dictionary to store the coauthors of each researcher
df100 = pd.read_csv('df100.csv', index_col='name')
coauthor = {}
for name in researchers:
    if name not in coauthor:
        coauthor[name] = get_coauthor(name)
# Bug fixes:
#  * `df0` was undefined (NameError); the frame to merge with is `df100`.
#  * a dict of unequal-length lists cannot form DataFrame columns directly;
#    wrap it in a Series keyed by researcher name so concat aligns on index.
df_author = pd.Series(coauthor, name='coauthors').to_frame()
df = pd.concat([df100, df_author], axis=1)
df.to_csv('df100_with_coauthor.csv')
# Ad-hoc exploration: inline copies of the get_coauthor() logic for two
# specific researchers (first 2 and first 3 publications respectively).
search_query1 = scholarly.search_author('Quanfa Zhang')
author_info = next(search_query1).fill()
#print(author_info)
titles = [pub.bib['title'] for pub in author_info.publications]
coauthors = []
for index in range(2):
    #for index in range(len(author_info.publications)):
    search_query = scholarly.search_pubs_query(titles[index])
    pubs = next(search_query).fill()
    #print(pubs)
    # NOTE(review): split("and") also splits inside names like "Alexander".
    author = pubs.bib['author'].split("and")
    #print(author)
    coauthors.extend(author)
    print(coauthors)
coauthor_list = list(set(coauthors))

search_query1 = scholarly.search_author('Wenjun Huang')
author_info = next(search_query1).fill()
#print(author_info)
titles = [pub.bib['title'] for pub in author_info.publications]
coauthors = []
for index in range(3):
    #for index in range(len(author_info.publications)):
    search_query = scholarly.search_pubs_query(titles[index])
    pubs = next(search_query).fill()
    #print(pubs)
    author = pubs.bib['author'].split("and")
    #print(author)
    coauthors.extend(author)
    print(coauthors)
coauthor_list = list(set(coauthors))
|
# Prime factors (Project Euler #3 style trial division).
import math

def prime_factors(n):
    """Return the prime factorization of n as a list (with multiplicity).

    Bug fix: the original used true division (`n = n / 2`), which turns n
    into a float on Python 3 and could append float factors; floor
    division keeps everything integral. prime_factors(1) returns [].
    """
    factors = []
    while n % 2 == 0:
        n //= 2
        factors.append(2)
    # Only odd candidates up to sqrt of the (already halved) remainder.
    for i in range(3, int(math.sqrt(n)) + 1, 2):
        while n % i == 0:
            factors.append(i)
            n //= i
    # Whatever remains above 2 is itself prime.
    if n > 2:
        factors.append(n)
    return factors

a = prime_factors(600851475143)
print(a)  # was a Python 2 `print a` statement
|
import datetime
import queue
import socket
import threading
# Server types.
MUSIC_S = 'M'
VIDEO_S = 'V'
SERVERS_NUM = 3
# Task types, carried in the first byte of each 2-byte client request.
VIDEO = 'V'
PIC = 'P'
MUSIC = 'M'
# Backend servers by index: two video servers and one music server.
SERVER_TYPES = [VIDEO_S, VIDEO_S, MUSIC_S]
# Guards the three task queues and the `exp` busy-until estimates.
q_lock = threading.Lock()
current = datetime.datetime.now()
# Estimated time at which each backend server becomes free.
exp = [current, current, current]
# One pending-task queue per backend server.
q_first = queue.Queue()
q_second = queue.Queue()
q_third = queue.Queue()
def sched(tasks):
    """Assign `tasks` to the three servers, minimizing the overall finish time.

    Brute force: enumerates all 3**len(tasks) assignments, scores each via
    last_t(), sorts the winner's per-server lists by processing time, prints
    a summary and returns the best (list, list, list) assignment.
    """
    # Sentinel "best" finish time far in the future.
    begin_time = datetime.datetime.now() + datetime.timedelta(weeks=1)
    best_perm = []
    arr_permutation = [([], [], [])]
    for t in tasks:
        arr_permutation = apply_tasks(arr_permutation, t)
    # Pair each candidate assignment with its projected finish time.
    permutations = [(last_t(p), p) for p in arr_permutation]
    for (t, p) in permutations:
        if t < begin_time:
            begin_time = t
            best_perm = p
    # Shortest-processing-time order within each server (in place).
    opt_perm(best_perm)
    sec = (begin_time - datetime.datetime.now()).seconds
    # task[0] is the raw request buffer -- print just those for readability.
    perm_to_print = ([task[0] for task in best_perm[0]],
                     [task[0] for task in best_perm[1]],
                     [task[0] for task in best_perm[2]])
    print('opt permutation: %s, finish time %s (%s seconds)' % (perm_to_print, begin_time, sec))
    return best_perm
def apply_tasks(list, task):
    """Extend every partial assignment in `list` (3-tuples of task lists)
    with `task` placed on each of the three servers in turn, tripling the
    number of candidates."""
    extended = []
    for perm in list:
        for server in range(3):
            # Shallow-copy each server's task list, then place the task.
            candidate = (perm[0][:], perm[1][:], perm[2][:])
            candidate[server].append(task)
            extended.append(candidate)
    return extended
######## Queue functions ######
def q_add(buff, client_socket):
    """Admit a new request: drain all currently queued tasks, re-run the
    scheduler over old + new tasks, and refill the three server queues.

    buff is the raw 2-byte request; byte 0 is the task type and byte 1 the
    duration digit (single character, so 0-9 seconds -- TODO confirm).
    """
    q_lock.acquire()
    task = buff.decode("utf-8")
    tasks = []
    # Pull every pending task back out so the whole set can be rescheduled.
    for q in [q_first, q_second, q_third]:
        q_get(q, tasks)
    tasks.append((buff, client_socket, int(task[1])))
    (tasks_of_1, tasks_of_2, tasks_of_3) = sched(tasks)
    for task in tasks_of_1:
        q_first.put(task)
    for task in tasks_of_2:
        q_second.put(task)
    for task in tasks_of_3:
        q_third.put(task)
    q_lock.release()
def q_get(q, tasks):
    """Drain queue `q` without blocking, appending every pending item to
    `tasks`; stops as soon as the queue is empty."""
    while True:
        try:
            tasks.append(q.get_nowait())
        except queue.Empty:
            break
############## Time functions ################
def last_t(p):
    """Overall finish time of assignment `p`: the latest per-server finish
    time, each computed from that server's type, its task list in `p`, and
    its current busy-until estimate (`exp`)."""
    t = []
    for i in range(SERVERS_NUM):
        final = when_will_end(SERVER_TYPES[i], p[i], exp[i])
        t.append(final)
    return max(t)
def task_find_time(server_type: str, task_type: str, task_time: int) -> int:
    """Estimated processing time for a task on the given server type.

    Non-native work is penalized: music on a video server takes x2; on a
    music server, video takes x3 and pictures x2. Everything else runs at
    its nominal task_time.
    """
    if server_type == VIDEO_S:
        return task_time * 2 if task_type == MUSIC else task_time
    if task_type == VIDEO:
        return task_time * 3
    if task_type == PIC:
        return task_time * 2
    return task_time
def when_will_end(server_type: str, tasks: [(bytes, socket.socket, int)],
                  begin: datetime.datetime) -> datetime.datetime:
    """Projected finish time for `tasks` on a server of `server_type`,
    starting at `begin` but never earlier than the present moment."""
    # Total seconds of work; each task is (raw buffer, client socket,
    # nominal duration), with the task type in the buffer's first byte.
    total_seconds = sum(
        task_find_time(server_type, chr(t[0][0]), t[2]) for t in tasks)
    start = max(begin, datetime.datetime.now())
    return start + datetime.timedelta(seconds=total_seconds)
######################## Scheduling #################
def opt_perm(perm):
    """Sort each server's task list in place into shortest-processing-time
    order, using that server's type-specific processing time."""
    for i in range(len(perm)):
        tasks = perm[i]
        if len(tasks) > 1:
            # t[0][0] is the task-type byte; t[2] the nominal duration.
            tasks.sort(key=lambda t: task_find_time(SERVER_TYPES[i], chr(t[0][0]), t[2]))
def ask_for_sched(client_socket):
    """Read one 2-byte request (type char + duration digit) from the client
    and hand it to the scheduler."""
    buff = client_socket.recv(2)
    print("receive ", buff.decode("utf-8"), " from ", client_socket.getpeername())
    q_add(buff, client_socket)
def thread_h(task_queue, svr_sock, server_index):
    """Worker loop for one backend server: pop tasks from its queue,
    forward them to the backend socket, relay the 2-byte reply back to the
    client, and keep the server's busy-until estimate (`exp`) updated.

    NOTE(review): this busy-waits, acquiring/releasing q_lock in a tight
    loop while the queue is empty -- burns a CPU core; a blocking get or a
    short sleep would likely be intended behavior. Confirm before changing.
    """
    while True:
        q_lock.acquire()
        try:
            (buff, client_socket, task_time) = task_queue.get_nowait()
        except queue.Empty:
            q_lock.release()
            continue
        # Mark the server busy until roughly now + nominal task duration.
        exp[server_index] = datetime.timedelta(seconds=task_time) + datetime.datetime.now()
        q_lock.release()
        peer_name = str(client_socket.getpeername())
        svr_sock.sendall(buff)
        # Backend replies with a 2-byte result; server is free again.
        server_buff = svr_sock.recv(2)
        exp[server_index] = datetime.datetime.now()
        client_socket.sendall(server_buff)
        client_socket.close()
##################################################
# Front-end listening socket for clients.
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(("10.0.0.1", 80))
server_socket.listen(5)
# Persistent connections to the three backend servers (two video, one music
# -- see SERVER_TYPES).
socket_first = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_first.connect(("192.168.0.101", 80))
socket_second = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_second.connect(("192.168.0.102", 80))
socket_third = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket_third.connect(("192.168.0.103", 80))
# One worker thread per backend, each consuming its own queue.
thread_first = threading.Thread(args=(q_first, socket_first, 0), target=thread_h)
thread_second = threading.Thread(args=(q_second, socket_second, 1), target=thread_h)
thread_third = threading.Thread(args=(q_third, socket_third, 2), target=thread_h)
thread_first.start()
thread_second.start()
thread_third.start()
# Accept loop: each client connection contributes one task to schedule.
while True:
    (sockToClient, address) = server_socket.accept()
    ask_for_sched(sockToClient)
|
# Singleton pattern: every instantiation returns the same object (same
# memory address), so all "instances" share one state.
class Pessoa:
    """Singleton: repeated construction yields one shared instance."""

    def __new__(cls):
        # Lazily create and cache the single instance on the class.
        if not hasattr(cls, 'instance'):
            cls.instance = super(Pessoa, cls).__new__(cls)
        return cls.instance
class Endereco:
    """Ordinary (non-singleton) class: each call creates a distinct object,
    shown here for contrast with Pessoa."""

    def __init__(self):
        pass
# Demo: the two Pessoa objects print the same address (singleton); the two
# Endereco objects are distinct.
pessoa1 = Pessoa()
print("Objeto Pessoa 1 Singleton", pessoa1)
pessoa2 = Pessoa()
print("Objeto Pessoa 2 Singleton", pessoa2)
endereco1 = Endereco()
print("Objeto Endereco 1 não Singleton", endereco1)
endereco2 = Endereco()
# Bug fix: the label said "Endereco 1" for the second instance.
print("Objeto Endereco 2 não Singleton", endereco2)
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
from selenium.webdriver.firefox.options import Options
import itertools
import random
# All 3-character orderings of '8','9','9'; duplicates are kept on purpose
# so random.choice below keeps the original selection distribution.
start_phone = [''.join(digits) for digits in itertools.permutations('899', 3)]
def get_doctors(place):
    """Scrape Practo for psychiatrists in the given locality.

    Drives a headless Firefox through the locality + speciality search and
    returns a list of dicts with keys Name, Phone (randomly generated fake
    number), Experience and Consultation_fee.

    Bug fix: the WebDriver was never closed, leaking a headless Firefox
    process per call; driver.quit() now runs even when a locator fails.
    """
    options = Options()
    options.add_argument('-headless')
    driver = webdriver.Firefox(options=options)
    try:
        driver.get('https://www.practo.com/')
        # --- locality search box: clear it, type the place, pick the first suggestion ---
        locality_input_element = driver.find_element_by_xpath('//input[@data-qa-id="omni-searchbox-locality"]')
        locality_input_element.send_keys(Keys.CONTROL + "a")
        locality_input_element.send_keys(Keys.DELETE)
        locality_input_element.send_keys(place)
        sleep(1)
        suggestion = driver.find_element_by_xpath('//div[@class="c-omni-suggestion-group"][1]/div[1]')
        suggestion.click()
        sleep(1)
        # --- speciality search box ---
        doctor_input_element = driver.find_element_by_xpath('//input[@data-qa-id="omni-searchbox-keyword"]')
        doctor_input_element.send_keys('Psychiatrist')
        sleep(2)
        suggestion = driver.find_element_by_xpath('//div[@class="c-omni-suggestion-group"][1]/div[1]')
        suggestion.click()
        doctor_details = []
        doctors_list = driver.find_elements_by_xpath('//div[@class="info-section"]')
        # One fake phone per result: 8/9-prefix plus up to 26 random bits.
        doctor_phones = [random.choice(start_phone) + str(random.getrandbits(26)) for _ in range(len(doctors_list))]
        for doctor, phone in zip(doctors_list, doctor_phones):
            name = doctor.find_element_by_xpath('.//h2[@class="doctor-name"]').text
            experience = doctor.find_element_by_xpath('.//div[1]/div[2]/div').text
            consultation_fee = doctor.find_element_by_xpath('.//span[@data-qa-id="consultation_fee"]').text
            doctor_details.append({
                'Name': name,
                'Phone': phone,
                'Experience': experience,
                'Consultation_fee': consultation_fee
            })
        return doctor_details
    finally:
        driver.quit()
if __name__ == '__main__':
    # Scrape and print psychiatrists listed for the Vileparle locality.
    print(get_doctors('Vileparle'))
|
import pyglet
def center_image(image):
    """Sets an image's anchor point to its center (half width / half height),
    so rotation and positioning act about the image's middle."""
    image.anchor_x, image.anchor_y = image.width / 2, image.height / 2
# Define resource path relative to this module and rebuild the index.
pyglet.resource.path = ['../resources']
pyglet.resource.reindex()

# Load sprite images and anchor each at its center.
player_image = pyglet.resource.image("player.png")
center_image(player_image)
bullet_image = pyglet.resource.image("bullet.png")
center_image(bullet_image)
asteroid_image = pyglet.resource.image("asteroid.png")
center_image(asteroid_image)

# Engine flame: anchor_x at 1.5x the width offsets the flame sideways --
# presumably so it trails behind the ship sprite; TODO confirm placement.
engine_image = pyglet.resource.image("engine_flame.png")
engine_image.anchor_x = engine_image.width * 1.5
engine_image.anchor_y = engine_image.height / 2
|
#%%
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'qt')
get_ipython().run_line_magic('matplotlib', 'inline')

# Hyperparameters for a one-parameter linear regression y = w * x.
learning_rate = 0.01
training_epochs = 10

# Synthetic data: y = 2x + Gaussian noise (sigma 0.33) on 101 points.
x_train = np.linspace(-1, 1, 101)
y_train = 2 * x_train + np.random.randn(*x_train.shape) * 0.33

# NOTE(review): tf.placeholder / tf.Session / tf.train.* are TF1-only
# APIs; this will not run under TensorFlow 2 without tf.compat.v1.
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

def model(X, w):
    """Linear model without bias: element-wise X * w."""
    return tf.multiply(X, w)

w = tf.Variable(0.0, name="weights")
y_model = model(X, w)
cost = tf.square(Y-y_model)  # per-sample squared error
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
# Stochastic updates: one gradient step per sample per epoch.
for epoch in range(training_epochs):
    for (x, y) in zip(x_train, y_train):
        sess.run(train_op, feed_dict={X: x, Y: y})
w_val = sess.run(w)
sess.close()

# Plot data and the fitted line y = w_val * x.
plt.scatter(x_train, y_train)
y_learned = x_train*w_val
plt.plot(x_train, y_learned, 'r')
plt.show( )
#%%
#%%
|
from urllib import quote
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.http import HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.contrib.sites.models import Site
from forms import InviteForm
from signup.views import render_with_context
from utils import addToQueryString
import strings
@login_required
def index(request):
    """Invite page: render the invite form and handle its submission.

    On a valid POST the invite is saved for the logged-in user and the
    view redirects back with a success notice (POST/redirect/GET);
    otherwise the form is re-rendered with its errors.
    """
    vars = {}
    if request.method == "POST":
        invite_form = InviteForm(request.POST, request.FILES)
        if invite_form.is_valid():
            invite_form.save(request.user)
            # Redirect with the success notice in the query string.
            return HttpResponseRedirect(addToQueryString(reverse("inviteindex"),
                {'notice': strings.INVITE_NOTICE_SUCCESS}))
        else:
            vars['invite_form'] = invite_form
    else:
        vars['invite_form'] = InviteForm()
    # URL-encoded absolute site URL -- presumably used by the template for
    # share links; TODO confirm against invite_page.html.
    vars['siteurl'] = quote("http://%s" % Site.objects.get_current().domain)
    return render_with_context(request, "invite/invite_page.html", vars)
__version__ = '1.16.14'
from hashlib import sha1

# Bug fix: sha1() requires bytes; hashing the bare str raised TypeError on
# Python 3. Encoding as UTF-8 is a no-op for this ASCII version string and
# remains compatible with Python 2.
hashed_version = sha1(__version__.encode('utf-8')).hexdigest()
|
a = "hello, world"
# Slice from the 5th-from-last character up to (excluding) the 2nd-from-last.
substring = a[-5:-2]
print(substring)
|
#!/usr/bin/python
import math
import os
import matplotlib.pyplot as plt
from ODE import *
from input import *
from output import *
from plot import*
from acceleration import *
def main_functions():
    """Entry point: read double-pendulum parameters, integrate the ODEs,
    then write and plot the results."""
    # NOTE(review): `input()` here is the imported project module's
    # function returning a dict, not the builtin -- confirm.
    inputData = input()
    m1 = inputData.get('m1')          # masses of the two bobs
    m2 = inputData.get('m2')
    L1 = inputData.get('L1')          # rod lengths
    L2 = inputData.get('L2')
    theta1 = inputData.get('theta1')  # initial angles
    theta2 = inputData.get('theta2')
    g = inputData.get('g')            # gravitational acceleration
    t = inputData.get('t')            # time parameter -- grid or duration, TODO confirm
    # temp presumably holds (time series, theta1 series, theta2 series) --
    # confirm against ODE()'s return value.
    temp = ODE(theta1, theta2, m1, m2, L1,L2,g,t)
    output(temp[0], temp[1],temp[2])
    plot(temp[0],temp[1],temp[2])

main_functions()
|
import numpy as np
from scipy.signal import blackmanharris
import scipy.signal
def amplitudes_and_freqs(inputs, sampling_rate, axis=1, n=None):
    """Magnitude spectrum of `inputs` along `axis` plus the matching FFT
    bin frequencies in Hz.

    When `n` is None the FFT length defaults to the size of `inputs`
    along `axis`.
    """
    fft_len = inputs.shape[axis] if n is None else n
    spectrum = np.fft.rfft(inputs, axis=axis, n=n)
    freq_bins = np.fft.rfftfreq(fft_len, d=1.0 / sampling_rate)
    return np.abs(spectrum), freq_bins
def bps_and_freqs(inputs, sampling_rate, axis=1, n=None):
    """Band power spectrum (squared amplitudes scaled by 1/n) along `axis`
    of `inputs`, together with the FFT bin frequencies in Hz."""
    fft_len = inputs.shape[axis] if n is None else n
    amplitudes, freqs = amplitudes_and_freqs(inputs, sampling_rate, axis, fft_len)
    return np.square(amplitudes) / fft_len, freqs
def multiply_blackmann_harris_window(inputs, axis=1):
    """Apply a Blackman-Harris window along `axis` of `inputs`.

    The window vector is reshaped to the rank of `inputs` with singleton
    dimensions everywhere except `axis`, so broadcasting multiplies only
    along the intended axis.

    Bug fix: the loop used Python 2's `xrange`, a NameError on Python 3;
    `range` is correct on both.
    """
    n = inputs.shape[axis]
    w = blackmanharris(n)
    #w=w/np.mean(w)
    # Give w the same dimensionality as inputs (size 1 on all other axes)
    # so the correct dimensions are multiplied.
    for i_axis in range(inputs.ndim):
        if i_axis != axis:
            w = np.expand_dims(w, i_axis)
    return w * inputs
def lowpass_topo(topo, high_cut_hz, sampling_rate, axis=0, filt_order=4):
    """Zero-phase Butterworth low-pass filter of `topo` along `axis`.

    Bug fix: `axis` was accepted but ignored -- filtfilt was hard-coded to
    axis=0, silently filtering the wrong dimension for axis != 0.
    """
    nyq_freq = 0.5 * sampling_rate
    b, a = scipy.signal.butter(filt_order, high_cut_hz / nyq_freq, btype='lowpass')
    filtered = scipy.signal.filtfilt(b, a, topo, axis=axis)
    return filtered
def highpass_topo(topo, low_cut_hz, sampling_rate, axis=0, filt_order=4):
    """Zero-phase Butterworth high-pass filter of `topo` along `axis`.

    Bug fix: `axis` was accepted but ignored -- filtfilt was hard-coded to
    axis=0, silently filtering the wrong dimension for axis != 0.
    """
    nyq_freq = 0.5 * sampling_rate
    b, a = scipy.signal.butter(filt_order, low_cut_hz / nyq_freq, btype='highpass')
    filtered = scipy.signal.filtfilt(b, a, topo, axis=axis)
    return filtered
def bandpass_topo(topo, low_cut_hz, high_cut_hz, sampling_rate, axis=0, filt_order=4):
    """Zero-phase Butterworth band-pass filter of `topo` along `axis`.

    Bug fix: `axis` was accepted but ignored -- filtfilt was hard-coded to
    axis=0, silently filtering the wrong dimension for axis != 0.
    """
    nyq_freq = 0.5 * sampling_rate
    # Normalize both cutoffs to the Nyquist frequency.
    low = low_cut_hz / nyq_freq
    high = high_cut_hz / nyq_freq
    b, a = scipy.signal.butter(filt_order, [low, high], btype='bandpass')
    filtered = scipy.signal.filtfilt(b, a, topo, axis=axis)
    return filtered
import pytest
from src.elevator import (
Elevator, MoveRequest,
)
from src.operator import Operator
def test_simple():
    """Smoke test: the Operator can open/close the doors of its single
    elevator, and rejects an out-of-range elevator index with a message."""
    elevator = Elevator(
        tonnage=1000,
        floors_count=25,
        current_direction=0,
        current_weight=0,
        current_floor=17,
        is_light_on=True,
        is_smoked=True,
        requests=[],
        is_communication_on=False,
        is_doors_open=False,
        is_empty=True
    )
    operator = Operator([elevator])
    operator.open_doors(0)
    assert elevator.is_doors_open == True
    operator.close_doors(0)
    assert elevator.is_doors_open == False
    # Index 1 is out of range for a single-elevator operator.
    assert operator.open_doors(1) == 'Wrong elevator number'
|
import win32gui, win32api, win32con
import psutil
import os
from time import sleep
def kill_911():
    """Force-kill the 911 client processes (Client/Monitor/MonitorGUI) if
    any are running, using taskkill."""
    targets = ('Client.exe', 'Monitor.exe', 'MonitorGUI.exe')
    for pid in psutil.pids():
        try:
            proc = psutil.Process(pid)
        except Exception:
            # Bug fix: was a bare `except:` (also caught KeyboardInterrupt/
            # SystemExit). The process may simply have exited between
            # pids() and Process() -- skip it.
            continue
        # The three separate if-branches built identical commands; fold
        # them into one membership test.
        if proc.name() in targets:
            os.system('taskkill /F /IM ' + proc.name())
def click_position(hwd, x_position, y_position, sleeps):
    """
    Post a left mouse click at the given client coordinates of window `hwd`.

    :param hwd: target window handle
    :param x_position: x coordinate within the window
    :param y_position: y coordinate within the window
    :param sleeps: post-click delay in seconds (currently unused -- see the
        commented-out sleep below)
    :return: None
    """
    # Pack the two 16-bit coordinates into one 32-bit lParam value.
    long_position = win32api.MAKELONG(x_position, y_position)
    # win32api.SendMessage(hwnd, win32con.MOUSEEVENTF_LEFTDOWN, win32con.MOUSEEVENTF_LEFTUP, long_position)
    # Post button-down then button-up to simulate a left click without
    # moving the real cursor (PostMessage targets the window directly).
    win32api.PostMessage(hwd, win32con.WM_LBUTTONDOWN, win32con.MK_LBUTTON, long_position)
    win32api.PostMessage(hwd, win32con.WM_LBUTTONUP, win32con.MK_LBUTTON, long_position)
    # print('ok')
    # sleep(int(sleeps))
def login_911():
    '''
    Auto-login the 911 client after its window has opened: wait for the
    client window to appear and finish resizing, dismiss any error dialogs,
    then click the login button repeatedly until the window goes away.
    '''
    # sleep(30)
    # Find the 911 client window (VB6 class "ThunderRT6FormDC"), polling
    # once per second until it exists.
    handle = 0
    while handle == 0:
        handle = win32gui.FindWindow("ThunderRT6FormDC", None)
        sleep(1)
    left, top, right, bottom = win32gui.GetWindowRect(handle)
    # Wait until the window's bottom edge changes -- i.e. the client has
    # finished loading and resized itself.
    while True:
        left1, top1, right1, bottom1 = win32gui.GetWindowRect(handle)
        if bottom1 != bottom:
            break
        sleep(1)
    kill_OK()
    sleep(3)
    # The login button is a VB6 command button child of the client window.
    subHandle = win32gui.FindWindowEx(handle, 0, "ThunderRT6CommandButton", None)
    click_position(subHandle,20,20,3)
    sleep(3)
    # Keep dismissing dialogs and re-clicking; GetWindowRect raising means
    # the window is gone, which is taken as a successful login.
    while True:
        try:
            left1, top1, right1, bottom2 = win32gui.GetWindowRect(handle)
            kill_OK()
            sleep(3)
            click_position(subHandle,20,20,3)
            sleep(3)
        except:
            break
    # (Original commented-out variant: poll for the "choose server"
    # button, then kill_OK() and click it once.)
    # subHandle = 0
    # while subHandle == 0:
    #     subHandle = win32gui.FindWindowEx(handle, 0, "ThunderRT6CommandButton", None)
    #     sleep(1)
    # kill_OK()
    # sleep(1)
    # click_position(subHandle,20,20,3)
def kill_OK():
    '''
    Dismiss the "Client" error dialog (shown when getting a server or
    logging in fails) by clicking its OK button, if such a dialog exists.
    '''
    calssname = u"#32770"   # standard Windows dialog class (variable-name typo kept: it's code)
    titlename = u"Client"
    hwnd = win32gui.FindWindow(calssname,titlename)
    handle_ok = win32gui.FindWindowEx(hwnd, 0, "Button", None)
    if hwnd != 0:
        # Dialog present: connect/login failed -- click OK to close it.
        click_position(handle_ok,10,10,3)
    else:
        # No dialog: nothing to do.
        pass
def OpenCCleaner():
    """Launch CCleaner from the bundled tools directory, wait for the GUI
    to appear, then trigger its cleaner run."""
    os.system(r'start ..\tools\Cleaner\ccsetup312\CCleaner64.exe')
    sleep(5)  # give the GUI time to come up before searching for its window
    run_CCleaner()
    # (Commented-out experiments with kill_911()/login_911() removed from
    # the active path; see those functions.)
def run_CCleaner():
    """Wait for the 'Piriform CCleaner' window, click its '&Run Cleaner'
    button, and return the main window handle."""
    handle = 0
    # Poll once per second until the CCleaner main window exists.
    while handle == 0:
        handle = win32gui.FindWindow(None, 'Piriform CCleaner')
        print(1)
        sleep(1)
    print(handle)
    title = win32gui.GetWindowText(handle)
    print(title)
    # The button lives inside a #32770 child pane of the main window.
    subHandle = win32gui.FindWindowEx(handle,0,'#32770',None)
    subHandle = win32gui.FindWindowEx(subHandle,0,'Button','&Run Cleaner')
    click_position(subHandle,10,10,3)
    print('subHandle:', subHandle)
    title = win32gui.GetWindowText(subHandle)
    print(title)
    return handle
def run_changer():
    """Launch changer.exe, click its "one-click modify" button, wait for
    the modification to finish, then click its "restart PC" button."""
    print('========')
    os.system(r'start ..\tools\Cleaner\changer.exe')
    sleep(5)
    classname = '#32770'
    # Mojibake window title exactly as FindWindow sees it -- TODO confirm encoding.
    title = '?B????????'
    handle = 0
    # NOTE(review): `title` is overwritten inside this loop; if the window
    # is not found on the first pass, later FindWindow calls search for the
    # wrong (empty) title -- confirm whether this is intended.
    while handle== 0:
        handle = win32gui.FindWindow(None, title)
        title = win32gui.GetWindowText(handle)
        print(title)
        print(1)
    print(title)
    # Button captions are Chinese: '修改选项' = "modify options",
    # '一键修改' = "one-click modify", '重启电脑' = "restart PC".
    subHandle = win32gui.FindWindowEx(handle,0,'Button','修改选项')
    title2 = win32gui.GetWindowText(subHandle)
    print(title2)
    # One-click modify
    handle_modify = win32gui.FindWindowEx(handle,0,'Button', '一键修改')
    # title2 = win32gui.GetClassName(handle_head)
    title2 = win32gui.GetWindowText(handle_modify)
    print(title2)
    click_position(handle_modify,10,10,3)
    sleep(15)  # wait for the modification to complete
    # Restart the PC
    handle_modify = win32gui.FindWindowEx(handle,0,'Button', '重启电脑')
    # title2 = win32gui.GetClassName(handle_head)
    title2 = win32gui.GetWindowText(handle_modify)
    print(title2)
    click_position(handle_modify,10,10,3)
def Restart():
    """Full cycle: clean with CCleaner, pause, then run the changer
    (which ends by rebooting the machine)."""
    OpenCCleaner()
    sleep(5)
    run_changer()
if __name__=='__main__':
    # Run a single clean + change cycle; run_changer() reboots at the end.
    # for i in range(10):
    OpenCCleaner()
    # sleep(5)
    run_changer()
    # sleep(5)
    # run_CCleaner()
|
import requests, time
from create_pwd import CreatePasswd
from ShowProgress import TimeCount
class Guest_Wifi:
    """Controller for a router's guest Wi-Fi (timepro.cgi endpoint):
    toggles the BSSID on/off and rotates its WPA password via
    authenticated HTTP POSTs."""

    def turn_on_off(self):
        """Toggle the guest network.

        Off -> on: push a freshly generated password, enable the BSSID,
        then count down the allotted minutes. On -> off: disable the BSSID
        and restore the permanent ('hard') password.
        """
        if not self.status :
            print('Trying to turn on {}'.format(self.data['ssid']))
            print('Password : {}'.format(self.pw.new))
            self.new_pw()
            self.data['run'] = '1'
            self.data['action'] = 'bssidonoff'
            requests.post(self.set_url, data=self.data, headers=self.headers, cookies=self.cookies)
            print('Turned on {},ends in {} minutes \n'.format(self.data['ssid'],int(self.countdown.total_secs/60)))
            self.status = True
            # Presumably blocks while displaying remaining minutes -- see TimeCount.
            self.countdown.left_mins_count()
        elif self.status:
            print('Trying to turn off wifi 1')
            self.data['run'] = '0'
            self.data['action'] = 'bssidonoff'
            requests.post(self.set_url, data=self.data, headers=self.headers, cookies=self.cookies)
            print('Turned off {} \n'.format(self.data['ssid']))
            self.hard_pw()

    def new_pw(self):
        """Submit the freshly generated one-time password to the router."""
        self.data['wpapsk'] = self.pw.new
        self.data['action'] = 'allsubmit'
        requests.post(self.set_url, data=self.data, headers=self.headers, cookies=self.cookies)

    def hard_pw(self):
        """Restore the permanent ('hard') password on the router."""
        self.data['wpapsk'] = self.pw.hard
        self.data['action'] = 'allsubmit'
        requests.post(self.set_url, data=self.data, headers=self.headers, cookies=self.cookies)

    def __init__(self,**kwargs):
        """Expected kwargs: Check (headers/base URL/port), Login (cookie),
        datas (required router form fields), plus optional time (countdown
        length), ssid and password."""
        self.headers = kwargs['Check'].headers
        self.cookies = kwargs['Login'].cookie
        self.url = kwargs['Check'].basic_url
        self.remote_port = kwargs['Check'].remote_port
        self.set_url = 'http://' + self.url + ':' + self.remote_port + '/sess-bin/timepro.cgi'
        # `datas` is mandatory: it carries the router's control form fields.
        if 'datas' not in kwargs:
            print('-----------Error--------------')
            print('You should input control datas')
            print('------------------------------')
            exit()
        else :
            self.data = kwargs['datas']
        # Countdown defaults to TimeCount(10, 2) when no 'time' is given.
        if 'time' not in kwargs:
            self.countdown = TimeCount(10,2)
        else:
            self.countdown = TimeCount(kwargs['time'], 2)
        self.status = False  # guest network assumed off at start
        self.pw = CreatePasswd()
        # Default SSID is derived from the BSSID index in `datas`.
        if 'ssid' in kwargs:
            self.data['ssid'] = kwargs['ssid']
        else:
            self.data['ssid'] = 'One Time Wifi {}'.format(kwargs['datas']['sidx'])
        if 'password' in kwargs:
            self.pw.new = kwargs['password']
|
#!/usr/bin/env python
import cgi, cgitb
import logging
import os
from roundwared import server
from roundwared import settings
cgitb.enable() # Turn on CGI tracebacks for development.
form = cgi.FieldStorage()

# Append-mode debug logging to the file configured by roundware settings.
logging.basicConfig(
    filename=settings.config["log_file"],
    filemode="a",
    level=logging.DEBUG,
    format='%(asctime)s %(filename)s:%(lineno)d %(levelname)s %(message)s',
)

# Optional ?config=<name> selects an alternate config under /etc/roundware/.
if form.getvalue('config'):
    settings.initialize_config(
        os.path.join(
            '/etc/roundware/',
            form.getvalue('config')))

# Request a stream matching the form's selection and count recordings.
dict_form = server.form_to_dict(form)
result = server.request_stream(dict_form)
url = result['STREAM_URL']
num_rec = server.number_of_recordings(dict_form)

# Python 2 CGI response: header, blank line, then the HTML body.
print "Content-type: text/html"
print
print "<html><head><title>Roundware - Listen</title></head><body>"
print "<p>" + str(num_rec) + " recordings match your selection."
print "<p>You can use the <a href=\""+url+"\">MP3 url</a> or the "
print "<a href=\""+url+".m3u\">M3U url</a>."
print "<p>You can <a href=\"/cgi-bin/control.py?stream_url="+url+"\">control</a> your stream too.</p>"
print "</body></html>"
|
import sys
import pickle
from sklearn import svm
class ClassifierModel:
def __init__(self, omodelclassifier, omodelreg, feature_size):
self.omodelclassifier = omodelclassifier
self.omodelreg = omodelreg
self.feature_size = feature_size
def train_classifier(self, trainfile):
print 'Feature Size', self.feature_size
fi = open(trainfile, 'r')
X = []
Y = []
for line in fi:
line = line.strip()
(control, targetb, _, featurestr) = line.split('\t')
Y.append(int(targetb))
xx = [0.0 for i in range(self.feature_size)]
featpairs = featurestr.split()
for feat in featpairs:
(fidx, vv) = feat.split(':')
xx[int(fidx)] = float(vv)
X.append(xx)
print 'Training start'
'''
Training start
'''
class_weights = {0:0.1, 1:0.9} #for unbalanced data, label-0 is much more than label-1.
clf = svm.LinearSVC(class_weight=class_weights)
clf.fit(X, Y)
'''
Training End
'''
print 'Classifier Training End'
pickle.dump(clf, open(self.omodelclassifier, 'wb'))
self.clf = clf
def train_predict(self, trainfile):
print 'Feature Size', self.feature_size
fi = open(trainfile, 'r')
X = []
Y = []
for line in fi:
line = line.strip()
(control, targetb, targetd, featurestr) = line.split('\t')
if(int(targetb) == 0):
continue
Y.append(float(targetd))
xx = [0.0 for i in range(feature_size)]
featpairs = featurestr.split()
for feat in featpairs:
(fidx, vv) = feat.split(':')
xx[int(fidx)] = float(vv)
X.append(xx)
print 'Regression Training start'
'''
Training start
'''
reg = svm.LinearSVR()
reg.fit(X, Y)
'''
Training End
'''
print 'Regression Training End'
pickle.dump(reg, open(self.omodelreg, 'wb'))
self.reg = reg
def readCmodel(self):
pkl_file = open(self.omodelclassifier, 'rb')
self.clf = pickle.load(pkl_file)
pkl_file.close()
def readRmodel(self):
pkl_file = open(self.omodelreg, 'rb')
self.reg = pickle.load(pkl_file)
pkl_file.close()
'''
inference function for binary classification
'''
def classify(self, X):
ytest = self.clf.predict(X)
return ytest
'''
inference function for regression.
'''
def predict(self, X):
ytest = self.reg.predict(X)
return ytest
if __name__ == '__main__':
    # CLI: train_feature_file classifier_model_out regressor_model_out feature_size
    if(len(sys.argv)!=5):
        print 'Usage: train_feature_file, output_model_for_classifier, output_model_for_predict, feature_size'
        sys.exit()
    trainfile = sys.argv[1]
    modelclassifier = sys.argv[2]
    modelreg = sys.argv[3]
    feature_size = int(sys.argv[4])
    # Train both models over the same feature file.
    cm = ClassifierModel(modelclassifier, modelreg, feature_size)
    cm.train_classifier(trainfile)
    cm.train_predict(trainfile)
|
# -*- coding: utf-8 -*-
'''
Runner Module for Interacting with Zenoss
:configuration: This module can be used by specifying the name of a
configuration profile in the master config.
For example:
.. code-block:: yaml
zenoss:
hostname: https://zenoss.example.com
username: admin
password: admin123
'''
from __future__ import absolute_import
# Import python libs
import json
import logging
import re
# Import salt libs
try:
import requests
HAS_LIBS = True
except ImportError:
HAS_LIBS = False
log = logging.getLogger(__name__)

# Zenoss API router names mapped to their URL endpoint fragments
# (used as .../zport/dmd/<fragment>_router).
ROUTERS = {'MessagingRouter': 'messaging',
           'EventsRouter': 'evconsole',
           'ProcessRouter': 'process',
           'ServiceRouter': 'service',
           'DeviceRouter': 'device',
           'NetworkRouter': 'network',
           'TemplateRouter': 'template',
           'DetailNavRouter': 'detailnav',
           'ReportRouter': 'report',
           'MibRouter': 'mib',
           'ZenPackRouter': 'zenpack'}

# Zenoss production-state names mapped to their numeric codes.
PROD_STATES = {'Production': 1000,
               'Pre-Production': 500,
               'Test': 400,
               'Maintenance': 300,
               'Decommissioned': -1}
def __virtual__():
    '''
    Only load if requests is installed
    '''
    # Returning None keeps the module hidden when requests is missing.
    return 'zenoss' if HAS_LIBS else None
def _session():
    '''
    Create a session to be used when connecting to Zenoss.
    '''
    # Credentials come from the `zenoss` section of the master config;
    # TLS verification is disabled (self-signed Zenoss certs are common).
    conf = __opts__.get('zenoss', None)
    sess = requests.session()
    sess.auth = (conf.get('username'), conf.get('password'))
    sess.verify = False
    sess.headers.update({'Content-type': 'application/json; charset=utf-8'})
    return sess
def _http_get(page, data=None):
    '''
    Make a normal http get to Zenoss
    '''
    # Guard clause: without a `zenoss` config section we cannot build a URL.
    config = __opts__.get('zenoss', None)
    if config is None:
        log.debug('No zenoss configurations found in master config')
        return False
    url = '{0}/zport/dmd/{1}?{2}'.format(config.get('hostname'), page, data)
    # True for any 2xx/3xx status, False otherwise.
    return _session().get(url).ok
def _http_post(page, data=None):
    '''
    Make a normal http post to Zenoss
    '''
    # Guard clause: without a `zenoss` config section we cannot build a URL.
    config = __opts__.get('zenoss', None)
    if config is None:
        log.debug('No zenoss configurations found in master config')
        return False
    url = '{0}/zport/dmd/{1}'.format(config.get('hostname'), page)
    # The payload is JSON-encoded before posting.
    return _session().post(url, json.dumps(data)).ok
def _router_request(router, method, data=None):
    '''
    Make a request to the Zenoss API router.

    Returns the `result` member of the JSON response, False for an
    unknown router or missing config, and raises on bad credentials.
    '''
    if router not in ROUTERS:
        return False
    # Consistency with _http_get/_http_post: bail out cleanly instead of
    # raising AttributeError on config.get below when config is missing.
    config = __opts__.get('zenoss', None)
    if config is None:
        log.debug('No zenoss configurations found in master config')
        return False
    req_data = json.dumps([dict(
        action=router,
        method=method,
        data=data,
        type='rpc',
        tid=1)])
    log.debug('Making request to router %s with method %s', router, method)
    url = '{0}/zport/dmd/{1}_router'.format(config.get('hostname'), ROUTERS[router])
    response = _session().post(url, data=req_data)
    # The API returns a 200 response code even when auth is bad.
    # With bad auth, the login page is displayed. Here I search for
    # an element on the login form to determine if auth failed.
    # BUGFIX: use response.text (str) instead of response.content
    # (bytes on Python 3) so the str regex and json.loads both work.
    if re.search('name="__ac_name"', response.text):
        log.error('Request failed. Bad username/password.')
        raise Exception('Request failed. Bad username/password.')
    return json.loads(response.text).get('result', None)
def _get_all_devices():
    """Fetch every device under /zport/dmd/Devices via the DeviceRouter."""
    query = [{'uid': '/zport/dmd/Devices', 'params': {}, 'limit': None}]
    return _router_request('DeviceRouter', 'getDevices', data=query)
def find_device(device=None):
    '''
    Find a device in Zenoss. Returns the device dict (with the listing
    hash added under 'hash'); returns False if the device is not found.
    Parameters:
        device: (Required) The device name in Zenoss
    CLI Example:
        salt-run zenoss.find_device device=saltmaster
    '''
    all_devices = _get_all_devices()
    for dev in all_devices['devices']:
        if dev['name'] == device:
            # We need to save the hash for later operations
            dev['hash'] = all_devices['hash']
            log.info('Found device %s in Zenoss', device)
            return dev
    log.info('Unable to find device %s in Zenoss', device)
    return False
def device_exists(device=None):
    '''
    Check to see if a device already exists in Zenoss.
    Parameters:
        device: (Required) The device name in Zenoss
    CLI Example:
        salt-run zenoss.device_exists device=saltmaster
    '''
    # find_device returns a truthy dict on success, False otherwise.
    return bool(find_device(device))
def add_device(deviceName,
               deviceClass,
               title=None,
               snmpCommunity='',
               snmpPort=161,
               manageIp="",
               model=True,
               collector='localhost',
               rackSlot=0,
               locationPath="",
               systemPaths=None,
               groupPaths=None,
               prod_state='Production',
               comments="",
               hwManufacturer="",
               hwProductName="",
               osManufacturer="",
               osProductName="",
               priority=3,
               tag="",
               serialNumber="",
               zCommandUsername="",
               zCommandPassword="",
               zWinUser="",
               zWinPassword="",
               zProperties=None,
               cProperties=None):
    '''
    A function to connect to a zenoss server and add a new device entry.
    Parameters:
        deviceName: (Required) The device name in Zenoss
        deviceClass: (Required) The device class to use. If none, will determine based on kernel grain.
        prod_state: (Optional)(Default Production) The prodState to set on the device.
        title: (Optional) See Zenoss documentation
        snmpCommunity: (Optional) See Zenoss documentation
        snmpPort: (Optional) See Zenoss documentation
        manageIp: (Optional) See Zenoss documentation
        model: (Optional) See Zenoss documentation
        collector: (Optional)(Default localhost) The collector to use for this device.
        rackSlot: (Optional) See Zenoss documentation
        locationPath: (Optional) See Zenoss documentation
        systemPaths: (Optional) See Zenoss documentation
        groupPaths: (Optional) See Zenoss documentation
        comments: (Optional) See Zenoss documentation
        hwManufacturer: (Optional) See Zenoss documentation
        hwProductName: (Optional) See Zenoss documentation
        osManufacturer: (Optional) See Zenoss documentation
        osProductName: (Optional) See Zenoss documentation
        priority: (Optional) See Zenoss documentation
        tag: (Optional) See Zenoss documentation
        serialNumber: (Optional) See Zenoss documentation
        zCommandUsername: (Optional) See Zenoss documentation
        zCommandPassword: (Optional) See Zenoss documentation
        zWinUser: (Optional) See Zenoss documentation
        zWinPassword: (Optional) See Zenoss documentation
        zProperties: (Optional) See Zenoss documentation
        cProperties: (Optional) See Zenoss documentation
    CLI Example:
        salt-run zenoss.add_device deviceName=saltmaster deviceClass='/Server/Linux'
    '''
    # BUGFIX: the list/dict defaults were mutable objects shared between
    # calls; use None sentinels and build fresh containers per call.
    if systemPaths is None:
        systemPaths = []
    if groupPaths is None:
        groupPaths = []
    if zProperties is None:
        zProperties = {}
    if cProperties is None:
        cProperties = {}
    if device_exists(deviceName):
        return 'Device already exists'
    log.info('Adding device %s to zenoss', deviceName)
    # The state name is translated to its numeric code for the API.
    data = dict(deviceName=deviceName,
                deviceClass=deviceClass,
                title=title,
                snmpCommunity=snmpCommunity,
                snmpPort=snmpPort,
                manageIp=manageIp,
                model=model,
                collector=collector,
                rackSlot=rackSlot,
                locationPath=locationPath,
                systemPaths=systemPaths,
                groupPaths=groupPaths,
                productionState=PROD_STATES[prod_state],
                comments=comments,
                hwManufacturer=hwManufacturer,
                hwProductName=hwProductName,
                osManufacturer=osManufacturer,
                osProductName=osProductName,
                priority=priority,
                tag=tag,
                serialNumber=serialNumber,
                zCommandUsername=zCommandUsername,
                zCommandPassword=zCommandPassword,
                zWinUser=zWinUser,
                zWinPassword=zWinPassword,
                zProperties=zProperties,
                cProperties=cProperties)
    response = _router_request('DeviceRouter', 'addDevice', data=[data])
    return response
def set_prod_state(prod_state, device=None):
    '''
    A function to set the prod_state in zenoss.
    Parameters:
        prod_state: (Required) String value of the state
            - Production
            - Pre-Production
            - Test
            - Maintenance
            - Decommissioned
        device: (Required) The device name in Zenoss
    CLI Example:
        salt-run zenoss.set_prod_state prod_state=Production device=saltmaster
    '''
    device_object = find_device(device)
    if not device_object:
        return "Unable to find a device in Zenoss for {0}".format(device)
    # BUGFIX: prod_state is a state *name* (a string); the previous %d
    # placeholder caused a formatting error inside logging.
    log.info('Setting prodState to %s on %s device', prod_state, device)
    data = dict(uids=[device_object['uid']], prodState=PROD_STATES[prod_state], hashcheck=device_object['hash'])
    return _router_request('DeviceRouter', 'setProductionState', [data])
def get_decomm():
    '''
    A function to get all decommissioned devices in Zenoss.

    Returns the list of device names, or the string 'No devices
    returned' when none are decommissioned.
    CLI Example:
        salt-run zenoss.get_decomm
    '''
    log.info('Get all decommissioned devices from Zenoss')
    decomm_device = []
    all_devices = _get_all_devices()
    for dev in all_devices['devices']:
        if dev['productionState'] == PROD_STATES['Decommissioned']:
            decomm_device.append(dev['name'])
    # BUGFIX: removed unreachable statements (a log call and
    # `return True`) that followed the return in both branches.
    if decomm_device:
        return decomm_device
    return 'No devices returned'
def send_event(summary, device, severity, evclasskey=None, evclass=None, component=None):
    '''
    A function to send events to Zenoss
    Parameters:
        summary: (Required) The summary of the event
        device: (Required) The device name in Zenoss
        severity: (Required) String value of the state
            - Critical
            - Error
            - Warning
            - Info
            - Debug
            - Clear
        evclasskey: (optional) The Event class key from Zenoss
        evclass: (optional) The Event class for the event
        component: (optional) The component on the device this message refers to
    CLI Example:
        salt-run zenoss.send_event summary='Config just executed' device=saltmaster severity='Info'
    '''
    # The EventsRouter expects a list with a single event dict.
    event = {
        'summary': summary,
        'device': device,
        'component': component,
        'severity': severity,
        'evclasskey': evclasskey,
        'evclass': evclass,
    }
    return _router_request('EventsRouter', 'add_event', data=[event])
def add_user(username, email):
    """Create a Zenoss UI user via the ZenUsers management form."""
    # The management form takes URL-encoded fields joined with '&'.
    params = [
        'tableName=userlist',
        'zenScreenName=manageUserFolder.pt',
        'filter=""',
        'userid=' + username,
        'email=' + email,
        'manage_addUser:method=OK',
    ]
    return _http_get('ZenUsers', data='&'.join(params))
def reset_password(username):
    """Trigger Zenoss's password-reset action for `username`."""
    payload = ['manage_resetPassword:method=Submit']
    return _http_post('ZenUsers/' + username, data=payload)
def update_password(username, password):
    """Set a new password for `username`, authenticating the change with
    the admin password from the master config."""
    config = __opts__.get('zenoss', None)
    fields = [
        'roles:list=ZenUser',
        'password=' + password,
        'sndpassword=' + password,
        'oldpassword=' + config.get('password'),
        'manage_editUserSettings:method=+Save+Settings+',
    ]
    return _http_get('ZenUsers/' + username, data='&'.join(fields))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
from operator import itemgetter
__all__ = [
# SERVICES WORKING ON HW MODULE TABLE
'sub_dict'
]
def sub_dict(d, ks):
    """Return a new dict containing only the keys `ks` taken from `d`.

    Every key in `ks` must be present in `d` (itemgetter raises KeyError
    otherwise); an empty `ks` yields an empty dict.
    """
    if not ks:
        return {}
    picked = itemgetter(*ks)(d)
    # itemgetter returns a bare value (not a tuple) for a single key.
    if len(ks) == 1:
        picked = [picked]
    return dict(zip(ks, picked))
|
import webapp2
import vision_rest_api
# Module-level indexes: detected feature value -> list of image
# filenames exhibiting it.
label_dict = {}
landmark_dict = {}
dominant_color_dict = {}


def update_label_dict(labels, image):
    """Record `image` under every label in `labels`."""
    for label in labels:
        label_dict.setdefault(label, []).append(image)
def update_landmark_dict(landmarks, image):
    """Record `image` under every landmark in `landmarks`."""
    for landmark in landmarks:
        landmark_dict.setdefault(landmark, []).append(image)
def update_dominant_color_dict(dominant_color, image):
    """Group `image` under an existing dominant-colour key when every
    channel differs by at most the threshold; otherwise start a new group.

    `dominant_color` is presumably an (r, g, b) tuple — the code indexes
    channels 0..2; TODO confirm against vision_api_property_detection.
    When the image is larger than 4MB, detect_properties_GT returns an
    empty list, which would raise IndexError below, so short inputs are
    skipped (original guard kept: anything of length <= 1 is ignored).
    """
    # Allowed per-channel difference for two colours to share a group.
    # (renamed from `range`, which shadowed the builtin)
    threshold = 30
    if len(dominant_color) <= 1:
        return
    for key in dominant_color_dict:
        # BUGFIX: the green channel previously compared against
        # dominant_color[2] (blue) by copy-paste mistake. The rewrite
        # also stops mutating the dict while iterating its keys and no
        # longer appends a duplicate entry per non-matching key.
        if (abs(key[0] - dominant_color[0]) <= threshold
                and abs(key[1] - dominant_color[1]) <= threshold
                and abs(key[2] - dominant_color[2]) <= threshold):
            dominant_color_dict[key].append(image)
            return
    # No similar colour found: start a new group.
    dominant_color_dict[dominant_color] = [image]
class MainPage(webapp2.RequestHandler):
    """Runs the Vision API over a fixed photo set and renders the label,
    landmark and dominant-colour indexes as HTML."""

    def get(self):
        # Photo filenames expected to exist in the GCS bucket below.
        photonames = ['2000s (15).jpg', '2000s (25).jpg', '2007.12.15 DSC00075.jpg', '2007.12.20 DSC00099.jpg', '2008.06.09 DSCN0241.JPG', '2008.06.11 DSCN0287.JPG', '2008.06.11 DSCN0289.JPG', '2010.10.19 19102010200.jpg', '2010.12.12 12122010200.jpg', '2011.02.20 20022011082.jpg', '2011.02.20 20022011134.jpg', '2011.02.21 21022011138.jpg', '2011.02.22 220220111590.jpg', '2011.02.22 22022011165.jpg', '2011.09.27 IMG (120).JPG', '2012.01.05 050120121826.jpg', '2012.01.09 090120122101.jpg', '2012.05.25 25052012174.jpg', '2013.02.06 377765_501271499919187_1848268795_n.jpg', '2014.02.12 DSC_0074.jpg', '2014.03.12 DSCN2600.JPG', '2014.03.12 DSCN2622.JPG', '2016.08.27 IMG_20160827_104641.jpg', '2017.06.14 20170614_192952-01.jpg', '2017.06.14 IMG_20170614_173138.jpg', '2017.07.22 IMG_20170722_155305.jpg', '2017.09.02 IMG_20170902_133309.jpg', '2017.09.02 IMG_20170902_133514.jpg']
        # Smaller test sets kept for quick manual runs:
        #photonames = ['2000s (15).jpg', '2000s (25).jpg', '2007.12.15 DSC00075.jpg']
        #photonames = ['2000s (15).jpg']
        url_base = "gs://seventh-terrain-179700.appspot.com/"
        # Run the three Vision API detections on every photo and fold the
        # results into the module-level indexes.
        for a_photo in photonames:
            labels = vision_rest_api.vision_api_label_detection(url_base+a_photo)
            update_label_dict(labels, a_photo)
            landmarks = vision_rest_api.vision_api_landmark_detection(url_base+a_photo)
            update_landmark_dict(landmarks, a_photo)
            dominant_color = vision_rest_api.vision_api_property_detection(url_base+a_photo)
            update_dominant_color_dict(dominant_color, a_photo)
        # Earlier, simpler rendering kept for reference:
        # label_dict_sorted = sorted(label_dict, key=lambda k: len(label_dict[k]), reverse=True)
        # self.response.write("LABEL_DETECTION<br>")
        # self.response.write(label_dict_sorted)
        # self.response.write("<br>")
        # landmark_dict_sorted = sorted(landmark_dict, key=lambda k: len(landmark_dict[k]), reverse=True)
        # self.response.write("LANDMARK_DETECTION<br>")
        # self.response.write(landmark_dict_sorted)
        # self.response.write("<br>")
        # dominant_color_dict_sorted = sorted(dominant_color_dict, key=lambda k: len(dominant_color_dict[k]), reverse=True)
        # self.response.write("IMAGE_PROPERTIES<br>")
        # self.response.write(dominant_color_dict_sorted)
        # self.response.write("<br>")
        # Render each index sorted by how many images share the key.
        self.response.write("LABEL_DETECTION<br>")
        for k in sorted(label_dict, key=lambda k: len(label_dict[k]), reverse=True):
            self.response.write(k)
            self.response.write(", ")
            self.response.write(len(label_dict[k]))
            self.response.write(", ")
            self.response.write(label_dict[k])
            self.response.write("<br>")
        self.response.write("<br>")
        self.response.write("LANDMARK_DETECTION<br>")
        for k in sorted(landmark_dict, key=lambda k: len(landmark_dict[k]), reverse=True):
            self.response.write(k)
            self.response.write(", ")
            self.response.write(len(landmark_dict[k]))
            self.response.write(", ")
            self.response.write(landmark_dict[k])
            self.response.write("<br>")
        self.response.write("<br>")
        self.response.write("IMAGE_PROPERTIES<br>")
        for k in sorted(dominant_color_dict, key=lambda k: len(dominant_color_dict[k]), reverse=True):
            self.response.write(k)
            self.response.write(", ")
            self.response.write(len(dominant_color_dict[k]))
            self.response.write(", ")
            self.response.write(dominant_color_dict[k])
            self.response.write("<br>")
# WSGI entry point; debug=True renders tracebacks in the browser.
application = webapp2.WSGIApplication([('/', MainPage)],
                                      debug=True)
|
# -*- coding: utf-8 -*-
class Solution:
    def minimumMoves(self, s: str) -> int:
        """Minimum moves to turn every 'X' in `s` into 'O', where one
        move converts any three consecutive characters to 'O'.

        Greedy: on the first 'X' seen, one move covers it and the next
        two positions, so jump three places ahead.
        """
        moves = 0
        i = 0
        n = len(s)
        while i < n:
            if s[i] == "X":
                moves += 1
                i += 3
            else:
                i += 1
        return moves
if __name__ == "__main__":
    # Quick sanity checks for minimumMoves (note: "000" contains zeros,
    # not the letter O, so it trivially needs no moves).
    solution = Solution()
    assert 1 == solution.minimumMoves("XXX")
    assert 2 == solution.minimumMoves("XXOX")
    assert 0 == solution.minimumMoves("000")
|
import numpy as np
import queue
from math import sqrt
from vectorizer.utils.distances import distance
from collections import deque
def flood_fill(px, py, segments, border):
    """Collect the border pixels of the segment containing (px, py) via
    a two-pass BFS over `border` pixels of the same segment.

    Coordinates wrap around the image edges (modular indexing). Returns
    the visit order as a list of (y, x) tuples, or [] when no other
    border pixel of the segment lies inside the initial block.
    """
    s = segments.shape
    seg = segments[py,px]
    visited = np.zeros(s, dtype=bool)
    queued = np.zeros(s, dtype=bool)
    blockSize = 3
    # No better idea at the moment how to do this differently: the
    # border forms a cycle, so we break the cycle at one point (a small
    # blocked neighbourhood around the start) and then walk along the
    # path that remains of the cycle.
    cont = []
    for i in range(-blockSize,blockSize+1):
        for j in range(-blockSize,blockSize+1):
            y = (py+i)%s[0]
            x = (px+j)%s[1]
            if segments[y,x] == seg and border[y,x]:
                # Pre-mark the block as visited so the BFS cannot cross
                # the start point; remember the blocked pixels.
                visited[y,x] = True
                if (y != py) or (x != px):
                    cont.append((y,x))
    if len(cont) <= 0:
        return []
    # An empty neighbourhood looks like invalid data, hence the bailout.
    start = cont.pop()
    q = queue.Queue()
    q.put(start)
    path = []
    # First pass: BFS along the border away from the blocked start area.
    while not(q.empty()):
        (qy,qx) = q.get()
        path.append((qy,qx))
        visited[qy,qx] = True
        queued[qy,qx] = False
        for i in range(-1,2):
            for j in range(-1,2):
                y = (qy+i)%s[0]
                x = (qx+j)%s[1]
                if border[y,x] and segments[y,x] == seg and not(visited[y,x]) and not(queued[y,x]):
                    queued[y,x] = True
                    q.put((y,x))
    # Unblock the start neighbourhood so the second pass may enter it.
    for i in range(-blockSize,blockSize+1):
        for j in range(-blockSize,blockSize+1):
            y = (py+i)%s[0]
            x = (px+j)%s[1]
            if segments[y,x] == seg and border[y,x]:
                visited[y,x] = False
    # Resume from the last reached pixel (falling back to a blocked one,
    # then to the start) to pick up the part cut off by the block.
    if len(path) > 0:
        q.put(path[len(path)-1])
    elif len(cont) > 0:
        q.put(cont[len(cont)-1])
    else:
        q.put(start)
    # Second pass: same BFS, continuing the path through the start area.
    while not(q.empty()):
        (qy,qx) = q.get()
        path.append((qy,qx))
        visited[qy,qx] = True
        queued[qy,qx] = False
        for i in range(-1,2):
            for j in range(-1,2):
                y = (qy+i)%s[0]
                x = (qx+j)%s[1]
                if border[y,x] and segments[y,x] == seg and not(visited[y,x]) and not(queued[y,x]):
                    queued[y,x] = True
                    q.put((y,x))
    return path
def th(x):
    """Sort key: the third element of `x` — the length field of a
    (y, x, length) path-ending triple."""
    length = x[2]
    return length
def reconstruct_path(x, y, parent):
    """Walk the `parent` links from (y, x) back to a root (marked by a
    negative coordinate) and return the visited (y, x) pairs in order."""
    trail = []
    while x >= 0 and y >= 0:
        trail.append((y, x))
        (y, x) = parent[y, x]
    return trail
def border_path(px, py, segments, border):
    """Trace the border of the segment containing (px, py).

    Runs a BFS (with wrap-around indexing) over border pixels of the
    segment, recording parent links and path lengths; collects the BFS
    endpoints (visited pixels that never became a parent), keeps the
    up-to-4 longest endpoint paths, picks the pair whose combined pixel
    set is largest, and joins those two paths into a single sequence.
    """
    sx = px
    sy = py
    s = segments.shape
    seg = segments[py,px]
    visited = np.zeros(s, dtype=bool)
    queued = np.zeros(s, dtype=bool)
    lengths = np.zeros(s, dtype=int)
    # parent[y, x] holds the BFS predecessor; (-1, -1) marks "no parent".
    parent = np.zeros((s[0], s[1], 2), dtype=int)
    for i in range(0, s[0]):
        for j in range(0, s[1]):
            parent[i,j] = np.array([-1,-1])
    q = deque()
    q.append((py,px))
    c = 1
    # BFS; `c` mirrors the number of queued elements.
    while c > 0:
        (qy,qx) = q.popleft()
        visited[qy,qx] = True
        queued[qy,qx] = False
        c -= 1
        for i in range(-1,2):
            for j in range(-1,2):
                y = (qy+i)%s[0]
                x = (qx+j)%s[1]
                if segments[y,x] == seg and border[y,x] and not(visited[y,x]) and not(queued[y,x]):
                    # print ("Dodano: ({},{})".format(x,y))
                    q.append((y,x))
                    c += 1
                    # print("Liczba elementow: {}".format(c))
                    queued[y,x] = True
                    parent[y,x] = np.array([qy,qx])
                    lengths[y,x] = lengths[qy,qx] + 1
    # A BFS endpoint is a visited pixel that is nobody's parent.
    was_parent = np.zeros(s, dtype=bool)
    for i in range(0, s[0]):
        for j in range(0, s[1]):
            if visited[i,j]:
                (py,px) = parent[i,j]
                was_parent[py,px] = True
    path_endings = []
    for i in range(0, s[0]):
        for j in range(0, s[1]):
            if visited[i,j] and not(was_parent[i,j]):
                path_endings.append((i,j,lengths[i,j]))
    print("Start: ({},{})".format(sx,sy))
    print("Path endings: {}".format(path_endings))
    # We have the endings: take the longest paths and, if we can (and
    # should, i.e. they are neighbours), join them into a single cycle.
    size = -1
    mp1 = (-1,-1)
    mp2 = (-1,-1)
    pendings = []
    mapping = {}
    # This part is probably among the slower ones; either reduce the
    # number of segments or reduce the number of candidates (keep only
    # the x largest ...).
    counter = 0
    cMax = 4
    for (ey,ex,el) in sorted(path_endings,key=th,reverse=True):
        if counter >= cMax:
            break
        l = reconstruct_path(ex,ey, parent)
        # NOTE: `s` is re-bound here from the shape tuple to a set.
        s = set(l)
        pendings.append(((ey,ex),s))
        mapping[(ey,ex)] = l
        counter += 1
    # Pick the endpoint pair whose union covers the most pixels.
    for (p1,s1) in pendings:
        for (p2,s2) in pendings:
            if len(s1) + len(s2) <= size: # no point checking this pair
                continue
            s = s1.union(s2)
            n = len(s)
            if n > size:
                mp1 = p1
                mp2 = p2
                size = n
    (p1y,p1x) = mp1
    (p2y,p2x) = mp2
    l1 = mapping[mp1]
    l2 = mapping[mp2]
    # Orient the first path so the two halves connect end-to-end.
    if distance(mp1,mp2) < distance(mp1,(sy,sx)):
        l1.reverse()
    print ("p1: {}, p2: {}".format(mp1,mp2))
    return (l1 + l2)
def convert_mask_to_border_paths(segments, border):
    """Trace one border path per segment id present in `segments`,
    starting from the first border pixel found in a row-major scan.

    Returns a dict mapping segment id -> list of (y, x) border points.
    """
    shape = segments.shape
    seen = np.zeros(np.max(segments) + 1, dtype=bool)
    paths = {}
    for i in range(shape[0]):
        for j in range(shape[1]):
            seg = segments[i, j]
            if border[i, j] and not seen[seg]:
                seen[seg] = True
                # border_path(x, y, ...) — note the (column, row) order.
                paths[seg] = border_path(j, i, segments, border)
    return paths
def border_points(bpath, my, mx):
    """Pick the radius-extremal points of a closed border path.

    For each point, the distance to the centre (my, mx) is compared with
    the distances of its neighbours within +/-`window` positions (the
    path is treated as cyclic). Points whose radius is a local minimum
    or local maximum (within `eps`) are kept.
    """
    eps = 0.001
    n = len(bpath)
    points = []
    centre = (my, mx)
    window = 5
    for i in range(n):
        p = bpath[i]
        r = distance(p, centre)
        rmin = r
        rmax = r
        for j in range(-window, window + 1):
            rq = distance(bpath[(i + j) % n], centre)
            rmin = min(rmin, rq)
            rmax = max(rmax, rq)
        # Local extremum of the radius -> keep the point.
        if abs(rmin - r) < eps or abs(rmax - r) < eps:
            points.append(p)
    return points
    # BUGFIX: a second, unreachable `return points` used to follow here.
|
from itertools import izip
def step_through_with(s):
    """Return True when `s` contains two equal adjacent elements.

    Uses the builtin `zip` instead of the Python-2-only
    `itertools.izip`, so the function also works on Python 3; `any`
    short-circuits exactly like the original early return.
    """
    return any(a == b for a, b in zip(s, s[1:]))
|
# Global counters: `comp` accumulates per-sort comparison counts,
# `cont` sums them over repeated runs (see the driver loop below).
comp = 0
cont = 0
def merge_sort(lista):
    """Sort `lista` in place (top-down merge sort) and return it.

    Counts operations in the global `comp` and prints the result once
    the list length equals the global `vetor` (the top-level call).
    NOTE(review): `comp` is also incremented while copying leftover
    elements, so it overcounts pure comparisons — confirm whether moves
    were meant to be counted too.
    """
    global comp
    if len(lista) > 1:
        mid = len(lista) // 2
        L = lista[:mid]
        R = lista[mid:]
        merge_sort(L)
        merge_sort(R)
        # Merge the two sorted halves back into `lista`.
        i = j = k = 0
        while i < len(L) and j < len(R):
            if L[i] < R[j]:
                lista[k] = L[i]
                i += 1
            else:
                lista[k] = R[j]
                j += 1
            comp += 1
            k += 1
        # Copy any leftovers from either half.
        while i < len(L):
            lista[k] = L[i]
            i += 1
            k += 1
            comp += 1
        while j < len(R):
            lista[k] = R[j]
            j += 1
            k += 1
            comp += 1
    # Only the outermost call (full length == vetor) reports.
    if len(lista) == vetor:
        print("Lista ordenada:", lista)
        print("Comparações:", comp)
    return lista
def gerar(tamanho):
    """Build a list of `tamanho` random integers in [0, 1000000], print
    it, and return the merge-sorted result.

    BUGFIX: the parameter was previously named ``int``, shadowing the
    builtin; callers invoke the function positionally, so the rename is
    backward-compatible.
    """
    from random import randint
    # Same randint call sequence as the original fill-by-index loop.
    resposta = [randint(0, 1000000) for _ in range(tamanho)]
    print("Lista não ordenada:", resposta, "\n")
    return merge_sort(resposta)
# Interactive driver: ask for a menu option (1-5) until valid.
print("Qual o tamanho do vetor:")
print("1 - 5\n2 - 10\n3 - 100\n4 - 1000\n5 - 10000\n")
vetor = int(input())
while vetor < 1 or vetor > 5:
    print("Opção invalida.")
    print("Qual o tamanho do vetor:")
    print("1 - 5\n2 - 10\n3 - 100\n4 - 1000\n5 - 10000\n")
    vetor = int(input())
# Map the menu option to the actual list size.
if vetor == 1:
    vetor = 5
elif vetor == 2:
    vetor = 10
elif vetor == 3:
    vetor = 100
elif vetor == 4:
    vetor = 1000
else:
    vetor = 10000
# Run repeated sorts and average the comparison counts.
# NOTE(review): the loop runs 51 times but `cont` picks up each run's
# `comp` only on the *next* iteration, so the last run's count is
# dropped and exactly 50 counts are averaged — confirm intent.
for x in range(51):
    cont += comp
    comp = 0
    gerar(vetor)
print("-" * 100)
print("Media de comparações:", cont / 50)
|
from licant import *
# licant build definitions for the gxx.dprint debug-print module and
# its selectable backend implementations.

# Common sources shared by every dprint backend.
module("gxx.dprint.common", "impl",
    sources=["dprint_func_impl.c", "dprintxx.cpp"],
    mdepends=["gxx.printf"]
)

# "stub" backend (built from the *_stub.c sources).
implementation("gxx.dprint", "stub",
    sources = "dprint_func_stub.c dprint_stub.c dprintxx.cpp".split(" ")
)

# "diag" backend; depends on the gxx.diag module.
implementation("gxx.dprint", "diag",
    sources = ["dprint_diag.c"],
    cc_flags = "-Wno-pointer-to-int-cast",
    mdepends = [
        "gxx.diag",
        ("gxx.dprint.common", "impl")
    ],
)

# "manually" backend.
implementation("gxx.dprint", "manually",
    sources = ["dprint_manually.c"],
    mdepends = [("gxx.dprint.common","impl")],
)

# "stdout" backend.
implementation("gxx.dprint", "stdout",
    sources = ["dprint_stdout.c"],
    mdepends = [("gxx.dprint.common","impl")],
)

# Default implementation when none is selected explicitly.
module_defimpl("gxx.dprint", "stdout")
"""
在遍历列表时,可以获取列表每个元素的值及其所在下标位置。
"""
numbers = [10, 29, 30, 41]
for index, value in enumerate(numbers):
print(index, value)
# 双击xx.py文件也可以执行文件,加入下面的语句后,在文件执行完毕后会停留在命令窗口中,直到按下enter键
input("Press <enter>")
|
from rest_framework import generics
from api.models import Customer
from rest_framework.response import Response
from api.serializers import CustomerSerializer
from authentication.serializers import UserSerializer
from authentication.models import User
from rest_framework.views import APIView
from django.db.models import Q
import json
from socketio_app.views import sio
class CustomerView(generics.ListCreateAPIView):
    """
    Api for create and list customers
    """
    serializer_class = CustomerSerializer

    # Payload fields copied verbatim into the new Customer.
    _FIELDS = ('first_name', 'last_name', 'email', 'company', 'phone',
               'address', 'apartment', 'city', 'country', 'region',
               'postal_code', 'image')

    def get_queryset(self):
        """Limit the listing to customers owned by the requesting user."""
        return Customer.objects.filter(owner=self.request.user.id)

    def post(self, request, *args, **kwargs):
        """Create a customer from the payload and broadcast the change
        over the socket.io channel."""
        user = self.request.user
        # Missing keys raise KeyError, matching the original behaviour.
        field_values = {name: self.request.data[name] for name in self._FIELDS}
        customer = Customer.objects.create(owner=user, **field_values)
        customer_serializer = CustomerSerializer(customer)
        sio.emit('create_change_customer', {
            'data': {
                'state': 'created',
                'customer': customer_serializer.data,
                'user': {
                    'email': user.email,
                    'id': user.id
                }
            }
        }, namespace='/test')
        return Response(customer_serializer.data)
class CustomerUpdateView(generics.UpdateAPIView):
    """
    Api for updating customer
    """
    serializer_class = CustomerSerializer

    # Payload fields that may be written onto the customer.
    _FIELDS = ('first_name', 'last_name', 'email', 'company', 'phone',
               'address', 'apartment', 'city', 'country', 'region',
               'postal_code', 'image')

    def get_queryset(self):
        # BUGFIX: this previously assigned customer_id and implicitly
        # returned None, breaking any DRF machinery relying on the
        # queryset; return the matching queryset instead.
        return Customer.objects.filter(pk=self.kwargs['pk'])

    def update(self, request, *args, **kwargs):
        """Apply every truthy field from the payload to the customer and
        broadcast the change over the socket.io channel.

        Falsy payload values (e.g. empty strings) leave the stored field
        untouched, matching the original per-field `if` checks.
        """
        customer_id = self.kwargs['pk']
        user = self.request.user
        customer = Customer.objects.filter(pk=customer_id).first()
        for name in self._FIELDS:
            value = self.request.data[name]
            if value:
                setattr(customer, name, value)
        customer.save()
        customer_serializer = CustomerSerializer(customer)
        sio.emit('create_change_customer', {
            'data': {
                'state': 'updated',
                'customer': customer_serializer.data,
                'user': {
                    'email': user.email,
                    'id': user.id
                }
            }
        }, namespace='/test')
        return Response(customer_serializer.data)
class CustomerDeleteView(generics.DestroyAPIView):
    """
    Api for deleting customer
    """
    serializer_class = CustomerSerializer

    def get_queryset(self):
        # NOTE(review): emitting the 'deleted' socket event from
        # get_queryset() fires whenever the queryset is built — i.e.
        # before (and regardless of whether) the deletion actually
        # succeeds. Consider moving the emit into perform_destroy();
        # confirm against DRF's DestroyAPIView flow before changing.
        customer_id = self.kwargs['pk']
        user = self.request.user
        queryset = Customer.objects.filter(pk=customer_id)
        customer = Customer.objects.filter(pk=customer_id).first()
        customer_serializer = CustomerSerializer(customer)
        sio.emit('create_change_customer', {
            'data': {
                'state': 'deleted',
                'customer': customer_serializer.data,
                'user': {
                    'email': user.email,
                    'id': user.id
                }
            }
        }, namespace='/test')
        return queryset
class SearchCustomerView(APIView):
    """
    search user by username or email
    """

    def get(self, request, *args, **kwargs):
        """Return users whose email contains the JSON filter's `arg`,
        excluding the requesting user."""
        requester_id = self.request.user.id
        # Local renamed from `filter`, which shadowed the builtin.
        criteria = json.loads(request.query_params['filter'])
        matches = (User.objects
                   .filter(Q(email__icontains=criteria['arg']))
                   .exclude(id=requester_id)
                   .all())
        return Response(UserSerializer(matches, many=True).data)
|
import numpy as np
import math
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import random
plt.switch_backend('Qt4Agg')
import os
#from scipy import stats
# Mean vertical force and separation z harvested from each run directory.
f_list = []
z_list = []
dir_list = list()
# Every subdirectory holding a force.txt is one measurement at some z.
for root, dirs, files in os.walk(".", topdown=False):
    for name in dirs:
        if os.path.isfile(name+"/force.txt"):
            dir_list.append(os.path.join(root, name))
for z in dir_list:
    fname=z+'/force_ave.txt'
    data1 = np.genfromtxt(fname, skip_header=2, skip_footer=0)
    f = data1[3]
    f_list.append(np.mean(f))
    # Paths look like "./<z>": strip the leading "./" to recover the
    # numeric separation — TODO confirm the directory naming convention.
    z_list.append(float(z[2:]))
# Sort the force-vs-z samples by z.
zz = np.array(z_list)
ff = np.array(f_list)
inds = zz.argsort()
zsort = zz[inds]
fsort = ff[inds]
# Free energy: integrate the force over z (the 5 smallest-z points are
# excluded — presumably the contact region; confirm).
FE=np.trapz(fsort[5:], zsort[5:])
print("FE: ", FE)
# Brush monomer and chain-end density profiles.
fname1='../brush_profiles/brush_dens.txt'
data1 = np.loadtxt(fname1, skiprows=4)
z1 = data1[:,1]
g1 = data1[:,2]
fname2='../brush_profiles/end_dens.txt'
data2 = np.loadtxt(fname2, skiprows=4)
z2 = data2[:,1]
g2 = data2[:,2]
# Histogram normalisation: observations x surface area x bin width.
binsz = z1[1]-z1[0]
surface = (2*7.87500*2*9.09325)
N_obs = 10000
h = 40.0
norm = N_obs*surface*binsz
avg_z=0
norm_z=0
# Disabled brush-height estimate kept for reference:
#for i in range(len(z1)):
#    avg_z += g1[i]*i*binsz
#    norm_z += g1[i]
#
#
#print "h: ", 8.0/3.0*avg_z/norm_z
x1=[0, 25]
# Left axis: density profiles; right axis: measured vertical force.
fig, ax1 = plt.subplots()
t = np.arange(0.01, 10.0, 0.01)
ax1.plot(z1,g1/norm,lw=4.1,color='b',label='rho(z), brush')
ax1.plot(z2,g2/norm*10.0,lw=4.1,color='g',label='rho(z) x 10.0, ends')
ax1.legend(frameon=False, loc="upper left")
ax1.set_xlabel(r'$z$')
ax1.set_ylim((0,max(g1/norm)*1.25))
# Make the y-axis label, ticks and tick labels match the line color.
ax1.set_ylabel(r'$\rho(z)$', color='b')
ax1.tick_params('y', colors='b')
ax2 = ax1.twinx()
ax2.scatter(z_list, f_list, label='vertical force', lw=2.6, color="red")
ax2.plot(zsort[0:], fsort[0:], lw=1.2,linestyle="--", color="red")
ax2.set_ylabel(r'$f_z$', color='r')
ax2.set_ylim((0,max(f_list)*1.25))
ax2.tick_params('y', colors='r')
ax2.legend(frameon=False, loc="upper right")
ax1.set_xlim((0,25))
plt.rcParams.update({'font.size': 14})
fig.tight_layout()
plt.savefig("brush_force.png", dpi=200)
plt.show()
|
import os
import logging
class Config:
smtp_server = os.environ.get('SMTP_SERVER', 'smtp.gmail.com')
smtp_port = int(os.environ.get('SMTP_PORT', 587))
use_starttls = os.environ.get('USE_STARTTLS', True)
smtp_username = os.environ.get('SMTP_USERNAME')
smtp_password = os.environ.get('SMTP_PASSWORD')
from_email = os.environ.get('FROM_EMAIL', smtp_username)
debug = os.environ.get('DEBUG', True)
log_level = os.environ.get('LOG_LEVEL', 'INFO')
debug = True if log_level == 'DEBUG' else False
# Create the application-wide (root) logger at the configured level.
log = logging.getLogger()
log.setLevel(Config.log_level)
# Console handler at the same level as the logger.
ch = logging.StreamHandler()
ch.setLevel(Config.log_level)
# Timestamped format: time - logger name - level - message.
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# Attach the formatter to the handler and the handler to the logger.
ch.setFormatter(formatter)
log.addHandler(ch)
|
import matplotlib.pyplot as plt
import numpy as np
import uncertainties.unumpy as unp
import scipy.constants as con
from scipy.optimize import curve_fit
from scipy import stats
from uncertainties import ufloat
# (not yet configurable via matplotlibrc)
# Distance between the screen and the slit aperture.
l = 1 # m
# Laser wavelength.
a=633e-9 #m
# sin(phi) is approximated as sin(x/l); exactly it would be
# x / sqrt(x**2 + l**2) (opposite over hypotenuse).
############################## Einzelspalt 1 ###################################
# Single-slit intensity distribution.
def f(x, A_0, b):
    """Single-slit Fraunhofer intensity at screen position x for
    amplitude A_0 and slit width b."""
    sin_phi = np.sin(x / l)
    envelope = (a / (np.pi * b * sin_phi)) ** 2
    return A_0 ** 2 * b ** 2 * envelope * np.sin(np.pi * b * sin_phi / a) ** 2
# Screen positions for the smooth model curves.
xx=np.linspace(-11e-3,11e-3,10000)
x1,I1 = np.genfromtxt('data/e1.txt', unpack=True)
# Subtract the offset (dark) current; the power of ten bridges the
# micro- vs nano-ampere scales.
I1=I1-0.15e-3
# Convert to SI units.
x1=x1*1e-3 #m
I1=I1*1e-6 #A (effectively unitless here)
# Fit the single-slit model; initial guess A_0=5, b=0.4 mm.
params1,cov1= curve_fit(f,x1,I1,p0=(5,0.4e-3))
err1 = np.sqrt(np.diag(cov1))
print('E1\n')
print('A_0: ',params1[0],'\pm',err1[0],'b/mm: ',params1[1]*1e3,'\pm',err1[1])
# Fourier-transform form of the single-slit intensity (equivalent fit
# model to f()).
def furE(x, A_0, b):
    """Single-slit intensity written via the Fourier transform of the
    aperture."""
    prefactor = (4 * A_0 ** 2 * a ** 2) / (4 * np.pi ** 2 * (x / l) ** 2)
    return prefactor * np.sin((2 * np.pi * b * x / l) / (a)) ** 2
# Fourier-model fit (its printout is currently disabled below).
fparams1,fcov1=curve_fit(furE,x1,I1,p0=(5,0.4e-3))
ferr1=np.sqrt(np.diag(fcov1))
#print('\nFurier E1\nA_0= ',fparams1[0],'\pm',ferr1[0],'\nb: ',fparams1[1],'\pm',ferr1[1])
plt.plot(xx,f(xx,*params1)*1e6, label='Regression')
#plt.plot(xx,furE(xx,*fparams1),label='Furier')
# Theory curve: nominal slit width b=0.4 mm with the fitted A_0.
plt.plot(xx,f(xx,params1[0],0.4e-3)*1e6, '--', color='#e86143', label='Theorie')
plt.plot(x1,I1*1e6, 'kx', markersize=5, label='Messwerte')
plt.grid(True)
#plt.axis([-0.011,0.011,-0.0000005,0.000006])
plt.xlabel(r'$x \:/\: \si{\meter}$')
plt.ylabel(r'$I \:/\: \si{\micro\ampere}$')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('plots/e1.pdf')
plt.close()
############################## Single slit 2 ###################################
xxx=np.linspace(-26e-3,26e-3,10000)
x2, I2 = np.genfromtxt('data/e2.txt', unpack=True)
I2=I2-0.15 # subtract offset current (both values in nano here)
# convert to SI
x2=x2*1e-3 #m
I2=I2*1e-9 #A (effectively unitless from here on)
params2,cov2= curve_fit(f,x2,I2,p0=(5,0.075e-3))
err2 = np.sqrt(np.diag(cov2))
print('E2\n')
print('A_0: ',params2[0],'\pm',err2[0],'b/mm: ',params2[1]*1e3,'\pm',err2[1])
# Fourier fit with the same model as for slit 1.
fparams2,fcov2=curve_fit(furE,x2,I2,p0=(5,0.075e-3))
ferr2=np.sqrt(np.diag(fcov2))
# in SI
#print('\nFurier E2\nA_0= ',fparams2[0],'\pm',ferr2[0],'\nb: ',fparams2[1],'\pm',ferr2[1])
plt.plot(xxx,f(xxx,*params2)*1e6, label='Regression')
# Theory curve: nominal b=0.075mm, fitted amplitude A_0=params2[0]
#plt.plot(xxx,furE(xxx,*fparams2),label='Furier')
plt.plot(xxx,f(xxx,params2[0],0.075e-3)*1e6, '--', color='#e86143', label='Theorie')
plt.plot(x2,I2*1e6, 'kx', markersize=5, label='Messwerte')
plt.grid(True)
#plt.axis([-0.026,0.026,-0.00000001,0.00000016])
plt.xlabel(r'$x \:/\: \si{\meter}$')
plt.ylabel(r'$I \:/\: \si{\micro\ampere}$')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('plots/e2.pdf')
plt.close()
# sin(phi) = x/l = x/np.sqrt(x**2+l**2): sine is opposite over hypotenuse
############################## Double slit #####################################
# Double-slit intensity: s is the slit separation, b the slit width.
def g(x,A_0,b,s):
    return 4 * A_0 * np.cos(np.pi * s *(np.sin(x/l)) / a)**2 * (a/(np.pi * b * (np.sin(x/l))))**2 * np.sin(np.pi * b * (np.sin(x/l))/ a)**2
xd, Id = np.genfromtxt('data/d.txt', unpack=True)
Id=Id-0.15e-3 # subtract offset current (power of ten is the micro/nano difference)
# convert to SI
xd=xd*1e-3 #m
Id=Id*1e-6 #A (effectively unitless from here on)
params3,cov3=curve_fit(g,xd,Id,p0=(18,0.15e-3,0.5e-3))
err3=np.sqrt(np.diag(cov3))
print('\n\nDS\n')
print('A_0: ',params3[0],'\pm',err3[0],'b/mm: ',params3[1]*1e3,'\pm',err3[1],'\ns/mm: ',params3[2]*1e3,'\pm',err3[2])
plt.plot(xxx,g(xxx,*params3)*1e6,label='Regressions')
# Theory curve: nominal b=0.15mm, s=0.5mm, fitted amplitude.
plt.plot(xxx,g(xxx,params3[0],0.15e-3,0.5e-3)*1e6, '--', color='#e86143', label='Theorie')
plt.plot(xd, Id*1e6, 'kx', markersize=5, label='Messwerte')
plt.grid(True)
#plt.axis([-0.026,0.026,-0.0000001,0.0000022])
plt.xlabel(r'$x \:/\: \si{\meter}$')
plt.ylabel(r'$I \:/\: \si{\micro\ampere}$')
plt.legend(loc='best')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('plots/d.pdf')
plt.close()
# Discussion: relative deviations of fitted vs. nominal parameters.
# Deviation in A_0 (direct fit vs. Fourier fit)
A=np.array([params1[0],params2[0]])
fA=np.array([fparams1[0],fparams2[0]])
dA=(A-fA)/fA
print('\n\nFehler in A :',dA)
# Deviation in b (nominal slit widths: 0.4, 0.075, 0.15 mm)
tb=np.array([0.4e-3,0.075e-3,0.15e-3])
b=np.array([params1[1],params2[1],params3[1]])
db=(tb-b)/tb
ds=(0.5e-3-params3[2])/0.5e-3
print('\nFehler b_exp: ',db,'\nds',ds)
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\quocs\Desktop\design.ui'
#
# Created by: PyQt5 UI code generator 5.7.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """UI layout for the main window (originally generated by pyuic5 5.7.1
    from design.ui).

    NOTE(review): the file header marks this as auto-generated; prefer
    regenerating from the .ui file. The repeated widget boilerplate is
    factored into helpers here, preserving widget creation order and every
    object name so connectSlotsByName and external lookups keep working.
    """

    def _make_text_edit(self, name, rect):
        """Create a QTextEdit on the central widget at *rect* with objectName *name*."""
        edit = QtWidgets.QTextEdit(self.centralwidget)
        edit.setGeometry(rect)
        edit.setObjectName(name)
        return edit

    def _make_label(self, name, rect):
        """Create a 20pt QLabel on the central widget at *rect* with objectName *name*."""
        label = QtWidgets.QLabel(self.centralwidget)
        label.setGeometry(rect)
        font = QtGui.QFont()
        font.setPointSize(20)
        label.setFont(font)
        label.setObjectName(name)
        return label

    def setupUi(self, MainWindow):
        """Build every widget of the main window and wire up slot names."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(869, 593)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # Photo display area.
        self.img_person = QtWidgets.QGraphicsView(self.centralwidget)
        self.img_person.setGeometry(QtCore.QRect(550, 30, 261, 221))
        self.img_person.setObjectName("img_person")
        # Text fields (name, student id, faculty, machine info).
        self.txt_name = self._make_text_edit("txt_name", QtCore.QRect(190, 40, 291, 41))
        self.txt_mssv = self._make_text_edit("txt_mssv", QtCore.QRect(190, 90, 291, 41))
        self.txt_khoa = self._make_text_edit("txt_khoa", QtCore.QRect(190, 150, 291, 41))
        self.txt_computer = self._make_text_edit("txt_computer", QtCore.QRect(190, 210, 291, 41))
        self.txt_uuid = self._make_text_edit("txt_uuid", QtCore.QRect(190, 390, 291, 41))
        self.txt_memory = self._make_text_edit("txt_memory", QtCore.QRect(190, 460, 291, 41))
        # Field labels (all 20pt).
        self.name = self._make_label("name", QtCore.QRect(30, 40, 111, 41))
        self.mssv = self._make_label("mssv", QtCore.QRect(30, 90, 111, 41))
        self.khoa = self._make_label("khoa", QtCore.QRect(30, 150, 111, 41))
        self.computer = self._make_label("computer", QtCore.QRect(30, 210, 141, 41))
        self.uuid = self._make_label("uuid", QtCore.QRect(30, 390, 111, 41))
        self.memory = self._make_label("memory", QtCore.QRect(30, 460, 111, 41))
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 869, 21))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        # self.txt_memory.setDisabled(True)
        # self.txt_uuid.setDisabled(True)
        # self.txt_computer.setDisabled(True)
        # self.txt_khoa.setDisabled(True)
        # self.txt_mssv.setDisabled(True)
        # self.txt_name.setDisabled(True)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set the (re)translatable text on the window title and labels."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
        self.name.setText(_translate("MainWindow", "Name"))
        self.mssv.setText(_translate("MainWindow", "MSSV"))
        self.khoa.setText(_translate("MainWindow", "Khoa"))
        self.computer.setText(_translate("MainWindow", "Computer"))
        self.uuid.setText(_translate("MainWindow", "UUID"))
        self.memory.setText(_translate("MainWindow", "Memory"))
|
def classify_time(clock):
    """Return the Korean AM/PM sentence for an HHMM integer like 930 or 1230.

    hour >= 12 is afternoon (오후), otherwise morning (오전). Minutes above
    59 yield an empty string (the original printed a blank line for them).
    Bug fix: the original extracted the hour with str(clock)[:-2], which
    raised ValueError (int('')) for inputs below 100; divmod handles those
    as hour 0 plus minutes.
    """
    hour, minute = divmod(clock, 100)
    if minute <= 59:
        period = '오후' if hour >= 12 else '오전'
        return '%d시 %d분은 %s입니다' % (hour, minute, period)
    return ''


if __name__ == '__main__':
    # Same interactive behaviour as the original script; the guard also
    # makes the module importable (and testable) without blocking on input().
    clock = int(input('what time is it?'))
    print(classify_time(clock))
|
import unittest
import numpy as np
from crystalz.preprocessing.overlaps import *
class TestOverlaps(unittest.TestCase):
    def test_augmentation_of_atoms(self):
        """repeat_once must tile the atoms across all 27 lattice offsets."""
        centers = np.array([
            [0, 0, 0],
            [.1, 0, -.2],
        ])
        radii = np.array([1, 2])
        atoms = 'Kinds are ignored', centers, radii
        vectors = np.array([
            [1.1, 0, 0],
            [0, 1.2, 0],
            [0, 0, 1.3]
        ])
        # Expected positions: every center shifted by k·v for k in {-1,0,1}^3.
        # With the orthogonal vectors above this closed form is easy to check
        # by hand (the non-orthogonal case is not covered here).
        offsets = [(k1, k2, k3)
                   for k1 in (-1, 0, 1)
                   for k2 in (-1, 0, 1)
                   for k3 in (-1, 0, 1)]
        expected_centers = [[x + 1.1 * k1, y + 1.2 * k2, z + 1.3 * k3]
                            for (k1, k2, k3) in offsets
                            for x, y, z in centers]
        expected_radii = [1, 2] * 27
        centers_augmented, radii_augmented = repeat_once(atoms, vectors)
        self.assertIsInstance(centers_augmented, np.ndarray)
        self.assertIsInstance(radii_augmented, np.ndarray)
        np.testing.assert_allclose(expected_radii, radii_augmented)
        np.testing.assert_allclose(expected_centers, centers_augmented)
|
# -*- coding: utf-8 -*-
# 开发人员 :黎工
# 开发时间 :2020/7/15 8:35
# 文件名称 :flask_converter.PY
# 开发工具 :PyCharm
from flask import Flask
from werkzeug.routing import BaseConverter
# Flask application instance for this converter demo.
app = Flask(__name__)
# 1. Define our own URL converter class.
class RegexConverter(BaseConverter):
    """URL converter whose matching pattern is supplied per-route."""

    def __init__(self, url_map, regex):
        """Store the route-supplied pattern.

        Flask/werkzeug reads self.regex when compiling the route, so
        assigning it here is all the "matching" work this class needs.
        """
        super(RegexConverter, self).__init__(url_map)
        self.regex = regex
# 2. Register the converter with the Flask app under the name 'reg'.
app.url_map.converters['reg'] = RegexConverter
# Raw string fixes the invalid escape sequence \d in a non-raw literal
# (a DeprecationWarning today, a SyntaxError in future Python); the route
# text itself is unchanged byte-for-byte.
@app.route(r"/send/<reg(r'1[35678]\d{9}'):mobile>")
def send_msn(mobile):
    """Demo endpoint: echo the mobile number captured by the regex converter."""
    return "send msn to %s" % mobile
if __name__ == '__main__':
    # url_map lists every route registered on the Flask app.
    print(app.url_map)
    app.run(debug=True)
import telebot
import time
from telebot import types
import datetime as dt
import sqlite3 as sql
def file_exists(s):
    """Return True if the file named *s* can be opened for reading.

    Deliberately kept as an open()-based probe (not os.path.exists) so that
    permission problems also count as "missing", matching the original
    behaviour; the failure reason is printed for bot-side debugging.
    Fixes: removed the dead `s.encode('utf-8')` call whose result was
    discarded, and used a context manager instead of manual close().
    """
    try:
        with open(s, 'r'):
            pass
    except IOError as e:
        print(e)
        return False
    return True
def sm(bot, chat, s, markup):
    """Best-effort send_message wrapper: failures are printed, never raised."""
    try:
        bot.send_message(chat, s, reply_markup=markup)
    except Exception as err:
        print(err)
# Bot wiring. "TOKEN" is a placeholder — the real token must be supplied
# before deployment.
bot = telebot.TeleBot("TOKEN")
cnt = 1
# Main menu keyboard: homework, notes, timetable, account actions.
markup = types.ReplyKeyboardMarkup(True, False,row_width=1)
get_ht = types.KeyboardButton(text='Узнать домашнее задание')
add_ht = types.KeyboardButton(text='Добавить домашнее задание')
get_nt = types.KeyboardButton(text='Посмотреть заметку')
add_nt = types.KeyboardButton(text='Добавить заметку')
change_gr = types.KeyboardButton(text='Сбросить настройки аккаунта')
c_dev = types.KeyboardButton(text='Связаться с разработчиком')
get_tt = types.KeyboardButton(text = 'Узнать расписание')
markup.row(get_ht, add_ht)
markup.row(get_nt, add_nt)
markup.row(get_tt)
markup.row(change_gr, c_dev)
# Secondary keyboard with a single "back" button for multi-step dialogues.
bckbtn = types.KeyboardButton(text="Вернуться назад")
mark = types.ReplyKeyboardMarkup(True, False, row_width = 1)
mark.row(bckbtn)
# Known school groups (used to validate registration input in reg_lc).
gr_arr = ["10В1","10В2","10Г1","10Г2","10Г3","10Г4","10Г5","10Д1","10Д2","10МИ1","10МИ2","10МИ3","10МИ4","10МИ5","10МЭ1","10МЭ2","10МЭ3","10МЭ4","10МЭ5","10МЭ6","10П1","10П2","10СЭ1","10СЭ2","10СЭ3","10СЭ4","10СЭ5","10Ю1","10Ю2","11В1","11В2","11Г1","11Г2","11Г3","11Г4","11Г5","11Г6","11Д1","11Д2","11МИ1","11МИ2","11МИ3","11МЭ1","11МЭ2","11МЭ3","11МЭ4","11МЭ5","11МЭ6", "11П1","11П2","11СЭ1","11СЭ2","11СЭ3","11СЭ4","11СЭ5","11СЭ6","11СЭ7","11Ю1","11Ю2"]
# English ordinal ids for lessons (NOTE(review): not referenced anywhere in
# this chunk of the file — confirm it is used elsewhere before removing).
lesson_id = ['first','second','third','fourth','fifth','sixth','seventh','eighth','ninth','tenth','eleventh','twelvth','thirteenth','fourteenth','fifteenth','sixteenth','eighteenth','nineteenth','twentieth','twentyfirst','twentysecond','twentythird','twentyfourth','twentyfifth','twentysixth','twentyseventh','twentyeighth','twentyninth','thirtieth','thirtyfirst','thirtysecond','thirtythird','thirtyfourth','thirtyfifth','thirtysixth','thirtyseventh','thirtyeighth','thirtyninth','fortyth','fortyfirst','fortysecond','fortythird','fortyfourth','fortyfifth','fortysixth','fortyseventh','fortyeighth','fortyninth','fiftyth']
def day_of_week(d):
    """Return the Russian weekday name for date *d* (Monday-first order)."""
    names = ("Понедельник", "Вторник", "Среда", "Четверг", "Пятница", "Суббота", "Воскресенье")
    return names[d.weekday()]
@bot.message_handler(commands=['start', 'help'])
def send_welcome(message):
    """Handle /start and /help: ask the user for their group name (e.g. 11МИ3)."""
    sm(bot,message.chat.id,'Приветствую, введите Вашу группу в формате 11МИ3',markup)
@bot.message_handler(content_types = ['photo'])
def photo(message):
    """Attach an uploaded photo as homework content.

    Reads the per-chat state file <chat_id>_tmp.txt; only when the dialogue
    has reached step 4 (command, date, weekday, target path) is the photo
    downloaded and stored as <target>.jpg, overwriting any previous one.
    """
    global markup
    tmp = []
    # NOTE(review): `line != "" or line != "\n"` is always True, so every
    # line (including blanks) is appended — confirm this is intentional.
    f = open(str(message.chat.id)+"_tmp.txt","r")
    for line in f:
        if line != "" or line != "\n":
            tmp.append(line[:-1])
            print(line)
    f.close()
    if len(tmp) == 4:
        # tmp[3] holds the "<group>_<date>_<lesson>" target path.
        file_info = bot.get_file(message.photo[-1].file_id)
        down_file = bot.download_file(file_info.file_path)
        print(file_info)
        src = tmp[3]
        with open(src+".jpg","wb") as new_file:
            new_file.write(down_file)
        # Reset the dialogue state after a successful upload.
        f = open(str(message.chat.id)+"_tmp.txt","w")
        f.write("")
        f.close()
        bot.send_message(message.chat.id, "Фотография успешно добавлена", reply_markup = markup)
@bot.message_handler(content_types = ['text'])
def text(message):
    """Main dialogue dispatcher for text messages.

    The multi-step dialogue state is persisted in <chat_id>_tmp.txt, one
    token per line: [command, date, weekday, target-path]. The length of
    that list decides which step the conversation is in. Unregistered
    chats are diverted to reg_lc first.
    """
    global markup
    global mark
    tmp = []
    conn = sql.connect("user_info")
    c = conn.cursor()
    ch = False
    c.execute('''CREATE TABLE IF NOT EXISTS users(
    ids text primary key, gr text)''')
    print(message.chat.id)
    # Linear scan for this chat id; ch stays False for unknown users.
    for i in c.execute("select ids from users"):
        for j in range(len(i)):
            if int(i[0]) == message.chat.id:
                ch = True
    if not ch:
        # Unknown chat: treat this message as a registration attempt.
        reg_lc(bot,message,conn,c)
        return False
    # Load the pending dialogue state.
    # NOTE(review): `line != "" or line != "\n"` is always True, so blank
    # lines are appended too — confirm this is intentional.
    f = open(str(message.chat.id)+"_tmp.txt","r")
    for line in f:
        if line != "" or line != "\n":
            tmp.append(line[:-1])
            print(line)
    f.close()
    if message.text == "Вернуться назад":
        # "Back" button: wipe the dialogue state and show the main menu.
        f = open(str(message.chat.id)+"_tmp.txt","w")
        f.write("")
        f.close()
        sm(bot,message.chat.id, "Чем могу помочь?", markup)
    elif len(tmp) == 1:
        # Step 2: a command is pending; this message should be a DD.MM.YYYY date.
        try:
            arr = list(map(int,message.text.split(".")))
            arr.reverse()
            d = dt.date(arr[0],arr[1],arr[2])
            tmp.append(message.text)
            tmp.append(day_of_week(d))
            # NOTE(review): in the no-lessons branch tmp is emptied, so the
            # f.write(tmp[1]...) below raises IndexError and falls into the
            # except handler, which then reports a format error — confirm.
            if tmp[2] == "Воскресенье" or tmp[2] == "Четверг":
                sm(bot, message.chat.id, "В этот день нет пар", markup)
                tmp = []
            elif tmp[0] == "add_ht":
                s = "добавить"
                sm(bot, message.chat.id, "Введите id предмета, на который вы хотите " + s + " домашнее задание", mark)
            elif tmp[0] == "get_ht":
                s = "узнать"
                sm(bot, message.chat.id, "Введите id предмета, на который вы хотите " + s + " домашнее задание", mark)
            elif tmp[0] == "add_n":
                s = "добавить"
                sm(bot,message.chat.id, "Введите id предмета, на который вы хотите " + s + " заметку", mark)
            elif tmp[0] == "get_n":
                s = "узнать"
                sm(bot,message.chat.id, "Введите id, на который вы хотите " + s + " заметку", mark)
            f = open(str(message.chat.id)+"_tmp.txt", "a+")
            f.write(tmp[1]+"\n")
            f.write(tmp[2]+"\n")
            f.close()
            return None
        except Exception as e:
            print(e)
            sm(bot,message.chat.id, "Некорректный формат данных, попробуйте снова", mark)
    elif len(tmp) == 3:
        # Step 3: command + date + weekday stored; this message is the lesson id.
        if tmp[0] == "get_ht":
            conn = sql.connect("user_info")
            c = conn.cursor()
            c.execute("select [gr] from users where [ids] = (?)", (str(message.chat.id),))
            g = c.fetchone()
            gr = g[0]
            # Homework lives in "<group>_<date>_<lesson>.txt/.jpg".
            s = gr+"_"+tmp[1]+"_"+message.text
            print(s)
            conn.close()
            ch = file_exists(s+".txt")
            chf = file_exists(s+".jpg")
            if not ch and not chf:
                sm(bot,message.chat.id, "На указанный урок нет домашнего задания", markup)
            if ch:
                doc = open(s+".txt", 'r')
                k = ""
                for line in doc:
                    k += line
                sm(bot,message.chat.id, k,markup)
            if chf:
                with open(s+'.jpg', 'rb') as f:
                    bot.send_photo(message.chat.id, f, reply_markup= markup)
            f = open(str(message.chat.id)+"_tmp.txt","w")
            f.write("")
            f.close()
        elif tmp[0] == "add_ht":
            conn = sql.connect("user_info")
            c = conn.cursor()
            c.execute("select [gr] from users where [ids] = (?)", (str(message.chat.id),))
            g = c.fetchone()
            gr = g[0]
            s = gr+"_"+tmp[1]+"_"+message.text
            conn.close()
            tmp.append(s)
            sm(bot,message.chat.id, "Введите текст домашнего задания, либо отправьте фотографию. Обращаю внимание на факт, что фотография может быть только одна к каждому уроку, новые фотографии перезаписывают старые.",mark)
            f = open(str(message.chat.id)+"_tmp.txt","a+")
            f.write(tmp[3]+"\n")
            f.close()
        elif tmp[0] == "get_n":
            conn = sql.connect("user_info")
            c = conn.cursor()
            c.execute("select [gr] from users where [ids] = (?)", (str(message.chat.id),))
            g = c.fetchone()
            gr = g[0]
            # Notes use the same naming scheme with a "_note" suffix.
            s = gr+"_"+tmp[1]+"_"+message.text+"_note"
            print(s)
            conn.close()
            ch = file_exists(s+".txt")
            if not ch:
                sm(bot,message.chat.id, "На указанный урок нет заметки", markup)
            else:
                doc = open(s+".txt", 'r')
                s = ""
                for line in doc:
                    s += line
                sm(bot,message.chat.id, s, markup)
            f = open(str(message.chat.id)+"_tmp.txt","w")
            f.write("")
            f.close()
        elif tmp[0] == "add_n":
            conn = sql.connect("user_info")
            c = conn.cursor()
            c.execute("select [gr] from users where [ids] = (?)", (str(message.chat.id),))
            g = c.fetchone()
            gr = g[0]
            s = gr+"_"+tmp[1]+"_"+message.text+"_note"
            conn.close()
            tmp.append(s)
            f = open(str(message.chat.id)+"_tmp.txt","a+")
            f.write(tmp[3]+"\n")
            f.close()
            sm(bot,message.chat.id, "Введите текст заметки", mark)
    elif len(tmp) == 4:
        # Step 4: target path stored; this message is the content to append.
        s = tmp[3]
        doc = open(s+".txt", "a+")
        try:
            doc.write(message.chat.username+" добавил:\n" + message.text+"\n")
        except Exception as e:
            # Users without a username: fall back to the numeric chat id.
            print(e)
            doc.write("Пользователь " + str(message.chat.id)+" добавил:\n" + message.text+"\n")
        if tmp[0] == "add_ht":
            sm(bot,message.chat.id, "ДЗ успешно добавлено.",markup)
        else:
            sm(bot,message.chat.id, "Заметка успешно добавлена.", markup)
        f = open(str(message.chat.id)+"_tmp.txt","w")
        f.write("")
        f.close()
    # Step 1: no pending state — interpret the message as a menu command.
    elif message.text == "Добавить домашнее задание":
        tmp.append("add_ht")
        f = open(str(message.chat.id)+"_tmp.txt","a+")
        f.write(tmp[0]+"\n")
        f.close()
        sm(bot,message.chat.id, "Введите дату в формате 01.01.2018", mark)
    elif message.text == "Узнать домашнее задание":
        tmp.append("get_ht")
        f = open(str(message.chat.id)+"_tmp.txt","a+")
        f.write(tmp[0]+"\n")
        f.close()
        sm(bot,message.chat.id, "Введите дату в формате 01.01.2018", mark)
    elif message.text == "Добавить заметку":
        tmp.append("add_n")
        f = open(str(message.chat.id)+"_tmp.txt","a+")
        f.write(tmp[0]+"\n")
        f.close()
        sm(bot,message.chat.id, "Введите дату в формате 01.01.2018", mark)
    elif message.text == "Посмотреть заметку":
        tmp.append("get_n")
        f = open(str(message.chat.id)+"_tmp.txt","a+")
        f.write(tmp[0]+"\n")
        f.close()
        sm(bot,message.chat.id, "Введите дату в формате 01.01.2018", mark)
    elif message.text == 'Связаться с разработчиком':
        sm(bot,message.chat.id, 'Связаться с разработчиком можно в Telegram @IceBlink1 либо по почте lyutiko.alex@gmail.com', markup)
    elif message.text == "Сбросить настройки аккаунта":
        del_lc(bot,message,conn,c)
    elif message.text == "Узнать расписание":
        conn = sql.connect("user_info")
        c = conn.cursor()
        c.execute("select [gr] from users where [ids] = (?)", (str(message.chat.id),))
        g = c.fetchone()
        gr = g[0]
        # The timetable file is named after the group.
        s = g[0]+".txt"
        ch = file_exists(s)
        if not ch:
            sm(bot,message.chat.id, "Нет данных", markup)
        else:
            doc = open(s, 'r')
            k = ""
            cnt = 1
            arr = ["Вторник", "Среда", "Пятница", "Суббота"]
            for line in doc:
                # Numeric lines are skipped; day headers flush the buffer;
                # lesson lines get a running "id = N" suffix.
                try:
                    line = int(line)
                except Exception as e:
                    for i in arr:
                        if i+"\n" == line and line != "Понедельник\n":
                            cnt = 1
                            sm(bot,message.chat.id,k,markup)
                            k = line
                    if line == "Понедельник\n":
                        k+=line
                    elif line[0].isalpha():
                        pass
                    elif line == g[0]+'\n':
                        k+=line
                    elif line == "\n":
                        pass
                    else:
                        k += line[0:-1]+" id = " + str(cnt) + " \n"
                        cnt+=1
                    print(line)
            try:
                bot.send_message(message.chat.id, k, reply_markup = markup)
            except Exception as o:
                print(o)
                sm(bot,message.chat.id, "Бот перегружен, попробуйте позже", markup)
    else:
        sm(bot,message.chat.id,"Простите, я Вас не понимаю", markup)
def reg_lc(bot, message, conn, c):
    """Register a new chat: store its group name and create its state file."""
    global markup
    global gr_arr
    if message.text not in gr_arr:
        # Unknown group: ask again without touching the database.
        bot.send_message(message.chat.id, "Нет данных о такой группе, попробуйте еще раз или свяжитесь с разработчиком")
        return None
    c.execute("insert into users([ids]) values(?)", (message.chat.id,))
    c.execute("update users set [gr] = ? where [ids] = ?", (message.text, message.chat.id))
    sm(bot, message.chat.id, "Вы успешно зарегистрировались", markup)
    conn.commit()
    conn.close()
    # An empty per-chat temp file holds the multi-step dialogue state.
    f = open(str(message.chat.id) + "_tmp.txt", "w+")
    f.close()
    return None
def del_lc(bot, message, conn, c):
    """Forget a chat's registration so it can re-register with a new group."""
    c.execute("delete from users where [ids] = ?", (message.chat.id,))
    sm(bot, message.chat.id,
       "Данные успешно удалены. Для повторной регистрации отправьте номер новой группы",
       markup)
    conn.commit()
    conn.close()
    return None
# Keep the bot alive: telebot's polling raises on network hiccups, so report
# the error and retry after a short back-off instead of dying.
while True:
    try:
        bot.polling(none_stop = True)
    except Exception as e:
        # Bug fix: the original swallowed the exception silently, which hid
        # every failure cause; at least print it before retrying.
        print(e)
        time.sleep(15)
|
import os
from .custom_cameras_calibration_factory import CustomCamerasCalibrationFactory
from .video_dataset_adapter import VideoDatasetAdapter
from ..data_transform_manager import DataTransformManager
from ..unsupervised_depth_data_module import UnsupervisedDepthDataModule
from ..video_dataset import VideoDataset
class CustomDataModuleFactory():
    """Builds an UnsupervisedDepthDataModule from a left/right video directory pair."""

    def __init__(self, directory="datasets"):
        # The stereo streams live in fixed "left"/"right" subdirectories.
        self._left_directory = os.path.join(directory, "left")
        self._right_directory = os.path.join(directory, "right")

    def make_dataset_manager(self, final_size, transform_manager_parameters, split=(80, 10, 10), num_workers=4,
                             device="cpu"):
        """Assemble adapters, transforms and calibration into a data module."""
        left_source = VideoDatasetAdapter(self._left_directory)
        right_source = VideoDatasetAdapter(self._right_directory)
        # Transforms and calibration are both derived from the source size.
        source_size = left_source.get_image_size()
        transforms = DataTransformManager(
            source_size,
            final_size,
            transform_manager_parameters
        )
        paired = VideoDataset(left_source, right_source)
        calibration = CustomCamerasCalibrationFactory().make_cameras_calibration(
            source_size, final_size, device)
        return UnsupervisedDepthDataModule(paired, transforms, calibration,
                                           num_workers=num_workers, split=split)
|
# -*- coding: utf-8 -*-
"""Setup module."""
from os import path
from setuptools import setup
# from setuptools import find_packages
# Distribution metadata.
NAME = 'little-bio-parser'
PACKAGES = ['lilbio']
# JSON file holding the authoritative version number (read by get_version).
PACKAGE_FILE = 'package.json'
SETUP_REQUIRES = []
INSTALL_REQUIRES = [
    # 'gopen @ http://github.com/simomarsili/gopen/archive/v0.3.1.tar.gz']
    # 'gopen @ http://github.com/simomarsili/gopen/archive/master.tar.gz'
    'gopen>=0.6'
]
EXTRAS_REQUIRES = {'test': ['pytest']}
def get_version(source):
    """Read the version string from the JSON metadata file at *source*.

    Raises KeyError when the file carries no 'version' entry.
    """
    import json
    with open(source, 'r') as fp:
        version_data = json.load(fp)
    if 'version' in version_data:
        return version_data['version']
    # no version number in package.json
    raise KeyError('check version file: no version number')
def get_long_description(here):
    """Return the README.rst contents (decoded as UTF-8) from directory *here*."""
    import codecs
    readme = path.join(here, 'README.rst')
    with codecs.open(readme, encoding='utf-8') as handle:
        return handle.read()
HERE = path.abspath(path.dirname(__file__))
# Version and long description are resolved at import time from files
# sitting next to this setup module.
VERSION = get_version(path.join(HERE, PACKAGE_FILE))
LONG_DESCRIPTION = get_long_description(HERE)
setup(
    name=NAME,
    version=VERSION,
    description='A template project with packages',
    long_description=LONG_DESCRIPTION,
    author='Simone Marsili',
    author_email='simo.marsili@gmail.com',
    url='https://github.com/simomarsili/' + NAME,
    packages=PACKAGES,
    # packages=find_packages(exclude=['tests']),
    package_data={
        '': ['LICENSE.txt', 'README.rst', 'requirements.txt', 'package.json']
    },
    include_package_data=True,
    setup_requires=SETUP_REQUIRES,
    install_requires=INSTALL_REQUIRES,
    extras_require=EXTRAS_REQUIRES,
    license='BSD 3-Clause',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 30 17:42:40 2018
@author: sumi
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 22 14:22:46 2018
@author: sumi
"""
from keras import layers
from keras import models
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.utils import np_utils
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Dense, Dropout, Flatten
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image
import matplotlib.pyplot as plt
import os, shutil
# Path to the directory where the original dataset was uncompressed.
original_dataset_dir = '/Users/sumi/python/research/deep_learning_with_python/chap5/cats_n_dogs_original'
# Directory where the smaller working dataset is stored.
base_dir = '/Users/sumi/python/research/deep_learning_with_python/chap5/cats_n_dogs'


def _ensure_dir(path):
    """Create *path* (and parents) if needed and return it.

    Robustness fix: the original os.mkdir calls crashed with
    FileExistsError on any re-run; exist_ok makes the script idempotent.
    """
    os.makedirs(path, exist_ok=True)
    return path


_ensure_dir(base_dir)
# Directories for the training, validation, and test splits.
train_dir = _ensure_dir(os.path.join(base_dir, 'train'))
validation_dir = _ensure_dir(os.path.join(base_dir, 'validation'))
test_dir = _ensure_dir(os.path.join(base_dir, 'test'))
# Per-class (cat/dog) subdirectories inside each split.
train_cats_dir = _ensure_dir(os.path.join(train_dir, 'cats'))
train_dogs_dir = _ensure_dir(os.path.join(train_dir, 'dogs'))
validation_cats_dir = _ensure_dir(os.path.join(validation_dir, 'cats'))
validation_dogs_dir = _ensure_dir(os.path.join(validation_dir, 'dogs'))
test_cats_dir = _ensure_dir(os.path.join(test_dir, 'cats'))
test_dogs_dir = _ensure_dir(os.path.join(test_dir, 'dogs'))
# Bug fix in the two *test* loops below: the original wrote
# `for fnamem in fnames:` while the body used `fname`, so `fname` kept its
# last value from the previous loop and the SAME file was copied 500 times;
# the test sets never contained the intended images.
# Copies the first 1,000 cat images to train_cats_dir
fnames = ['cat.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(train_cats_dir, fname)
    shutil.copyfile(src, dst)
# Copies cat images 1000-1499 to validation_cats_dir
fnames = ['cat.{}.jpg'.format(i) for i in range(1000,1500)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(validation_cats_dir, fname)
    shutil.copyfile(src, dst)
# Copies cat images 1500-1999 to test_cats_dir (loop variable fixed)
fnames = ['cat.{}.jpg'.format(i) for i in range(1500,2000)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(test_cats_dir, fname)
    shutil.copyfile(src, dst)
# Copies the first 1,000 dog images to train_dogs_dir
fnames = ['dog.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(train_dogs_dir, fname)
    shutil.copyfile(src, dst)
# Copies dog images 1000-1499 to validation_dogs_dir
fnames = ['dog.{}.jpg'.format(i) for i in range(1000,1500)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(validation_dogs_dir, fname)
    shutil.copyfile(src, dst)
# Copies dog images 1500-1999 to test_dogs_dir (loop variable fixed)
fnames = ['dog.{}.jpg'.format(i) for i in range(1500,2000)]
for fname in fnames:
    src = os.path.join(original_dataset_dir, fname)
    dst = os.path.join(test_dogs_dir, fname)
    shutil.copyfile(src, dst)
#print('total training cat images:', len(os.listdir(train_cats_dir)))
## Instantiating a small convnet for dogs vs. cats classification
# Four Conv/MaxPool stages, then a 512-unit dense layer.
model = models.Sequential()
model.add(Conv2D(32, (3,3), activation = 'relu', input_shape = (150, 150, 3)))
model.add(MaxPooling2D((2,2)))
model.add(Conv2D(64, (3,3), activation = 'relu'))
model.add(MaxPooling2D((2,2)))
model.add(Conv2D(128, (3,3), activation = 'relu'))
model.add(MaxPooling2D((2,2)))
model.add(Conv2D(128, (3,3), activation = 'relu'))
model.add(MaxPooling2D((2,2)))
model.add(Flatten())
model.add(Dense(512, activation = 'relu'))
# Single sigmoid output: cat-vs-dog probability for binary_crossentropy.
model.add(Dense(1, activation = 'sigmoid'))
model.summary()
## Configuring the model for training
model.compile(loss='binary_crossentropy', optimizer = optimizers.RMSprop(lr = 1e-4),
              metrics = ['acc'])
## Using ImageDataGenerator to read images from directories
### Rescales all images by 1/255
train_datagen = ImageDataGenerator(rescale = 1./255)
test_datagen = ImageDataGenerator(rescale = 1./255)
train_generator = train_datagen.flow_from_directory(train_dir, # Target directory
                 target_size = (150, 150), # Resizes all images to 150 x 150
                 batch_size = 20,
                 class_mode = 'binary') # Because you use binary_crossentropy loss, you need binary labels.
validation_generator = test_datagen.flow_from_directory(validation_dir,
                 target_size = (150, 150),
                 batch_size = 20,
                 class_mode = 'binary')
## Fitting the model using a batch generator
history = model.fit_generator(train_generator, steps_per_epoch = 100, epochs = 20,
                 validation_data = validation_generator, validation_steps = 50)
## Saving the model
model.save('cats_and_dogs_small_1.h5')
## Displaying curves of loss and accuracy during training
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label = 'Training acc')
plt.plot(epochs, val_acc, 'b', label = 'Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label = 'Training loss')
plt.plot(epochs, val_loss, 'b', label = 'Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
|
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 26 10:42:09 2018
@author: Jahanzaib Malik
"""
import random
import math
import sys
from random import randint
'''
Euclid's extended algorithm for finding the multiplicative inverse of two numbers
'''
def egcd(a, b):
    """Extended Euclidean algorithm, iteratively.

    Returns (g, x, y) such that a*x + b*y == g == gcd(a, b), producing the
    same Bezout coefficients as the recursive formulation.
    """
    old_r, r = a, b
    old_x, x = 1, 0
    old_y, y = 0, 1
    while r != 0:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_x, x = x, old_x - q * x
        old_y, y = y, old_y - q * y
    return (old_r, old_x, old_y)
def modinv(a, m):
    """Multiplicative inverse of a modulo m.

    Raises Exception (same type and message as before, so existing callers
    catching it keep working) when gcd(a, m) != 1.
    """
    g, x, _ = egcd(a, m)
    if g != 1:
        raise Exception('modular inverse does not exist')
    return x % m
'''
if a number is prime according to fermat's theorm
'''
def is_prime(num, test_count):
    """Probabilistic Fermat primality test with up to *test_count* random trials.

    Returns False for 1, True when every random witness w satisfies
    w**(num-1) % num == 1. Composite numbers can slip through (Fermat
    liars / Carmichael numbers), exactly as in the original.
    """
    if num == 1:
        return False
    # Never draw more witnesses than there are candidates (1..num-1).
    trials = min(test_count, num - 1)
    for _ in range(trials):
        witness = randint(1, num - 1)
        if pow(witness, num - 1, num) != 1:
            return False
    return True
def generate_big_prime(n):
    """Draw random candidates in [2**(n-1), 2**n] until one passes 1000 Fermat trials."""
    while True:
        candidate = randint(2 ** (n - 1), 2 ** n)
        if is_prime(candidate, 1000):
            return candidate
def generate_keypair(p, q):
    """Build an RSA keypair from primes p and q.

    Returns ((e, n), (d, n)): public key is (e, n), private key is (d, n).

    Bug fix: the original hard-coded d = 358063, which is NOT the modular
    inverse of e for freshly generated primes, so decryption produced
    garbage for every keypair except the one it was originally derived
    for. d must be recomputed as modinv(e, phi) per keypair.
    """
    n = p * q
    phi = (p - 1) * (q - 1)
    # Fixed public exponent as before; step to nearby odd values in the
    # rare case gcd(e, phi) != 1 (egcd's first element is the gcd).
    e = 517
    while egcd(e, phi)[0] != 1:
        e += 2
    # Extended Euclid gives the private exponent.
    d = modinv(e, phi)
    return ((e, n), (d, n))
def encrypt(publicKey, plaintext):
    """RSA-encrypt *plaintext* character by character.

    Each character's code point is raised to the public exponent modulo n;
    the result is a list of integers, one per character.
    """
    key, n = publicKey
    return [pow(ord(ch), key, n) for ch in plaintext]
def decrypt(privateKey, ciphertext):
    """RSA-decrypt *ciphertext* (a list of integers) back into a string.

    Bug fix: the original computed pow(char, key) % n, which materialises
    the full big-integer power before reducing — astronomically slow and
    memory-hungry for real key sizes (key here is ~2048 bits). The
    three-argument pow(char, key, n) performs modular exponentiation
    directly and is mathematically identical.
    """
    key, n = privateKey
    return ''.join(chr(pow(value, key, n)) for value in ciphertext)
if __name__ == '__main__':
    # Demo driver. NOTE(review): generating two 1024-bit primes with 1000
    # Fermat rounds each is very slow; expect a long wait on each run.
    print("RSA Encryption Assignment---Jahanzaib Malik")
    print("Generating your public/private keypairs now . . .")
    p = generate_big_prime(1024)
    q = generate_big_prime(1024)
    print("large p & q Generated")
    public, private = generate_keypair(p, q)
    #public, private = generate_keypair(p, q)
    print("Your public key is ", public, " \n and your private key is ", private)
    message = "bear"
    ''' Given Input...
           "bear",
           "kangaroo",
           "wombat",
           "A koala is not a bear even if its Chinese translation means bear without tail.",
           "Longer text messages will be given next week."
    '''
    # Round-trip the sample message through the generated keypair.
    encrypted_msg = encrypt(public, message)
    print("Your encrypted message is: ")
    print(''.join(map(lambda x: str(x), encrypted_msg)))
    print("Decrypting message with private key ", private, " . . .")
    print("Your message is:")
    print(decrypt(private, encrypted_msg))
|
import random
import settings
import exceptions
class Player:
    """Human player: tracks name, remaining lives and attacking score."""

    def __init__(self, name):
        self.name = name
        self.lives = settings.LIVES  # starting lives come from settings
        self.score = 0

    @staticmethod
    def fight(attack, defense):
        """Resolve one round: 1 if *attack* beats *defense*, -1 if it loses,
        0 on a tie (Warrior > Rogue > Wizard > Warrior)."""
        if attack == defense:
            return 0
        winning_pairs = (
            (settings.WARRIOR, settings.ROGUE),
            (settings.ROGUE, settings.WIZARD),
            (settings.WIZARD, settings.WARRIOR),
        )
        return 1 if (attack, defense) in winning_pairs else -1

    @staticmethod
    def _choose_hero(prompt):
        """Prompt until the user enters a valid hero id (1/2/3).

        Extracted: attack() and defense() previously duplicated this
        validation loop verbatim.
        """
        while True:
            print(prompt)
            choice = int(input())
            if choice in (settings.WARRIOR, settings.WIZARD, settings.ROGUE):
                return choice

    def attack(self, enemy_obj):
        """Player attacks: a win costs the enemy a life and scores a point."""
        print(self.name + ' attack now:')
        your_attack = self._choose_hero('Choose your hero: 1 for Warrior, 2 for Wizard, 3 for Rogue')
        enemy_attack = Enemy.select_attack()
        print('Enemy choose is ', enemy_attack)
        result = Player.fight(your_attack, enemy_attack)
        if result == 0:
            print("Tie")
        elif result == 1:
            enemy_obj.decrease_lives()
            self.score += 1  # only attacking wins are worth a point
            print("You win")
            print('Enemy has ', enemy_obj.lives, 'lives left')
        elif result == -1:
            self.decrease_lives()
            print('Enemy wins')
            print('You have ', self.lives, 'lives left')
        else:
            raise exceptions.UnexpectedException()

    def defense(self, enemy_obj):
        """Enemy attacks: a loss costs the player a life (no score change)."""
        print('Choose your hero for defence')
        your_defense = self._choose_hero('Enter 1 for Warrior, 2 for Wizard, 3 for Rogue')
        enemy_attack = Enemy.select_attack()
        print('Enemy choose is', enemy_attack)  # typo fix: was 'choise'
        result = Player.fight(enemy_attack, your_defense)
        if result == 0:
            print('Tie')
        elif result == 1:
            self.decrease_lives()
            print('Enemy wins')
            print('You have ', self.lives, ' lives left')
        elif result == -1:
            enemy_obj.decrease_lives()
            print('You win')
            print('Enemy has ', enemy_obj.lives, ' lives left')
        else:
            raise exceptions.UnexpectedException()

    def decrease_lives(self):
        """Lose one life; raise GameOver when none remain."""
        self.lives -= 1
        if self.lives == 0:
            raise exceptions.GameOver()
class Enemy:
    """Computer opponent; its life count equals its level."""

    def __init__(self, level):
        self.level = level
        self.lives = level

    @staticmethod
    def select_attack():
        """Pick a hero at random for the enemy's move."""
        return random.choice([settings.WARRIOR, settings.WIZARD, settings.ROGUE])

    def decrease_lives(self):
        """Remove one life; raise EnemyDown when none remain."""
        self.lives -= 1
        if self.lives == 0:
            raise exceptions.EnemyDown()
import sunspec2.xlsx as xlsx
import pytest
import openpyxl
import openpyxl.styles as styles
import json
def test___init__():
    """ModelWorkbook records its filename and starts with empty params."""
    from_file = xlsx.ModelWorkbook(filename='sunspec2/tests/test_data/wb_701-705.xlsx')
    assert from_file.filename == 'sunspec2/tests/test_data/wb_701-705.xlsx'
    assert from_file.params == {}
    in_memory = xlsx.ModelWorkbook()
    assert in_memory.filename is None
    assert in_memory.params == {}
def test_get_models():
    """get_models lists the model ids in the workbook (empty when new)."""
    from_file = xlsx.ModelWorkbook(filename='sunspec2/tests/test_data/wb_701-705.xlsx')
    assert from_file.get_models() == [701, 702, 703, 704, 705]
    in_memory = xlsx.ModelWorkbook()
    assert in_memory.get_models() == []
def test_save(tmp_path):
    """A saved empty workbook reopens with the expected Index header."""
    original = xlsx.ModelWorkbook()
    original.save(tmp_path / 'test.xlsx')
    reopened = xlsx.ModelWorkbook(filename=tmp_path / 'test.xlsx')
    rows = reopened.xlsx_iter_rows(original.wb['Index'])
    assert next(rows) == ['Model', 'Label', 'Description']
def test_xlsx_iter_rows():
    """xlsx_iter_rows yields the header row, then data rows, of sheet 704."""
    workbook = xlsx.ModelWorkbook(filename='sunspec2/tests/test_data/wb_701-705.xlsx')
    rows = workbook.xlsx_iter_rows(workbook.wb['704'])
    header = ['Address Offset', 'Group Offset', 'Name',
              'Value', 'Count', 'Type', 'Size', 'Scale Factor',
              'Units', 'RW Access (RW)', 'Mandatory (M)', 'Static (S)',
              'Label', 'Description', 'Detailed Description']
    assert next(rows) == header
    first_data_row = [None, None, 'DERCtlAC', None, None, 'group',
                      None, None, None, None, None, None, 'DER AC Controls',
                      'DER AC controls model.', None]
    assert next(rows) == first_data_row
def test_spreadsheet_from_xlsx():
    """Header and first data row of the model-704 spreadsheet extraction."""
    workbook = xlsx.ModelWorkbook(filename='sunspec2/tests/test_data/wb_701-705.xlsx')
    expected_header = ['Address Offset', 'Group Offset', 'Name', 'Value', 'Count',
                       'Type', 'Size', 'Scale Factor', 'Units', 'RW Access (RW)',
                       'Mandatory (M)', 'Static (S)', 'Label', 'Description',
                       'Detailed Description']
    expected_first_row = ['', '', 'DERCtlAC', None, None, 'group', None, None, None,
                          None, None, None, 'DER AC Controls', 'DER AC controls model.', None]
    assert workbook.spreadsheet_from_xlsx(704)[0:2] == [expected_header, expected_first_row]
# need deep diff to compare from_xlsx to json file, right now just compares with its own output
def test_from_xlsx():
    """Compare from_xlsx(704) against a hard-coded expected model dict."""
    wb = xlsx.ModelWorkbook(filename='sunspec2/tests/test_data/wb_701-705.xlsx')
    # NOTE(review): model_json_704 is loaded but never used — per the comment
    # above this test, a deep diff against the JSON file is still TODO; the
    # test currently only compares against its own literal below.
    with open('sunspec2/models/json/model_704.json') as f:
        model_json_704 = json.load(f)
    # Expected output of from_xlsx(704): the full model-704 definition.
    from_xlsx_output = {
        "group": {
            "name": "DERCtlAC",
            "type": "group",
            "label": "DER AC Controls",
            "desc": "DER AC controls model.",
            "points": [
                {
                    "name": "ID",
                    "type": "uint16",
                    "mandatory": "M",
                    "static": "S",
                    "label": "Model ID",
                    "desc": "Model name model id.",
                    "value": 704
                },
                {
                    "name": "L",
                    "type": "uint16",
                    "mandatory": "M",
                    "static": "S",
                    "label": "Model Length",
                    "desc": "Model name model length."
                },
                {
                    "name": "PFWInjEna",
                    "type": "enum16",
                    "access": "RW",
                    "label": "Power Factor Enable (W Inj) Enable",
                    "desc": "Power factor enable when injecting active power.",
                    "comments": [
                        "Set Power Factor (when injecting active power)"
                    ],
                    "symbols": [
                        {
                            "name": "DISABLED",
                            "value": 0,
                            "label": "Disabled",
                            "desc": "Function is disabled."
                        },
                        {
                            "name": "ENABLED",
                            "value": 1,
                            "label": "Enabled",
                            "desc": "Function is enabled."
                        }
                    ]
                },
                {
                    "name": "PFWInjEnaRvrt",
                    "type": "enum16",
                    "symbols": [
                        {
                            "name": "DISABLED",
                            "value": 0,
                            "label": "Disabled",
                            "desc": "Function is disabled."
                        },
                        {
                            "name": "ENABLED",
                            "value": 1,
                            "label": "Enabled",
                            "desc": "Function is enabled."
                        }
                    ]
                },
                {
                    "name": "PFWInjRvrtTms",
                    "type": "uint32",
                    "units": "Secs",
                    "access": "RW",
                    "label": "PF Reversion Time (W Inj)",
                    "desc": "Power factor reversion timer when injecting active power."
                },
                {
                    "name": "PFWInjRvrtRem",
                    "type": "uint32",
                    "units": "Secs",
                    "label": "PF Reversion Time Rem (W Inj)",
                    "desc": "Power factor reversion time remaining when injecting active power."
                },
                {
                    "name": "PFWAbsEna",
                    "type": "enum16",
                    "access": "RW",
                    "label": "Power Factor Enable (W Abs) Enable",
                    "desc": "Power factor enable when absorbing active power.",
                    "comments": [
                        "Set Power Factor (when absorbing active power)"
                    ],
                    "symbols": [
                        {
                            "name": "DISABLED",
                            "value": 0,
                            "label": "Disabled",
                            "desc": "Function is disabled."
                        },
                        {
                            "name": "ENABLED",
                            "value": 1,
                            "label": "Enabled",
                            "desc": "Function is enabled."
                        }
                    ]
                },
                {
                    "name": "PFWAbsEnaRvrt",
                    "type": "enum16",
                    "symbols": [
                        {
                            "name": "DISABLED",
                            "value": 0,
                            "label": "Disabled",
                            "desc": "Function is disabled."
                        },
                        {
                            "name": "ENABLED",
                            "value": 1,
                            "label": "Enabled",
                            "desc": "Function is enabled."
                        }
                    ]
                },
                {
                    "name": "PFWAbsRvrtTms",
                    "type": "uint32",
                    "units": "Secs",
                    "access": "RW",
                    "label": "PF Reversion Time (W Abs)",
                    "desc": "Power factor reversion timer when absorbing active power."
                },
                {
                    "name": "PFWAbsRvrtRem",
                    "type": "uint32",
                    "units": "Secs",
                    "label": "PF Reversion Time Rem (W Abs)",
                    "desc": "Power factor reversion time remaining when absorbing active power."
                },
                {
                    "name": "WMaxLimEna",
                    "type": "enum16",
                    "access": "RW",
                    "label": "Limit Max Active Power Enable",
                    "desc": "Limit maximum active power enable.",
                    "comments": [
                        "Limit Maximum Active Power Generation"
                    ],
                    "symbols": [
                        {
                            "name": "DISABLED",
                            "value": 0,
                            "label": "Disabled",
                            "desc": "Function is disabled."
                        },
                        {
                            "name": "ENABLED",
                            "value": 1,
                            "label": "Enabled",
                            "desc": "Function is enabled."
                        }
                    ]
                },
                {
                    "name": "WMaxLim",
                    "type": "uint16",
                    "sf": "WMaxLim_SF",
                    "units": "Pct",
                    "access": "RW",
                    "label": "Limit Max Power Setpoint",
                    "desc": "Limit maximum active power value."
                },
                {
                    "name": "WMaxLimRvrt",
                    "type": "uint16",
                    "sf": "WMaxLim_SF",
                    "units": "Pct",
                    "access": "RW",
                    "label": "Reversion Limit Max Power",
                    "desc": "Reversion limit maximum active power value."
                },
                {
                    "name": "WMaxLimEnaRvrt",
                    "type": "enum16",
                    "symbols": [
                        {
                            "name": "DISABLED",
                            "value": 0,
                            "label": "Disabled",
                            "desc": "Function is disabled."
                        },
                        {
                            "name": "ENABLED",
                            "value": 1,
                            "label": "Enabled",
                            "desc": "Function is enabled."
                        }
                    ]
                },
                {
                    "name": "WMaxLimRvrtTms",
                    "type": "uint32",
                    "units": "Secs",
                    "access": "RW",
                    "label": "Limit Max Power Reversion Time",
                    "desc": "Limit maximum active power reversion time."
                },
                {
                    "name": "WMaxLimRvrtRem",
                    "type": "uint32",
                    "units": "Secs",
                    "label": "Limit Max Power Rev Time Rem",
                    "desc": "Limit maximum active power reversion time remaining."
                },
                {
                    "name": "WSetEna",
                    "type": "enum16",
                    "access": "RW",
                    "label": "Set Active Power Enable",
                    "desc": "Set active power enable.",
                    "comments": [
                        "Set Active Power Level (may be negative for charging)"
                    ],
                    "symbols": [
                        {
                            "name": "DISABLED",
                            "value": 0,
                            "label": "Disabled",
                            "desc": "Function is disabled."
                        },
                        {
                            "name": "ENABLED",
                            "value": 1,
                            "label": "Enabled",
                            "desc": "Function is enabled."
                        }
                    ]
                },
                {
                    "name": "WSetMod",
                    "type": "enum16",
                    "access": "RW",
                    "label": "Set Active Power Mode",
                    "desc": "Set active power mode.",
                    "symbols": [
                        {
                            "name": "W_MAX_PCT",
                            "value": 1,
                            "label": "Active Power As Max Percent",
                            "desc": "Active power setting is percentage of maximum active power."
                        },
                        {
                            "name": "WATTS",
                            "value": 2,
                            "label": "Active Power As Watts",
                            "desc": "Active power setting is in watts."
                        }
                    ]
                },
                {
                    "name": "WSet",
                    "type": "int32",
                    "sf": "WSet_SF",
                    "units": "W",
                    "access": "RW",
                    "label": "Active Power Setpoint (W)",
                    "desc": "Active power setting value in watts."
                },
                {
                    "name": "WSetRvrt",
                    "type": "int32",
                    "sf": "WSet_SF",
                    "units": "W",
                    "access": "RW",
                    "label": "Reversion Active Power (W)",
                    "desc": "Reversion active power setting value in watts."
                },
                {
                    "name": "WSetPct",
                    "type": "int32",
                    "sf": "WSetPct_SF",
                    "units": "Pct",
                    "access": "RW",
                    "label": "Active Power Setpoint (Pct)",
                    "desc": "Active power setting value as percent."
                },
                {
                    "name": "WSetPctRvrt",
                    "type": "int32",
                    "sf": "WSetPct_SF",
                    "units": "Pct",
                    "access": "RW",
                    "label": "Reversion Active Power (Pct)",
                    "desc": "Reversion active power setting value as percent."
                },
                {
                    "name": "WSetEnaRvrt",
                    "type": "enum16",
                    "symbols": [
                        {
                            "name": "DISABLED",
                            "value": 0,
                            "label": "Disabled",
                            "desc": "Function is disabled."
                        },
                        {
                            "name": "ENABLED",
                            "value": 1,
                            "label": "Enabled",
                            "desc": "Function is enabled."
                        }
                    ]
                },
                {
                    "name": "WSetRvrtTms",
                    "type": "uint32",
                    "units": "Secs",
                    "access": "RW",
                    "label": "Active Power Reversion Time",
                    "desc": "Set active power reversion time."
                },
                {
                    "name": "WSetRvrtRem",
                    "type": "uint32",
                    "units": "Secs",
                    "label": "Active Power Rev Time Rem",
                    "desc": "Set active power reversion time remaining."
                },
                {
                    "name": "VarSetEna",
                    "type": "enum16",
                    "access": "RW",
                    "label": "Set Reactive Power Enable",
                    "desc": "Set reactive power enable.",
                    "comments": [
                        "Set Reacitve Power Level"
                    ],
                    "symbols": [
                        {
                            "name": "DISABLED",
                            "value": 0,
                            "label": "Disabled",
                            "desc": "Function is disabled."
                        },
                        {
                            "name": "ENABLED",
                            "value": 1,
                            "label": "Enabled",
                            "desc": "Function is enabled."
                        }
                    ]
                },
                {
                    "name": "VarSetMod",
                    "type": "enum16",
                    "access": "RW",
                    "label": "Set Reactive Power Mode",
                    "desc": "Set reactive power mode.",
                    "symbols": [
                        {
                            "name": "W_MAX_PCT",
                            "value": 1,
                            "label": "Reactive Power as Watt Max Pct",
                            "desc": "Reactive power setting is percent of maximum active power."
                        },
                        {
                            "name": "VAR_MAX_PCT",
                            "value": 2,
                            "label": "Reactive Power as Var Max Pct",
                            "desc": "Reactive power setting is percent of maximum reactive power."
                        },
                        {
                            "name": "VAR_AVAIL_PCT",
                            "value": 3,
                            "label": "Reactive Power as Var Avail Pct",
                            "desc": "Reactive power setting is percent of available reactive power."
                        },
                        {
                            "name": "VARS",
                            "value": 4,
                            "label": "Reactive Power as Vars",
                            "desc": "Reactive power is in vars."
                        }
                    ]
                },
                {
                    "name": "VarSetPri",
                    "type": "enum16",
                    "symbols": [
                        {
                            "name": "ACTIVE",
                            "value": 1,
                            "label": "Active Power Priority",
                            "desc": "Active power priority."
                        },
                        {
                            "name": "REACTIVE",
                            "value": 2,
                            "label": "Reactive Power Priority",
                            "desc": "Reactive power priority."
                        },
                        {
                            "name": "IEEE_1547",
                            "value": 3,
                            "label": "IEEE 1547 Power Priority",
                            "desc": "IEEE 1547-2018 power priority mode."
                        },
                        {
                            "name": "PF",
                            "value": 4,
                            "label": "PF Power Priority",
                            "desc": "Track PF setting derived from current active and reactive power settings."
                        },
                        {
                            "name": "VENDOR",
                            "value": 5,
                            "label": "Vendor Power Priority",
                            "desc": "Power priority is vendor specific mode."
                        }
                    ]
                },
                {
                    "name": "VarSet",
                    "type": "int32",
                    "sf": "VarSet_SF",
                    "units": "Var",
                    "access": "RW",
                    "label": "Reactive Power Setpoint (Vars)",
                    "desc": "Reactive power setting value in vars."
                },
                {
                    "name": "VarSetRvrt",
                    "type": "int32",
                    "sf": "VarSet_SF",
                    "units": "Var",
                    "access": "RW",
                    "label": "Reversion Reactive Power (Vars)",
                    "desc": "Reversion reactive power setting value in vars."
                },
                {
                    "name": "VarSetPct",
                    "type": "int32",
                    "sf": "VarSetPct_SF",
                    "units": "Pct",
                    "access": "RW",
                    "label": "Reactive Power Setpoint (Pct)",
                    "desc": "Reactive power setting value as percent."
                },
                {
                    "name": "VarSetPctRvrt",
                    "type": "enum16",
                    "sf": "VarSetPct_SF",
                    "units": "Pct",
                    "access": "RW",
                    "label": "Reversion Reactive Power (Pct)",
                    "desc": "Reversion reactive power setting value as percent.",
                    "symbols": [
                        {
                            "name": "DISABLED",
                            "value": 0,
                            "label": "Disabled",
                            "desc": "Function is disabled."
                        },
                        {
                            "name": "ENABLED",
                            "value": 1,
                            "label": "Enabled",
                            "desc": "Function is enabled."
                        }
                    ]
                },
                {
                    "name": "VarSetRvrtTms",
                    "type": "uint32",
                    "units": "Secs",
                    "access": "RW",
                    "label": "Reactive Power Reversion Time",
                    "desc": "Set reactive power reversion time."
                },
                {
                    "name": "VarSetRvrtRem",
                    "type": "uint32",
                    "units": "Secs",
                    "label": "Reactive Power Rev Time Rem",
                    "desc": "Set reactive power reversion time remaining."
                },
                {
                    "name": "RGra",
                    "type": "uint32",
                    "units": "%WMax/Sec",
                    "access": "RW",
                    "label": "Normal Ramp Rate",
                    "desc": "Ramp rate for increases in active power during normal generation.",
                    "comments": [
                        "Ramp Rate"
                    ],
                    "symbols": [
                        {
                            "name": "A_MAX",
                            "value": 1,
                            "label": "Max Current Ramp",
                            "desc": "Ramp based on percent of max current per second."
                        },
                        {
                            "name": "W_MAX",
                            "value": 2,
                            "label": "Max Active Power Ramp",
                            "desc": "Ramp based on percent of max active power per second."
                        }
                    ]
                },
                {
                    "name": "PF_SF",
                    "type": "sunssf",
                    "static": "S",
                    "label": "Power Factor Scale Factor",
                    "desc": "Power factor scale factor.",
                    "comments": [
                        "Scale Factors"
                    ]
                },
                {
                    "name": "WMaxLim_SF",
                    "type": "sunssf",
                    "static": "S",
                    "label": "Limit Max Power Scale Factor",
                    "desc": "Limit maximum power scale factor."
                },
                {
                    "name": "WSet_SF",
                    "type": "sunssf",
                    "static": "S",
                    "label": "Active Power Scale Factor",
                    "desc": "Active power scale factor."
                },
                {
                    "name": "WSetPct_SF",
                    "type": "sunssf",
                    "static": "S",
                    "label": "Active Power Pct Scale Factor",
                    "desc": "Active power pct scale factor."
                },
                {
                    "name": "VarSet_SF",
                    "type": "sunssf",
                    "static": "S",
                    "label": "Reactive Power Scale Factor",
                    "desc": "Reactive power scale factor."
                },
                {
                    "name": "VarSetPct_SF",
                    "type": "sunssf",
                    "static": "S",
                    "label": "Reactive Power Pct Scale Factor",
                    "desc": "Reactive power pct scale factor."
                }
            ],
            "groups": [
                {
                    "name": "PFWInj",
                    "type": "sync",
                    "label": " ",
                    "desc": " ",
                    "comments": [
                        "Power Factor Settings"
                    ],
                    "points": [
                        {
                            "name": "PF",
                            "type": "uint16",
                            "sf": "PF_SF",
                            "access": "RW",
                            "label": "Power Factor (W Inj) ",
                            "desc": "Power factor setpoint when injecting active power."
                        },
                        {
                            "name": "Ext",
                            "type": "enum16",
                            "access": "RW",
                            "label": "Power Factor Excitation (W Inj)",
                            "desc": "Power factor excitation setpoint when injecting active power.",
                            "symbols": [
                                {
                                    "name": "OVER_EXCITED",
                                    "value": 0,
                                    "label": "Over-excited",
                                    "desc": "Power factor over-excited excitation."
                                },
                                {
                                    "name": "UNDER_EXCITED",
                                    "value": 1,
                                    "label": "Under-excited",
                                    "desc": "Power factor under-excited excitation."
                                }
                            ]
                        }
                    ]
                },
                {
                    "name": "PFWInjRvrt",
                    "type": "sync",
                    "label": " ",
                    "desc": " ",
                    "points": [
                        {
                            "name": "PF",
                            "type": "uint16",
                            "sf": "PF_SF",
                            "access": "RW",
                            "label": "Reversion Power Factor (W Inj) ",
                            "desc": "Reversion power factor setpoint when injecting active power."
                        },
                        {
                            "name": "Ext",
                            "type": "enum16",
                            "access": "RW",
                            "label": "Reversion PF Excitation (W Inj)",
                            "desc": "Reversion power factor excitation setpoint when injecting active power.",
                            "symbols": [
                                {
                                    "name": "OVER_EXCITED",
                                    "value": 0,
                                    "label": "Over-excited",
                                    "desc": "Power factor over-excited excitation."
                                },
                                {
                                    "name": "UNDER_EXCITED",
                                    "value": 1,
                                    "label": "Under-excited",
                                    "desc": "Power factor under-excited excitation."
                                }
                            ]
                        }
                    ]
                },
                {
                    "name": "PFWAbs",
                    "type": "sync",
                    "label": " ",
                    "desc": " ",
                    "points": [
                        {
                            "name": "PF",
                            "type": "uint16",
                            "sf": "PF_SF",
                            "access": "RW",
                            "label": "Power Factor (W Abs) ",
                            "desc": "Power factor setpoint when absorbing active power."
                        },
                        {
                            "name": "Ext",
                            "type": "enum16",
                            "access": "RW",
                            "label": "Power Factor Excitation (W Abs)",
                            "desc": "Power factor excitation setpoint when absorbing active power.",
                            "symbols": [
                                {
                                    "name": "OVER_EXCITED",
                                    "value": 0,
                                    "label": "Over-excited",
                                    "desc": "Power factor over-excited excitation."
                                },
                                {
                                    "name": "UNDER_EXCITED",
                                    "value": 1,
                                    "label": "Under-excited",
                                    "desc": "Power factor under-excited excitation."
                                }
                            ]
                        }
                    ]
                },
                {
                    "name": "PFWAbsRvrt",
                    "type": "sync",
                    "label": " ",
                    "desc": " ",
                    "points": [
                        {
                            "name": "PF",
                            "type": "uint16",
                            "sf": "PF_SF",
                            "access": "RW",
                            "label": "Reversion Power Factor (W Abs) ",
                            "desc": "Reversion power factor setpoint when absorbing active power."
                        },
                        {
                            "name": "Ext",
                            "type": "enum16",
                            "access": "RW",
                            "label": "Reversion PF Excitation (W Abs)",
                            "desc": "Reversion power factor excitation setpoint when absorbing active power.",
                            "symbols": [
                                {
                                    "name": "OVER_EXCITED",
                                    "value": 0,
                                    "label": "Over-excited",
                                    "desc": "Power factor over-excited excitation."
                                },
                                {
                                    "name": "UNDER_EXCITED",
                                    "value": 1,
                                    "label": "Under-excited",
                                    "desc": "Power factor under-excited excitation."
                                }
                            ]
                        }
                    ]
                }
            ]
        },
        "id": 704
    }
    assert wb.from_xlsx(704) == from_xlsx_output
def test_set_cell():
    """set_cell rejects writes to file-backed workbooks; works in-memory."""
    read_only = xlsx.ModelWorkbook(filename='sunspec2/tests/test_data/wb_701-705.xlsx')
    with pytest.raises(ValueError) as exc:
        read_only.set_cell(read_only.wb['704'], 1, 2, 3)
    assert 'Workbooks opened with existing file are read only' in str(exc.value)
    writable = xlsx.ModelWorkbook()
    cell = writable.set_cell(writable.wb['Index'], 2, 1, 3, style='suns_comment')
    assert cell.value == 3
def test_set_info():
    """set_info writes only the label/description/detail columns."""
    workbook = xlsx.ModelWorkbook()
    row = [''] * 15
    row[12] = 'label'
    row[13] = 'description'
    row[14] = 'detail'
    workbook.set_info(workbook.wb['Index'], 2, row)
    rows = workbook.xlsx_iter_rows(workbook.wb['Index'])
    next(rows)  # skip the header row
    assert next(rows) == [None, None, None, None, None, None,
                          None, None, None, None, None, None, 'label', 'description', 'detail']
def test_set_group():
    """set_group writes name/count/type plus the info columns."""
    workbook = xlsx.ModelWorkbook()
    row = [''] * 15
    row[2] = 'name'
    row[4] = 'count'
    row[5] = 'type'
    row[12] = 'label'
    row[13] = 'description'
    row[14] = 'detail'
    workbook.set_group(workbook.wb['Index'], 2, row, 2)
    rows = workbook.xlsx_iter_rows(workbook.wb['Index'])
    next(rows)  # skip the header row
    assert next(rows) == ['', '', 'name', '', 'count', 'type', '', '', '', '', '', '',
                          'label', 'description', 'detail']
def test_set_point():
    """set_point writes the twelve point columns, leaving info columns empty."""
    workbook = xlsx.ModelWorkbook()
    row = [''] * 15
    row[0:12] = ['addr_offset', 'group_offset', 'name', 'value', 'count', 'type',
                 'size', 'sf', 'units', 'access', 'mandatory', 'static']
    workbook.set_point(workbook.wb['Index'], 2, row, 1)
    rows = workbook.xlsx_iter_rows(workbook.wb['Index'])
    next(rows)  # skip the header row
    assert next(rows) == ['addr_offset', 'group_offset', 'name', 'value', 'count',
                          'type', 'size', 'sf', 'units', 'access', 'mandatory', 'static', '', '', '']
def test_set_symbol():
    """set_symbol writes name/value plus the info columns."""
    workbook = xlsx.ModelWorkbook()
    row = [''] * 15
    row[2] = 'name'
    row[3] = 'value'
    row[12] = 'label'
    row[13] = 'description'
    row[14] = 'detail'
    workbook.set_symbol(workbook.wb['Index'], 2, row)
    rows = workbook.xlsx_iter_rows(workbook.wb['Index'])
    next(rows)  # skip the header row
    assert next(rows) == ['', '', 'name', 'value', '', '', '',
                          '', '', '', '', '', 'label', 'description', 'detail']
def test_set_comment():
    """set_comment places the comment text in the first column."""
    workbook = xlsx.ModelWorkbook()
    workbook.set_comment(workbook.wb['Index'], 2, ['This is a comment'])
    rows = workbook.xlsx_iter_rows(workbook.wb['Index'])
    next(rows)  # skip the header row
    assert next(rows)[0] == 'This is a comment'
def test_set_hdr():
    """set_hdr replaces the first row of the sheet."""
    workbook = xlsx.ModelWorkbook()
    header = ['This', 'is', 'a', 'test', 'header']
    workbook.set_hdr(workbook.wb['Index'], header)
    rows = workbook.xlsx_iter_rows(workbook.wb['Index'])
    assert next(rows) == header
def test_spreadsheet_to_xlsx():
    """spreadsheet_to_xlsx is rejected on file-backed workbooks and
    round-trips rows on an in-memory workbook."""
    read_only = xlsx.ModelWorkbook(filename='sunspec2/tests/test_data/wb_701-705.xlsx')
    with pytest.raises(ValueError) as exc:
        read_only.spreadsheet_to_xlsx(702, [])
    assert 'Workbooks opened with existing file are read only' in str(exc.value)
    spreadsheet_smdx_304 = [
        ['Address Offset', 'Group Offset', 'Name', 'Value', 'Count', 'Type', 'Size', 'Scale Factor', 'Units',
         'RW Access (RW)', 'Mandatory (M)', 'Static (S)', 'Label', 'Description', 'Detailed Description'],
        ['', '', 'inclinometer', '', '', 'group', '', '', '', '', '', '', 'Inclinometer Model',
         'Include to support orientation measurements', ''],
        ['', '', 'ID', 304, '', 'uint16', '', '', '', '', 'M', 'S', 'Model ID', 'Model identifier', ''],
        ['', '', 'L', '', '', 'uint16', '', '', '', '', 'M', 'S', 'Model Length', 'Model length', ''],
        ['', '', 'inclinometer.incl', '', 0, 'group', '', '', '', '', '', '', '', '', ''],
        ['', '', 'Inclx', '', '', 'int32', '', -2, 'Degrees', '', 'M', '', 'X', 'X-Axis inclination', ''],
        ['', '', 'Incly', '', '', 'int32', '', -2, 'Degrees', '', '', '', 'Y', 'Y-Axis inclination', ''],
        ['', '', 'Inclz', '', '', 'int32', '', -2, 'Degrees', '', '', '', 'Z', 'Z-Axis inclination', '']
    ]
    writable = xlsx.ModelWorkbook()
    writable.spreadsheet_to_xlsx(304, spreadsheet_smdx_304)
    actual_rows = writable.xlsx_iter_rows(writable.wb['304'])
    for expected in spreadsheet_smdx_304:
        assert next(actual_rows) == expected
def test_to_xlsx(tmp_path):
    """to_xlsx renders the model-304 JSON definition into the expected rows."""
    expected_rows = [
        ['Address Offset', 'Group Offset', 'Name', 'Value', 'Count', 'Type', 'Size', 'Scale Factor', 'Units',
         'RW Access (RW)', 'Mandatory (M)', 'Static (S)', 'Label', 'Description'],
        ['', '', 'inclinometer', '', '', 'group', '', '', '', '', '', '', 'Inclinometer Model',
         'Include to support orientation measurements'],
        [0, '', 'ID', 304, '', 'uint16', '', '', '', '', 'M', 'S', 'Model ID', 'Model identifier'],
        [1, '', 'L', '', '', 'uint16', '', '', '', '', 'M', 'S', 'Model Length', 'Model length'],
        ['', '', 'inclinometer.incl', '', 0, 'group', '', '', '', '', '', '', '', ''],
        ['', 0, 'Inclx', '', '', 'int32', '', -2, 'Degrees', '', 'M', '', 'X', 'X-Axis inclination'],
        ['', 2, 'Incly', '', '', 'int32', '', -2, 'Degrees', '', '', '', 'Y', 'Y-Axis inclination'],
        ['', 4, 'Inclz', '', '', 'int32', '', -2, 'Degrees', '', '', '', 'Z', 'Z-Axis inclination']
    ]
    # renamed from the misleading `m_703`: this file is model 304, not 703
    with open('sunspec2/models/json/model_304.json') as f:
        model_304 = json.load(f)
    wb = xlsx.ModelWorkbook()
    wb.to_xlsx(model_304)
    actual_rows = wb.xlsx_iter_rows(wb.wb['304'])
    for expected in expected_rows:
        assert next(actual_rows) == expected
|
#!/usr/bin/env python3
# encoding: utf-8
"""
Created by John DiBaggio on 2018-08-10
The d-neighborhood Neighbors(Pattern, d) is the set of all k-mers whose Hamming distance from Pattern does not exceed d.
Generate the d-Neighborhood of a String
Find all the neighbors of a pattern.
Given: A DNA string Pattern and an integer d.
Return: The collection of strings Neighbors(Pattern, d).
Sample Dataset
ACG
1
Sample Output
CCG
TCG
GCG
AAG
ATG
AGG
ACA
ACC
ACT
ACG
Execute like:
python3 src/ba1n.py data/ba1n.txt output/ba1n.txt
"""
__author__ = 'johndibaggio'

import sys
import fileinput

# Support running both as a script and as a package module.
if __name__ == '__main__':
    from lib.bio_util import BioUtil
else:
    from .lib.bio_util import BioUtil

argv = list(sys.argv)
input_pattern = ""
input_d = 0
# Parse the input file: a line holding an integer is d, every other
# non-empty line is concatenated into the pattern.
for line in fileinput.input(argv[1]):
    if len(line) > 0:
        text = line.replace('\n', '')
        try:
            input_d = int(text)
        except ValueError:
            input_pattern += text

d_neighbors = BioUtil.neighbors(input_pattern, input_d)
output_string = "\n".join(d_neighbors)
print("The {}-neighborhood of pattern \"{}\" are:\n{}".format(input_d, input_pattern, output_string))
# with-statement guarantees the output file is closed even if the write fails
# (the original opened it manually and would leak the handle on error)
with open(argv[2], "w+") as output_file:
    output_file.write(output_string)
|
# -*- coding: utf-8 -*-
from datetime import tzinfo
from .transition_type import TransitionType
class TimezoneInfo(tzinfo):
    """Concrete ``tzinfo`` that couples a Timezone with one TransitionType
    (a UTC offset / DST flag / abbreviation triple)."""

    def __init__(self, tz, transition_type):
        """
        :type tz: Timezone
        :type transition_type: TransitionType
        """
        self._tz = tz
        self._transition_type = transition_type

    @classmethod
    def create(cls, tz, utc_offset, is_dst, abbrev):
        # Alternate constructor: build the TransitionType from its parts.
        return cls(tz, TransitionType(utc_offset, is_dst, abbrev))

    @property
    def tz(self):
        return self._tz

    @property
    def name(self):
        # Delegates to the owning Timezone's private name attribute.
        return self._tz._name

    @property
    def offset(self):
        return self._transition_type.utc_offset

    @property
    def is_dst(self):
        return self._transition_type.is_dst

    @property
    def abbrev(self):
        return self._transition_type.abbrev

    @property
    def adjusted_offset(self):
        return self._transition_type.adjusted_offset

    def tzname(self, dt):
        return self.abbrev

    def utcoffset(self, dt):
        # Standard tzinfo protocol: None input -> None output.
        if dt is None:
            return None
        elif dt.tzinfo is not self:
            # dt carries a different tzinfo of this family: re-resolve the
            # offset for dt's actual transition via the owning Timezone.
            dt = self.tz.convert(dt)
            return dt.tzinfo.adjusted_offset
        else:
            return self._transition_type.adjusted_offset

    def dst(self, dt):
        # A non-DST transition type reports no DST offset at all.
        if not self.is_dst:
            return None

        if dt is None:
            return None
        elif dt.tzinfo is not self:
            # Same re-resolution dance as utcoffset().
            dt = self.tz.convert(dt)

            offset = dt.tzinfo._transition_type.adjusted_offset
        else:
            offset = self._transition_type.adjusted_offset

        return offset

    def fromutc(self, dt):
        # Map a UTC datetime to local time: find the transition in effect
        # (index clamped to 0 for times before the first transition), then
        # apply that transition's offset and attach its tzinfo.
        dt = dt.replace(tzinfo=None)

        idx = max(0, self._tz._find_transition_index(dt, '_utc_time') - 1)
        tzinfo = self._tz._tzinfos[self._tz._transitions[idx]._transition_type_index]

        return (dt + tzinfo.adjusted_offset).replace(tzinfo=tzinfo)

    def __repr__(self):
        return '<TimezoneInfo [{}, {}, {}]>'.format(
            self.name,
            self.offset,
            self.is_dst
        )
class _UTC(TimezoneInfo):
    """Fixed UTC zone: zero offset, never DST, no transitions."""

    def __init__(self):
        super(_UTC, self).__init__(None, TransitionType(0, False, 'GMT'))

    @property
    def name(self):
        return 'UTC'

    def dst(self, dt):
        # UTC has no daylight saving component.
        return None

    def utcoffset(self, dt):
        # Constant offset, so no conversion dance is needed.
        return self._transition_type.adjusted_offset

    def fromutc(self, dt):
        # UTC is its own local time; just attach this tzinfo.
        return dt.replace(tzinfo=self)


# Module-level singleton instance.
UTC = _UTC()
|
import json
import logging
from typing import List
import pytest
from tests import issues_file_path, pull_requests_file_path, \
pull_request_file_path
from tests.test_cases import pull_requests_get_all_test_cases
@pytest.fixture(autouse=True)
def set_log_level(caplog):
    # Capture everything down to DEBUG so tests can assert on log output.
    caplog.set_level(logging.DEBUG)
@pytest.fixture(autouse=True)
def use_test_database(monkeypatch):
    # Point the session machinery at the test database BEFORE importing
    # createdb — the import below reads session state, so the order matters.
    monkeypatch.setattr('bitcoin_acks.database.session.is_test', True)
    from bitcoin_acks.database.createdb import create_database, drop_database
    # Recreate a clean schema for every test.
    drop_database(echo=False)
    create_database(echo=False)
@pytest.fixture
def repository():
    """Insert a bitcoin/bitcoin Repositories row and return the detached instance."""
    from bitcoin_acks.models.repositories import Repositories
    from bitcoin_acks.database.session import session_scope
    repo = Repositories()
    repo.path = 'bitcoin'
    repo.name = 'bitcoin'
    with session_scope() as session:
        session.add(repo)
        session.flush()
        # detach so the object stays usable after the session closes
        session.expunge(repo)
    return repo
@pytest.fixture(scope='session')
def issues_data() -> List[dict]:
    """Load the canned issues JSON once per test session."""
    with open(issues_file_path, 'r') as issues_file:
        return json.load(issues_file)
@pytest.fixture(scope='session')
def pull_requests_data() -> List[dict]:
    """Collect all canned pull-request JSON fixtures that exist on disk."""
    data = []
    # single-PR fixture, if captured
    try:
        with open(pull_request_file_path, 'r') as single_file:
            data.append(json.load(single_file))
    except FileNotFoundError:
        pass
    # one file per (state, limit) test case, if captured
    for state, limit in pull_requests_get_all_test_cases:
        file_path = pull_requests_file_path.format(state=str(state),
                                                   limit=str(limit))
        try:
            with open(file_path, 'r') as batch_file:
                data.extend(json.load(batch_file))
        except FileNotFoundError:
            pass
    if not len(data):
        raise Warning('no pull request test data available')
    return data
@pytest.fixture(scope='session')
def valid_pr_number() -> int:
    # A PR number known to exist in the bitcoin/bitcoin test data set.
    return 10757
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import unittest
from TestUtils import TestUtilsMixin
log = logging.getLogger('test.auto')
class MergeMeta(unittest.TestCase, TestUtilsMixin):
    """Split and merge the !METADATA table"""
    order = 30
    settings = TestUtilsMixin.settings.copy()

    def setUp(self):
        TestUtilsMixin.setUp(self)

    def runTest(self):
        # Split !METADATA, create some tables, then merge it back together.
        script = '''
addsplits -t !METADATA 1 2 3 4 5
createtable a1
createtable a2
createtable a3
createtable a4
createtable a5
merge -t !METADATA
yes
sleep 2
scan -np -t !METADATA
'''
        out, err, code = self.shell(self.masterHost(), script)
        assert code == 0
        # look for delete entries for the abandoned directories
        assert out.find('~del') >= 0
class MergeMetaFail(unittest.TestCase, TestUtilsMixin):
    """test a failed merge of the !METADATA table"""
    order = 30
    settings = TestUtilsMixin.settings.copy()

    def setUp(self):
        TestUtilsMixin.setUp(self)

    def runTest(self):
        # Merging an invalid range must fail with a non-zero exit code.
        script = '''
merge -t !METADATA -b ! -e !!
'''
        out, err, code = self.shell(self.masterHost(), script)
        assert code != 0
def suite():
    """Assemble the merge-metadata tests into a suite."""
    tests = unittest.TestSuite()
    tests.addTests([MergeMeta(), MergeMetaFail()])
    return tests
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#########################################################################################
# #
# grating_plot.py: create grating movement plots for monthly report #
# #
#           author: t. isobe (tisobe@cfa.harvard.edu)                                   #
# #
# last update: Oct 04, 2021 #
# #
#########################################################################################
import os
import sys
import re
import string
import math
import unittest
import time
import matplotlib as mpl
# Use a non-interactive backend when run as a script (no X display needed).
if __name__ == '__main__':
    mpl.use('Agg')
from pylab import *
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import matplotlib.lines as lines
#
#--- reading directory list
#
path = '/data/mta/Script/Month/SIM/house_keeping/dir_list_py'

with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]

# Each config line is "<value> : <name>"; exec binds <name> = <value> at
# module level (e.g. bin_dir, mta_dir used below).
# NOTE(review): exec on file contents — fine for this trusted house_keeping
# config, but do not point `path` at untrusted input.
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    exec("%s = %s" %(var, line))
#
#--- append a path to a private folder to python directory
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
#--- converTimeFormat contains MTA time conversion routines
#
import mta_common_functions as mcf

# OTG (grating) event database read by get_grat_data()
datafile = "/data/mta/www/mta_otg/OTG_sorted.rdb"
#-----------------------------------------------------------------------------------------
#-- plot_grat_movement: create grating movement plots ---
#-----------------------------------------------------------------------------------------
def plot_grat_movement(year, month):
    """
    create grating movement plots
    input:  year, month --- reporting period; data come from the OTG database
    output: monthly_grat.png and monthly_grat_ang.png
    """
    # read binned grating data up to the requested month
    (ltime, h_in_ang, h_out_ang, l_in_ang, l_out_ang,
     h_in, h_out, l_in, l_out) = get_grat_data(year, month)
    # insertion/retraction angle plots
    plot_steps(year, month, ltime, h_in_ang, h_out_ang, l_in_ang, l_out_ang)
    # hetg/letg cumulative count rate plots
    plot_cum_grating(ltime, h_in, l_in)
#-----------------------------------------------------------------------------------------
#-- get_grat_data: read database and extract needed information, then create data --
#-----------------------------------------------------------------------------------------
def get_grat_data(year, mon):
    """
    read the OTG database and create monthly-binned grating movement data
    input:  year --- the last year of the binning period
            mon  --- the last month of the binning period
            also reads: "/data/mta/www/mta_otg/OTG_sorted.rdb"
    output: [ltime, h_in_ang, h_out_ang, l_in_ang, l_out_ang, h_in, h_out, l_in, l_out]
            where:  ltime     --- bin mid-point time in fractional year
                    h_in_ang  --- hetg monthly mean insertion angle
                    h_out_ang --- hetg monthly mean retraction angle
                    l_in_ang  --- letg monthly mean insertion angle
                    l_out_ang --- letg monthly mean retraction angle
                    h_in      --- hetg insertion cumulative count
                    h_out     --- hetg retraction cumulative count
                    l_in      --- letg insertion cumulative count
                    l_out     --- letg retraction cumulative count
    """
#
#--- read data
#
    data = mcf.read_data_file(datafile)
    direct = []
    grating = []
    start = []
    stop = []
    hposa = []
    hposb = []
    fposa = []
    fposb = []
#
#--- skip the header row; keep only rows whose start/stop columns are numeric
#
    for i in range(1, len(data)):
        ent = data[i]
        atemp = re.split(r'\s+', ent)       #--- raw string: '\s' is a deprecated escape in a plain string
        try:
            float(atemp[2])
            float(atemp[4])
        except (ValueError, IndexError):    #--- narrowed from a bare except
            continue
        direct.append(atemp[0].strip())
        grating.append(atemp[1].strip())
        start.append(convert_time(atemp[2]))
        stop.append(convert_time(atemp[4]))
        hposa.append(float(atemp[18]))
        hposb.append(float(atemp[19]))
        fposa.append(float(atemp[20]))
        fposb.append(float(atemp[21]))
#
#--- create start and stop lists of data bin. the width is a month
#
    [blist, elist] = create_monthly_bins(2000, year, mon)
    blen = len(blist)
    ltime = [0 for x in range(0, blen)]
    h_in = [0 for x in range(0, blen)]        #--- hetg insertion cumulative count
    h_in_ang = [0 for x in range(0, blen)]    #--- hetg insertion angle
    h_out = [0 for x in range(0, blen)]       #--- hetg retraction cumulative count
    h_out_ang = [0 for x in range(0, blen)]   #--- hetg retraction angle
    l_in = [0 for x in range(0, blen)]
    l_in_ang = [0 for x in range(0, blen)]
    l_out = [0 for x in range(0, blen)]
    l_out_ang = [0 for x in range(0, blen)]
#
#--- NOTE(review): the loop starts at 1, so bin 0 keeps its initial 0 values
#
    for j in range(1, blen):
        ltime[j] = 0.5 * (blist[j] + elist[j])   #--- take a mid point for the bin's time
#
#--- creating cumulative count; the current bin starts from the previous bin's total
#
        h_in[j] = h_in[j-1]
        h_out[j] = h_out[j-1]
        l_in[j] = l_in[j-1]
        l_out[j] = l_out[j-1]
        h_in_ang_cnt = 0
        h_out_ang_cnt = 0
        l_in_ang_cnt = 0
        l_out_ang_cnt = 0
#
#--- since the data are not ordered by date, go through begining to the end
#--- every bin cycle
#
        for i in range(0, len(start)):
            if start[i] >= blist[j] and start[i] < elist[j]:
                if direct[i] == 'INSR':
#
#--- NOTE(review): both the insertion (fposa) and retraction (hposa) angles are
#--- accumulated from INSR events only; confirm this is the intended source
#
                    if grating[i] == 'HETG':
                        h_in_ang[j] += fposa[i]
                        h_in_ang_cnt += 1
                        h_out_ang[j] += hposa[i]
                        h_out_ang_cnt += 1
                    if grating[i] == 'LETG':
                        l_in_ang[j] += fposa[i]
                        l_in_ang_cnt += 1
                        l_out_ang[j] += hposa[i]
                        l_out_ang_cnt += 1
#
#--- taking monthly average
#
        if h_in_ang_cnt > 0:
            h_in_ang[j] /= h_in_ang_cnt
        if h_out_ang_cnt > 0:
            h_out_ang[j] /= h_out_ang_cnt
        if l_in_ang_cnt > 0:
            l_in_ang[j] /= l_in_ang_cnt
        if l_out_ang_cnt > 0:
            l_out_ang[j] /= l_out_ang_cnt
#
#--- adding in/out count for the month to appropriate bins
#
        h_in[j] += h_in_ang_cnt
        h_out[j] += h_out_ang_cnt
        l_in[j] += l_in_ang_cnt
        l_out[j] += l_out_ang_cnt

    return [ltime, h_in_ang, h_out_ang, l_in_ang, l_out_ang, h_in, h_out, l_in, l_out]
#-----------------------------------------------------------------------------------------
#-- convert_time: convert time format from <year><ydate>.<hh><mm><ss> to frac year ---
#-----------------------------------------------------------------------------------------
def convert_time(otime):
    """
    convert time format from <year><ydate>.<hh><mm><ss> to fractional year
    input:  otime --- time string, e.g. '2014059.122333.1'
    output: fyear --- fractional year, e.g. 2014.1630585
    """
    import calendar
    year  = float(otime[0:4])
    ydate = float(otime[4:7])
    hours = float(otime[8:10])    #--- index 7 is the '.' separator
    mins  = float(otime[10:12])
    secs  = float(otime[12:14])
    #--- stdlib leap-year test replaces the project helper mcf.is_leapyear
    if calendar.isleap(int(year)):
        base = 366.0
    else:
        base = 365.0
    fday  = hours / 24.0 + mins / 1440.0 + secs / 86400.0
    fyear = year + (ydate + fday) / base
    return fyear
#-----------------------------------------------------------------------------------------
#-- create_monthly_bins: create a month wide bin for given periods ---
#-----------------------------------------------------------------------------------------
def create_monthly_bins(ystart, ystop, mstop):
    """
    create a month wide bin for given periods
    input:  ystart --- starting year
            ystop  --- stopping year
            mstop  --- stopping month in the stopping year (bins end before it)
    output: [blist, elist] --- lists of bin start/stop times in fractional year
    """
    import calendar
    #--- cumulative day-of-year at each month boundary (normal / leap year)
    interval1 = [0.0, 31.0, 59.0, 90.0, 120.0, 151.0, 181.0, 212.0, 243.0, 273.0, 304.0, 334.0, 365.0]
    interval2 = [0.0, 31.0, 60.0, 91.0, 121.0, 152.0, 182.0, 213.0, 244.0, 274.0, 305.0, 335.0, 366.0]
    blist = []
    elist = []
    for year in range(ystart, ystop + 1):
        #--- stdlib leap-year test replaces the project helper mcf.is_leapyear
        if calendar.isleap(year):
            interval = interval2
            base = 366.0
        else:
            interval = interval1
            base = 365.0
        #--- go around 12 months
        for i in range(0, 12):
            if year == ystop and i >= mstop:
                break
            begin = year + interval[i] / base
            end = year + interval[i + 1] / base
            #--- clamp December's end exactly onto the year boundary
            if int(end) > year:
                end = year + 1
            blist.append(begin)
            elist.append(end)
    return [blist, elist]
#-----------------------------------------------------------------------------------------
#-- : create insertion and retraction angle plots for hetig and letig --
#-----------------------------------------------------------------------------------------
def plot_steps(year, mon, ltime, set1, set2, set3, set4):
    """
    create insertion and retraction angle plots for hetg and letg
    input:  year  --- the last year of the plotting period
            mon   --- the last month of the plotting period
            ltime --- time in fractional year
            set1  --- mean hetg insertion angle
            set2  --- mean hetg retraction angle
            set3  --- mean letg insertion angle
            set4  --- mean letg retraction angle
            where "mean" means the monthly average
    output: monthly_grat_ang.png
    """
#
#--- x range; add an extra year of margin when past mid-year
#
    xmin = 2000
    xmax = year + 1
    if mon > 6:
        xmax += 1
#
#--- y ranges: insertion angle (top) and retraction angle (bottom) panels
#
    ymin1 = 5.0
    ymax1 = 10.0
    ymin2 = 76
    ymax2 = 81
#
#--- set a few parameters
#
    fsize = 9       #--- font size
    lsize = 0       #--- line width (0: markers only)
    color = 'red'
    marker = 'o'
    msize = 2
    plt.close("all")
    mpl.rcParams['font.size'] = fsize
    props = font_manager.FontProperties(size=fsize)
    plt.subplots_adjust(hspace=0.20, wspace=0.11)   #--- spacing of panels
#
#--- Mean HETG Inserted Angle
#
    a1 = plt.subplot(221)
    plot_sub(a1, ltime, set1, xmin, xmax, ymin1, ymax1, color, lsize, marker, msize, tline='Mean HETG Inserted Angle')
    a1.set_ylabel('Insertion Angle (Degree)', size=fsize)
#
#--- Mean HETG Retracted Angle
#
    a2 = plt.subplot(223)
    plot_sub(a2, ltime, set2, xmin, xmax, ymin2, ymax2, color, lsize, marker, msize, tline='Mean HETG Retracted Angle')
    a2.set_xlabel('Time (year)', size=fsize)
    a2.set_ylabel('Retraction Angle (Degree)', size=fsize)
#
#--- Mean LETG Inserted Angle
#
    a3 = plt.subplot(222)
    plot_sub(a3, ltime, set3, xmin, xmax, ymin1, ymax1, color, lsize, marker, msize, tline='Mean LETG Inserted Angle')
#
#--- Mean LETG Retracted Angle (label typo 'Rectracted' fixed)
#
    a4 = plt.subplot(224)
    plot_sub(a4, ltime, set4, xmin, xmax, ymin2, ymax2, color, lsize, marker, msize, tline='Mean LETG Retracted Angle')
    a4.set_xlabel('Time (year)', size=fsize)
#
#--- save the plot; plt.gcf(): the bare name 'matplotlib' is not bound in this module
#
    fig = plt.gcf()
    fig.set_size_inches(7.5, 3.75)
    outname = 'monthly_grat_ang.png'
    plt.savefig(outname, format='png', dpi=200)
#-----------------------------------------------------------------------------------------
#-- plot_cum_grating: plot cummulative count rates of hetig and letig insertion --
#-----------------------------------------------------------------------------------------
def plot_cum_grating(ltime, h_in, l_in):
    """
    plot cumulative counts of hetg and letg insertions
    input:  ltime --- fractional year
            h_in  --- hetg insertion cumulative count (month step)
            l_in  --- letg insertion cumulative count
    output: monthly_grat.png  (docstring previously said .prn; the code writes .png)
    """
#
#--- set x axis plotting range from the current date
#
    out = time.strftime('%Y:%m', time.gmtime())
    atemp = re.split(':', out)
    year = int(atemp[0])
    mon = int(atemp[1])
    xmin = 2000
    xmax = year + 1
    if mon > 6:
        xmax += 1
#
#--- set y axis plotting range (shared by both panels; 10% headroom plus margin)
#
    ymin = 0.0
    ymax = int(1.1 * max(max(h_in), max(l_in))) + 10
#
#--- set a few parameters
#
    fsize = 9
    lsize = 0
    color = 'red'
    marker = 'o'
    msize = 3
    plt.close("all")
    mpl.rcParams['font.size'] = fsize
    props = font_manager.FontProperties(size=fsize)
    plt.subplots_adjust(hspace=0.08, wspace=0.10)
#
#--- HETG Cumulative Count Plot (left panel)
#
    a1 = plt.subplot(121)
    plot_sub(a1, ltime, h_in, xmin, xmax, ymin, ymax, color, lsize, marker, msize, tline='HETG')
    a1.set_xlabel('Time (year)', size=fsize)
    a1.set_ylabel('Cumulative Insertion Counts', size=fsize)
#
#--- LETG Cumulative Count Plot (right panel)
#
    a2 = plt.subplot(122)
    plot_sub(a2, ltime, l_in, xmin, xmax, ymin, ymax, color, lsize, marker, msize, tline='LETG')
    a2.set_xlabel('Time (year)', size=fsize)
#
#--- save the plot; plt.gcf(): the bare name 'matplotlib' is not bound in this module
#
    fig = plt.gcf()
    fig.set_size_inches(10.0, 7.5)
    outname = 'monthly_grat.png'
    plt.savefig(outname, format='png', dpi=100)
#-----------------------------------------------------------------------------------------
#-- plot_sub: plotting each panel --
#-----------------------------------------------------------------------------------------
def plot_sub(ap, x, y, xmin, xmax, ymin, ymax, color, lsize, marker, msize, tline=''):
    """
    plotting each panel
    input:  ap     --- axes of the panel
            x      --- x data list
            y      --- y data list
            xmin   --- xmin
            xmax   --- xmax
            ymin   --- ymin
            ymax   --- ymax
            color  --- color of data point
            lsize  --- line size
            marker --- marker shape
            msize  --- size of the marker
            tline  --- extra text line shown inside the panel
    """
    ap.set_autoscale_on(False)
    ap.set_xbound(xmin, xmax)
    #--- positional limits: the xmin=/ymin= keyword forms are deprecated/removed
    ap.set_xlim(xmin, xmax, auto=False)
    ap.set_ylim(ymin, ymax, auto=False)
    #--- plot on the given axes explicitly instead of the implicit pylab state
    ap.plot(x, y, color=color, lw=lsize, marker=marker, markersize=msize)
    if tline != '':
        #--- place the label near the upper-left corner of the panel
        xpos = 0.05 * (xmax - xmin) + xmin
        ypos = ymax - 0.10 * (ymax - ymin)
        ap.text(xpos, ypos, tline, fontsize=11, style='italic', weight='bold')
#-----------------------------------------------------------------------------------------
#-- TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST TEST ---
#-----------------------------------------------------------------------------------------
class TestFunctions(unittest.TestCase):
    """
    testing functions (requires access to the MTA data files)
    """
    #------------------------------------------------------------
    def test_get_grat_data(self):
        #--- get_grat_data requires the end of the period; use the current month
        #--- (the original called it with no arguments, which raises TypeError)
        out = time.strftime('%Y:%m', time.gmtime())
        atemp = re.split(':', out)
        [ltime, h_in_ang, h_out_ang, l_in_ang, l_out_ang, h_in, h_out, l_in, l_out] \
            = get_grat_data(int(atemp[0]), int(atemp[1]))
        h_in_ang_test = [5.96, 0, 5.96, 6.086666666666667, 5.96]
        h_out_ang_test = [79.09, 0, 79.09000000000002, 79.09, 79.09]
        l_in_ang_test = [7.09, 7.09, 7.09, 6.71, 6.773333333333333]
        l_out_ang_test = [78.15, 78.15, 77.96, 77.96, 77.96]
        h_in_test = [368, 368, 374, 377, 379]
        h_out_test = [368, 368, 374, 377, 379]
        l_in_test = [204, 206, 207, 208, 214]
        l_out_test = [204, 206, 207, 208, 214]
        #--- assertEqual: assertEquals was removed in Python 3.12
        self.assertEqual(h_in_ang[100:105], h_in_ang_test)
        self.assertEqual(h_out_ang[100:105], h_out_ang_test)
        self.assertEqual(l_in_ang[100:105], l_in_ang_test)
        self.assertEqual(l_out_ang[100:105], l_out_ang_test)
        self.assertEqual(h_in[100:105], h_in_test)
        self.assertEqual(h_out[100:105], h_out_test)
        self.assertEqual(l_in[100:105], l_in_test)
        self.assertEqual(l_out[100:105], l_out_test)
    #------------------------------------------------------------
    def test_convert_time(self):
        otime = '2014059.122333.1'
        val = round(convert_time(otime), 7)
        self.assertEqual(val, 2014.1630585)
    #------------------------------------------------------------
    def test_create_monthly_bins(self):
        out1 = [2013.0, 2013.0849315068492, 2013.1616438356164, 2013.2465753424658, 2013.3287671232877, 2013.4136986301369, 2013.495890410959, 2013.5808219178082, 2013.6657534246576, 2013.7479452054795, 2013.8328767123287, 2013.9150684931508, 2014.0, 2014.0849315068492]
        out2 = [2013.0849315068492, 2013.1616438356164, 2013.2465753424658, 2013.3287671232877, 2013.4136986301369, 2013.495890410959, 2013.5808219178082, 2013.6657534246576, 2013.7479452054795, 2013.8328767123287, 2013.9150684931508, 2014, 2014.0849315068492, 2014.1616438356164]
        ystart = 2013
        ystop = 2014
        mstop = 2
        [blist, elist] = create_monthly_bins(ystart, ystop, mstop)
        self.assertEqual(blist, out1)
        self.assertEqual(elist, out2)
#-----------------------------------------------------------------------------------------
#
#--- pylab plotting routine related modules
#
from pylab import *
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
import matplotlib.lines as lines

#--- any command line argument switches from unit tests to the real plotting run
chk = 0
if len(sys.argv) == 2:
    chk = 1

if __name__ == '__main__':
    if chk > 0:
        #--- plot_grat_movement requires the period; the original called it with
        #--- no arguments (TypeError) -- use the current year/month
        out = time.strftime('%Y:%m', time.gmtime())
        atemp = re.split(':', out)
        plot_grat_movement(int(atemp[0]), int(atemp[1]))
    else:
        unittest.main()
|
"""
Python 3.6.8 script for accessing the www.collegefootballdata.com API
for getting team play as a Pandas dataframe object.
Documentation of JSON GET request may be found here:
https://api.collegefootballdata.com/api/docs/?url=/api-docs.json
This script will return the data as a Pandas dataframe object where each row
is a play record and the column is a JSON property such as 'down' or
'yards_gained'
Permission to use for any purpose granted - would love to hear its purpose.
January 2020, Bob Scharmann, Jr.
"""
import json
import pandas as pd
import requests
def get_team_plays(url="https://api.collegefootballdata.com/plays",
                   year=2019,
                   team="Florida State"):
    """
    Get team play-by-play data from the www.collegefootballdata.com API.

    Returns a Pandas DataFrame where each row is a play record and each
    column is a JSON property such as 'down' or 'yards_gained'.  On an
    HTTP error the response's raise_for_status() raises requests.HTTPError.
    """
    params = {"team": team, "year": year}
    my_response = requests.get(url, params=params)
    data = pd.DataFrame()
    # For successful API call, response code will be 200 (OK)
    if my_response.ok:
        # requests decodes the JSON payload directly; json.loads(content) is redundant
        j_data = my_response.json()
        # pd.json_normalize replaces the deprecated pd.io.json.json_normalize
        j_data_normalized = pd.json_normalize(j_data)
        data = pd.DataFrame.from_records(j_data_normalized)
    else:
        # If response code is not ok (200), raise the http error with description
        my_response.raise_for_status()
    return data
|
# how often does it beat ivv given 50 days?
# how often is 1 year up?
import z
import buy
import regen_stock
import gained_discount
# set to a ticker symbol to restrict processing to that one stock
debug = None
# full trading-date list from the data store; ordering assumed oldest-first
# before the reverse below -- TODO confirm against z.getp("dates")
dates = z.getp("dates")
# ~8 years of trading days (252 per year) back from the end of the list
years8 = -1*252*8
asdate8 = dates[years8]
index8 = dates.index(asdate8)
# reference calendar: the last ~8 years of dates, reversed
cdates = dates[index8:]
cdates.reverse()
def proc(astock):
    """Compare astock's per-row dates against the reference calendar cdates.

    Returns the last date that still lined up before the first mismatch,
    None when the very first date already differs, and None (implicitly)
    when every date matches.
    """
    row_dates = [row['Date'] for row in buy.getRows(astock, asdate8)]
    row_dates.reverse()
    last_matched = None
    for idx, c_date in enumerate(row_dates):
        if c_date != cdates[idx]:
            return last_matched
        last_matched = c_date
    # all dates matched the reference calendar -> nothing to report
def procs( cleanup = True):
    """
    Check every stock's date series against the reference calendar and,
    when cleanup is set, delete or trim the stocks whose dates diverge.
    """
    stocks = [debug.upper()] if debug else z.getp("listofstocks")
    # NOTE(review): prob_dic is never populated or read -- appears unused
    prob_dic = dict()
    # stock -> last date that still matched the reference calendar
    missmatch = dict()
    for i, astock in enumerate(stocks):
        if not i % 100:
            # progress marker every 100 symbols
            print("astock: {}".format( astock))
        try:
            miss = proc(astock)
            if miss:
                missmatch[astock] = miss
        except Exception as e:
            print("astock: {}".format( astock))
            z.trace(e)
            pass
    if not cleanup:
        # report-only mode: print the mismatches and stop
        print("missmatch: {}".format( missmatch))
        return
    delets = list()
    cleanups = dict()
    for astock, date in missmatch.items():
        idx = cdates.index(date)
        # stocks whose last matching date sits within the first 120 entries of
        # cdates are deleted outright; the rest are cleaned up to that date
        # -- TODO confirm cdates ordering to interpret the 120 threshold
        if idx < 120:
            delets.append(astock)
        else:
            cleanups[astock] = date
    gained_discount.batchdelete(delets)
    for astock, date in cleanups.items():
        gained_discount.cleanup(astock, date)
if __name__ == '__main__':
    # run in report-only mode (cleanup=False: no deletions or trims)
    procs(False)
|
from Settings_file import HOST, PORT
import socket
def OpenSocket():
    """Open a TCP connection to (HOST, PORT) and return the connected socket.

    The original discarded the socket object, leaking the open connection;
    returning it lets the caller use and close it.  Backward compatible:
    previous callers ignored the (None) return value.
    """
    s = socket.socket()
    s.connect((HOST, PORT))
    return s
import argparse
import json
import logging
import os
from email import utils
from tornado.template import Template
from providers import facebook
from providers.google_rss import GoogleRSS
from utils.config import version, getLogHandler
parser = argparse.ArgumentParser(prog='G+RSS.Poller')
parser.add_argument('--log_path', required=True)
args = parser.parse_args()

logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %H:%M:%S')
logger = logging.getLogger(__name__)
logger.addHandler(getLogHandler(os.path.join(args.log_path, 'test_google.log')))
logger.level = logging.INFO

# context managers close both files (the json file was previously leaked)
with open('templates/feed.xml') as f:
    template = Template(f.read())

with open('data/113347540216001053968.json') as json_data:
    data = json.load(json_data)
#print(data)

items = GoogleRSS.gen_items(data, option='photo')
print(template.generate(version=version, gid='UNIT_TEST', pubDate=utils.formatdate(), items=items))
#for item in items:
#    #message = facebook.FacebookPublisher._format_message(item['description'])
#    if item['type'] in ('album', 'photo'):
#        print({'photo_id': item['photo_id'], 'album_id': item['album_id']})
|
from itertools import groupby
def get_range():
    """Return the candidate numbers 367479..893697 (inclusive) as a list."""
    return list(range(367479, 893698))
def has_doubles(n):
    """Return n when its decimal representation repeats a digit, else None."""
    digits = str(n)
    if len(set(digits)) < len(digits):
        return n
    return None
def check_proof(with_double,no_smaller_end):
    # NOTE(review): this builds a local range, never uses either argument, and
    # returns None -- it looks like an unfinished verification stub; confirm intent
    start_range=[i for i in range(367479,893698)]
def remove_smaller_ends(number):
    """Keep only 6-digit numbers whose digits never decrease left-to-right.

    Monotonic numbers are passed through get_grouping (which may reject them
    by digit-run shape); non-monotonic numbers yield None so the caller's
    "is not None" filter drops them.  Flattens the original 5-deep if-pyramid
    into a single all() guard.
    """
    s = str(number)
    if all(s[i] <= s[i + 1] for i in range(5)):
        return get_grouping(number)
    return None
def get_grouping(number):
    """Return number unless its digit run-lengths are disallowed, else None.

    Rejected shapes: any run of 5 or 6 identical digits, and a run of 3 or 4
    without an accompanying run of exactly 2 somewhere in the number.
    """
    runs = [len(list(group)) for _, group in groupby(str(number))]
    if 6 in runs or 5 in runs:
        return None
    if (4 in runs or 3 in runs) and 2 not in runs:
        return None
    return number
# keep only candidates containing at least one repeated digit
result=[i for i in list(map(has_doubles,get_range())) if i is not None]
# then drop candidates whose digits decrease or whose digit runs are disallowed
result_end=[i for i in list(map(remove_smaller_ends,result)) if i is not None]
# NOTE(review): check_proof is a no-op stub, so this call produces no output
check_proof(result,result_end)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from girder import logger
from girder.api import access
from girder.api.describe import Description, describeRoute
from girder.api.rest import Resource
from girder.models.model_base import GirderException
from .settings import SmqtkSetting
from .utils import base64FromUrl
import requests
class Smqtk(Resource):
    """Girder REST resource proxying descriptor computation to an SMQTK service."""

    def __init__(self):
        # NOTE(review): Resource.__init__ is not invoked here -- confirm this is
        # acceptable for the girder version in use
        setting = SmqtkSetting()
        # base URL of the SMQTK nearest-neighbor service
        self.search_url = setting.get('IMAGE_SPACE_SMQTK_NNSS_URL')
        self.resourceName = 'smqtk'
        self.route('POST', ('compute',), self.computeDescriptor)

    @access.user
    @describeRoute(
        Description('Compute the descriptor for a given image.')
        .param('url', 'URL of image to compute descriptor of')
        .errorResponse('Failed to compute descriptor.', 500)
    )
    def computeDescriptor(self, params):
        """POST /smqtk/compute: fetch the image, base64-encode it, and forward
        it to the SMQTK service; return the service's JSON response."""
        # @todo Naively assuming we will always be able to retrieve the URL
        image, _type = base64FromUrl(params['url'])
        r = requests.post('%(base_url)s/compute/base64://%(image)s?content_type=%(type)s' % {
            'base_url': self.search_url,
            'image': image,
            'type': _type})
        if not r.ok:
            # surface the upstream failure as a girder exception (HTTP 500)
            logger.error('Failed to compute SMQTK descriptor for image %s.' % params['url'])
            raise GirderException('Failed to compute descriptor',
                                  'girder.plugins.imagespace_smqtk.smqtk.computeDescriptor')
        else:
            return r.json()
|
def count_iter(n):
    """Run Kaprekar's routine from n until it reaches 6174; report whether
    it took exactly seven iterations.

    Precondition: n is a 4-digit number that is not a repdigit (repdigits
    never reach 6174 and would loop forever).
    """
    steps = 0
    while int(n) != 6174:
        ascending = ''.join(sorted(str(n)))
        descending = ascending[::-1]
        # subtract the ascending arrangement from the descending one,
        # keeping 4 digits with leading zeros
        n = str(abs(int(ascending) - int(descending))).zfill(4)
        steps += 1
    return steps == 7
# count the 4-digit numbers (repdigits excluded: they never reach 6174)
# whose Kaprekar chain takes exactly seven iterations
sevens = sum(count_iter(i) for i in range(1000, 10000) if len(set(str(i))) > 1)
print(sevens)
from clients.base import BaseClient
class LanguageClient(BaseClient):
    """Federated-learning client that trains a recurrent language model locally."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def local_update(self, epoch):
        """Run `epoch` passes over the local dataloader; the LR scheduler
        (when present) steps once per pass."""
        self.model.train()
        for _ in range(epoch):
            hidden = self.model.init_hidden(self.dataloader.batch_size)
            for batch_inputs, batch_targets in self.dataloader:
                batch_inputs = batch_inputs.to(self.device)
                batch_targets = batch_targets.to(self.device)
                self.optimizer.zero_grad()
                # repackage the hidden state between batches -- presumably to cut
                # the autograd graph across batches (see model.repackage_hidden)
                hidden = self.model.repackage_hidden(hidden)
                logits, hidden = self.model(batch_inputs, hidden)
                batch_loss = self.criterion(logits, batch_targets)
                batch_loss.backward()
                self.optimizer.step()
            if self.scheduler is not None:
                self.scheduler.step()
|
"""
A matrix will be an N sized list of 4 element lists.
Each individual list will represent an [x, y, z, 1] point.
For multiplication purposes, consider the lists like so:
x0 x1 xn
y0 y1 yn
z0 z1 ... zn
1 1 1
"""
import math
#print the matrix such that it looks like
#the template in the top comment
def print_matrix(matrix):
    """Print the matrix in the column layout described in the module comment:
    one line per component (x, y, z, 1), one column per point."""
    for component in range(4):
        for point in matrix:
            print(point[component], " ", end="")
        print("\n")
#turn the paramter matrix into an identity matrix
#you may assume matrix is square
def ident(matrix):
    """Overwrite the (assumed square 4x4) matrix in place with the identity."""
    for r in range(4):
        for c in range(4):
            matrix[r][c] = 1 if r == c else 0
#multiply m1 by m2, modifying m2 to be the product
#m1 * m2 -> m2
def matrix_mult(m1, m2):
    """Multiply m1 by m2, storing the product back into m2 (m1 * m2 -> m2)."""
    for col in range(len(m2)):
        point = m2[col]
        # each output component r is the dot product of m1's row r with the point
        m2[col] = [sum(m1[k][r] * point[k] for k in range(4)) for r in range(4)]
def new_matrix(rows=4, cols=4):
    """Return a zero matrix as a list of `cols` columns, each `rows` long."""
    return [[0] * rows for _ in range(cols)]
|
import os
import subprocess
import re
#list of User folders
usr = os.listdir("/home")
def check_amount_monitors():
    """Run the helper bash script and return its stdout (the count of
    currently active monitors, as text)."""
    result = subprocess.run(
        "bash ~/TvPost/Bash_files/Screen_divitions_config/check_active_monitors.sh",
        shell=True,
        capture_output=True,
        text=True)
    return result.stdout
def formato_50_50():
    """
    Configure the 50/50 split-screen layout.

    Regenerates and reads base_resolution.txt, appends the active-monitor
    count, and invokes 50_50.sh with:
      $1 = width in pixels        $2 = width in mm
      $3 = height in pixels       $4 = height in mm
      $5 = video adapter name     $6 = amount of active monitors
    """
    # (re)generate the base resolutions file first
    os.system('python3 /home/pi/TvPost/Py_files/Screen_format/Screen_info.py;')
    resolution_values = []
    try:
        with open(os.path.join("/home", usr[0], "TvPost/Resolutions", "base_resolution.txt")) as file:
            for line in file:
                # lines carry their value after a ':-' separator
                regex = re.search(r':-(.+)', line)
                if regex:
                    resolution_values.append(regex.group(1))
    except OSError:
        # narrowed from a bare except; only a missing/unreadable file is expected
        print('No se encuentra el archivo')
        return
    # the active-monitor count becomes the sixth script argument
    resolution_values.append(int(check_amount_monitors()))
    if len(resolution_values) < 6:
        # malformed resolutions file: bail out instead of raising IndexError below
        print('No se encuentra el archivo')
        return
    os.system('bash ~/TvPost/Bash_files/Screen_divitions_config/50_50.sh {} {} {} {} {} {}'.format(
        resolution_values[0], resolution_values[1], resolution_values[2],
        resolution_values[3], resolution_values[4], resolution_values[5]))

formato_50_50()
|
# simple console quiz: ask the user for the value of 2**64 and check the answer
print('\n MSSERG | 11.12.2016')
p1=int(input(' 2^64 ='))
if p1 == 2**64:
    print(" Правильно")    # "Correct"
else:
    print(" Неправильно")  # "Wrong"
# "End of program" -- final input() keeps the console window open
input(' Конец программы')
import os
import requests
import pandas as pd
from numpy import array
from numpy import zeros
from numpy import savetxt
from scipy.cluster.vq import vq, kmeans2, whiten
from sets import Set
site = "https://www.hackerschool.com"
# NOTE(review): environ.get returns None when HS_API_TOKEN is unset, which
# makes this concatenation raise TypeError -- confirm the variable is always set
access_token = "?access_token=" + os.environ.get('HS_API_TOKEN')
caller_email = "mdulieu@gmail.com"
# Get people from api and return a json object (list of dictionaries)
def get_people_in_all_batches():
    """
    Fetch every batch from the API, then fetch and concatenate the people
    of each batch (one request per batch).  Returns a list of person dicts.

    Cleanup: drops the unused batch_id list and the index-based re-lookup
    of the original (same requests, same order).
    """
    allBatches = requests.get(site + '/api/v1/batches' + access_token).json()
    allPeople = []
    for batch in allBatches:
        allPeople += requests.get(site + '/api/v1/batches/%d/people' % batch['id'] + access_token).json()
    return allPeople
# Get people from api and returns json object containing all people from the last two batches
def get_people_in_two_last_batches():
    """Return the people of the two most recent batches as one json list."""
    twoLastBatches = requests.get(site + '/api/v1/batches' + access_token).json()[0:2]
    people = []
    for batch in twoLastBatches:
        resp = requests.get(site + '/api/v1/batches/%d/people' % batch['id'] + access_token)
        people = people + resp.json()
    return people
# Takes as an argument a json list of people and an email, and returns the api id corresponding to the email
def get_caller_id(people, caller_email):
    """
    Return the api id of the person whose email matches caller_email.

    Returns None when no person matches (the original fell through to an
    unbound local and raised NameError).  Assumes emails are unique; the
    original kept scanning and would have returned the last match.
    """
    for person in people:
        if person['email'] == caller_email:
            return person['id']
    return None
# Get a json object containing people from the api and returns a list of sorted skills without doubles
def get_all_skills(people):
    """
    Return a sorted, duplicate-free list of lower-cased skills collected
    from every person's 'skills' list.

    Uses the builtin set: the py2-only `sets.Set` import is unnecessary here.
    """
    allSkillsSet = set()
    for person in people:
        for skill in person['skills']:
            allSkillsSet.add(skill.lower())
    return sorted(allSkillsSet)
# Take a json object containing people and returns a list of ids + a dictionary with keys == ids referring to people
def create_people_dict(people):
    """Return (list of api ids, dict mapping id -> person record)."""
    people_id = []
    people_dictionary = {}
    for person in people:
        pid = person['id']
        people_id.append(pid)
        people_dictionary[pid] = person
    return people_id, people_dictionary
# Take a list of ids, a dictionary created by previous func, and a list of all skills
# Saves an csv array with people ids for each row, and skills for each column in HSData.csv
def create_people_skills_matrix(people_id, people_dictionary, allSkills):
    """Write a 0/1 matrix to HSData.csv: one row per person id, one column
    per skill, cell = 1 when the person lists that exact skill."""
    matrix = zeros((len(people_id), len(allSkills)))
    for row, pid in enumerate(people_id):
        person_skills = people_dictionary[pid]['skills']
        for col, skill in enumerate(allSkills):
            matrix[row, col] = skill in person_skills
    savetxt("HSData.csv", matrix, delimiter=",", fmt="%5i")
# Take a json of people, saves in skills.csv a list of all skills sorted without doubles
def create_dirty_skills_csv_file(people):
    """Dump the sorted unique skill list to skills.csv (single column, utf-8)."""
    frame = pd.DataFrame(get_all_skills(people))
    frame.to_csv("skills.csv", index=False, encoding="utf-8")
# Take the name of a file containing a list of skills with doubles and the name of the file that will contain the cleaned skills
# Write in clean_skills_file a Data Frame in csv containing the skills cleaned (sorted, no doubles)
def last_clean_up_skills(dirty_skills_file, clean_skills_file):
    """
    Read a skill-per-line file, drop duplicates, sort, and write the result
    as a single-column csv DataFrame to clean_skills_file.

    Fixes: the input file handle is now closed (context manager) and the
    builtin set replaces the py2-only `sets.Set`.
    """
    with open(dirty_skills_file) as fh:
        skills = [line.strip() for line in fh]
    clean_skills_in_a_list = sorted(set(skills))
    allSkillsClean = pd.DataFrame(clean_skills_in_a_list)
    allSkillsClean.to_csv(clean_skills_file, index=False)
# Take the name of a file containing a Data Frame with clean skills, the name of the file that will contain the result, and a json object containing people
# Writes in the new file a data frame containing the count for each skill
def get_count_for_each_skill(my_skills_file, file_with_count_name, people):
    """
    Count how many times each skill in my_skills_file occurs across `people`,
    writing a (Skills, Count) csv to file_with_count_name.

    Counting rules (preserved from the original 200-line elif chain, now
    table-driven):
      - `exact_keys` (short/ambiguous names such as 'c', 'r', 'go'): counted
        at most once per person, only when the lower-cased skill list contains
        the key verbatim;
      - `substring_aliases`: counted once per matching skill, when any listed
        alias is a substring of the lower-cased skill;
      - `exact_then_substring`: tried as exact membership first (under
        `membership_key` where the membership token differs), falling back to
        a per-skill substring scan over the aliases;
      - any other row: per-skill substring match on the row value itself.

    Fixes: `DataFrame.ix` (removed from pandas) replaced with `.loc`.
    """
    exact_keys = {
        'd', 'r', 'x', 'io', 'ip', 'ir', 'java', 'art', 'git', 'cs', 'go',
        'cl', 'css', 'js', 'ml', 'sql', 'data', 'learning', 'mac', 'rails',
        'ruby', 'ux',
    }
    substring_aliases = {
        'biking': ('bicycle', 'bicycling', 'biking'),
        'bittorrent': ('bittorent', 'bittorrent'),
        'coffeescript': ('coffeescript', 'coffescript', 'cofeescript'),
        'cryptography': ('cryptography', 'crypto'),
        'front end': ('front end', 'front-end', 'frontend'),
        'front end development': ('front end development', 'front-end development'),
        'full stack': ('full stack', 'full-stack'),
        'javascript': ('javacript', 'javascript', 'javascrip', 'javavscript'),
        # NOTE(review): 'mapReduce' can never match a lower-cased skill;
        # kept to preserve the original behavior exactly
        'mapReduce': ('map reduce', 'map-reduce', 'mapReduce'),
        'mathematics': ('mathematics', 'maths'),
        'multithreading': ('multithreading', 'multthreading'),
        'objective c': ('objective c', 'obj-c', 'objc', 'objective-c'),
        'python': ('python', 'pytho'),
        'statistics': ('statistics', 'stats'),
        'technical writing': ('technical writing', 'tech writing'),
        'web development': ('web development', 'web dev', 'web-dev'),
        'html': ('html', 'html5'),
        'node.js': ('node', 'node.js', 'nodejs'),
        'ruby on rails': ('rails',),
    }
    exact_then_substring = {
        'c': ('c/c++', 'c++/c'),
        'backbone.js': ('backbone.js', 'backbone-js'),
        'artificial intelligence': ('artificial intelligence',),
    }
    # membership token differs from the row key for these two entries
    membership_key = {'backbone.js': 'backbone', 'artificial intelligence': 'ai'}

    skills = [line.strip() for line in open(my_skills_file)]
    allSkillsDF = pd.DataFrame(skills)
    allSkillsDF.columns = ["Skills"]
    allSkillsDF['Count'] = 0
    for person in people:
        lowered = [skill.lower() for skill in person['skills']]
        for index, row in allSkillsDF.iterrows():
            key = row['Skills']
            if key in exact_keys:
                if key in lowered:
                    allSkillsDF.loc[index, 'Count'] += 1
            elif key in exact_then_substring:
                if membership_key.get(key, key) in lowered:
                    allSkillsDF.loc[index, 'Count'] += 1
                else:
                    aliases = exact_then_substring[key]
                    for skill in lowered:
                        if any(alias in skill for alias in aliases):
                            allSkillsDF.loc[index, 'Count'] += 1
            elif key in substring_aliases:
                aliases = substring_aliases[key]
                for skill in lowered:
                    if any(alias in skill for alias in aliases):
                        allSkillsDF.loc[index, 'Count'] += 1
            else:
                for skill in lowered:
                    if key in skill:
                        allSkillsDF.loc[index, 'Count'] += 1
    allSkillsDF.to_csv(file_with_count_name, index=False)
#print allSkillsDF
# skills.csv and cleanSkills.csv files have been modified by hand
# Script entry point: gather people across all batches, then write a CSV
# with a per-skill occurrence count.
people = get_people_in_all_batches()
get_count_for_each_skill("cleanSkills.csv", "skillsCount.csv", people)
|
# Decide how to address a person based on their age and sex.
name = input("pls input your name:")
age = int(input("pls input your age:"))
sex = input("pls input your sex:")
if age >= 18:
    # 18 and over: adult forms of address
    if sex == "man":
        print("%s,you are %d years old,is a man." % (name, age))
    elif sex == "woman":
        print("%s,you are %d years old,is a woman." % (name, age))
    else:
        print("you type wrong sex.pls input again.")
elif age >= 1:
    # 1-17: child forms of address
    if sex == "man":
        print("%s,you are %d years old,is a boy." % (name, age))
    elif sex == "woman":
        print("%s,you are %d years old,is a girl." % (name, age))
    else:
        print("you type wrong sex.pls input again.")
else:
    # ages below 1 are rejected as invalid input
    print("you input wrong age.pls input again.")
|
from django.shortcuts import get_object_or_404
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import permission_required
from django.db.models import Max
from django.utils import timezone
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import status
from rest_framework.pagination import LimitOffsetPagination
from problem.models import Problem, Tag
from .serializers import (
ProblemSerializer,
ProblemDescriptionSerializer,
ProblemListSerializer,
TagSerializer,
)
from segmentoj.decorator import (
parameter_required, syllable_required,
)
from status.models import Status
from .decorator import view_hidden_problem_permission_check
class ProblemView(APIView):
    """CRUD endpoints for a single Problem, addressed by ``pid``."""

    @method_decorator(parameter_required('pid'))
    @method_decorator(view_hidden_problem_permission_check())
    def get(self, request, pid):
        # Get the content of a problem
        problem = get_object_or_404(Problem, pid=pid)
        ps = ProblemSerializer(problem)
        return Response({'res': ps.data}, status=status.HTTP_200_OK)

    @method_decorator(permission_required('problem.add_problem', raise_exception=True))
    def post(self, request):
        # Add a new problem
        data = request.data
        ps = ProblemSerializer(data=data)
        ps.is_valid(raise_exception=True)
        ps.save()
        return Response(status=status.HTTP_201_CREATED)

    @method_decorator(parameter_required('pid'))
    @method_decorator(permission_required('problem.change_problem', raise_exception=True))
    def patch(self, request, pid):
        # Partially update an existing problem
        data = request.data
        problem = get_object_or_404(Problem, pid=pid)
        ps = ProblemSerializer(problem, data=data, partial=True)
        ps.is_valid(raise_exception=True)
        ps.save()
        return Response(status=status.HTTP_204_NO_CONTENT)

    @method_decorator(parameter_required('pid'))
    @method_decorator(permission_required('problem.delete_problem', raise_exception=True))
    def delete(self, request, pid):
        # Delete a problem.
        # raise_exception=True and parameter_required('pid') added for
        # consistency with the other verbs: anonymous callers get 403
        # instead of a login redirect, and a missing pid is rejected early.
        # (An unused `data = request.data` was also removed.)
        problem = get_object_or_404(Problem, pid=pid)
        problem.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class ProblemDescriptionView(APIView):
    """Read-only endpoint returning only the description of a problem."""

    @method_decorator(parameter_required('pid'))
    @method_decorator(view_hidden_problem_permission_check())
    def get(self, request, pid):
        # Serialize just the description portion of the requested problem.
        target = get_object_or_404(Problem, pid=pid)
        serializer = ProblemDescriptionSerializer(target)
        return Response({'res': serializer.data}, status=status.HTTP_200_OK)
class ProblemTestdataView(APIView):
    """Manage the external test-data URL attached to a problem."""

    def _set_testdata_url(self, pid, new_value):
        # Shared by patch/delete: update the URL and stamp the change time.
        problem = get_object_or_404(Problem, pid=pid)
        problem.testdata_url = new_value
        problem.testdata_last_update = timezone.now()
        problem.save()

    @method_decorator(parameter_required('pid'))
    @method_decorator(view_hidden_problem_permission_check())
    @method_decorator(permission_required('problem.download_testdata', raise_exception=True))
    def get(self, request, pid):
        # Get problem testdata URL
        problem = get_object_or_404(Problem, pid=pid)
        return Response({
            'res': problem.testdata_url
        }, status=status.HTTP_200_OK)

    @method_decorator(parameter_required('pid'))
    @method_decorator(permission_required('problem.edit_problem', raise_exception=True))
    @method_decorator(syllable_required('testdata_url', str))
    def patch(self, request, pid):
        # Change problem testdata URL
        self._set_testdata_url(pid, request.data['testdata_url'])
        return Response(status=status.HTTP_204_NO_CONTENT)

    @method_decorator(parameter_required('pid'))
    @method_decorator(permission_required('problem.edit_problem', raise_exception=True))
    def delete(self, request, pid):
        # Clear the testdata URL (also stamps testdata_last_update).
        self._set_testdata_url(pid, None)
        return Response(status=status.HTTP_204_NO_CONTENT)
class TagView(APIView):
    """Read, create and delete problem tags, addressed by ``tid``."""

    @method_decorator(parameter_required('tid'))
    def get(self, request, tid):
        # Get a tag
        tag = get_object_or_404(Tag, id=tid)
        ts = TagSerializer(tag)
        return Response({'res': ts.data}, status=status.HTTP_200_OK)

    @method_decorator(permission_required('problem.add_tag', raise_exception=True))
    def post(self, request):
        # Add a new tag; respond with its freshly assigned id.
        data = request.data
        ts = TagSerializer(data=data)
        ts.is_valid(raise_exception=True)
        tag = ts.save()
        return Response({'res': {'id': tag.id}}, status=status.HTTP_201_CREATED)

    @method_decorator(parameter_required('tid'))
    @method_decorator(permission_required('problem.delete_tag', raise_exception=True))
    def delete(self, request, tid):
        # Delete a tag. (An unused `data = request.data` was removed.)
        tag = get_object_or_404(Tag, id=tid)
        tag.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class ProblemListView(APIView):
    """Paginated problem list annotated with the requesting user's best score."""

    def get(self, request):
        def attach_score(item):
            # Best score this user achieved on the problem; -1 means "never tried".
            problem_id = item.get('id')
            submissions = Status.objects.filter(problem=problem_id, owner=request.user.id)
            if submissions.count() == 0:
                item['score'] = -1
            else:
                item['score'] = submissions.aggregate(Max('score'))['score__max']
            item.pop('id')  # Don't leak database IDs
            return item

        filters = {}
        params = request.GET
        if not request.user.has_perm('problem.view_hidden'):
            filters['enabled'] = True
        if params.get('title'):
            filters['title__icontains'] = params.get('title')

        queryset = Problem.objects.filter(**filters).order_by('pid')
        paginator = LimitOffsetPagination()
        page = paginator.paginate_queryset(queryset=queryset, request=request, view=self)
        serializer = ProblemListSerializer(page, many=True)
        return Response(
            {'count': queryset.count(), 'res': [attach_score(item) for item in serializer.data]},
            status=status.HTTP_200_OK,
        )
class ProblemListCountView(APIView):
    """Return only the number of problems matching the list filters."""

    def get(self, request):
        filters = {}
        # Hide disabled problems from users without the view_hidden permission.
        if not request.user.has_perm('problem.view_hidden'):
            filters['enabled'] = True
        title = request.GET.get('title')
        if title:
            filters['title__icontains'] = title
        matching = Problem.objects.filter(**filters)
        return Response({'res': matching.count()}, status=status.HTTP_200_OK)
class TagListView(APIView):
    """List every tag together with the total tag count."""

    def get(self, request):
        tags = Tag.objects.all()
        serialized = TagSerializer(tags, many=True)
        return Response(
            {'count': tags.count(), 'res': serialized.data},
            status=status.HTTP_200_OK,
        )
|
#!/usr/bin/python
import math
import random
from typing import List
from glm import ivec2, vec2, length, vec3
from pygame.surface import SurfaceType
from game.base.being import Being
from game.base.enemy import Enemy
from game.base.entity import Entity
from game.base.inputs import Axis
from game.base.stats import Stats
from game.constants import *
from game.entities.bullet import Bullet
from game.entities.blast import Blast
from game.entities.boss import Boss
from game.entities.butterfly import Butterfly
from game.entities.message import Message
from game.entities.powerup import Powerup
from game.entities.weapons import Weapon, WEAPONS
from game.util import ncolor
class Player(Being):
    """The player's ship.

    Owns movement/fire input bindings, HP and blink (invulnerability)
    state, the weapon list, crosshair targeting, and HUD text rendering.

    NOTE(review): `glm` and `pygame` are referenced by module name below
    but are not imported by name in this file's import block — presumably
    they arrive via `from game.constants import *`; confirm.
    """

    def __init__(self, app, scene, speed=PLAYER_SPEED, level=0):
        super().__init__(app, scene, filename=SHIP_IMAGE_PATH)
        self.game_state = self.scene.state
        # persistant stats for score screen
        self.stats = self.app.data["stats"] = self.app.data.get("stats", Stats())
        self.scene.player = self
        self.max_hp = self.hp = 3
        self.friendly = True  # determines what Beings you can damage
        self.crosshair_surf: SurfaceType = app.load_img(CROSSHAIR_IMAGE_PATH, 3)
        self.crosshair_surf_green = app.load_img(CROSSHAIR_GREEN_IMAGE_PATH, 3)
        self.crosshair_scale = 1
        # Bind input axes/buttons; slots keep the connections alive.
        self.slots += [
            self.app.inputs["hmove"].always_call(self.set_vel_x),
            self.app.inputs["vmove"].always_call(self.set_vel_y),
            self.app.inputs["fire"].always_call(self.fire),
            self.app.inputs["switch-gun"].on_press_repeated(self.next_gun, 0.5),
            # self.app.inputs["test"].on_press(self.explode),
        ]
        self.position = vec3(0, 0, 0)
        self.collision_size = vec3(50, 50, 500)
        self.speed = vec3(speed)
        self.velocity = vec3(self.speed)
        self.alive = True
        self.solid = True
        self.blinking = False
        self.targeting = False
        self.hide_stats = 0
        # *_flash values are 1.0 when triggered and decay each update().
        self.score_flash = 0.0
        self.weapon_flash = 0.0
        self.health_flash = 0.0
        self.level = level
        self.weapons: List[Weapon] = self.get_guns()
        self.current_weapon = 0
        self.scripts += [self.blink, self.smoke]

    @property
    def targeting(self):
        # True while an enemy sits under the crosshair (set by render()).
        return self._targeting

    @targeting.setter
    def targeting(self, t):
        # Changing targeting restarts the crosshair pulse animation.
        self._targeting = t
        self.crosshair_t = 0

    @property
    def weapon(self):
        """Currently selected weapon (index wraps around the weapon list)."""
        return self.weapons[self.current_weapon % len(self.weapons)]

    @property
    def score(self):
        return self.stats.score

    @score.setter
    def score(self, s):
        # Setting the score also triggers the HUD score flash.
        self.stats.score = s
        self.score_flash = 1

    # def flash_score(self, script):
    #     yield
    #     while True:
    #         if self.score_flash:
    #             for x in range(50):
    #                 self.score_light = True
    #                 yield script.sleep(.2)
    #                 self.score_light = False
    #                 yield script.sleep(.2)
    #         self.score_light = False
    #         yield

    def get_guns(self):
        """Instantiate (and add to the scene) every weapon unlocked at this level."""
        return [
            self.scene.add(gun(self.app, self.scene, self))
            for gun in WEAPONS
            if gun.level <= self.level
        ]

    def restart(self):
        """Reset the player to its initial state and restart the game state."""
        self.hp = 3
        self.visible = True
        self.alive = True
        self.blinking = False
        self.speed = vec3(PLAYER_SPEED)
        self.clear_scripts()
        self.scripts += [self.blink, self.smoke]
        # Replace the old weapon entities with a fresh set.
        for wpn in self.weapons:
            wpn.remove()
        self.weapons: List[Weapon] = self.get_guns()
        self.current_weapon = 0
        self.app.state.terminal.clear()
        self.app.state.restart()

    def kill(self, damage, bullet, enemy):
        """Handle player death: explode, hide, count the death, schedule restart."""
        # TODO: player death
        # self.scene.play_sound('explosion.wav')
        # self.acceleration = -Y * 100
        self.hp = 0
        self.explode()
        # self.remove()
        self.visible = False
        self.alive = False
        self.stats.deaths += 1
        self.app.state.terminal.write_center("Oops! Try Again!", 10, "red")
        # restart game in 2 seconds
        self.scene.slotlist += self.scene.when.once(2, lambda: self.restart())
        return False

    def hurt(self, damage, bullet, enemy):
        """
        Take damage from an object `bullet` shot by enemy
        """
        # Already dead or currently blinking (invulnerable): no damage.
        if self.hp <= 0:
            return 0
        if self.blinking or not self.alive:
            return 0
        dmg = super().hurt(damage, bullet, enemy)
        # self.scene.add(Message(self.app, self.scene, letter, position=pos))
        if dmg:
            # Start the invulnerability blink and flash the HP display.
            self.blinking = True
            self.health_flash = 1
        return dmg
        # damage = min(self.hp, damage)  # calc effective damage (not more than hp)
        # self.hp -= damage
        # self.blinking = True
        # if self.hp <= 0:
        #     self.kill(damage, bullet, enemy)  # kill self
        # # if self.hp < 3:
        # #     self.smoke_event = scene.when.every(1, self.smoke)
        # return damage

    def collision(self, other, dt):
        """React to overlaps: ram non-boss enemies, pick up powerups."""
        if isinstance(other, Enemy) and not isinstance(other, Boss):
            # Mutual destruction: both sides take the enemy's HP as damage.
            if other.alive:
                self.hurt(other.hp, None, other)
                other.kill(other.hp, None, self)
        elif isinstance(other, Powerup):
            if other.heart:
                self.hp = self.max_hp
            else:
                # Refill and auto-select the weapon matching the powerup letter.
                for i, wpn in enumerate(self.weapons):
                    if wpn.letter == other.letter:
                        wpn.ammo = wpn.max_ammo
                        self.current_weapon = i
                        break
            # print("powerup")
            self.play_sound("powerup.wav")
            other.solid = False
            other.remove()

    def find_enemy_in_crosshair(self):
        """Return the closest enemy under the crosshair, or None."""
        # Assuming state is Game
        camera = self.app.state.camera
        screen_center = vec2(camera.screen_size) / 2
        crosshair_radius = self.crosshair_surf.get_width() / 2
        # Entities are sorted from far to close and we want the closest
        for entity in reversed(self.scene.slots):
            entity = entity.get()
            if (
                isinstance(entity, Enemy)
                and camera.distance(entity.position) < AIM_MAX_DIST
            ):
                center = camera.world_to_screen(entity.position)
                if (
                    center
                    and length(center - screen_center)
                    < crosshair_radius + entity.render_size.x / 2
                ):
                    return entity

    def write_weapon_stats(self):
        """Draw ammo, hearts and score on the terminal HUD (top row)."""
        if not self.alive:
            return
        if not self.hide_stats:
            ty = 0
            ofs = ivec2(0, 10)
            terminal = self.app.state.terminal
            wpn = self.weapons[self.current_weapon]
            # extra space here to clear terminal
            if wpn.max_ammo < 0:
                ammo = wpn.letter + " ∞"
            else:
                ammo = f"{wpn.letter} {wpn.ammo}/{wpn.max_ammo}"
            if len(ammo) < 10:
                ammo += " " * (10 - len(ammo)) # pad
            col = glm.mix(ncolor(wpn.color), ncolor("white"), self.weapon_flash)
            self.game_state.terminal.write(" ", (1, ty), col)
            self.game_state.terminal.write(ammo, (1, ty), col, ofs)
            col = glm.mix(ncolor("red"), ncolor("white"), self.health_flash)
            # self.game_state.terminal.write(
            #     " " + "♥" * self.hp + " " * (3 - self.hp), 1, "red"
            # )
            self.game_state.terminal.write_center(" ", ty + 1, col)
            self.game_state.terminal.write_center(" ", ty, col)
            # NOTE(review): (self.hp - self.max_hp) is never positive here,
            # so this padding is always empty; presumably
            # (self.max_hp - self.hp) was intended — confirm.
            self.game_state.terminal.write_center(
                "♥" * self.hp + " " * (self.hp - self.max_hp), ty, "red", ofs
            )
            # Render Player's Score
            score_display = "Score: {}".format(self.stats.score)
            score_pos = (
                terminal.size.x - len(score_display) - 1,
                ty,
            )
            col = glm.mix(ncolor("white"), ncolor("yellow"), self.score_flash)
            self.game_state.terminal.write(" ", score_pos + ivec2(0, 1), col)
            self.game_state.terminal.write(score_display, score_pos, col, ofs)
            # self.game_state.terminal.write("WPN " + wpn.letter, (0,20), wpn.color)
            # if wpn.max_ammo == -1:
            #     self.game_state.terminal.write("AMMO " + str(wpn.ammo) + " ", (0,21), wpn.color)
            # else:
            #     self.game_state.terminal.write("AMMO n/a ", (0,21), wpn.color)
        else:
            self.game_state.terminal.clear(0)

    def next_gun(self, btn): # FIXME
        # switch weapon
        # NOTE(review): the while loop never terminates if every weapon has
        # 0 ammo — presumably some weapon always has max_ammo < 0
        # (infinite); confirm.
        self.weapon_flash = 1
        self.current_weapon = (self.current_weapon + 1) % len(self.weapons)
        while self.weapon.ammo == 0:
            self.current_weapon = (self.current_weapon + 1) % len(self.weapons)
        self.play_sound("powerup.wav")

    def set_vel_x(self, axis: Axis):
        # Input callback: horizontal movement axis -> x velocity.
        if not self.alive:
            return
        self.velocity.x = axis.value * self.speed.x

    def set_vel_y(self, axis: Axis):
        # Input callback: vertical movement axis -> y velocity.
        if not self.alive:
            return
        self.velocity.y = axis.value * self.speed.y

    def find_aim(self):
        """World-space aim point: targeted enemy, or straight ahead."""
        camera = self.app.state.camera
        butt = self.find_enemy_in_crosshair()
        if butt is None:
            aim = camera.rel_to_world(vec3(0, 0, -camera.screen_dist))
        else:
            aim = butt.position
        return aim

    def fire(self, button):
        """Input callback: fire the current weapon while the button is pressed."""
        if not self.alive:
            return
        if not button.pressed:
            return
        # no ammo? switch to default
        if not self.weapon.ammo:
            self.current_weapon = 0
        if self.weapon.fire(self.find_aim()):
            self.weapon_flash = 1
            self.play_sound(self.weapon.sound)

    def update(self, dt):
        """Per-frame update: clamp altitude, animate crosshair, decay flashes."""
        if self.position.y <= -299:
            # too low ?
            self.velocity.y = max(0, self.velocity.y)
            self.position.y = -299
        elif self.position.y >= 300:
            # too high ?
            self.velocity.y = min(0, self.velocity.y)
            self.position.y = 300
        if not self.alive:
            self.velocity.x = 0
            self.velocity.y = 0
        if self.targeting:
            # Pulse the crosshair scale while locked onto a target.
            self.crosshair_t = (self.crosshair_t + dt) % 1
            self.crosshair_scale = 1 + 0.05 * math.sin(self.crosshair_t * math.tau * 2)
        self.score_flash = self.score_flash - dt
        self.weapon_flash = self.weapon_flash - dt
        self.health_flash = self.health_flash - dt
        super().update(dt)

    def smoke(self, script):
        """Script coroutine: emit smoke particles while damaged (hp < 3)."""
        while self.alive:
            if self.hp < 3:
                self.scene.add(
                    Entity(
                        self.app,
                        self.scene,
                        "smoke.png",
                        position=self.position + vec3(0, -20, 0),
                        velocity=(
                            vec3(random.random(), random.random(), random.random())
                            - vec3(0.5)
                        )
                        * 2,
                        life=0.2,
                        particle=True,
                    )
                )
            # Lower HP -> shorter sleep -> denser smoke.
            yield script.sleep(self.hp)
        yield

    # def engine(self, script):
    #     while self.alive:
    #         self.scene.add(
    #             Entity(
    #                 self.app,
    #                 self.scene,
    #                 "smoke.png",
    #                 position=self.position + vec3(0, -20, 0),
    #                 velocity=(
    #                     vec3(random.random(), random.random(), random.random())
    #                     - vec3(0.5)
    #                 )
    #                 * 2,
    #                 life=0.2,
    #                 particle=True,
    #             )
    #         )
    #         yield script.sleep(0.2)

    def blink(self, script):
        """Script coroutine: flicker visibility while self.blinking is set."""
        self.blinking = False
        while self.alive:
            if self.blinking:
                for i in range(10):
                    self.visible = not self.visible
                    yield script.sleep(0.1)
                self.visible = True
                self.blinking = False
            yield

    # def flash_stats(self, script):
    #     self.stats_visible = True
    #     for x in range(10):
    #         self.stats_visible = not self.stats_visible
    #         yield script.sleep(.1)
    #     self.stats_visible = True

    def render(self, camera):
        """Draw HUD text, the (tilted/stretched) ship sprite, and the crosshair."""
        self.write_weapon_stats()
        # Ship
        rect = self._surface.get_rect()
        rect.center = (self.app.size[0] / 2, self.app.size[1] * 0.8)
        direction = self.velocity.xy / self.speed.xy
        rect.center += direction * (10, -10)
        if self.visible:
            # stretch player graphic
            sz = ivec2(*self._surface.get_size())
            img = self._surface
            if self.velocity:
                sz.y += self.velocity.y / self.speed.y * 10
                img = pygame.transform.scale(self._surface, sz)
            if self.velocity.x:
                # Bank the ship up to 30 degrees with horizontal movement.
                rot = -self.velocity.x / self.speed.x * 30
                img = pygame.transform.rotate(img, rot)
            nrect = (rect[0], rect[1], *sz)
            self.app.screen.blit(img, nrect)
        # Crosshair
        if self.alive:
            rect = self.crosshair_surf.get_rect()
            rect.center = self.app.size / 2
            if self.find_enemy_in_crosshair():
                if not self.targeting:
                    self.targeting = True  # triggers
                sz = ivec2(vec2(rect[2], rect[3]) * self.crosshair_scale)
                img = pygame.transform.scale(self.crosshair_surf_green, sz)
                rect[2] -= round(sz.x / 2)
                rect[3] -= round(sz.y / 2)
                self.app.screen.blit(img, rect)
            else:
                if self.targeting:
                    self.targeting = False  # triggers
                self.app.screen.blit(self.crosshair_surf, rect)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pygame, os, sys
# HUD blit positions (x, y) for the teleport-power slots of player 1 (left)
# and player 2 (right). ROLL1/ROLL2 are unused in this file — presumably
# referenced elsewhere; confirm before removing.
TELE1 = (30,4)
ROLL1 = (50,4)
ROLL2 = (256,4)
TELE2 = (276,4)
def load_image(nombre, dir_imagen, alpha=False):
ruta = os.path.join(dir_imagen, nombre)
try:
image = pygame.image.load(ruta)
except:
print "Error, no se puede cargar la imagen: ", ruta
sys.exit(1)
if alpha == True:
image = image.convert_alpha()
else:
image = image.convert()
return image
class Hud(pygame.sprite.Sprite):
    """Top-of-screen HUD: player icons, teleport-power indicators, match timer."""

    def __init__(self):
        pygame.sprite.Sprite.__init__(self)
        player1 = load_image("player1.png", "sprites/", alpha=True)
        player2 = load_image("player2.png", "sprites/", alpha=True)
        self.empty = load_image("empty.png", "sprites/", alpha=False)
        self.power = load_image("powertransport-inside.png", "sprites/", alpha=False)
        self.image = pygame.Surface([320,24], pygame.SRCALPHA, 32)
        self.rect = self.image.get_rect()
        self.image.blit(player1, (10,4))
        self.image.blit(player2, (296,4))
        # Both power slots start empty.
        self.image.blit(self.empty, TELE1)
        self.image.blit(self.empty, TELE2)

    def putPower(self, player):
        # Light the teleport-power slot for player 1 or 2.
        if player == 1:
            self.image.blit(self.power, TELE1)
        elif player == 2:
            self.image.blit(self.power, TELE2)

    def removePower(self, player):
        # Clear the teleport-power slot for player 1 or 2.
        if player == 1:
            self.image.blit(self.empty, TELE1)
        elif player == 2:
            self.image.blit(self.empty, TELE2)

    def getTime(self, totalSeconds):
        """Format elapsed seconds as a zero-padded MM:SS string."""
        minutes = totalSeconds / 60
        seconds = totalSeconds - minutes*60
        # "%02d" zero-pads both fields; the previous string concatenation
        # produced values like "010:05" once the match passed ten minutes.
        return "%02d:%02d" % (minutes, seconds)

    def draw(self, screen, seconds):
        """Blit the HUD bar and the current match time onto `screen`."""
        screen.blit(self.image, (0,0))
        font=pygame.font.Font(None,28)
        time=font.render(self.getTime(seconds), 1,(255,255,255))
        screen.blit(time, (140, 4))
|
#!/usr/bin/env python
# coding=utf-8
#
#https://python-gitlab.readthedocs.io/en/stable/api-usage.html
__Author__ = '王帅朋'
__Date__ = '2019-01-10'
import os
import re
import sys
import subprocess
import webbrowser
import gitlab
import datetime
import pjconfig
import update
import send
import reviewer
from xpinyin import Pinyin
#Script input parameters (global variables)
inputAssiness = None #reviewer specified via external input
inputSourceBranch = None #source branch specified via external input
inputTargetBranch = None #target branch specified via external input
VERSION = " 1.0.0"
#Global state
gl = None #gitlab connection handle
project = None #gitlab project handle
GLOBAL_BEFORE_PULL_FILE_LIST = [] #file list snapshot taken before pulling
GLOBAL_BRANCH_SOURCE = None
DEFAULT_BRANCH_TARGET = "dev" #default target branch
ASSINESS = "" #hard-coded reviewer; priority: external input > hard-coded > committer from git config
CAMMAND_GIT_REMOTE_URL = "git config remote.origin.url" #get the repo's SSH or HTTP URL
CAMMAND_GIT_ADD_ALL = "git add ." #stage all changes
CAMMAND_GIT_COMMIT_MSG = "git commit -m" #commit staged changes
CAMMAND_CURRENT_LOCAL_BRANCHS = "git branch -l" #list local branches
CAMMAND_REMOTE_BRANCHS = "git branch -r" #list remote branches
CAMMAND_PULL_TARGET_BRANCE = "git pull --rebase" #pull with rebase
CAMMAND_FORCE_DELETE_LOCAL_BRANCH = "git branch -D" #force-delete a local branch
CAMMAND_DELETE_BRANCH_ORIGIN = "git push origin -d" #delete a remote branch
CAMMAND_CHECKOUT_BRANCH = "git checkout -b" #create and switch to a branch
CAMMAND_SWITCH_BRANCH = "git checkout" #switch to a branch
CAMMAND_PUSH_BRANCH = "git push origin" #push a branch to the remote
CAMMAND_LAST_MY_COMMITID = "git rev-parse HEAD" #id of the most recent commit
CAMMAND_GIT_REBASE_ABORT = "git rebase --abort" #abort a rebase that hit conflicts
CAMMAND_GIT_FETCH = "git fetch -p" #fetch updates, pruning deleted refs
CAMMAND_GIT_LOG = "git log" #commit log of a branch
CAMMAND_GIT_RESET = "git reset" #reset; a commit id is appended
CAMMAND_GIT_STASH = "git stash save" #stash working-tree changes
CAMMAND_GIT_STASH_LIST = "git stash list" #list all stashes
CAMMAND_GIT_STASH_POP = "git stash pop" #restore stashed changes
CAMMAND_GIT_STASH_DROP = "git stash drop" #drop a stash
CAMMAND_POD_INSTALL = "pod install" #pod install
CAMMAND_GIT_CONFIG_DELETE_TARGET_BRANCH = "git config --unset pushconfig.targetbranch"#remove the locally configured default target branch
CAMMAND_GIT_CONFIG_TARGET_BRANCH = "git config pushconfig.targetbranch" #locally configure the default target branch
CAMMAND_GIT_CONFIG_MERGEREQUEST_IDS = "git config pushconfig.mrids" #record merge-request ids
CAMMAND_GIT_CONFIG_PROJECT_ID = "git config pushconfig.projectid" ##cache the project id
CAMMAND_GIT_CONFIG_PRIVATE_TOKEN = "git config --global pushconfig.privatetoken" #token for connecting to gitlab
CAMMAND_GIT_CONFIG_GITLAB_URL = "git config --global pushconfig.url" #gitlab host for the connection
#Markers matched against raw `git status` output to classify the tree state.
MSG_NOCHANGE = "nothing to commit, working tree clean" #no changes
MSG_CHANGE_NOTADD = "Changes not staged for commit:" #changes present but not added
MSG_CHANGE_ADD = "Changes to be committed:" #changes present and already added
MSG_PULL = "(use \"git pull\" to" #commits waiting to be pulled
MSG_NEEDPUSH1 = "(use \"git push\" to publish your local commits)" #commits waiting to be pushed, case 1
MSG_NEEDPUSH2 = "have diverged," #commits waiting to be pushed, case 2
# Python 2 only: make utf-8 the default codec so the Chinese strings round-trip.
reload(sys)
sys.setdefaultencoding('utf8')
def test():
    """Ad-hoc debugging helper; not part of the normal push flow."""
    print "\ntest打印-------------"
    # print current_login_user_name()
    # print current_path()
    # mark_count_of_files()
    # pod_install_if_need()
    # dateTime = datetime.datetime.now().strftime('%m-%d_%H:%M:%S')
    # print "源分支是:"+source_branch()
    # print "目标分支是:"+target_branch()
    print "test打印完毕-------------\n"
def main():
    """Entry point: validate the repo, then push directly or via a merge request."""
    #check this is a git repo and the rest of the required configuration
    check_git_repo()
    print("目标分支是:"+target_branch()+" 源分支是:"+source_branch())
    #initialise the gitlab connection and project
    setup()
    #TODO:
    # test()
    # return
    #the currently checked-out branch must be the target branch
    check_current_branch()
    #does the target branch exist on the remote?
    originTargetExist = check_origin_target_exist()
    if originTargetExist:
        case_normal_merge_push()
    else:
        #no remote target branch yet: just push it
        push_branch_target()
    pass
def check_git_repo():
    """Exit unless run inside a git repo with remote URL, gitlab host and token configured."""
    #is this a git repository?
    path = os.getcwd()+"/.git"
    print "当前路径是:"+os.getcwd()
    localTargetBranch = cammand_out_put(CAMMAND_GIT_CONFIG_TARGET_BRANCH, False, None)
    if localTargetBranch:
        print "本地配置的默认目标分支是:"+localTargetBranch+"如需修改删除请执行push -h查看操作"
    if not os.path.exists(path):
        print "请在git仓库根目录下使用此命令"
        exit(0)
    #check the remote repository URL
    if not len(git_config_remote_url()):
        print ("❌❌❌没有远程仓库地址!!!\n请先执行如下命令配置本仓库远程仓库的地址")
        print CAMMAND_GIT_REMOTE_URL+" "+"xxx"
        exit(0)
    #check the gitlab URL
    if not gitlab_url():
        print ("❌❌❌没有域名!!!\n请先执行如下命令配置本仓库gitlab的http域名,例如http://gitlab.xxx.cn")
        print CAMMAND_GIT_CONFIG_GITLAB_URL+" "+"xxx"
        exit(0)
    #check the token
    if not private_token():
        print ("❌❌❌无法登录!!!\n请参考网址(https://blog.csdn.net/bing900713/article/details/80222188)获取gitlab token,并在git仓库下执行如下命令配置登录gitlab的token后重试(token权限全勾选)")
        print CAMMAND_GIT_CONFIG_PRIVATE_TOKEN+" "+"xxx"
        exit(0)
def setup():
    """Connect to gitlab, resolve the project and authenticate the current user."""
    print "正在初始化..."
    print "连接gitlab"
    privateToken = private_token()
    gitlabUrl = gitlab_url()
    global gl
    gl = gitlab.Gitlab(gitlabUrl, private_token=privateToken, api_version=4, per_page=20)
    print "获取项目ID"
    projectID = project_ID()
    print "获取项目"
    global project
    project = gl.projects.get(projectID)
    #authenticate the current user
    gl.auth()
    print "初始化完毕"
def case_normal_merge_push():
    """Normal flow when the target branch already exists on the remote."""
    #working-tree status
    status = git_status()
    #verify the status allows a push; exits the program otherwise
    check_git_status(status)
    #snapshot the file count of the watched directory
    mark_count_of_files()
    #pull the latest code
    conflict = pull_target_branch()
    #branch on whether the pull produced conflicts
    if conflict:
        case_conflict()
    else:
        case_no_conflict()
    pass
def case_conflict():
    """Recover from a conflicted rebase by re-playing local commits as stashes."""
    print "代码有冲突,走冲突流程"
    #abort the rebase
    give_up_rebase()
    #number of local commits ahead of the target branch
    commitCount = ahead_target_branch_commit()
    #commit log of the target branch
    commitLogs = target_branch_commit()
    #reset the commits and stash the resulting changes
    stashTitleList = reset_and_stash(commitCount,commitLogs)
    #rebase-pull again
    pull_target_branch()
    #restore the stashes (restore, add, commit, drop)
    pop_stash_with_titles(stashTitleList)
    #remind the user to resolve conflicts by hand, then commit and push again
    print "⚠️ ⚠️ ⚠️ 请手动解决冲突后,再提交代码"
def case_no_conflict():
    """Flow when the rebase pull applied cleanly."""
    print "走没有冲突流程"
    #either push directly, or go through the gitlab merge-request flow
    pushImmediacy = can_push_branch_target()
    if pushImmediacy:
        print("当前分支可以直接push代码")
        push_branch_target()
    else:
        print("当前分支需要走gitlab的merge流程")
        merge_branch_target()
    print("✅✅✅提交代码已经完成,开始做剩下额外的工作\n")
    #run pod install (and build the project) if needed
    pod_install_if_need()
    #delete remote source branches that were already merged
    delete_source_branch_when_merged()
    #update the class prefix and organisation name
    pjconfig.setup_class_prefix_if_need()
    pass
def merge_branch_target():
    """Full merge-request flow: new branch, push, create MR, notify, clean up."""
    #check that the reviewer id is valid
    check_assignee_id()
    #create the local source branch
    create_branch_source()
    #push the new branch
    push_branch_source()
    #create the merge request
    mr = create_merge_request()
    #send a DingTalk message to remind the reviewer
    send_dingding(mr)
    #handle the merge request
    deal_merge_request(mr)
    #switch back to the target branch
    switch_target_branch()
    #pull the target branch
    pull_target_branch()
    #delete the local source branch
    delete_branch_source()
    #run git fetch
    git_fetch()
    #open the MR page in the browser
    open_web_merge_request(mr)
    pass
def check_current_branch():
    """Exit unless the currently checked-out branch is the target branch."""
    if target_branch() != current_select_branch():
        print("❌❌❌必须在目标分支"+target_branch()+"上操作")
        exit(0)
def git_status():
    """Return the raw output of `git status`."""
    return cammand_out_put("git status", True, None)
def check_origin_target_exist():
    """Return True if the remote has a branch named after the target branch."""
    print("获取远端分支列表:"+CAMMAND_REMOTE_BRANCHS)
    remoteBranchs = cammand_out_put(CAMMAND_REMOTE_BRANCHS, True, None)
    oirginTargetBranch = "origin/"+target_branch()
    if oirginTargetBranch in remoteBranchs:
        print("远端分支{branch}存在".format(branch=target_branch()))
        return True
    else:
        print("远端分支{branch}不存在".format(branch=target_branch()))
        return False
def mark_count_of_files():
    """Snapshot the watched module's file list before pulling (stored globally)."""
    global GLOBAL_BEFORE_PULL_FILE_LIST
    GLOBAL_BEFORE_PULL_FILE_LIST = module_files_list()
    print "记录下被统计目录文件列表"
def check_git_status(status):
    """Classify `git status` output and exit unless the state allows a push.

    The combinations of (pull needed, working-tree changes, pending
    commits) are handled case by case; each case is preceded by a sample
    of the `git status` output it matches. Only the "committed with
    nothing else pending" states fall through and let the push continue.
    """
    needPull = MSG_PULL in status
    noChange = MSG_NOCHANGE in status
    changeNotAdd = MSG_CHANGE_NOTADD in status
    changeDidAdd = MSG_CHANGE_ADD in status
    needPush = MSG_NEEDPUSH1 in status or MSG_NEEDPUSH2 in status
    #no pull, no changes, no commits (nothing to push)
    '''
    On branch dev
    Your branch is up to date with 'origin/dev'.
    nothing to commit, working tree clean
    '''
    if not needPull and noChange and not needPush:
        print("⚠️ ⚠️ ⚠️ 不需要拉取代码,没有代码改动,不能push")
        exit(0)
    #no pull, changes present but not added, no commits (nothing to push)
    '''
    On branch dev
    Your branch is up to date with 'origin/dev'.
    Changes not staged for commit:
    (use "git add <file>..." to update what will be committed)
    (use "git checkout -- <file>..." to discard changes in working directory)
    modified: WSPModule/WSPModule/Classes/Module/PublishJourney/VC/WSPPassengerPublishJourneyController.m
    no changes added to commit (use "git add" and/or "git commit -a")
    '''
    if not needPull and changeNotAdd and not needPush:
        print("不需要拉取代码,有代码改动但未add,不能push")
        print("⚠️ ⚠️ ⚠️ 请先提交或者贮藏你的代码")
        exit(0)
    #no pull, changes already added, no commits (nothing to push)
    '''
    On branch dev
    Your branch is up to date with 'origin/dev'.
    Changes to be committed:
    (use "git reset HEAD <file>..." to unstage)
    modified: WSPModule/WSPModule/Classes/Module/PublishJourney/VC/WSPPassengerPublishJourneyController.m
    '''
    if not needPull and changeDidAdd and not needPush:
        print("不需要拉取代码,有代码改动且已经add,不能push")
        print("⚠️ ⚠️ ⚠️ 请先提交或者贮藏你的代码")
        exit(0)
    #no pull, no changes, one commit waiting to push -> OK
    '''
    On branch dev
    Your branch is ahead of 'origin/dev' by 1 commit.
    (use "git push" to publish your local commits)
    nothing to commit, working tree clean
    '''
    if not needPull and noChange and needPush:
        print("不需要拉取代码,没代码改动,有已经提交的,可以push")
    #no pull, changes not added, one commit waiting to push
    '''
    On branch dev
    Your branch is ahead of 'origin/dev' by 1 commit.
    (use "git push" to publish your local commits)
    Changes not staged for commit:
    (use "git add <file>..." to update what will be committed)
    (use "git checkout -- <file>..." to discard changes in working directory)
    modified: WSPModule/WSPModule/Classes/Module/PublishJourney/VC/WSPPassengerPublishJourneyController.m
    no changes added to commit (use "git add" and/or "git commit -a")
    '''
    if not needPull and changeNotAdd and needPush:
        print("不需要拉取代码,有代码改动但是未add,需要push")
        print("⚠️ ⚠️ ⚠️ 请先提交或者贮藏你的代码")
        exit(0)
    #no pull, changes already added, one commit waiting to push
    '''
    On branch dev
    Your branch is ahead of 'origin/dev' by 1 commit.
    (use "git push" to publish your local commits)
    Changes to be committed:
    (use "git reset HEAD <file>..." to unstage)
    modified: WSPModule/WSPModule/Classes/Module/PublishJourney/VC/WSPPassengerPublishJourneyController.m
    '''
    if not needPull and changeDidAdd and needPush:
        print("不需要拉取代码,有代码改动且已经add,不能push")
        print("⚠️ ⚠️ ⚠️ 请先提交或者贮藏你的代码")
        exit(0)
    #pull needed, no changes, no commits
    '''
    On branch dev
    Your branch is behind 'origin/dev' by 2 commits, and can be fast-forwarded.
    (use "git pull" to update your local branch)
    nothing to commit, working tree clean
    '''
    if needPull and noChange and not needPush:
        print("要拉取代码,没有代码改动,不能push")
        print("⚠️ ⚠️ ⚠️ 请手动拉取代码:git pull origin "+target_branch())
        exit(0)
    #pull needed, changes not added, no commits
    '''
    On branch dev
    Your branch is behind 'origin/dev' by 2 commits, and can be fast-forwarded.
    (use "git pull" to update your local branch)
    Changes not staged for commit:
    (use "git add <file>..." to update what will be committed)
    (use "git checkout -- <file>..." to discard changes in working directory)
    modified: WSPModule/WSPModule/Classes/Module/PublishJourney/VC/WSPPassengerPublishJourneyController.m
    no changes added to commit (use "git add" and/or "git commit -a")
    '''
    if needPull and changeNotAdd and not needPush:
        print("需要拉取代码,有代码改动但是未add,不能push")
        print("⚠️ ⚠️ ⚠️ 请先提交或者贮藏你的代码")
        exit(0)
    #pull needed, changes already added, no commits
    '''
    On branch dev
    Your branch is behind 'origin/dev' by 2 commits, and can be fast-forwarded.
    (use "git pull" to update your local branch)
    Changes to be committed:
    (use "git reset HEAD <file>..." to unstage)
    modified: WSPModule/WSPModule/Classes/Module/PublishJourney/VC/WSPPassengerPublishJourneyController.m
    '''
    if needPull and changeDidAdd and not needPush:
        print("需要拉取代码,有代码改动且已add,不能push")
        print("⚠️ ⚠️ ⚠️ 请先提交或者贮藏你的代码")
        exit(0)
    #pull needed, no changes, one commit waiting to push -> OK
    '''
    On branch dev
    Your branch and 'origin/dev' have diverged,
    and have 1 and 2 different commits each, respectively.
    (use "git pull" to merge the remote branch into yours)
    nothing to commit, working tree clean
    '''
    if needPull and noChange and needPush:
        print("需要拉取代码,无代码改动,需要push")
    #pull needed, changes not added, one commit waiting to push
    '''
    On branch dev
    Your branch and 'origin/dev' have diverged,
    and have 1 and 2 different commits each, respectively.
    (use "git pull" to merge the remote branch into yours)
    Changes not staged for commit:
    (use "git add <file>..." to update what will be committed)
    (use "git checkout -- <file>..." to discard changes in working directory)
    modified: WSPModule/WSPModule/Classes/Module/PublishJourney/VC/WSPPassengerPublishJourneyController.m
    no changes added to commit (use "git add" and/or "git commit -a")
    '''
    if needPull and changeNotAdd and needPush:
        print("需要拉取代码,有代码改动但未add,需要push")
        print("⚠️ ⚠️ ⚠️ 请先提交或者贮藏你的代码")
        exit(0)
    #pull needed, changes already added, one commit waiting to push
    '''
    On branch dev
    Your branch and 'origin/dev' have diverged,
    and have 1 and 2 different commits each, respectively.
    (use "git pull" to merge the remote branch into yours)
    Changes to be committed:
    (use "git reset HEAD <file>..." to unstage)
    modified: WSPModule/WSPModule/Classes/Module/PublishJourney/VC/WSPPassengerPublishJourneyController.m
    '''
    if needPull and changeDidAdd and needPush:
        print("需要拉取代码,有代码改动且已add,需要push")
        print("⚠️ ⚠️ ⚠️ 请先提交或者贮藏你的代码")
        exit(0)
def ahead_target_branch_commit():
    """Return the number of local commits ahead of the remote target branch.

    Parses the "and have N and M different commits" line from `git status`;
    exits on parse failure. Returns 0 when the branch has not diverged.
    """
    status = cammand_out_put("git status", True, None)
    needPull = MSG_PULL in status
    noChange = MSG_NOCHANGE in status
    needPush = MSG_NEEDPUSH1 in status or MSG_NEEDPUSH2 in status
    if needPull and noChange and needPush:
        print("正在解析本地已经提交的个数...")
        # NOTE(review): token-position parsing below (split(" ")[2]) assumes
        # the matched line carries no leading whitespace — confirm against
        # actual `git status` output.
        matcher = ".*and have [0-9].*"
        pattern1 = re.compile(matcher)
        resultList = pattern1.findall(status)
        print resultList
        print status
        if len(resultList):
            #split the matched line; the third token is the local commit count
            matcherStr = resultList[0]
            countStr = matcherStr.split(" ")[2]
            print "本地提交的个数是"+countStr
            return int(countStr)
        else:
            print "解析失败"
            exit(0)
    return 0
def target_branch_commit():
    """Return the target branch's one-line commit log as a list of lines (newest first)."""
    cammand = CAMMAND_GIT_LOG+" {branch} --oneline".format(branch=target_branch())
    print("获取target分支的提交记录"+cammand)
    logs = cammand_out_put(cammand, True, None)
    return logs.split("\n")
def reset_and_stash(commitCount,commitLogs):
    """Undo the newest `commitCount` local commits one by one, stashing each.

    `commitLogs` is one-line log output, newest first. Each commit is reset
    to its parent and the resulting diff stashed under "<n>_<message>".
    Returns the list of stash titles in creation order.
    """
    stashTitleList = []
    index = 0
    while(index<commitCount):
        #a log line looks like: "81280d25 <commit message>"
        commit = commitLogs[index]
        farwardCommit = commitLogs[index+1]
        #reset to the *previous* commit's id; stash under the current commit's message
        # NOTE(review): split(' ')[1] keeps only the first word of the
        # commit message — confirm that is intended.
        commitID = farwardCommit.split(' ')[0]
        commitMsg = commit.split(' ')[1]
        print(commitMsg+"=="+commitID)
        #reset the commit
        git_reset_with_commitID(commitID)
        #stash the changes
        title = str(commitCount-index)+"_"+commitMsg
        git_stash_with_title(title)
        stashTitleList.append(title)
        index+=1
    return stashTitleList
# stashTitleList is a list of stash titles previously created by reset_and_stash.
def pop_stash_with_titles(stashTitleList):
    """Pop and re-commit every stash whose title appears in stashTitleList.

    Stops (and exits) on the first pop that produces a conflict, after
    reporting the titles that are still un-restored. Mutates
    stashTitleList in place: matched titles are removed as they restore.
    """
    print("拉取所有的暂存的列表:"+CAMMAND_GIT_STASH_LIST)
    stashs = cammand_out_put(CAMMAND_GIT_STASH_LIST, True, None)
    stashList = stashs.split("\n")
    # BUGFIX: iterate over a snapshot COPY. The original code aliased the
    # same list object (tempStashTitleList = stashTitleList) and then
    # removed items from it while the inner loop iterated it, which makes
    # the iterator skip titles.
    tempStashTitleList = list(stashTitleList)
    for stash in stashList:
        for stashTitle in tempStashTitleList:
            if stashTitle in stash:
                stashTitleList.remove(stashTitle)
                stashOK = pop_stash_with_stash(stash)
                if stashOK:
                    commit_stash_with_msg(stashTitle)
                else:
                    print("⚠️ ⚠️ ⚠️ 终止恢复暂存,有冲突产生,接下来需要手动解决")
                    log_next_stash_count(stashTitleList)
                    exit(0)
def pop_stash_with_title(title):
    # NOTE(review): dead/unfinished helper — it lists the stashes but never
    # uses `title` or the fetched result; superseded by pop_stash_with_titles.
    print("拉取所有的暂存的列表:"+CAMMAND_GIT_STASH_LIST)
    stashs = cammand_out_put(CAMMAND_GIT_STASH_LIST, True, None)
def log_next_stash_count(stashTitleList):
    """Report every stash title that has not been restored yet."""
    position = 0
    while position < len(stashTitleList):
        print("如下暂存还未被恢复,请手动解决" + stashTitleList[position])
        position += 1
def commit_stash_with_msg(msg):
    """`git add -A` everything, then commit using `msg` as the message."""
    print("提交恢复暂存的代码")
    print("add变更:"+CAMMAND_GIT_ADD_ALL)
    cammand_out_put(CAMMAND_GIT_ADD_ALL, True, None)
    cammandCommit = CAMMAND_GIT_COMMIT_MSG+" \"{msg}\"".format(msg=msg)
    print("提交代码:"+cammandCommit)
    cammand_out_put(cammandCommit, True, None)
def pop_stash_with_stash(stash):
    """Pop the most recent stash; returns the falsy sentinel on failure.

    NOTE(review): stashID is computed from the entry but never passed to the
    pop command, so this always pops the TOP stash — confirm callers only
    restore stashes in LIFO order.
    """
    stashID = stash.split(": On")[0]
    print("正在恢复已经暂存的代码:"+CAMMAND_GIT_STASH_POP+" --->"+stash)
    return cammand_out_put(CAMMAND_GIT_STASH_POP, False, False)
def git_reset_with_commitID(commitID):
    """`git reset` to the given commit ID (working-tree changes are kept)."""
    cammand = CAMMAND_GIT_RESET+" "+commitID
    print("重置已经提交的代码:"+cammand)
    cammand_out_put(cammand, True, None)
def git_stash_with_title(stashTitle):
    """Stash the current changes under the given title."""
    cammand = CAMMAND_GIT_STASH+" \"{stashTitle}\"".format(stashTitle=stashTitle)
    print("暂存已经更改的代码:"+cammand)
    cammand_out_put(cammand, True, None)
def pull_target_branch():
    """Pull the target branch; returns True when the pull FAILED (conflict).

    cammand_out_put returns the sentinel True only on command failure, so
    `conflict==True` is False for normal string output.
    """
    print("拉目标分支代码:"+CAMMAND_PULL_TARGET_BRANCE)
    conflict = cammand_out_put(CAMMAND_PULL_TARGET_BRANCE, False, True)
    return conflict==True
def give_up_rebase():
    """Abort an in-progress rebase, ignoring any error."""
    print("放弃变基:"+CAMMAND_GIT_REBASE_ABORT)
    cammand_out_put(CAMMAND_GIT_REBASE_ABORT, False, None)
def create_branch_source():
    """Create and check out the source branch.

    If a local branch with that name already exists, ask the user whether
    to delete it and retry; otherwise exit for manual handling.
    """
    cammand = CAMMAND_CHECKOUT_BRANCH+" "+source_branch()
    print("创建新分支:"+cammand)
    result = cammand_out_put(cammand, False, False)
    if result==False:
        # Checkout failed: the local branch already exists.
        print("检测到本地分支{branch}已经存在".format(branch=source_branch()));
        msg = "❓❓❓是否删除此本地分支继续提交? yes/no"
        put = input_with_expect(["yes","no"],msg)
        if put == "yes":
            # Must leave the branch before force-deleting it, then retry.
            switch_target_branch()
            delete_branch_source()
            create_branch_source()
        else:
            print("请手动处理");
            exit(0)
def push_branch_source():
    """Push the source branch to origin."""
    cammand = CAMMAND_PUSH_BRANCH+" "+source_branch()
    print("推送新(源)分支:"+cammand)
    cammand_out_put(cammand, True, None)
def can_push_branch_target():
    """True when the current user may push the target branch directly.

    Forced to False when a reviewer was given on the command line, so the
    merge-request/review flow is used instead.
    """
    if inputAssiness!=None:
        return False
    targetBranch = project.branches.get(target_branch())
    return targetBranch.can_push
def push_branch_target():
    """Push the target branch to origin."""
    cammand = CAMMAND_PUSH_BRANCH+" "+target_branch()
    print("推送目标分支:"+cammand)
    cammand_out_put(cammand, True, None)
def create_merge_request():
    """Create a GitLab merge request from source branch to target branch.

    Title/description are taken from my latest commit; returns the MR object.
    """
    assignee_id = check_assignee_id()
    commitID = cammand_out_put(CAMMAND_LAST_MY_COMMITID, True, None)
    print("\n最后一次我的提交的ID是"+commitID)
    commit = project.commits.get(commitID)
    print("\n最后一次我的提交的信息是"+commit.author_name+commit.message+commit.title)
    mr = project.mergerequests.create({'source_branch':source_branch(),
        'target_branch':target_branch(),
        'title':commit.title,
        'remove_source_branch':True,
        'assignee_id':assignee_id,
        'description':commit.message
        })
    print "创建merge request,其ID是:"+str(mr.iid)
    return mr
def send_dingding(mr):
    """Notify the reviewer about the MR via DingTalk; no-op when the MR has
    no resolvable assignee."""
    try:
        reviewer = mr.assignee["name"]
    except Exception as e:
        print "❌❌❌获取review名字失败,不发送钉钉"
        return
    url = mr.web_url
    sender = mr.author["name"]
    avatar = mr.author["avatar_url"]
    message = mr.title
    send.sendToDingDing(reviewer, url, msg=message, sender=sender,icon=avatar)
def deal_merge_request(mr):
    """Merge the MR automatically when I am my own reviewer; otherwise leave
    it for the reviewer. Always remembers the MR ID locally."""
    mrID = str(mr.iid)
    if auto_merge():
        print "review人是自己,需要自动merge,merge request ID是:"+mrID
        try:
            mr.merge()
            print "merge完毕"
        except Exception as e:
            # Merge failed (e.g. conflict): open it in the browser for manual handling.
            webbrowser.open(mr.web_url)
            print e
            print "❌❌❌merge失败,请手动处理"
    else:
        print "需要他人merge,存储本次的merge request ID,ID是:"+mrID
    # The ID is saved in both cases; it is used later to delete the remote branch.
    save_merge_request_ID(mrID)
def delete_source_branch_when_merged():
    """Housekeeping: for every locally remembered MR ID, delete the remote
    source branch once its MR is merged; drop stale/closed IDs from the
    local store; keep IDs for MRs that are still open."""
    mrIDList = read_merge_request_IDs()
    if not len(mrIDList):
        return
    print("本地存储的ID有:"+str(mrIDList))
    for mrID in mrIDList:
        mr = merge_request_with_ID(mrID)
        if not mr:
            continue
        sourceBranch = mr.source_branch
        if not sourceBranch:
            print ("source branch不存在,不删除对应分支,,删除本地的此ID,对应ID:"+mrID)
            # Branch is already gone: just forget the stored ID.
            delete_merge_request_ID(str(mr.iid))
            continue
        if mr.state == "open":
            # Still under review: keep both the branch and the stored ID.
            print ("分支:{branch} ID:{mrIID} 的merge request未被合并,不删除对应分支".format(branch=sourceBranch,mrIID=mrID))
            continue
        if mr.state == "closed":
            print ("分支:{branch} ID:{mrIID} 的merge request已被关闭,不删除对应分支,删除本地的此ID".format(branch=sourceBranch,mrIID=mrID))
            # Closed without merging: forget the stored ID, keep the branch.
            delete_merge_request_ID(str(mr.iid))
            continue
        if mr.state == "merged":
            print ("分支:{branch} ID:{mrIID} 的merge request已被合并,需删除对应分支".format(branch=sourceBranch,mrIID=mrID))
            # Merged: the remote source branch is no longer needed.
            delete_origin_branch(mr.source_branch)
            delete_merge_request_ID(str(mr.iid))
def delete_origin_branch(branch):
    """Delete a branch on origin; tolerates the branch already being gone."""
    cammand =CAMMAND_DELETE_BRANCH_ORIGIN+" "+branch
    print("删除远程分支:"+cammand)
    result = cammand_out_put(cammand, False, None)
    if not result:
        print "远程分支已经不存在,无需删除分支:"+branch
def save_merge_request_ID(mrID):
    """Append an MR ID to the locally stored list (backed by git config)."""
    mrIDList = read_merge_request_IDs()
    mrIDList.append(mrID)
    store_merge_request_IDs(mrIDList)
def delete_merge_request_ID(mrID):
    """Remove an MR ID from the locally stored list (no-op when empty)."""
    mrIDList = read_merge_request_IDs()
    if not len(mrIDList):
        return
    print("删除本地存储的merge request ID:"+mrID)
    mrIDList.remove(mrID)
    store_merge_request_IDs(mrIDList)
# An empty mrIDList clears the locally stored value.
def store_merge_request_IDs(mrIDList):
    """Persist the MR ID list as a comma-separated git config value."""
    if mrIDList and len(mrIDList):
        mrIDs = ','.join(mrIDList)
    else:
        # Write an explicit empty string to clear the config entry.
        mrIDs = "\"\""
    cammand = CAMMAND_GIT_CONFIG_MERGEREQUEST_IDS+" "+mrIDs
    print("本地写入merge request ID:"+cammand)
    cammand_out_put(cammand, True, None)
def read_merge_request_IDs():
    """Read the stored MR IDs; returns [] when none are stored."""
    print("读取本地存储的merge request ID:"+CAMMAND_GIT_CONFIG_MERGEREQUEST_IDS)
    mrIDs = cammand_out_put(CAMMAND_GIT_CONFIG_MERGEREQUEST_IDS, False, None)
    if not mrIDs:
        print "没有本地储存的merge request ID"
        return []
    mrIDList = mrIDs.split(",")
    return mrIDList
def merge_request_with_ID(mrID):
    """Fetch an MR by ID from GitLab; returns None when it does not exist."""
    mr = None
    try:
        mr = project.mergerequests.get(mrID)
    except Exception,e:
        print e
        print "没有此merge request ID="+mrID
    return mr
def switch_target_branch():
    """Check out the target branch (errors ignored)."""
    cammand = CAMMAND_SWITCH_BRANCH+" "+target_branch()
    print("切换到目标分支:"+cammand)
    cammand_out_put(cammand, False, False)
def delete_branch_source():
    """Force-delete the local source branch."""
    cammand = CAMMAND_FORCE_DELETE_LOCAL_BRANCH+" "+source_branch()
    print("强制删除本地分支:"+cammand)
    cammand_out_put(cammand, True, None)
def git_fetch():
    """Fetch remote refs so local status reflects the server."""
    print("抓取仓库信息:"+CAMMAND_GIT_FETCH)
    cammand_out_put(CAMMAND_GIT_FETCH, True, None)
def open_web_merge_request(mr):
    """Show the MR URL; open a browser only when someone else must review."""
    if auto_merge():
        print "mr的URL是:"+mr.web_url
    else:
        print("打开浏览器:"+mr.web_url)
        webbrowser.open(mr.web_url)
def module_files_list():
    """Snapshot of tracked file names under the current working directory."""
    return files_list_with_path(current_path())
def pod_install_if_need():
    """Run `pod install` only when the pre-pull file snapshot differs from
    the post-pull one (i.e. the pull actually changed tracked files)."""
    if len(GLOBAL_BEFORE_PULL_FILE_LIST)==0:
        print("被标记的文件列表没有数据,不执行pod install")
        return
    afterPullFliesList = module_files_list()
    if GLOBAL_BEFORE_PULL_FILE_LIST == afterPullFliesList:
        print("无文件变更,不需要执行pod install")
        return
    podfilePath = podfile_path()
    if not podfilePath:
        print("Podfile文件不存在,无法执行pod install...")
        return
    # pod install must run from the directory containing the Podfile.
    os.chdir(podfilePath)
    print("需要重新部署文件,正在执行:"+CAMMAND_POD_INSTALL)
    print("请稍后...")
    os.system(CAMMAND_POD_INSTALL)
    # NOTE(review): current_path() is os.getcwd(), which at this point IS
    # podfilePath, so this chdir is a no-op — confirm the intent was to
    # restore the ORIGINAL directory.
    os.chdir(current_path())
def podfile_path():
    """Return the directory containing a Podfile below cwd, or None."""
    podfile = "Podfile"
    for root,dirs,files in os.walk(current_path()):
        for eachFile in files:
            if eachFile == podfile:
                return root
    return None
def files_list_with_path(path):
    """Recursively collect source-ish file names under `path`.

    Skips anything whose directory path contains a Pods/.git/Assets,
    Xcode bundle (.xcworkspace/.xcodeproj) or .idea marker.
    Returns bare file NAMES (not full paths) whose extension is one of
    .h/.m/.mm/.plist/.db, in os.walk order.
    """
    # Substrings that mark a directory tree to ignore (idiom: one tuple
    # plus any() instead of six separate variables and a 6-way or-chain).
    ignore_markers = ("/Pods", "/.git", "/Assets",
                      ".xcworkspace", ".xcodeproj", ".idea")
    # Extensions that count as tracked source/resource files.
    wanted_suffixes = (".h", ".m", ".mm", ".plist", ".db")
    filtList = []
    for root, dirs, files in os.walk(path):
        if any(marker in root for marker in ignore_markers):
            continue
        for eachFile in files:
            # str.endswith accepts a tuple: one call replaces five checks.
            if eachFile.endswith(wanted_suffixes):
                filtList.append(eachFile)
    return filtList
def project_ID():
    """Return this repo's GitLab project ID, caching it in git config.

    On a cache miss, matches the repo's remote URL against every project on
    the GitLab server; exits with guidance when no match can be made.
    """
    projectID = read_project_ID()
    # Cache hit.
    if projectID:
        print ("读取缓存的当前项目ID是:"+projectID)
        return projectID
    # Cache miss: resolve via the configured remote URL.
    congif_url = git_config_remote_url()
    if not len(congif_url):
        print ("❌❌❌没有远程仓库的URL!!!\n请先执行如下命令配置本仓库远程仓库的URL")
        print CAMMAND_GIT_REMOTE_URL+" "+"xxx"
        exit(0)
    print "从远端读取所有项目来匹配ID"
    try:
        projects = gl.projects.list(all=True)
        for p in projects:
            if p.ssh_url_to_repo == congif_url or p.http_url_to_repo == congif_url:
                projectID = p.id
                break
        if projectID>0:
            save_project_ID(projectID)
        else:
            print("projectID={pID} projectID不存在".format(pID=projectID))
            print("可手动配置,执行命令:{cmd} 项目ID".format(cmd=CAMMAND_GIT_CONFIG_PROJECT_ID))
            exit(0)
    except Exception,e:
        print e
        print("❌❌❌无法获gitlab远程仓库的本项目ID, 请检查网络是否正常")
        print("如仍然无法解决,可执行如下命令配置项目ID:"+CAMMAND_GIT_CONFIG_PROJECT_ID+" projectID")
        exit(0)
    return projectID
def save_project_ID(projectID):
    """Cache the project ID in git config."""
    cammand = CAMMAND_GIT_CONFIG_PROJECT_ID+" "+str(projectID)
    print ("缓存当前项目ID:"+str(projectID))
    cammand_out_put(cammand, True, None)
def read_project_ID():
    """Read the cached project ID from git config; falsy when unset."""
    return cammand_out_put(CAMMAND_GIT_CONFIG_PROJECT_ID, False, None)
def last_my_short_commitID():
    # Short hash of HEAD; empty string when the command fails.
    cammand = "git rev-parse --short HEAD"
    return cammand_out_put(cammand, False, "")
# Source branch is auto-generated; a CLI-supplied value is only used as a prefix.
def source_branch():
    """Return (and memoize) the source branch name: <prefix>_<short HEAD hash>.

    Prefix is the CLI-supplied source branch when given, otherwise the
    configured git user.name. Exits when no user.name is configured.
    """
    global GLOBAL_BRANCH_SOURCE
    if GLOBAL_BRANCH_SOURCE:
        return GLOBAL_BRANCH_SOURCE
    # CLI override takes precedence.
    if inputSourceBranch:
        GLOBAL_BRANCH_SOURCE = inputSourceBranch+"_"+last_my_short_commitID()
        return GLOBAL_BRANCH_SOURCE
    # Otherwise derive the prefix from the configured git user name.
    cammand = "git config --global user.name"
    name = cammand_out_put(cammand, False, None)
    if not name:
        print("请先执行命令配置自己对应gitlab上的名字"+cammand+" xxx")
        exit(0)
    # Dead experiments kept for reference: pinyin conversion of the name...
    # pin = Pinyin()
    # name = unicode(name, 'utf-8')
    # name = pin.get_pinyin(name,"")
    # ...and a time-of-day suffix.
    # dateTime = datetime.datetime.now().strftime('%H时%M分')
    # name = name+"_"+dateTime
    GLOBAL_BRANCH_SOURCE = name+"_"+last_my_short_commitID()
    return GLOBAL_BRANCH_SOURCE
# Target branch may be supplied externally.
def target_branch():
    """Return the target branch: CLI arg > git-config value > default."""
    if inputTargetBranch:
        return inputTargetBranch
    localTargetBranch = cammand_out_put(CAMMAND_GIT_CONFIG_TARGET_BRANCH, False, None)
    if localTargetBranch:
        return localTargetBranch
    return DEFAULT_BRANCH_TARGET
def private_token():
    """GitLab private token from git config ('' when unset)."""
    return cammand_out_put(CAMMAND_GIT_CONFIG_PRIVATE_TOKEN, False, "")
def gitlab_url():
    """GitLab server URL from git config ('' when unset)."""
    return cammand_out_put(CAMMAND_GIT_CONFIG_GITLAB_URL, False, "")
def git_config_remote_url(url = None):
    """Read (url=None) or validate-and-write the repo's remote URL in git config.

    Returns the URL, or None when an invalid URL was supplied for writing.
    """
    if url:
        # Only http:// and ssh:// remotes are accepted.
        if url.startswith("http://") or url.startswith("ssh://"):
            cammand = CAMMAND_GIT_REMOTE_URL+" "+url
            print ("写入当前仓库的URL:"+cammand)
            url = cammand_out_put(cammand, True, None)
            return url
        else:
            print ("无效URL,无法写入")
            return None
    else:
        print ("读取当前仓库的URL:"+CAMMAND_GIT_REMOTE_URL)
        url = cammand_out_put(CAMMAND_GIT_REMOTE_URL, False, "")
        print url
        return url
def auto_merge():
    """True when the reviewer is the logged-in user (self-review → auto-merge)."""
    return current_login_user_name() == get_assiness()
def get_assiness():
    """Reviewer name: CLI arg > ASSINESS constant > current user (self)."""
    if inputAssiness:
        return inputAssiness
    if ASSINESS:
        return ASSINESS
    return current_login_user_name()
def local_branchs():
    """List local branch lines from `git branch` output."""
    result = cammand_out_put(CAMMAND_CURRENT_LOCAL_BRANCHS, True, None)
    return result.split("\n")
def current_select_branch():
    """Name of the checked-out branch (the line marked with '*'), or None."""
    branchs = local_branchs()
    for i, val in enumerate(branchs):
        if "*" in val:
            return val.replace("* ","")
def check_assignee_id():
    """Resolve the reviewer's GitLab user ID by exact name match, or None."""
    users = gl.users.list(all=True)
    assignessName = get_assiness()
    assignee_id = None
    for user in users:
        if assignessName == user.name:
            assignee_id = user.id
    if assignee_id:
        print ("review的人名字是{name} ID是{id}".format(name=get_assiness(),id=assignee_id))
        return assignee_id
    else:
        print ("❌❌❌ 未找到review者的名字是{name},已创建无review名字的MR".format(name=assignessName))
        return assignee_id
def current_login_user_name():
    """Name of the authenticated GitLab user; a fallback string when offline."""
    try:
        return gl.user.name
    except Exception as e:
        return "未获取到用户名"
# Basic helpers
def current_time():
    """Current local time formatted as 'MM-DD_HH:MM:SS'."""
    return datetime.datetime.now().strftime('%m-%d_%H:%M:%S')
def current_path():
    """Directory the script was launched FROM (not the script file's dir)."""
    return os.getcwd()
    # Unreachable alternative kept for reference: the script FILE's directory.
    currentPath = os.path.realpath(__file__);
    fileName = os.path.basename(__file__);
    return currentPath.replace(fileName,"");
    pass
def cammand_out_put(cammand, can_raise, raise_return_value):
    """Run a shell command and return its stripped stdout.

    On non-zero exit: re-raise when can_raise is True, otherwise return
    raise_return_value (callers use None/False/""/True as sentinels).
    """
    try:
        return subprocess.check_output(cammand, shell=True).strip()
        pass
    except subprocess.CalledProcessError as e:
        if can_raise:
            raise(e)
        else:
            return raise_return_value
        pass
    pass
# Prompt for input; expectList holds the allowed answers, mark is the prompt text.
def input_with_expect(expectList=[], mark="请输入"):
    """Prompt until the user types one of expectList (anything when empty).

    Expected values are compared as strings (Python 2 raw_input).
    """
    val = raw_input(mark+"\n请输入:")
    # No expectations configured: accept anything.
    if not expectList or expectList == []:
        return val
    # Stringify the expected values so comparison works for non-strings too.
    tempList = []
    for expect in expectList:
        tempList.append(str(expect))
    # Recurse with a "invalid input" prompt until an expected value arrives.
    while not val in tempList:
        val = input_with_expect(expectList=expectList,mark="无效输入,请重新输入")
    else:
        return val
# Parse command-line arguments.
def deal_argv(arguments):
    """Handle CLI flags: -h/--help, -v/--version, update, -r/--review=,
    -s/--source=, -t/--target=. Unknown arguments print help and exit.

    Mutates the module-level inputAssiness / inputSourceBranch /
    inputTargetBranch globals as a side effect.
    """
    # Drop argv[0] (the script path itself).
    arguments.remove(arguments[0])
    # tempList tracks arguments not yet recognized.
    tempList = list(arguments)
    # Help.
    if "-h" in arguments or "--help" in arguments:
        log_help()
        exit(0)
    # Version.
    if "-v" in arguments or "--version" in arguments:
        print VERSION
        exit(0)
    # Self-update.
    if "update" in arguments:
        update.updatejob()
        exit(0)
    global inputAssiness
    global inputSourceBranch
    global inputTargetBranch
    for idx, argu in enumerate(arguments):
        # Reviewer override.
        if argu.startswith("-r=") or argu.startswith("--review="):
            tempArgu = argu.replace("--review=", "")
            tempArgu = tempArgu.replace("-r=", "")
            name = tempArgu.replace("\"", "")
            print "指定的review人是:"+name
            inputAssiness = reviewer.reviewer_with_name(unicode(name,"utf-8"))
            tempList.remove(argu)
        # Source-branch override.
        elif argu.startswith("-s=") or argu.startswith("--source="):
            # NOTE(review): strips "--source" WITHOUT the trailing "=",
            # unlike the "--review=" branch above, so a --source=x value
            # keeps a leading "=" — confirm whether that is intended.
            tempArgu = argu.replace("--source", "")
            tempArgu = tempArgu.replace("-s=", "")
            inputSourceBranch = tempArgu.replace("\"", "")
            print "用户指定的源分支是:"+inputSourceBranch
            tempList.remove(argu)
        # Target-branch override (same "--target" stripping caveat as above).
        elif argu.startswith("-t=") or argu.startswith("--target="):
            tempArgu = argu.replace("--target", "")
            tempArgu = tempArgu.replace("-t=", "")
            inputTargetBranch = tempArgu.replace("\"", "")
            print "用户指定的目标分支是:"+inputTargetBranch
            tempList.remove(argu)
    # Anything left over is unrecognized: show help and bail out.
    if len(tempList):
        log_help()
        print "参数有误,请重新输入,未识别参数是:"
        for value in tempList:
            print value
        exit(0)
    if len(arguments):
        print "---------------"
    pass
def log_help():
    """Print CLI usage for the push tool, then the project-config help."""
    print "帮助:(命令为push后加参数)"
    print "* push后加参数update 更新升级"
    print "* push后加参数-h 或者--help 输出帮助"
    print "* push后加参数-r=xxx 或者--review=xxx 指定review的人"
    print "* push后加参数-s=xxx 或者--source=xxx 指定source分支"
    print "* push后加参数-t=xxx 或者--target=xxx 指定target分支"
    print "* 其他命令:新增/修改默认目标分支请执行:"+CAMMAND_GIT_CONFIG_TARGET_BRANCH+" xxx"
    print "* 其他命令:查看默认目标分支请执行:"+CAMMAND_GIT_CONFIG_TARGET_BRANCH
    print "* 其他命令:删除默认目标分支请执行:"+CAMMAND_GIT_CONFIG_DELETE_TARGET_BRANCH
    pjconfig.log_help()
# Entry point: parse CLI args, then run the whole push workflow.
if __name__ == '__main__':
    deal_argv(sys.argv)
    print("开始工作...")
    main()
    print("工作完毕。")
    pass
|
import sympy
from meshless.dev.sympytools import (pow2mul, mprint_as_sparse, mprint_as_dense,
mprint_as_array)
def test_expandpow():
    """pow2mul should expand integer powers into explicit multiplications."""
    expression = 'a** 3 + pow((b+2)*c+1, 2) + (a+b * c)**3'
    expected = '(a*a*a) + (((b+2)*c+1)*((b+2)*c+1)) + ((a+b*c)*(a+b*c)*(a+b*c))'
    assert pow2mul(expression) == expected
def test_print_as_sparse():
    """mprint_as_sparse should emit COO-style row/col/value assignments."""
    xvar = sympy.var('xvar')
    yvar = sympy.var('yvar')
    m = sympy.Matrix([[1, 2*xvar + yvar**3]])
    res1 = """
# test_m
# test_m_num=2
c += 1
test_mr[c] = row+0
test_mc[c] = col+0
test_mv[c] += 1
c += 1
test_mr[c] = row+0
test_mc[c] = col+1
test_mv[c] += 2*xvar + (yvar*yvar*yvar)
"""
    res1 = res1.strip()
    # BUGFIX: these comparisons were bare expressions whose results were
    # silently discarded, so the test could never fail; they must be asserted.
    assert mprint_as_sparse(m, 'test_m', print_file=False) == res1
    assert mprint_as_sparse(m, 'test_m', print_file=False, is_symmetric=True) == res1
    res2 = """
# test_m
# test_m_num=2
c += 1
test_mr[c] = row+0
test_mc[c] = col+0
test_mv[c] += 1
c += 1
test_mr[c] = row+0
test_mc[c] = col+1
test_mv[c] += (yvar*yvar*yvar) + 2*yvar
"""
    res2 = res2.strip()
    assert mprint_as_sparse(m, 'test_m', print_file=False, subs={xvar: yvar}) == res2
def test_print_as_full():
    """mprint_as_dense should emit dense m[i, j] += ... assignments."""
    xvar = sympy.var('xvar')
    yvar = sympy.var('yvar')
    matrix = sympy.Matrix([[1, 2*xvar + yvar**3]])
    expected_plain = '''
# m
# m_num=2
m[0, 0] += 1
m[0, 1] += 2*xvar + (yvar*yvar*yvar)
'''.strip()
    assert mprint_as_dense(matrix, 'm') == expected_plain
    # Substituting yvar for xvar must be reflected (and documented) in the output.
    expected_substituted = '''
# subs
# yvar = xvar
# m
# m_num=2
m[0, 0] += 1
m[0, 1] += (yvar*yvar*yvar) + 2*yvar
'''.strip()
    assert mprint_as_dense(matrix, 'm', subs={xvar: yvar}) == expected_substituted
def test_print_as_array():
    """mprint_as_array should emit flat m[pos+i] += ... assignments,
    optionally with common-subexpression elimination (use_cse)."""
    xvar = sympy.var('xvar')
    yvar = sympy.var('yvar')
    matrix = sympy.Matrix([[1, 2*xvar + yvar**3, + 2*xvar*(yvar+1)]])
    expected_plain = '''
# m
# m_num=3
m[pos+0] += 1
m[pos+1] += 2*xvar + (yvar*yvar*yvar)
m[pos+2] += 2*xvar*(yvar + 1)
'''.strip()
    assert mprint_as_array(matrix, 'm') == expected_plain
    # With CSE enabled, the shared 2*xvar factor becomes a cdef'd temporary.
    expected_cse = '''
# cdefs
cdef double x0
# subs
x0 = 2*xvar
# m
# m_num=3
m[pos+0] += 1
m[pos+1] += x0 + (yvar*yvar*yvar)
m[pos+2] += x0*(yvar + 1)
'''.strip()
    assert mprint_as_array(matrix, 'm', use_cse=True) == expected_cse
if __name__ == '__main__':
    # Run the full suite when executed directly (no pytest needed).
    test_expandpow()
    test_print_as_sparse()
    test_print_as_full()
    test_print_as_array()
|
from django import forms
from django.contrib.contenttypes import fields
from django.forms import widgets
from django.forms.models import inlineformset_factory
from courses.models import Course, Module
class CourseCreateForm(forms.ModelForm):
    """ModelForm for creating a Course, with placeholder hints on its
    title and overview inputs."""
    class Meta:
        model = Course
        fields = ('subject', 'title', 'overview', )
    def __init__(self, *args, **kwargs):
        super(CourseCreateForm, self).__init__(*args, **kwargs)
        # Attach placeholder text to the user-facing text inputs.
        placeholders = {
            'title': 'Course Title*',
            'overview': 'Course Overview*',
        }
        for field_name, hint in placeholders.items():
            self.fields[field_name].widget.attrs['placeholder'] = hint
# Inline formset letting a Course edit its Modules (title/description),
# with two blank extra rows and per-row delete checkboxes.
ModuleFormSet = inlineformset_factory(Course, Module, fields=['title', 'description'],
                                      extra=2, can_delete=True
                                      )
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 15 15:19:07 2019
@author: pippo
"""
#Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#Importing the DataSet
dataset = pd.read_csv('Mall_Customers.csv')
# Columns 3 and 4 are Annual Income and Spending Score.
X = dataset.iloc[:, [3, 4]].values
# Using the dendrogram to find the optimal number of clusters:
import scipy.cluster.hierarchy as sch
dedograms = sch.dendrogram(sch.linkage(X, method = 'ward'))
plt.title('Dedograms')
plt.xlabel('Customers')
plt.ylabel('Euclidean distances')
# BUGFIX: render the dendrogram in its own figure; without this show(),
# the cluster scatter below is drawn into the same axes and overwrites it.
plt.show()
#Fitting HC to the mall dataset
from sklearn.cluster import AgglomerativeClustering
hc = AgglomerativeClustering(n_clusters=5,affinity='euclidean',linkage='ward')
y_hc = hc.fit_predict(X)
#Visualising the clusters (one scatter per cluster label)
plt.scatter(X[y_hc == 0, 0], X[y_hc == 0,1], s=100, c='red', label = 'Careful' )
plt.scatter(X[y_hc == 1, 0], X[y_hc == 1,1], s=100, c='blue', label = 'Standard' )
plt.scatter(X[y_hc == 2, 0], X[y_hc == 2,1], s=100, c='green', label = 'Target' )
plt.scatter(X[y_hc == 3, 0], X[y_hc == 3,1], s=100, c='cyan', label = 'Careless' )
plt.scatter(X[y_hc == 4, 0], X[y_hc == 4,1], s=100, c='magenta', label = 'Sensible' )
plt.title('Cluster of clients')
plt.xlabel('Annual Income(K$)')
plt.ylabel('Spending Score (1-100)')
plt.legend()
plt.show()
'''
Exports the 'tile' class, which represents a single tile on the game
board. Tiles are expected to keep track of all the agents that are
present on the tile. Tiles also keep track of who their neighbors are,
and also allow or deny an agent's request to enter the tile.
Perhaps the most important function of the tile is to keep track
of the "Play Queue", which returns agents that are to be played against
each other, prioritizing agents who still have games left that they
can play.
Created by NPD on 11/23/14
'''
from random import random
from numpy.random import choice as npchoice
from random import choice
from constants import *
class Tile():
    """One board tile: tracks resident agents, admits/rejects entering
    agents (with probability transition_prob), and maintains the
    per-iteration play queue consumed by getPlayers()."""
    def __init__(self, location, transition_prob = 1):
        # location dictates where on the game board this tile is.
        # transition_prob is the likelihood that an agent that attempts
        # to move to this tile will be able to, a float from 0 to 1.
        self.transition_prob = transition_prob
        self.location = location
        self.loc_x, self.loc_y = location
        self.agents = [] # array of agents currently on the tile
        self.neighbors = [] # array of neighboring tiles
        self.agents_to_play = [] # stores a list of agents that waiting
                                 # to play this iteration.
    def _buildQueue(self):
        # constructs agents_to_play (a fresh copy of the resident agents).
        self.agents_to_play = [x for x in self.agents]
    def iterate(self):
        # the iterate method effectively resets the queue. it
        # is to be called after each iteration is completed.
        # in the case of tiles, all that this does is rebuild the
        # playing queue.
        self._buildQueue()
    def acceptAgent(self, agent):
        # this decides whether or not to accept an agent onto this tile,
        # potentially be checking whether or not the agent is in a
        # neighboring tile.
        # A tile-less agent (initial placement) is admitted unconditionally.
        if agent.tile == None:
            agent.tile = self
            self.agents.append(agent)
            # NOTE(review): control falls through to the probabilistic block
            # below, which then removes and re-appends this same agent — a
            # harmless round-trip, but an early return would be clearer.
        if random() <= self.transition_prob:
            if agent.tile != None:
                # then the agent may enter the tile.
                # remove the agent from the other tile
                agent.tile.removeAgent(agent)
                # change that agent's tile
                agent.tile = self
                # add this agent to the list of current agents
                self.agents.append(agent)
    def removeAgent(self, agent):
        # removes an agent from this tile (and from the pending play queue).
        if agent in self.agents_to_play:
            self.agents_to_play.remove(agent)
        self.agents.remove(agent)
    def getPlayers(self):
        # returns two randomly chosen agents. If there are no such
        # agents remaining, simply return None.
        if len(self.agents) < 2:
            # only 0 or 1 agents -- that agent is, sadly, doomed to die
            # with no fitness.
            return None
        while True:
            rem_both = True
            if not len(self.agents_to_play):
                return None
            if len(self.agents_to_play) == 1:
                # Last queued agent plays a random resident opponent, who
                # must NOT be dequeued afterwards (rem_both = False).
                agent_a = self.agents_to_play[0]
                agent_b = choice(self.agents)
                rem_both = False
            else:
                # Two distinct queued agents, sampled without replacement.
                agent_a, agent_b = npchoice(self.agents_to_play, 2, False)
            if agent_a.available_moves >= 1 or agent_b.available_moves >= 1:
                # i.e., if either have something left that they
                # can gain.
                break
            # Neither can gain anything: drop them from the queue and resample.
            self.agents_to_play.remove(agent_a)
            if rem_both:
                self.agents_to_play.remove(agent_b)
        # Dequeue exhausted agents so they are not drawn again this iteration.
        if agent_a.available_moves < 1:
            self.agents_to_play.remove(agent_a)
        if agent_b.available_moves < 1 and rem_both:
            self.agents_to_play.remove(agent_b)
        return (agent_a, agent_b)
|
from django.urls import path
from weather.views import weather
# URL namespace: reverse with 'weather:weather'.
app_name = 'weather'
urlpatterns = [
    # The app root serves the weather view.
    path('', weather, name='weather'),
]
# This program decides whether a person is granted a driver's license or not
def main():
    """Ask for the user's age and official ID, then grant or deny a license."""
    edad = int(input("Ingresa tu edad: "))
    # Reject impossible (negative) ages before anything else.
    if edad < 0:
        print(' Respuesta incorrecta ')
    elif edad >= 18:
        print(' Primera verifivación pasada con éxito ')
        # BUGFIX: the prompt asks for "s/n" but the answers were compared
        # against "si"/"no", so neither branch could ever be taken. Compare
        # against the letters the prompt actually requests.
        # BUGFIX: the ID question is now asked only after the age check
        # passes; previously "Licencia denegada" could be followed by
        # "Licencia autorizada" for an under-age user.
        identificacion = input(' Tienes identificaciónoficial? (s/n) ')
        if identificacion == "s":
            print('Licencia autorizada')
        elif identificacion == "n":
            print('Licencia rechazada')
    else:
        # Under 18: denied, no point asking for ID.
        print(' Licencia denegada ')
if __name__ == '__main__':
    main()
|
# coding=utf-8
from django.core.management.base import BaseCommand, CommandError
from mulan.models import Setting
import datetime as dt
from mulan.management.commands.deliver import send_email
def generate_success_message (s1, s2, old_cur_bl_variant, old_next_bl_variant_change):
    """Russian-language summary of a successful business-lunch variant switch:
    old/new variant (s1) and old/new next-change date (s2).
    Python 2: uses unicode() for coercion."""
    return (u"Смена варианта меню бизнес-ланча прошла успешно.\n" +
        s1.description + u": было " + unicode (old_cur_bl_variant) + u', стало ' + unicode (s1.value) + u"\n" +
        s2.description + u": было " + unicode (old_next_bl_variant_change.strftime ("%d.%m.%Y")) +
        u', стало ' + unicode (s2.value) + u"\n")
def generate_failure_message (s1, s2, s3):
    """Generic failure text; the settings are accepted for signature
    symmetry with generate_success_message but are not used."""
    return u"Произошла какая-то ошибка! Проверьте внимательно настройки!"
class Command(BaseCommand):
    """Management command: rotate the business-lunch menu variant (0/1) once
    the scheduled change date arrives, then e-mail the admin the outcome."""
    def handle(self, *args, **options):
        msg_to_send = None
        failure = False
        try:
            s1 = Setting.objects.get (key = 'cur_bl_variant')
            s2 = Setting.objects.get (key = 'next_bl_variant_change')
            s3 = Setting.objects.get (key = 'bl_variant_change_period')
            old_cur_bl_variant = int(s1.value)
            old_next_bl_variant_change = dt.datetime.strptime (s2.value, "%d.%m.%Y").date()
            bl_variant_change_period = int(s3.value)
            if dt.date.today() >= old_next_bl_variant_change:
                # Alternate between variants 0 and 1.
                new_cur_bl_variant = (old_cur_bl_variant + 1) % 2
                # Schedule the next switch one period later.
                new_next_bl_variant_change = old_next_bl_variant_change + dt.timedelta (days = bl_variant_change_period)
                s1.value = str (new_cur_bl_variant)
                s2.value = new_next_bl_variant_change.strftime ("%d.%m.%Y")
                s1.save()
                s2.save()
                msg_to_send = generate_success_message (s1, s2, old_cur_bl_variant, old_next_bl_variant_change)
        except ValueError:
            # Malformed setting values: non-int variant/period or a bad date.
            msg_to_send = generate_failure_message (s1, s2, s3)
            failure = True
        if msg_to_send:
            subj = u"Смена варианта меню бизнес-ланча: " + (u"ошибка!" if failure else u"успешно")
            send_email (subj, msg_to_send, msg_to_send, Setting.objects.get (key = 'admin_email').value)
|
'''
Created on 18 May 2017
@author: Tor Eivind Syvertsen
'''
if __name__ == '__main__':
pass
from bs4 import BeautifulSoup
import requests
link = 'https://www.the-ninth-age.com/index.php?thread/5939-sa-public-playtesting-comments/&pageNo=5'
f = requests.get(link)
soup = BeautifulSoup(f.text) #or f.content
'''TO PRINT ENTIRE SOUP'''
#for link in soup.find_all('a'):
#print(link.get('href'))
#suppeText = soup.findAll('div', {"class":"messageText"})
all_messages = soup.find_all("div", class_="messageText")
ctr =0
for msg in all_messages:
ctr+=1
a = msg.get_text()
a.replace(' ','')
print a
with open('msg'+str(ctr)+'.txt','w') as f:
f.write(a)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.