text stringlengths 8 6.05M |
|---|
from freenit.models.sql.user import User as BaseUser
class User(BaseUser):
    """Application user model extending freenit's SQL user."""
    class Meta:
        # Explicit table name instead of the framework default.
        table_name = 'users'
class LDAPUser():
    """Lightweight holder for an LDAP identity: a uid within a domain."""

    def __init__(self, uid, domain):
        self.uid = uid
        self.domain = domain
|
# Generated by Django 2.2.13 on 2020-07-07 08:47
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated migration: adds 'active'/'featured' flags and a creation
    # 'timestamp' to both the men's and women's product tables.  The timestamp
    # default (timezone.now) only backfills existing rows; preserve_default=False
    # drops it afterwards because auto_now_add takes over for new rows.

    dependencies = [
        ('shop', '0019_products_women'),
    ]
    operations = [
        migrations.AddField(
            model_name='products_men',
            name='active',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='products_men',
            name='featured',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='products_men',
            name='timestamp',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='products_women',
            name='active',
            field=models.BooleanField(default=True),
        ),
        migrations.AddField(
            model_name='products_women',
            name='featured',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='products_women',
            name='timestamp',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
    ]
|
# util.py
"""Various utility functions used in this project."""
def max_elements(list1, N):
    """Return a tuple of the N largest values of *list1*, largest first.

    N is clamped to the input length, so short or empty iterables yield a
    short (possibly empty) tuple.  Duplicates are kept.
    """
    import heapq  # local import keeps this snippet self-contained

    items = list(list1)
    # heapq.nlargest already returns the top N sorted in descending order.
    # Unlike the previous max-and-remove loop (seeded with max1 = 0), it is
    # correct for zero/negative values and runs in O(len * log N) instead of
    # O(N * len).
    return tuple(heapq.nlargest(min(len(items), N), items))
|
import art.write
from art.config import ArtConfig
from art.manifest import Manifest
def test_dest_options(mocker, tmpdir):
    """Query-string options on a dest URL must be parsed out and handed to
    _write_file, with the path suffix inserted before the manifest name."""
    cfg = ArtConfig(
        work_dir=str(tmpdir), dests=[str(tmpdir)], name="", repo_url=str(tmpdir)
    )
    mf = Manifest(files={})
    # Stub the low-level writer so no real I/O happens; only kwargs are checked.
    wf = mocker.patch("art.write._write_file")
    art.write.write(
        cfg,
        dest="derp://foo/bar/?acl=quux",
        path_suffix="blag",
        manifest=mf,
        dry_run=False,
    )
    call_kwargs = wf.call_args[1]
    # ?acl=quux must surface as a parsed options dict...
    assert call_kwargs["options"] == {"acl": "quux"}
    # ...and the manifest path is dest + suffix + fixed filename, query stripped.
    assert call_kwargs["dest"] == "derp://foo/bar/blag/.manifest.json"
|
'''
Copyright PuzzleDev s.n.c.
Created on Jul 14, 2012
@author: Michele Sama (m.sama@puzzledev.com)
'''
from django.contrib.auth.models import User
from django.core.files.base import ContentFile
from django.test.testcases import TestCase
from uploadcleaner.models import UploadCleanerLog, UploadCleanerLogManager
from uploadcleaner.utils import filefields_in_model, linked_files_from_model
class UploadCleanerLogManagerTestCase(TestCase):
    """Tests for UploadCleanerLogManager.filter_linked_files."""

    def testFilterLinkedFiles(self):
        """Files absent from the keep-list must be reported for deletion."""
        to_keep = ["a", "b", "c"]
        to_delete = ["x", "y", "z"]
        all_files = to_keep + to_delete
        deleted = UploadCleanerLogManager()\
            .filter_linked_files(all_files, to_keep)
        # assertEquals is a long-deprecated alias; assertEqual is the real API.
        self.assertEqual(to_delete, deleted)
class UtilsTestCase(TestCase):
    """Tests for the FileField discovery helpers in uploadcleaner.utils."""

    def testFileFieldsInModel(self):
        # User declares no FileFields; UploadCleanerLog declares log_file
        # and backup_file.
        self.assertEqual([], filefields_in_model(User))
        self.assertEqual(['log_file', 'backup_file'],
                         [x.name for x in filefields_in_model(UploadCleanerLog)])

    def testLinkedFilesForModel(self):
        """linked_files_from_model must return the paths of saved file fields."""
        instance = UploadCleanerLog.objects.create()
        myfile = ContentFile("hello world")
        instance.log_file.save("hello.txt", myfile, save=True)
        instance.save()
        self.assertEqual(
            [instance.log_file.path, ],
            linked_files_from_model(UploadCleanerLog))
        # Clean up the created row (and its stored file) after the assertion.
        instance.delete()
# Replace every TAB in hightemp.txt with a single space — the equivalent of
# `cat hightemp.txt | sed 's/\t/ /g'` (or `tr '\t' ' '`), as the original
# trailing comment indicates.
with open("hightemp.txt") as f:
    for line in f:
        # The original replaced the literal substring 'tr' — a typo for the
        # tab character the shell-command comment describes.
        print(line.replace('\t', ' '), end='')
|
# Small interactive experiments with numeric values and types.
pi = float(3.141592653589793238462)
print(pi)

diff = 20
load = 5 * diff
print(load)
print(type(diff))
print(2 ** 8)
|
import requests
from bs4 import BeautifulSoup
class WebHelper:
    """Fetches and parses an Avito search-results page for a query in a region."""

    def __init__(self, query: str, region: str):
        self.url = "https://www.avito.ru/{}?q={}".format(region, query)
        self.html = self.get_html()  # NOTE: False on fetch failure (see get_html)
        self.soup = BeautifulSoup(self.html, 'html.parser')

    def get_html(self):
        """Fetch the search page; return the HTML text, or False on any error."""
        try:
            result = requests.get(self.url)
            result.raise_for_status()
            return result.text
        except(requests.RequestException, ValueError):
            print('Server error')
            return False

    def get_count(self) -> int:
        """Extract the result count from the page and return it as an int.

        NOTE(review): relies on Avito's hashed CSS class
        'page-title-count-1oJOc' — fragile; confirm it still exists.
        """
        count = self.soup.findAll('span', class_='page-title-count-1oJOc')
        print(count)
        # The rendered count uses spaces as thousands separators.
        return int(count[0].text.replace(' ', ''))
|
from PyQt5.QtWidgets import QWidget, QPushButton,QLabel,QComboBox, QGridLayout, QMainWindow, QFrame, QHBoxLayout, QVBoxLayout
def add_widgets(self, data1, data0, lay1):
    """Populate grid layout *lay1* with one (label, combo box) row per entry.

    data1 -- row labels (duplicates collapse and rows are limited to the
             shorter of data1/data0, via the dict(zip(...)) below — behaviour
             preserved from the original)
    data0 -- the choices offered by every combo box
    lay1  -- the QGridLayout receiving the label/combo pairs
    Returns the list of created QComboBox widgets.
    """
    combos = []
    dict1 = dict(zip(data1, data0))
    # The zipped values were never used; iterate the keys only.
    for index, key in enumerate(dict1):
        label = QLabel(key)
        combo = QComboBox()
        combo.addItems(data0)
        combos.append(combo)
        # Pre-select a distinct item per row.
        combo.setCurrentIndex(index)
        lay1.addWidget(label, index, 0)
        lay1.addWidget(combo, index, 1)
    return combos
def comboSelect(combos):
    """Collect the currently selected text of every combo box in *combos*.

    Prints the (growing) selection list as each combo box is read — the
    original's diagnostic behaviour — and returns the final list.
    """
    selections = []
    for combo in combos:
        selections.append(combo.currentText())
        print(selections)
    return selections
|
# Read a count line, then a line of integers; print them in descending order
# with no separator between values (as the original did).
limit = int(input())
values = sorted(map(int, input().split()), reverse=True)
for value in values:
    print(value, end="")
|
from django.forms import ModelForm
from django.forms.models import inlineformset_factory
from django.forms import Select, TextInput, HiddenInput
from ..models import Menu, MenuItems
class MenuItemsForm(ModelForm):
    """Form for one menu line: the dish and its serving weight ('out')."""
    class Meta:
        model = MenuItems
        fields = ['dish', 'out']
        # data-msg strings are client-side validation messages (Russian UI).
        widgets = {'dish': Select(attrs={'class': 'form-control mr-3',
                                         'required': True,
                                         'data-msg': 'Укажите блюдо'}),
                   'out': TextInput(attrs={'class': 'form-control mr-3',
                                           'required': True,
                                           'data-msg': 'Укажите вес блюда'})
                   }
# Inline formset for editing MenuItems rows attached to a Menu through the
# 'invoce_doc' foreign key (presumably a typo carried over from the model).
MenuItemsFormSet = inlineformset_factory(Menu, MenuItems,
                                         form=MenuItemsForm,
                                         fk_name='invoce_doc',
                                         extra=1)
class MenuForm(ModelForm):
    """Form for the menu header: creation date (hidden), approval, meal period."""
    class Meta:
        model = Menu
        fields = ['created_at', 'approved', 'food_intake']
        widgets = {'created_at': HiddenInput(attrs={}),
                   'food_intake': Select(attrs={'class': 'form-control mr-3',
                                                'required': True,
                                                'data-msg': 'Укажите период'})
                   }
|
from eden import Eden
from vendor import Vendor
import pandas as pd, pandas
class Audyt_New:
    """Build an audit workbook for *marka*: join Vendor ASINs onto Eden rows by EAN."""

    def __init__(self, marka):
        df1 = Vendor()
        df2 = Eden(288, marka=marka, czyaudyt=True)
        # Normalise both EAN columns to numeric so the merge keys compare equal.
        df1['EAN'] = pd.to_numeric(df1['EAN'], errors='coerce')
        try:
            df2['g_EANCode'] = pd.to_numeric(df2['g_EANCode'])
        except (ValueError, TypeError):
            # Narrowed from a bare except: only conversion failures are
            # expected; best-effort behaviour (leave the column as-is) kept.
            pass
        print(df1.dtypes)
        print(df2.dtypes)
        # Left-join the Vendor ASIN onto each Eden row via its EAN code
        # (pd alias used consistently instead of mixing pd/pandas).
        df2 = pd.merge(df2, df1[['EAN', 'ASIN']], how='left',
                       left_on='g_EANCode', right_on='EAN')
        df2 = df2.drop(columns=['EAN'])
        writer = pd.ExcelWriter('Audyt_' + marka + '.xlsx', engine='xlsxwriter')
        df1.to_excel(writer, sheet_name='Vendor', index=False)
        df2.to_excel(writer, sheet_name='Eden', index=False)
        writer.save()
if __name__ == '__main__':
    # Build the audit workbook for the 'airoh' brand when run as a script.
    a = Audyt_New('airoh')
from datetime import datetime
from email.utils import format_datetime
from uuid import uuid4
from xml.sax.saxutils import escape
from scrapy.exporters import PythonItemExporter, XmlItemExporter
from scrapy.utils.python import is_listlike
# Whitelists of RSS 2.0 element names the exporters below will emit, keyed by
# where the element may appear.  For "item", the boolean value records whether
# the element's content must be CDATA-escaped when written.
VALID_RSS_ELEMENTS = {
    "channel": [
        "category",
        "cloud",
        "copyright",
        "description",
        "docs",
        "generator",
        "image",
        "language",
        "lastBuildDate",
        "link",
        "managingEditor",
        "pubDate",
        "rating",
        "skipDays",
        "skipHours",
        "textInput",
        "title",
        "ttl",
        "webMaster",
    ],
    "channel_image": [
        "url",
        "title",
        "link",
        "width",
        "height",
        "description",
    ],
    "item": {  # value: should this field be escaped
        "author": True,
        "category": True,
        "comments": True,
        "description": True,
        "enclosure": False,
        "guid": False,
        "link": False,
        "pubDate": False,
        "source": True,
        "title": True,
    },
}
# FIXME: guid isPermaLink attributes
# https://www.w3schools.com/xml/rss_tag_guid.asp
# FIXME: source url attributes
# https://www.w3schools.com/xml/rss_tag_source.asp
# FIXME: Add support for enclosure
# https://www.w3schools.com/xml/rss_tag_enclosure.asp
def _clean_item_field(value):
# Format timestamp
if isinstance(value, datetime):
return format_datetime(value)
# Fill empty field with empty string
if value is None:
return ""
return value
class RSSExporter(XmlItemExporter):
    """Scrapy item exporter producing an RSS 2.0 feed.

    Items are buffered by export_item() and written newest-first (sorted on
    'pubDate') in finish_exporting().  Channel metadata is emitted up front,
    filtered through the VALID_RSS_ELEMENTS whitelist.
    """

    def __init__(self, file, channel_meta, **kwargs):
        self.file = file
        self.channel_meta = channel_meta  # mapping of channel-level RSS fields
        self.item_list = []               # buffered items, flushed at the end
        super().__init__(file, root_element="channel", **kwargs)

    def start_exporting(self):
        """Open the document: <rss version="2.0"><channel> plus channel metadata."""
        self.xg.startDocument()
        # rss tag
        self.xg.startElement("rss", {"version": "2.0"})
        self._beautify_newline(new_item=True)
        # channel tag
        self._beautify_indent(depth=1)
        self.xg.startElement(self.root_element, {})
        self._beautify_newline(new_item=True)
        # Inject channel metadata
        for field, value in self.channel_meta.items():
            if field not in VALID_RSS_ELEMENTS["channel"]:
                continue
            if field == "image":
                # Flatten the image object into a dict of whitelisted,
                # non-empty sub-fields.
                image_value = {}
                for image_field in VALID_RSS_ELEMENTS["channel_image"]:
                    field_value = getattr(self.channel_meta["image"], image_field, None)
                    if field_value:
                        image_value[image_field] = field_value
                value = image_value
            self._export_xml_field(field, value, depth=2)

    def export_item(self, item):
        # Didn't actually write to file, store to list and write after sorting
        self.item_list.append(item)

    def _write_item(self, item):
        """Serialize one buffered item as an <item> element."""
        # Edit from `export_item` from `scrapy.exporters.XmlItemExporter`
        self._beautify_indent(depth=2)
        self.xg.startElement(self.item_element, {})
        self._beautify_newline()
        for name, value in self._get_serialized_fields(item, default_value=""):
            if name not in VALID_RSS_ELEMENTS["item"]:
                continue
            # Special handler for image
            if name == "enclosure":
                # enclosure carries its data as attributes, not content.
                value = {k: v for k, v in value.items() if v is not None}
                self._export_xml_field(name, None, depth=3, attributes=value)
                continue
            # Special handler for category
            if name == "category":
                value = value[0]["name"]
            # Cleanup data
            value = _clean_item_field(value)
            # Write to xml
            self._export_xml_field(
                name,
                value,
                depth=3,
                escape_content=VALID_RSS_ELEMENTS["item"][name],
            )
        self._beautify_indent(depth=2)
        self.xg.endElement(self.item_element)
        self._beautify_newline(new_item=True)

    def finish_exporting(self):
        """Flush buffered items newest-first, then close channel/rss tags."""
        # Sort and write items
        self.item_list.sort(key=lambda x: x["pubDate"], reverse=True)
        for item in self.item_list:
            self._write_item(item)
        # channel tag
        self._beautify_indent(depth=1)
        self.xg.endElement(self.root_element)
        self._beautify_newline(new_item=True)
        # rss tag
        self.xg.endElement("rss")
        self.xg.endDocument()

    def _export_xml_field(
        self, name, serialized_value, depth, attributes=None, escape_content=False
    ):
        """Recursively emit one element; dict values become nested elements,
        list-likes become repeated <value> elements, scalars become content
        (CDATA-wrapped when escape_content is true, XML-escaped otherwise)."""
        if attributes is None:
            attributes = {}
        self._beautify_indent(depth=depth)
        self.xg.startElement(name, attributes)
        if hasattr(serialized_value, "items"):
            self._beautify_newline()
            for subname, value in serialized_value.items():
                self._export_xml_field(
                    subname, value, depth=depth + 1, escape_content=escape_content
                )
            self._beautify_indent(depth=depth)
        elif is_listlike(serialized_value):
            self._beautify_newline()
            for value in serialized_value:
                self._export_xml_field(
                    "value", value, depth=depth + 1, escape_content=escape_content
                )
            self._beautify_indent(depth=depth)
        elif serialized_value:  # Make sure content is not empty
            content = str(serialized_value)
            if escape_content:
                self._xg_raw_characters(f"<![CDATA[{content}]]>")
            else:
                self._xg_raw_characters(escape(content))
        self.xg.endElement(name)
        self._beautify_newline()

    def _xg_raw_characters(self, content):
        """Write *content* through the XMLGenerator without further escaping.

        NOTE(review): touches XMLGenerator privates (_finish_pending_start_element,
        _write, _encoding) — fragile across Python versions; confirm on upgrade.
        """
        if content:
            self.xg._finish_pending_start_element()
            if not isinstance(content, str):
                content = str(content, self.xg._encoding)
            self.xg._write(content)
class CouchDBExporter(PythonItemExporter):
    """Exporter that PUTs each scraped item into a CouchDB database."""

    def __init__(self, db_session, db_uri, ARTICLES_DB):
        super().__init__(binary=False)
        self.db_session = db_session    # requests-style session used for PUTs
        self.db_uri = db_uri            # base URI of the CouchDB server
        self.ARTICLES_DB = ARTICLES_DB  # target database name

    def export_item(self, item):
        """Serialize *item*, drop the enclosure field, store it under a fresh UUID.

        Raises (via raise_for_status) if CouchDB rejects the document.
        """
        cleaned = {}
        for name, value in self._get_serialized_fields(item, default_value=""):
            # Skip image
            if name == "enclosure":
                continue
            # Cleanup data
            value = _clean_item_field(value)
            cleaned[name] = value
        # Add to database
        self.db_session.put(
            f"{self.db_uri}/{self.ARTICLES_DB}/{uuid4()}", json=cleaned
        ).raise_for_status()
|
def incrementer(nums):
    """Add each digit's 1-based position to it, keeping only the last digit."""
    out = []
    for position, digit in enumerate(nums, start=1):
        out.append((digit + position) % 10)
    return out
'''
Given an input of an array of digits num, return the
array with each digit incremented by its position in
the array. For example, the first digit will be incremented
by 1, the second digit by 2 etc. Make sure to start counting
your positions from 1 and not 0.
incrementer({1,2,3}) => {2,4,6}
Your result can only contain single digit numbers, so if
adding a digit with it's position gives you a multiple-digit
number, only the last digit of the number should be returned
incrementer({4,6,9,1,3}) => {5,8,2,5,8}
- 9 + 3 (position of 9 in array) = 12
- Only its last digit 2 should be returned
Lastly, return {} if your array is empty! Arrays will only
contain numbers so don't worry about checking that.
'''
from pandas import read_csv
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from helpers import data_reader
from sklearn.decomposition import PCA
from sklearn.ensemble import ExtraTreesClassifier
import operator
def rfe_selection(X, Y, num_to_select=3):
    """Recursive feature elimination with logistic regression; prints a report.

    NOTE(review): newer scikit-learn requires the keyword form
    RFE(model, n_features_to_select=...) — confirm the pinned version.
    """
    model = LogisticRegression()
    rfe = RFE(model, num_to_select)
    fit = rfe.fit(X, Y)
    print("RFE: ")
    print("Num Features: {}".format(fit.n_features_))
    print("Selected Features: {}".format(fit.support_))
    print("Feature Ranking: {}".format(fit.ranking_))
    # NOTE(review): ranking_[:num_to_select] is the ranking of the FIRST N
    # columns, not the N best-ranked features — verify this matches intent.
    print("Best {} features: {}".format(num_to_select, fit.ranking_[:num_to_select]))
def pca_selection(X, num_to_select=3):
    """Fit PCA with *num_to_select* components and print the explained variance."""
    # feature extraction
    pca = PCA(n_components=num_to_select)
    fit = pca.fit(X)
    # summarize components
    print("PCA: ")
    print("Num Features: {}".format(num_to_select))
    print("Explained Variance: {}".format(fit.explained_variance_ratio_))
    # print(fit.components_)
def feature_importance(X, Y, num_to_select=3):
    """Fit an ExtraTreesClassifier and print feature indices ranked by importance.

    X, Y          -- training data and labels
    num_to_select -- how many top-ranked feature indices to highlight
    """
    model = ExtraTreesClassifier()
    model.fit(X, Y)
    print("Feature importance: ")
    print(model.feature_importances_)
    importances = model.feature_importances_
    # Rank feature indices by importance, highest first.  The original zipped
    # range(len(X)) — the number of SAMPLES — against the importances, which
    # silently truncates whenever there are fewer samples than features.
    ranked = sorted(range(len(importances)),
                    key=lambda i: importances[i], reverse=True)
    print("Sorted by importance: {}".format(ranked))
    print("{} most import features: {}".format(num_to_select, ranked[:num_to_select]))
if __name__ == '__main__':
    # Sample 1000 labelled rows (after skipping 202000) from the training CSV.
    data, labels = data_reader.read_dataframe("../data/training_data.csv", has_labels=True,
                                              nsamples=1000, **{'skiprows': 202000})
    # Drop the first column (presumably an id — confirm against the CSV schema).
    values = data[data.columns[1:]].values
    # print(values)
    # print(labels)
    X = values
    Y = labels
    features_num = 5
    # Run all three selection strategies over the same sample.
    rfe_selection(X, Y, features_num)
    pca_selection(X, features_num)
    feature_importance(X, Y, features_num)
|
'''
92. Reverse Linked List II
Given the head of a singly linked list and two integers left and right where left <= right, reverse the nodes of the list from position left to position right, and return the reversed list.
Example 1:
Input: head = [1,2,3,4,5], left = 2, right = 4
Output: [1,4,3,2,5]
Example 2:
Input: head = [5], left = 1, right = 1
Output: [5]
Constraints:
The number of nodes in the list is n.
1 <= n <= 500
-500 <= Node.val <= 500
1 <= left <= right <= n
'''
# Definition for singly-linked list.
class ListNode:
    """A single node of a singly linked list."""
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
class Solution:
    def reverseBetween(self, head: ListNode, left: int, right: int) -> ListNode:
        """Reverse the sublist between 1-based positions left and right.

        Classic one-pass pointer reversal: O(n) time, O(1) extra space.
        Assumes 1 <= left <= right <= list length (per the problem statement).
        """
        # Empty list
        if not head:
            return None
        # Move the two pointers until they reach the proper starting point
        # in the list.
        prev, cur = None, head
        while left > 1:
            prev = cur
            cur = cur.next
            left, right = left - 1, right - 1
        # con: node just before the reversed section (None if it starts at head)
        # tail: first node of the section — becomes its last after reversal
        con, tail = prev, cur
        # Iteratively reverse the nodes until right becomes 0.
        while right:
            temp = cur.next
            cur.next = prev
            prev = cur
            cur = temp
            right -= 1
        # Stitch the reversed section back into the surrounding list.
        if con:
            con.next = prev
        else:  # con could be None
            head = prev
        tail.next = cur
        return head
from flask_wtf import Form
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired, Email, Length
class SignupForm(Form):
    """Registration form: names, a valid email, and a 6+ character password."""
    firstname = StringField('First name', validators=[DataRequired('Please enter your first name')])
    lastname = StringField('Last name', validators=[DataRequired('Please enter your last name')])
    email = StringField('Email', validators=[DataRequired('Valid Email address required'), Email('Must be a valid email address')])
    password = PasswordField('Password', validators=[DataRequired('Password is required'), Length(min=6, message='Password must be at least 6 characters long.')])
    submit = SubmitField('Sign up!')
class LoginForm(Form):
    """Login form: email address plus password."""
    email = StringField('Email Address', validators=[DataRequired('Valid Email address required'), Email('Email must be valid email address')])
    password = PasswordField('Password', validators=[DataRequired('Password required')])
    submit = SubmitField('Log in')
class AddressForm(Form):
    """Single-field form for submitting an address to search."""
    address = StringField('Address', validators=[DataRequired('Please enter an address')])
    submit = SubmitField('Search')
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import pytest
from pants.backend.cc.goals import tailor
from pants.backend.cc.goals.tailor import PutativeCCTargetsRequest
from pants.backend.cc.target_types import CCSourcesGeneratorTarget
from pants.core.goals.tailor import AllOwnedSources, PutativeTarget, PutativeTargets
from pants.engine.rules import QueryRule
from pants.testutil.rule_runner import RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
    """RuleRunner wired with the cc tailor rules and target type under test."""
    return RuleRunner(
        rules=[
            *tailor.rules(),
            QueryRule(PutativeTargets, (PutativeCCTargetsRequest, AllOwnedSources)),
        ],
        target_types=[CCSourcesGeneratorTarget],
    )
def test_find_putative_targets(rule_runner: RuleRunner) -> None:
    """tailor must propose a cc_sources target only for directories whose
    source files are not already owned by an existing target."""
    rule_runner.write_files(
        {
            "src/native/owned/BUILD": "cc_sources()\n",
            "src/native/owned/OwnedFile.cc": "",
            "src/native/unowned/UnownedFile.c": "",
        }
    )
    putative_targets = rule_runner.request(
        PutativeTargets,
        [
            PutativeCCTargetsRequest(("src/native/owned", "src/native/unowned")),
            AllOwnedSources(["src/native/owned/OwnedFile.cc"]),
        ],
    )
    # Only the unowned directory should yield a new target proposal.
    assert (
        PutativeTargets(
            [
                PutativeTarget.for_target_type(
                    CCSourcesGeneratorTarget,
                    "src/native/unowned",
                    "unowned",
                    ["UnownedFile.c"],
                ),
            ]
        )
        == putative_targets
    )
|
# Exercise 3.6 from the book: is the average of three grades at least 7?
grade_1 = 10
grade_2 = 3
grade_3 = 7
average = (grade_1 + grade_2 + grade_3) / 3
passed = average >= 7
print(passed)
|
import base64
from enum import Enum
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from memegen import Memegen
from memegen.meme_template import MemeTemplate
app = FastAPI()
# Wide-open CORS: any origin/method/header may call this API.  Fine for a
# public meme endpoint; tighten if anything sensitive is ever served.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
)
@app.get("/meme/{template_id}/{temperature}")
async def root(template_id: MemeTemplate, temperature: int=50):
    """Generate a meme for *template_id*.

    temperature (0-100 in the URL) is scaled to 0-1 before being passed to
    Memegen.  Returns the caption strings and the image as base64 so the
    bytes can travel inside the JSON response.
    """
    generator = Memegen(
        template=template_id,
        temperature=temperature / 100)
    meme, top, bottom = generator.predict()
    return {
        "top": top,
        "bottom": bottom,
        "image": base64.b64encode(meme)
    }
|
"""
Main event loop class
"""
import json
import logging
from trio_websocket import open_websocket_url
logging.basicConfig(level=logging.DEBUG)
class Washer:
    """
    Event loop: receives JSON websocket messages and dispatches them to
    registered async callbacks; can also send block-set commands back.
    """

    def __init__(self):
        self.move_callbacks = []  # async callbacks(washer, message) per message
        self._ws = None           # active websocket, set once run() connects

    def register_move_callback(self, callback):
        """Register an async callback invoked for every incoming message."""
        self.move_callbacks.append(callback)

    async def process_message(self, msg):
        """Decode one JSON message and fan it out to all registered callbacks."""
        m = json.loads(msg)
        logging.debug(f'Received |{m}|')
        for b in self.move_callbacks:
            await b(self, m)

    async def run(self):
        """Connect to the local server and pump messages until disconnect."""
        try:
            async with open_websocket_url("ws://localhost:1234") as ws:
                self._ws = ws
                while True:
                    msg = await ws.get_message()
                    await self.process_message(msg)
        except OSError:
            print("Cannot connect to minecraft")

    async def block_set(self, location):
        """Send a set-block command for *location* (requires run() connected)."""
        s = {"verb": "set", "type": "block", "location": location}
        await self._ws.send_message(json.dumps(s))
        logging.debug(f"Sent |{s}|")
|
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 21 14:08:52 2019
@author: Administrator
"""
def calc_temp(D2, C5, C6):
    """First-order temperature from raw sensor values.

    Looks like the MS5611-style pressure-sensor formula (confirm against the
    datasheet): dT = D2 - C5*2^8, temperature = 2000 + dT*C6/2^23, in
    hundredths of a degree.  Returns (dT, temperature).
    """
    dT = D2 - C5 * 256            # C5 << 8
    temperature = 2000 + (dT * C6 >> 23)
    return dT, temperature
def calc_off(dT, C2, C4):
    """Offset at actual temperature: OFF = C2*2^16 + C4*dT/2^7."""
    return (C2 << 16) + (C4 * dT >> 7)
def calc_all(D1, D2, C1, C2, C3, C4, C5, C6):
    """Compute (TEMP, OFF, SENSE, P) from raw samples and calibration words.

    Integer shifts mirror the sensor's fixed-point arithmetic; the final
    pressure P uses float division, exactly as the original did.
    """
    dT = D2 - C5 * 256
    TEMP = 2000 + (dT * C6 >> 23)
    OFF = (C2 << 16) + (C4 * dT >> 7)
    SENSE = (C1 << 15) + (C3 * dT >> 8)
    P = (D1 * SENSE / 2 ** 21 - OFF) / 2 ** 13
    return TEMP, OFF, SENSE, P
# OFF = ((C2<<16) + (C4*dT>>7))
# SENSE = (C1<<15 + (C3*dT)>>8
# print("dT:%d,TEMP:%d,OFF:%d,SENSE:%d\n",dT,TEMP,OFF,SENSE);
def calc_pres(D1, SEN, OFF):
    """Pressure via integer arithmetic: ((D1*SEN)/2^21 - OFF) / 2^13."""
    return ((D1 * SEN >> 21) - OFF) >> 13
calc_temp(7119033,26891,26383)
|
#!/bin/python
# python <file> <linenumber>
import sys
try:
script = sys.argv[1]
linenumber = int(sys.argv[2])
except:
script = "/home/peter/setup/bin/mp.py"
linenumber = 10
with open(script, "r") as f:
lines = f.readlines()
last = None
try:
with open("/tmp/last", "r") as w:
last = w.readlines()[0]
except:
pass
for aline in reversed(lines[:linenumber]):
if aline.startswith("class "):
ret = aline.split(" ")[1].split("(")[0]
if (ret != last):
with open("/tmp/last", "w") as w:
w.write(ret)
print(ret)
exit()
print(last)
|
import heapq
def kthSmallest(iterable, k):
    """Return the k-th smallest value of *iterable* (1-based), or None if it
    holds fewer than k values.  Duplicates count separately.
    """
    smallest = []  # max-heap (negated values) of the k smallest seen so far
    for value in iterable:
        # Leftover debug print of the heap removed from this loop.
        heapq.heappush(smallest, -value)
        if len(smallest) > k:
            heapq.heappop(smallest)  # drop the largest of the k+1 candidates
    if (len(smallest) < k):
        return None
    # The heap root is the largest of the k smallest, i.e. the k-th smallest.
    return -smallest[0]
print(kthSmallest([8, 16, 80, 55, 32, 8, 38], 3))
|
class Cards:
    """Card rank constants plus deck metadata (the names match Love Letter's
    ranks) and helpers for per-card copy counts."""

    GUARD = 1
    PRIEST = 2
    BARON = 3
    HANDMAIDEN = 4
    PRINCE = 5
    KING = 6
    COUNTESS = 7
    PRINCESS = 8
    NUM_CARDS = 9      # includes the unused 0 ("NONE") slot
    DECK_SIZE = 16

    names = [
        'NONE',
        'GUARD',
        'PRIEST',
        'BARON',
        'HANDMAIDEN',
        'PRINCE',
        'KING',
        'COUNTESS',
        'PRINCESS'
    ]

    @staticmethod
    def start_count(card):
        """Number of copies of *card* in a fresh deck (0 for invalid ranks)."""
        if card == Cards.GUARD:
            return 5
        if Cards.PRIEST <= card < Cards.KING:
            return 2
        if Cards.KING <= card < Cards.NUM_CARDS:
            return 1
        return 0

    @staticmethod
    def name(card):
        """Human-readable name for a card constant."""
        return Cards.names[card]
class CardSet:
    """A multiset of cards: one integer count per card rank."""

    def __init__(self, other=None):
        # Copy-construct from *other* when given; otherwise start all-zero.
        if other:
            self.cards = other.cards[:]
        else:
            self.cards = [0] * Cards.NUM_CARDS

    def __getitem__(self, key):
        return self.cards[key]

    def __setitem__(self, key, val):
        self.cards[key] = val

    def __str__(self):
        parts = []
        for card in range(Cards.NUM_CARDS):
            if self.cards[card] > 0:
                parts.append('%s:%s ' % (Cards.name(card), self.cards[card]))
        return ''.join(parts)

    def contains(self, card):
        """True when at least one copy of *card* is present."""
        return self.cards[card] > 0

    def clear(self, exclude=None, cards=range(Cards.NUM_CARDS)):
        """Zero the counts of the given *cards*, leaving *exclude* untouched."""
        for card in cards:
            if card != exclude:
                self.cards[card] = 0

    def remove(self, card):
        """Remove one copy of *card*, never going below zero."""
        if self.cards[card] > 0:
            self.cards[card] -= 1

    def certainty(self, card):
        """Fraction of the set made up of *card* (0 for an empty set)."""
        total = sum(self.cards)
        return self.cards[card] / total if total else 0

    def most_likely(self, exclude):
        """Return (card, certainty) for the most common card, skipping *exclude*.

        Ties break toward the higher card value (stable double sort).
        """
        ranked = sorted(range(Cards.NUM_CARDS), reverse=True)
        ranked = sorted(ranked, key=lambda c: self.cards[c], reverse=True)
        best = ranked[1] if ranked[0] == exclude else ranked[0]
        return (best, self.certainty(best))

    def chance_less_than(self, card):
        """Probability a uniformly drawn card ranks below *card* (0 if empty)."""
        total = sum(self.cards)
        if not total:
            return 0
        return sum(self.cards[c] for c in range(card)) / total

    def expected_value(self):
        """Mean card rank of the set (0 if empty)."""
        total = sum(self.cards)
        if not total:
            return 0
        return sum(c * self.cards[c] for c in range(Cards.NUM_CARDS)) / total

    @staticmethod
    def full():
        """A set holding every card of a fresh deck."""
        result = CardSet()
        for card in range(Cards.NUM_CARDS):
            result[card] = Cards.start_count(card)
        return result

    @staticmethod
    def single(card):
        """A set holding all starting copies of just *card*."""
        result = CardSet()
        result[card] = Cards.start_count(card)
        return result
|
def brute_force(goal):
    """Exhaustively search for an exact integer square root of *goal* and
    print the outcome."""
    candidate = 0
    while candidate ** 2 < goal:
        candidate += 1
    if candidate ** 2 == goal:
        print(f'The square root of {goal} is {candidate}')
    else:
        print(f'I couldnt find the root')
def approach_search(goal, epsilon):
    """March upward in steps of epsilon**2 until answer**2 is within epsilon
    of *goal* (or the candidate overshoots), then print the outcome."""
    step = epsilon ** 2
    answer = 0.0
    while abs(answer ** 2 - goal) >= epsilon and answer <= goal:
        answer += step
    if abs(answer ** 2 - goal) >= epsilon:
        print(f'I couldnt find the square root of {goal}')
    else:
        print(f'The square root of {goal} is {answer}')
def binary_search(goal, epsilon):
    """Bisect [0, max(1, goal)] until answer**2 is within epsilon of *goal*,
    then print the approximation found."""
    low, high = 0.0, max(1.0, goal)
    answer = (low + high) / 2
    while abs(answer ** 2 - goal) >= epsilon:
        if answer ** 2 > goal:
            high = answer
        else:
            low = answer
        answer = (low + high) / 2
    print(f'The square root of {goal} is {answer}')
|
# coding: utf-8
# In[4]:
import os, csv
import numpy as np
import pandas as pd
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing, decomposition, manifold
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV, LeaveOneOut, train_test_split
from scipy.stats import randint as sp_randint
from time import time
import logging
import matplotlib.pyplot as plt
import pickle
get_ipython().magic('matplotlib inline')
import cv2
from scipy.misc import imresize
from scipy import stats
from skimage import feature
np.random.seed(1)
# # Calculate an ensemble of classifiers by computing the mode of the final results (voting)
# In[19]:
# Majority-vote ensemble: each CSV in Path holds one classifier's predictions;
# the per-sample mode across files becomes the final label, written to ResMode.csv.
Path = "E:/csvdir/"
filelist = os.listdir(Path)
print(filelist)
list_ = []
for file in filelist:
    a = pd.read_csv(Path + file, index_col=None, header=0)
    list_.append(np.asarray(a['Prediction']))
# Mode across classifiers (axis=0), per sample.
final = stats.mode(np.asarray(list_), axis=0)
labels_predicted = np.transpose(final.mode)
print(np.squeeze(labels_predicted))
os.chdir(Path)
with open('ResMode.csv', 'w', newline='') as csvfile:
    csvwriter = csv.writer(csvfile, delimiter=',',
                           quotechar='|', quoting=csv.QUOTE_MINIMAL)
    header = ['Id', 'Prediction']
    csvwriter.writerow(header)
    # Ids are 1-based row numbers.
    for Idx in range(len(labels_predicted)):
        row = [str(Idx + 1), np.squeeze(labels_predicted[Idx])]
        csvwriter.writerow(row)
# In[ ]:
# In[ ]:
|
# encoding:utf-8
import requests
# client_id is the AK from Baidu Ai Studio, client_secret is the SK from Baidu Ai Studio
# Fill in the API key (AK) / secret key (SK) from the Baidu AI console.
AK = ''
SK = ''
# OAuth client-credentials token endpoint; AK/SK travel as query parameters.
host = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id='+AK+'&client_secret='+SK
response = requests.get(host)
if response:
    # Prints whatever JSON the endpoint returns (presumably the access token
    # payload — confirm against the Baidu OAuth docs).
    print(response.json())
|
import pygame
from random import randint
pygame.init()
screen = pygame.display.set_mode((1000, 600))
pygame.display.set_caption("Rock Runner")

# Colours (RGB); "clear" is black, used to erase the runner's previous position.
clear = (0, 0, 0)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)

# Player rectangle geometry and per-frame movement speed.
runnerX = 640
runnerY = 585
runnerHeight = 15
runnerWidth = 40
runnerVel = 0.8

# object current co-ordinates (the falling rock)
x = 500
y = 0
speed = 0.5

while True:
    # completely fill the surface object
    # with black colour
    screen.fill((0, 0, 0))
    keys = pygame.key.get_pressed()
    # Move left/right: erase the old rectangle, then clamp inside the window.
    if keys[pygame.K_LEFT]:
        if runnerX >= 0:
            pygame.draw.rect(screen, (clear), (runnerX, runnerY, runnerWidth, runnerHeight))
            runnerX -= runnerVel
    if keys[pygame.K_RIGHT]:
        if runnerX <= 960:
            pygame.draw.rect(screen, (clear), (runnerX, runnerY, runnerWidth, runnerHeight))
            runnerX += runnerVel
    pygame.draw.rect(screen, (red), (runnerX, runnerY, runnerWidth, runnerHeight))
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            quit()
    # Falling rock: draw, advance, and respawn at a random x past the bottom.
    pygame.draw.rect(screen, (255, 255, 0), (x, y, 20, 40))
    pygame.display.update()
    y = y + speed
    if y > 590:
        y = 0
        x = randint(10, 990)
# -*- coding: utf-8 -*-
class Solution:
    def sortSentence(self, s: str) -> str:
        """Reorder a shuffled sentence whose words end in 1-based position digits."""
        shuffled = s.split()
        ordered = [""] * len(shuffled)
        for token in shuffled:
            # The final character of each token is its 1-based position.
            ordered[int(token[-1]) - 1] = token[:-1]
        return " ".join(ordered)
if __name__ == "__main__":
    # Smoke tests taken from the LeetCode examples.
    solution = Solution()
    assert "This is a sentence" == solution.sortSentence("is2 sentence4 This1 a3")
    assert "Me Myself and I" == solution.sortSentence("Myself2 Me1 I4 and3")
|
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
# Training data: first column is the index; 'target' is the label column.
train = pd.read_csv('data/train.csv', index_col=0)
train.head()
# In[3]:
X = train.drop('target', axis=1)
y = train.target
del train  # free memory — the raw frame is no longer needed
# In[4]:
from sklearn.decomposition import FactorAnalysis
fa = FactorAnalysis(n_components=100, random_state=42)
X_fa = fa.fit_transform(X)
# In[5]:
from sklearn.random_projection import SparseRandomProjection
srp = SparseRandomProjection(n_components=100, random_state=42)
X_srp = srp.fit_transform(X)
# In[6]:
from sklearn.random_projection import GaussianRandomProjection
grp = GaussianRandomProjection(n_components=100, random_state=42)
X_grp = grp.fit_transform(X)
# In[7]:
from sklearn.model_selection import train_test_split
# Stack the three 100-component projections as the feature matrix.
X_added = pd.concat([
    pd.DataFrame(X_fa),
    pd.DataFrame(X_srp),
    pd.DataFrame(X_grp),
], axis=1)
# Train on log1p(target); predictions are mapped back with exp-1 later.
y_log = np.log1p(y)
# 80/10/10 train / test / validation split.
X_train, X_test, y_train, y_test = train_test_split(
    X_added, y_log, test_size=0.2, random_state=42
)
X_val, X_test, y_val, y_test = train_test_split(
    X_test, y_test, test_size=0.5, random_state=42
)
print(X_train.shape, X_val.shape, X_test.shape)
# In[8]:
def rmsle_metric(y_test, y_pred):
    """LightGBM-style eval function: RMSLE on log1p-transformed targets.

    Both arrays are mapped back to the original scale with exp-1 before the
    RMSLE is computed.  Returns (name, value, is_higher_better).
    """
    assert len(y_test) == len(y_pred)
    actual = np.exp(y_test) - 1
    predicted = np.exp(y_pred) - 1
    log_error = np.log(1 + predicted) - np.log(1 + actual)
    rmsle = np.sqrt(np.mean(log_error ** 2))
    return ('RMSLE', rmsle, False)
# In[19]:
import lightgbm as lgb
gbm = lgb.LGBMRegressor(
    objective='regression',
    num_leaves=11,
    learning_rate=0.008,
    n_estimators=1000,
    reg_lambda=2.0,
    max_depth=5,
)
# NOTE(review): early stopping monitors X_test, so the "test" split leaks
# into training; X_val is the only untouched hold-out — confirm intent.
gbm.fit(X_train, y_train,
        eval_set=[(X_test, y_test)],
        eval_metric=rmsle_metric,
        early_stopping_rounds=100
        )
# In[20]:
# Report RMSLE on all three splits.
y_pred_t = gbm.predict(X_train)
print(rmsle_metric(y_train, y_pred_t))
y_pred = gbm.predict(X_test)
print(rmsle_metric(y_test, y_pred))
y_pred_v = gbm.predict(X_val)
print(rmsle_metric(y_val, y_pred_v))
# In[43]:
test = pd.read_csv('data/test.csv', index_col=0)
test.head()
# In[44]:
ids = test.reset_index()['ID']
# In[45]:
from sklearn.decomposition import FactorAnalysis
from sklearn.random_projection import GaussianRandomProjection
from sklearn.random_projection import SparseRandomProjection
# Apply the projections already fitted on the training data (no refitting).
X_fa = fa.transform(test)
X_srp = srp.transform(test)
X_grp = grp.transform(test)
X_added = pd.concat([
    pd.DataFrame(X_fa),
    pd.DataFrame(X_srp),
    pd.DataFrame(X_grp),
], axis=1)
y_pred = gbm.predict(X_added)
y_pred
# In[46]:
# Undo the log1p transform applied to the target before training.
y_pred = np.exp(y_pred) - 1
# In[47]:
y_pred[0]
# In[48]:
ids[0]
# In[50]:
pd.DataFrame(y_pred, index=ids, columns=['target']).to_csv('data/submit.csv')
|
#!/usr/bin/env python3
from . import neopixelmatrix as Graphics
from . import utils
from .neofont import letters as font
class NeoSprite():
    """A static image sprite drawn on the NeoPixel matrix."""

    def __init__(self, path):
        # Row-major matrix of pixel values loaded from the image file.
        self.image = utils.get_image_matrix(path)
        self.x = 0
        self.y = 0
        self.width = len(self.image[0])
        self.height = len(self.image)

    def render(self, x=0, y=0, blend=0xfff):
        """Draw at the sprite's position offset by (x, y); *blend* passes through."""
        Graphics.drawImage(self.image, x+int(self.x), y+int(self.y), blend)
class AnimatedNeoSprite():
    """A sprite-sheet animation advanced by update(dt) and drawn by render()."""

    def __init__(self, path, width=8, height=8):
        self.frames = utils.get_frames_for_image(path, width, height)
        self.x = 0
        self.y = 0
        self.width = width
        self.height = height
        self.frame = 0                          # frame currently displayed
        self.framerate = 1                      # frames advanced per second
        self.time_ratio = 1 / self.framerate    # seconds between frame switches
        self.time_acc = 0                       # time since the last switch
        self.playing = False
        self.animation = range(0, len(self.frames))  # frame indices to cycle
        self.index_animation = 0                # position within self.animation

    def update(self, dt):
        """Advance the animation by *dt* seconds while playing."""
        if not self.playing:
            return
        self.time_acc += dt
        if self.time_acc <= self.time_ratio:
            return
        # Time for the next frame: wrap around the animation sequence.
        self.time_acc = 0
        next_index = self.index_animation + 1
        if next_index >= len(self.animation):
            next_index = 0
        self.index_animation = next_index
        self.frame = self.animation[next_index]

    def setFrameRate(self, framerate):
        """Change playback speed; a rate of 0 is ignored."""
        if framerate == 0:
            return
        self.framerate = framerate
        self.time_ratio = 1 / framerate
        self.time_acc = 0

    def render(self):
        Graphics.drawImage(self.frames[self.frame], self.x, self.y)

    def renderFrame(self, frame):
        Graphics.drawImage(self.frames[frame], self.x, self.y)

    def renderFrameAt(self, frame, x, y):
        Graphics.drawImage(self.frames[frame], x, y)
class TextNeoSprite():
    """A one-line text sprite composed from the 5-row bitmap font."""

    def __init__(self, text):
        # Five pixel rows; each glyph is followed by a one-column gap.
        self.image = [[], [], [], [], []]
        for char in text:
            letter = font[char]
            # (removed an unused `char_spacing` local from the original)
            for j in range(0, 5):
                self.image[j] += letter[j] + [0]
        self.x = 0
        self.y = 0
        self.width = len(self.image[0])

    def render(self):
        Graphics.drawMonoPixels(self.image, self.x, self.y)
class SpriteFromFrames():
    """A static sprite built by laying chosen frames of *baseSprite* side by side."""

    def __init__(self, baseSprite, frames):
        self.image = []
        for row in range(baseSprite.height):
            combined = []
            for frame in frames:
                combined += baseSprite.frames[frame][row]
            self.image.append(combined)
        self.x = 0
        self.y = 0
        self.width = baseSprite.width * len(frames)
        self.height = baseSprite.height

    def render(self):
        Graphics.drawImage(self.image, self.x, self.y)
import requests
import sqlite3 as db
class Post:
    """Plain value object holding a post's title, body, and author."""

    def __init__(self, title, body, author):
        self.title = title
        self.body = body
        self.author = author
def insert_posts(post, cursor):
    """Insert one post dict (keys 'title', 'body') into the Posts table.

    Uses a parameterized query; the caller owns commit/rollback.
    (Removed a stray trailing semicolon from the original.)
    """
    insert_post_string = " insert into Posts(title, body) values(:title, :body)"
    cursor.execute(insert_post_string, {'title': post['title'], 'body': post['body']})
# function return userId with most posts
def getUserIdWithMostPosts(posts):
    """Return the userId that authored the most posts.

    Ties are broken in favor of the userId seen first (matching the original
    strict-greater-than scan).  Returns None for an empty post list.
    """
    from collections import Counter  # local import keeps this block self-contained
    counts = Counter(post['userId'] for post in posts)
    if not counts:
        return None
    # most_common() sorts by count with a stable sort, so on ties the
    # first-inserted (first-seen) userId wins — same as the original loop.
    return counts.most_common(1)[0][0]
def getPostsByUserId(posts, userId):
    """Return every post authored by the given userId, preserving order."""
    return [post for post in posts if post['userId'] == userId]
# Reference demos (kept as comments): fetch the posts once and print the
# busiest author's id and their posts.
#
# response = requests.get("http://jsonplaceholder.typicode.com/posts", timeout=6)
# if (response.ok):
#     posts = response.json()
#     userId = getUserIdWithMostPosts(posts)
#     print(userId)
#     print(getPostsByUserId(posts, userId))
# else:
#     print("error happened with status code: ", response.status_code)

# run for db: fetch the posts and store the busiest author's posts in SQLite.
response = requests.get("http://jsonplaceholder.typicode.com/posts", timeout=6)
connection = db.connect("my_database.db")
cursor = connection.cursor()
# IF NOT EXISTS so re-running this script does not raise OperationalError
# once the table already exists.
create_posts_table_string = '''
    CREATE TABLE IF NOT EXISTS Posts(
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        title text,
        body text
    );
'''
cursor.execute(create_posts_table_string)
connection.commit()
if (response.ok):
    # return list of posts where each individual post is a dict with JSON format
    posts = response.json()
    userId = getUserIdWithMostPosts(posts)
    print(userId)
    posts_for_user = getPostsByUserId(posts, userId)
    for post in posts_for_user:
        insert_posts(post, cursor)
    connection.commit()
    print(posts_for_user)
else:
    print("error happened with status code: ", response.status_code)
connection.close()
import os
import roman
def clear_screen():
    """Pause until the user presses Enter, then clear the terminal."""
    input('\nAperte Enter para continuar...')
    # 'cls' only exists on Windows; POSIX shells use 'clear'.
    os.system("cls" if os.name == "nt" else "clear")
def end_screen():
    # Print the farewell message and wait for Enter before the program ends.
    print('\nFim da aplicação.')
    input('Aperte Enter para finalizar...')
def menu():
    """Show the conversion menu and return the chosen option as an int.

    Re-prompts until the user types something numeric; the original crashed
    with ValueError on non-numeric input.  Range checking of the value is
    left to the caller (condicional), as before.
    """
    print('-----MENU DE ESCOLHA-----')
    print('1 - Arábico para romano.\n2 - Romano para arábico.')
    while True:
        try:
            option = int(input('Insira a sua escolha: '))
            break
        except ValueError:
            print('\nInsira um valor válido!')
    clear_screen()
    return option
def condicional(parametro):
    """Dispatch on the menu option: 1 = arabic->roman, otherwise roman->arabic.

    Any invalid option or number restarts the flow recursively via
    condicional(menu()).
    """
    if parametro <= 0 or parametro > 2:
        print('\nInsira um valor válido!')
        clear_screen()
        condicional(menu())
    elif parametro == 1:
        arabic = str(input('Insira um número Arábico qualquer: '))
        try:
            # Accept "3.7"-style input: parse as float, round to nearest int.
            arabic = float(arabic)
            arabic = round(arabic)
            arabic = int(arabic)
            numero_arabico_convertido = roman.toRoman(arabic)
        except ValueError:
            # Input was not numeric at all.
            print('\nInsira um número válido!')
            clear_screen()
            condicional(menu())
        except roman.OutOfRangeError:
            # Value outside the range the roman library supports.
            print('\nInsira um número válido!')
            clear_screen()
            condicional(menu())
        else:
            print(f'O número arábico {arabic} em romano é {numero_arabico_convertido}')
            end_screen()
    else:
        try:
            romano = str(input('Insira um número romano qualquer: '))
            romano = romano.upper()
            numero_romano_convertido = roman.fromRoman(romano)
        except roman.InvalidRomanNumeralError:
            print('\nInsira um número válido!')
            clear_screen()
            condicional(menu())
        else:
            print(f'O número romano {romano} em arábico é {numero_romano_convertido}')
            end_screen()
condicional(menu())
|
# Generated by Django 2.2.5 on 2019-12-28 18:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the User table (zip code + temperature unit)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('zip_code', models.CharField(max_length=5)),
                ('unit_of_temperature', models.CharField(choices=[('F', 'Fahrenheit'), ('C', 'Celsius')], max_length=1)),
            ],
        ),
    ]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-10-16 23:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds a required `recipient` FK to Message.

    default=1 together with preserve_default=False means user id 1 is only
    used to backfill existing rows during this migration.
    """

    dependencies = [
        ('dashboard_app', '0004_auto_20181016_2257'),
    ]

    operations = [
        migrations.AddField(
            model_name='message',
            name='recipient',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='received_messages', to='dashboard_app.User'),
            preserve_default=False,
        ),
    ]
|
import unittest
from pyfiles.db import database
from pyfiles.model import overworld
# Define a DB handler since we're creating entities that will need mapping
DB_HANDLER = None
def setUp(self):
    """Print the name of the test about to run.

    NOTE(review): defined at module level, so unittest never calls this
    automatically — it looks like it was meant to live inside a TestCase.
    """
    # TestCase exposes the current test's name as `_testMethodName`
    # (single underscore); `self.__testMethodName` is not an attribute
    # of TestCase and raised AttributeError.
    print(self._testMethodName)
def setUpModule():
    """Open the shared DB handler once before any test in this module runs."""
    # Without `global`, the assignment created a function-local variable:
    # the module-level DB_HANDLER stayed None, tearDownModule never closed
    # the handle, and the opened DB leaked.
    global DB_HANDLER
    DB_HANDLER = database.DatabaseHandler()
    DB_HANDLER.open_db()
def tearDownModule():
    # Close the shared DB handler after the last test in this module.
    if DB_HANDLER is not None:
        DB_HANDLER.close_db()
class TestOverworldGood(unittest.TestCase):
    """Sanity checks for the overworld's dimensions and starting position."""

    # Built once at class-creation time and shared by all tests.
    thisOverworld = overworld.getOverworld()

    def test_start_pos(self):
        #Ensure the x/y sizes of the overworld divide by 2
        self.assertEqual(0, self.thisOverworld.map_size_x % 2)
        self.assertEqual(0, self.thisOverworld.map_size_y % 2)
        #Ensure the overworld start position is exactly half it's size
        # NOTE(review): the checks below use OVERWORLD_SIZE_X/Y while the
        # parity checks above use map_size_x/y — confirm these refer to the
        # same quantity on the overworld object.
        startPos = self.thisOverworld.get_starting_pos()
        self.assertTrue(isinstance(startPos.pos_x, int))
        self.assertTrue(isinstance(startPos.pos_y, int))
        self.assertEqual(int(self.thisOverworld.OVERWORLD_SIZE_X/2), startPos.pos_x)
        self.assertEqual(int(self.thisOverworld.OVERWORLD_SIZE_Y/2), startPos.pos_y)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
|
# -*- coding: utf-8 -*-
from odoo import models, fields, _, api
from datetime import datetime
from dateutil.relativedelta import relativedelta
from odoo.exceptions import Warning, UserError
import pytz
from odoo import tools
from .tzlocal import get_localzone
class VacacionesNomina(models.Model):
    """Vacation request for an employee, synced to hr.leave on validation."""

    _name = 'vacaciones.nomina'
    _description = 'VacacionesNomina'

    # Sequence-generated reference; editable only while in draft.
    name = fields.Char("Name", required=True, copy=False, readonly=True, states={'draft': [('readonly', False)]}, index=True, default=lambda self: _('New'))
    employee_id = fields.Many2one('hr.employee', string='Empleado')
    fecha_inicial = fields.Date('Fecha inicial')
    dias = fields.Integer('Días')
    dias_de_vacaciones_disponibles = fields.Integer("Dias de vacaciones disponibles")
    state = fields.Selection([('draft', 'Borrador'), ('done', 'Hecho'), ('cancel', 'Cancelado')], string='Estado', default='draft')

    @api.onchange('employee_id')
    def _onchange_employee_id(self):
        """Recompute the available vacation-day balance from the contract table."""
        if self.employee_id:
            contract = self.employee_id.contract_id
            if contract:
                self.dias_de_vacaciones_disponibles = sum(vacacione.dias for vacacione in contract.tabla_vacaciones)

    @api.onchange('dias')
    def _onchange_dias(self):
        """Block requesting more days than the available balance."""
        if self.dias and self.dias > self.dias_de_vacaciones_disponibles:
            raise Warning("No tiene suficientes dias de vacaciones")

    @api.model
    def create(self, vals):
        """Assign the next sequence number when the name is still 'New'."""
        if vals.get('name', _('New')) == _('New'):
            vals['name'] = self.env['ir.sequence'].next_by_code('vacaciones.nomina') or _('New')
        result = super(VacacionesNomina, self).create(vals)
        return result

    @api.multi
    def action_validar(self):
        """Validate the request.

        Creates or updates the matching hr.leave record (converted to UTC)
        and deducts the requested days from the contract's vacation table.
        """
        leave_type = self.env.ref('nomina_cfdi_extras_ee.hr_holidays_status_vac', False)
        if self.fecha_inicial:
            # Full-day span from the start date over `dias` days.
            date_from = self.fecha_inicial
            date_to = date_from + relativedelta(days=self.dias - 1)
            date_from = date_from.strftime("%Y-%m-%d") + ' 00:00:00'
            date_to = date_to.strftime("%Y-%m-%d") +' 23:59:59'
        else:
            # No start date: default to today, 06:00-20:00 local time.
            date_from = datetime.today().strftime("%Y-%m-%d")
            date_to = date_from + ' 20:00:00'
            date_from += ' 06:00:00'
        # Convert the local naive datetime strings to UTC for hr.leave.
        timezone = self._context.get('tz')
        if not timezone:
            timezone = self.env.user.partner_id.tz or 'UTC'
        #timezone = tools.ustr(timezone).encode('utf-8')
        local = pytz.timezone(timezone) #get_localzone()
        naive_from = datetime.strptime (date_from, "%Y-%m-%d %H:%M:%S")
        local_dt_from = local.localize(naive_from, is_dst=None)
        utc_dt_from = local_dt_from.astimezone (pytz.utc)
        date_from = utc_dt_from.strftime ("%Y-%m-%d %H:%M:%S")
        naive_to = datetime.strptime (date_to, "%Y-%m-%d %H:%M:%S")
        local_dt_to = local.localize(naive_to, is_dst=None)
        utc_dt_to = local_dt_to.astimezone (pytz.utc)
        date_to = utc_dt_to.strftime ("%Y-%m-%d %H:%M:%S")
        nombre = 'Vacaciones_'+self.name
        registro_falta = self.env['hr.leave'].search([('name','=', nombre)], limit=1)
        if registro_falta:
            # Re-validation: update the existing leave record in place.
            registro_falta.write({'date_from' : date_from,
                 'date_to' : date_to,
                 'employee_id' : self.employee_id.id,
                 'holiday_status_id' : leave_type and leave_type.id,
                 'state': 'validate',
                })
        else:
            holidays_obj = self.env['hr.leave']
            vals = {'date_from' : date_from,
                    'holiday_status_id' : leave_type and leave_type.id,
                    'employee_id' : self.employee_id.id,
                    'name' : 'Vacaciones_'+self.name,
                    'date_to' : date_to,
                    'state': 'confirm',}
            # Run the onchanges on a virtual record so computed leave fields
            # are filled in, then copy every cached value back into vals.
            holiday = holidays_obj.new(vals)
            holiday._onchange_employee_id()
            holiday._onchange_leave_dates()
            vals.update(holiday._convert_to_write({name: holiday[name] for name in holiday._cache}))
            #holidays_obj.create(vals)
            vals.update({'holiday_status_id' : leave_type and leave_type.id,})
            vacacion = self.env['hr.leave'].create(vals)
            vacacion.action_validate()
        self.write({'state':'done'})
        # Deduct the requested days from the oldest vacation-year rows first.
        dias = self.dias
        if self.employee_id and dias:
            contract = self.employee_id.contract_id
            if contract:
                for vac in contract.tabla_vacaciones.sorted(key=lambda object1: object1.ano):
                    if dias <= vac.dias:
                        vac.write({'dias':vac.dias-dias})
                        break
                    elif dias > vac.dias:
                        dias = dias-vac.dias
                        vac.write({'dias':0})
        return True

    @api.multi
    def action_cancelar(self):
        """Cancel the request: refuse the linked leave and restore the days."""
        self.write({'state':'cancel'})
        nombre = 'Vacaciones_'+self.name
        registro_falta = self.env['hr.leave'].search([('name','=', nombre)], limit=1)
        if registro_falta:
            registro_falta.action_refuse() #.write({'state':'cancel'})
        contract = self.employee_id.contract_id
        if contract:
            # Give the days back to the oldest vacation-year row.
            vac = contract.tabla_vacaciones.sorted(key=lambda object1: object1.ano)
            saldo_ant = vac[0].dias + self.dias
            vac[0].write({'dias':saldo_ant})

    @api.multi
    def action_draft(self):
        """Reset the request to draft."""
        self.write({'state':'draft'})

    @api.multi
    def unlink(self):
        # Deletion is forbidden; records must be cancelled instead.
        raise UserError("Los registros no se pueden borrar, solo cancelar.")
from typing import List
# Quicksort
def quicksort(arr:List):
    """Sort a list via recursive quicksort, pivoting on the first element."""
    if len(arr) < 2:
        # Base case: 0 or 1 element is already sorted.
        return arr
    pivot = arr[0]
    smaller = [item for item in arr[1:] if item <= pivot]
    larger = [item for item in arr[1:] if item > pivot]
    # Divide and conquer: sort each side, then stitch them around the pivot.
    return quicksort(smaller) + [pivot] + quicksort(larger)
# Sum of list elements, computed recursively.
def recursion_sum(arr:List):
    """Return sum(arr): empty list -> 0, otherwise head + sum of tail."""
    return 0 if len(arr) < 1 else arr[0] + recursion_sum(arr[1:])
# Element count, computed recursively.
def recursion_len(arr:List):
    """Return len(arr) computed recursively: empty -> 0, else 1 + len(tail)."""
    if arr == []:
        return 0
    return 1 + recursion_len(arr[1:])
# Maximum element, computed recursively.
def recursion_max(arr:List):
    """Return the largest element of arr, recursively.

    The original called recursion_max(arr[1:]) twice on the same tail
    (exponential time) and shadowed the builtin `max`; this version
    recurses once and reuses the result (linear number of calls).
    """
    if len(arr) <= 1:
        return arr[0]
    tail_max = recursion_max(arr[1:])
    return arr[0] if arr[0] > tail_max else tail_max
# Demo: exercise the recursive helpers on a sample list.
arr = [3,6,1,78,23,98,0,2,23,1,99]
print(f"元素个数{recursion_len(arr)}")
print(f"快速排序:{quicksort(arr)}")
print(f"最大值{recursion_max(arr)}")
print(f"元素和{recursion_sum(arr)}")
|
import sys
from datetime import datetime
import tkinter.font
from tkinter import*
from datetime import datetime
from PIL import Image, ImageTk
import threading
from random import *
import openpyxl
import matplotlib.pyplot as plt
import time
import speech_recognition as sr
import pyaudio
import wave
from pydub.playback import play
from pydub import AudioSegment
import numpy as np
import struct
import time
sr.__version__
import tkinter.ttk
# Audio-capture configuration.
FORMAT = pyaudio.paInt16  # 16-bit PCM samples
LEN = 10**100  # loop bound in voice_to_text: effectively "record until silence"
PASS = 5  # minimum chunks before a zero sample may stop the capture
CHANNELS = 1  # mono
RATE = 44100  # sample rate in Hz
CHUNK = 1024  # frames per buffer read
RECORD_SECONDS = 3
MIN_STRING_LIST_LENGTH = 9  # minimum chunk count for a clip worth transcribing
WAVE_OUTPUT_FILENAME = "./data/wav/file.wav"
class Gui() :
    """Tkinter front-end for the GWAENGSU speech-to-code (STC) program.

    Builds the window and widgets, then runs voice_to_text() on a background
    thread that keeps recording microphone audio and appending the recognized
    Korean text to the left listbox.
    """

    def __init__(self):
        self.window = tkinter.Tk()
        # Fonts (나눔스퀘어 family) shared across the widgets.
        self.font = tkinter.font.Font(family = "나눔스퀘어 Bold", size = 24)
        self.font4 = tkinter.font.Font(family = "나눔스퀘어 Regular", size = 18)
        self.font5 = tkinter.font.Font(family = "나눔스퀘어 Regular", size = 15)
        self.font6 = tkinter.font.Font(family = "나눔스퀘어 Light", size = 12)
        self.window.title("Kwangwoon Univ. Chambit Design Project (GWAENGSU(괭수) - STC(Speech to Code) Program)")
        self.window.geometry("1090x800")
        self.window.resizable(False, False)
        icon = PhotoImage(file='icon/dialogue.png')
        self.window.iconphoto(False, icon)
        self.show_outer_line()
        self.show_voice_listbox()
        self.show_label()
        self.show_button()
        self.show_combobox()
        self.show_checkbox()
        # Keep the UI responsive: recording/recognition runs on its own thread.
        t = threading.Thread(target=self.voice_to_text)
        t.start()
        self.window.mainloop()

    def show_label(self):
        """Static labels for the TEXT/CODE panes and section headers."""
        self.label_voice=tkinter.Label(self.window, text="TEXT",font = self.font, fg = 'black')
        self.label_voice.place(x = 510, y = 110, width = 90, height = 30)
        self.label_code=tkinter.Label(self.window, text="CODE",font = self.font, fg = 'black')
        self.label_code.place(x = 860, y = 110, width = 90, height = 30)
        label_convert=tkinter.Label(self.window, text="convert",font = self.font4, fg = 'grey')
        label_convert.place(x = 420, y = 45, width = 90, height = 30)
        label_record=tkinter.Label(self.window, text="record",font = self.font4, fg = 'grey')
        label_record.place(x = 60, y = 45, width = 90, height = 30)
        label_langugae=tkinter.Label(self.window, text="language",font = self.font4, fg = 'grey')
        label_langugae.place(x = 60, y = 375, width = 100, height = 30)
        label_software=tkinter.Label(self.window, text="software",font = self.font4, fg = 'grey')
        label_software.place(x = 60, y = 515, width = 100, height = 30)
        arrow = PhotoImage(file="icon/next.png")
        self.arrow = Label(image=arrow, height=40)
        # Keep a reference so the PhotoImage is not garbage-collected.
        self.arrow.image = arrow
        self.arrow.place(x = 710, y = 400, width = 45, height = 50)

    def show_voice_listbox(self) :
        """The two listboxes holding recognized text and generated code."""
        #scrollbar=tkinter.Scrollbar(self.window,relief='solid',bd = 4)
        #scrollbar.place(x = 160, y = 100, width = 30, height = 80)
        self.voice_listbox=tkinter.Listbox(self.window, relief='groove', bd=2, font=self.font5)
        self.voice_listbox.place(x = 405, y = 150, width = 300, height = 610)
        self.code_listbox=tkinter.Listbox(self.window, relief='groove', bd=2, font=self.font5)
        self.code_listbox.place(x = 755, y = 150, width = 300, height = 610)

    def show_outer_line(self):
        """Decorative grooved frames around the widget groups."""
        outer_line1=tkinter.Label(self.window, relief="groove",bd = 2)
        outer_line1.place(x = 390, y = 60, width = 680, height = 720)
        outer_line2=tkinter.Label(self.window, relief="groove",bd = 2)
        outer_line2.place(x = 30, y = 60, width = 330, height = 300)
        outer_line3=tkinter.Label(self.window, relief="groove",bd = 2)
        outer_line3.place(x = 30, y = 385, width = 330, height = 120)
        outer_line4=tkinter.Label(self.window, relief="groove",bd = 2)
        outer_line4.place(x = 30, y = 530, width = 330, height = 150)

    def show_button(self) :
        """Record/playback control buttons plus send and refresh."""
        self.button_start = tkinter.Button(self.window, relief="raised" ,repeatdelay=1000, repeatinterval=1000, \
                                           bg = 'white',bd = 3,text = "start",font = self.font, highlightcolor = 'grey')
        self.button_start.place(x= 65,y = 100,width = 255,height = 50)
        self.button_stop = tkinter.Button(self.window, relief="raised" ,repeatdelay=1000, repeatinterval=1000, \
                                          bg = 'white',bd = 3,text = "stop",font = self.font, highlightcolor = 'grey')
        self.button_stop.place(x= 65,y = 180,width = 255,height = 50)
        self.button_play = tkinter.Button(self.window, relief="raised" ,repeatdelay=1000, repeatinterval=1000, \
                                          bg = 'white',bd = 3,text = "play",font = self.font)
        self.button_play.place(x= 65,y = 260,width = 255,height = 50)
        self.button_send = tkinter.Button(self.window, relief="groove" ,repeatdelay=1000, repeatinterval=1000, \
                                          bd = 3,text = "send",font = self.font4)
        self.button_send.place(x= 170,y = 675,width = 150,height = 30)
        self.delete=tkinter.Button(self.window, text="새로고침", font=self.font6, command=self.deleteText)
        self.delete.place(x=30, y=720, width=70, height=40)

    def getText(self):
        """Return the combined contents of both listboxes as one tuple."""
        self.result1 = self.voice_listbox.get(0, "end")
        self.result2 = self.code_listbox.get(0, "end")
        return self.result1+self.result2

    def deleteText(self):
        """Clear both listboxes (the "새로고침"/refresh button handler)."""
        result = self.getText()
        # NOTE(review): delete(0, "end") already clears everything, so the
        # loop repeats a no-op after the first pass — harmless but redundant.
        for i in result:
            self.voice_listbox.delete(0, "end")
            self.code_listbox.delete(0, "end")

    def show_combobox(self) :
        """Target-language and target-software drop-downs."""
        values1 = [" Python"," C", " Java"]
        # Fix: self.font3 was never defined, so this line raised
        # AttributeError at startup; use font4 like the software combobox.
        combobox1=tkinter.ttk.Combobox(self.window, height=10, values=values1, font = self.font4)
        combobox1.place(x= 65,y = 440,width = 255,height = 50)
        combobox1.set(" Python")
        values2 = [" Jupyter Notebook"," Pycharm"]
        combobox2=tkinter.ttk.Combobox(self.window, height=10, values=values2, font = self.font4)
        combobox2.place(x= 65,y = 610,width = 255,height = 50)
        combobox2.set(" Jupyter Notebook")

    def show_checkbox(self) :
        """The 'auto' toggle checkbox."""
        checkVar1=tkinter.IntVar()
        ckeck_box=tkinter.Checkbutton(self.window,text=" auto",variable=checkVar1,font = self.font4)
        ckeck_box.place(x = 60, y = 670)

    def voice_to_text(self):
        """Record mic audio until silence, save it as WAV, then transcribe it.

        Runs forever on the background thread; each recognized sentence is
        appended to the TEXT listbox.
        """
        while(1):
            audio = pyaudio.PyAudio()
            # start Recording
            stream = audio.open(format=pyaudio.paInt16,
                                channels=CHANNELS,
                                rate=RATE,
                                input=True,
                                input_device_index=1,
                                frames_per_buffer=CHUNK)
            frames, string_list = [], []
            for i in range(LEN):
                data = stream.read(CHUNK)
                frames.append(data)
                string = np.frombuffer(data, np.int16)[0]
                string_list.append(string)
                # stop Recording: a zero first sample after PASS chunks is
                # treated as silence.
                if string == 0 and i > PASS:
                    break
            stream.stop_stream()
            stream.close()
            audio.terminate()
            waveFile = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
            waveFile.setnchannels(CHANNELS)
            waveFile.setsampwidth(audio.get_sample_size(FORMAT))
            waveFile.setframerate(RATE)
            waveFile.writeframes(b''.join(frames))
            waveFile.close()
            # Only clips longer than the threshold are worth transcribing.
            if len(string_list) > MIN_STRING_LIST_LENGTH:
                r = sr.Recognizer()
                korean_audio = sr.AudioFile("./data/wav/file.wav")
                with korean_audio as source:
                    mandarin = r.record(source)
                try :
                    sentence = r.recognize_google(audio_data=mandarin, language="ko-KR")
                    self.voice_listbox.insert(END,sentence)
                    print(sentence)
                    # NOTE(review): `sentence in '종료'` is a substring test
                    # (true for '종', '료', '종료') — confirm that
                    # `'종료' in sentence` was not intended.
                    if sentence in '종료':
                        break
                except (sr.UnknownValueError, sr.RequestError):
                    # Fix: narrowed the bare except to the two errors
                    # recognize_google actually raises.
                    print('*** 다시 말해주세요 ***')

    def voice_play(self):
        """Play back the most recently recorded WAV clip."""
        audio_file = AudioSegment.from_file(file="./data/wav/file.wav")
        play(audio_file)
Gui()
|
import requests
import json
from sendPostRequest import*
#Stage 1 - Reverse a given string.
#Return String
# Endpoint that hands us the string to reverse.
recieve = 'http://challenge.code2040.org/api/getstring'
#This url is where I will send the string to validate the algorithm works
confirm = 'http://challenge.code2040.org/api/validatestring'
#Send a request to retrieve the string
# NOTE(review): `tokenDictionary` comes from sendPostRequest's wildcard import.
string = sendPost(recieve, tokenDictionary)
def returnReveresedString(string):
    """Return the characters of `string` in reverse order."""
    return ''.join(reversed(string))
#Reversed the incoming string
reversed_string = str(returnReveresedString(string))
#Put the result in a dictionary
# NOTE(review): `token` also comes from the wildcard import above.
return_token = {'token' : token, 'string' : reversed_string}
#Print the result
print(sendPost(confirm, return_token))
|
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
# Load the training data (first column is the index).
train = pd.read_csv('data/train.csv', index_col=0)
train.head()
# In[3]:
X = train.drop('target', axis=1)
y = train.target
# ### Scaler
# In[4]:
from sklearn.preprocessing import MinMaxScaler
# Scale every feature into [0, 1].
minmax = MinMaxScaler()
X = minmax.fit_transform(X)
pd.DataFrame(X).describe()
# ### SelectKBest
# In[5]:
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import mutual_info_regression
# Keep the 500 features with the highest mutual information w.r.t. y.
X = SelectKBest(mutual_info_regression, k=500).fit_transform(X, y)
X.shape
# In[10]:
pd.DataFrame(X).describe()
# In[6]:
from sklearn.model_selection import train_test_split
# 80/20 train/test split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42
)
def rmsle_metric(y_pred, y_test):
    """Root Mean Squared Logarithmic Error between predictions and targets."""
    assert len(y_test) == len(y_pred)
    # log1p is the numerically stable form of log(1 + x) for small values.
    return np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y_test)) ** 2))
# ### Linear Regression
# In[29]:
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import GridSearchCV
# Grid-search the two boolean knobs of plain least squares.
params = {
    'normalize': [True, False],
    'fit_intercept': [True, False]
}
lr = LinearRegression()
lr_gs = GridSearchCV(lr, params)
lr_gs.fit(X_train, y_train)
y_pred = lr_gs.predict(X_test)
print(rmsle_metric(y_pred, y_test))
lr_gs.best_estimator_
# ### SVM
# In[25]:
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV
params = {
    'C': [.5, 1.5],
    'epsilon': [0.05, 0.1, 0.3],
    'kernel': ['poly', 'rbf'],
}
svr = SVR()
svr_gs = GridSearchCV(svr, params)
svr_gs.fit(X_train, y_train)
y_pred = svr_gs.predict(X_test)
print(rmsle_metric(y_pred, y_test))
svr_gs.best_estimator_
# ### KNN
# In[19]:
from sklearn.neighbors import KNeighborsRegressor
from sklearn.model_selection import GridSearchCV
params = {
    'n_neighbors': [5, 15],
    'weights': ['uniform', 'distance'],
    'algorithm': ['ball_tree', 'kd_tree']
}
knn = KNeighborsRegressor()
knn_gs = GridSearchCV(knn, params, n_jobs=2)
knn_gs.fit(X_train, y_train)
y_pred = knn_gs.predict(X_test)
print(rmsle_metric(y_pred, y_test))
# In[22]:
knn_gs.best_estimator_
# ### ExtraTreeRegressor
# In[32]:
from sklearn.tree import ExtraTreeRegressor
from sklearn.model_selection import GridSearchCV
params = {
    'criterion': ['mse', 'mae'],
    'splitter': ['best', 'random'],
    'max_depth': [50, 100, 500]
}
tree = ExtraTreeRegressor()
tree_gs = GridSearchCV(tree, params, n_jobs=2)
tree_gs.fit(X_train, y_train)
y_pred = tree_gs.predict(X_test)
print(rmsle_metric(y_pred, y_test))
# In[33]:
tree_gs.best_estimator_
# ### Neural Network
# In[52]:
from datetime import datetime
from sklearn.neural_network import MLPRegressor
from sklearn.model_selection import GridSearchCV
params = {
    'hidden_layer_sizes': [(200, 100), (300, 100)],
    'activation': ['logistic', 'relu'],
    'solver': ['sgd', 'adam'],
    'max_iter': [800],
    'learning_rate': ['adaptive']
}
start = datetime.now()
nn = MLPRegressor()
nn_gs = GridSearchCV(nn, params)
nn_gs.fit(X_train, y_train)
y_pred = nn_gs.predict(X_test)
# Wall-clock time for the whole NN grid search.
print(datetime.now() - start)
print(rmsle_metric(y_pred, y_test))
nn_gs.best_estimator_
# In[48]:
# Top-5 parameter combinations by CV rank.
pd.DataFrame(nn_gs.cv_results_).sort_values('rank_test_score').head()[
    ['param_activation', 'param_hidden_layer_sizes', 'param_solver', 'rank_test_score']
]
# In[47]:
nn_gs.cv_results_
# ### Stacking
# In[41]:
from mlxtend.regressor import StackingRegressor
# Stack the four tuned base models; the tuned SVR also serves as meta-model.
stregr = StackingRegressor(
    regressors=[
        tree_gs.best_estimator_,
        knn_gs.best_estimator_,
        lr_gs.best_estimator_,
        svr_gs.best_estimator_
    ],
    meta_regressor=svr_gs.best_estimator_
)
stregr.fit(X_train, y_train)
y_pred = stregr.predict(X_test)
print(rmsle_metric(y_pred, y_test))
|
# Generated by Django 3.1 on 2020-08-17 15:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Post table for scraped social messages
    (Telegram / Sahamyab), including sentiment and indexing metadata."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('messageId', models.IntegerField()),
                ('content', models.TextField()),
                ('messageDate', models.DateTimeField()),
                ('elasticPushDate', models.DateTimeField()),
                ('senderId', models.IntegerField()),
                ('senderUsername', models.CharField(max_length=200)),
                ('senderName', models.CharField(max_length=200)),
                ('isGroup', models.BooleanField()),
                ('channelId', models.IntegerField()),
                ('channelName', models.CharField(max_length=200)),
                ('channelUsername', models.CharField(max_length=200)),
                ('parentId', models.IntegerField()),
                ('likeCount', models.IntegerField()),
                ('source', models.CharField(choices=[('telegram', 'Telegram'), ('sahamyab', 'Sahamyab')], max_length=100)),
                ('stock', models.CharField(max_length=100)),
                ('sentiment', models.CharField(choices=[('neutral', 'Neutral'), ('negative', 'Negative'), ('positive', 'Positive')], max_length=100)),
                ('image', models.TextField()),
                ('version', models.CharField(max_length=20)),
            ],
            options={
                'ordering': ['messageId'],
            },
        ),
    ]
|
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
import numpy as np
def get(reverse=False, N=256):
    """
    Returns the 'GeriMap' colormap.
    """
    # Anchor colors running black -> blue/purple -> red/orange -> white.
    anchors = [(0, 0, 0), (.15, .15, .5), (.3, .15, .75),
               (.6, .2, .50), (1, .25, .15), (.9, .5, 0),
               (.9, .75, .1), (.9, .9, .5), (1, 1, 1)]
    if reverse:
        return LinearSegmentedColormap.from_list('GeriMap_r', anchors[::-1], N=N)
    return LinearSegmentedColormap.from_list('GeriMap', anchors, N=N)
def register():
    """
    Register the perceptually uniform colormap 'GeriMap' with matplotlib.

    Registers both the forward and reversed variants.
    """
    # NOTE(review): plt.register_cmap is deprecated in newer matplotlib
    # (matplotlib.colormaps.register replaces it) — confirm the pinned version.
    plt.register_cmap(cmap=get(False))
    plt.register_cmap(cmap=get(True))
|
def is_group(word):
    """Return True when every letter appears only in one consecutive run."""
    seen = [False] * 26
    previous = ''
    for ch in word:
        slot = ord(ch) - ord('a')
        if seen[slot] and ch != previous:
            # Letter reappeared after a different character: not a group word.
            return False
        seen[slot] = True
        previous = ch
    return True
# Count how many of the input words are "group words".
cnt = 0
for _ in range(int(input())):
    if is_group(input()):
        cnt += 1
print(cnt)
|
class class1():
    """Demo class showing @classmethod access to a class attribute."""

    var1 = 100

    @classmethod
    def fun1(cls):
        """Print the class attribute through the cls reference."""
        print('我是fun1,var1=', cls.var1)
class1.fun1() |
# n is the declared length and is otherwise unused (the line must still be
# consumed); the answer depends only on the counts of 'n' and 'z' in s.
n = int(input())
s = input()
# Print one 1 per 'n' and one 0 per 'z', space-separated.
print(*([1]*s.count('n')+[0]*s.count('z')))
import tensorflow as tf
import numpy as np
# TF1-style graph: a Xavier-initialized embedding table, a lookup of three
# rows, and a session run to print both.
initializer = tf.contrib.layers.xavier_initializer()
c = tf.Variable(initializer([10]), name='item_embedding')
b = tf.nn.embedding_lookup(c, [1,2,3])
# Fix: tf.const does not exist — the constant op is tf.constant.
e = tf.constant([])
with tf.Session() as sess:
    sess.run(tf.initialize_all_variables())
    print(sess.run(b))
    print(sess.run(c))
from django.shortcuts import render
from django.http import JsonResponse
import json
import subprocess
from .models import *
# Create your views here.
def index(request):
    # Render the single-page app shell.
    return render(request, 'pas/index.html')
def student(request, sid):
    """Return one student's id, name, preference list, and groups as JSON."""
    info = Student.objects.filter(pk=sid)
    groups = Group.objects.filter(members=info)
    info = info.values()
    groups = groups.values()
    data = {
        'id': info[0]['id'],
        'name': info[0]['name'],
        'preflist': info[0]['preflist'],
        'groups': list(groups)
    }
    return JsonResponse(data)
def studentList(request):
    """Return every student with their groups and (if any) assigned project.

    Fixes: the bare `except` now only swallows the expected conversion
    errors, and the per-student dict no longer shadows the imported `json`
    module.
    """
    info = Student.objects.all().values()
    data = []
    for s in info:
        sid = int(s['id'])
        groups = Group.objects.filter(members__id=sid).values()
        try:
            assg_id = int(s['assignment_id'])
        except (TypeError, ValueError, KeyError):
            # No assignment yet (NULL/missing/non-numeric) -> unassigned.
            assg_id = 0
        assg_proj = ''
        if assg_id != 0:
            assg_proj = Project.objects.filter(pk=assg_id).values()
            assg_proj = assg_proj[0]['name']
        entry = {
            'id': s['id'],
            'name': s['name'],
            'preflist': s['preflist'],
            'assignment_id': s['assignment_id'],
            'assignment_name': assg_proj,
            'groups': list(groups)
        }
        data.append(entry)
    response = {
        'student_list': list(data)
    }
    return JsonResponse(response)
def lecturer(request, lid):
    """Return one lecturer with their projects, each annotated with the
    enrolled groups, the lecturer's preference list, and matched groups."""
    info = Lecturer.objects.filter(pk=lid)
    projects = Project.objects.filter(supervisor=lid)
    info = info.values()
    projects = projects.values()
    for p in projects:
        # Groups whose preference list mentions this project.
        enrolled = Group.objects.filter(preflist__contains=p['id']).values()
        sid = [s['id'] for s in enrolled]
        if len(sid) != 0:
            p['enrolled'] = ','.join(str(s) for s in sid)
        else:
            p['enrolled'] = ''
        # Prepare preflist
        preflist = LecturerPreflist.objects.filter(lecturer_id=lid,
                project_id=p['id']).values()
        if not preflist:
            p['preflist'] = ''
        else:
            p['preflist'] = preflist[0]['preflist']
        # Prepare matched group
        row = LecturerPreflist.objects.filter(lecturer_id=lid, project_id=p['id'])
        if not row:
            # No preference row -> nothing can be matched for this project.
            p['matched'] = ''
            continue
        matched = Group.objects.filter(matched=row).values()
        sid = [s['id'] for s in matched]
        if len(sid) != 0:
            p['matched'] = ','.join(str(s) for s in sid)
        else:
            p['matched'] = ''
    data = {
        'id': info[0]['id'],
        'name': info[0]['name'],
        'capacity': info[0]['capacity'],
        'projects': list(projects),
    }
    return JsonResponse(data)
def lecturerList(request):
    """Return all lecturers as JSON."""
    rows = Lecturer.objects.all().values()
    return JsonResponse({'lecturer_list': list(rows)})
def group(request, gid):
    """Return one group: representative, members, capacity, and preferences."""
    info = Group.objects.filter(pk=gid)
    rep_name = Student.objects.filter(pk=info.values('representative_id'))
    members = Student.objects.filter(members=info)
    # Capacity is simply the current member count.
    capacity = members.count()
    info = info.values()
    rep_name = rep_name.values()
    members = members.values()
    data = {
        'id': info[0]['id'],
        'name': info[0]['name'],
        'preflist': info[0]['preflist'],
        'capacity': capacity,
        'sid': info[0]['representative_id'],
        'sid_name': rep_name[0]['name'],
        'members': list(members)
    }
    return JsonResponse(data)
def groupList(request):
    """Return every group with capacity, member ids, and assigned project name.

    Fix: the bare `except` now only swallows the expected conversion errors.
    """
    info = Group.objects.all().values()
    for p in info:
        try:
            assg_id = int(p['assignment_id'])
        except (TypeError, ValueError, KeyError):
            # No assignment yet (NULL/missing/non-numeric) -> unassigned.
            assg_id = 0
        assg_proj = ''
        if assg_id != 0:
            assg_proj = Project.objects.filter(pk=assg_id).values()
            assg_proj = assg_proj[0]['name']
        p['assignment_name'] = assg_proj
        members = Student.objects.filter(members=p['id'])
        p['capacity'] = members.count()
        member_ids = [s['id'] for s in members.values()]
        p['members'] = ','.join(map(str, member_ids))
    data = {
        'group_list': list(info)
    }
    return JsonResponse(data)
def project(request, pid):
    """Return one project with its supervisor's id and name."""
    info = Project.objects.filter(pk=pid)
    supervisor = Lecturer.objects.filter(project__pk=pid)
    info = info.values()
    supervisor = supervisor.values()
    data = {
        'id': info[0]['id'],
        'name': info[0]['name'],
        'description': info[0]['description'],
        'capacity': info[0]['capacity'],
        'lid': info[0]['supervisor_id'],
        'lid_name': supervisor[0]['name']
    }
    return JsonResponse(data)
def projectList(request):
    """Return all projects, reusing the single-project view for each row."""
    rows = Project.objects.all().values()
    serialized = [
        json.loads(project(request, int(row['id'])).content.decode())
        for row in rows
    ]
    return JsonResponse({'project_list': serialized})
def listAll(request):
    """Aggregate projects, groups, and lecturers into one JSON payload and
    dump it to data.json for the external matching script."""
    projects = json.loads(projectList(request).content.decode())
    groups = json.loads(groupList(request).content.decode())
    lecturers = Lecturer.objects.all().values()
    tmp = []
    for l in lecturers:
        lid = int(l['id'])
        tmp.append(json.loads(lecturer(request, lid).content.decode()))
    data = {
        'project_list': projects['project_list'],
        'group_list': groups['group_list'],
        'lecturer_list': list(tmp)
    }
    # 'w' truncates just like 'w+', and the context manager closes the file,
    # so the explicit close() inside the with-block was redundant.
    with open('data.json', 'w') as file:
        file.write(json.dumps(data))
    return JsonResponse(data)
def matching(request):
    """Run the external SPA matching script and persist its result.

    Dumps the current data via listAll(), invokes ./spa_group.py, then writes
    the produced assignments back onto Group, Student, and LecturerPreflist.
    """
    listAll(request)
    # Truncate any stale result before the solver runs.
    open('matching.json', 'w').close()
    proc = subprocess.run(['./spa_group.py', 'data.json'])
    result = ''
    with open('matching.json', 'r') as f:
        result = json.loads(f.read())
    # group_matched[0]: project id -> list of matched group ids.
    for p in result['group_matched'][0]:
        pid = int(p)
        assignment = result['group_matched'][0][p]
        if assignment:
            for g in assignment:
                gid = int(g)
                Group.objects.filter(pk=gid).update(assignment_id=pid)
                # Propagate the assignment to every member of the group.
                Student.objects.filter(members=gid).update(
                    assignment_id=pid
                )
    # lecturer_matched[0]: lecturer id -> list of matched project ids.
    for l in result['lecturer_matched'][0]:
        lid = int(l)
        assignment = result['lecturer_matched'][0][l]
        if assignment:
            for p in assignment:
                pid = int(p)
                gp_list = result['group_matched'][0][str(p)]
                row = LecturerPreflist.objects.get(lecturer_id=lid,
                        project_id=pid)
                row.matched.add(*gp_list)
    return JsonResponse(result)
def clearMatching(request):
    """Reset every assignment and clear all lecturer matched-group links."""
    Group.objects.all().update(assignment_id=0)
    Student.objects.all().update(assignment_id=0)
    for pref in LecturerPreflist.objects.all():
        pref.matched.clear()
    return JsonResponse({ 'status': 'success'})
|
from django.db import models
from ..users.models import User
from ..books.models import Book
# Create your models here.
class ReviewManager(models.Manager):
    """Validation and creation helpers for Review records."""

    def review_valid(self, postData):
        """Return a dict of error-key -> message; empty dict means valid.

        Fix: a missing 'review' key previously raised KeyError; it is now
        reported as a validation error like an empty review.
        """
        errors = {}
        if 'rating' not in postData: # null
            errors['bad_rating'] = "Oops, you forgot to rate the book."
        review_text = postData.get('review', '')
        if len(review_text) < 1: # null
            errors['no_review'] = "Oops, you forgot to tell us what you thought of the book."
        elif len(review_text) < 3: # not long enough
            errors['bad_review'] = "Hmmm, that doesn't really tell us what you thought of the book. Review should be a minimum of 3 characters."
        # process validations
        return errors

    def create_review(self, postData, user_id):
        """Create a Review linking the posting user and the reviewed book."""
        reviewer = User.objects.get(id=user_id)
        reviewed_book = Book.objects.get(id=int(postData['book_id']))
        review = self.create(review=postData['review'], rating=int(postData['rating']), reviewer=reviewer, book=reviewed_book)
        return review
class Review(models.Model):
    """A user's rating plus text review of a book."""

    review = models.TextField()
    rating = models.SmallIntegerField()
    reviewer = models.ForeignKey(User, related_name="reviews")
    book = models.ForeignKey(Book, related_name="reviews")
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    objects = ReviewManager()

    def __repr__(self):
        # Fix: the f-string was missing its closing '>'.
        return f'<Review {self.id} - Rating {self.rating}>'
#========================================
# author: Changlong.Zang
# mail: zclongpop123@163.com
# time: Tue Sep 19 15:59:31 2017
#========================================
import pymel.core as pm
#--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
def add_shape(shape, transform):
    '''Reparent every shape node under `shape` onto `transform`, then delete
    the now-empty source transform.'''
    incoming = pm.PyNode(shape).getShapes()
    pm.parent(incoming, transform, s=True, r=True)
    pm.delete(shape)
def replace_shape(shape, transform):
    """Swap *transform*'s current shapes for the shapes under *shape*."""
    previous_shapes = pm.PyNode(transform).getShapes()
    add_shape(shape, transform)
    pm.delete(previous_shapes)
|
import unittest
from katas.kyu_7.time_degrees import clock_degree
class ClockDegreeTestCase(unittest.TestCase):
    """Exercises clock_degree over valid clock times and malformed input."""

    def _check(self, clock_time, expected):
        # Shared assertion so every test reads as a data pair.
        self.assertEqual(clock_degree(clock_time), expected)

    def test_equal_1(self):
        self._check('00:00', '360:360')

    def test_equal_2(self):
        self._check('01:01', '30:6')

    def test_equal_3(self):
        self._check('00:01', '360:6')

    def test_equal_4(self):
        self._check('01:00', '30:360')

    def test_equal_5(self):
        self._check('01:30', '30:180')

    def test_equal_6(self):
        self._check('24:00', 'Check your time !')

    def test_equal_7(self):
        self._check('13:60', 'Check your time !')

    def test_equal_8(self):
        self._check('20:34', '240:204')

    def test_equal_9(self):
        self._check('01:03', '30:18')

    def test_equal_10(self):
        self._check('12:05', '360:30')

    def test_equal_11(self):
        self._check('26:78', 'Check your time !')

    def test_equal_12(self):
        self._check('16:25', '120:150')

    def test_equal_13(self):
        self._check('17:09', '150:54')

    def test_equal_14(self):
        self._check('19:00', '210:360')

    def test_equal_15(self):
        self._check('23:20', '330:120')

    def test_equal_16(self):
        self._check('-09:00', 'Check your time !')
|
#!/usr/bin/env python
# Print a right-aligned triangle of "* " columns, n rows tall.
n = int(input("Please Enter The number:"))
for row in range(n, 0, -1):
    print((n - row) * " " + row * "* ")
|
"""
首页主页面
"""
from appium.webdriver.common.mobileby import MobileBy
from app.企业微信po.page.basepage import BasePage
from app.企业微信po.page import ContactListPage
class MainPage(BasePage):
    """Home page of the app; exposes navigation to the other page objects.

    Driver setup is inherited from BasePage.
    """

    # Locator for the contact-list tab.
    contactlist = (MobileBy.XPATH, "//*[@text='通讯录']")

    def goto_contactlist(self):
        """Tap the contact-list tab and return its page object.

        :return: ContactListPage bound to the same driver.
        """
        self.find_and_click(self.contactlist)
        return ContactListPage(self.driver)

    def goto_workbench(self):
        """Open the workbench (clock-in) tab; not implemented yet.

        :return: None
        """
        pass
|
# Author: Ryan Lanese
"""
Prettifier
"""
class Prettifier(object):
    """Loads formatting rules from a file, one rule per line."""

    def __init__(self, file, extra=False):
        # `extra` is accepted for interface compatibility but not used yet.
        self.rules = self.load_file(file)

    def load_file(self, file):
        """Read *file* and return its lines as a list of rule strings.

        Bug fix: the original appended the file object `f` once per line
        instead of appending the line itself.
        """
        rules = []
        with open(file, 'r') as f:
            for line in f:
                rules.append(line)
        return rules
def main():
    """Entry point: build a Prettifier for the stylesheet given on the
    command line.

    Bug fix: `style_sheet` was undefined (its assignment was commented out),
    so calling main() always raised NameError.
    """
    import sys
    style_sheet = sys.argv[1] if len(sys.argv) > 1 else '/file/path/style.css'
    prettifier = Prettifier(style_sheet)

if __name__ == "__main__":
    main()
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import date
def split_years(DF):
    """Split a DataFrame into 10 calendar-year slices on its 'Day' column.

    Returns a list of 10 DataFrames: everything before 2007, each full year
    2007-2014, and everything from 2015 on. Replaces ten copy-pasted `.loc`
    lines with one loop over the year boundaries; output is identical.
    """
    DF = DF.copy()
    # Jan 1 boundaries between consecutive buckets: 2007 .. 2015.
    boundaries = [date(year, 1, 1) for year in range(2007, 2016)]
    years_list = [DF.loc[DF['Day'] < boundaries[0]]]
    for lo, hi in zip(boundaries, boundaries[1:]):
        years_list.append(DF.loc[(DF['Day'] >= lo) & (DF['Day'] < hi)])
    years_list.append(DF.loc[DF['Day'] >= boundaries[-1]])
    return years_list
def plots(years, title):
    """Plot the daily counts of each year (2006-2015), one subplot per year.

    years: list of 10 DataFrames as returned by split_years, each with
           'Day' and 'Count' columns, in chronological order.
    title: overall figure title.

    Replaces ten copy-pasted plot/set_title pairs with one loop; the figure
    produced is identical.
    """
    sns.set(style="white")
    fig, axes = plt.subplots(10, sharey=True, figsize=(18, 40))
    # Subplot i shows year 2006 + i.
    for ax, year_frame, year in zip(axes, years, range(2006, 2016)):
        ax.plot(year_frame["Day"], year_frame["Count"])
        ax.set_title(str(year))
    fig.suptitle(title, fontsize='x-large')
    fig.subplots_adjust(top=0.95)
    sns.despine(bottom=True, left=True)
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
import textwrap
import pytest
from pants.backend.python.subsystems.setup import InvalidLockfileBehavior, PythonSetup
from pants.backend.python.util_rules.interpreter_constraints import InterpreterConstraints
from pants.backend.python.util_rules.lockfile_metadata import PythonLockfileMetadataV3
from pants.backend.python.util_rules.pex_requirements import (
Lockfile,
ResolvePexConfig,
ResolvePexConstraintsFile,
_pex_lockfile_requirement_count,
get_metadata,
is_probably_pex_json_lockfile,
strip_comments_from_pex_json_lockfile,
validate_metadata,
)
from pants.core.util_rules.lockfile_metadata import (
BEGIN_LOCKFILE_HEADER,
END_LOCKFILE_HEADER,
InvalidLockfileError,
)
from pants.engine.internals.native_engine import EMPTY_DIGEST
from pants.testutil.option_util import create_subsystem
from pants.util.ordered_set import FrozenOrderedSet
from pants.util.pip_requirement import PipRequirement
from pants.util.strutil import comma_separated_list
# Canonical lockfile-metadata fixture shared by the tests below: CPython 3.8,
# two requirements, one constraint, and explicit only_binary/no_binary entries
# so every validation code path can be exercised against it.
METADATA = PythonLockfileMetadataV3(
    InterpreterConstraints(["==3.8.*"]),
    {PipRequirement.parse("ansicolors"), PipRequirement.parse("requests")},
    manylinux=None,
    requirement_constraints={PipRequirement.parse("abc")},
    only_binary={"bdist"},
    no_binary={"sdist"},
)
def create_python_setup(
    behavior: InvalidLockfileBehavior, *, enable_resolves: bool = True
) -> PythonSetup:
    """Build a PythonSetup subsystem configured for lockfile-validation tests."""
    options = dict(
        invalid_lockfile_behavior=behavior,
        resolves_generate_lockfiles=enable_resolves,
        interpreter_versions_universe=PythonSetup.default_interpreter_universe,
        resolves={"a": "lock.txt"},
        default_resolve="a",
    )
    return create_subsystem(PythonSetup, **options)
def test_get_metadata() -> None:
    """get_metadata returns None when validation is disabled or no header
    block is present, parses a valid header, and raises InvalidLockfileError
    for malformed or incomplete header JSON."""
    # We don't get metadata if we've been told not to validate it.
    python_setup = create_python_setup(behavior=InvalidLockfileBehavior.ignore)
    metadata = get_metadata(python_setup, b"", None, "dummy", "#")
    assert metadata is None
    python_setup = create_python_setup(behavior=InvalidLockfileBehavior.warn)
    # If we are supposed to validate Pants-generated lockfiles, but there is no header
    # block, then it's not a Pants-generated lockfile, so succeed but return no metadata.
    metadata = get_metadata(python_setup, b"NO HEADER HERE", None, "dummy", "#")
    assert metadata is None
    # If we are supposed to validate Pants-generated lockfiles, and there is a header
    # block, then succeed on valid JSON.
    valid_lock_metadata = json.dumps(
        {
            "valid_for_interpreter_constraints": "dummy",
            "requirements_invalidation_digest": "dummy",
        }
    )
    metadata = get_metadata(
        python_setup,
        f"# {BEGIN_LOCKFILE_HEADER}\n# {valid_lock_metadata}\n# {END_LOCKFILE_HEADER}\n".encode(),
        None,
        "dummy",
        "#",
    )
    assert metadata is not None
    # If we are supposed to validate Pants-generated lockfiles, and there is a header
    # block, then fail on invalid JSON.
    with pytest.raises(InvalidLockfileError):
        get_metadata(
            python_setup,
            f"# {BEGIN_LOCKFILE_HEADER}\n# NOT JSON\n# {END_LOCKFILE_HEADER}\n".encode(),
            None,
            "dummy",
            "#",
        )
    # If we are supposed to validate Pants-generated lockfiles, and there is a header
    # block, then fail on JSON that doesn't have the keys we expect.
    with pytest.raises(InvalidLockfileError):
        get_metadata(
            python_setup,
            f"# {BEGIN_LOCKFILE_HEADER}\n# {{ 'a': 'b' }}\n# {END_LOCKFILE_HEADER}\n".encode(),
            None,
            "dummy",
            "#",
        )
# Parametrize over every combination with at least one invalid input so each
# stale-lockfile condition is exercised alone and in combination.
@pytest.mark.parametrize(
    (
        "invalid_reqs,invalid_interpreter_constraints,invalid_constraints_file,invalid_only_binary,"
        + "invalid_no_binary,invalid_manylinux"
    ),
    [
        (
            invalid_reqs,
            invalid_interpreter_constraints,
            invalid_constraints_file,
            invalid_only_binary,
            invalid_no_binary,
            invalid_manylinux,
        )
        for invalid_reqs in (True, False)
        for invalid_interpreter_constraints in (True, False)
        for invalid_constraints_file in (True, False)
        for invalid_only_binary in (True, False)
        for invalid_no_binary in (True, False)
        for invalid_manylinux in (True, False)
        if (
            invalid_reqs
            or invalid_interpreter_constraints
            or invalid_constraints_file
            or invalid_only_binary
            or invalid_no_binary
            or invalid_manylinux
        )
    ],
)
def test_validate_lockfiles(
    invalid_reqs: bool,
    invalid_interpreter_constraints: bool,
    invalid_constraints_file: bool,
    invalid_only_binary: bool,
    invalid_no_binary: bool,
    invalid_manylinux: bool,
    caplog,
) -> None:
    """validate_metadata must warn about exactly the stale inputs present."""
    runtime_interpreter_constraints = (
        InterpreterConstraints(["==2.7.*"])
        if invalid_interpreter_constraints
        else METADATA.valid_for_interpreter_constraints
    )
    req_strings = FrozenOrderedSet(
        ["bad-req"] if invalid_reqs else [str(r) for r in METADATA.requirements]
    )
    lockfile = Lockfile(
        url="lock.txt",
        url_description_of_origin="foo",
        resolve_name="a",
    )
    validate_metadata(
        METADATA,
        runtime_interpreter_constraints,
        lockfile,
        req_strings,
        validate_consumed_req_strings=True,
        python_setup=create_python_setup(InvalidLockfileBehavior.warn),
        resolve_config=ResolvePexConfig(
            indexes=(),
            find_links=(),
            manylinux="not-manylinux" if invalid_manylinux else None,
            constraints_file=ResolvePexConstraintsFile(
                EMPTY_DIGEST,
                "c.txt",
                FrozenOrderedSet(
                    {PipRequirement.parse("xyz" if invalid_constraints_file else "abc")}
                ),
            ),
            no_binary=FrozenOrderedSet(["not-sdist" if invalid_no_binary else "sdist"]),
            only_binary=FrozenOrderedSet(["not-bdist" if invalid_only_binary else "bdist"]),
            path_mappings=(),
        ),
    )
    # Helper: assert a message appears in (or is absent from) the captured log.
    def contains(msg: str, if_: bool = True) -> None:
        assert (msg in caplog.text) is if_
    reqs_desc = comma_separated_list(f"`{rs}`" for rs in req_strings)
    contains(
        f"You are consuming {reqs_desc} from the `a` lockfile at lock.txt "
        "with incompatible inputs"
    )
    contains(
        "The lockfile does not provide all the necessary requirements",
        if_=invalid_reqs,
    )
    contains(
        "The requirements not provided by the `a` resolve are:\n ['bad-req']",
        if_=invalid_reqs,
    )
    contains("The inputs use interpreter constraints", if_=invalid_interpreter_constraints)
    contains("The constraints file at c.txt has changed", if_=invalid_constraints_file)
    contains("The `only_binary` arguments have changed", if_=invalid_only_binary)
    contains("The `no_binary` arguments have changed", if_=invalid_no_binary)
    contains("The `manylinux` argument has changed", if_=invalid_manylinux)
    contains("./pants generate-lockfiles --resolve=a`")
def test_is_probably_pex_json_lockfile():
    """Content starting with a JSON object (possibly preceded by // comments)
    is detected as a PEX JSON lockfile; requirements-style text is not.
    NOTE: the exact whitespace in these fixtures is load-bearing."""
    def is_pex(lock: str) -> bool:
        return is_probably_pex_json_lockfile(lock.encode())
    for s in (
        "{}",
        textwrap.dedent(
            """\
            // Special comment
            {}
            """
        ),
        textwrap.dedent(
            """\
            // Next line has extra space
             {"key": "val"}
            """
        ),
        textwrap.dedent(
            """\
            {
              "key": "val",
            }
            """
        ),
    ):
        assert is_pex(s)
    for s in (
        "",
        "# foo",
        "# {",
        "cheesey",
        "cheesey==10.0",
        textwrap.dedent(
            """\
            # Special comment
            cheesey==10.0
            """
        ),
    ):
        assert not is_pex(s)
def test_strip_comments_from_pex_json_lockfile() -> None:
    """Leading/trailing // comment lines are stripped; a trailing // comment
    on a JSON line is preserved, as is the trailing-newline removal."""
    def assert_stripped(lock: str, expected: str) -> None:
        assert strip_comments_from_pex_json_lockfile(lock.encode()).decode() == expected
    assert_stripped("{}", "{}")
    assert_stripped(
        textwrap.dedent(
            """\
            { // comment
                "key": "foo",
            }
            """
        ),
        textwrap.dedent(
            """\
            { // comment
                "key": "foo",
            }"""
        ),
    )
    assert_stripped(
        textwrap.dedent(
            """\
            // header
            // more header
            {
                "key": "foo",
            }
            // footer
            """
        ),
        textwrap.dedent(
            """\
            {
                "key": "foo",
            }"""
        ),
    )
def test_pex_lockfile_requirement_count() -> None:
    """_pex_lockfile_requirement_count returns (entries in "requirements") + 2;
    non-JSON content falls back to the minimum of 2."""
    assert _pex_lockfile_requirement_count(b"empty") == 2
    assert (
        _pex_lockfile_requirement_count(
            textwrap.dedent(
                """\
                {
                  "allow_builds": true,
                  "allow_prereleases": false,
                  "allow_wheels": true,
                  "build_isolation": true,
                  "constraints": [],
                  "locked_resolves": [
                    {
                      "locked_requirements": [
                        {
                          "artifacts": [
                            {
                              "algorithm": "sha256",
                              "hash": "00d2dde5a675579325902536738dd27e4fac1fd68f773fe36c21044eb559e187",
                              "url": "https://files.pythonhosted.org/packages/53/18/a56e2fe47b259bb52201093a3a9d4a32014f9d85071ad07e9d60600890ca/ansicolors-1.1.8-py2.py3-none-any.whl"
                            }
                          ],
                          "project_name": "ansicolors",
                          "requires_dists": [],
                          "requires_python": null,
                          "version": "1.1.8"
                        }
                      ],
                      "platform_tag": [
                        "cp39",
                        "cp39",
                        "macosx_11_0_arm64"
                      ]
                    }
                  ],
                  "pex_version": "2.1.70",
                  "prefer_older_binary": false,
                  "requirements": [
                    "ansicolors"
                  ],
                  "requires_python": [],
                  "resolver_version": "pip-legacy-resolver",
                  "style": "strict",
                  "transitive": true,
                  "use_pep517": null
                }
                """
            ).encode()
        )
        == 3
    )
|
class Tree:
    """A binary tree node holding a key and links to its two children.

    Attributes:
        key: the value stored at this node.
        left_child / right_child: child subtrees, None while unset.
    """

    def __init__(self, root):
        self.key = root
        self.left_child = self.right_child = None
|
# Problem Set 1
from os import system
system("clear")
# Part A: House Hunting
def monthsToPay(annual_salary, portion_saved, total_cost):
    """Return how many months of saving reach a 25% down payment.

    Savings earn a 4% annual return compounded monthly, and each month a
    fixed fraction of the monthly salary is added on top.
    """
    down_payment_rate = 0.25
    annual_return = 0.04
    savings = 0
    months = 0
    target = down_payment_rate * total_cost
    while savings < target:
        savings += savings * annual_return / 12        # monthly investment return
        savings += portion_saved * annual_salary / 12  # monthly contribution
        months += 1
    return months
def monthsToPayTest():
    """Prompt for salary, savings fraction and home cost; print the months."""
    salary = float(input("Enter your annual salary: "))
    fraction = float(input("Enter the percent of your salary to save, as a decimal: "))
    cost = float(input("Enter the cost of your dream home: "))
    print(f"Number of months: {monthsToPay(salary,fraction,cost)}")
# Part B: Saving, with a raise
def monthsToPayRaise(annual_salary, portion_saved, total_cost,
                     semi_annual_raise):
    """Like monthsToPay, but the salary grows by semi_annual_raise
    every six months."""
    salary = annual_salary
    savings = 0
    months = 0
    target = 0.25 * total_cost
    annual_return = 0.04
    while savings < target:
        # Apply the raise at the start of every 7th, 13th, ... month.
        if months != 0 and months % 6 == 0:
            salary *= (1 + semi_annual_raise)
        savings += savings * annual_return / 12  # monthly investment return
        savings += portion_saved * salary / 12   # monthly contribution
        months += 1
    return months
def monthsToPayRaiseTest():
    """Prompt for salary, savings fraction, home cost and raise; print months."""
    salary = float(input("Enter your annual salary: "))
    fraction = float(input("Enter the percent of your salary to save, as a decimal: "))
    cost = float(input("Enter the cost of your dream home: "))
    raise_rate = float(input("Enter the semi-annual-raise, as a decimal: "))
    print(f"Number of months: {monthsToPayRaise(salary,fraction,cost,raise_rate)}")
# Part C: Finding the right amount to save away
|
# Read an integer and report its parity.
numero = int(input('Digite um número:'))
# Bug fix: the odd-case message had a stray "7" ("impa7r").
print('Esse número é par' if numero % 2 == 0 else 'Esse número é ímpar')
|
#!/usr/bin/env python
import sys
import shlex
import subprocess
import gtk
import appindicator
CHECK_FREQUENCY = 5
class CheckVpnc:
    """Tray indicator (GTK/AppIndicator) reflecting whether a vpnc VPN is up.

    The icon switches to the "attention" (secure) state while both a
    vpnc-connect process and the tun0 interface are present, and back to
    "active" (insecure) otherwise.
    """
    def __init__(self):
        # NOTE(review): the indicator id still says "example-simple-client" —
        # presumably left over from sample code; confirm before renaming.
        self.ind = appindicator.Indicator(
            "example-simple-client",
            "channel-insecure-symbolic",
            appindicator.CATEGORY_APPLICATION_STATUS)
        self.ind.set_status(appindicator.STATUS_ACTIVE)
        self.ind.set_attention_icon("channel-secure-symbolic")
        self.menu_setup()
        self.ind.set_menu(self.menu)
    def main(self):
        """Run one immediate check, then poll every CHECK_FREQUENCY seconds."""
        self.check_status()
        gtk.timeout_add(CHECK_FREQUENCY * 1000, self.check_status)
        gtk.main()
    def quit(self, widget):
        """Menu callback: exit the process."""
        sys.exit(0)
    def menu_setup(self):
        """Build the indicator menu (a single Quit item)."""
        self.menu = gtk.Menu()
        self.quit_item = gtk.MenuItem("Quit")
        self.quit_item.connect("activate", self.quit)
        self.quit_item.show()
        self.menu.append(self.quit_item)
    def check_process(self):
        """Return grep's exit status: 0 iff a vpnc-connect process exists."""
        cmd_ps = subprocess.Popen(shlex.split('ps aux'),
                                  stdout=subprocess.PIPE)
        # The [v] bracket trick keeps grep from matching its own command line.
        cmd_grep = subprocess.Popen(shlex.split('grep [v]pnc-connect'),
                                    stdin=cmd_ps.stdout,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
        # Close our copy of ps's stdout so ps sees SIGPIPE if grep exits early.
        cmd_ps.stdout.close()
        cmd_grep.communicate()
        return cmd_grep.returncode
    def check_interface(self):
        """Return grep's exit status: 0 iff the tun0 interface is listed."""
        cmd_ifconfig = subprocess.Popen("ifconfig",
                                        stdout=subprocess.PIPE)
        cmd_grep = subprocess.Popen(shlex.split('grep [t]un0'),
                                    stdin=cmd_ifconfig.stdout,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
        cmd_ifconfig.stdout.close()
        cmd_grep.communicate()
        return cmd_grep.returncode
    def check_status(self):
        """Timer callback: update the icon; returns True to keep the timer."""
        if self.check_process() == 0 and self.check_interface() == 0:
            self.ind.set_status(appindicator.STATUS_ATTENTION)
        else:
            self.ind.set_status(appindicator.STATUS_ACTIVE)
        return True
# Script entry point: build the indicator and enter the GTK main loop.
if __name__ == "__main__":
    indicator = CheckVpnc()
    indicator.main()
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 25 20:21:10 2018
@author: Home
"""
# Minimal Keras example: train a tiny dense network to learn XOR.
import numpy as np
from keras.utils import np_utils
import tensorflow as tf
# Legacy workaround for old Keras/TF version mismatches — TODO confirm still needed.
tf.python.control_flow_ops = tf
# Fixed seed for reproducible weight initialization.
np.random.seed(42)
# The four XOR input pairs and their labels.
X = np.array([[0,0],[0,1],[1,0],[1,1]]).astype('float32')
y = np.array([[0],[1],[1],[0]]).astype('float32')
from keras.models import Sequential
from keras.layers.core import Dense, Activation
# One-hot encode the labels to match the 2-unit output layer.
y = np_utils.to_categorical(y)
xor = Sequential()
xor.add(Dense(32, input_dim =2))
xor.add(Activation("tanh"))
xor.add(Dense(2))
xor.add(Activation('sigmoid'))
xor.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
# NOTE(review): the model is trained twice (50 epochs here, 100 more below);
# presumably only the second fit was intended — confirm.
xor.fit(X,y,nb_epoch=50)
xor.summary()
history = xor.fit(X, y, nb_epoch=100, verbose=0)
score = xor.evaluate(X, y)
print("\nAccuracy: ", score[-1])
print("\nPredictions:")
print(xor.predict_proba(X))
#!/usr/bin/env python
"""
matchsort.py
Take a standard star catalog (pre-sorted in ascending order by RA)
and an observed catalog (also pre-sorted in ascending order by RA)
and match them by their RA,DEC coordinates.
Examples:
matchsort.py --help
matchsort.py --inputStdStarCatFile standard_stars_all_id6_pypsmFormat.csv --inputObsCatFile obsquery-20131002-g-r03p01.csv --outputMatchFile matched-20131002-g-r03p01.csv --verbose 1
"""
import math
import sys
import os
import re
##################################
def main():
    """Parse the command line and run the standard/observed catalog match."""
    import argparse
    arg_parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument('--inputStdStarCatFile', help='CSV file containing the standard star catalog to be used in the match', default='standardstarcat.csv')
    arg_parser.add_argument('--inputObsCatFile', help='CSV file containing the catalog of observations to be used in the match', default='obsquery.csv')
    arg_parser.add_argument('--outputMatchFile', help='CSV file containing the output of the match', default='matched.csv')
    arg_parser.add_argument('--verbose', help='verbosity level of output to screen (0, 1, 2, ...)', type=int, default=0)
    matchsort(arg_parser.parse_args())
##################################
#advanceheadlist takes an RA from the observed catalog and creates/updates
# a list of standard stars within a "sliding window" having a range of +/- 3arcsec of that RA
# and taken from a catalog of standard stars that has been pre-sorted into order of ascending RA
def advanceheadlist(fid,radegl,decdegl,magul,maggl,magrl,magil,magzl,magyl,ra2,radeg1,fd1,fd2,done1,radct,decdct,banddct,ccddct,obslinedct):
    """Advance the sliding RA window of standard stars for observed RA `ra2`.

    Reads lines from the standard-star file `fd1`, appending each star's
    RA/DEC/mags to the window lists and fresh empty match-lists to the
    per-star "dictionaries", until the window extends past ra2 + tol.
    Returns (done1, last RA read), where done1==1 once fd1 is exhausted.
    NOTE(review): the control flow around `done2` and the indentation of the
    final return were already questioned by the original author (comments
    below) — treat the exact loop behavior as load-bearing.
    """
    # DLT: I think done2 may currently be superfluous; see comments on the "while done2 == 0"
    # block below...
    done2 = 0
    # if we have finished reading the observed catalog, return...
    if (done1==1):
        return (done1,radeg1)
    # 3 arcsec tolerance, in degrees.
    tol=float(3.0/3600.)
    # if the previous standard star RA (radeg1) is above the upper bound of the tolerance
    # range, return...
    delta=float(radeg1-ra2)
    if (delta > tol):
        return (done1,radeg1)
    # DLT: I don't think this "while" (or the done2 parameter) is strictly necessary,
    # since the "return (done1, radeg0)" at the end of this method occurs *within* the
    # while loop, not after it. (Did I accidentally change the indentation of the
    # "return (done1, radeg0) in an earlier incarnation of this file???)
    while done2 == 0:
        # Read a line from the standard star file...
        l1=fd1.readline()
        # if we have reached the end of the standard star file,
        # return, indicating the standard star file is "done";
        # otherwise, process the new line...
        if l1 == "":
            return (1,radeg1)
        else:
            l1s=l1.strip().split(',')
            radeg0=float(l1s[0])
            # if the RA of the standard star RA (radeg0) is below the lower bound of the tolerance
            # range, return...
            if (radeg0-ra2) < -tol:
                return (done1,radeg1)
            # add the standard star info to lists of ra, dec, mags for this sliding window...
            radegl.append(radeg0)
            decdegl.append(float(l1s[1]))
            magul.append(float(l1s[2]))
            maggl.append(float(l1s[3]))
            magrl.append(float(l1s[4]))
            magil.append(float(l1s[5]))
            magzl.append(float(l1s[6]))
            magyl.append(float(l1s[7]))
            # initialize lists for possible observed star/standard star matches and add
            # these lists to "dictionaries" associated with this standard star...
            radct.append([])
            decdct.append([])
            ccddct.append([])
            banddct.append([])
            obslinedct.append([])
            # if the RA of the previous standard star is above the upper bound of the
            # tolerance range from the RA of this standard star, declare done2=1
            delta = (radeg1-radeg0)
            if (delta > tol):
                done2 = 1
            # DLT: Shouldn't this return be outside the "while done2 == 0" block?
            # With its current indentation, it is the final statement *within*
            # the "while done2 == 0" block.
            return (done1,radeg0)
##################################
# Write out the matched standard star/observed data to matched file...
def cleancurlist(fid,radegl,decdegl,magul,maggl,magrl,magil,magzl,magyl,radct,decdct,banddct,ccddct,obslinedct,ofd,ccdcount):
    """Flush the oldest standard star (index 0) of the sliding RA window.

    Writes one CSV row to `ofd` per matched observation whose band yields
    positive standard magnitudes, bumps the per-CCD counter, and then removes
    the star from every window list and match "dictionary".

    Returns the updated running star id `fid`.
    """
    # we only care about entry i=0 in the "dictionaries"... (why?)
    i=0
    # Loop through all the observations matched with standard star i...
    # (Note that many standard stars may have zero matches...)
    for j in range(0,len(radct[i])):
        band = banddct[i][j]
        stdmag = 0
        # Bug fix: stdcmag was previously uninitialized, so an observation in
        # an unrecognized band raised NameError at the (stdcmag>0.) test below.
        stdcmag = 0
        stdcolor = 0
        bandid = -1
        if band == 'u':
            bandid = 0
            stdmag=magul[i]
            stdcmag=maggl[i]
            stdcolor=magul[i]-maggl[i]
        if band == 'g':
            bandid = 1
            stdmag=maggl[i]
            stdcmag=magrl[i]
            stdcolor=maggl[i]-magrl[i]
        if band == 'r':
            bandid = 2
            stdmag=magrl[i]
            stdcmag=maggl[i]
            stdcolor=maggl[i]-magrl[i]
        if band == 'i':
            bandid = 3
            stdmag=magil[i]
            stdcmag=magzl[i]
            stdcolor=magil[i]-magzl[i]
        if band == 'z':
            bandid = 4
            stdmag=magzl[i]
            stdcmag=magil[i]
            stdcolor=magil[i]-magzl[i]
        if band == 'y' or band == 'Y':
            bandid = 5
            stdmag=magyl[i]
            stdcmag=magzl[i]
            stdcolor=magzl[i]-magyl[i]
        # only include standard star in output if stdmag and stdcmag are both positive...
        if ( (stdmag>0.) and (stdcmag>0.) ):
            # increment the running star id
            fid += 1
            ccdcount[ccddct[i][j]] += 1
            outputLine = """%d,%f,%f,%f,%f,%d,%d,%s\n""" % (fid,radegl[i],decdegl[i],stdmag,stdcolor,bandid,j,obslinedct[i][j])
            ofd.write(outputLine)
    # Delete the dictionaries associated with standard star i (=0 in the sliding RA window)...
    del radct[i]
    del decdct[i]
    del ccddct[i]
    del banddct[i]
    del obslinedct[i]
    # Delete the lists associated with standard star i (=0 in the sliding RA window)...
    del radegl[i]
    del decdegl[i]
    del magul[i]
    del maggl[i]
    del magrl[i]
    del magil[i]
    del magzl[i]
    del magyl[i]
    return fid
##################################
# Find first good match (not necessarily best matches) between an observed star and standard stars
# and place info into "dictionaries"
def incurlist(radegl,decdegl,ra2,dec2,band2,ccd2,obsline2,radct,decdct,banddct,ccddct,obslinedct):
    """Record an observed star against the first standard star in the sliding
    window lying within ~2 arcsec on the sky (first good match wins)."""
    deg_to_rad = float(3.1415926/180.0)
    # Squared 2-arcsec tolerance, in squared degrees.
    tol_sq = float((2.0/3600.)**2)
    cos_dec = math.cos(dec2*deg_to_rad)
    for idx in range(0, len(radegl)):
        # Squared small-angle separation, with the RA axis scaled by cos(dec).
        sep_sq = (ra2-radegl[idx])*(ra2-radegl[idx])*cos_dec*cos_dec+(dec2-decdegl[idx])*(dec2-decdegl[idx])
        if float(sep_sq) < float(tol_sq):
            radct[idx].append(ra2)
            decdct[idx].append(dec2)
            ccddct[idx].append(ccd2)
            banddct[idx].append(band2)
            obslinedct[idx].append(obsline2)
            # Take the first match and stop looking.
            break
    return 0
# The upper-level match method...
def matchsort(args):
    """Match an RA-sorted observed catalog against an RA-sorted standard-star
    catalog using a sliding RA window, writing matches to the output CSV.

    Python 2 code (print statements). Relies on both input CSVs being
    pre-sorted in ascending RA; exits the process via sys.exit(0) when
    verbose output is requested.
    """
    f1=args.inputStdStarCatFile
    f2=args.inputObsCatFile
    outfile=args.outputMatchFile
    verbose=args.verbose
    # initialize ccd counts for all ccds
    ccdcount=[]
    for i in range(0,63):
        ccdcount.append(0)
    # initialize "dictionaries".
    # Each element of "dictionary" is associated with a standard star.
    # Each element is a list of information from the potential matches from the observed data.
    radct=[]
    decdct=[]
    ccddct=[]
    banddct=[]
    obslinedct=[]
    # initialize lists of standard stars.
    # These are actually lists of standards within a given sliding window of RA.
    radegl=[]
    decdegl=[]
    magul=[]
    maggl=[]
    magrl=[]
    magil=[]
    magzl=[]
    magyl=[]
    # Open the output file for the standard star/observed star matches...
    ofd=open(outfile,'w')
    fid=0
    # Open the standard star CSV file...
    fd1=open(f1)
    # Identify columns in the standard star CSV file corresponding to
    # radeg, decdeg, magu, magg, magr, magi, magz, magy
    h1=fd1.readline()
    h1n=h1.strip().split(',')
    for i in range(0,len(h1n)):
        if h1n[i].upper() == 'RADEG':
            radegcol1=i
        if h1n[i].upper() == 'DECDEG':
            decdegcol1=i
        if h1n[i].upper() == 'MAGU':
            magucol1=i
        if h1n[i].upper() == 'MAGG':
            maggcol1=i
        if h1n[i].upper() == 'MAGR':
            magrcol1=i
        if h1n[i].upper() == 'MAGI':
            magicol1=i
        if h1n[i].upper() == 'MAGZ':
            magzcol1=i
        if h1n[i].upper() == 'MAGY':
            magycol1=i
    # Open CSV file of observed data...
    fd2=open(f2)
    # Identify columns in the CSV observed data file corresponding to ra,dec
    h2=fd2.readline()
    # ... but first write header for the output CSV file...
    outputHeader = h2.strip().upper()
    outputHeader = """fid,radegstd,decdegstd,magstd,colorstd,bandidstd,j,%s\n""" % (outputHeader)
    ofd.write(outputHeader)
    h2n=h2.strip().split(',')
    for i in range(0,len(h2n)):
        if h2n[i].upper() == 'RA':
            racol2=i
        if h2n[i].upper() == 'DEC':
            deccol2=i
        if h2n[i].upper() == 'BAND':
            bandcol2=i
        if h2n[i].upper() == 'CCDNUM':
            ccdcol2=i
    # initialize some variables
    # done = "are we done with the whole process yet?"
    # done1 = "are we done reading the observations file yet?"
    # radeg1, decdeg1 = "initial/previous values for standard star RA,DEC"
    # ra2, dec2 = "initial/previous values for observed star RA,DEC"
    # tol = "tolerance in arcsec"
    # linecnt = "line count"
    done=0
    done1=0
    radeg1=-999
    decdeg1=-999
    ra2=-999
    dec2=-999
    tol = 3/3600.0
    linecnt = 0
    # Loop through file of observed data...
    while not done:
        linecnt += 1
        if ( (linecnt/1000.0 == int(linecnt/1000.0)) and (verbose > 1) ):
            print '\r'+'Progress (lines read from observed catalog): ',linecnt,
            sys.stdout.flush()
        l2=fd2.readline()
        # Are we done reading through the file of observed data yet?
        # If so, break out of loop; otherwise, process the data line...
        if l2 == "":
            done = 1
            break
        else:
            #obsline2 holds the whole line of information for this entry for future use...
            obsline2=l2.strip()
            l2s=l2.strip().split(',')
            ra2=float(l2s[racol2])
            dec2=float(l2s[deccol2])
            band2=str(l2s[bandcol2])
            ccd2=int(l2s[ccdcol2])
            # Update the sliding RA window of possibly matching standard stars
            # and find possible matches...
            doneheadadv=0
            while not doneheadadv:
                (done1,radeg1)=advanceheadlist(fid,radegl,decdegl,magul,maggl,magrl,magil,magzl,magyl,ra2,radeg1,fd1,fd2,done1,radct,decdct,banddct,ccddct,obslinedct)
                if (done1 == 1):
                    break
                if (radeg1-ra2 > tol):
                    break
                #break
            # For this observed star, fill the "dictionaries" with all good matches from the sliding window lists of possible matches
            # that fall within a radial tolerance of 2 arcsec...
            incurlist(radegl,decdegl,ra2,dec2,band2,ccd2,obsline2,radct,decdct,banddct,ccddct,obslinedct)
            tol = float(3/3600.0)
            # Flush window entries that have fallen behind the current RA...
            done3=0
            while not done3:
                if len(radegl) > 1:
                    delt = ra2-radegl[0]
                    if delt > tol:
                        fid=cleancurlist(fid,radegl,decdegl,magul,maggl,magrl,magil,magzl,magyl,radct,decdct,banddct,ccddct,obslinedct,ofd,ccdcount)
                    else:
                        break
                else:
                    done3 = 1
    # Flush whatever is left in the window after the input is exhausted...
    done3 = 0
    while not done3:
        if len(radegl) > 0:
            fid=cleancurlist(fid,radegl,decdegl,magul,maggl,magrl,magil,magzl,magyl,radct,decdct,banddct,ccddct,obslinedct,ofd,ccdcount)
        else:
            break
    # close the input and output files...
    fd1.close()
    fd2.close()
    ofd.close()
    if verbose > 0:
        print
        print 'ccdnum,nstds'
        for i in range(0,63):
            print str(i)+' '+str(ccdcount[i])
        print
    sys.exit(0)
##################################
# Script entry point.
if __name__ == "__main__":
    main()
##################################
|
def inicia_cadastro(alunos):
    """Show the menu, read an option and dispatch to the chosen action."""
    print('Escolha a opção:')
    print('1 - Listar alunos')
    print('2 - Cadastrar aluno')
    print('3 - Buscar aluno')
    escolha = int(input('Opção: '))
    # Dispatch table instead of an if/elif chain; unknown options do nothing,
    # exactly as before.
    acoes = {1: listar_alunos, 2: cadastrar_aluno, 3: buscar_aluno}
    if escolha in acoes:
        acoes[escolha](alunos)
def listar_alunos(alunos):
    """Print every student, or a notice when the list is empty."""
    if not alunos:
        print('lista vazia!')
        return
    print('Exibindo a lista de alunos:')
    for aluno in alunos:
        print(aluno)
def cadastrar_aluno(alunos):
    """Ask for a name and append it to the student list."""
    novo_aluno = input('Informe o nome do aluno: ')
    alunos.append(novo_aluno)
def buscar_aluno(alunos):
    """Find a student by exact (case-insensitive) name and print its index."""
    nome_procurado = input('Informe o nome exato do aluno: ')
    alvo = nome_procurado.lower().strip()
    for indice, aluno in enumerate(alunos):
        if aluno.lower().strip() == alvo:
            print('O indice do aluno {} é {}'.format(nome_procurado, indice))
            return
    print('Aluno não encontrado!')
# Script entry point: loop the menu forever (exit with Ctrl+C).
if __name__ == '__main__':
    lista_de_alunos = []
    while True:
        inicia_cadastro(lista_de_alunos)
|
# This line is a convenience to import most packages you'll need. You may need to import others (eg random and cmath)
import IPython, numpy as np, scipy as sp, matplotlib.pyplot as plt, matplotlib, sklearn, librosa, cmath,math
from IPython.display import Audio
# This line makes sure your plots happen IN the webpage you're building, instead of in separate windows.
from scipy.fftpack import fft
from scipy.signal import hann
def stft(signal, window_size, hop_size, window_type = 'hann'):
    """
    Computes the short term fourier transform of a 1-D numpy array, where the array
    is windowed into a set of subarrays, each of length window_size. The distance between
    window centers (in samples) is given by hop_size. The type of window applied is
    determined by window_type. This returns a 2-D numpy array where the ith column
    is the FFT of the ith window. Each column contains an array of complex values.

    Input Parameters
    ----------------
    signal: The 1-d (complex or real) numpy array containing the signal
    window_size: an integer scalar specifying the number of samples in a window
    hop_size: an integer specifying the number of samples between the start of adjacent windows
    window_type: a string specifying one of two "hann" or "rectangular"

    Returns
    -------
    a 2D numpy array of complex numbers where the ith column is the FFT of the ith window,
    and the jth element in the ith column is the jth frequency of analysis.
    """
    # figure out how many hops
    length_to_cover_with_hops = len(signal) - window_size
    assert (length_to_cover_with_hops >= 0), "window_size cannot be longer than the signal to be windowed"
    # Bug fix: use floor division so num_hops stays an int on Python 3
    # (true division produced a float, breaking range() and [0]*num_hops below).
    num_hops = 1 + length_to_cover_with_hops // hop_size
    # make our window function
    if (window_type == 'hann'):
        window = sp.signal.hann(window_size, sym=False)
    else:
        window = np.ones(window_size)
    frames = [0] * num_hops
    # fill the array with values
    for hop in range(num_hops):
        start = hop * hop_size
        end = start + window_size
        windowed_sound = signal[start:end] * window
        frames[hop] = fft(windowed_sound, window_size)
    return np.array(frames).T
from scipy.fftpack import ifft
def istft(X, hop_size):
    """
    Inverse short-time Fourier transform via overlap-add.

    Input Parameters
    ----------------
    X: a 2-D numpy array of complex numbers where the ith column is the FFT
       of the ith window and the jth row is the jth frequency of analysis.
    hop_size: integer number of samples between the starts of adjacent windows.

    Returns
    -------
    a 1-d numpy array of (possibly complex) values representing the original
    signal the STFT was built from.
    """
    window_size, num_hops = X.shape
    # Total length spanned by num_hops windows spaced hop_size apart.
    total_samples = (num_hops - 1) * hop_size + window_size
    output = np.zeros(total_samples, dtype='complex')
    # Inverse-FFT each frame and overlap-add it into the output buffer.
    for frame in range(num_hops):
        offset = frame * hop_size
        output[offset:offset + window_size] += ifft(X[:, frame])
    return output
def plt_spectrogram(X,win_length, hop_size, sample_rate, zoom_x=None, zoom_y=None,tick_labels='time-freq'):
    """
    Plot the log magnitude spectrogram of an STFT matrix.

    Input Parameters:
    ------------------
    X: 2D complex numpy array containing the stft values. Rows correspond to
       frequency bins and columns to time frames.
    win_length: the length of the analysis window
    hop_size: the hop size between adjacent windows
    sample_rate: sampling frequency
    tick_labels: the type of x and y tick labels, two options:
       'time-freq': times (sec) on the x-axis, frequency (Hz) on the y-axis (default)
       'bin-frame': time-frame number on the x-axis, frequency-bin number on the y-axis
    zoom_x: 1 by 2 numpy array with the x-axis range, e.g. np.array([x_start, x_end])
    zoom_y: 1 by 2 numpy array with the y-axis range, e.g. np.array([y_start, y_end])

    Returns:
    ---------
    The 2D real numpy array actually plotted: the lower (non-negative
    frequency) half of 20*log10(|X|), in dB.
    """
    # Size of the stft: (frequency bins, time frames).
    Nf,Nt=np.shape(X)
    # Log magnitude spectrogram in dB.
    X=20*np.log10(np.abs(X))
    # Keep only the first half of the spectrum for each time frame.
    # BUG FIX: floor division — under Python 3 a plain '/' yields a float,
    # which is not a valid slice bound. '//' is identical under Python 2.
    X=X[0:Nf//2]
    Nf=np.shape(X)[0]
    # Time instant (seconds) of each frame.
    times=(hop_size/float(sample_rate))*np.arange(Nt)
    # Analysis frequency (Hz) of each retained bin.
    freqs=(float(sample_rate)/win_length)*np.arange(Nf)
    # Coordinate matrices for pcolormesh.
    times_matrix,freqs_matrix=np.meshgrid(times,freqs)
    plt.title('Log magnitude spectrogram')
    if tick_labels == 'bin-frame':
        plt.pcolormesh(X)
        plt.xlabel('Time-frame Number')
        plt.ylabel('Frequency-bin Number')
    else:
        plt.pcolormesh(times_matrix,freqs_matrix,X)
        plt.xlabel('Time (sec)')
        plt.ylabel('Frequency (Hz)')
    # Zoom in on the plot if requested; otherwise fit the axes tightly.
    if zoom_x is None and zoom_y is None:
        plt.axis('tight')
    if zoom_x is not None:
        plt.xlim(zoom_x)
    if zoom_y is not None:
        plt.ylim(zoom_y)
    return X
from scipy import signal
from scipy import fftpack
from time import time
time1 = time()
from librosa import load
import matplotlib.pyplot as plt
from IPython.display import Audio
# This line makes sure your plots happen IN the webpage you're building, instead of in separate windows.
# STFT analysis parameters: 2048-sample windows with 50% overlap.
window_size = 2048
hop_size = 1024
# Clean studio recording.
beethovenrecord, sr = load('beetrecord10secsCLIPPED.wav')
# zoom_x=(8.5*sr,10*sr),
X = stft(beethovenrecord, window_size, hop_size)
V = plt_spectrogram(X, window_size, hop_size, sr)
# Same piece with a voice over it.
beethovenrecord_voice, sr = load('beetrecord10secsVoice.wav')
# zoom_x=(8.5*sr,10*sr),
X_voice = stft(beethovenrecord_voice, window_size, hop_size)
V_voice = plt_spectrogram(X_voice, window_size, hop_size, sr)
# Live recording of the same piece.
beethovenrecord_live, sr = load('beetrecord10secsLiveCLIPPED.wav')
# zoom_x=(8.5*sr,10*sr),
X_live = stft(beethovenrecord_live, window_size, hop_size)
V_live = plt_spectrogram(X_live, window_size, hop_size, sr)
# Difference of log-magnitude spectrograms: voiced take minus clean take.
V_mask = V_voice - V
print np.min(V_mask)
print np.max(V_mask)
print X_live.shape
print X.shape
# X_new = X_live - X[:,:215]
# print sr
# f, t, Sxx = signal.spectrogram(beethovenrecord, sr)
# newsignal = fftpack.ifft(symS)
# plt.pcolormesh(t, f, Sxx)
# plt.show()
# NOTE(review): X_new is never assigned — its only assignment is the
# commented-out line above — so the next line raises NameError at runtime.
newsignal = istft(X_new, hop_size)
Audio(newsignal, rate=sr)
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" USB3 control request handler gateware. """
|
"""
pytorch_evaluation.py
Pytorch evaluation.
"""
import torch
from maxsmi.pytorch_data import data_to_pytorch_format
def model_evaluation(
    data_loader,
    ml_model_name,
    ml_model,
    smiles_dictionary,
    max_length_smiles,
    device_to_use,
):
    """
    Evaluation per batch of a pytorch machine learning model.

    Parameters
    ----------
    data_loader : torch.utils.data
        The data as seen by Pytorch for mini-batches.
    ml_model_name : str
        Name of the machine learning model, e.g. "CONV1D", "CONV2D", or "RNN".
    ml_model : nn.Module
        Instance of the pytorch machine learning model.
    smiles_dictionary : dict
        The dictionary of SMILES characters.
    max_length_smiles : int
        The length of the longest SMILES.
    device_to_use : torch.device
        The device to use for model instance, "cpu" or "cuda".

    Returns
    -------
    tuple : (torch.Tensor, torch.Tensor)
        The true and predicted output values in the data loader, each
        reshaped to (n_samples, 1).
    """
    ml_model.eval()
    with torch.no_grad():
        all_output_pred = []
        all_output_true = []
        for _, data in enumerate(data_loader):
            # SMILES and target
            smiles, target = data
            input_true, output_true = data_to_pytorch_format(
                smiles,
                target,
                smiles_dictionary,
                max_length_smiles,
                ml_model_name,
                device_to_use,
            )
            # Prediction
            output_pred = ml_model(input_true)
            all_output_pred.append(output_pred)
            all_output_true.append(output_true)
        # BUG FIX: torch.stack requires every batch tensor to have the same
        # shape, so it failed when the final batch was smaller than the
        # rest (drop_last=False). torch.cat concatenates along dim 0,
        # handles a ragged last batch, and yields the same values as the
        # old stack+view for equal-sized batches. (Assumes batch outputs
        # are at least 1-D, as produced by standard nn modules.)
        all_output_pred = torch.cat(all_output_pred).view(-1, 1)
        all_output_true = torch.cat(all_output_true).view(-1, 1)
    return (all_output_true, all_output_pred)
|
# Generated by Django 2.2.13 on 2020-07-16 10:45
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drops the Customer model from the shop app."""

    dependencies = [
        ('shop', '0072_customer'),
    ]
    operations = [
        migrations.DeleteModel(
            name='Customer',
        ),
    ]
|
# Read "zeros.in": first line is the number of test cases; each case gives
# x (count of 1-bits), y (count of 0-bits), a modulus m and a threshold k.
# Starting from x ones followed by y zeros, repeatedly slide each 1
# rightwards through the zero block and count how many of the produced
# binary numbers satisfy value % m >= k.
file_in = open("zeros.in", "r")
cases = int(file_in.readline())
while cases > 0:
    x, y, m, k = [int(tok) for tok in file_in.readline().split()]
    bits = [1] * x + [0] * y
    total = x + y

    def value_of(b):
        # Interpret the bit list as a binary number, most significant first.
        return sum(1 << (total - pos - 1) for pos in range(total) if b[pos] == 1)

    matches = 1 if value_of(bits) % m >= k else 0
    for i in range(x - 1, 0, -1):
        # Slide the i-th one rightwards through the block of zeros.
        for j in range(i, i + y):
            bits[j], bits[j + 1] = bits[j + 1], bits[j]
        if value_of(bits) % m >= k:
            matches += 1
    print(matches)
    cases -= 1
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2' # suppress TensorFlow C++ build/info warnings
import tensorflow as tf
import numpy as np
from util import y_indicator, classification_rate
from sklearn.model_selection import train_test_split
class HiddenLayer:
    """A dense layer: weight matrix W of shape (Min, Mout) and bias b (Mout,)."""

    def __init__(self, n, Min, Mout):
        self.n = n          # layer index / identifier
        self.Min = Min      # input dimensionality
        self.Mout = Mout    # output dimensionality
        self.W, self.b = self._init_weights(Min, Mout)

    def _init_weights(self, Min, Mout):
        '''Initialise weights for weight matrix W and bias vector b.

        Parameters
        ----------
        Min: int
            input dimensions
        Mout: int
            output layer dimensions

        Returns
        -------
        W: tf.Variable (Min x Mout)
            randomly initialised (standard normal) weight matrix
        b: tf.Variable (Mout,)
            bias vector of zeros
        '''
        weight_matrix = tf.Variable(tf.random_normal((Min, Mout)))
        bias_vector = tf.Variable(tf.zeros(Mout))
        return weight_matrix, bias_vector

    def forward(self, X, activation='relu'):
        '''Return activation(X @ W + b) for 'relu', 'tanh' or 'sigmoid'.'''
        pre_activation = tf.matmul(X, self.W) + self.b
        if activation == 'relu':
            return tf.nn.relu(pre_activation)
        elif activation == 'tanh':
            return tf.tanh(pre_activation)
        elif activation == 'sigmoid':
            return tf.sigmoid(pre_activation)
class ANN:
    def __init__(self):
        '''
        Feed-forward neural network classifier with one or more hidden
        layers, implemented in TensorFlow 1.x.
        '''
        self.batch_counter = -1  # currently unused; kept for API compatibility

    def _cost(self, T, Yish):
        '''Summed softmax cross-entropy between targets T and logits Yish.'''
        return tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(labels=T, logits=Yish))

    def _forward(self, X, layers, activation='relu'):
        '''Forward pass: hidden activations through all but the last layer,
        then a linear output layer producing logits.'''
        Z = layers[0].forward(X, activation)
        for layer in layers[1:-1]:
            Z = layer.forward(Z, activation)
        return tf.matmul(Z, layers[-1].W) + layers[-1].b

    def _get_next_batch(self, i, X, T, Y, batch_size=100):
        '''Return the i-th mini-batch slice of (X, T, Y).'''
        start = i * batch_size
        end = (i + 1) * batch_size
        return X[start:end,], T[start:end,], Y[start:end]

    def fit(self, Xval, Yval,
            M=None,
            activation='relu',
            epochs=100,
            learning_rate=0.00001,
            L2=0.1,
            batch_size=None,
            print_freq=10):
        '''
        Fit the model.

        Parameters
        ----------
        Xval: nd-array (N, D)
            input features, D dimensions, N rows
        Yval: 1d-array (N,)
            class labels (converted internally to an indicator matrix)
        M: list of ints or None
            hidden layer sizes; defaults to [100]
        activation: str
            description of activation function
        epochs: int
            number of epochs
        learning_rate: float
            learning rate
        L2: float
            L2 regularisation rate.
            NOTE(review): currently unused — it is never added to the
            cost. TODO: apply it or drop the parameter.
        batch_size: None or int
            mini-batch size; None trains on the full training set
        print_freq: int
            how often (in epochs) to report and record progress

        Returns
        -------
        list of (cost_train, cost_test, rate_train, rate_test) tuples,
        one per reported epoch.
        '''
        # Avoid the mutable-default-argument pitfall for M.
        if M is None:
            M = [100]
        Tval = y_indicator(Yval).astype(np.float32)
        Yval = Yval.astype(np.float32)
        Xtrain, Xtest, Ytrain, Ytest, Ttrain, Ttest = train_test_split(
            Xval, Yval, Tval, stratify=Yval, test_size=0.1, random_state=42)
        N, D = Xtrain.shape
        _, K = Tval.shape
        print('N={}, D={}, K={}'.format(N, D, K))
        X = tf.placeholder(tf.float32, shape=(None, D))
        T = tf.placeholder(tf.float32, shape=(None, K))
        # Chain the layer dimensions: D -> M[0] -> ... -> M[-1] -> K.
        layer_sizes = [D] + M + [K]
        layers = [HiddenLayer(i, Min, Mout)
                  for i, (Min, Mout) in enumerate(zip(layer_sizes, layer_sizes[1:]))]
        Y = self._forward(X, layers, activation)
        cost = self._cost(T, Y)
        prediction = tf.argmax(Y, axis=1)
        train = tf.train.RMSPropOptimizer(
            learning_rate,
            decay=0.999,
            momentum=0.9
        ).minimize(cost)
        results = []
        with tf.Session() as session:
            session.run(tf.global_variables_initializer())
            for n in range(epochs):
                if batch_size is not None:
                    # Ceiling division: the old "N // batch_size + 1"
                    # produced an extra empty batch whenever N divided
                    # evenly by batch_size.
                    n_batches = (N + batch_size - 1) // batch_size
                    for i in range(n_batches):
                        Xbatch, Tbatch, Ybatch = self._get_next_batch(
                            i, Xtrain, Ttrain, Ytrain, batch_size=batch_size
                        )
                        session.run(train, feed_dict={X:Xbatch, T:Tbatch})
                else:
                    # BUG FIX: previously no training step ran at all when
                    # batch_size was None, and the reporting block below
                    # crashed on the undefined Xbatch. Train full-batch.
                    Xbatch, Tbatch, Ybatch = Xtrain, Ttrain, Ytrain
                    session.run(train, feed_dict={X:Xbatch, T:Tbatch})
                if n % print_freq == 0:
                    cost_train = session.run(cost, feed_dict={X:Xbatch, T:Tbatch})
                    cost_test = session.run(cost, feed_dict={X:Xtest, T:Ttest})
                    pred_train = session.run(prediction, feed_dict={X:Xbatch, T:Tbatch})
                    pred_test = session.run(prediction, feed_dict={X:Xtest, T:Ttest})
                    rate_train = classification_rate(pred_train, Ybatch)
                    rate_test = classification_rate(pred_test, Ytest)
                    print(n, cost_train, cost_test, rate_train, rate_test)
                    results.append((cost_train, cost_test, rate_train, rate_test))
        return results
def main():
    """Train the ANN on the facial-recognition dataset (demo entry point)."""
    from facial_recognition import get_data
    from util import get_binary_data
    print('Get data')
    X, Y = get_data()
    # X, Y = get_binary_data(X, Y)
    model = ANN()
    print('Fit')
    # NOTE(review): the returned training history is captured but unused.
    results = model.fit(X, Y,
                        activation='tanh',
                        M=[1000,500,100],
                        batch_size=100,
                        learning_rate=5e-7,
                        L2=1e-3
                        )
# Script entry point.
if __name__=='__main__':
    main()
|
from sentence_transformers import SentenceTransformer
from app.main.lib.shared_models.shared_model import SharedModel
from app.main.lib.similarity_measures import angular_similarity
class MultiSbert(SharedModel):
    """Multilingual sentence-BERT embedding model behind the SharedModel API."""

    def load(self):
        """Instantiate the transformer, preferring an explicit model_url."""
        fallback = self.options.get('model_name', 'distiluse-base-multilingual-cased')
        self.model = SentenceTransformer(self.options.get("model_url") or fallback)

    def respond(self, doc):
        """Service entry point: return the embedding of the given document."""
        return self.vectorize(doc)

    def similarity(self, vecA, vecB):
        """Angular similarity between two embedding vectors."""
        return angular_similarity(vecA, vecB)

    def vectorize(self, doc):
        """
        vectorize: Embed a text snippet in the vector space.

        Returns a plain Python list of floats.
        """
        return self.model.encode([doc])[0].tolist()
|
# Demo of Python 2 string alignment and formatting idioms.
text = 'hello world'
# str.ljust / rjust / center pad to a total width of 20 with spaces.
print text.ljust(20)
print text.rjust(20)
print text.center(20)
# An explicit fill character may be supplied.
print text.rjust(20,'=')
print text.center(20,'*')
# format() with an alignment spec: '>' right, '^' centre, '<' left;
# the character before the alignment marker is the fill.
print format(text,'>20')
print format(text,'-^20')
print format(text,'=<20s')
print format(text,'=<20')
# str.format with field widths.
print '{:>10}{:>10}'.format('hello','world')
f = 1.234567
print format(f,'>10')
print format(f,'^10.2f')
# Old-style %-formatting: '-' left-aligns within the field width.
print '%-20s' % text
print '%20s' % text
|
import pytest
from mathcrypto.math.funcs import MathFunctions
@pytest.mark.parametrize("num,expected", [(65, 48), (240, 64), (17, 16)])
def test_phi(num, expected):
    # Euler's totient of num should match the known value.
    assert MathFunctions.phi(num) == expected
@pytest.mark.parametrize("num_a,num_b,expected", [(135, 186, 3), (132, 84, 12), (1701, 3768, 3)])
def test_euclid_gcd(num_a, num_b, expected):
    # Greatest common divisor by the Euclidean algorithm.
    assert MathFunctions.euclid_gcd(num_a, num_b) == expected
@pytest.mark.parametrize(
    "problem,expected",
    [
        # Each problem is a list of [remainder, modulus] pairs.
        ([[8, 9], [3, 5]], 8),
        ([[7, 9], [4, 6]], 2),
        ([[7, 9], [3, 5]], 43),
    ],
)
def test_crt(problem, expected):
    # Chinese Remainder Theorem solution of the congruence system.
    assert MathFunctions.crt(problem) == expected
@pytest.mark.parametrize("modulus,number,expected", [(13, 7, 2), (24, 5, 5), (7, 3, 5)])
def test_eea(modulus, number, expected):
    # Modular inverse via the extended Euclidean algorithm.
    assert MathFunctions.eea(modulus, number) == expected
|
"""
CCT modelling and optimisation code
A07 — proton-related constants and calculations: Protons examples
Author: Zhao Runxiao
Date: 2021-04-29
"""
from os import error, path
import sys
sys.path.append(path.dirname(path.abspath(path.dirname(__file__))))
from cctpy import *
# The Protons class bundles many proton-related constants and helpers.
print("质子静止质量,单位kg",Protons.STATIC_MASS_KG)
# 1.672621898e-27
print("质子静止能量,单位J",Protons.STATIC_ENERGY_J)
# 1.5032775928961053e-10
print("质子静止能量,单位eV",Protons.STATIC_ENERGY_eV)
# 938272081.4796858
print("质子静止能量,单位MeV",Protons.STATIC_ENERGY_MeV)
# 938.2720813
print("质子电荷量,单位C",Protons.CHARGE_QUANTITY)
# 1.6021766208e-19
# get_total_energy_MeV(kinetic_energy_MeV): total proton energy.
# Takes the kinetic energy kinetic_energy_MeV; all units are MeV.
print("计算质子总能量 MeV,动能 250 MeV",Protons.get_total_energy_MeV(250))
# 1188.2720813
# get_total_energy_J(kinetic_energy_MeV): total proton energy.
# Kinetic energy in MeV; return value in joules.
print("计算质子总能量 J,动能 250 MeV",Protons.get_total_energy_J(250))
# 1.903821747808217e-10
# get_relativistic_mass(kinetic_energy_MeV): relativistic proton mass.
# Kinetic energy in MeV; return value in kg.
print("计算质子计算动质量 Kg,动能 250 MeV",Protons.get_relativistic_mass(250))
# 2.1182873744149107e-27
# get_speed_m_per_s(kinetic_energy_MeV): proton speed.
# Kinetic energy in MeV; return value in m/s.
print("计算质子速度 m/s,动能 250 MeV",Protons.get_speed_m_per_s(250))
# 183955177.96913892
# get_momentum_kg_m_pre_s(kinetic_energy_MeV): proton momentum.
# Kinetic energy in MeV; return value in kg m/s.
print("计算质子动量 kg m/s,动能 250 MeV",Protons.get_momentum_kg_m_pre_s(250))
# 3.896699309502749e-19
# getMomentum_MeV_pre_c(kinetic_energy_MeV): proton momentum.
# Kinetic energy in MeV; return value in MeV/c.
print("计算质子动量 MeV/c,动能 250 MeV",Protons.getMomentum_MeV_pre_c(250))
# 729.1337833520677
# get_magnetic_stiffness(kinetic_energy_MeV): magnetic rigidity.
# Kinetic energy in MeV; return value in T/m (unit as named by the API).
print("计算质子磁钢度 T/m,动能 250 MeV",Protons.get_magnetic_stiffness(250))
# 2.4321284301084396
# get_kinetic_energy_MeV(momentum_KG_M_PER_S): convert proton momentum
# (kg m/s) to kinetic energy (MeV).
print("质子动量 kg m/s 转动能 MeV,动量 3.896699309502749e-19 kg m/s",Protons.get_kinetic_energy_MeV(3.896699309502749e-19))
# 249.99999982031446
# get_kinetic_energy_MeV_after_momentum_dispersion(old_kinetic_energy_MeV, momentum_dispersion):
# kinetic energy (MeV) after applying a momentum dispersion.
# Parameters:
#   old_kinetic_energy_MeV  original kinetic energy, MeV
#   momentum_dispersion     momentum dispersion, 0~1
print("计算动量分散后的动能 MeV,原动能 250 MeV,动量分散 -20%",Protons.get_kinetic_energy_MeV_after_momentum_dispersion(250,-0.2))
# 166.53630221606724
# convert_momentum_dispersion_to_energy_dispersion(momentum_dispersion, kinetic_energy_MeV):
# convert a momentum dispersion into an energy dispersion.
# Parameters:
#   momentum_dispersion  momentum dispersion
#   kinetic_energy_MeV   kinetic energy, MeV
print("将 20% 动量分散转为能量分散,中心动能为250 MeV",Protons.convert_momentum_dispersion_to_energy_dispersion(0.2,250))
# 0.3579220947905309
# convert_energy_dispersion_to_momentum_dispersion(energyDispersion, kineticEnergy_MeV):
# convert an energy dispersion into a momentum dispersion.
# Parameters:
#   energyDispersion    energy dispersion
#   kinetic_energy_MeV  kinetic energy, MeV
print("将0.3579220947905309能量分散转为动量分散,中心动能为250 MeV",Protons.convert_energy_dispersion_to_momentum_dispersion(0.3579220947905309,250))
# 0.2
|
from django import forms
from django.forms import (formset_factory, modelformset_factory)
from .models import (Book, Author, Service, Worker, ServicesWorkers)
class BookForm(forms.Form):
    """Plain (non-model) form with a single styled 'name' field."""
    name = forms.CharField(
        label='Book Name',
        widget=forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder': 'Enter Book Name here'
        })
    )
BookFormset = formset_factory(BookForm)
class BookModelForm(forms.ModelForm):
    """ModelForm for Book exposing only 'name', with Bootstrap styling."""
    class Meta:
        model = Book
        fields = ('name', )
        labels = {
            'name': 'Book Name'
        }
        widgets = {
            'name': forms.TextInput(attrs={
                'class': 'form-control',
                'placeholder': 'Enter Book Name here'
            }
            )
        }
# Model formsets, one per model. Each shows one extra blank row (extra=1)
# and styles its text input with Bootstrap classes.
BookModelFormset = modelformset_factory(
    Book,
    fields=('name', ),
    extra=1,
    widgets={
        'name': forms.TextInput(attrs={
            'class': 'form-control',
            'placeholder': 'Enter Book Name here'
        }
        )
    }
)
AuthorFormset = modelformset_factory(
    Author,
    fields=('name', ),
    extra=1,
    widgets={'name': forms.TextInput(attrs={
        'class': 'form-control',
        'placeholder': 'Enter Author Name here'
    })
    }
)
ServiceFormset = modelformset_factory(
    Service,
    fields=('service_name', ),
    extra=1,
    widgets={'service_name': forms.TextInput(attrs={
        'class': 'form-control',
        'placeholder': "Enter the service's name",
    })
    }
)
WorkerFormset = modelformset_factory(
    Worker,
    fields=('worker_name', ),
    extra=1,
    widgets={'worker_name': forms.TextInput(attrs={
        'class': 'form-control',
        'placeholder': "Enter the worker's name",
    })
    }
)
# Edits existing ServicesWorkers rows only (extra=0): toggles service_enabled.
ServiceWorkerFormset = modelformset_factory(
    ServicesWorkers,
    fields=('service_enabled', ),
    extra=0,
    widgets={'service_enabled': forms.CheckboxInput(attrs={
        'class': 'form-control',
    })
    }
)
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verify that relinking a solib doesn't relink a dependent executable if the
solib's public API hasn't changed.
"""
import os
import sys
import TestCommon
import TestGyp
# NOTE(fischman): This test will not work with other generators because the
# API-hash-based-mtime-preservation optimization is only implemented in
# ninja.py. It could be extended to the make.py generator as well pretty
# easily, probably.
# (also, it tests ninja-specific out paths, which would have to be generalized
# if this was extended to other generators).
test = TestGyp.TestGyp(formats=['ninja'])
if not os.environ.get('ProgramFiles(x86)'):
    # TODO(scottmg)
    print 'Skipping test on x86, http://crbug.com/365833'
    test.pass_test()
test.run_gyp('solibs_avoid_relinking.gyp')
# Build the executable, grab its timestamp, touch the solib's source, rebuild
# executable, ensure timestamp hasn't changed.
test.build('solibs_avoid_relinking.gyp', 'b')
test.built_file_must_exist('b' + TestCommon.exe_suffix)
pre_stat = os.stat(test.built_file_path('b' + TestCommon.exe_suffix))
# Bump the solib source's mtime without changing its contents, so the solib
# relinks but its public API hash stays the same.
os.utime(os.path.join(test.workdir, 'solib.cc'),
         (pre_stat.st_atime, pre_stat.st_mtime + 100))
test.sleep()
test.build('solibs_avoid_relinking.gyp', 'b')
post_stat = os.stat(test.built_file_path('b' + TestCommon.exe_suffix))
# The executable must NOT have been relinked: identical mtime expected.
if pre_stat.st_mtime != post_stat.st_mtime:
    test.fail_test()
else:
    test.pass_test()
|
from django.db import models
class Produto(models.Model):
    """Product with a single display name."""
    # Display name of the product (max 60 characters).
    nome = models.CharField(max_length=60)
|
import pytesseract as pt
from PIL import Image

# Open the source image.
image = Image.open('C:\\Users\\wh\\Desktop\\2.jpg')
# Run OCR on the image via pytesseract; the return value is the
# recognised text.
text = pt.image_to_string(image)
print(text)
class GroupController(object):
    """Thin controller that delegates group operations to an injected service."""

    def __init__(self, groupService):
        # Service object implementing the actual group persistence logic.
        self.groupService = groupService

    def addGroup(self, id, name, members):
        """Create a group via the underlying service and return its result."""
        return self.groupService.addGroup(id, name, members)
from .configuration import Configuration
from .component import Component
from .dataDrivenProperty import DataDrivenProperty
from .release import Release
from mongoengine import *
class Project(Document):
    """MongoDB document describing a project (stored in the 'projects' collection)."""
    attributes = DictField()
    automationTools = ListField(StringField())
    components = ListField(EmbeddedDocumentField(Component))
    configuration = ReferenceField(Configuration)
    dataDrivenProperties = ListField(EmbeddedDocumentField(DataDrivenProperty))
    defaultRelease = StringField()
    description = StringField()
    lastUpdated = DateTimeField()
    name = StringField(required=True)  # the only required field
    releases = ListField(EmbeddedDocumentField(Release))
    tags = ListField(StringField())
    meta = {'collection': 'projects'}
|
# encoding: utf-8
from zope.interface import alsoProvides
from dexterity.localrolesfield.field import LocalRolesField, LocalRoleField
from plone import api
from plone.autoform.interfaces import IFormFieldProvider
from plone.app.robotframework.testing import AUTOLOGIN_LIBRARY_FIXTURE
from plone.app.testing import FunctionalTesting
from plone.app.testing import IntegrationTesting
from plone.app.testing import PloneWithPackageLayer
from plone.app.testing import ploneSite
from plone.dexterity.content import Container
from plone.supermodel import model
from plone.testing import z2
from zope import interface
from zope.schema import Choice
from zope.schema.fieldproperty import FieldProperty
from zope.schema.vocabulary import SimpleVocabulary
import dexterity.localrolesfield as package
class ITestingType(model.Schema):
    """Dexterity schema exercising the multi-valued LocalRolesField."""
    # Multi-valued field over functional suffixes.
    localrole_field = LocalRolesField(
        title=u'localrolefield',
        required=False,
        value_type=Choice(vocabulary=SimpleVocabulary.fromValues([u'support',
                                                                  u'mail'])),
    )
    # Multi-valued field over user names.
    localrole_user_field = LocalRolesField(
        title=u'localroleuserfield',
        required=False,
        value_type=Choice(vocabulary=SimpleVocabulary.fromValues([u'john',
                                                                  u'jane',
                                                                  u'tom',
                                                                  u'kate'])),
    )
class TestingType(Container):
    """Container content type providing ITestingType (old-style declaration)."""
    interface.implements(ITestingType)
    localrole_field = FieldProperty(ITestingType[u'localrole_field'])
class ITestingBehavior(model.Schema):
    """Behavior schema with a single-valued LocalRoleField over user names."""
    mono_localrole_field = LocalRoleField(
        title=u"Mono valued local role field",
        required=False,
        vocabulary=SimpleVocabulary.fromValues([
            u'john',
            u'jane',
            u'tom',
            u'kate'])
    )


# Mark the schema as a form-field provider so it appears on add/edit forms.
alsoProvides(ITestingBehavior, IFormFieldProvider)
class LocalRolesFieldLayer(PloneWithPackageLayer):
    """Test layer that provisions the groups and users used by the tests."""

    def setUp(self):
        super(LocalRolesFieldLayer, self).setUp()
        with ploneSite() as portal:
            groups_tool = portal.portal_groups
            # Mapping of group id -> member usernames.
            groups = {'mail_editor': ('john', 'jane'),
                      'mail_reviewer': ('jane', 'tom'),
                      'support_editor': ('kate', ),
                      'support_reviewer': ('kate', )}
            for group_id in groups:
                if group_id not in groups_tool.getGroupIds():
                    groups_tool.addGroup(group_id)
                for user in groups[group_id]:
                    # Create each member on first sight, then add to the group.
                    if not api.user.get(username=user):
                        api.user.create(username=user, email='test@test.com')
                    api.group.add_user(groupname=group_id, username=user)
            # A user that belongs to no group at all.
            if not api.user.get(username='basic-user'):
                api.user.create(username='basic-user', email='test@test.com')
# Base fixture: loads the package's testing ZCML and GenericSetup profile.
LOCALROLESFIELD_FIXTURE = LocalRolesFieldLayer(
    zcml_filename='testing.zcml',
    zcml_package=package,
    gs_profile_id='dexterity.localrolesfield:testing',
    name='dexterity.localrolesfield.layer:fixture',
)
# Integration, functional and robot layers built on the base fixture.
LOCALROLESFIELD_INTEGRATION = IntegrationTesting(
    bases=(LOCALROLESFIELD_FIXTURE, ),
    name='dexterity.localrolesfield.layer:integration',
)
LOCALROLESFIELD_FUNCTIONAL = FunctionalTesting(
    bases=(LOCALROLESFIELD_FIXTURE, ),
    name='dexterity.localrolesfield.layer:functional',
)
LOCALROLESFIELD_ROBOT = FunctionalTesting(
    bases=(LOCALROLESFIELD_FIXTURE, AUTOLOGIN_LIBRARY_FIXTURE,
           z2.ZSERVER_FIXTURE),
    name='dexterity.localrolesfield.layer:robot',
)
|
import socket

# Open a TCP connection to the exercise server.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('one.linuxlearn.net', 8124))
# Read the greeting, then exchange a couple of fixed messages.
print(s.recv(1024))
s.send('wazying'.encode())
print(s.recv(1024))
s.send('wazying'.encode())
print(s.recv(1024))
# Send a few names and print each reply.
for data in ['Michel', 'Tracy', 'Sarah']:
    s.send(data.encode())
    print(s.recv(1024))
# s.send('exit'.encode())
s.close()
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Implement a MyCalendarTwo class to store your events.
# A new event can be added if adding the event will not cause a triple booking.
# Your class will have one method, book(int start, int end). Formally,
# this represents a booking on the half open interval [start, end),
# the range of real numbers x such that start <= x < end.
# A triple booking happens when three events have some non-empty intersection
# (ie., there is some time that is common to all 3 events.)
# For each call to the method MyCalendar.book, return true if the event can be added to the calendar
# successfully without causing a triple booking. Otherwise, return false and do not add the event to the calendar.
# Your class will be called like this: MyCalendar cal = new MyCalendar(); MyCalendar.book(start, end)
# Example 1:
# MyCalendar();
# MyCalendar.book(10, 20); // returns true
# MyCalendar.book(50, 60); // returns true
# MyCalendar.book(10, 40); // returns true
# MyCalendar.book(5, 15); // returns false
# MyCalendar.book(5, 10); // returns true
# MyCalendar.book(25, 55); // returns true
# Explanation:
# The first two events can be booked. The third event can be double booked.
# The fourth event (5, 15) can't be booked, because it would result in a triple booking.
# The fifth event (5, 10) can be booked, as it does not use time 10 which is already double booked.
# The sixth event (25, 55) can be booked, as the time in [25, 40) will be double booked with the third event;
# the time [40, 50) will be single booked, and the time [50, 55) will be double booked with the second event.
# Note:
# The number of calls to MyCalendar.book per test case will be at most 1000.
# In calls to MyCalendar.book(start, end), start and end are integers in the range [0, 10^9].
# Leetcode Weekly Contest 59.
# 98 / 98 test cases passed.
# Status: Accepted
# Runtime: 592 ms
class MyCalendarTwo(object):
    """Calendar that accepts a booking unless it would triple-book any time."""

    def __init__(self):
        self.calendar = []  # every accepted [start, end) interval
        self.overlap = []   # regions already covered by exactly two bookings

    def book(self, start, end):
        """
        :type start: int
        :type end: int
        :rtype: bool

        Accept [start, end) if it never intersects an already double-booked
        region; record any new double-booked regions it creates.
        """
        # Reject if the new interval touches a double-booked region.
        for lo, hi in self.overlap:
            if start < hi and end > lo:
                return False
        # Every intersection with an existing booking becomes double-booked.
        for lo, hi in self.calendar:
            if start < hi and end > lo:
                self.overlap.append((max(start, lo), min(end, hi)))
        self.calendar.append((start, end))
        return True
# Demo: the example sequence from the problem statement
# (expected output: True True True False True True).
if __name__ == '__main__':
    mycalendar = MyCalendarTwo()
    print(mycalendar.book(10, 20))
    print(mycalendar.book(50, 60))
    print(mycalendar.book(10, 40))
    print(mycalendar.book(5, 15))
    print(mycalendar.book(5, 10))
    print(mycalendar.book(25, 55))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 16/5/4 下午8:32
# @Author : Li Yaxi
import pandas as pd
from sklearn import linear_model
from PaperUtil import getBeginOfMonth,getDataFromDate,getGroupDict,getRootMSE

# Module-level shared state used by the functions below.
group_dict = {}  # PERMNO -> group id; (re)filled by second_step()
clf = linear_model.LinearRegression()  # regression estimator reused per group
X = []
Y = []
B = 0.00
result = {}
def get_group_id(x):
    """Return the group id mapped to PERMNO ``x``, or -1 when unknown.

    Uses ``dict.get`` instead of ``dict.has_key`` — ``has_key`` was
    removed in Python 3, while ``get`` behaves identically on Python 2.
    """
    return group_dict.get(x, -1)
# Compute each group's beta and the residual standard deviation.
def getBetaAndCasu(dataframe):
    """For every group in ``dataframe``, regress the per-date mean RET on
    the per-date mean EWRETD and collect [group_id, beta, residual RMSE]
    rows. Uses the module-level ``clf`` estimator."""
    resutl_list = []  # NOTE(review): typo for "result_list"; kept as-is
    for i,j in dataframe.groupby(['group_id']):
        X = []
        Y = []
        print "*******************"
        print j
        # One (market return, group return) pair per trading date.
        for k,m in j.groupby("DATE"):
            X.append([float(m.EWRETD.mean())])
            Y.append(float(m.RET.mean()))
        clf.fit (X, Y)
        beta = clf.coef_
        intercept = clf.intercept_
        # Residual root-mean-squared error of the fitted line.
        epsilon = getRootMSE(X,Y,beta[0],intercept)
        resutl_list.append([i,beta[0],epsilon])
    # print resutl_list
    # break
    return resutl_list
# Second-step computation: rolling-window group betas written to CSV.
def second_step(startDate,endDate,secondPathName):
    """For each monthly window inside [startDate, endDate], compute every
    group's beta/epsilon with getBetaAndCasu and write the rows to
    ``secondPathName`` as CSV (Date,Group_id,Beta,Epsilon)."""
    global group_dict
    # Load the data window.
    period = (endDate-startDate)-50000
    months_start = getBeginOfMonth(startDate,startDate+period)
    months_end = getBeginOfMonth(endDate-period,endDate)
    dataframe2 = getDataFromDate("filteredData.csv", startDate,endDate)
    group_dict = getGroupDict(secondPathName)
    # Map each PERMNO to its group; drop rows without a group assignment.
    dataframe2['group_id'] = dataframe2.PERMNO.map(lambda x: get_group_id(x))
    dataframe2 = dataframe2[dataframe2['group_id']!=-1]
    # NOTE(review): opened "wb+" but written with str lines — Python 2 only.
    file2 = open(secondPathName,"wb+")
    file2.writelines("Date,Group_id,Beta,Epsilon\n")
    for i in range(0,len(months_start)):
        # Restrict to the i-th [month start, month end] window.
        dataframe = dataframe2[dataframe2['DATE']>=months_start[i]]
        dataframe = dataframe[dataframe['DATE']<=months_end[i]]
        resutl_list = getBetaAndCasu(dataframe)
        for j in range(0,len(resutl_list)):
            print months_end[i],resutl_list[j][0],resutl_list[j][1],resutl_list[j][2]
            file2.writelines(str(months_end[i])+','+str(resutl_list[j][0])+','
                +str(resutl_list[j][1])+','+str(resutl_list[j][2])+'\n')
    print len(months_start),len(months_end)
# Run the second step over nine overlapping nine-year windows.
if __name__ == '__main__':
    second_step(19300101,19390101,'1-2.csv')
    second_step(19340101,19430101,'2-2.csv')
    second_step(19380101,19470101,'3-2.csv')
    second_step(19420101,19510101,'4-2.csv')
    second_step(19460101,19550101,'5-2.csv')
    second_step(19500101,19590101,'6-2.csv')
    second_step(19540101,19630101,'7-2.csv')
    second_step(19580101,19670101,'8-2.csv')
    second_step(19620101,19710101,'9-2.csv')
|
class Solution:
    def lengthOfLongestSubstring(self, s: str) -> int:
        """Length of the longest substring of ``s`` with no repeated character.

        Sliding-window scan: ``window_start`` sits just before the window,
        and ``last_seen`` maps each character to its most recent index.
        """
        last_seen = {}
        window_start = -1
        best = 0
        for idx, ch in enumerate(s):
            if ch in last_seen and last_seen[ch] > window_start:
                # Repeat inside the window: shrink it past the old occurrence.
                window_start = last_seen[ch]
            else:
                best = max(best, idx - window_start)
            last_seen[ch] = idx
        return best
|
# Generated by Django 3.1.1 on 2020-09-14 08:23
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: removes the 'img' field from the Face model."""

    dependencies = [
        ('attend', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='face',
            name='img',
        ),
    ]
|
import xmlrpclib
from util import *
from django.db.models import Q
def get_unread_topic(request, start_num, last_num, search_id='', filters=[]):
    """Topics updated/created since the user's last-read marker, as tapatalk dicts.

    NOTE(review): ``total_topic_num`` is hard-coded to 41352 and
    ``filters=[]`` is a mutable default — both look like leftovers; confirm.
    """
    data = {
        'result': True,
        'total_topic_num': 0,
        'topics': [],
        'forum_id': '',
    }
    groups = request.user.groups.all() or [] #removed after django > 1.2.3
    # Topics visible to the user's groups, or in categories with no group.
    topics = Topic.objects.filter(
        Q(forum__category__groups__in=groups) | \
        Q(forum__category__groups__isnull=True))
    try:
        last_read = PostTracking.objects.get(user_id=request.user.pk).last_read
    except PostTracking.DoesNotExist:
        last_read = None
    if last_read:
        topics = topics.filter(Q(updated__gte=last_read) | Q(created__gte=last_read)).all()
    else:
        #searching more than forum_settings.SEARCH_PAGE_SIZE in this way - not good idea :]
        topics = [topic for topic in topics[:forum_settings.SEARCH_PAGE_SIZE * 5] if forum_extras.has_unreads(topic, request.user)]
    data['total_topic_num'] = 41352
    if start_num != 0 or last_num != 0:
        topics = topics[start_num:last_num]
    for topic in topics:
        data['topics'].append(topic.as_tapatalk())
    return data
def get_latest_topic(request, start_num=0, last_num=None, search_id='', filters=[]):
    """Latest topics from public (group-less) categories, tapatalk format.

    NOTE(review): ``total_topic_num`` is hard-coded to 43437 and
    ``filters=[]`` is a mutable default — confirm both.
    """
    data = {
        'result': True,
        'topics': [],
    }
    topics = Topic.objects.filter(forum__category__groups__isnull=True)
    data['total_topic_num'] = 43437
    if start_num != 0 or last_num != 0:
        topics = topics[start_num:last_num]
    for t in topics:
        data['topics'].append(t.as_tapatalk())
    return data
# TODO: Pagination
def get_participated_topic(request, user_name='', start_num=0, last_num=None, search_id='', user_id=''):
    """Topics the requesting user has posted in, as tapatalk dicts.

    NOTE(review): ``total_topic_num`` is computed after slicing, so it
    reports the page size rather than the full count — confirm intent.
    """
    user = request.user
    posts = Post.objects.filter(user=user)
    # Collect the distinct topic ids; a set gives O(1) membership tests
    # instead of the old O(n) list scan per post.
    topic_ids = set()
    for post in posts:
        topic_ids.add(post.topic_id)
    topics = Topic.objects.filter(pk__in=topic_ids).filter(forum__category__groups__isnull=True)
    if start_num != 0 or last_num != 0:
        topics = topics[start_num:last_num]
    topic_set = []
    for topic in topics:
        topic_set.append(topic.as_tapatalk())
    data = {
        'result': True,
        'search_id': search_id,
        'total_topic_num': len(topics),
        'topics': topic_set,
    }
    # Removed a leftover debug "print data" statement.
    return data
def get_topic(request, forum_id, start_num=0, last_num=0, mode='DATE'):
    """List a forum's topics in tapatalk format.

    mode 'TOP' restricts the listing to sticky topics; slicing applies
    when either bound is non-zero.
    """
    topics = Topic.objects.filter(forum_id=forum_id).filter(forum__category__groups__isnull=True)
    forum = Forum.objects.get(pk=forum_id)
    if mode == 'TOP':
        topics = topics.filter(sticky=True)
    if start_num != 0 or last_num != 0:
        topics = topics[start_num:last_num]
    data = {
        'total_topic_num': forum.topic_count,
        'forum_id': str(forum_id),
        'forum_name': xmlrpclib.Binary(forum.name),
        'can_post': True,
        'can_upload': False,
        'require_prefix': False,
        'topics': [],
    }
    subscriptions = []
    # NOTE(review): is_authenticated is called — pre-Django-1.10 style.
    if request.user.is_authenticated():
        subscriptions = request.user.subscriptions.all()
    for topic in topics:
        t = topic.as_tapatalk()
        if request.user.is_authenticated():
            # Authenticated users may subscribe; flag existing subscriptions.
            t['can_subscribe'] = True
            if topic in subscriptions:
                t['is_subscribed'] = True
        data['topics'].append(t)
    return data
def new_topic(request, forum_id, subject, text_body, prefix_id='', attachment_id_array=[], group_id=''):
    """Create a topic plus its first post; returns {'result', 'topic_id'}.

    NOTE(review): ``attachment_id_array=[]`` is a mutable default; also
    ``subject``/``text_body`` are coerced with str() — presumably they
    arrive as xmlrpclib.Binary; confirm the coercion is correct.
    """
    from djangobb_forum.models import Topic, Post
    t = Topic()
    t.forum_id = int(forum_id)
    t.name = str(subject)
    t.user_id = request.user.pk
    t.save()
    p = Post()
    p.user_id = request.user.pk
    p.topic_id = t.id
    p.body = str(text_body)
    p.save()
    return {
        'result': True,
        'topic_id': t.id,
    }
|
import requests
from selenium import webdriver
from lxml import etree
import re
import time
from bs4 import BeautifulSoup
import json
import os
import random
def selenium_request(link):
    """Load *link* in Chrome and return the page source of the
    'contentFrame' iframe.

    The original referenced a module-level ``driver`` whose creation was
    commented out, raising NameError at call time; the driver is created
    here and always quit so Chrome processes are not leaked.
    """
    driver = webdriver.Chrome()
    try:
        driver.get(link)
        time.sleep(5)  # give the JS-rendered playlist time to load
        driver.switch_to.frame('contentFrame')
        return driver.page_source
    finally:
        driver.quit()
def get_proxys():
    """Fetch a batch of domestic proxies from the local pool service and
    return a single randomly chosen (host, port) entry."""
    response = requests.get('http://127.0.0.1:8000/?types=0&count=10&country=国内')
    proxy_list = json.loads(response.text)
    return random.sample(proxy_list, 1)[0]
# Scrape every song's lyrics from a NetEase playlist into lrc.txt.
playlist_id = '763904548'
playlist_url = 'http://music.163.com/playlist?id={}'
data = selenium_request(playlist_url.format(str(playlist_id)))
tree = etree.HTML(data)
links = tree.xpath('//table/tbody/tr//a/@href')
file_path = os.path.join(os.getcwd(), "lrc.txt")
# Context manager so the file is flushed and closed even on error
# (the original handle was never closed).
with open(file_path, "w+", encoding='utf8') as document:
    for link in links:
        if link == "javascript:;":
            continue
        # Raw strings avoid invalid-escape-sequence warnings in the regexes.
        elif not re.match(r"/song\?id=\d+", link):
            continue
        song_id = re.search(r"\d+", link).group()
        lyric_url = 'http://music.163.com/api/song/lyric?os=pc&id={}&lv=-1&kv=-1&tv=-1'
        lyric_url = lyric_url.format(str(song_id))
        s = BeautifulSoup(requests.get(lyric_url).content, "html.parser")
        lrc = json.loads(s.text)['lrc']['lyric']
        # Strip the [mm:ss.xx] timestamp tags, keeping only the text.
        lrc = re.sub(r'\[(.*)\]', '', lrc)
        time.sleep(10)  # be polite to the API between requests
        document.write(lrc)
|
# Generated by Django 2.2.9 on 2020-02-16 18:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds a default ordering to Class and makes
    TeamIM.post / TeamIM.reader nullable with SET_DEFAULT on delete."""

    dependencies = [
        ('tssite', '0019_auto_20191209_0636'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='class',
            options={'ordering': ['series_sequence', 'division_sequence', 'segment_sequence', 'section_sequence', 'unit_sequence', 'part_sequence']},
        ),
        migrations.AlterField(
            model_name='teamim',
            name='post',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_DEFAULT, to='tssite.Class'),
        ),
        migrations.AlterField(
            model_name='teamim',
            name='reader',
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.SET_DEFAULT, to='tssite.Teacher'),
        ),
    ]
|
import torch
# ---- data parameters ----
image_x: int = 28               # image width in pixels
image_y: int = image_x          # image height (square images)
hole_size_x: int = 10           # masked-out hole width
hole_size_y: int = hole_size_x  # hole height (square hole)
max_random: int = 4             # NOTE(review): bound for some random offset -- confirm at the usage site
n_of_samples: int = 5000        # number of samples in the dataset
# ---- training parameters ----
batch_size: int = 50
epoch: int = 50                 # number of training epochs
lr: float = 0.001               # learning rate
val_split: float = 0.2          # fraction of data held out for validation
random_seed: int = 10
shuffle_dataset: bool = True
# network parameters
k: int = 3
l: int = 1
n: int = hole_size_x * hole_size_y  # output size = number of hole pixels
model_filename = 'model667.pth'     # checkpoint path
use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')
|
import numpy as np
from functions.metrics import compute_batch_accuracy, compute_optimal_roc_threshold, compute_metrics
from params import *
def evaluate(network: torch.nn.Module, dataloader: torch.utils.data.DataLoader, criterion: torch.nn.Module) -> dict:
    """Run one evaluation pass over *dataloader* and return a metrics dict.

    Accumulates loss/accuracy per batch, derives hard predictions from the
    optimal ROC threshold, and returns the dict from compute_metrics with
    'loss' and 'accuracy' added.  The criterion annotation was corrected
    from ``torch.optim`` (a module, not a type) to a loss module.
    """
    network = network.eval()
    y_scores, y_true = [], []
    loss, accuracy = [], []
    running_loss, running_accuracy = 0.0, 0.0
    with torch.no_grad():
        for i, (x, y) in enumerate(dataloader):
            # Move validation inputs and labels to device
            x = x.float().to(DEVICE)
            y = torch.from_numpy(np.asarray(y)).long().to(DEVICE)
            # Initialize the hidden state of the RNN and move it to device
            h = network.init_state(x.shape[0]).to(DEVICE)
            # Predict
            o = network(x, h).to(DEVICE)
            loss_value, batch_accuracy = criterion(o, y).item(), compute_batch_accuracy(o, y)
            # Accumulate validation loss and accuracy for the periodic log
            running_loss += loss_value
            running_accuracy += batch_accuracy
            # Store all validation loss and accuracy values for the averages
            loss += [loss_value]
            accuracy += [batch_accuracy]
            # Store predicted scores (exp of log-probs) and ground truth labels
            y_scores += torch.exp(o).cpu().numpy().tolist()
            y_true += y.cpu().numpy().tolist()
            if not (i + 1) % 10:
                print("[ batch: {}/{} ] [ loss: {:.5f} | accuracy: {:.5f} ]"
                      .format(i + 1, len(dataloader), running_loss / 10, running_accuracy / 10))
                running_loss, running_accuracy = 0.0, 0.0
    y_scores, y_true = np.array(y_scores).reshape((len(y_scores), 2)), np.array(y_true)
    # Compute predicted labels based on the optimal ROC threshold
    threshold = compute_optimal_roc_threshold(y_true, y_scores[:, 1])
    # np.int was removed in NumPy 1.24; the builtin int is the equivalent dtype.
    y_pred = np.array(y_scores[:, 1] >= threshold, dtype=int)
    # Compute the validation metrics
    avg_loss, avg_accuracy = np.mean(loss), np.mean(accuracy)
    metrics = compute_metrics(y_true, y_pred, y_scores[:, 1])
    metrics["loss"] = avg_loss
    metrics["accuracy"] = avg_accuracy
    return metrics
|
import pytest
from vowels import get_word_max_vowels
# Fixture paragraphs (from the Python tutorial introduction), paired
# index-for-index with the accepted answers in `expected` below.
paragraphs = [
    ("Python is an easy to learn, powerful programming language."
     "It has efficient high-level data structures and a simple "
     "but effective approach to object-oriented programming. "
     "Python’s elegant syntax and dynamic typing, together with "
     "its interpreted nature, make it an ideal language for "
     "scripting and rapid application development in many areas "
     "on most platforms."),
    ("The Python interpreter and the extensive standard library "
     "are freely available in source or binary form for all major "
     "platforms from the Python Web site, https://www.python.org/, "
     "and may be freely distributed. The same site also contains "
     "distributions of and pointers to many free third party Python "
     "modules, programs and tools, and additional documentation."),
    ("The Python interpreter is easily extended with new functions "
     "and data types implemented in C or C++ (or other languages "
     "callable from C). Python is also suitable as an extension "
     "language for customizable applications."),
    ("This tutorial introduces the reader informally to the basic "
     "concepts and features of the Python language and system. "
     "It helps to have a Python interpreter handy for hands-on "
     "experience, but all examples are self-contained, so the "
     "tutorial can be read off-line as well."),
    ("For a description of standard objects and modules, see The "
     "Python Standard Library. The Python Language Reference gives "
     "a more formal definition of the language. To write extensions "
     "in C or C++, read Extending and Embedding the Python "
     "Interpreter and Python/C API Reference Manual. There are "
     "also several books covering Python in depth."),
    ("This tutorial does not attempt to be comprehensive and cover "
     "every single feature, or even every commonly used feature. "
     "Instead, it introduces many of Python’s most noteworthy "
     "features, and will give you a good idea of the language’s "
     "flavor and style. After reading it, you will be able to read and "
     "write Python modules and programs, and you will be ready to "
     "learn more about the various Python library modules described "
     "in The Python Standard Library.")
]
# Accepted (word, vowel_count) answers per paragraph; some paragraphs
# have ties, so the test only checks membership.
expected = [
    [('object-oriented', 6)],  # only one word with 6 vowels
    [('documentation.', 6)],  # ditto
    [('customizable', 5), ('applications.', 5)],  # here we have two options
    [('self-contained,', 5), ('experience,', 5)],  # ditto
    [('definition', 5)],  # only one word with 5 vowels
    [('comprehensive', 5)],  # ditto
]
@pytest.mark.parametrize('text, result', zip(paragraphs, expected))
def test_get_word_max_vowels(text, result):
    """The word with the most vowels must be one of the accepted answers."""
    outcome = get_word_max_vowels(text)
    assert outcome in result
def cnt0(s):
    """Count the '0' characters that appear before the first '1' in *s*.

    Returns a single-element list [count], preserving the original return
    shape.  The original appended into a module-level list, so repeated
    calls accumulated stale results; the list is built locally now.
    Characters other than '0' and '1' are skipped.
    """
    cnt = 0
    for ch in s:
        if ch == '0':
            cnt += 1
        elif ch == '1':
            break
    return [cnt]


if __name__ == "__main__":
    # Guarded so importing this module no longer blocks on stdin.
    s = str(input())
    print(cnt0(s))
# Generated by Django 3.0.4 on 2021-03-07 00:04
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: makes Post's answers/comments/tags/upvotes
    many-to-many fields optional (blank=True)."""

    dependencies = [
        ('api', '0010_auto_20210307_0003'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='answers',
            field=models.ManyToManyField(blank=True, to='api.Answer'),
        ),
        migrations.AlterField(
            model_name='post',
            name='comments',
            field=models.ManyToManyField(blank=True, to='api.Comment'),
        ),
        migrations.AlterField(
            model_name='post',
            name='tags',
            field=models.ManyToManyField(blank=True, to='api.Tag'),
        ),
        migrations.AlterField(
            model_name='post',
            name='upvotes',
            field=models.ManyToManyField(blank=True, related_name='posts_upvoted', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
#!/usr/bin/python
# Copyright 2015 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the fileset picker. """
import os
import test_tools
import unittest
import fileset_picker
class TestFilesetPicker(unittest.TestCase):
    def testPickMpegFileset(self):
        """The bundled 'mpeg_video' fileset lists 44 file/rate pairs."""
        fileset = fileset_picker.PickFileset('mpeg_video')
        # assertEqual: assertEquals is a long-deprecated alias removed in
        # Python 3.12.
        self.assertEqual(44, len(fileset.AllFilesAndRates()))

    def testPickNonexistentFileset(self):
        """Picking an unknown fileset raises fileset_picker.Error."""
        with self.assertRaises(fileset_picker.Error):
            fileset_picker.PickFileset('no_such_directory')
class TestFilesetPickerWithLocalFiles(test_tools.FileUsingCodecTest):
    def testPickLocalVideoDirectory(self):
        """A yuv file dropped under video/local/ is picked up with 4 rates."""
        # makedirs creates video/ and video/local/ in one call (the
        # original chained two mkdir calls).
        os.makedirs(os.path.join(os.getenv('WORKDIR'), 'video', 'local'))
        test_tools.MakeYuvFileWithOneBlankFrame(
            'video/local/one_black_frame_1024_768_30.yuv')
        fileset = fileset_picker.PickFileset('local')
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(4, len(fileset.AllFilesAndRates()))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
from colorsys import rgb_to_hsv, hsv_to_rgb
import random
from functools import lru_cache
from typing import Union, Optional
import glm # for mix (conflicts with util.mix)
from glm import vec3, vec4, ivec2, ivec3, ivec4, normalize, cross, dot, vec2
from game.constants import EPSILON, DEBUG
import pygame
def map_range(val, r1, r2):
    """Linearly remap *val* from range r1=(lo, hi) into range r2=(lo, hi)."""
    fraction = (val - r1[0]) / (r1[1] - r1[0])
    return r2[0] + fraction * (r2[1] - r2[0])
def clamp(x, mini=0, maxi=1):
    """Constrain x to [mini, maxi]; if the bounds are inverted, x is
    returned unchanged."""
    if mini > maxi:
        return x
    return max(mini, min(x, maxi))
def surf_fader(max_dist, dz):
    """Alpha value (0-255) for a distance fade.

    Arguments:
        max_dist {int} -- distance at which sprites disappear completely
        dz {int} -- Z-distance between camera and sprite
    """
    visible_fraction = (max_dist - dz) / max_dist
    return clamp(visible_fraction * 255, 0, 255)
def rgb2hsv(r, g, b):
    """Convert 0-255 RGB channels to colorsys HSV (components in 0-1)."""
    scaled = (r / 255, g / 255, b / 255)
    return rgb_to_hsv(*scaled)
def hsv2rgb(h, s, v):
    """Convert HSV to an integer RGB tuple in 0-255.

    Hue wraps modulo 1; saturation and value are clamped to [0, 1].
    """
    r, g, b = hsv_to_rgb(h % 1, clamp(s), clamp(v))
    return (int(r * 255), int(g * 255), int(b * 255))
def random_color():
    """A fully saturated, fully bright RGB color with a random hue."""
    hue = random.random()
    return hsv2rgb(hue, 1, 1)
def plane_intersection(p1: vec3, d1: vec3, p2: vec3, d2: vec3):
    """
    Compute the line of intersection of the two planes.
    Note: if the two planes are parallel or equal this returns None.
    :param p1: a point in the first plane
    :param d1: a vector normal to the first plane
    :param p2: a point in the second plane
    :param d2: a normal vector of the second plane
    :return: None if they are parallel else (p3, d3)
        where p3 is a point in the line of intersection
        and d3 is the direction of this line
    """
    d1 = normalize(d1)
    d2 = normalize(d2)
    # NOTE(review): exact float equality after normalize -- nearly-parallel
    # planes slip past this guard; confirm callers tolerate that.
    if d1 in (d2, -d2):
        # planes are parallel
        return None
    d3 = cross(d1, d2)
    # d3 and v1 are an orthonormal base of the first plane
    v1 = cross(d3, d1)
    # NOTE(review): b never uses p2 -- this looks correct only when the
    # second plane passes through the origin (dot(p2, d2) == 0); verify
    # against the expected geometry before reusing.
    b = -dot(p1, d2) / dot(v1, d2)
    p3 = p1 + b * v1
    return p3, d3
def cross2d(a, b):
    """Z component of the 2D cross product (signed area of the a,b
    parallelogram)."""
    return a.x * b.y - b.x * a.y
def line_intersection(p, u, q, v) -> Optional[vec2]:
    """
    Compute the intersection of two 2d lines.
    :param p: a point on the first line
    :param u: direction of the first line
    :param q: a point on the second line
    :param v: direction of the second line
    :return: the intersection point, or None if the lines are
        (near-)parallel
    """
    cross = cross2d(u, v)
    if abs(cross) < EPSILON:
        # near-parallel directions: no single intersection point
        return None
    w = p - q
    # parameter along the first line at which the crossing occurs
    s = cross2d(u, w) / cross
    return p + s * u
def line_segment_intersection(a, b, p, u) -> Optional[vec2]:
    """
    Compute the intersection between a 2d line and a segment.
    :param a: start point of the segment
    :param b: end point of the segment
    :param p: a point of the line
    :param u: the direction of the line
    :return: the intersection point, or None when the segment and line
        are (near-)parallel or the crossing falls outside the segment
    """
    v = b - a
    cross = cross2d(v, u)
    if abs(cross) < EPSILON:
        return None
    w = a - p
    # s parameterizes the segment: 0 at a, 1 at b
    s = cross2d(v, w) / cross
    if 0 <= s <= 1:
        return a + s * v
    return None
def estimate_3d_size(size_2d):
    """Promote a 2D sprite size to 3D.

    The depth (last coordinate) is the smaller of width and height.
    """
    depth = min(size_2d)
    return vec3(*size_2d, depth)
def pg_color(c):
    """Coerce *c* into a value pygame accepts as a color.

    pygame.Color, tuples and color-name strings are returned/constructed
    directly; glm vectors fall through to the normalized-float path and
    are scaled to 0-255 integer components.
    """
    tc = type(c)
    if tc == pygame.Color:
        return c
    elif tc == vec3:
        c = vec4(c, 0)
    elif tc == ivec3:
        c = vec4(c, 0)
    elif tc == ivec4:
        # NOTE(review): vec4(ivec4, 0) hands glm five components -- confirm
        # this constructor call is actually valid / intended for ivec4.
        c = vec4(c, 0)
    elif tc == tuple:
        return c
    elif tc == str:
        return pygame.Color(c)
    # Remaining values are treated as normalized 0-1 floats.
    c = tuple(int(clamp(x * 255, 0, 255)) for x in c)
    return c
def ncolor(c):
    """
    Normalize color based on type.
    Given a color string, tuple, pygame color, vec3, scalar or None,
    return it as a normalized vec4 color.
    """
    tc = type(c)
    if tc == str:
        c = vec4(*pygame.Color(c)) / 255.0
    elif tc == tuple:
        c = vec4(*c, 0) / 255.0
    elif tc == pygame.Color:
        # The original condition also tested `tc == c` -- a type compared
        # against the value itself, which can never match a real color
        # argument; dropped.
        c = vec4(*c) / 255.0
    elif tc == vec3:
        c = vec4(*c, 0)
    elif tc == float or tc == int:
        c = vec4(c, c, c, 0)
    elif c is None:
        c = vec4(0)
    return c
def rgb_mix(a, b, t):
    """Componentwise integer blend of two RGB tuples; t outside [0, 1]
    returns the corresponding endpoint unchanged."""
    if t >= 1:
        return b
    if t <= 0:
        return a
    u = 1 - t
    return tuple(int(a[i] * u + b[i] * t) for i in range(3))
def nrand(s=1.0):
    """Uniform random scalar in [-s, s]."""
    return s * (2 * random.random() - 1)
def mix(a, b, t):
    """
    Interpolate a -> b at parameter t.
    Returns a vec4; color names, tuples and pygame colors are accepted
    and normalized first.  vec3 inputs are mixed as-is.
    """
    if not isinstance(a, vec3):
        # ncolor handles strings, tuples, pygame colors and vec4
        a, b = ncolor(a), ncolor(b)
    return glm.mix(a, b, t)
def random_vec3(s=1):
    """Uniformly-oriented random direction vector scaled to length s."""
    direction = vec3(nrand(), nrand(), nrand())
    return glm.normalize(direction) * s
def random_rgb():
    """Random normalized RGB color (alpha 0) as a vec4."""
    r = random.random()
    g = random.random()
    b = random.random()
    return vec4(r, g, b, 0)
def random_char():
    """
    Random human-readable (printable ASCII) character, space through '~'.
    """
    code = random.randint(32, 126)
    return chr(code)
def rand_RGB():
    """Random 8-bit RGB tuple.

    Uses randrange(256) so 255 (full intensity) is reachable; the
    original's randrange(255) silently excluded the brightest value of
    each channel.
    """
    return (
        random.randrange(256),
        random.randrange(256),
        random.randrange(256),
    )
@lru_cache(15)
def noise_surf(size, num=0):
    """A cached surface of per-pixel random noise at alpha 12.

    num carries no meaning of its own -- it only differentiates cache
    entries so several distinct noise surfaces of the same size coexist.
    """
    surf = pygame.Surface(size)
    for y in range(size[1]):
        for x in range(size[0]):
            surf.set_at((x, y), rand_RGB())
    surf.set_alpha(12)
    return surf
@lru_cache(15)
def noise_surf_dense_bottom(size, num=0):
    """A cached noise surface that gets brighter and more opaque towards
    the bottom edge.

    num only differentiates cache entries of the same size.
    """
    surf = pygame.Surface(size).convert_alpha()
    for y in range(size[1]):
        # interp runs 1 -> ~0 from the top row to the bottom row
        interp = 1 - y / size[1]
        alpha = min(int(0.02 / interp * 255), 255)
        for x in range(size[0]):
            # NOTE(review): the channel values below are floats; confirm
            # pygame accepts non-integer color components here.
            surf.set_at(
                (x, y),
                (
                    min(random.randrange(10, 255) / interp / 6, 255),
                    min(random.randrange(10, 255) / interp / 6, 255),
                    min(random.randrange(10, 255) / interp / 6, 255),
                    alpha,
                ),
            )
    return surf
def debug_log_call(f: "function"):
    """Decorator: when DEBUG is set, print each call of *f* with its
    arguments; otherwise return *f* untouched (zero overhead).
    """
    if DEBUG:
        from functools import wraps

        @wraps(f)  # keep f's name/docstring on the wrapper
        def wrapper(*args, **kwargs):
            ar = [str(a) for a in args]
            # f-string: the original was missing the f prefix and logged
            # the literal text "{n}={v}" for every keyword argument.
            kw = [f"{n}={v}" for n, v in kwargs.items()]
            print(f"CALL {f.__name__}({', '.join(ar + kw)})")
            return f(*args, **kwargs)

        return wrapper
    return f
|
from .list import ListNode, new_list, new_cycle_list
from .test import test, sorted_list, sorted_2d_list
from .tree import (
TreeNode,
new_tree,
is_valid_avl,
is_valid_bst,
height,
inorder_traverse,
preorder_traverse,
postorder_traverse,
level_order_traverse,
)
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Copyright (c) Donald Stufft, pip, and individual contributors
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
#---------------------------------------------------------------------------
# Name: __init__.py
# Purpose: Initialize
#
# Author: Kosuke Akizuki
#
# Created: 2015/03/11
# Copyright: (c) Kosuke Akizuki 2015-2018
# Licence: The MIT License (MIT)
#---------------------------------------------------------------------------
from __future__ import absolute_import
import sys
class VendorAlias(object):
    """PEP 302 meta-path hook that aliases vendored sub-packages.

    Importing ``<this package>.<name>`` yields the vendored copy when it
    exists, otherwise the top-level installation of ``<name>``.
    """

    def __init__(self, package_names):
        # names of the packages vendored under this package
        self._package_names = package_names
        self._vendor_name = __name__
        self._vendor_pkg = self._vendor_name + "."
        # fully qualified names of every vendored package
        self._vendor_pkgs = [
            self._vendor_pkg + name for name in self._package_names
        ]

    def find_module(self, fullname, path=None):
        # Finder protocol: claim any import under our vendor prefix
        # (returning None for everything else defers to other finders).
        if fullname.startswith(self._vendor_pkg):
            return self

    def load_module(self, name):
        """Loader protocol: import *name*, preferring the vendored copy
        and falling back to the real top-level package."""
        # Ensure that this only works for the vendored name
        if not name.startswith(self._vendor_pkg):
            raise ImportError(
                "Cannot import %s, must be a subpackage of '%s'." % (
                    name, self._vendor_name,
                )
            )
        if not (name == self._vendor_name or
                any(name.startswith(pkg) for pkg in self._vendor_pkgs)):
            raise ImportError(
                "Cannot import %s, must be one of %s." % (
                    name, self._vendor_pkgs
                )
            )
        # Check to see if we already have this item in sys.modules, if we do
        # then simply return that.
        if name in sys.modules:
            return sys.modules[name]
        # Check to see if we can import the vendor name
        try:
            # We do this dance here because we want to try and import this
            # module without hitting a recursion error because of a bunch of
            # VendorAlias instances on sys.meta_path
            real_meta_path = sys.meta_path[:]
            try:
                sys.meta_path = [
                    m for m in sys.meta_path
                    if not isinstance(m, VendorAlias)
                ]
                __import__(name)
                module = sys.modules[name]
            finally:
                # Re-add any additions to sys.meta_path that were made while
                # during the import we just did, otherwise things like
                # requests.packages.urllib3.poolmanager will fail.
                for m in sys.meta_path:
                    if m not in real_meta_path:
                        real_meta_path.append(m)
                # Restore sys.meta_path with any new items.
                sys.meta_path = real_meta_path
        except ImportError:
            # We can't import the vendor name, so we'll try to import the
            # "real" name.
            real_name = name[len(self._vendor_pkg):]
            try:
                __import__(real_name)
                module = sys.modules[real_name]
            except ImportError:
                raise ImportError("No module named '%s'" % (name,))
        # If we've gotten here we've found the module we're looking for, either
        # as part of our vendored package, or as the real name, so we'll add
        # it to sys.modules as the vendored name so that we don't have to do
        # the lookup again.
        sys.modules[name] = module
        # Finally, return the loaded module
        return module
# Install the alias so imports of `merger` resolve to the vendored copy first.
sys.meta_path.append(VendorAlias(["merger"]))
|
import os
import sys
import numpy as np
from utils import utils
from cost_functions import cost_functions
from camera_optimization_support.support import *
MAX_DURATION = 6
def camera_pre_optimization(project_data, cost_matrix):
    """Populate cost_matrix with the matrices the camera optimizer needs.

    Fills the sequence matrix, action map, cost map and per-camera quality
    costs from project_data.  (The original bound script / characters /
    action_data / animation_dict to locals it never used; removed.)
    """
    # ========= initial sequence matrix ==========
    initial_sequence_matrix(project_data, cost_matrix)
    # ========= initial action matrix ============
    initial_action_map(project_data, cost_matrix)
    initial_cost_map(project_data, cost_matrix)
    init_quality_cost(project_data, cost_matrix)
def helper(project_data, t, camIndex, DefaultCamCostHash, DefaultCamNextCamHash, DefaultQualithHash):
    """Memoized recursive DP: minimum cost of a camera path from node
    (t, camIndex) to the end of the timeline.

    t == -1 is the virtual start node; t == endTime + 1 is the zero-cost
    end node.  Best costs and successors are memoized into
    DefaultCamCostHash / DefaultCamNextCamHash.
    NOTE(review): if validNextNodes is ever empty, minNextNode below is
    unbound -- confirm getValidNextNodesWoUserCam always returns candidates.
    """
    # recursion
    print("calculate time {} cam {}".format(t, camIndex))
    if t == project_data.endTime + 1:
        # dummy end node has 0 node cost
        return 0
    if t == -1:
        print("start Node!")
    else:
        # NOTE(review): truthiness test -- a legitimately zero cached cost
        # would be recomputed on every visit.
        if DefaultCamCostHash[t - project_data.startTime][camIndex]:
            # if this cost has already been calculated
            # return this cost directly
            return DefaultCamCostHash[t - project_data.startTime][camIndex]
    validNextNodes = utils.getValidNextNodesWoUserCam(t, MAX_DURATION, project_data.numDefaultCameras, project_data.startTime,
                                                      project_data.endTime)
    minCost = sys.maxsize
    for nextNode in validNextNodes:
        duration = nextNode[-1]
        # cost of the remainder of the path from the candidate next node
        nextNodeCost = helper(project_data, nextNode[0], nextNode[1], DefaultCamCostHash, DefaultCamNextCamHash,
                              DefaultQualithHash)
        qualityCost = cost_functions.getDurationQualityCost([t, camIndex], duration, DefaultQualithHash, project_data.startTime,
                                                            project_data.endTime)
        # hops cost
        durationCost = cost_functions.getDurationCost([t, camIndex], [nextNode[0], nextNode[1]], duration)
        transferCost = cost_functions.getWeightedTransferCostWoUserCam([t, camIndex], [nextNode[0], nextNode[1]],
                                                                       project_data.endTime, project_data.characters, project_data.script,
                                                                       project_data.eyePos, project_data.leftRightOrder, project_data.objects)
        # transfer and duration costs are weighted equally at 0.5 each
        totalCost = nextNodeCost + .5 * transferCost + .5 * durationCost + qualityCost
        if totalCost < minCost:
            minCost = totalCost
            minNextNode = [nextNode[0], nextNode[1]]
    if t != -1:
        # memoize the best cost and successor for this node
        DefaultCamCostHash[t - project_data.startTime][camIndex] = minCost
        DefaultCamNextCamHash[t - project_data.startTime][camIndex] = minNextNode
    print("time {} camera {} min cost: {}".format(t, camIndex, minCost))
    return minCost
def camera_optimization_main(project_data, cost_matrix):
    """Solve the camera-selection DP and return the chosen shot path as
    [start_time, camera_index, duration] triples."""
    optimizeDuration = project_data.endTime -project_data.startTime + 1
    # cost from any node to the end of the timeline
    DefaultCamCostHash = [[None for i in range(project_data.numDefaultCameras)] for j in range(optimizeDuration)]
    DefaultCamNextCamHash = [[[None, None] for i in range(project_data.numDefaultCameras)] for j in range(optimizeDuration)]
    # node cost
    DefaultQualityHash = cost_matrix.quality_cost
    minCost = helper(project_data, -1, -1, DefaultCamCostHash, DefaultCamNextCamHash, DefaultQualityHash)
    path = []
    # start from the cheapest first-frame camera and follow the memoized successors
    startIndex = DefaultCamCostHash[0].index(min(DefaultCamCostHash[0]))
    startNode = [project_data.startTime, startIndex]
    while startNode[0] < project_data.endTime + 1:
        duration = DefaultCamNextCamHash[startNode[0] - project_data.startTime][startNode[1]][0] - startNode[0]
        path.append([startNode[0], startNode[1], duration])
        startNode = DefaultCamNextCamHash[startNode[0] - project_data.startTime][startNode[1]]
    # example output kept from the original for reference:
    # path = [[0, 7, 3], [3, 67, 4], [7, 7, 3], [10, 29, 4], [14, 87, 3], [17, 47, 3], [20, 48, 2], [22, 47, 3], [25, 48, 3], [28, 47, 3], [31, 29, 5], [36, 7, 2], [38, 29, 4], [42, 68, 2], [44, 69, 3], [47, 8, 5], [52, 47, 3], [55, 8, 4], [59, 29, 5], [64, 7, 4], [68, 29, 4], [72, 7, 3], [75, 67, 2], [77, 29, 5], [82, 8, 4], [86, 47, 3], [89, 87, 1], [90, 29, 4], [94, 7, 5], [99, 29, 2], [101, 8, 3], [104, 7, 3], [107, 29, 4], [111, 7, 2], [113, 88, 2], [115, 89, 3], [118, 8, 4], [122, 29, 5], [127, 7, 4], [131, 8, 4], [135, 7, 4], [139, 29, 4], [143, 7, 3]]
    print("camera sequence: ", path)
    # log, frame by frame, which camera films which character
    t = 0
    for cam in path:
        start = cam[0]
        cam_id = cam[1]
        end = start + cam[2]
        for i in range(start, end):
            cam_setting = project_data.defaultCameras[cam_id]
            cam_index = cam_setting['camIndex']
            cam_char = cam_setting['charIndex']
            talking_char = project_data.talking_char_t[t]
            print("Use Cam: {} shotting Character {} while Character {} is talking"
                  .format(cam_index, cam_char, talking_char))
            t += 1
    return path
if __name__ == "__main__":
    # NOTE(review): camera_optimization_main takes (project_data,
    # cost_matrix); calling it with the bare integer 32 raises TypeError.
    # Confirm the intended ad-hoc entry point.
    camera_optimization_main(32)
|
""""
https://leetcode.com/problems/maximum-units-on-a-truck/
You are assigned to put some amount of boxes onto one truck. You are given a 2D array boxTypes, where boxTypes[i] = [numberOfBoxesi, numberOfUnitsPerBoxi]:
numberOfBoxesi is the number of boxes of type i.
numberOfUnitsPerBoxi is the number of units in each box of the type i.
You are also given an integer truckSize, which is the maximum number of boxes that can be put on the truck. You can choose any boxes to put on the truck as long as the number of boxes does not exceed truckSize.
Return the maximum total number of units that can be put on the truck.
Input: boxTypes = [[1,3],[2,2],[3,1]], truckSize = 4
Output: 8
Explanation: There are:
- 1 box of the first type that contains 3 units.
- 2 boxes of the second type that contain 2 units each.
- 3 boxes of the third type that contain 1 unit each.
You can take all the boxes of the first and second types, and one box of the third type.
The total number of units will be = (1 * 3) + (2 * 2) + (1 * 1) = 8.
"""
from typing import List
class Solution:
    """Greedy: load boxes with the highest units-per-box first."""

    def maximumUnits(self, boxTypes: List[List[int]], truckSize: int) -> int:
        """Maximum total units loadable, given [count, units_per_box]
        pairs and a capacity of truckSize boxes.

        Leftover debug prints from the original have been removed.
        """
        # Aggregate box counts by units-per-box.
        counts = {}
        for count, units in boxTypes:
            counts[units] = counts.get(units, 0) + count
        total = 0
        # Take the densest boxes first until the truck is full.
        for units in sorted(counts, reverse=True):
            if truckSize <= 0:
                break
            take = min(counts[units], truckSize)
            total += units * take
            truckSize -= take
        return total
# Ad-hoc demo runs (these execute on import of this module).
boxTypes = [[1,3],[2,2],[3,1]]
truckSize = 4
print ("Input - Box Types : {}, Truck Size : {}".format(boxTypes, truckSize))
ans = Solution().maximumUnits(boxTypes, truckSize)
print ("Solution - {}".format(ans))
boxTypes = [[5,10],[2,5],[4,7],[3,9]]
truckSize = 10
print ("Input - Box Types : {}, Truck Size : {}".format(boxTypes, truckSize))
ans = Solution().maximumUnits(boxTypes, truckSize)
print ("Solution - {}".format(ans))
boxTypes = [[2,1],[4,4],[3,1],[4,1],[2,4],[3,4],[1,3],[4,3],[5,3],[5,3]]
truckSize = 13
print ("Input - Box Types : {}, Truck Size : {}".format(boxTypes, truckSize))
ans = Solution().maximumUnits(boxTypes, truckSize)
print ("Solution - {}".format(ans))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.