# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
from dataclasses import dataclass
from pants.backend.java.bsp.spec import JavacOptionsItem, JavacOptionsParams, JavacOptionsResult
from pants.backend.java.target_types import JavaFieldSet, JavaSourceField
from pants.base.build_root import BuildRoot
from pants.bsp.protocol import BSPHandlerMapping
from pants.bsp.spec.base import BuildTargetIdentifier
from pants.bsp.util_rules.lifecycle import BSPLanguageSupport
from pants.bsp.util_rules.targets import (
BSPBuildTargetsMetadataRequest,
BSPBuildTargetsMetadataResult,
BSPCompileRequest,
BSPCompileResult,
BSPResourcesRequest,
BSPResourcesResult,
)
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.rules import collect_rules, rule
from pants.engine.target import FieldSet
from pants.engine.unions import UnionRule
from pants.jvm.bsp.compile import _jvm_bsp_compile, jvm_classes_directory
from pants.jvm.bsp.compile import rules as jvm_compile_rules
from pants.jvm.bsp.resources import _jvm_bsp_resources
from pants.jvm.bsp.resources import rules as jvm_resources_rules
from pants.jvm.compile import ClasspathEntryRequestFactory
from pants.jvm.target_types import JvmResolveField
LANGUAGE_ID = "java"
_logger = logging.getLogger(__name__)
class JavaBSPLanguageSupport(BSPLanguageSupport):
language_id = LANGUAGE_ID
can_compile = True
can_provide_resources = True
@dataclass(frozen=True)
class JavaMetadataFieldSet(FieldSet):
required_fields = (JavaSourceField, JvmResolveField)
source: JavaSourceField
resolve: JvmResolveField
class JavaBSPBuildTargetsMetadataRequest(BSPBuildTargetsMetadataRequest):
language_id = LANGUAGE_ID
can_merge_metadata_from = ()
field_set_type = JavaMetadataFieldSet
resolve_prefix = "jvm"
resolve_field = JvmResolveField
@rule
async def bsp_resolve_java_metadata(
_: JavaBSPBuildTargetsMetadataRequest,
) -> BSPBuildTargetsMetadataResult:
return BSPBuildTargetsMetadataResult()
# -----------------------------------------------------------------------------------------------
# Javac Options Request
# See https://build-server-protocol.github.io/docs/extensions/java.html#javac-options-request
# -----------------------------------------------------------------------------------------------
class JavacOptionsHandlerMapping(BSPHandlerMapping):
method_name = "buildTarget/javacOptions"
request_type = JavacOptionsParams
response_type = JavacOptionsResult
@dataclass(frozen=True)
class HandleJavacOptionsRequest:
bsp_target_id: BuildTargetIdentifier
@dataclass(frozen=True)
class HandleJavacOptionsResult:
item: JavacOptionsItem
@rule
async def handle_bsp_java_options_request(
request: HandleJavacOptionsRequest,
build_root: BuildRoot,
) -> HandleJavacOptionsResult:
return HandleJavacOptionsResult(
JavacOptionsItem(
target=request.bsp_target_id,
options=(),
classpath=(),
class_directory=build_root.pathlib_path.joinpath(
f".pants.d/bsp/{jvm_classes_directory(request.bsp_target_id)}"
).as_uri(),
)
)
@rule
async def bsp_javac_options_request(request: JavacOptionsParams) -> JavacOptionsResult:
results = await MultiGet(
Get(HandleJavacOptionsResult, HandleJavacOptionsRequest(btgt)) for btgt in request.targets
)
return JavacOptionsResult(items=tuple(result.item for result in results))
# -----------------------------------------------------------------------------------------------
# Compile Request
# -----------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class JavaBSPCompileRequest(BSPCompileRequest):
field_set_type = JavaFieldSet
@rule
async def bsp_java_compile_request(
request: JavaBSPCompileRequest, classpath_entry_request: ClasspathEntryRequestFactory
) -> BSPCompileResult:
result: BSPCompileResult = await _jvm_bsp_compile(request, classpath_entry_request)
return result
# -----------------------------------------------------------------------------------------------
# Resources Request
# -----------------------------------------------------------------------------------------------
@dataclass(frozen=True)
class JavaBSPResourcesRequest(BSPResourcesRequest):
field_set_type = JavaFieldSet
@rule
async def bsp_java_resources_request(
request: JavaBSPResourcesRequest,
build_root: BuildRoot,
) -> BSPResourcesResult:
result: BSPResourcesResult = await _jvm_bsp_resources(request, build_root)
return result
def rules():
return (
*collect_rules(),
*jvm_compile_rules(),
*jvm_resources_rules(),
UnionRule(BSPLanguageSupport, JavaBSPLanguageSupport),
UnionRule(BSPBuildTargetsMetadataRequest, JavaBSPBuildTargetsMetadataRequest),
UnionRule(BSPHandlerMapping, JavacOptionsHandlerMapping),
UnionRule(BSPCompileRequest, JavaBSPCompileRequest),
UnionRule(BSPResourcesRequest, JavaBSPResourcesRequest),
)
|
# -*- coding: utf-8 -*-
from aluno import Aluno
|
# Author:Yichen Fan
# Date 12/8/2015
#ASS10
import pandas as pd
from main10 import *
import numpy as np
import matplotlib.pyplot as plt
def plot_figure(raw_data):
    uniq_date = raw_data.groupby(['DATE','GRADE']).size().unstack()  # grouped data by date and grade
    uniq_date = uniq_date.replace(np.nan, 0)  # replace NaN with 0 since sometimes there is no grade
    uniq_date.index = pd.to_datetime(uniq_date.index)  # convert date and time to a useful format
uniq_date.plot()
plt.title('Grade for restaurant in NYC')
plt.savefig('grade_improvement_NYC.pdf',format = 'pdf')
    plt.close()
def plot_figure_by_boro(uniq):
for boro in ['QUEENS','BRONX','MANHATTAN','BROOKLYN','STATEN ISLAND']:
per_boro = uniq[uniq['BORO']==boro] #divided data into five regions
boro_date=per_boro.groupby(['DATE','GRADE']).size().unstack()#grouped data by date and grade
boro_date.index=pd.to_datetime(boro_date.index)
boro_date.plot()
        plt.title('GRADE improvement of restaurant in ' + boro)
plt.savefig('grade_improvement_'+ boro+'.pdf',format = 'pdf')
plt.close()
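# A minimal usage sketch, not part of the original assignment: it assumes the inspection
# data is a CSV with 'DATE', 'GRADE' and 'BORO' columns, matching the column names used by
# the two plotting functions above; the file name here is hypothetical.
if __name__ == '__main__':
    raw_data = pd.read_csv('restaurant_grades.csv')  # hypothetical input file
    plot_figure(raw_data)           # overall grade counts over time
    plot_figure_by_boro(raw_data)   # one figure per borough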
|
'''Basics file of beautiful soup'''
from bs4 import BeautifulSoup
import requests
source = requests.get('https://github.com/sai-sondarkar').text
soup = BeautifulSoup(source, 'lxml')
#print(soup.prettify()) #prettify and prints the entire web page
#match = soup.title.text prints the title of the web page i.e., name
#match = soup.div
for details in soup.find_all('div', class_ ='js-profile-editable-area'):
username = soup.find('div', class_='vcard-names-container py-3 js-sticky js-user-profile-sticky-fields')
#print(details)
user_git_link_name = username.find('span', class_='p-nickname vcard-username d-block')
print("https://github.com/"+user_git_link_name.text) #prints the name in link
user_fullname = username.find('span', class_='p-name vcard-fullname d-block overflow-hidden')
print(user_fullname.text)
location = details.find('span', class_='p-label')
print(location.text) # prints location
mail = details.find('a', class_='u-email')
print(mail) # prints emailid
website = details.find('a', class_='vcard-detail pt-1 css-truncate css-truncate-target')
print(website.text)
'''header = soup.find('div', class_='UnderlineNav user-profile-nav js-sticky top-0')
count = header.find('nav')
print(header)'''
|
#encoding=utf-8
from openpyxl import load_workbook
import pandas as pd
from nameout import out
R = input("Enter year: ")
date=pd.date_range(R+'/01/01',R+'/12/31', freq='D')
print(date)
week = [int(i.strftime("%w")) for i in date]  # 0 means Sunday
dataframe = pd.DataFrame({'date':date,'week':week})
dataframe.to_excel('dates.xlsx',index=False)
# ------------------------------------------------------ Excel
wb=load_workbook('./dates.xlsx')
# print(wb.sheetnames)
sheet=wb["Sheet1"]
sheet.title='date'
# Iterate over the rows, printing A1, B1, C1
# for row in sheet.rows:
# for cell in row:
# print(cell.value)
# Iterate over the columns, printing A1, A2, A3
# for column in sheet.columns:
# for cell in column:
# print(cell.value)
for column in sheet['A']:
column.value=str(column.value).replace(' 00:00:00','')
# print(column.value)
for column in sheet['B']:
# print(column.value)
if column.value == 1:
column.value='星期一'
# print(column.value)
elif column.value ==2:
column.value = '星期二'
elif column.value ==3:
column.value = '星期三'
elif column.value ==4:
column.value = '星期四'
elif column.value ==5:
column.value = '星期五'
elif column.value ==6:
column.value = '星期六'
elif column.value ==0:
column.value = '星期日'
wb.save('./dates.xlsx')  # openpyxl writes the xlsx format, so keep the .xlsx extension
out()
|
# This code was written by Ruiqi Zhong on April 28 to map mRNA names to KEGG functions
from urllib.request import urlopen
directory = input()
output = directory + '.out'
f = open(directory, 'r')
w = open(output, 'w')
for l in f:
    l = l.strip()  # drop the trailing newline so the request URL is valid
    url = 'http://rest.kegg.jp/find/genes/' + l
    w.write(l + '\n')
try:
data = urlopen(url, timeout=100).read()
print(data)
w.write(str(data))
w.write('\n')
w.write('--------\n')
    except Exception:
        w.write('Not Found\n')
w.write('--------\n')
|
"""Markdown parsing functions"""
from collections import namedtuple
import re
ParsedIssue = namedtuple("ParsedIssue", ["issue_number", "closes", "org", "repo"])
def _make_issue_number_regex():
"""Create regex to extract issue number and other useful things"""
# See https://help.github.com/en/github/writing-on-github/autolinked-references-and-urls
closes_prefix = r"(?P<closes>(fixes|closes))?(\s+|^)"
    # org and repo are named differently because the regex does not allow two groups with the same name,
    # even if they are separated with a |
prefixes = [
r"(https://github.com/(?P<org1>[^/\s]+)/(?P<repo1>[^/\s]+)/issues/)",
r"(?P<org2>[^/\s]+)/(?P<repo2>[^/\s]+)#",
r"#",
r"GH-",
]
issue_number_pattern = r"(?P<issue_number>\d+)"
pattern = f"{closes_prefix}({'|'.join([f'{prefix}' for prefix in prefixes])}){issue_number_pattern}"
return re.compile(pattern, re.IGNORECASE)
REGEX = _make_issue_number_regex()
def parse_linked_issues(pull_request):
"""
Parse markdown for linked issues
Args:
pull_request (PullRequest): Information about a pull request
Returns:
list of ParsedIssue: parsed issue numbers and their context
"""
parsed_issues = []
for match in REGEX.finditer(pull_request.body):
groups = match.groupdict()
parsed_issues.append(ParsedIssue(
issue_number=int(groups.get("issue_number")),
# org1 and org2 match different groups in the regex. There should only be one which matches since they
# are separated with an |.
# If org or repo are None, that means the issue number was provided without that context, which means
# it's part of the same org/repo as the pull request.
org=groups.get("org1") or groups.get("org2") or pull_request.org,
repo=groups.get("repo1") or groups.get("repo2") or pull_request.repo,
closes=groups.get("closes") is not None,
))
return parsed_issues
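# A small usage sketch, not part of the original module: parse_linked_issues only needs an
# object with `body`, `org` and `repo` attributes, so a namedtuple stands in here for the
# project's real PullRequest type (which is defined elsewhere).
if __name__ == "__main__":
    FakePullRequest = namedtuple("FakePullRequest", ["body", "org", "repo"])
    pr = FakePullRequest(body="Fixes #12 and closes other-org/other-repo#34", org="my-org", repo="my-repo")
    for issue in parse_linked_issues(pr):
        print(issue)
    # Expected: issue 12 in my-org/my-repo and issue 34 in other-org/other-repo, both with closes=True.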
|
import z
import queue
import buy
import sliding
import statistics
debug = None
#debug = "BA"
if debug:
print ("debugging {}".format(debug))
start = 60
each = 10
istart = -1*start
req = start - 20
dates = z.getp("dates")
que = 12
firstdate = dates[istart*7]
print("firstdate : {}".format( firstdate ))
march_23_2020 = "2020-03-23"
def proc(astock, rank):
prev_close = None
gains = list()
closes = list()
specialvalue = None
for i, row in enumerate(buy.getRows(astock, firstdate)):
c_close = float(row[z.closekey])
if not specialvalue and row["Date"] == march_23_2020:
specialvalue = c_close
if i == 0 and row["Date"] != firstdate:
if debug:
print ("too new")
return None
if not prev_close:
prev_close = c_close
else:
change = round(c_close/prev_close, 3)
prev_close = c_close
gains.append(change)
buy.addSorted("a", change, i, 10)
closes.append(c_close)
sort = buy.getSorted("a")
buy.clearSorted("a")
if rank < 1000:
try:
change_1 = round(closes[-1]/closes[-2],3)
change_5 = round(closes[-1]/closes[-6],3)
change_s = round(closes[-1]/specialvalue,3)
buy.addSorted("change_1", change_1, astock)
buy.addSorted("change_5", change_5, astock)
buy.addSorted("change_s", change_s, astock)
except:
pass
# exit()
highs = list()
avgs = list()
mins = list()
for high,idx in sort[-5:]:
if debug:
print("idx : {}".format( idx ))
print("high: {}".format( high))
try:
highs.append(high)
highprice = closes[idx]
except:
continue
try:
after_changes = [ round(price/highprice,3) for price in closes[idx+1:idx+8] ]
mindrop = min(after_changes)
mins.append(mindrop)
avgs.append(round(statistics.mean(after_changes),3))
if debug:
print("closes : {}".format( closes[idx:idx+7] ))
print("after_changes : {}".format( after_changes ))
except:
pass
if debug:
print("avgs: {}".format( avgs))
print("highs : {}".format( highs ))
if not highs or not avgs:
return None
try:
hi = round(statistics.mean(highs),3)
lo = round(min(statistics.mean(mins), statistics.median(mins)),3)
except:
return None
return hi, lo
def procs():
stocks = [debug.upper()] if debug else z.getp("listofstocks")
low_target = dict()
highs = list()
lows = list()
for astock in stocks:
rank = buy.getMCRank(astock)
try:
a,b = proc(astock, rank)
low_target[astock] = a,b
except Exception as e:
            print("astock: {} error: {}".format(astock, e))
continue
if rank < 1000:
highs.append(a)
lows.append(b)
if highs and lows:
hi = round(statistics.mean(highs),3)
lo = round(statistics.mean(lows),3)
if not debug:
z.setp(low_target, "prob_drop", True)
print("hi : {}".format( hi ))
print("lo : {}".format( lo ))
if not debug:
buy.saveSorted("change_1")
buy.saveSorted("change_5")
buy.saveSorted("change_s")
if __name__ == '__main__':
procs()
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Beatport release and track search support to the autotagger
"""
import json
import re
from datetime import datetime, timedelta
from requests_oauthlib import OAuth1Session
from requests_oauthlib.oauth1_session import (TokenRequestDenied, TokenMissing,
VerifierMissing)
import beets
import beets.ui
from beets.autotag.hooks import AlbumInfo, TrackInfo
from beets.plugins import BeetsPlugin, MetadataSourcePlugin, get_distance
import confuse
from beets.util.id_extractors import beatport_id_regex
AUTH_ERRORS = (TokenRequestDenied, TokenMissing, VerifierMissing)
USER_AGENT = f'beets/{beets.__version__} +https://beets.io/'
class BeatportAPIError(Exception):
pass
class BeatportObject:
def __init__(self, data):
self.beatport_id = data['id']
self.name = str(data['name'])
if 'releaseDate' in data:
self.release_date = datetime.strptime(data['releaseDate'],
'%Y-%m-%d')
if 'artists' in data:
self.artists = [(x['id'], str(x['name']))
for x in data['artists']]
if 'genres' in data:
self.genres = [str(x['name'])
for x in data['genres']]
class BeatportClient:
_api_base = 'https://oauth-api.beatport.com'
def __init__(self, c_key, c_secret, auth_key=None, auth_secret=None):
""" Initiate the client with OAuth information.
For the initial authentication with the backend `auth_key` and
`auth_secret` can be `None`. Use `get_authorize_url` and
`get_access_token` to obtain them for subsequent uses of the API.
:param c_key: OAuth1 client key
:param c_secret: OAuth1 client secret
:param auth_key: OAuth1 resource owner key
:param auth_secret: OAuth1 resource owner secret
"""
self.api = OAuth1Session(
client_key=c_key, client_secret=c_secret,
resource_owner_key=auth_key,
resource_owner_secret=auth_secret,
callback_uri='oob')
self.api.headers = {'User-Agent': USER_AGENT}
def get_authorize_url(self):
""" Generate the URL for the user to authorize the application.
Retrieves a request token from the Beatport API and returns the
corresponding authorization URL on their end that the user has
to visit.
This is the first step of the initial authorization process with the
API. Once the user has visited the URL, call
:py:method:`get_access_token` with the displayed data to complete
the process.
:returns: Authorization URL for the user to visit
:rtype: unicode
"""
self.api.fetch_request_token(
self._make_url('/identity/1/oauth/request-token'))
return self.api.authorization_url(
self._make_url('/identity/1/oauth/authorize'))
def get_access_token(self, auth_data):
""" Obtain the final access token and secret for the API.
:param auth_data: URL-encoded authorization data as displayed at
the authorization url (obtained via
:py:meth:`get_authorize_url`) after signing in
:type auth_data: unicode
:returns: OAuth resource owner key and secret
:rtype: (unicode, unicode) tuple
"""
self.api.parse_authorization_response(
"https://beets.io/auth?" + auth_data)
access_data = self.api.fetch_access_token(
self._make_url('/identity/1/oauth/access-token'))
return access_data['oauth_token'], access_data['oauth_token_secret']
def search(self, query, release_type='release', details=True):
""" Perform a search of the Beatport catalogue.
:param query: Query string
:param release_type: Type of releases to search for, can be
'release' or 'track'
:param details: Retrieve additional information about the
search results. Currently this will fetch
the tracklist for releases and do nothing for
tracks
:returns: Search results
:rtype: generator that yields
py:class:`BeatportRelease` or
:py:class:`BeatportTrack`
"""
response = self._get('catalog/3/search',
query=query, perPage=5,
facets=[f'fieldType:{release_type}'])
for item in response:
if release_type == 'release':
if details:
release = self.get_release(item['id'])
else:
release = BeatportRelease(item)
yield release
elif release_type == 'track':
yield BeatportTrack(item)
def get_release(self, beatport_id):
""" Get information about a single release.
:param beatport_id: Beatport ID of the release
:returns: The matching release
:rtype: :py:class:`BeatportRelease`
"""
response = self._get('/catalog/3/releases', id=beatport_id)
if response:
release = BeatportRelease(response[0])
release.tracks = self.get_release_tracks(beatport_id)
return release
return None
def get_release_tracks(self, beatport_id):
""" Get all tracks for a given release.
:param beatport_id: Beatport ID of the release
:returns: Tracks in the matching release
:rtype: list of :py:class:`BeatportTrack`
"""
response = self._get('/catalog/3/tracks', releaseId=beatport_id,
perPage=100)
return [BeatportTrack(t) for t in response]
def get_track(self, beatport_id):
""" Get information about a single track.
:param beatport_id: Beatport ID of the track
:returns: The matching track
:rtype: :py:class:`BeatportTrack`
"""
response = self._get('/catalog/3/tracks', id=beatport_id)
return BeatportTrack(response[0])
def _make_url(self, endpoint):
""" Get complete URL for a given API endpoint. """
if not endpoint.startswith('/'):
endpoint = '/' + endpoint
return self._api_base + endpoint
def _get(self, endpoint, **kwargs):
""" Perform a GET request on a given API endpoint.
Automatically extracts result data from the response and converts HTTP
exceptions into :py:class:`BeatportAPIError` objects.
"""
try:
response = self.api.get(self._make_url(endpoint), params=kwargs)
except Exception as e:
raise BeatportAPIError("Error connecting to Beatport API: {}"
.format(e))
if not response:
raise BeatportAPIError(
"Error {0.status_code} for '{0.request.path_url}"
.format(response))
return response.json()['results']
class BeatportRelease(BeatportObject):
def __str__(self):
if len(self.artists) < 4:
artist_str = ", ".join(x[1] for x in self.artists)
else:
artist_str = "Various Artists"
return "<BeatportRelease: {} - {} ({})>".format(
artist_str,
self.name,
self.catalog_number,
)
def __repr__(self):
return str(self).encode('utf-8')
def __init__(self, data):
BeatportObject.__init__(self, data)
if 'catalogNumber' in data:
self.catalog_number = data['catalogNumber']
if 'label' in data:
self.label_name = data['label']['name']
if 'category' in data:
self.category = data['category']
if 'slug' in data:
self.url = "https://beatport.com/release/{}/{}".format(
data['slug'], data['id'])
self.genre = data.get('genre')
class BeatportTrack(BeatportObject):
def __str__(self):
artist_str = ", ".join(x[1] for x in self.artists)
return ("<BeatportTrack: {} - {} ({})>"
.format(artist_str, self.name, self.mix_name))
def __repr__(self):
return str(self).encode('utf-8')
def __init__(self, data):
BeatportObject.__init__(self, data)
if 'title' in data:
self.title = str(data['title'])
if 'mixName' in data:
self.mix_name = str(data['mixName'])
self.length = timedelta(milliseconds=data.get('lengthMs', 0) or 0)
if not self.length:
try:
min, sec = data.get('length', '0:0').split(':')
self.length = timedelta(minutes=int(min), seconds=int(sec))
except ValueError:
pass
if 'slug' in data:
self.url = "https://beatport.com/track/{}/{}" \
.format(data['slug'], data['id'])
self.track_number = data.get('trackNumber')
self.bpm = data.get('bpm')
self.initial_key = str(
(data.get('key') or {}).get('shortName')
)
# Use 'subgenre' and if not present, 'genre' as a fallback.
if data.get('subGenres'):
self.genre = str(data['subGenres'][0].get('name'))
elif data.get('genres'):
self.genre = str(data['genres'][0].get('name'))
class BeatportPlugin(BeetsPlugin):
data_source = 'Beatport'
id_regex = beatport_id_regex
def __init__(self):
super().__init__()
self.config.add({
'apikey': '57713c3906af6f5def151b33601389176b37b429',
'apisecret': 'b3fe08c93c80aefd749fe871a16cd2bb32e2b954',
'tokenfile': 'beatport_token.json',
'source_weight': 0.5,
})
self.config['apikey'].redact = True
self.config['apisecret'].redact = True
self.client = None
self.register_listener('import_begin', self.setup)
def setup(self, session=None):
c_key = self.config['apikey'].as_str()
c_secret = self.config['apisecret'].as_str()
# Get the OAuth token from a file or log in.
try:
with open(self._tokenfile()) as f:
tokendata = json.load(f)
except OSError:
# No token yet. Generate one.
token, secret = self.authenticate(c_key, c_secret)
else:
token = tokendata['token']
secret = tokendata['secret']
self.client = BeatportClient(c_key, c_secret, token, secret)
def authenticate(self, c_key, c_secret):
# Get the link for the OAuth page.
auth_client = BeatportClient(c_key, c_secret)
try:
url = auth_client.get_authorize_url()
except AUTH_ERRORS as e:
self._log.debug('authentication error: {0}', e)
raise beets.ui.UserError('communication with Beatport failed')
beets.ui.print_("To authenticate with Beatport, visit:")
beets.ui.print_(url)
# Ask for the verifier data and validate it.
data = beets.ui.input_("Enter the string displayed in your browser:")
try:
token, secret = auth_client.get_access_token(data)
except AUTH_ERRORS as e:
self._log.debug('authentication error: {0}', e)
raise beets.ui.UserError('Beatport token request failed')
# Save the token for later use.
self._log.debug('Beatport token {0}, secret {1}', token, secret)
with open(self._tokenfile(), 'w') as f:
json.dump({'token': token, 'secret': secret}, f)
return token, secret
def _tokenfile(self):
"""Get the path to the JSON file for storing the OAuth token.
"""
return self.config['tokenfile'].get(confuse.Filename(in_app_dir=True))
def album_distance(self, items, album_info, mapping):
"""Returns the Beatport source weight and the maximum source weight
for albums.
"""
return get_distance(
data_source=self.data_source,
info=album_info,
config=self.config
)
def track_distance(self, item, track_info):
"""Returns the Beatport source weight and the maximum source weight
for individual tracks.
"""
return get_distance(
data_source=self.data_source,
info=track_info,
config=self.config
)
def candidates(self, items, artist, release, va_likely, extra_tags=None):
"""Returns a list of AlbumInfo objects for beatport search results
matching release and artist (if not various).
"""
if va_likely:
query = release
else:
query = f'{artist} {release}'
try:
return self._get_releases(query)
except BeatportAPIError as e:
self._log.debug('API Error: {0} (query: {1})', e, query)
return []
def item_candidates(self, item, artist, title):
"""Returns a list of TrackInfo objects for beatport search results
matching title and artist.
"""
query = f'{artist} {title}'
try:
return self._get_tracks(query)
except BeatportAPIError as e:
self._log.debug('API Error: {0} (query: {1})', e, query)
return []
def album_for_id(self, release_id):
"""Fetches a release by its Beatport ID and returns an AlbumInfo object
or None if the query is not a valid ID or release is not found.
"""
self._log.debug('Searching for release {0}', release_id)
release_id = self._get_id('album', release_id, self.id_regex)
if release_id is None:
self._log.debug('Not a valid Beatport release ID.')
return None
release = self.client.get_release(release_id)
if release:
return self._get_album_info(release)
return None
def track_for_id(self, track_id):
"""Fetches a track by its Beatport ID and returns a TrackInfo object
or None if the track is not a valid Beatport ID or track is not found.
"""
self._log.debug('Searching for track {0}', track_id)
match = re.search(r'(^|beatport\.com/track/.+/)(\d+)$', track_id)
if not match:
self._log.debug('Not a valid Beatport track ID.')
return None
bp_track = self.client.get_track(match.group(2))
if bp_track is not None:
return self._get_track_info(bp_track)
return None
def _get_releases(self, query):
"""Returns a list of AlbumInfo objects for a beatport search query.
"""
# Strip non-word characters from query. Things like "!" and "-" can
# cause a query to return no results, even if they match the artist or
# album title. Use `re.UNICODE` flag to avoid stripping non-english
# word characters.
query = re.sub(r'\W+', ' ', query, flags=re.UNICODE)
# Strip medium information from query, Things like "CD1" and "disk 1"
# can also negate an otherwise positive result.
query = re.sub(r'\b(CD|disc)\s*\d+', '', query, flags=re.I)
albums = [self._get_album_info(x)
for x in self.client.search(query)]
return albums
def _get_album_info(self, release):
"""Returns an AlbumInfo object for a Beatport Release object.
"""
va = len(release.artists) > 3
artist, artist_id = self._get_artist(release.artists)
if va:
artist = "Various Artists"
tracks = [self._get_track_info(x) for x in release.tracks]
return AlbumInfo(album=release.name, album_id=release.beatport_id,
beatport_album_id=release.beatport_id,
artist=artist, artist_id=artist_id, tracks=tracks,
albumtype=release.category, va=va,
year=release.release_date.year,
month=release.release_date.month,
day=release.release_date.day,
label=release.label_name,
catalognum=release.catalog_number, media='Digital',
data_source=self.data_source, data_url=release.url,
genre=release.genre)
def _get_track_info(self, track):
"""Returns a TrackInfo object for a Beatport Track object.
"""
title = track.name
if track.mix_name != "Original Mix":
title += f" ({track.mix_name})"
artist, artist_id = self._get_artist(track.artists)
length = track.length.total_seconds()
return TrackInfo(title=title, track_id=track.beatport_id,
artist=artist, artist_id=artist_id,
length=length, index=track.track_number,
medium_index=track.track_number,
data_source=self.data_source, data_url=track.url,
bpm=track.bpm, initial_key=track.initial_key,
genre=track.genre)
def _get_artist(self, artists):
"""Returns an artist string (all artists) and an artist_id (the main
artist) for a list of Beatport release or track artists.
"""
return MetadataSourcePlugin.get_artist(
artists=artists, id_key=0, name_key=1
)
def _get_tracks(self, query):
"""Returns a list of TrackInfo objects for a Beatport query.
"""
bp_tracks = self.client.search(query, release_type='track')
tracks = [self._get_track_info(x) for x in bp_tracks]
return tracks
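# A minimal sketch of the manual OAuth handshake described in the docstrings above; it is not
# part of the plugin itself (the plugin drives this flow from `authenticate`), and the
# consumer key/secret values and search query are placeholders.
if __name__ == '__main__':
    client = BeatportClient('my-consumer-key', 'my-consumer-secret')  # placeholder credentials
    print(client.get_authorize_url())  # visit this URL and sign in
    verifier = input('Paste the data displayed by Beatport: ')
    token, secret = client.get_access_token(verifier)
    authed = BeatportClient('my-consumer-key', 'my-consumer-secret', token, secret)
    for release in authed.search('some artist some release'):  # placeholder query
        print(release)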
|
from django.db import models
class Sizes (models.Model):
id_size = models.AutoField(primary_key=True)
size_name = models.CharField(max_length=45)
price_coef = models.FloatField(null=True, default=None)
|
#! /usr/bin/python3
import sys
import pdb
import argparse
import xml_utils as u
import datetime
from collections import defaultdict
from argparse import RawTextHelpFormatter
# from ordereddefaultdict import OrderedDefaultdict
##------------------------------------------------------------
## can be called with:
## partition_files 80 20 -out xxx *.xml dirs
## generate_folds 5 -out yyy *.xml dirs
##------------------------------------------------------------
def main (argv) :
parser = argparse.ArgumentParser(description='Partitions objects into x and y percents.\nIf shuffle is set to False, each label will be split as specified.\nIf -group is set, will use db argument to partition after grouped by date.\nx and y can be set to 100 and 0, respectively, for no partitioning (to combine multiple XMLs.)\n\nExample: generate_partition -shuffle False -file faces 80 20 images.xml\n\t generate_partition -group all.csv 75 25 chips.xml',
formatter_class=RawTextHelpFormatter)
# parser.formatter.max_help_position = 50
parser.add_argument ('x', default=80,
help='Percent of first set.')
parser.add_argument ('y', default=20,
help='Percent of second set.')
parser.add_argument ('input', nargs='+')
parser.add_argument ('-shuffle', '--shuffle', default=True,
        help='Determines whether all objects are mixed before partition. If set to False, each label will be split as specified. Defaults to True.')
parser.add_argument ('--test_count_minimum', default=0,
help='Minimum test images per label, overrides partition percentage. Defaults to 0.')
parser.add_argument ('-image_count_minimum', '--image_count_minimum', default=0,
help='Minimum number of images per label. Defaults to 0.')
parser.add_argument ('-image_size_minimum', '--image_size_minimum', default=0,
help='Minimum size of image. Defaults to 0.')
parser.add_argument ('-filetype', '--filetype', default="chips",
help='Type of file to partition. <faces|chips>. Defaults to "chips".')
parser.add_argument ('-group', '--group_date_db',
help='Group images with same date and label together before partitioning using csv (\';\' separated) for date/label information.')
parser.add_argument ('-out', '--output', default="",
help='Output file basename. Defaults to "part_<date><time>_"')
parser.add_argument ('-v', '--verbosity', type=int, default=1,
choices=[0, 1, 2], help=argparse.SUPPRESS)
u.set_argv (argv)
args = parser.parse_args()
verbose = args.verbosity
### --------------
# TODO check & WARN that if shuffle is set, will ignore
# image_count_minimum, test_count_minimum
### --------------
### -------------- validate arguments -------- ###
try:
x = int (args.x)
except ValueError:
print ('Error: number expected for x, got:', args.x)
return
try:
y = int (args.y)
except ValueError:
print ('Error: number expected for y, got:', args.y)
return
if x + y != 100 :
print("Error: (x + y) needs to be 100")
return
filetypes = ['chips', 'faces']
filetype = args.filetype
if filetype not in filetypes :
print('unrecognized filetype :', filetype, 'should be one of:', filetypes)
return
if not args.output :
args.output = datetime.datetime.now().strftime("part_%Y%m%d_%H%M")
do_grouping = False
if args.group_date_db != None :
do_grouping = True
if verbose > 2 :
print()
print("x: ", x)
print("y: ", y)
print("sum: ", x + y)
if do_grouping :
print ("------- partitioning grouped by date ------")
print("group date db: ", args.group_date_db)
print("output: ", args.output)
print("input: ", args.input)
xml_files = u.generate_xml_file_list (args.input)
u.generate_partitions (xml_files, x, y, args.output, args.shuffle, int(args.image_count_minimum), int(args.test_count_minimum), int (args.image_size_minimum), filetype, do_grouping, args.group_date_db)
if __name__ == "__main__":
main (sys.argv)
|
from tkinter import messagebox
from tkinter import *
from tkinter.filedialog import *
from tkinter.colorchooser import *
# Notepad application
class Application(Frame):
def __init__(self, master=None):
        super().__init__(master)  # super refers to the parent class definition; call the parent constructor
self.master = master
self.pack()
self.createWidget()
    # Create the widgets
def createWidget(self):
        # Create the main menu
menubar = Menu(root)
        # Create the submenus
menuFile = Menu(menubar)
menuEdit = Menu(menubar)
menuHelp = Menu(menubar)
        # Add the submenus to the main menu bar
menubar.add_cascade(label="文件(F)", menu=menuFile)
menubar.add_cascade(label="编辑(E)", menu=menuEdit)
menubar.add_cascade(label="帮助(H)", menu=menuHelp)
        # Add the menu items
menuFile.add_command(label="新建", accelerator="ctrl+n", command=self.newfile)
menuFile.add_command(label="打开", accelerator="ctrl+o", command=self.openfile)
menuFile.add_command(label="保存", accelerator="ctrl+s", command=self.savefile)
        menuFile.add_separator()  # add a separator line
menuFile.add_command(label="退出", accelerator="ctrl+q", command=self.exit)
        # Attach the main menu bar to the root window
root["menu"] = menubar
        # Add keyboard shortcuts
root.bind("<Control-n>", lambda event: self.newfile())
        # Text editing area
self.textpad = Text(root, width=50, height=30)
self.textpad.pack()
        # Create the context menu
self.contextMenu = Menu(root)
self.contextMenu.add_command(label="背景颜色", command=self.openAskColor)
        # Bind the context menu to the right mouse button
root.bind("<Button-3>", self.createContextMenu)
    def openfile(self):
        self.textpad.delete("1.0", "end")  # clear the contents of the Text widget
        with askopenfile(title="打开文本文件") as f:
            content = f.read()
            self.textpad.insert(INSERT, content)
            self.filename = f.name
        print(content)
def savefile(self):
with open(self.filename, "w") as f:
c = self.textpad.get(1.0, END)
f.write(c)
def exit(self):
root.quit()
def newfile(self):
self.filename = asksaveasfilename(title="另存为", initialfile="未命名.txt",
filetypes=[("文本文档", "*.txt")], defaultextension=".txt")
self.savefile()
def openAskColor(self):
s1 = askcolor(color="red", title="选择背景颜色")
self.textpad.config(bg=s1[1])
def test(self):
pass
def createContextMenu(self, event):
        # Show the menu at the coordinates of the right mouse click
self.contextMenu.post(event.x_root, event.y_root)
root = Tk()
root.geometry("400x100+200+300")
root.title("zjf")
app = Application(master=root)
root.mainloop()
|
from influxdb import InfluxDBClient
import Adafruit_DHT
import socket
import time
DHT_SENSOR = Adafruit_DHT.DHT22
DHT_PIN = 4
client = InfluxDBClient(host='192.168.0.101', port=8086, username='marc', password='marc')
client.create_database('sensors')
client.switch_database('sensors')
measurement = "rpi-dht22"
location = socket.gethostname()
while True:
try:
# Print the values to the serial port
humidity, temperature = Adafruit_DHT.read_retry(DHT_SENSOR, DHT_PIN)
print("Temp: {:.1f} C Humidity: {}% ".format(temperature, humidity))
        iso = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime())  # ISO 8601 UTC timestamp for InfluxDB
data = [
{
"measurement": measurement,
"tags": {
"location": location,
},
"time": iso,
"fields": {
"temperature": temperature,
"humidity": humidity
}
}
]
client.write_points(data)
except RuntimeError as error:
# Errors happen fairly often, DHT's are hard to read, just keep going
print(error.args[0])
time.sleep(60)
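# A small verification sketch (not in the original script): the same InfluxDBClient can read
# the points back, which is handy for checking that writes are arriving. It assumes the host,
# credentials and database configured above, and is left commented out because the loop above
# never exits.
# results = client.query('SELECT * FROM "rpi-dht22" ORDER BY time DESC LIMIT 5')
# print(list(results.get_points()))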
|
import sys
from bioblend import galaxy
from bioblend.galaxy.users import UserClient
args = sys.argv
address = args[1]
master_key = args[2]
gi = galaxy.GalaxyInstance(url=address, key=master_key)
users = UserClient(gi)
u = users.get_users()[0]
key = users.create_user_apikey(u.get('id'))
print(key)
|
from flask_wtf import Form
from wtforms import StringField, SubmitField, IntegerField, HiddenField, TimeField
from wtforms import validators
class TaskFrom(Form):
name = StringField("Name: ", [
validators.DataRequired("Please enter student name."),
validators.Length(3, 255, "Name should be from 3 to 255 symbols")
])
discipline_name = StringField("Discipline: ", [
validators.DataRequired("Please enter discipline name."),
validators.Length(3, 255, "Name should be from 3 to 255 symbols")
])
    value = IntegerField("Value: ", [
        validators.DataRequired("Please enter value.")
    ])
    deadline = TimeField("Deadline: ", [
        validators.DataRequired("Please enter deadline.")
    ], format='%H:%M:%S')
old_name = HiddenField()
submit = SubmitField("Save")
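# A minimal usage sketch, not part of the original module, showing how the form would
# typically be consumed in a Flask view; the route, template name, app wiring and the
# save_task helper are hypothetical, so the sketch is left as comments.
#
# @app.route("/task", methods=["GET", "POST"])
# def add_task():
#     form = TaskFrom(request.form)
#     if request.method == "POST" and form.validate():
#         save_task(form.name.data, form.discipline_name.data, form.value.data, form.deadline.data)
#     return render_template("task.html", form=form)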
|
#!/usr/bin/env /share/share1/share_dff/anaconda3/bin/python
"""
Author: Lira Mota, lmota20@gsb.columbia.edu
Course: Big Data in Finance (Spring 2019)
Date: 2019-02
Code:
Creates stock_monthly pandas data frame.
Import CRSP MSE and MSF.
------
Dependence:
fire_pytools
"""
# %% Packages
import sys
sys.path.append('/Users/manshizou/PycharmProjects/big/')
import fire_pytools
import wrds
import pandas as pd
import numpy as np
import datetime
import time
from pandas.tseries.offsets import MonthEnd
from fire_pytools.import_wrds.crsp_sf import *
from fire_pytools.utils.post_event_nan import *
def calculate_melag(mdata):
"""
Parameters:
------------
mdata: data frame
crsp monthly data with cols permno, date as index and lag_me column
Notes:
------
If ME is missing, we do not exclude stock, but rather keep it in with last non-missing MElag.
The stock will be excluded if:
(i) Delisted;
(ii) Have a missing ME in the moment of portfolio construction.
This is different than Ken's method
EXAMPLE:
--------
there seem to be 12 stocks with missing PRC and thus missing ME in July 1926.
Thus, our number of firms drops from 428 to 416.
Fama and French report 427 in both July and August, so they also do not seem to exclude these
rather they probably use the previous MElag for weight and must assume some return in the following month.
The whole paragraph from the note on Ken French's website:
----------------------------------------------------------
"In May 2015, we revised the method for computing daily portfolio returns
to match more closely the method for computing monthly portfolio returns.
Daily files produced before May 2015 drop stocks from a portfolio
(i) the next time the portfolio is reconstituted, at the end of June, regardless of the CRSP delist date or
(ii) during any period in which they are missing prices for more than 10 consecutive trading days.
Daily files produced after May 2015 drop stocks from a portfolio
(i) immediately after their CRSP delist date or
(ii) during any period in which they are missing prices for more than 200 consecutive trading days. "
"""
required_cols = ['lag_me', 'lag_dlret']
    assert set(required_cols).issubset(mdata.columns), "Required columns: {}.".format(', '.join(required_cols))
df = mdata[required_cols].copy()
df['melag'] = df.groupby('permno').lag_me.fillna(method='pad')
df.reset_index(inplace=True)
# Fill na after delisting
df = post_event_nan(df=df, event=df.lag_dlret.notnull(), vars=['melag'], id_vars=['permno', 'edate'])
df.set_index(['permno', 'edate'], inplace=True)
return df[['melag']]
def calculate_cumulative_returns(mdata, tt, mp): #TODO: to be completed
"""
    Calculate past returns for momentum strategy
Parameters:
------------
mdata: data frame
crsp monthly data with cols permno, date as index.
tt: int
        number of periods to cumulate returns
    mp: int
minimum number of periods. Default tt/2
"""
start_time = time.time()
required_cols = ['retadj']
assert set(required_cols).issubset(mdata.columns), "Required columns: {}.".format(', '.join(required_cols))
df = mdata[required_cols].copy()
df['retadj'] = df['retadj']+1
df['ret'] = df['retadj'].isnull()
df.reset_index(level=0, inplace=True)
cret = df.groupby('permno')['retadj'].rolling(window=tt, min_periods=mp).apply(np.nanprod, raw=True)
cret_roll_back = df.groupby('permno')['retadj'].rolling(window=2, min_periods=1).apply(np.nanprod, raw=True)
cret_fin = cret/cret_roll_back
cret_fin = cret_fin-1
    cret = cret_fin.to_frame('ret111')
#unique = len(df['permno'].unique())
cret_copy = cret.copy()
cret_copy.reset_index(inplace=True)
unique_permno=pd.DataFrame(cret_copy.groupby(cret_copy.edate.dt.year)['permno'].unique())
unique_no = unique_permno['permno'].apply(lambda x: len(x))
cret_copy['isnull'] = cret_copy['ret111'].isnull()
missing = cret_copy.groupby(cret_copy.edate.dt.year)['isnull'].sum()
max_per_year = cret_copy.groupby(cret_copy.edate.dt.year)['ret111'].max()
min_per_year = cret_copy.groupby(cret_copy.edate.dt.year)['ret111'].min()
average_per_year = cret_copy.groupby(cret_copy.edate.dt.year)['ret111'].mean()
print("Time to calculate %d months past returns: %s seconds" % (tt, str(round(time.time() - start_time, 2))))
return cret,missing,unique_no,max_per_year, min_per_year, average_per_year
#main function
def main(save_out=True):
# %% Set Up
db = wrds.Connection(wrds_username='mzou20') # make sure to configure wrds connector before hand.
DATAPATH = "/Users/manshizou/Documents/Computingforbusiness/hw4_output_1" # where to save output?
start_time = time.time()
# %% Download CRSP data
varlist = ['dlret', 'dlretx', 'exchcd', 'naics', 'permco', 'prc', 'ret', 'shrcd', 'shrout', 'siccd', 'ticker']
start_date = '1925-01-01' # '2017-01-01' #
end_date = datetime.date.today().strftime("%Y-%m-%d")
freq = 'monthly' # 'daily'
permno_list = None #[10001, 14593, 10107]
shrcd_list = [10, 11]
exchcd_list = [1, 2, 3]
crspm = crsp_sf(varlist,
start_date,
end_date,
freq=freq,
permno_list=permno_list,
shrcd_list=shrcd_list,
exchcd_list=exchcd_list,
db=db)
query = "SELECT caldt as date, t30ret as rf FROM crspq.mcti"
rf = db.raw_sql(query, date_cols=['date'])
del query
# %% Create variables
# Rankyear
# Rankyear is the year where we ranked the stock, e.g., for the return of a stock in January 2001,
# rankyear is 2000, because we ranked it in June 2000
crspm['rankyear'] = crspm.date.dt.year
crspm.loc[crspm.date.dt.month <= 6, 'rankyear'] = crspm.loc[crspm.date.dt.month <= 6, 'rankyear'] - 1
# Returns adjusted for delisting
crspm['retadj'] = ((1 + crspm['ret'].fillna(0)) * (1 + crspm['dlret'].fillna(0)) - 1)
crspm.loc[crspm[['ret', 'dlret']].isnull().all(axis=1), 'retadj'] = np.nan
# Create Market Equity (ME)
# SHROUT is the number of publicly held shares, recorded in thousands. ME will be reported in 1,000,000 ($10^6$).
# If the stock is delisted, we set ME to NaN.
# Also, some companies have multiple shareclasses (=PERMNOs).
# To get the company ME, we need to calculate the sum of ME over all shareclasses for one company (=PERMCO).
# This is used for sorting, but not for weights.
crspm['me'] = abs(crspm['prc']) * (crspm['shrout'] / 1000)
# Create MEsum
crspm['mesum_permco'] = crspm.groupby(['date', 'permco']).me.transform(np.sum, min_count=1)
# Adjust for delisting
crspm.loc[crspm.dlret.notnull(), 'me'] = np.nan
crspm.loc[crspm.dlret.notnull(), 'mesum'] = np.nan
# Resample data (This takes about 9 min)
# CRSP data has skipping months.
# Create line to missing months to facilitate the calculation of lag/past returns
start_time1 = time.time()
crspm['edate'] = crspm['date'] + MonthEnd(0)
crspm.sort_values(['permno', 'edate'], inplace=True)
pk_integrity(crspm, ['permno', 'edate'])
crspm.set_index('edate', inplace=True)
# Resample to take care of missing months
scrspm = crspm[['permno', 'me', 'dlret']].groupby('permno').resample('M').mean().drop(columns='permno') # mean maintains nan
scrspm = scrspm.groupby('permno').shift(1)
scrspm.columns = ['lag_' + i for i in scrspm.columns]
crspm.reset_index(inplace=True)
crspm.set_index(['permno', 'edate'], inplace=True)
crspm = crspm.join(scrspm, how='outer')
print("Time to resample data: %s seconds" % str(time.time() - start_time1))
# Create MElag
crspm['melag'] = calculate_melag(crspm)
# TODO: Calculate past 11, 1 returns
cum_return,missing,unique_no,max_per_year,min_per_year,average_per_year = calculate_cumulative_returns(crspm, 13, 6)
crspm = crspm.join(cum_return)
# Delete rows that were not in the original data set
crspm.dropna(subset=['date'], inplace=True)
crspm.drop(columns=[x for x in crspm.columns if 'lag_' in x], inplace=True)
crspm.sort_values(['permno', 'date'], inplace=True)
print("Time to create CRSP monthly: %s seconds" % str(time.time() - start_time))
if save_out:
crspm.to_pickle(DATAPATH+'stock_monthly.pkl')
print("Successfully saved stock_monthly.")
return crspm,missing,unique_no,max_per_year, min_per_year,average_per_year
# %% Main
if __name__ == '__main__':
crspm = main()
|
from sklearn.metrics import cohen_kappa_score
import InputOutput as io
data = io.csvIn(r'Training\Final\Labeled.csv', skip_first=True)
labels1 = []
labels2 = []
for row in data:
labels1.append(row[4])
labels2.append(row[5])
print(cohen_kappa_score(labels1, labels2))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time: 2018/8/5 11:14
# @Author: Joey66666
# @Software VSCode
import json
import logging
import time
import base64
from decimal import *
from flask import Flask, jsonify, abort, request
from flask_sqlalchemy import SQLAlchemy
from werkzeug.security import generate_password_hash, check_password_hash
from sqlalchemy import create_engine, MetaData, Table, Column, Date, Integer, String, ForeignKey
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.dialects.mysql import \
BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, \
    DATETIME, DECIMAL, DOUBLE, ENUM, FLOAT, INTEGER, \
LONGBLOB, LONGTEXT, MEDIUMBLOB, MEDIUMINT, MEDIUMTEXT, NCHAR, \
NUMERIC, NVARCHAR, REAL, SET, SMALLINT, TEXT, TIME, TIMESTAMP, \
TINYBLOB, TINYINT, TINYTEXT, VARBINARY, VARCHAR, YEAR
app = Flask('test')
# Configure the database connection
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+mysqlconnector://user:passwd@IP:port/DB'
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
db = SQLAlchemy(app)
@app.route('/')
def hello():
return "LeaveMiniprogram-Api page"
@app.route("/login", methods=['GET', 'POST'])
def login():
if (request.method == 'POST'):
if not (request.json):
return jsonify('not json')
else:
data = request.get_json()
rec_id = data['userId']
rec_pwd = data['userPwd']
if len(str(rec_id)) == 6:
db_data = search_t(str(rec_id))
if (db_data) is None:
return_data = dict()
return_data['flag'] = '0'
return jsonify(return_data)
else:
db_id = str(
Decimal(db_data['teacher_id']).quantize(Decimal('0')))
db_name = db_data['name']
db_pwd = db_data['passwd']
db_role = db_data['role']
db_school = db_data['school']
if check_password_hash(str(db_pwd),str(rec_pwd)) is not True:
return_data = dict()
return_data['flag'] = '1'
return_data['teacher_id'] = rec_id
return jsonify(return_data)
elif check_password_hash(str(db_pwd),str(rec_pwd)) is True:
return_data = dict()
db_id = str(
Decimal(db_data['teacher_id']).quantize(Decimal('0')))
return_data['teacher_id'] = db_id
return_data['name'] = db_name
return_data['role'] = db_role
return_data['school'] = db_school
return_data['flag'] = '2'
return (jsonify(return_data))
if len(str(rec_id)) == 9:
db_data = search_s(str(rec_id))
if (db_data) is None:
return_data = dict()
return_data['flag'] = '0'
return jsonify(return_data)
else:
db_id = str(
Decimal(db_data['student_id']).quantize(Decimal('0')))
db_name = db_data['name']
db_pwd = db_data['passwd']
db_class = db_data['s_class']
db_room = db_data['room']
if check_password_hash(str(db_pwd),str(rec_pwd)) is not True:
return_data = dict()
return_data['flag'] = '1'
return_data['student_id'] = rec_id
return jsonify(return_data)
elif check_password_hash(str(db_pwd),str(rec_pwd)) is True:
return_data = dict()
db_id = str(
Decimal(db_data['student_id']).quantize(Decimal('0')))
return_data['student_id'] = db_id
return_data['name'] = db_name
return_data['s_class'] = db_data['s_class']
return_data['room'] = db_data['room']
return_data['flag'] = '2'
return (jsonify(return_data))
else:
return_data = dict()
return_data['flag'] = '0'
return jsonify(return_data)
else:
return jsonify('not POST method')
@app.route("/student/ask_leave", methods=['GET', 'POST'])
def ask_leave():
if (request.method == 'POST'):
if not (request.json):
return jsonify('not json')
else:
try:
data = request.get_json()
student_id = data['student_id']
start_time = data['start_time']
end_time = data['end_time']
reason = data['reason']
flag = data['flag']
teacher1_id = data['teacher1_id']
teacher2_id = data['teacher2_id']
s_type = data['type']
ensure = data['ensure']
insert = insert_leave(student_id, start_time, end_time, reason, flag, teacher1_id, teacher2_id, s_type,
ensure)
if (insert == True):
return ('True')
else:
return ('False')
except Exception:
return("ERROR")
else:
return jsonify('not POST method')
@app.route("/student/search_leave", methods=['GET', 'POST'])
def search_s_leave():
if request.method == 'POST':
if not (request.json):
return jsonify('not json')
else:
data = request.get_json()
student_id = data['student_id']
result = search_stu_leave(student_id)
return (result)
else:
return jsonify("not POST")
@app.route("/student/search_leave_detail", methods=['GET', 'POST'])
def search_s_leave_detail():
if request.method == 'POST':
if not (request.json):
return jsonify('not json')
else:
data = request.get_json()
student_id = data['student_id']
result = search_stu_leave_detail(student_id)
return (result)
else:
return jsonify("not POST")
@app.route("/teacher/search_leave", methods=['GET', 'POST'])
def search_t_leave():
if request.method == 'POST':
if not (request.json):
return jsonify('not json')
else:
data = request.get_json()
teacher_id = data['teacher_id']
result = search_tea_leave(teacher_id)
return (result)
else:
return jsonify("not POST")
@app.route("/teacher2/search_leave", methods=['GET', 'POST'])
def search_t2_leave():
if request.method == 'POST':
if not (request.json):
return jsonify('not json')
else:
data = request.get_json()
teacher_id = data['teacher_id']
result = search_tea2_leave(teacher_id)
return (result)
else:
return jsonify("not POST")
@app.route("/teacher/update_leave", methods=['GET', 'POST'])
def update_leave():
if request.method == 'POST':
if not (request.json):
return jsonify('not json')
else:
data = request.get_json()
leave_num = data['leave_num']
flag = data['flag']
result = update_leave(leave_num, flag)
return (result)
else:
return jsonify("not POST")
@app.route("/teacher/search_id", methods=['GET', 'POST'])
def search_t_id():
if request.method == 'POST':
if not (request.json):
return jsonify('not json')
else:
data = request.get_json()
school = data['school']
result = search_t_id(school)
return (result)
else:
return jsonify("not POST")
@app.route("/change_pwd", methods=['GET', 'POST'])
def change_pwd():
if (request.method == 'POST'):
if not (request.json):
return jsonify('not json')
else:
try:
data = request.get_json()
rec_id = data['userId']
rec_pwd = data['userPwd']
change_passwd(rec_id, rec_pwd)
return jsonify("True")
except:
return jsonify("False")
else:
return jsonify('not POST method')
@app.route("/search_name",methods=['GET','POST'])
def search_name():
if (request.method == 'POST'):
if not (request.json):
return jsonify('not json')
else:
# try:
data = request.get_json()
rec_name = data['name']
try:
return_data=search_name(rec_name)
except:
return jsonify("name is not exist")
else:
return jsonify(return_data)
else:
return jsonify('not POST method')
class student(db.Model):
__tablename__ = 'student'
student_id = db.Column(db.DECIMAL(65), primary_key=True)
name = db.Column(db.VARCHAR(255))
s_class = db.Column(db.VARCHAR(255))
passwd = db.Column(db.VARCHAR(255))
room = db.Column(db.VARCHAR(255))
school = db.Column(db.VARCHAR(255))
def to_dict(self):
return {c.name: getattr(self, c.name, None) for c in self.__table__.columns}
class teacher(db.Model):
__tablename__ = 'teacher'
teacher_id = db.Column(db.DECIMAL(65), primary_key=True)
name = db.Column(db.VARCHAR(255))
passwd = db.Column(db.VARCHAR(255))
role = db.Column(db.VARCHAR(2))
school = db.Column(db.VARCHAR(255))
def to_dict(self):
return {c.name: getattr(self, c.name, None) for c in self.__table__.columns}
class leave(db.Model):
__tablename__ = 'leave'
leave_num = db.Column(db.DECIMAL(65), primary_key=True)
student_id = db.Column(db.DECIMAL(65))
start_time = db.Column(db.DATETIME)
end_time = db.Column(db.DATETIME)
reason = db.Column(db.VARCHAR(255))
flag = db.Column(db.VARCHAR(2))
teacher1_id = db.Column(db.DECIMAL(65))
teacher2_id = db.Column(db.DECIMAL(65))
type = db.Column(db.VARCHAR(2))
ensure = db.Column(db.VARCHAR(255))
def to_dict(self):
return {c.name: getattr(self, c.name, None) for c in self.__table__.columns}
# Convert a string to binary (8 bits per character)
def b_encode(s):
    return ''.join([bin(ord(c))[2:].zfill(8) for c in s])
# Convert binary back to a string (8 bits per character)
def b_decode(s):
    return ''.join([chr(int(s[i:i+8], 2)) for i in range(0, len(s), 8)])
def search_t(id):
result = teacher.query.filter_by(teacher_id=id).first()
if result is None:
return (None)
else:
return (result.to_dict())
def search_s(id):
result = student.query.filter_by(student_id=id).first()
if result is None:
return (None)
else:
return (result.to_dict())
def insert_leave(student_id, start_time, end_time, reason, flag, teacher1_id, teacher2_id, s_type, ensure):
engine = create_engine(
'mysql+mysqlconnector://user:passwd@IP:port/DB')
metadata = MetaData(engine)
    # Load the leave table
leave_table = Table('leave', metadata, autoload=True)
    try:  # create the insert object
ins = leave_table.insert()
        # Bind the values to insert
ins = ins.values(student_id=student_id, start_time=start_time, end_time=end_time, reason=reason, flag=flag,
teacher1_id=teacher1_id, teacher2_id=teacher2_id, type=s_type, ensure=ensure)
        # Connect to the engine
conn = engine.connect()
        # Execute the statement
result = conn.execute(ins)
return (True)
    except Exception as e:
        print(e)
        return (False)
def search_stu_leave(id):
result = leave.query.filter_by(student_id=id).all()
if result is None:
return (None)
else:
result_length = len(result)
return_data = {}
i = 0
while i < result_length:
temp_data = dict()
leave_num = result[i].to_dict()['leave_num']
start_time = str(result[i].to_dict()['start_time'])
end_time = str(result[i].to_dict()['end_time'])
reason = result[i].to_dict()['reason']
flag = result[i].to_dict()['flag']
teacher1_id = str(Decimal(result[i].to_dict()[
'teacher1_id']).quantize(Decimal('0')))
teacher2_id = str(Decimal(result[i].to_dict()[
'teacher2_id']).quantize(Decimal('0')))
data_type = result[i].to_dict()['type']
ensure = result[i].to_dict()['ensure']
temp_data['leave_num'] = leave_num
temp_data['start_time'] = start_time
temp_data['end_time'] = end_time
temp_data['reason'] = reason
temp_data['flag'] = flag
temp_data['teacher1_id'] = teacher1_id
temp_data['teacher2_id'] = teacher2_id
temp_data['type'] = data_type
temp_data['ensure'] = ensure
return_data[i] = temp_data
i += 1
return (jsonify(return_data))
def search_stu_leave_detail(id):
result = student.query.filter_by(student_id=id).all()
if result is None:
return (None)
else:
result_length = len(result)
return_data = {}
i = 0
while i < result_length:
temp_data = dict()
student_id = str(Decimal(result[i].to_dict()[
'student_id']).quantize(Decimal('0')))
name = result[i].to_dict()['name']
s_class = result[i].to_dict()['s_class']
room = result[i].to_dict()['room']
temp_data['student_id'] = student_id
temp_data['name'] = name
temp_data['class'] = s_class
temp_data['room'] = room
return_data[i] = temp_data
i += 1
return (jsonify(return_data))
def search_tea_leave(id):
result = leave.query.filter_by(teacher1_id=id).all()
if result is None:
return (None)
else:
result_length = len(result)
return_data = {}
i = 0
while i < result_length:
temp_data = dict()
leave_num = result[i].to_dict()['leave_num']
student_id = str(Decimal(result[i].to_dict()[
'student_id']).quantize(Decimal('0')))
start_time = str(result[i].to_dict()['start_time'])
end_time = str(result[i].to_dict()['end_time'])
reason = result[i].to_dict()['reason']
flag = result[i].to_dict()['flag']
teacher1_id = str(Decimal(result[i].to_dict()[
'teacher1_id']).quantize(Decimal('0')))
teacher2_id = str(Decimal(result[i].to_dict()[
'teacher2_id']).quantize(Decimal('0')))
data_type = result[i].to_dict()['type']
ensure = result[i].to_dict()['ensure']
print(leave_num, start_time, end_time, reason, flag,
teacher1_id, teacher2_id, data_type, ensure)
temp_data['leave_num'] = leave_num
temp_data['student_id'] = student_id
temp_data['start_time'] = start_time
temp_data['end_time'] = end_time
temp_data['reason'] = reason
temp_data['flag'] = flag
temp_data['teacher1_id'] = teacher1_id
temp_data['teacher2_id'] = teacher2_id
temp_data['type'] = data_type
temp_data['ensure'] = ensure
return_data[i] = temp_data
i += 1
return (jsonify(return_data))
def search_tea2_leave(id):
result = leave.query.filter_by(teacher2_id=id).all()
if result is None:
return jsonify("None")
else:
result_length = len(result)
return_data = {}
i = 0
while i < result_length:
temp_data = dict()
leave_num = result[i].to_dict()['leave_num']
student_id = str(Decimal(result[i].to_dict()[
'student_id']).quantize(Decimal('0')))
start_time = str(result[i].to_dict()['start_time'])
end_time = str(result[i].to_dict()['end_time'])
reason = result[i].to_dict()['reason']
flag = result[i].to_dict()['flag']
teacher1_id = str(Decimal(result[i].to_dict()[
'teacher1_id']).quantize(Decimal('0')))
teacher2_id = str(Decimal(result[i].to_dict()[
'teacher2_id']).quantize(Decimal('0')))
data_type = result[i].to_dict()['type']
ensure = result[i].to_dict()['ensure']
temp_data['leave_num'] = leave_num
temp_data['student_id'] = student_id
temp_data['start_time'] = start_time
temp_data['end_time'] = end_time
temp_data['reason'] = reason
temp_data['flag'] = flag
temp_data['teacher1_id'] = teacher1_id
temp_data['teacher2_id'] = teacher2_id
temp_data['type'] = data_type
temp_data['ensure'] = ensure
return_data[i] = temp_data
i += 1
return (jsonify(return_data))
def search_s_name(name):
result = student.query.filter_by(name=name).first()
if result is None:
return (None)
else:
return (result.to_dict())
def search_name(name):
    student_data = search_s_name(name)
    if student_data is None:
        return jsonify("None")
    student_id = str(Decimal(student_data['student_id']).quantize(Decimal('0')))
    student_name = student_data['name']
    student_class = student_data['s_class']
    student_room = student_data['room']
    student_school = student_data['school']
    result = leave.query.filter_by(student_id=student_id).all()
    if not result:
        return jsonify("None")
else:
result_length = len(result)
return_data = {}
i = 0
while i < result_length:
temp_data = dict()
leave_num = result[i].to_dict()['leave_num']
start_time = str(result[i].to_dict()['start_time'])
end_time = str(result[i].to_dict()['end_time'])
reason = result[i].to_dict()['reason']
flag = result[i].to_dict()['flag']
teacher1_id = str(Decimal(result[i].to_dict()['teacher1_id']).quantize(Decimal('0')))
teacher2_id = str(Decimal(result[i].to_dict()['teacher2_id']).quantize(Decimal('0')))
data_type = result[i].to_dict()['type']
ensure = result[i].to_dict()['ensure']
temp_data['leave_num'] = leave_num
temp_data['start_time'] = start_time
temp_data['end_time'] = end_time
temp_data['reason'] = reason
temp_data['flag'] = flag
temp_data['teacher1_id'] = teacher1_id
temp_data['teacher2_id'] = teacher2_id
temp_data['type'] = data_type
temp_data['ensure'] = ensure
temp_data['student_id']=student_id
temp_data['class']=student_class
temp_data['room']=student_room
temp_data['school']=student_school
temp_data['name']=student_name
return_data[i] = temp_data
i += 1
return (return_data)
def update_leave(leave_num, flag):
try:
data = db.session.query(leave).filter_by(leave_num=leave_num).first()
data.flag = flag
db.session.commit()
db.session.close()
return jsonify("True")
except:
return jsonify("False")
def search_t_id(school):
result = teacher.query.filter_by(school=school).all()
    if not result:
return jsonify("None")
else:
result_length = len(result)
return_data = {}
i = 0
while i < result_length:
temp_data = dict()
teacher_id = str(Decimal(result[i].to_dict()[
'teacher_id']).quantize(Decimal('0')))
name = result[i].to_dict()['name']
role = result[i].to_dict()['role']
temp_data['teacher_id'] = teacher_id
temp_data['name'] = name
temp_data['role'] = role
return_data[i] = temp_data
i += 1
return (jsonify(return_data))
def change_passwd(userId, userPwd):
if len(str(userId)) == 6:
try:
data = db.session.query(teacher).filter_by(
teacher_id=userId).first()
data.passwd = generate_password_hash(str(userPwd))
db.session.commit()
db.session.close()
return jsonify("True")
except:
return jsonify("False")
elif len(str(userId)) == 9:
try:
data = db.session.query(student).filter_by(
student_id=userId).first()
data.passwd = generate_password_hash(str(userPwd))
db.session.commit()
db.session.close()
return jsonify("True")
except:
return jsonify("False")
if __name__ == '__main__':
# db = None
app.run(host='0.0.0.0', port=8080, debug=True)
|
def hotel_cost(nights):
return 140 * nights
def plane_ride_cost(city):
if city == 'Charlotte':
return 183
elif city == 'Tampa':
return 220
elif city == 'Pittsburgh':
return 222
elif city == 'Los Angeles':
return 475
def rental_car_cost(days):
    rental = 40 * days
    if days >= 7:
        return rental - 50
    elif days >= 3:
        return rental - 20
    else:
        return rental
def trip_cost(city, days, spending_money):
return hotel_cost(days) + plane_ride_cost(city) + rental_car_cost(days) + spending_money
print trip_cost("Los Angeles", 5, 600)
|
# Generated by Django 2.1.7 on 2019-04-20 07:36
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('amazon', '0008_auto_20190420_1212'),
]
operations = [
migrations.RenameModel(
old_name='women_shops_clothes',
new_name='women_shops',
),
migrations.AlterModelTable(
name='women_shops',
table='women_shops',
),
]
|
import pytest
from rubicon_ml.viz.common.colors import get_rubicon_colorscale
from rubicon_ml.viz.common.dropdown_header import dropdown_header
@pytest.mark.parametrize("num_colors,expected", [(1, 2), (4, 4)])
def test_get_rubicon_colorscale(num_colors, expected):
colors = get_rubicon_colorscale(num_colors)
assert len(colors) == expected
def test_dropdown_header():
layout = dropdown_header(["A", "B", "C"], "A", "test left", "test right", "test-id")
assert layout.id.startswith("test-id")
assert layout.children[1].children.label == "A"
assert [child.children for child in layout.children[1].children.children] == ["A", "B", "C"]
assert layout.children[0].children.children == "test left"
assert layout.children[2].children.children == "test right"
|
#!/usr/bin/python
#=============================================================================
#
# Copyright 2006 Etienne URBAH for the EGEE project
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details at
# http://www.gnu.org/licenses/gpl.html
#
# For the folder given as parameter, this script recursively lists the files
# and their replicas, minimizing the number of simultaneous sessions.
#
# It uses the lfc_opendirg, lfc_readdirg and lfc_getreplica methods.
#
#=============================================================================
import sys
import os
import lfc
#=============================================================================
# Function readdirg_recurse
#=============================================================================
def readdirg_recurse(*args):
if len(args) < 1:
folder = ''
prefix = ''
else:
folder = args[0]
prefix = folder + '/'
if (folder == '') or (folder[0] != '/'):
if 'LFC_HOME' in os.environ:
folder = os.environ['LFC_HOME'] + '/' + prefix
else:
sys.exit('Relative folder path requires LFC_HOME to be set and exported')
#---------------------------------------------------------------------------
# Open the folder
#---------------------------------------------------------------------------
dir = lfc.lfc_opendirg(folder, '')
if dir == None:
err_num = lfc.cvar.serrno
err_string = lfc.sstrerror(err_num)
sys.exit('Error ' + str(err_num) + ' on folder ' + folder + ': ' + err_string)
files = []
listp = lfc.lfc_list()
#---------------------------------------------------------------------------
# Loop on the entries of the folder to build the list of files
#---------------------------------------------------------------------------
while 1:
entry = lfc.lfc_readdirg(dir)
if entry == None:
break
files.append({'name': prefix + entry.d_name,
'mode': entry.filemode,
'guid': entry.guid})
lfc.lfc_closedir(dir)
#---------------------------------------------------------------------------
# Loop on the current file list : If the entry is a folder, recurse
#---------------------------------------------------------------------------
for myfile in files[:]: # Local copy is mandatory
if myfile['mode'] & 040000:
files.extend(readdirg_recurse(myfile['name']))
return files
#=============================================================================
# Function get_replicas
#=============================================================================
def get_replicas(files):
#---------------------------------------------------------------------------
# Loop on the list of files
#---------------------------------------------------------------------------
lfc.lfc_startsess('', '')
for myfile in files:
print myfile['name']
#-------------------------------------------------------------------------
# If the entry is a regular file, list its replicas using its GUID
#-------------------------------------------------------------------------
if not (myfile['mode'] & 060000): # Exclude folders and symbolic links
(res, replicas) = lfc.lfc_getreplica('', myfile['guid'], '')
if res == 0:
for replica in replicas:
print ' ==>', replica.sfn
lfc.lfc_endsess()
#=============================================================================
# Main program
#=============================================================================
if __name__ == '__main__':
get_replicas(readdirg_recurse(*sys.argv[1:]))
|
# -*- coding: utf-8 -*-
from __future__ import division
import socket
from matplotlib import pyplot as plt
def getOpenPort():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("",0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
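# Usage note (not part of the original code): getOpenPort() asks the OS for a free TCP port by
# binding to port 0 and reading back the assigned number. There is a small race window between
# s.close() and the caller re-binding the port, so treat the result as "probably free".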
def reverseDict(dictionary):
reversedDict = {}
for entry in dictionary:
for item in dictionary[entry]:
try:
reversedDict[item].append(entry)
except KeyError:
reversedDict.update({item:[entry]})
for entry in reversedDict:
if len(reversedDict[entry]) == 1 : reversedDict[entry] = reversedDict[entry][0]
return reversedDict
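# Usage sketch (illustrative values, not from the original code):
#   reverseDict({'a': [1, 2], 'b': [2]})  ->  {1: 'a', 2: ['a', 'b']}
# i.e. the mapping is inverted, and items that map back to a single key are
# unwrapped from their one-element list, as done in the final loop above.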
class PIDcontroller():
def __init__(self, Kp, Ki, Kd):
self._Kp=Kp
self._Ki=Ki
self._Kd=Kd
self._cumError = 0
self._prevError = 0
def update(self, target_value, measured_value):
error = target_value - measured_value
self._cumError += error
change_in_error = error - self._prevError
return measured_value + self._Kp*error + self._Ki*self._cumError + self._Kd*change_in_error
#
# controller = PIDcontroller(0.1, 0.00001, 0.1)
#
# target = 0.5
# measured_value = 0
#
# a = []
#
# for ii in range(0,10):
# measured_value = controller.update(target, measured_value)
# a.append(measured_value)
# print(measured_value)
#
# plt.plot(range(len(a)), a)
# plt.show()
#
# x = [1,2,3,4,5]
# y = 3
#
#
# for ii in x:
# if ii > y:
# print(ii,y,y/ii)
# elif ii < y:
# print(ii,y,ii/y)
# else:
# print(ii,y,1)
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from config import configuration
db = SQLAlchemy()
def create_app(config_name):
app = Flask(__name__)
app.config.from_object(configuration[config_name])
db.init_app(app)
    from .main import main as main_blueprint
    app.register_blueprint(main_blueprint, url_prefix='/')
return app
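# Usage sketch (assumes a 'default' entry exists in config.configuration):
#   app = create_app('default')
#   app.run()
# Importing the blueprint inside the factory, as above, is a common way to avoid circular imports.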
|
import sys
import struct
import numpy as np
from nptyping import NDArray
from physics import Physics
class PhysicsSim(Physics):
def __init__(self, racecar) -> None:
self.__racecar = racecar
def get_linear_acceleration(self) -> NDArray[3, np.float32]:
self.__racecar._RacecarSim__send_header(
self.__racecar.Header.physics_get_linear_acceleration
)
values = struct.unpack("fff", self.__racecar._RacecarSim__receive_data(12))
return np.array(values)
def get_angular_velocity(self) -> NDArray[3, np.float32]:
self.__racecar._RacecarSim__send_header(
self.__racecar.Header.physics_get_angular_velocity
)
values = struct.unpack("fff", self.__racecar._RacecarSim__receive_data(12))
return np.array(values)
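# Note (assumed convention): struct.unpack("fff", ...) turns the 12 received bytes into three
# float32 values; the (x, y, z) ordering is an assumption based on the racecar simulator API.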
|
# coding:utf-8
# General syntax for defining a function:
def greetings():
    """Display a simple greeting."""
    print("Hello World!")
def greeting(username):
    """Display a simple greeting; username is a formal parameter."""
    print("Hello {}".format(username))
def printme(strings):
    """Print whatever string is passed in."""
    print(strings)
return
# 1. Calling a function
greetings()
# In the call greeting("Bob"), the value "Bob" is an actual argument
greeting("Bob")
printme("Calling a user-defined function")
printme("Calling the same function again")
# 2. Returning values from a function
def calculate_sum(arg1, arg2):
    """Return the sum of the two arguments."""
    return arg1 + arg2
total = calculate_sum(10, 20)
print("{0}+{1}={2}".format(10, 20, total))
# 3. Positional arguments
def power(m, n=3):
result = 1
while n > 0:
n = n - 1
result = result * m
return result
# In power(m, n) both m and n are positional arguments; the values passed in are assigned to m and n in order
print("4 to the power of 3:", power(4, 3))
# 4. Default arguments
print("4 to the power of 3:", power(4))
print("4 to the power of 5:", power(4, 5))
# 5. Keyword arguments
print("4 to the power of 3:", power(n=3, m=4))
# 6. Variable-length arguments
def print_info(arg1, *vartuple):
    """Print all of the arguments passed in."""
    print("Output:")
    print(arg1)
    for var in vartuple:
        print(var)
    return
# Call the print_info() function
print_info(15)
print_info(15, 30, 45, 60)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#############################################################################
# Copyright Vlad Popovici <popovici@bioxlab.org>
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
"""
CP_META: extract various information from the meta file (meta.json) produced
when tiling/importing the WSI.
"""
__author__ = "Vlad Popovici <popovici@bioxlab.org>"
__version__ = "0.1"
import argparse as opt
import simplejson as json
import os.path
from math import floor
def main():
p = opt.ArgumentParser(description="Extracts pieces of info from META file.")
p.add_argument('meta_path', action='store', help='full path to meta.json (excluding meta.json)')
p.add_argument('--list_stains', action='store_true', help='list stains in the file')
p.add_argument('-s', '--stain', action='store', help='stain of interest')
p.add_argument('-m', '--magnification', action='store', default='',
help='magnification of interest (e.g. 1.25x or 20.0x)')
p.add_argument('--print_ROI', action='store_true',
help='print ROI (for the specified stain and magnification) as x0 y0 width height')
p.add_argument('--target_magnification', action="store", default='',
help='if specified, scale ROI to the desired magnification')
args = p.parse_args()
meta_file = args.meta_path + os.path.sep + 'meta.json'
if not os.path.exists(meta_file):
raise RuntimeError("Cannot find " + meta_file)
with open(meta_file, 'r') as fd:
meta = json.load(fd)
if args.list_stains:
stains = list()
for k in meta:
if k in ['mpp_x', 'mpp_y', 'objective']:
continue
stains.append(k)
print(' '.join(stains))
return
if args.print_ROI:
stain = args.stain
if len(stain) == 0 or stain not in meta:
raise RuntimeError("Stain not specified or not in the meta.json file")
mag = args.magnification
if len(mag) == 0 or "mag:"+mag not in meta[stain]:
raise RuntimeError("Magnification not specified or not in meta.json file")
sm = float(mag[:-1]) # drop 'x' at the end
if len(args.target_magnification) == 0:
args.target_magnification = args.magnification
tm = float(args.target_magnification[:-1])
f = tm / sm
mag = 'mag:' + mag
x = int(floor(f * float(meta[stain][mag]['from_original_x'])))
y = int(floor(f * float(meta[stain][mag]['from_original_y'])))
w = int(floor(f * float(meta[stain][mag]['from_original_width'])))
h = int(floor(f * float(meta[stain][mag]['from_original_height'])))
print(x, y, w, h, sep=' ')
return
##
if __name__ == "__main__":
main()
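# Example invocation (hypothetical script name and paths, for illustration only):
#   python cp_meta.py /data/slide01 --list_stains
#   python cp_meta.py /data/slide01 -s HE -m 20.0x --print_ROI --target_magnification 5.0x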
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
class Student(object):
    def __init__(self, name, score):
        self.__name = name
        self.__score = score
    def get_name(self):
        return self.__name
    def get_score(self):
        return self.__score
    def set_score(self, score):
        if 0 <= score <= 100:
            self.__score = score
        else:
            raise ValueError('bad score')
    @property
    def name(self):
        # Read-only property exposing the private __name attribute
        return self.__name
tangdu = Student('tangdu', 20)
# Private variables are read and written through the get/set methods
print('tangdu.get_name() = ', tangdu.get_name())
tangdu.set_score(60)
print('tangdu.get_score() = ', tangdu.get_score())
# Attributes prefixed with __ cannot be accessed directly from outside; use the name property instead
print('property get name = %s' % tangdu.name)
|
from pandas.core.common import flatten
def remove(digit_list):
    # Strip digits and spaces from each token, keeping only the letters (e.g. '3 shiny gold' -> 'shinygold')
    digit_list = [''.join(x for x in i if x.isalpha()) for i in digit_list]
    return digit_list
def can_contain_gold(bag):
if all_contents[bag][0] == 'noother':
return 'n'
else:
if 'shinygold' in ''.join(all_contents[bag]):
return 'y'
else:
return [can_contain_gold(b) for b in all_contents[bag]]
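# Illustrative note (example values assumed, matching the AoC day 7 input format):
# all_contents maps each bag colour to the colours it directly contains, e.g.
#   all_contents = {'lightred': ['brightwhite', 'mutedyellow'], 'fadedblue': ['noother']}
# can_contain_gold() then recurses through that mapping until it reaches 'shinygold' ('y')
# or a bag that holds no others ('n'); the caller flattens the nested answers below.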
all_contents = {}
shiny_gold_content = {}
with open('data/07.txt') as f:
for line in f:
t = remove(
line.strip().replace('.', '').replace(' contain ', ',').replace('bags', '').replace(' bags', '').replace(
'bag', '').replace(', ', ',').split(','))
all_contents[t[0]] = t[1:]
for bag in all_contents.keys():
shiny_gold_content[bag] = list(flatten(can_contain_gold(bag)))
has_shiny_gold = 0
for bag in shiny_gold_content.keys():
if 'y' in shiny_gold_content[bag]:
has_shiny_gold += 1
print(has_shiny_gold)
|
import unittest
from operator import itemgetter
from unittest import TestCase
from unittest.mock import MagicMock, Mock, patch
import numpy as np
import pandas as pd
from sdv.data_navigator import DataNavigator, Table
from sdv.modeler import Modeler
from sdv.sampler import Sampler
class TestSampler(TestCase):
def test__square_matrix(self):
"""_square_matrix transform triagular list of list into square matrix."""
# Setup
data_navigator = MagicMock()
modeler = MagicMock()
sampler = Sampler(data_navigator, modeler)
triangular_matrix = [
[1],
[1, 1],
[1, 1, 1]
]
expected_result = [
[1, 0, 0],
[1, 1, 0],
[1, 1, 1]
]
# Run
result = sampler._square_matrix(triangular_matrix)
# Check
assert result == expected_result
def test__prepare_sampled_covariance(self):
""" """
# Setup
data_navigator = MagicMock()
modeler = MagicMock()
sampler = Sampler(data_navigator, modeler)
covariance = [
[1.0],
[0.5, 1.0],
[0.5, 0.5, 1.0]
]
expected_result = np.array([
[1.0, 0.5, 0.5],
[0.5, 1.0, 0.5],
[0.5, 0.5, 1.0]
])
# Run
result = sampler._prepare_sampled_covariance(covariance)
# Check
assert (result == expected_result).all().all()
@patch('sdv.sampler.Sampler.sample_rows', autospec=True)
def test_sample_all(self, rows_mock):
"""Check sample_all and returns some value."""
# Setup
data_navigator = MagicMock()
data_navigator.tables = ['TABLE_A', 'TABLE_B']
data_navigator.get_parents.side_effect = lambda x: x != 'TABLE_A'
modeler = MagicMock()
sampler = Sampler(data_navigator, modeler)
def fake_dataframe(*args, **kwargs):
kwargs['sampled_data'][args[1]] = 'sampled_data'
rows_mock.side_effect = fake_dataframe
expected_get_parents_call_list = [(('TABLE_A',), {}), (('TABLE_B',), {})]
expected_result = {
'TABLE_A': 'sampled_data'
}
# Run
result = sampler.sample_all(num_rows=5)
# Check
assert result == expected_result
assert data_navigator.get_parents.call_args_list == expected_get_parents_call_list
rows_mock.assert_called_once_with(
sampler, 'TABLE_A', 5, sampled_data={'TABLE_A': 'sampled_data'})
def test_sample_all_with_reset_primary_key(self):
"""Check sample_all with reset_primary_keys True"""
# Setup
reset_primary_keys_generators_mock = Mock()
dn_mock = Mock()
dn_mock.tables = {
'DEMO': Table(pd.DataFrame(), {'some': 'meta'})
}
dn_mock.get_parents.return_value = True
# Run
sampler_mock = Mock()
sampler_mock._reset_primary_keys_generators = reset_primary_keys_generators_mock
sampler_mock.dn = dn_mock
Sampler.sample_all(sampler_mock, reset_primary_keys=True)
# Asserts
reset_primary_keys_generators_mock.assert_called_once_with()
def test__unflatten_dict(self):
"""unflatten_dict restructure flatten dicts."""
# Setup
data_navigator = MagicMock()
modeler = MagicMock()
sampler = Sampler(data_navigator, modeler)
flat = {
'a__first_key__a': 1,
'a__first_key__b': 2,
'b__second_key__x': 0
}
expected_result = {
'a': {
'first_key': {
'a': 1,
'b': 2
},
},
'b': {
'second_key': {
'x': 0
},
}
}
# Run
result = sampler._unflatten_dict(flat)
# Check
assert result == expected_result
data_navigator.assert_not_called()
modeler.assert_not_called()
def test__unflatten_dict_mixed_array(self):
"""unflatten_dict restructure arrays."""
# Setup
data_navigator = MagicMock()
modeler = MagicMock()
sampler = Sampler(data_navigator, modeler)
flat = {
'first_key__0__0': 1,
'first_key__0__1': 0,
'first_key__1__0': 0,
'first_key__1__1': 1,
'second_key__0__std': 0.5,
'second_key__0__mean': 0.5,
'second_key__1__std': 0.25,
'second_key__1__mean': 0.25
}
expected_result = {
'first_key': [
[1, 0],
[0, 1]
],
'second_key': {
0: {
'std': 0.5,
'mean': 0.5
},
1: {
'std': 0.25,
'mean': 0.25
}
}
}
# Run
result = sampler._unflatten_dict(flat)
# Check
assert result == expected_result
data_navigator.assert_not_called()
modeler.assert_not_called()
def test__unflatten_dict_child_name(self):
"""unflatten_dict will respect the name of child tables."""
# Setup
data_navigator = MagicMock()
modeler = MagicMock()
sampler = Sampler(data_navigator, modeler)
flat = {
'first_key__a__b': 1,
'first_key____CHILD_TABLE__model_param': 0,
'distribs____CHILD_TABLE__distribs__UNIT_PRICE__std__mean': 0
}
expected_result = {
'first_key': {
'a': {
'b': 1
},
'__CHILD_TABLE': {
'model_param': 0
}
},
'distribs': {
'__CHILD_TABLE__distribs__UNIT_PRICE__std': {
'mean': 0
}
}
}
# Run
result = sampler._unflatten_dict(flat)
# Check
assert result == expected_result
modeler.assert_not_called()
data_navigator.assert_not_called()
def test__unflatten_dict_respect_covariance_matrix(self):
"""unflatten_dict restructures the covariance matrix into an square matrix."""
# Setup
data_navigator = MagicMock()
modeler = MagicMock()
sampler = Sampler(data_navigator, modeler)
def fake_values(i, j):
return '{}, {}'.format(i, j)
expected_result = {
'covariance': np.array([
[fake_values(i, j) for j in range(40)]
for i in range(40)
]).tolist()
}
flat = {
'covariance__{}__{}'.format(i, j): fake_values(i, j)
for i in range(40) for j in range(40)
}
# Run
result = sampler._unflatten_dict(flat)
# Check
assert result == expected_result
def test__unflatten_gaussian_copula(self):
"""_unflatten_gaussian_copula add the distribution, type and fitted kwargs."""
# Setup
data_navigator = MagicMock()
modeler = MagicMock()
modeler.model_kwargs = {
'distribution': 'distribution_name'
}
sampler = Sampler(data_navigator, modeler)
model_parameters = {
'some': 'key',
'covariance': [
[1],
[0, 1]
],
'distribs': {
0: {
'first': 'distribution',
'std': 0
},
1: {
'second': 'distribution',
'std': 0
}
}
}
expected_result = {
'some': 'key',
'distribution': 'distribution_name',
'covariance': [
[1, 0],
[0, 1]
],
'distribs': {
0: {
'type': 'distribution_name',
'fitted': True,
'first': 'distribution',
'std': 1
},
1: {
'type': 'distribution_name',
'fitted': True,
'second': 'distribution',
'std': 1
}
}
}
# Run
result = sampler._unflatten_gaussian_copula(model_parameters)
# Check
assert result == expected_result
data_navigator.assert_not_called()
modeler.assert_not_called()
def test__unflatten_gaussian_copula_not_matrix_symmetric(self):
"""unflatte with not matrix symmetric"""
# Setup
model_parameters = {
'some': 'key',
'covariance': [
[1],
[0, 1]
],
'distribs': {
0: {
'first': 'distribution',
'std': 0
},
1: {
'second': 'distribution',
'std': 0
}
}
}
modeler_mock = Mock()
modeler_mock.model_kwargs = {
'distribution': 'distribution_name'
}
prepare_mock = Mock()
prepare_mock.return_value = [[1], [0, 1]]
check_mock = Mock()
check_mock.return_value = False
make_mock = Mock()
make_mock.return_value = np.array([[1, 0], [0, 1]])
# Run
sampler_mock = Mock()
sampler_mock.modeler = modeler_mock
sampler_mock._prepare_sampled_covariance = prepare_mock
sampler_mock._check_matrix_symmetric_positive_definite = check_mock
sampler_mock._make_positive_definite = make_mock
result = Sampler._unflatten_gaussian_copula(sampler_mock, model_parameters)
# Asserts
assert result['covariance'] == [[1, 0], [0, 1]]
prepare_mock.assert_called_once_with([[1], [0, 1]])
check_mock.assert_called_once_with([[1], [0, 1]])
make_mock.assert_called_once_with([[1], [0, 1]])
def test__unflatten_gaussian_copula_negative_std(self):
"""_unflatten_gaussian_copula will transform negative or 0 std into positive."""
# Setup
data_navigator = MagicMock()
modeler = MagicMock()
modeler.model_kwargs = {
'distribution': 'distribution_name'
}
sampler = Sampler(data_navigator, modeler)
model_parameters = {
'some': 'key',
'covariance': [
[1],
[0, 1]
],
'distribs': {
0: {
'first': 'distribution',
'std': 0
},
1: {
'second': 'distribution',
'std': -1
}
}
}
expected_result = {
'some': 'key',
'distribution': 'distribution_name',
'covariance': [
[1, 0],
[0, 1]
],
'distribs': {
0: {
'type': 'distribution_name',
'fitted': True,
'first': 'distribution',
'std': 1
},
1: {
'type': 'distribution_name',
'fitted': True,
'second': 'distribution',
'std': np.exp(-1)
}
}
}
# Run
result = sampler._unflatten_gaussian_copula(model_parameters)
# Check
assert result == expected_result
data_navigator.assert_not_called()
modeler.assert_not_called()
def test__sample_valid_rows_raises_unfitted_model(self):
"""_sample_valid_rows raise an exception for invalid models."""
# Setup
data_navigator = MagicMock(spec=DataNavigator)
modeler = MagicMock(spec=Modeler)
sampler = Sampler(data_navigator, modeler)
data_navigator.get_parents.return_value = set()
num_rows = 5
table_name = 'table_name'
model = None
# Run
with self.assertRaises(ValueError):
sampler._sample_valid_rows(model, num_rows, table_name)
# Check
modeler.assert_not_called()
assert len(modeler.method_calls) == 0
data_navigator.assert_not_called()
data_navigator.get_parents.assert_called_once_with('table_name')
def test__get_missing_valid_rows(self):
"""get_missing_valid_rows return an a dataframe and an integer.
The dataframe contains valid_rows concatenated to synthesized and their index reset.
The integer is the diference between num_rows and the returned dataframe rows.
"""
# Setup
data_navigator = MagicMock(spec=DataNavigator)
modeler = MagicMock(spec=Modeler)
sampler = Sampler(data_navigator, modeler)
synthesized = pd.DataFrame(columns=list('AB'), index=range(3, 5))
drop_indices = pd.Series(False, index=range(3, 5))
valid_rows = pd.DataFrame(columns=list('AB'), index=range(2))
num_rows = 5
# Run
result = sampler._get_missing_valid_rows(synthesized, drop_indices, valid_rows, num_rows)
missing_rows, valid_rows = result
# Check
assert missing_rows == 1
assert valid_rows.equals(pd.DataFrame(columns=list('AB'), index=[0, 1, 2, 3]))
data_navigator.assert_not_called()
assert data_navigator.method_calls == []
modeler.assert_not_called()
assert modeler.method_calls == []
def test__get_missing_valid_rows_excess_rows(self):
"""If more rows than required are passed, the result is cut to num_rows."""
# Setup
data_navigator = MagicMock(spec=DataNavigator)
modeler = MagicMock(spec=Modeler)
sampler = Sampler(data_navigator, modeler)
synthesized = pd.DataFrame(columns=list('AB'), index=range(3, 7))
drop_indices = pd.Series(False, index=range(3, 7))
valid_rows = pd.DataFrame(columns=list('AB'), index=range(2))
num_rows = 5
# Run
result = sampler._get_missing_valid_rows(synthesized, drop_indices, valid_rows, num_rows)
missing_rows, valid_rows = result
# Check
assert missing_rows == 0
assert valid_rows.equals(pd.DataFrame(columns=list('AB'), index=range(5)))
data_navigator.assert_not_called()
assert data_navigator.method_calls == []
modeler.assert_not_called()
assert modeler.method_calls == []
@patch('sdv.sampler.get_qualified_name')
def test__sample_model(self, qualified_mock):
"""_sample_model sample the number of rows from the given model."""
# Setup
data_navigator = MagicMock(spec=DataNavigator)
modeler = MagicMock(spec=Modeler)
sampler = Sampler(data_navigator, modeler)
model = MagicMock()
values = np.array([
[1, 1, 1],
[2, 2, 2],
[3, 3, 3]
])
qualified_mock.return_value = 'package.module.full_qualified_name'
model.sample.return_value = values
num_rows = 3
columns = list('ABC')
expected_result = pd.DataFrame(values, columns=columns)
# Run
result = sampler._sample_model(model, num_rows, columns)
# Check
assert result.equals(expected_result)
qualified_mock.assert_called_once_with(model)
model.sample.assert_called_once_with(3)
@patch('sdv.sampler.get_qualified_name')
def test__sample_model_vine(self, qualified_mock):
"""_sample_model sample the number of rows from the given model."""
# Setup
data_navigator = MagicMock(spec=DataNavigator)
modeler = MagicMock(spec=Modeler)
sampler = Sampler(data_navigator, modeler)
model = MagicMock()
values = [
np.array([1, 1, 1]),
np.array([2, 2, 2]),
np.array([3, 3, 3])
]
qualified_mock.return_value = 'copulas.multivariate.vine.VineCopula'
model.sample.side_effect = values
num_rows = 3
columns = list('ABC')
expected_result = pd.DataFrame(values, columns=columns)
# Run
result = sampler._sample_model(model, num_rows, columns)
# Check
assert result.equals(expected_result)
qualified_mock.assert_called_once_with(model)
assert model.sample.call_args_list == [
((3,), ),
((3,), ),
((3,), )
]
def test__reset_primary_keys_generators(self):
"""_reset_primary_keys deletes all generators and counters."""
# Setup
data_navigator = MagicMock(spec=DataNavigator)
modeler = MagicMock(spec=Modeler)
sampler = Sampler(data_navigator=data_navigator, modeler=modeler)
sampler.primary_key = {
'table': 'generator for table'
}
sampler.remaining_primary_key = {
'table': 'counter for table'
}
# Run
sampler._reset_primary_keys_generators()
# Check
assert sampler.primary_key == dict()
assert sampler.remaining_primary_key == dict()
@patch('sdv.sampler.exrex.count', autospec=True)
@patch('sdv.sampler.exrex.generate', autospec=True)
def test__get_primary_keys_create_generator(self, exrex_gen_mock, exrex_count_mock):
"""If there's a primary key, but no generator, a new one is created and used."""
# Setup
data_navigator = MagicMock(spec=DataNavigator)
data_navigator.get_meta_data.return_value = {
'primary_key': 'table_pk',
'fields': {
'table_pk': {
'regex': 'regex for table_pk',
'type': 'number',
'subtype': 'integer'
},
}
}
modeler = MagicMock(spec=Modeler)
sampler = Sampler(data_navigator=data_navigator, modeler=modeler)
exrex_gen_mock.return_value = (str(x) for x in range(10))
exrex_count_mock.return_value = 10
expected_primary_key = 'table_pk'
expected_primary_key_values = pd.Series(range(5))
# Run
result = sampler._get_primary_keys('table', 5)
# Check
primary_key, primary_key_values = result
assert primary_key == expected_primary_key
primary_key_values.equals(expected_primary_key_values)
assert sampler.primary_key['table'] == exrex_gen_mock.return_value
assert sampler.remaining_primary_key['table'] == 5
data_navigator.get_meta_data.assert_called_once_with('table')
exrex_count_mock.assert_called_once_with('regex for table_pk')
exrex_gen_mock.assert_called_once_with('regex for table_pk')
def test__get_primary_keys_no_pk(self):
"""If no primary key, _get_primary_keys return a duple of None """
# Setup
data_navigator = MagicMock(spec=DataNavigator)
data_navigator.get_meta_data.return_value = {}
modeler = MagicMock(spec=Modeler)
sampler = Sampler(data_navigator=data_navigator, modeler=modeler)
# Run
result = sampler._get_primary_keys('table', 5)
# Check
primary_key, primary_key_values = result
assert primary_key is None
assert primary_key_values is None
def test__get_primary_keys_raises_error(self):
"""_get_primary_keys raises an exception if there aren't enough values."""
# Setup
data_navigator = MagicMock(spec=DataNavigator)
data_navigator.get_meta_data.return_value = {
'primary_key': 'table_pk',
'fields': {
'table_pk': {
'regex': 'regex for table_pk',
'type': 'number',
'subtype': 'integer'
},
}
}
modeler = MagicMock(spec=Modeler)
sampler = Sampler(data_navigator=data_navigator, modeler=modeler)
sampler.primary_key['table'] = 'a generator'
sampler.remaining_primary_key['table'] = 0
# Run / Check
with self.assertRaises(ValueError):
sampler._get_primary_keys('table', 5)
@patch('sdv.sampler.Sampler.sample_rows', autospec=True)
def test_sample_table(self, rows_mock):
""" """
# Setup
data_navigator = MagicMock(spec=DataNavigator)
data_navigator.tables = {
'table': MagicMock(**{'data.shape': ('rows', 'columns')})
}
modeler = MagicMock(spec=Modeler)
sampler = Sampler(data_navigator=data_navigator, modeler=modeler)
rows_mock.return_value = 'samples'
table_name = 'table'
reset_primary_keys = False
expected_result = 'samples'
# Run
result = sampler.sample_table(table_name, reset_primary_keys=reset_primary_keys)
# Check
assert result == expected_result
rows_mock.assert_called_once_with(
sampler, 'table', 'rows', sample_children=False, reset_primary_keys=False)
def test__fill_text_columns(self):
"""Fill columns"""
# Setup
data_navigator_mock = Mock()
data_navigator_mock.tables = {
'DEMO': Table(
pd.DataFrame(),
{
'fields': {
'a_field': {
'name': 'a_field',
'type': 'id',
'ref': {
'table': 'table_ref',
'field': 'table_ref_id'
}
},
'b_field': {
'name': 'b_field',
'type': 'id',
'regex': '^[0-9]{10}$'
},
'c_field': {
'name': 'c_field',
'type': 'text',
'regex': '^[a-z]{10}$'
}
}
}
)
}
sample_rows_mock = Mock()
sample_rows_mock.return_value = {'table_ref_id': {'name': 'table_ref_id'}}
# Run
sampler_mock = Mock()
sampler_mock.dn = data_navigator_mock
sampler_mock.sample_rows = sample_rows_mock
row = pd.DataFrame({
'c_field': ['foo', 'bar', 'tar']
})
labels = ['a_field', 'b_field', 'c_field']
table_name = 'DEMO'
Sampler._fill_text_columns(sampler_mock, row, labels, table_name)
# Asserts
sample_rows_mock.assert_called_once_with('table_ref', 1)
def test__transform_synthesized_rows(self):
"""Reverse transform synthetized data."""
# Setup
ht_mock = Mock()
ht_mock.transformers = ['foo', 'bar']
ht_mock.reverse_transform_table.return_value = pd.DataFrame({
'foo': [1, 2, 3],
'bar': ['aaa', 'bbb', 'ccc']
})
dn_mock = Mock()
dn_mock.ht = ht_mock
dn_mock.get_meta_data.return_value = {
'fields': {
'foo': {
'subtype': 'integer'
},
'bar': {
'subtype': 'text'
},
}
}
fill_text_mock = Mock()
fill_text_mock.return_value = pd.DataFrame({
'foo': [1, 2, 3],
'bar': ['aaa', 'bbb', 'ccc']
})
# Run
sampler_mock = Mock()
sampler_mock.dn = dn_mock
sampler_mock._fill_text_columns = fill_text_mock
table_name = 'DEMO'
synthesized = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
columns=['foo', 'bar', 'tar']
)
Sampler._transform_synthesized_rows(sampler_mock, synthesized, table_name)
# Asserts
exp_called_synthesized = pd.DataFrame({
'foo': [1, 2, 3],
'bar': ['aaa', 'bbb', 'ccc'],
'tar': [3, 6, 9]
})
exp_called_labels = ['foo', 'bar']
exp_called_reverse_meta = {
'fields': [
{'subtype': 'integer', 'name': 'foo'},
{'subtype': 'text', 'name': 'bar'}
],
'name': 'DEMO'
}
dn_mock.get_meta_data.assert_called_once_with('DEMO')
fill_text_args, fill_text_kwargs = fill_text_mock.call_args
fill_text_data_frame, fill_text_labels, fill_text_table_name = fill_text_args
fill_text_data_frame.sort_index(axis=1, inplace=True)
exp_called_synthesized.sort_index(axis=1, inplace=True)
assert fill_text_mock.call_count == 1
assert sorted(fill_text_labels) == sorted(exp_called_labels)
assert fill_text_table_name == 'DEMO'
pd.testing.assert_frame_equal(fill_text_data_frame, exp_called_synthesized)
rt_args, rt_kwargs = ht_mock.reverse_transform_table.call_args
rt_arg_text_filled, rt_arg_meta = rt_args
rt_arg_meta['fields'] = sorted(rt_arg_meta['fields'], key=itemgetter('name'))
exp_called_reverse_meta['fields'] = sorted(
exp_called_reverse_meta['fields'],
key=itemgetter('name')
)
pd.testing.assert_frame_equal(rt_arg_text_filled, pd.DataFrame(index=[0, 1, 2]))
assert rt_arg_meta == exp_called_reverse_meta
def test__unflatten_dict_raise_value_error_row_index(self):
"""Raises ValueError by row_index"""
# Setup
setdefault_mock = Mock()
setdefault_mock.return_value = [1, 2, 3, 4, 5]
# Run and assert
sampler = Mock()
sampler._setdefault = setdefault_mock
flat = {
'foo__1__1': 'foo'
}
with self.assertRaises(ValueError):
Sampler._unflatten_dict(sampler, flat)
def test__unflatten_dict_raise_value_error_column_index(self):
"""Raises ValueError by column_index"""
# Setup
setdefault_mock = Mock()
setdefault_mock.return_value = [[1, 2, 3, 4]]
# Run and assert
sampler = Mock()
sampler._setdefault = setdefault_mock
flat = {
'foo__1__1': 'foo'
}
with self.assertRaises(ValueError):
Sampler._unflatten_dict(sampler, flat)
    def test__unflatten_dict_already_unflattened(self):
        """An already unflattened dict is returned unchanged."""
# Setup
# Run
sampler = Mock()
flat = {
'foo': 'bar'
}
result = Sampler._unflatten_dict(sampler, flat)
# Asserts
exp_dict = {
'foo': 'bar'
}
assert result == exp_dict
@patch('sdv.sampler.Sampler._check_matrix_symmetric_positive_definite')
def test__make_positive_definite_no_iterate(self, check_mock):
"""Make positive when check_matrix returns True without iterate"""
# Setup
check_matrix_mock = Mock()
check_matrix_mock.return_value = True
# Run
sampler_mock = Mock()
sampler_mock._check_matrix_symmetric_positive_definite = check_matrix_mock
matrix = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
Sampler._make_positive_definite(sampler_mock, matrix)
# Asserts
assert check_matrix_mock.call_count == 1
def test__make_positive_definite_iterate(self):
"""Make positive when check_matrix returns True with iterations"""
# Setup
check_matrix_mock = Mock()
check_matrix_mock.side_effect = [False, False, True]
# Run
sampler_mock = Mock()
sampler_mock._check_matrix_symmetric_positive_definite = check_matrix_mock
matrix = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
Sampler._make_positive_definite(sampler_mock, matrix)
# Asserts
assert check_matrix_mock.call_count == 3
def test__check_matrix_symmetric_positive_definite(self):
"""Check matrix symmetric positive return false"""
# Setup
# Run
sampler_mock = Mock()
matrix = np.array([-4, -69])
result = Sampler._check_matrix_symmetric_positive_definite(sampler_mock, matrix)
# Asserts
assert result is False
def test__check_matrix_symmetric_positive_definite_error(self):
"""Check matrix symmetric positive return false raise error"""
# Setup
# Run
sampler_mock = Mock()
matrix = np.array([[1, 1], [1, 1]])
result = Sampler._check_matrix_symmetric_positive_definite(sampler_mock, matrix)
# Asserts
assert result is False
def test__get_extension(self):
"""Retrieve the generated parent row extension"""
# Setup
# Run
sampler_mock = Mock()
parent_row = pd.Series([[1, 1], [1, 1]], ['__demo__foo', '__demo__bar'])
table_name = 'demo'
parent_name = 'parent'
result = Sampler._get_extension(sampler_mock, parent_row, table_name, parent_name)
# Asserts
expect = {'foo': [1, 1], 'bar': [1, 1]}
assert result == expect
@patch('sdv.sampler.get_qualified_name')
def test__get_model(self, qualified_name):
"""Retrieve the model with parameters"""
# Setup
unflatten_dict_mock = Mock()
unflatten_dict_mock.return_value = dict()
qualified_name.return_value = 'copulas.multivariate.gaussian.GaussianMultivariate'
unflatten_gaussian_mock = Mock()
unflatten_gaussian_mock.return_value = None
model_mock = Mock()
model_mock.from_dict.return_value = None
modeler_mock = Mock()
modeler_mock.model = model_mock
# Run
sampler_mock = Mock()
sampler_mock._unflatten_dict = unflatten_dict_mock
sampler_mock.modeler = modeler_mock
sampler_mock._unflatten_gaussian_copula = unflatten_gaussian_mock
Sampler._get_model(sampler_mock, None)
# Asserts
exp_unflatten_gaussian_called = {
'fitted': True,
'type': 'copulas.multivariate.gaussian.GaussianMultivariate'
}
qualified_name.assert_called_once_with(modeler_mock.model)
unflatten_dict_mock.assert_called_once_with(None)
unflatten_gaussian_mock.assert_called_once_with(exp_unflatten_gaussian_called)
model_mock.from_dict.assert_called_once_with(None)
def test_sample_rows_sample_children(self):
"""sample_rows with sample_children True"""
# Setup
reset_pk_generators_mock = Mock()
sample_valid_rows_mock = Mock()
sample_valid_rows_mock.return_value = {}
get_pk_mock = Mock()
get_pk_mock.return_value = None
transform_mock = Mock()
modeler_mock = Mock()
modeler_mock.models = {
'DEMO': {}
}
dn_mock = Mock()
dn_mock.get_parents.return_value = {}
dn_mock.foreign_keys = {}
# Run
sampler_mock = Mock()
sampler_mock._reset_primary_keys_generators = reset_pk_generators_mock
sampler_mock._sample_valid_rows = sample_valid_rows_mock
sampler_mock._get_primary_keys = get_pk_mock
sampler_mock._transform_synthesized_rows = transform_mock
sampler_mock.modeler = modeler_mock
sampler_mock.dn = dn_mock
table_name = 'DEMO'
num_rows = 5
Sampler.sample_rows(sampler_mock, table_name, num_rows, reset_primary_keys=True)
# Asserts
reset_pk_generators_mock.assert_called_once_with()
sample_valid_rows_mock.assert_called_once_with({}, 5, 'DEMO')
def test_sample_rows_no_sample_children(self):
"""sample_rows with sample_children True"""
# Setup
reset_pk_generators_mock = Mock()
sample_valid_rows_mock = Mock()
sample_valid_rows_mock.return_value = {}
get_pk_mock = Mock()
get_pk_mock.return_value = None, ['foo']
transform_mock = Mock()
modeler_mock = Mock()
modeler_mock.models = {
'DEMO': {}
}
dn_mock = Mock()
dn_mock.get_parents.return_value = {'foo': 'bar'}
dn_mock.foreign_keys = {
('DEMO', 'foo'): (None, 'tar')
}
# Run
sampler_mock = Mock()
sampler_mock._reset_primary_keys_generators = reset_pk_generators_mock
sampler_mock._sample_valid_rows = sample_valid_rows_mock
sampler_mock._get_primary_keys = get_pk_mock
sampler_mock._transform_synthesized_rows = transform_mock
sampler_mock.modeler = modeler_mock
sampler_mock.dn = dn_mock
table_name = 'DEMO'
num_rows = 5
Sampler.sample_rows(sampler_mock, table_name, num_rows, sample_children=False)
# Asserts
transform_mock.assert_called_once_with({'tar': 'foo'}, 'DEMO')
def test__sample_without_previous(self):
"""Check _sample without previous"""
# Setup
get_extension_mock = Mock()
get_extension_mock.return_value = {'child_rows': 0.999}
get_model_mock = Mock()
get_model_mock.return_value = None
sample_valid_rows_mock = Mock()
sample_valid_rows_mock.return_value = {}
sample_children_mock = Mock()
dn_mock = Mock()
dn_mock.foreign_keys = {
('DEMO', 'p_name'): ('parent_id', 'foreign_key')
}
# Run
sampler_mock = Mock()
sampler_mock._get_extension = get_extension_mock
sampler_mock._get_model = get_model_mock
sampler_mock._sample_valid_rows = sample_valid_rows_mock
sampler_mock._sample_children = sample_children_mock
sampler_mock.dn = dn_mock
table_name = 'DEMO'
parent_name = 'p_name'
parent_row = {'parent_id': 'foo'}
sampled = {}
Sampler._sample(sampler_mock, table_name, parent_name, parent_row, sampled)
# Asserts
get_extension_mock.assert_called_once_with({'parent_id': 'foo'}, 'DEMO', 'p_name')
get_model_mock.assert_called_once_with({'child_rows': 0.999})
sample_valid_rows_mock.assert_called_once_with(None, 1, 'DEMO')
sample_children_mock.assert_called_once_with('DEMO', {'DEMO': {'foreign_key': 'foo'}})
def test__sample_with_previous(self):
"""Check _sample with previous"""
# Setup
get_extension_mock = Mock()
get_extension_mock.return_value = {'child_rows': 0.999}
get_model_mock = Mock()
get_model_mock.return_value = None
sample_valid_rows_mock = Mock()
sample_valid_rows_mock.return_value = pd.DataFrame({'foo': [0, 1]})
sample_children_mock = Mock()
dn_mock = Mock()
dn_mock.foreign_keys = {
('DEMO', 'p_name'): ('parent_id', 'foreign_key')
}
# Run
sampler_mock = Mock()
sampler_mock._get_extension = get_extension_mock
sampler_mock._get_model = get_model_mock
sampler_mock._sample_valid_rows = sample_valid_rows_mock
sampler_mock._sample_children = sample_children_mock
sampler_mock.dn = dn_mock
table_name = 'DEMO'
parent_name = 'p_name'
parent_row = {'parent_id': 'foo'}
sampled = {'DEMO': pd.DataFrame({
'bar': [1, 2]
})}
Sampler._sample(sampler_mock, table_name, parent_name, parent_row, sampled)
# Asserts
exp_dataframe_sampled = pd.DataFrame({
'bar': [1, 2, np.NaN, np.NaN],
'foo': [np.NaN, np.NaN, 0, 1],
'foreign_key': [np.NaN, np.NaN, 'foo', 'foo']
})
args_sample_children, kwargs_sample_children = sample_children_mock.call_args
exp_arg_table_name, exp_arg_sampled = args_sample_children
get_extension_mock.assert_called_once_with({'parent_id': 'foo'}, 'DEMO', 'p_name')
get_model_mock.assert_called_once_with({'child_rows': 0.999})
sample_valid_rows_mock.assert_called_once_with(None, 1, 'DEMO')
assert exp_arg_table_name == 'DEMO'
pd.testing.assert_frame_equal(exp_arg_sampled['DEMO'], exp_dataframe_sampled)
def test__sample_children(self):
"""Sample children"""
# Setup
dn_mock = Mock()
dn_mock.get_children.return_value = ['aaa', 'bbb', 'ccc']
sample_mock = Mock()
# Run
sampler_mock = Mock()
sampler_mock.dn = dn_mock
sampler_mock._sample = sample_mock
table_name = 'DEMO'
sampled = {
'DEMO': pd.DataFrame({
'foo': [0, 1]
})
}
Sampler._sample_children(sampler_mock, table_name, sampled)
# Asserts
exp_sampled = {
'DEMO': pd.DataFrame({
'foo': [0, 1]
})
}
exp_sample_arguments = [
('aaa', 'DEMO', pd.Series({'foo': 0}, name=0), exp_sampled),
('aaa', 'DEMO', pd.Series({'foo': 1}, name=1), exp_sampled),
('bbb', 'DEMO', pd.Series({'foo': 0}, name=0), exp_sampled),
('bbb', 'DEMO', pd.Series({'foo': 1}, name=1), exp_sampled),
('ccc', 'DEMO', pd.Series({'foo': 0}, name=0), exp_sampled),
('ccc', 'DEMO', pd.Series({'foo': 1}, name=1), exp_sampled)
]
dn_mock.get_children.assert_called_once_with('DEMO')
assert sample_mock.call_count == 6
for called, expected in zip(sample_mock.call_args_list, exp_sample_arguments):
assert called[0][0] == expected[0]
assert called[0][1] == expected[1]
pd.testing.assert_series_equal(called[0][2], expected[2])
pd.testing.assert_frame_equal(called[0][3]['DEMO'], expected[3]['DEMO'])
def test__sample_valid_rows_fitted(self):
"""sample valid rows with model fitted"""
# Setup
pk_keys_mock = Mock()
pk_keys_mock.return_value = 'pk_name', [1, 2, 3, 4]
synthesized_mock = pd.DataFrame({'foo': [0, 1.1], 'bar': [1, 0]})
sample_model_mock = Mock()
sample_model_mock.return_value = synthesized_mock
missing_valid_rows_mock = Mock()
missing_valid_rows_mock.side_effect = [
(True, {}),
(False, {})
]
missing_valid_rows_mock.return_value = False, {}
dn_mock = Mock()
dn_mock.get_meta_data.return_value = {
'fields': {
'foo': {
'type': 'categorical',
},
'bar': {
'type': 'numeric'
}
}
}
tables = {
'DEMO': pd.DataFrame({'a_field': [1, 0], 'b_field': [0, 1]})
}
# Run
sampler_mock = Mock()
sampler_mock._get_primary_keys = pk_keys_mock
sampler_mock._sample_model = sample_model_mock
sampler_mock._get_missing_valid_rows = missing_valid_rows_mock
sampler_mock.modeler.tables = tables
sampler_mock.dn = dn_mock
model_mock = Mock()
model_mock.fitted = True
Sampler._sample_valid_rows(sampler_mock, model_mock, 5, 'DEMO')
# Asserts
assert missing_valid_rows_mock.call_count == 2
assert sample_model_mock.call_count == 2
if __name__ == '__main__':
unittest.main()
|
import os
from app.server import app
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port, threaded=True, debug=False)
|
from collections import defaultdict
number_of_pieces = int(input())
all_pieces = defaultdict(dict)
for _ in range(number_of_pieces):
tokens = input().split("|")
piece = tokens[0]
composer = tokens[1]
key = tokens[2]
all_pieces[piece]["composer"] = composer
all_pieces[piece]["key"] = key
while True:
tokens = input()
if tokens == "Stop":
break
tokens = tokens.split("|")
command = tokens[0]
piece = tokens[1]
if command == "Add":
composer = tokens[2]
key = tokens[3]
if piece in all_pieces:
print(f"{piece} is already in the collection!")
continue
print(f"{piece} by {composer} in {key} added to the collection!")
all_pieces[piece]["composer"] = composer
all_pieces[piece]["key"] = key
elif command == "Remove":
if piece in all_pieces:
print(f"Successfully removed {piece}!")
all_pieces.pop(piece)
continue
print(f"Invalid operation! {piece} does not exist in the collection.")
elif command == "ChangeKey":
new_key = tokens[2]
if piece in all_pieces:
print(f"Changed the key of {piece} to {new_key}!")
all_pieces[piece]["key"] = new_key
continue
print(f"Invalid operation! {piece} does not exist in the collection.")
sorted_pieces = dict(sorted(all_pieces.items(), key=lambda p: (p[0], p[1]["composer"])))
for piece, composer_key in sorted_pieces.items():
print(f"{piece} -> Composer: {composer_key['composer']}, Key: {composer_key['key']}")
|
def archers_ready(archers):
return all(a >= 5 for a in archers) if archers else False
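# Usage sketch (illustrative values): every archer needs at least 5 arrows to be ready.
#   archers_ready([5, 6, 7])  ->  True
#   archers_ready([5, 6, 4])  ->  False
#   archers_ready([])         ->  False  (an empty group is treated as not ready)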
|
# Generated by Django 2.0.7 on 2018-09-13 10:30
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('TraverMsg', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='TraverNote',
fields=[
('id', models.CharField(max_length=20, primary_key=True, serialize=False, verbose_name='游记id')),
('title', models.CharField(max_length=50, verbose_name='标题')),
('note_content', models.CharField(max_length=3000, verbose_name='游记内容')),
('star_num', models.CharField(max_length=10, verbose_name='点赞人数')),
('notify_status', models.CharField(choices=[('1', '待评审'), ('2', '通过审核')], default='1', max_length=10, verbose_name='审核状态')),
('add_time', models.DateField(default=datetime.datetime.now, verbose_name='发表日期')),
('img_url', models.CharField(max_length=100, verbose_name='封面图url')),
('scenic_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TraverMsg.ScenicMsg', verbose_name='所属景点')),
('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='所属用户')),
],
options={
'verbose_name': '用户游记',
'verbose_name_plural': '用户游记',
},
),
migrations.CreateModel(
name='UserAnswer',
fields=[
('id', models.CharField(max_length=20, primary_key=True, serialize=False, verbose_name='回复id')),
('title', models.CharField(max_length=50, verbose_name='标题')),
('answer_content', models.CharField(max_length=3000, verbose_name='回答内容')),
('add_time', models.DateField(default=datetime.datetime.now, verbose_name='时间')),
],
options={
'verbose_name': '用户解答',
'verbose_name_plural': '用户解答',
},
),
migrations.CreateModel(
name='UserAsk',
fields=[
('id', models.CharField(max_length=20, primary_key=True, serialize=False, verbose_name='问题id')),
('title', models.CharField(max_length=50, verbose_name='标题')),
('ask_content', models.CharField(max_length=11, verbose_name='内容')),
('star_num', models.CharField(max_length=50, verbose_name='关注数')),
('add_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='时间')),
('scenic_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TraverMsg.ScenicMsg', verbose_name='景点id')),
('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='所属用户')),
],
options={
'verbose_name': '用户提问',
'verbose_name_plural': '用户提问',
},
),
migrations.AddField(
model_name='useranswer',
name='ask_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='TraverAsk.UserAsk', verbose_name='所属问题'),
),
migrations.AddField(
model_name='useranswer',
name='user_id',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='所属用户'),
),
]
|
"""django-allauth customizations for browsercompat."""
|
from django.db import models
from .validators import validate_file_extension
from django.conf import settings
from tssite import client
masechetot_by_seder = [
('Zeraim', (
('berachot', 'Berachot'),
)
),
('Moed', (
('shabbat', 'Shabbat'),
)
),
]
class Teacher(models.Model):
title = models.CharField(max_length=20)
fname = models.CharField(max_length=40)
mname = models.CharField(max_length=10, default=None, null=True, blank=True)
lname = models.CharField(max_length=40)
short_bio = models.CharField(max_length=256)
long_bio = models.TextField()
image_url = models.CharField(max_length=1024, null=True, blank=True)
image = models.FileField(upload_to='uploads/', null=True, blank=True)
def __str__(self):
if self.mname:
return '{} {} {} {}'.format(self.title, self.fname, self.mname, self.lname)
return '{} {} {}'.format(self.title, self.fname, self.lname)
@property
def teacher_string(self):
string = ''
if self.mname:
title = self.title.replace('.', '').lower()
first = self.fname.lower()
middle = self.mname.replace('.', '').lower()
last = self.lname.lower()
string = f'{title}-{first}-{middle}-{last}'
else:
title = self.title.replace('.', '').lower()
first = self.fname.lower()
last = self.lname.lower()
string = f'{title}-{first}-{last}'
return string
def get_class_audio_location(instance, filename):
path = ''
if instance.division == 'torah':
path = f'archives/Torah/{instance.section_title}/{instance.unit}-{instance.part}.mp3'
elif (
instance.division == 'neviim_rishonim' or
instance.division == 'neviim_aharonim' or
instance.division == 'tere_assar'
):
base = ''
if instance.division == 'neviim_rishonim':
base = 'archives/Neviim Rishonim'
elif instance.division == 'neviim_aharonim':
base = 'archives/Neviim Aharonim'
elif instance.division == 'tere_assar':
base = 'archives/Tere Asar'
file = ''
        if instance.part is not None and instance.part != '':
file = f'{instance.section}-{instance.unit}{instance.part}'
else:
file = f'{instance.section}-{instance.unit}'
path = f'{base}/{instance.section.title()}/{file}.mp3'
elif instance.division == 'ketuvim':
base = 'archives/Ketuvim'
file = ''
        if instance.part is not None and instance.part != '':
file = f'{instance.section}-{instance.unit}{instance.part}'
else:
file = f'{instance.section}-{instance.unit}'
path = f"{base}/{instance.section_title}/{file}.mp3"
elif instance.division == 'parasha':
base = 'archives/parasha'
common = f'{base}/{instance.segment}/{instance.segment}-{instance.section}-{instance.unit}'
        if instance.series is not None and instance.series != 'first':
path = f'{common}-{instance.series}-{instance.teacher.teacher_string}.mp3'
else:
path = f'{common}-{instance.teacher.teacher_string}.mp3'
elif instance.division == 'mishna':
base = 'archives/mishna'
file = f'{instance.segment}-{instance.section}-{instance.unit}-{instance.part}-{instance.teacher.teacher_string}'
path = f'{base}/{instance.segment}/{instance.section}/{file}.mp3'
else:
raise Exception(f'division is invalid: {instance.division}')
return path
def create_transcoder_job(audio_field):
if client is None:
raise Exception('client not initialized')
s3_key = str(audio_field)
client.create_job(
PipelineId=settings.AWS_TRANSCODER_PIPELINE_ID,
Input={
'Key': s3_key,
},
Output={
'Key': s3_key,
'PresetId': settings.AWS_TRANSCODER_PRESET_ID,
}
)
print('created transcoder job')
class Class(models.Model):
division = models.CharField(max_length=256)
division_name = models.CharField(max_length=256, null=True, blank=True)
division_title = models.CharField(max_length=256, null=True, blank=True)
division_sponsor = models.CharField(max_length=256, null=True, blank=True)
division_sequence = models.IntegerField(null=True, blank=True)
segment = models.CharField(max_length=256, null=True, blank=True)
segment_name = models.CharField(max_length=256, null=True, blank=True)
segment_title = models.CharField(max_length=256, null=True, blank=True)
segment_sponsor = models.CharField(max_length=256, null=True, blank=True)
segment_sequence = models.IntegerField(null=True, blank=True)
section = models.CharField(max_length=256)
section_name = models.CharField(max_length=256, null=True, blank=True)
section_title = models.CharField(max_length=256, null=True, blank=True)
section_sponsor = models.CharField(max_length=256, null=True, blank=True)
section_sequence = models.IntegerField(null=True, blank=True)
unit = models.CharField(max_length=256)
unit_name = models.CharField(max_length=256, null=True, blank=True)
unit_title = models.CharField(max_length=256, null=True, blank=True)
unit_sponsor = models.CharField(max_length=256, null=True, blank=True)
unit_sequence = models.IntegerField(null=True, blank=True)
part = models.CharField(max_length=256, null=True, blank=True)
part_name = models.CharField(max_length=256, null=True, blank=True)
part_title = models.CharField(max_length=256, null=True, blank=True)
part_sponsor = models.CharField(max_length=256, null=True, blank=True)
part_sequence = models.IntegerField(null=True, blank=True)
series = models.CharField(max_length=256, null=True, blank=True)
series_name = models.CharField(max_length=256, null=True, blank=True)
series_title = models.CharField(max_length=256, null=True, blank=True)
series_sponsor = models.CharField(max_length=256, null=True, blank=True)
series_sequence = models.IntegerField(null=True, blank=True)
start_chapter = models.CharField(max_length=256, null=True, blank=True)
start_verse = models.CharField(max_length=256, null=True, blank=True)
end_chapter = models.CharField(max_length=256, null=True, blank=True)
end_verse = models.CharField(max_length=256, null=True, blank=True)
audio_url = models.CharField(max_length=1024, null=True, blank=True)
audio = models.FileField(upload_to=get_class_audio_location, validators=[validate_file_extension], default=None, null=True, max_length=500)
teacher = models.ForeignKey(Teacher, on_delete=models.SET_DEFAULT, default=None, null=True)
date = models.DateTimeField(null=True, blank=True)
video_url = models.CharField(max_length=1024, null=True, blank=True)
class Meta:
        ordering = ['series_sequence', 'division_sequence', 'segment_sequence', 'section_sequence', 'unit_sequence', 'part_sequence', '-date']
def get_location(self):
toreturn = ''
if self.division == 'torah':
toreturn = f'/parasha-study/perakim/{self.section}/{self.unit}?part={self.part}'
elif (
self.division == 'neviim_rishonim' or
self.division == 'neviim_aharonim' or
self.division == 'tere_assar' or
self.division == 'ketuvim'
):
if self.part:
toreturn = f'/tanach-study/perakim/{self.section}/{self.unit}?part={self.part}'
else:
toreturn = f'/tanach-study/perakim/{self.section}/{self.unit}'
elif self.division == 'parasha':
toreturn = f'/parasha-plus-study/sefarim/{self.segment}/{self.section}?part={self.unit}'
elif self.division == 'mishna':
toreturn = f'/mishna-study/perek/{self.segment}/{self.section}/{self.unit}?part={self.part}'
return toreturn
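    # Illustration (hypothetical values): a torah-division record with section='bereshit',
    # unit='noach', part='1' would resolve to '/parasha-study/perakim/bereshit/noach?part=1'.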
def __str__(self):
audio = get_class_audio_location(self, '')
toreturn = ''
if self.division == 'torah':
toreturn = f'Torah - Sefer {self.section_title} Parashat {self.unit_title}: Part {self.part}'
elif (
self.division == 'neviim_rishonim' or
self.division == 'neviim_aharonim' or
self.division == 'tere_assar' or
self.division == 'ketuvim'
):
if self.part:
toreturn = f'Tanach - Sefer {self.section_title}: Perek {self.unit.title()} Part {self.part}'
else:
toreturn = f'Tanach - Sefer {self.section_title}: Perek {self.unit.title()}'
elif self.division == 'parasha':
if self.series is not None:
toreturn = f'{self.division.title()} - {self.segment_title}: {self.section_title} {self.unit.title()} ({self.series.title()})'
else:
toreturn = f'{self.division.title()} - {self.segment_title}: {self.section_title} {self.unit.title()}'
elif self.division == 'mishna':
toreturn = f'{self.division_title} - {self.segment_title}: {self.section_title} Perek {self.unit.title()} Mishna {self.part}'
return toreturn
def get_teamim_audio_location(instance, filename):
path = ''
reader_string = instance.reader.teacher_string
if instance.post.division == 'torah':
path = f'archives/Torah/{instance.post.section_title}/recordings/{reader_string}-{instance.post.unit}-{instance.post.part}-teamim.mp3'
elif (
instance.post.division == 'neviim_rishonim' or
instance.post.division == 'neviim_aharonim' or
instance.post.division == 'tere_assar'
):
base = ''
if instance.post.division == 'neviim_rishonim':
base = 'archives/Neviim Rishonim'
elif instance.post.division == 'neviim_aharonim':
base = 'archives/Neviim Aharonim'
elif instance.post.division == 'tere_assar':
base = 'archives/Tere Asar'
file = ''
        if instance.post.part is not None and instance.post.part != '':
file = f'{instance.post.section}-{instance.post.unit}{instance.post.part}'
else:
file = f'{instance.post.section}-{instance.post.unit}'
path = f'{base}/{instance.post.section.title()}/recordings/{reader_string}-{file}-teamim.mp3'
elif instance.post.division == 'ketuvim':
base = 'archives/Ketuvim'
file = ''
        if instance.post.part is not None and instance.post.part != '':
file = f'{instance.post.section}-{instance.post.unit}{instance.post.part}'
else:
file = f'{instance.post.section}-{instance.post.unit}'
path = f"{base}/{instance.post.section_title}/recordings/{reader_string}-{file}-teamim.mp3"
elif instance.post.division == 'parasha':
base = 'archives/parasha'
path = f'{base}/{instance.post.segment}/recordings/{reader_string}-{instance.post.segment}-{instance.post.section}-{instance.post.unit}-teamim.mp3'
elif instance.post.division == 'mishna':
base = 'archives/mishna'
# TODO(joey): remove teacher_string, add reader_string
file = f'{instance.post.segment}-{instance.post.section}-{instance.post.unit}-{instance.post.part}-{instance.post.teacher.teacher_string}'
path = f'{base}/{instance.post.segment}/{instance.post.section}/recordings/{file}-teamim.mp3'
else:
raise Exception(f'division is invalid: {instance.post.division}')
return path
class Teamim(models.Model):
reader = models.ForeignKey(Teacher, on_delete=models.SET_DEFAULT, default=None, null=True, blank=True)
audio = models.FileField(upload_to=get_teamim_audio_location, default=None, null=True, blank=True, max_length=500)
post = models.ForeignKey(Class, on_delete=models.SET_DEFAULT, default=None, null=True, blank=True)
def __str__(self):
return f'{self.post} Read By {self.reader}'
class ShasSedarim(models.TextChoices):
ZERAIM = 'zeraim', 'Zeraim'
MOED = 'moed', 'Moed'
NASHIM = 'nashim', 'Nashim'
NEZIKIN = 'nezikin', 'Nezikin'
KADASHIM = 'kadashim', 'Kadashim'
TAHAROT = 'taharot', 'Taharot'
class TalmudSponsor(models.Model):
line_one = models.CharField(max_length=1024)
line_two = models.CharField(max_length=1024, blank=True, null=True)
line_three = models.CharField(max_length=1024, blank=True, null=True)
def __str__(self):
s = ''
if self.line_one:
s = s + ' ' + self.line_one
if self.line_two:
s = s + ' ' + self.line_two
if self.line_three:
s = s + ' ' + self.line_three
return s
def get_talmud_audio_location(instance, filename):
base = 'archives/talmud'
file = f'{instance.seder}-{instance.masechet}-{instance.daf}-{instance.teacher.teacher_string}'
path = f'{base}/{instance.seder}/{instance.masechet}/{file}.mp3'
return path
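# Example (hypothetical instance): seder='moed', masechet='shabbat', daf=21, teacher_string='rabbi-x'
# yields 'archives/talmud/moed/shabbat/moed-shabbat-21-rabbi-x.mp3'.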
class TalmudStudy(models.Model):
MASECHET_CHOICES = masechetot_by_seder
seder = models.CharField(max_length=12, choices=ShasSedarim.choices)
seder_sponsor = models.ForeignKey(TalmudSponsor, related_name='+', on_delete=models.SET_DEFAULT, default=None, null=True, blank=True)
seder_sequence = models.IntegerField(null=True, blank=True)
masechet = models.CharField(max_length=50, choices=MASECHET_CHOICES)
masechet_sponsor = models.ForeignKey(TalmudSponsor, related_name='+', on_delete=models.SET_DEFAULT, default=None, null=True, blank=True)
masechet_sequence = models.IntegerField(null=True, blank=True)
daf = models.IntegerField()
daf_sponsor = models.ForeignKey(TalmudSponsor, related_name='+', on_delete=models.SET_DEFAULT, default=None, null=True, blank=True)
audio = models.FileField(upload_to=get_talmud_audio_location, validators=[validate_file_extension], default=None, null=True, max_length=500)
teacher = models.ForeignKey(Teacher, on_delete=models.SET_DEFAULT, default=None, null=True)
date = models.DateTimeField()
    # Note: as written this is a no-op; the constraint is never attached to Meta.constraints and no 'amud' field exists on this model.
    models.UniqueConstraint(fields=['masechet', 'daf', 'amud'], name='unique_daf_amud_per_masechet')
def __str__(self):
return f'Masechet {self.masechet.title()} Daf {self.daf} with {str(self.teacher)}'
def get_location(self):
teacher = str(self.teacher).lower().replace('.', '').replace(' ', '-')
return f'/talmud-study/dapim/{self.seder}/{self.masechet}/{self.daf}?{teacher}'
|
class CompteBancaire:
def __init__(self,nom="Dupont",solde=1000):
self.nom=nom
self.solde=solde
def depot(self,somme):
self.solde+=somme
def retrait(self,somme):
self.solde -= somme
def affiche(self):
print("le titulaire "+self.nom+" qui a "+str(self.solde)+"dt dans son compte")
print("le titulaire {} et le solde est {}".format(self.nom,str(self.solde)))
compte1=CompteBancaire("sam",1000)
compte1.depot(1000)
compte1.affiche()
|
#===========================================================================
#command line parsers for py.test.
#===========================================================================
def pytest_addoption(parser):
parser.addoption("--env", action="store",help="env: the env that runs the tests e.g. qa , dev")
def pytest_funcarg__env(request):
return request.config.option.env
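# Usage sketch (assumed invocation): running "pytest --env qa" makes "qa" available to any test
# that declares an "env" argument via the funcarg hook above.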
|
# -*- coding: UTF-8 -*-
import os  # operating-system related functionality
import sys  # Python interpreter related functionality
import logging  # logging
import ConfigParser  # configuration-file module; renamed to configparser in Python 3.x
|
import json
def main():
with open("emd.json", encoding="utf-8") as file:
dataset = json.load(file)
file = open("converted.ttl", "w", encoding="utf-8")
emds = {}
atletas = {}
modalidades = set()
clubes = {}
for data in dataset:
emds[data["_id"]] = {
"data": data["dataEMD"],
"modalidade": data["modalidade"],
"nome": data["nome"]["primeiro"] + data["nome"]["último"],
"resultado": data["resultado"]
}
atletas[data["nome"]["primeiro"] + data["nome"]["último"]] = {
"nome": data["nome"]["primeiro"] + " " + data["nome"]["último"],
"idade": data["idade"],
"género": data["género"],
"morada": data["morada"],
"modalidade": data["modalidade"],
"clube": data["clube"],
"email": data["email"],
"federado": data["federado"],
}
modalidades.add(data["modalidade"])
clube_key = data["clube"].replace(" ","")
if clube_key in clubes:
clubes[clube_key]["atletas"].append(data["nome"]["primeiro"] + data["nome"]["último"])
else:
clubes[clube_key] = {
"nome": data["clube"],
"atletas": [data["nome"]["primeiro"] + data["nome"]["último"]]
}
for emd in emds:
file.write("### http://www.semanticweb.org/joaop/ontologies/2021/4/emd#" + emd + "\n")
file.write(":" + emd + " rdf:type owl:NamedIndividual ,\n")
file.write(" "*8 + ":EMD ; \n")
file.write(" "*8 + ":relativoA " + ":" + emds[emd]["nome"] + ' ;\n')
file.write(" "*8 + ":referenteA " + ":" + emds[emd]["modalidade"] + ' ;\n')
file.write(" "*8 + ":data " + '"' + emds[emd]["data"] + '" ;\n')
file.write(" "*8 + ":resultado " + '"' + str(emds[emd]["resultado"]) + '" .\n')
file.write("\n\n")
for atleta in atletas:
file.write("### http://www.semanticweb.org/joaop/ontologies/2021/4/emd#" + atleta + "\n")
file.write(":" + atleta + " rdf:type owl:NamedIndividual ,\n")
file.write(" "*8 + ":Atleta ; \n")
file.write(" "*8 + ":participaEm " + ":" + atletas[atleta]["modalidade"] + ' ;\n')
file.write(" "*8 + ":nome " + '"' + atletas[atleta]["nome"] + '" ;\n')
file.write(" "*8 + ":idade " + '"' + str(atletas[atleta]["idade"]) + '" ;\n')
file.write(" "*8 + ":genero " + '"' + atletas[atleta]["género"] + '" ;\n')
file.write(" "*8 + ":morada " + '"' + atletas[atleta]["morada"] + '" ;\n')
file.write(" "*8 + ":email " + '"' + atletas[atleta]["email"] + '" ;\n')
file.write(" "*8 + ":nome " + '"' + atletas[atleta]["nome"] + '" ;\n')
file.write(" "*8 + ":federado " + '"' + str(atletas[atleta]["federado"]) + '" .\n')
file.write("\n\n")
for modalidade in modalidades:
file.write("### http://www.semanticweb.org/joaop/ontologies/2021/4/emd#" + modalidade + "\n")
file.write(":" + modalidade + " rdf:type owl:NamedIndividual ,\n")
file.write(" "*8 + ":Modalidade . \n")
file.write("\n\n")
for clube in clubes:
file.write("### http://www.semanticweb.org/joaop/ontologies/2021/4/emd#" + clube + "\n")
file.write(":" + clube + " rdf:type owl:NamedIndividual ,\n")
file.write(" "*8 + ":Clube ; \n")
i = 0
for atleta in clubes[clube]["atletas"]:
if i == 0:
file.write(" "*8 + ":temAtleta " + ":" + atleta)
else :
file.write(" "*18 + ":" + atleta)
if i == len(clubes[clube]["atletas"])-1:
file.write(" ;\n")
else:
file.write(" ,\n")
i += 1
file.write(" "*8 + ":nome " + '"' + clubes[clube]["nome"] + '" .\n')
file.write("\n\n")
file.close()
if __name__ == "__main__":
main()
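# Output sketch: for a hypothetical record, the loops above emit Turtle blocks of the form
#   :<emd_id> rdf:type owl:NamedIndividual , :EMD ; :relativoA :<athlete> ; :referenteA :<sport> ;
#   :data "<date>" ; :resultado "<result>" .
# (identifiers and predicate names come straight from the write calls above).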
|
from heart_server_helpers import existing_beats
import pytest
@pytest.mark.parametrize("pat_id, expected", [
(-1, True),
(-2, False),
])
def test_existing_beats(pat_id, expected):
pat_exist = existing_beats(pat_id)
assert pat_exist == expected
|
import tensorflow as tf
"""
理解:
tf.get_variable: 配合tf.variable_scope 及 scope.reuse_variables() 使得 var1 和 var12是同一个变量(name相同)
tf.Variable: var2, var21, var22是三个不同的变量
所以如果要复用变量,使用 tf.get_variable
"""
# name_scope
# with tf.name_scope("a_name_scope"):
# initializer = tf.constant_initializer(value=1)
# var1 = tf.get_variable(name='var1', shape=[1], dtype=tf.float32, initializer=initializer)
# var2 = tf.Variable(name='var2', initial_value=[2], dtype=tf.float32)
# var21 = tf.Variable(name='var2', initial_value=[2.1], dtype=tf.float32)
# var22 = tf.Variable(name='var2', initial_value=[2.2], dtype=tf.float32)
# variable_scope
with tf.variable_scope("a_variable_scope") as scope:
    initializer = tf.constant_initializer(value=1)
var1 = tf.get_variable(name='var1', shape=[1], dtype=tf.float32, initializer=initializer)
scope.reuse_variables()
var12 = tf.get_variable(name='var1')
var2 = tf.Variable(name='var2', initial_value=[2], dtype=tf.float32)
var21 = tf.Variable(name='var2', initial_value=[2.1], dtype=tf.float32)
var22 = tf.Variable(name='var2', initial_value=[2.2], dtype=tf.float32)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(var1.name)            # a_variable_scope/var1:0
print(sess.run(var1)) # [ 1.]
print('-'*10)
    print(var12.name)           # a_variable_scope/var1:0
print(sess.run(var12)) # [ 1.]
print('-'*10)
    print(var2.name)            # a_variable_scope/var2:0
print(sess.run(var2)) # [ 2.]
print('-' * 10)
    print(var21.name)           # a_variable_scope/var2_1:0
print(sess.run(var21)) # [ 2.0999999]
print('-' * 10)
    print(var22.name)           # a_variable_scope/var2_2:0
print(sess.run(var22)) # [ 2.20000005]
|
import numpy as np
import pandas as pd
import sklearn
import sklearn.preprocessing
import scipy.stats
import tensorflow.keras as keras
df = pd.read_csv('WISDM_clean.csv')
df_train = df[df['user_id'] <= 30]
df_test = df[df['user_id'] > 30]
# Norm
scale_columns = ['x_axis', 'y_axis', 'z_axis']
scaler = sklearn.preprocessing.RobustScaler()
scaler = scaler.fit(df_train[scale_columns])
df_train.loc[:, scale_columns] = scaler.transform(
df_train[scale_columns].to_numpy()
)
df_test.loc[:, scale_columns] = scaler.transform(
df_test[scale_columns].to_numpy()
)
def create_dataset(X, y, time_steps=1, step=1):
Xs, ys = [], []
for i in range(0, len(X) - time_steps, step):
v = X.iloc[i:(i + time_steps)].values
labels = y.iloc[i: i + time_steps]
Xs.append(v)
ys.append(scipy.stats.mode(labels)[0][0])
return np.array(Xs), np.array(ys).reshape(-1, 1)
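# Each returned window stacks TIME_STEPS consecutive sensor rows (shape [n_windows, TIME_STEPS, 3]),
# labelled with the modal activity of that window; windows start every STEP rows.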
TIME_STEPS = 200
STEP = 40
X_train, y_train = create_dataset(
df_train[['x_axis', 'y_axis', 'z_axis']],
df_train.activity,
TIME_STEPS,
STEP
)
X_test, y_test = create_dataset(
df_test[['x_axis', 'y_axis', 'z_axis']],
df_test.activity,
TIME_STEPS,
STEP
)
print(X_train.shape, y_train.shape)
enc = sklearn.preprocessing.OneHotEncoder(handle_unknown='ignore', sparse=False)
enc = enc.fit(y_train)
y_train = enc.transform(y_train)
y_test = enc.transform(y_test)
model = keras.Sequential()
model.add(
keras.layers.Bidirectional(
keras.layers.LSTM(
units=128,
input_shape=[X_train.shape[1], X_train.shape[2]]
)
)
)
model.add(keras.layers.Dropout(rate=0.5))
model.add(keras.layers.Dense(units=128, activation='relu'))
model.add(keras.layers.Dense(y_train.shape[1], activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
print(X_train.shape) # (22454, 200, 3)
history = model.fit(
X_train, y_train,
epochs=20,
batch_size=64,
validation_split=0.1,
shuffle=True
)
model.summary()
model.save("lstm.h5")
|
# noinspection PyUnresolvedReferences
from django.shortcuts import render,redirect
# noinspection PyUnresolvedReferences
from django.http import HttpResponse,HttpResponseRedirect
# noinspection PyUnresolvedReferences
from django.contrib.auth.models import User
# noinspection PyUnresolvedReferences
from django.contrib.auth import authenticate,login
# noinspection PyUnresolvedReferences
from django.contrib.auth.decorators import login_required
from .models import toDoList
from django.core import serializers
import json
def homepage(request):
if request.method == "POST" and 'taskName' in request.POST:
taskName=request.POST['taskName']
description=request.POST['description']
newEnty = toDoList(userdetails= request.user, taskName = taskName, description = description, status = "TODO")
newEnty.save()
return HttpResponseRedirect('/home/profile')
if request.user.is_authenticated:
return HttpResponseRedirect('/home/profile')
if request.method == "POST" and 'form-user-name' in request.POST:
username_form= request.POST['form-user-name']
first_form=request.POST['form-first-name']
Last_form = request.POST['form-last-name']
Email_form = request.POST['form-email']
Pass_form = request.POST['form-pass-word']
user = User.objects.create_user(username=username_form, password=Pass_form,email=Email_form,first_name=first_form,last_name=Last_form)
login(request, user)
return HttpResponseRedirect('/home/profile')
if request.method == "POST" and 'form-username' in request.POST:
username_login=request.POST['form-username']
password_login=request.POST['form-password']
user = authenticate(username=username_login, password=password_login)
if user:
login(request, user)
return HttpResponseRedirect('/home/profile')
else:
return render(request,'homepage.html',{"msg":"login failed"})
return render(request,'homepage.html')
@login_required
def userprofile(request):
if not request.user.is_authenticated:
return HttpResponseRedirect('/home/')
tasks= toDoList.objects.filter(userdetails = request.user,status__in=["COMPLETED","TODO","INPROGRESS"])
completed = toDoList.objects.filter(userdetails = request.user,status="COMPLETED")
todo = toDoList.objects.filter(userdetails = request.user,status="TODO")
inprogress = toDoList.objects.filter(userdetails = request.user,status="INPROGRESS")
return render(request,'cards.html',{"tasks":tasks,"completed":completed, "todo":todo, "inprogress": inprogress})
def taskstatus(request,offset):
newtype = offset.split("-")[0]
taskid = offset.split("-")[1]
newstatus = toDoList.objects.get(id = taskid)
newstatus.status = newtype
newstatus.save()
return HttpResponseRedirect('/home/profile')
|
nums = [1, 2, 3, 4, 5, 6, 7, 8]
# using reverse index to get the number I wanted
print(nums[-2])
characters = ['Asokha Tano', 'Luke Skywalker', 'Obi-Wan Kenobi']
print(characters[0])
print(characters[-1])
|
from .SelectionMode import SelectionMode
from .SelectionType import SelectionModeTransform, SelectionType
from bsp.leveleditor.menu.KeyBind import KeyBind
from bsp.leveleditor import LEGlobals
class VertexMode(SelectionMode):
Type = SelectionType.Vertices
Mask = LEGlobals.FaceMask
Key = "solidvertex"
CanDelete = False
CanDuplicate = False
TransformBits = SelectionModeTransform.Translate
KeyBind = KeyBind.SelectVertices
Icon = "resources/icons/editor-select-verts.png"
Name = "Vertices"
Desc = "Select solid vertices"
def onSelectionsChanged(self):
pass
|
array = [6, 3, 7, 0, 9, 8, 4, 5, 1, 2]
for j in range(len(array)-1):
for i in range(len(array)-1):
if array[i] > array[i + 1]:
buffer = array[i]
array[i] = array[i + 1]
array[i + 1] = buffer
print(array)
print('test')
|
from __future__ import unicode_literals
from django.db import models
import re
email_regex = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
import bcrypt
def get_passwd_hash( passwd, salt = None ):
    # generate the salt per call; a default argument would only be evaluated once, at import time
    salt = salt if salt is not None else bcrypt.gensalt()
    return( bcrypt.hashpw( passwd, salt ) )
class UsersManager( models.Manager ):
def register( self, postData ):
errors = []
# validate email (raw content)
if len( postData['email'] ) < 1:
errors.append( "The email field is empty." )
elif not email_regex.match( postData['email'] ):
errors.append( "Incorrectly formatted email." )
# validate email (not in DB)
elif len( self.filter( email = postData['email'] ) ) > 0:
errors.append( "The email ({}) is already used.".format( postData['email'] ) )
# validate first_name (raw content)
if len( postData['f_name'] ) < 1:
errors.append( "The first name field is empty." )
elif not postData['f_name'].isalpha():
errors.append( "The first name field can only contain letters." )
# validate last_name (raw content)
if len( postData['l_name'] ) < 1:
errors.append( "The last name field is empty." )
elif not postData['l_name'].isalpha():
errors.append( "The last name field can only contain letters." )
# validate passwd_1 (raw content)
if len( postData['passwd_1'] ) < 1:
errors.append( "The password field is empty." )
elif len( postData['passwd_1'] ) < 8:
errors.append( "The password field MUST be AT LEAST 8 characters!" )
elif not re.match( r'^.*[A-Z]+.*$', postData['passwd_1'] ):
errors.append( "The password field MUST contain AT LEAST 1 capital letter!" )
elif not re.match( r'^.*\d+.*$', postData['passwd_1'] ):
errors.append( "The password field MUST contain AT LEAST 1 number!" )
# validate passwd_1 against passwd_2
if postData['passwd_1'] != postData['passwd_2']:
errors.append( "The password and confirm password fields MUST match!" )
# return
if len( errors ):
return {
'status': False,
'errors': errors
}
else:
return {
'status': True,
'user': self.create(
email = postData['email'],
first_name = postData['f_name'],
last_name = postData['l_name'],
password = get_passwd_hash( postData['passwd_1'].encode() ),
)
}
def login( self, postData ):
errors = []
# validate email (raw content)
if len( postData['email'] ) < 1:
errors.append( "The email field is empty." )
elif not email_regex.match( postData['email'] ):
errors.append( "Incorrectly formatted email." )
# validate email (in DB)
elif len( self.filter( email = postData['email'] ) ) < 1:
errors.append( "Unknown email." )
# validate password (raw content)
elif len( postData['passwd'] ) < 1:
errors.append( "The password field is empty." )
# validate password (matches DB)
else:
user = self.get( email = postData['email'] )
if get_passwd_hash( postData['passwd'].encode(), user.password.encode() ) != user.password:
errors.append( "Incorrect email or password." )
# return
if len( errors ):
return {
'status': False,
'errors': errors
}
else:
return {
'status': True,
'user': self.get( email = postData['email'] )
}
def add_predefined_data( self ):
self.create(
email = "AbCde@f.x",
first_name = "Ab",
last_name = "Cde",
password = get_passwd_hash( "1password".encode() ),
)
self.create(
email = "a@b.c",
first_name = "A",
last_name = "Bc",
password = get_passwd_hash( "2password".encode() ),
)
self.create(
email = "x@y.z",
first_name = "X",
last_name = "Yz",
password = get_passwd_hash( "password2".encode() ),
)
class Users( models.Model ):
first_name = models.CharField( max_length = 255 )
last_name = models.CharField( max_length = 255 )
email = models.CharField( max_length = 255 )
    password = models.CharField( max_length = 255 )  # bcrypt hashes are 60 bytes, so 40 was too short
created_at = models.DateTimeField( auto_now_add = True )
updated_at = models.DateTimeField( auto_now = True )
objects = UsersManager()
|
import sys
import os
f = open("C:/Users/user/Documents/python/ant/import.txt","r")
sys.stdin = f
# -*- coding: utf-8 -*-
n = int(input())
w = []
v = []
for i in range(0,n):
temp = list(map(int,input().split()))
w.append(temp[0])
v.append(temp[1])
W = int(input())
dp = [[-1] * (W + 2) for _ in range(n + 2)]  # independent rows; dp[i][j] indexed by item i and remaining capacity j
def rec(i,j):
    if 0 <= i < n and 0 <= j <= W:
if dp[i][j] >= 0:
return dp[i][j]
if i == n:
res = 0
elif j < w[i]:
res = rec(i + 1, j)
else:
res = max(rec(i + 1,j), rec(i + 1,j - w[i]) + v[i])
    if 0 <= i < n and 0 <= j <= W:
dp[i][j] = res
return res
print(rec(0,W))
|
import numpy as np
# statistical functions
data = np.array([1, 2, 3, 4, 5, 6])
print(data.min(), data.mean(), data.max(), data.std())
print(np.sum(data), np.mean(data), np.min(data), np.max(data), np.std(data))
print(np.median(data))
print(np.quantile(data, [0.25, 0.5, 0.75]))
print(np.percentile(data, [25, 50, 75]))
# stocks: Fibonacci golden-ratio retracement levels (38.2%, 50%, 61.8%)
data = np.array([1450, 2050])
print(np.quantile(data, [0.382, 0.5, 0.618]))
|
#!/usr/bin/python
"""
PowerMon!
A script to read power meter readings from a 433MHz energy sensor and write them to a RRD file.
In my case, the power meter is an Owl CM180.
This script requires rtl_433 from https://github.com/merbanan/rtl_433, with a little hackery to
make the output parseable.
"""
import sys
import subprocess
import re
import traceback
import logging
import logging.handlers
import argparse
import BaseHTTPServer
from urlparse import urlparse, parse_qs
import tempfile
import threading
import json
LAST_POWER_READ = 0
RRD_FILE = None
#####################
# Web Server
#####################
class MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_HEAD(s):
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
def do_GET(s):
"""Respond to a GET request."""
s.send_response(200)
args = parse_qs(urlparse(s.path).query)
if s.path == "/data":
# Return JSON representation
renderDataJSON(s)
elif s.path.startswith("/denkimonconf"):
renderDenkiMonConf(s)
elif "graph" not in args:
renderPowerGraphHTML(s)
else:
renderGraphImage(s, args["graph"][0])
def renderDataJSON(s):
s.send_header("Content-type", "application/json")
s.end_headers()
json.dump({ 'power_now' : LAST_POWER_READ }, s.wfile)
def renderDenkiMonConf(s):
s.send_header("Content-type", "text/html")
s.end_headers()
with open('denkimondconf.html', 'r') as htmlf:
s.wfile.write(htmlf.read())
def renderPowerGraphHTML(s):
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write("""
<html>
<head>
<title>Power Consumption</title>
</head>
<body>
<h1>Previous 1 Hour</h1>
<img src="?graph=1h" border=0"/>
<h1>Previous 1 Hour Actual Values</h1>
<img src="?graph=1h_raw" border=0"/>
<h1>Previous 6 Hours</h1>
<img src="?graph=6h" border=0"/>
<h1>Previous 24 Hours</h1>
<img src="?graph=24h" border=0"/>
<h1>Previous 7 days</h1>
<img src="?graph=7d" border=0"/>
</body>
</html>
""")
def renderGraphImage(s, graph_type):
s.send_header("Content-type", "image/png")
s.end_headers()
(fh, tmpfile) = tempfile.mkstemp()
rrdcmd = 'rrdtool graph {filename} --width 1200 --height 480 \
--imgformat PNG \
--start end-{duration} --end now \
--slope-mode --vertical-label Watts \
DEF:MinPower={rrd_file}:watts:MIN:step={step} \
DEF:MaxPower={rrd_file}:watts:MAX:step={step} \
DEF:Power={rrd_file}:watts:AVERAGE:step={step} \
CDEF:Range=MaxPower,MinPower,- \
LINE1:MinPower#00FF00:"Min" \
AREA:Range#8dadf5::STACK \
LINE1:MaxPower#FF0000:"Max" \
LINE2:Power#0000FF:"Average" \
'
rrdcmd_raw = 'rrdtool graph {filename} --width 1200 --height 480 \
--imgformat PNG \
--start end-{duration} --end now \
--slope-mode --vertical-label Watts \
DEF:Power={rrd_file}:watts:LAST:step={step} \
LINE1:Power#0000FF:"Actual" \
'
if graph_type == "6h":
rrdcmd = rrdcmd.format(filename = tmpfile, rrd_file = RRD_FILE, duration="6h", step="300") #"300")
elif graph_type == "1h":
rrdcmd = rrdcmd.format(filename = tmpfile, rrd_file = RRD_FILE, duration="1h", step="30") #"60")
elif graph_type == "1h_raw":
rrdcmd = rrdcmd_raw.format(filename = tmpfile, rrd_file = RRD_FILE, duration="1h", step="10") #"60")
elif graph_type == "24h":
rrdcmd = rrdcmd.format(filename = tmpfile, rrd_file = RRD_FILE, duration="24h", step="300") #"300")
elif graph_type == "7d":
rrdcmd = rrdcmd.format(filename = tmpfile, rrd_file = RRD_FILE, duration="7d", step="600") #"300")
else:
logging.error("Unexpected graph type.")
return
logging.info("Running rrd command: {} ".format(rrdcmd))
try:
o = subprocess.check_output(rrdcmd, stderr = subprocess.STDOUT, shell=True)
except subprocess.CalledProcessError as e:
logging.error("Ooops, rrdtool failed: {}".format(e.output))
with open(tmpfile, 'rb') as imgfile:
s.wfile.write(imgfile.read())
class HTTPThread (threading.Thread):
def __init__(self, port):
threading.Thread.__init__(self)
self.port = port
def run(self):
logging.info( "Starting HTTP Thread" )
run_http(self.port)
logging.info("Exiting HTTP Thread" )
def run_http(port):
server_class = BaseHTTPServer.HTTPServer
httpd = server_class(('', port), MyHandler)
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
##########
# rtl_433 reader
##########
def init(rrd_file):
# The buckets are:
# 1) 1:4320 = 1 reading every 10 seconds, 1 reading per sample, storing 4320 samples = 12 hours of recording.
# 2) 6:1440 = 1 reading every 10 seconds, 6 readings per sample, storing 1440 samples = 24 hours of recording.
# 3) 60:1008 = 1 reading every 10 seconds, 60 readings per sample, storing 1008 samples = 7 days of recording.
cmd = "rrdtool create {} --step 10 \
DS:watts:GAUGE:300:0:5000 \
RRA:LAST:0.5:1:60480 \
RRA:AVERAGE:0.5:1:4320 \
RRA:AVERAGE:0.5:6:1440 \
RRA:AVERAGE:0.5:60:1008 \
RRA:MIN:0.5:1:3600 \
RRA:MIN:0.5:6:1440 \
RRA:MIN:0.5:60:1008 \
RRA:MAX:0.5:1:3600 \
RRA:MAX:0.5:6:1440 \
RRA:MAX:0.5:60:1008 \
".format(rrd_file)
logging.info("Initializing RRD with command: {} ".format(cmd))
o = subprocess.check_output(cmd, stderr = subprocess.STDOUT, shell=True)
logging.info("Completed init.")
def update_rrd(rrd_file, power, ts):
cmd = "rrdtool update {} {}:{}".format(rrd_file, ts, power)
logging.info("Updating rrd with command: {}".format(cmd))
o = subprocess.check_output(cmd, stderr = subprocess.STDOUT, shell=True)
def run(rrd_file):
global LAST_POWER_READ
popen = subprocess.Popen('rtl_433', stdout = subprocess.PIPE)
l_iter = iter(popen.stdout.readline, b'')
for line in l_iter:
# Example line: Energy Sensor CM180, Id: 62a1, power: 577W, Time: 1452027145
if line.startswith('Energy Sensor CM180'):
m = re.search(r"power: (\d+)", line)
power = m.group(1)
m = re.search(r"Time: (\d+)", line)
ts = m.group(1)
logging.info("Sensor reading {} watts at {} epoch seconds.".format(power, ts))
update_rrd(rrd_file, power, ts)
LAST_POWER_READ = power
# Run rtl_433 in subprocess reading stdout
# Parse each line into time and watts
# Insert into rrd.
def main():
parser = argparse.ArgumentParser(description='Powermon')
parser.add_argument('--log', help='Log file', default='powermon.log')
parser.add_argument('--init', help='Initializes RRD database file', default=False, action='store_true')
parser.add_argument('--rrd_file', help='RRD filename', default='power.rrd')
    parser.add_argument('--http_port', help='Port number of HTTP server', type=int, default=9000)
args = parser.parse_args()
logFormatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(message)s")
rootLogger = logging.getLogger()
fileHandler = logging.handlers.RotatingFileHandler(args.log, maxBytes=(1048576*5), backupCount=7)
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)
rootLogger.setLevel(logging.DEBUG)
global RRD_FILE
RRD_FILE = args.rrd_file
logging.info('Powermon started.')
if args.init:
init(args.rrd_file)
else:
t = HTTPThread(args.http_port)
t.start()
run(args.rrd_file)
logging.info('Powermon ending.')
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt as e:
traceback.print_exc()
except Exception as e:
traceback.print_exc()
|
"""
Python Client speaking Nedh by taking over a socket from an inherited fd
"""
__all__ = ["takeEdhFd"]
from typing import *
import asyncio
import inspect
import socket
import runpy
from edh import *
from . import log
from .mproto import *
from .peer import *
logger = log.get_logger(__name__)
async def takeEdhFd(wsc_fd: int, net_opts: Optional[Dict] = None):
loop = asyncio.get_running_loop()
# prepare the peer object
ident = f"<fd:{wsc_fd}>"
eol = loop.create_future()
# outletting currently has no rate limit, maybe add in the future?
# with an unbounded queue, backpressure from remote peer is ignored
# and outgoing packets can pile up locally
poq = asyncio.Queue()
# intaking should create backpressure when handled slowly, so use a
# bounded queue
hoq = asyncio.Queue(maxsize=1)
peer = Peer(ident=ident, eol=eol, posting=poq.put, hosting=hoq.get)
# mark end-of-life anyway finally
def client_cleanup(clnt_fut):
if eol.done():
return
if clnt_fut.cancelled():
eol.set_exception(asyncio.CancelledError())
return
exc = clnt_fut.exception()
if exc is not None:
eol.set_exception(exc)
else:
eol.set_result(None)
async def _consumer_thread():
outlet = None
try:
# take over the network connection
sock = socket.socket(fileno=wsc_fd)
intake, outlet = await asyncio.open_connection(
sock=sock,
**net_opts or {},
)
async def pumpCmdsOut():
# this task is the only one writing the socket
try:
while True:
pkt = await read_stream(eol, poq.get())
if pkt is EndOfStream:
break
await sendPacket(ident, outlet, pkt)
except Exception as exc:
logger.error("Nedh fd client error.", exc_info=True)
if not eol.done():
eol.set_exception(exc)
asyncio.create_task(pumpCmdsOut())
# pump commands in,
# this task is the only one reading the socket
await receivePacketStream(
peer_site=ident, intake=intake, pkt_sink=hoq.put, eos=eol
)
except Exception as exc:
logger.error("Nedh fd client error.", exc_info=True)
if not eol.done():
eol.set_exception(exc)
finally:
if not eol.done():
eol.set_result(None)
if outlet is not None:
# todo post err (if any) to peer
outlet.write_eof()
outlet.close()
# don't do this to workaround https://bugs.python.org/issue39758
# await outlet.wait_closed()
asyncio.create_task(_consumer_thread()).add_done_callback(client_cleanup)
return peer
|
"""
browser.switch_to.window(window_name)
first_window = browser.window_handles[0]
new_window = browser.window_handles[1]
"""
import time
from selenium import webdriver
from selenium.webdriver.common.by import By
import math
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
def calc(x):
return str(math.log(abs(12 * math.sin(int(x)))))
try:
browser = webdriver.Chrome("f:\Дима\chromedriver.exe")
browser.implicitly_wait(5)
link = "http://suninjuly.github.io/explicit_wait2.html"
browser.get(link)
    # tell Selenium to keep polling for up to 20 seconds until the price element shows '100'
price = WebDriverWait(browser, 20).until(
EC.text_to_be_present_in_element((By.ID, 'price'), '100')
)
button = browser.find_element_by_id('book').click()
price = browser.find_element_by_id('input_value').text
res = calc(price)
field = browser.find_element_by_id("answer").send_keys(res)
submit = browser.find_element_by_css_selector("[type=submit]").click()
alert = browser.switch_to.alert
text = alert.text
help(text)
print(text[-18:])
alert.accept()
print("success!")
finally:
    # pause so the result of the run can be inspected visually
time.sleep(1)
    # close the browser after all interactions
browser.quit()
|
import copy
# the graph is implemented as an adjacency list
# load the graph
def read_graph_data(linksfile,namesfile):
lf = open(linksfile)
nf = open(namesfile)
names=dict([line.strip().split() for line in nf.readlines()])
nodelist={v:[] for v in names.values()} #dict
for line in lf.readlines():
n_from,n_to,cost=line.strip().split()
nodelist[ names[n_from] ].append([names[n_to],int(cost)])
nodelist[ names[n_to] ].append([names[n_from],int(cost)])
lf.close()
nf.close()
return nodelist
# find the shortest path with Dijkstra's algorithm
# using a priority queue the complexity is E + V log(V)
import heapq
def dijkstra(graph,start,end):
if start not in graph.keys() or end not in graph.keys():
return "そのような駅は存在しません",-1
INF=1001001001
distances={k:INF for k in graph.keys()}
prev_node={k:-1 for k in graph.keys()}
    # compute the shortest distances
que=[(0,start),]
heapq.heapify(que)
while len(que)>0:
now_cost,now_node=heapq.heappop(que)
if now_cost>distances[now_node]:
continue
for next_node,cost in graph[now_node]:
if now_cost+cost<distances[next_node]:
distances[next_node]=now_cost+cost
prev_node[next_node]=now_node
heapq.heappush(que, (now_cost+cost,next_node))
    # reconstruct the shortest route
if distances[end]==INF:
return "到達できません",-1
now_node=end
route=[end,]
while now_node!=start:
assert(now_node in prev_node.keys())
now_node=prev_node[now_node]
route.append(now_node)
return list(reversed(route)),distances[end]
import queue
import heapq
def count_betweeness_Brandes(graph):  # compute node and edge betweenness centrality
    edge_betweeness={}  # (frm, to, cost): edge betweenness
    for frm,tos in graph.items():  # register every node's edges
        for to,cost in tos:
            edge_betweeness[(frm,to,cost)]=0
    # algorithm: https://www.eecs.wsu.edu/~assefaw/CptS580-06/papers/brandes01centrality.pdf
    # the paper computes vertex betweenness; the same scheme is applied to edges here as well
    # note that node betweenness on its own is already a reasonable indicator of node importance
    C_b={k:0 for k in graph.keys()}
    for s in graph.keys():
        P={k:[] for k in graph.keys()}  # P[v]: predecessor vertices on shortest paths from s to v
        sigma={k:0 for k in graph.keys()}  # number of shortest paths
        sigma[s]=1
        d={k:-1 for k in graph.keys()}  # d[v]: shortest distance from s to v
        d[s]=0
        # from here on this is an ordinary Dijkstra run
Q=[]
heapq.heapify(Q)
heapq.heappush(Q, (0,s))
while len(Q)>0:
#print(Q)
nowcost,v=heapq.heappop(Q)
if d[v]<nowcost:
continue
assert(d[v]==nowcost)
for w,cost in graph[v]:
if d[w]<0:
d[w]=d[v]+cost
heapq.heappush(Q, (d[w],w))
if d[w]==d[v]+cost:
P[w].append((v,cost))
sigma[w]+=sigma[v]
if d[w]>d[v]+cost:
d[w]=d[v]+cost
heapq.heappush(Q, (d[w],w))
                    P[w]=[(v,cost),]  # discard the previous records
                    sigma[w]=sigma[v]  # discard the previous records
        # S holds every reachable node, ordered by increasing distance from s
        S=[k for k,v in sorted(d.items(), key=lambda x:x[1])]  # sort by shortest path length
        delta={k:0 for k in graph.keys()}  # betweenness contribution of each vertex for source s
        delta_edge={k:0 for k in edge_betweeness.keys()}  # betweenness contribution of each edge for source s
        while len(S)>0:
            to=S.pop()  # nodes are popped in order of decreasing distance from s
            #print(d[to],end=",")
            for frm,cost in P[to]:  # for each predecessor frm of to
                delta[frm]+=sigma[frm]/sigma[to]*(1+delta[to])
                # multiplying to's (already final) delta by sigma[frm]/sigma[to] gives the edge betweenness
                edge_betweeness[(frm,to,cost)]+=(sigma[frm]/sigma[to])*delta[to]
if to!=s:
C_b[to]+=delta[to]
return C_b,edge_betweeness
def connected_groups(graph_nondirected):
    # "connected" here means that for any two vertices in a group there is a path between them
    # the graph is undirected to begin with
groups=[]
not_connected=[k for k in graph_nondirected.keys()]
    while len(not_connected)>0:  # run a BFS starting from every remaining node
start=not_connected.pop()
group=[start,]
now=[start,]
visited={ k:0 for k in graph_nondirected.keys()}
visited[start]=1
count=0
while len(now)>0:
count+=1
nxt=[]
for i in now:
for next_node,cost in graph_nondirected[i]:
if visited[next_node]==0:
group.append(next_node)
                        assert(next_node in not_connected)  # this should always hold for an undirected graph
not_connected.remove(next_node)
visited[next_node]=1
nxt.append(next_node)
now=nxt
groups.append(group)
return groups
# split the graph into N groups (Girvan-Newman method); this may reveal how they relate to the groups from the class
def grouping_girvan_newman(graph,N):
new_graph=copy.deepcopy(graph)
connected_groups_now=connected_groups(new_graph)
    # initial connected components
while True:
groups=[]
        # 1. compute the edge betweenness of every remaining link
        # node_betweeness: how many shortest paths between any pair of nodes pass through a vertex (excluding pairs where it is an endpoint)
        # edge_betweeness: how many shortest paths between any pair of nodes pass through an edge
node_betweeness,edge_betweeness=count_betweeness_Brandes(new_graph)
        # once every reachable pair of nodes is at shortest distance 1, no further splitting is possible,
        # so stop cutting edges
        if max(node_betweeness.values())==0:
            print("Every reachable pair of nodes is now at shortest distance 1.")
connected_groups_now=connected_groups(new_graph)
return len(connected_groups_now),connected_groups(new_graph)
        # 2. otherwise, cut the link with the highest edge betweenness
max_edge_from,max_edge_to,max_edge_cost=max(edge_betweeness, key=edge_betweeness.get)
#print(max_edge_from,max_edge_to, max_edge_cost)
new_graph[max_edge_from].remove([max_edge_to,max_edge_cost])
new_graph[max_edge_to].remove([max_edge_from,max_edge_cost])
        # 3. repeat steps 1-2 until there are N connected components
connected_groups_now=connected_groups(new_graph)
if len(connected_groups_now)>=N:
return len(connected_groups_now),connected_groups_now
graph=read_graph_data("./transit_links/edges.txt","./transit_links/stations.txt")
print("=====TEST start=====")
route,duration=dijkstra(graph,"新宿","四ツ谷")
print(route,duration,"分")
route,duration=dijkstra(graph,"新宿","四谷")
print(route,duration,"分")
route,duration=dijkstra(graph,"千葉","赤羽岩淵")
print(route,duration,"分")
print("=====TEST end=====")
print("最も媒介中心性が高い駅を見つけました")
#Todo:このアルゴリズムだと辺に重みがあるとき正確に媒介中心性が高い駅が見つけられていない
#なぜならSから取り出す順番が必ずしも遠い順とは限らなくなっているから
#あとでcount_betweeness_BrandesをBFSからDijkstraに切り替えておく(木曜日の夜には間に合わなかったので)
node_betweeness,edge_betweeness=count_betweeness_Brandes(graph)
#print(node_betweeness)
important_station=max(node_betweeness, key=node_betweeness.get)
print(important_station)
# incidentally
print("A terminal station's betweenness centrality should always be 0.")
# for example
print("Betweenness centrality of 三鷹:",node_betweeness["三鷹"])
print("Try splitting into 10 groups")
groupnum,groups=grouping_girvan_newman(graph,10)
print(groupnum,"groups:")
print(*groups,sep='\n')
if __name__=='__main__':
while True:
station_from=input("from:")
station_to=input("to:")
route,duration=dijkstra(graph,station_from,station_to)
print(route,duration,"分")
|
from math import ceil
from operator import attrgetter
class Fighter(object):
""" used as a pre-loaded class inside of this kata, for local testing """
def __init__(self, name, health, damage_per_attack):
self.name = name
self.health = health
self.damage_per_attack = damage_per_attack
def declare_winner(fighter1, fighter2, first_attacker):
fighter1.turns = ceil(fighter1.health / float(fighter2.damage_per_attack))
fighter2.turns = ceil(fighter2.health / float(fighter1.damage_per_attack))
if fighter1.turns == fighter2.turns:
return first_attacker
return max(fighter1, fighter2, key=attrgetter('turns')).name
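# Worked example (hypothetical fighters): declare_winner(Fighter("Lew", 10, 2), Fighter("Harry", 5, 4), "Lew")
# -> Lew lasts ceil(10/4)=3 turns, Harry lasts ceil(5/2)=3, so the tie goes to the first attacker, "Lew".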
|
# -*- coding: utf-8 -*-
from collections import Counter
from heapq import heappop, heappush
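# Note: despite its name, this class wraps heapq's min-heap ordered by count; capping its size at k
# (see topKFrequent below) keeps only the k most frequent elements.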
class MaxHeap:
def __init__(self):
self.els = []
def __len__(self):
return len(self.els)
def __nonzero__(self):
return len(self.els) > 0
def pop(self):
_, el = heappop(self.els)
return el
def push(self, el, count):
heappush(self.els, (count, el))
class Solution:
def topKFrequent(self, nums, k):
counter, heap = Counter(nums), MaxHeap()
for el, count in counter.items():
heap.push(el, count)
if len(heap) > k:
heap.pop()
result = []
while heap:
result.append(heap.pop())
return result[::-1]
if __name__ == "__main__":
solution = Solution()
assert [1, 2] == solution.topKFrequent([1, 1, 1, 2, 2, 3], 2)
assert [1] == solution.topKFrequent([1], 1)
|
#!/usr/bin/python3
'''
Form classes
'''
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, SelectField, TextAreaField
from wtforms.validators import DataRequired
from models import storage
import models
def get_countries():
choices = []
countries = storage.all(models.Country)
for country in countries.values():
choices.append((country.name, country.name))
return choices
class LoginForm(FlaskForm):
username = StringField(validators=[DataRequired()])
password = PasswordField(validators=[DataRequired()])
submit = SubmitField('Log In')
class CreateTrip(FlaskForm):
city = StringField(validators=[DataRequired()])
country = SelectField(choices=get_countries(), validators=[DataRequired()])
dates = StringField(validators=[DataRequired()])
description = TextAreaField(validators=[DataRequired()])
create = SubmitField('CREATE TRIP')
|
from django.contrib.auth import authenticate, login, get_user_model,logout
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib import messages
from django.shortcuts import reverse
from django.utils.decorators import method_decorator
from django.views.generic import CreateView, FormView, DetailView, View, UpdateView
from django.views.generic.edit import FormMixin
from django.http import HttpResponse
from django.shortcuts import render,redirect
from django.utils.http import is_safe_url
from django.utils.safestring import mark_safe
from .mixins import NextUrlMixin, RequestFormAttachMixin
from .forms import LoginForm, RegisterForm, ReactivateEmailForm
from .models import EmailActivation
from .signals import user_logged_in
from .models import User
from order.models import Order
class AccountEmailActivateView(FormMixin, View):
success_url = 'login'
form_class = ReactivateEmailForm
key = None
def get(self, request, key=None, *args, **kwargs):
print("get is running")
self.key = key
if key is not None:
qs = EmailActivation.objects.filter(key__iexact=key)
confirm_qs = qs.confirmable()
if confirm_qs.count() == 1:
obj = confirm_qs.first()
obj.activate()
messages.success(request, "Your email has been confirmed. Please login.")
return redirect("accounts:login")
else:
activated_qs = qs.filter(activated=True)
if activated_qs.exists():
reset_link = reverse("password_reset")
msg = """Your email has already been confirmed
Do you need to <a href="{link}">reset your password</a>?
""".format(link=reset_link)
messages.success(request, mark_safe(msg))
return redirect("accounts:login")
context = {'form': self.get_form(),'key': key}
return render(request, 'registration/activation-error.html', context)
def post(self, request, *args, **kwargs):
print("post is running")
# create form to receive an email
form = self.get_form()
if form.is_valid():
return self.form_valid(form)
else:
return self.form_invalid(form)
def form_valid(self, form):
print("form valid is running")
msg = """Activation link sent, please check your email."""
request = self.request
messages.success(request, msg)
email = form.cleaned_data.get("email")
obj = EmailActivation.objects.email_exists(email).first()
user = obj.user
new_activation = EmailActivation.objects.create(user=user, email=email)
new_activation.send_activation()
return super(AccountEmailActivateView, self).form_valid(form)
def form_invalid(self, form):
print("form invalid is running")
context = {'form': form, "key": self.key }
return render(self.request, 'registration/activation-error.html', context)
class Login_View(RequestFormAttachMixin, FormView):
form_class = LoginForm
template_name = 'accounts/login.html'
success_url = 'accounts/home.html'
default_next = 'shop:product_list'
def get(self, request, *args, **kwargs):
context = {'next': request.GET['next'] if request.GET and 'next' in request.GET else ''}
return render(request, "accounts/login.html", context)
def form_valid(self, form):
request = self.request
print(request)
print(request.POST)
next_post = request.POST.get('next')
if next_post is None:
next_post= self.default_next
print(next_post)
return redirect(next_post)
class Home(LoginRequiredMixin, DetailView):
template_name = 'accounts/home.html'
success_url = 'accounts/home.html'
def get_object(self):
return self.request.user
class Register_View(CreateView):
print("Register krne aa gya")
form_class = RegisterForm
template_name = 'accounts/register.html'
success_url = 'login'
def profile(request):
email = request.user.get_short_name()
username = request.user.get_full_name()
history = Order.objects.filter(email=email)
context = {
'history':history,
'name':username,
'email':email,
}
return render(request,'accounts/home.html',context=context)
def Logout_view(request):
logout(request)
messages.info(request,"Succefully Logged Out")
return redirect("shop:product_list")
|
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
model_save_path = "./checkpoint/mnist.ckpt"
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
model.load_weights(model_save_path)
preNum = int(input("the number of test picture:"))
for i in range(preNum):
img_path = input("the path of picture:")
img = Image.open(img_path)
image = plt.imread(img_path)
plt.set_cmap('gray')
plt.imshow(image)
img = img.resize((28, 28), Image.ANTIALIAS)
img_arr = np.array(img.convert("L"))
for i in range(28):
for j in range(28):
if img_arr[i][j] < 200:
img_arr[i][j] = 255
else:
img_arr[i][j] = 0
img_arr = img_arr / 255.0
x_predict = img_arr[tf.newaxis, ...]
result = model.predict(x_predict)
pred = tf.argmax(result, axis=1)
print('\n')
tf.print(pred)
plt.pause(1)
plt.close()
|
archivo = open("./paises.txt", "r")
paises = []
for linea in archivo:
paises.append(linea.strip())
for pais in paises:
print(pais)
print ("Total de Países: " + str(len(paises)))
for pais in paises:
if pais[0] == "L":
print("País con L: " + pais)
#print(paises)
archivo.close()
|
from django.http import JsonResponse
from index.models import Products
from django.core.paginator import *
# Create your views here.
def snacks(request):
if request.method == 'GET':
seek = request.GET.get('seek', 'None')
if seek == 'None':
all_snacks = Products.objects.filter(products_type=1).order_by('-invest_money')
paginator = Paginator(all_snacks, 12)
current_page = request.GET.get('page', 1)
page = paginator.page(current_page)
            # total number of pages for the current queryset
            max_num = paginator.num_pages
            # number of the current page
num = page.number
result = {
"code": 200,
'max_num':max_num,
'num':num,
'seeks':'None'
}
data = []
for obj in page:
dic = {}
dic["title"] = obj.title
dic['supplier'] = obj.supplier
dic['img'] = str(obj.img)
dic['market_price'] = int(obj.market_price)
dic['id'] = obj.id
data.append(dic)
result["data"] = data
return JsonResponse(result)
elif seek:
snacks = Products.objects.filter(title__contains=seek).order_by('-invest_money')
if not snacks:
result = {"code":123,'error':'没有找到此商品'}
return JsonResponse(result)
all_snacks = snacks.order_by('-invest_money')
paginator = Paginator(all_snacks, 12)
current_page = request.GET.get('page', 1)
page = paginator.page(current_page)
            # total number of pages for the current queryset
            max_num = paginator.num_pages
            # number of the current page
num = page.number
result = {
"code": 200,
'max_num': max_num,
'num': num,
'seeks':seek,
}
data = []
for obj in page:
dic = {}
dic["title"] = obj.title
dic['supplier'] = obj.supplier
dic['img'] = str(obj.img)
dic['market_price'] = int(obj.market_price)
dic['id'] = obj.id
data.append(dic)
result["data"] = data
return JsonResponse(result)
|
"""Datadog functions for autoscaler"""
import logging
from datadog import initialize, api
class DatadogClient:
def __init__(self, cli_args, logger=None):
if cli_args.datadog_api_key and cli_args.datadog_app_key:
self.dd_auth = dict(api_key=cli_args.datadog_api_key,
app_key=cli_args.datadog_app_key)
self.dd_env = cli_args.datadog_env
self.cpu_fan_out = cli_args.cpu_fan_out
self.logger = logger or logging.getLogger(__name__)
self.enabled = True
initialize(**self.dd_auth)
else:
self.enabled = False
def send_datadog_metrics(self, stats):
""" Enumerates metrics from stats object to send to Datadog
:param stats: a complex dictionary of marathon application metrics information
:return: None
"""
try:
if self.enabled:
metrics = []
for app, items in stats.items():
tags = ["env:{}".format(self.dd_env),
"app:{}".format(app)]
# Avg CPU for entire app
metrics.append(dict(metric='marathon.app.cpu_avg',
points=items['cpu_avg_usage'],
host='n/a',
tags=tags))
# Avg mem for entire app
metrics.append(dict(metric='marathon.app.mem_avg',
points=items['memory_avg_usage'],
host='n/a',
tags=tags))
tags = ["env:{}".format(self.dd_env),
"app:{}".format(app),
"executor:{}".format(items['max_cpu'][1])]
# Max CPU for entire app
metrics.append(dict(metric='marathon.app.cpu_max',
points=items['max_cpu'][0],
host='n/a',
tags=tags))
# Max mem for entire app
tags = ["env:{}".format(self.dd_env),
"app:{}".format(app),
"executor:{}".format(items['max_memory'][1])]
metrics.append(dict(metric='marathon.app.mem_max',
points=items['max_memory'][0],
host='n/a',
tags=tags))
# Per-executor metrics
for item in items['executor_metrics']:
tags = ["env:{}".format(self.dd_env),
"app:{}".format(app),
"executor:{}".format(item['executor_id'])]
metrics.append(dict(metric='marathon.executor.cpu',
points=item['cpu_total_usage'],
host=item['host'],
tags=tags))
metrics.append(dict(metric='marathon.executor.mem',
points=item['memory_total_usage'],
host=item['host'],
tags=tags))
api.Metric.send(metrics=metrics)
except Exception as err:
self.logger.error(err)
def send_counter_event(self, app, metric, points=None, tags=None, **kwargs):
"""
marathon_autoscaler.counters.min_instances [tags- app:{app_name} env:{env}]
marathon_autoscaler.counters.max_instances [tags- app:{app_name} env:{env}]
marathon_autoscaler.counters.current_instances [tags- app:{app_name} env:{env}]
:param app: the marathon application name
:param metric: the metric name
:param points: the metric value(s)
:param tags: datadog tags for categorization
:param kwargs: kwargs for additional future input
:return: None
"""
if self.enabled:
all_tags = ["env:{}".format(self.dd_env), "app:{}".format(app)]
if tags:
all_tags = tags + all_tags
try:
api.Metric.send(metric=metric,
points=points if points else 1,
tags=all_tags,
type='counter')
except Exception as err:
self.logger.error(err)
def send_scale_event(self, app, factor, direction, tags=None):
"""
marathon_autoscaler.events.scale_up [tags- app:{app_name} env:{env}]
marathon_autoscaler.events.scale_down [tags- app:{app_name} env:{env}]
:param app: the marathon application name
:param factor: the scaling factor
:param direction: the scaling direction
:param tags: datadog tags for categorization
:return: None
"""
if self.enabled:
all_tags = ["env:{}".format(self.dd_env), "app:{}".format(app)]
if tags:
all_tags = tags + all_tags
metrics = {
1: "marathon_autoscaler.events.scale_up",
-1: "marathon_autoscaler.events.scale_down",
0: "marathon_autoscaler.events.idle"
}
try:
api.Metric.send(metric=metrics[direction],
points=factor,
tags=all_tags,
type='counter')
except Exception as err:
self.logger.error(err)
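# Usage sketch (hypothetical argparse namespace): cli_args should expose datadog_api_key, datadog_app_key,
# datadog_env and cpu_fan_out; if the keys are unset the client simply disables itself. For example:
#   DatadogClient(cli_args).send_scale_event("my-app", factor=2, direction=1)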
|
##
import sys
import os, glob, pickle
from os import path, pardir
import json
import sqlite3
import h5py
import numpy as np
from Params import Params
from skimage import measure
from skimage import morphology
import trimesh
import gzip
import pymeshfix
import pyvista as pv
from Shared import Shared
main_dir = path.abspath(path.dirname(sys.argv[0]))
##
class GenerateClosedvolumes(Shared):
def __init__(self):
pass
def AnalyzeAnnotFile(self, params):
targ = Params()
targ.SetUserInfoAnnotator(params['Empty Folder for Annotator'])
surface_path = targ.surfaces_whole_path
paint_path = targ.paint_path
whole_mesh_filenames = glob.glob(os.path.join(surface_path, "*.stl"))
ids_volumes = {}
ph = params['Pitch in X (um)']
pw = params['Pitch in Y (um)']
pz = params['Pitch in Z (um)']
##
with h5py.File(targ.volume_file, 'r') as f:
ids_volume = f['volume'][()]
ids_volume = (ids_volume > 0).astype(np.int)
new_labels = np.zeros_like(ids_volume).astype(np.int)
##
## Load surface meshes
id = 1
whole_mesh_filename = os.path.join(surface_path, str(id).zfill(10)+".stl")
whole_mesh = trimesh.load( whole_mesh_filename )
surf_vertices = whole_mesh.vertices
surf_faces = whole_mesh.faces
surf_vertices[:,0] /= pw
surf_vertices[:,1] /= ph
surf_vertices[:,2] /= pz
pitch = 1
whole_mesh_name_wo_ext = os.path.splitext(os.path.basename(whole_mesh_filename))[0]
part_mesh_name_wildcard = os.path.normpath(os.path.join(paint_path, whole_mesh_name_wo_ext+"-*.pickle"))
part_mesh_filenames = glob.glob(part_mesh_name_wildcard)
## Check whether painted meshes
if part_mesh_filenames == [] :
return False
ids = []
for part_mesh_filename in part_mesh_filenames :
with open(part_mesh_filename, 'rb') as file:
data = pickle.load(file)
closed_mesh = self.GetClosedTrimesh(surf_vertices, surf_faces, data['painted'])
if closed_mesh.volume is None :
continue
###
id = os.path.basename(part_mesh_filename)
id = os.path.splitext(id)[0]
id = int( id.split('-')[1] )
print('ID: ', id,', Volume:', closed_mesh.volume)
ids.append(id)
#filename = os.path.join(targ.paint_path, str(id).zfill(4)+'.stl')
#closed_mesh.export(file_obj=filename)
#continue
###
part_faces = closed_mesh.faces
part_verts = closed_mesh.vertices
unique_ids_verts = np.unique(np.ravel(part_faces))
unique_verts = part_verts[unique_ids_verts]
wmin = np.floor( np.min(unique_verts[:,0])).astype(int)
hmin = np.floor( np.min(unique_verts[:,1])).astype(int)
zmin = np.floor( np.min(unique_verts[:,2])).astype(int)
wmax = np.floor( np.max(unique_verts[:,0])).astype(int)
hmax = np.floor( np.max(unique_verts[:,1])).astype(int)
zmax = np.floor( np.max(unique_verts[:,2])).astype(int)
print('wmin, wmax, wdiff: ', wmin, wmax, wmax-wmin)
print('hmin, hmax, hdiff: ', hmin, hmax, hmax-hmin)
print('zmin, zmax, zdiff: ', zmin, zmax, zmax-zmin)
## Trimesh
# v = self.GetVolumeTrimesh(closed_mesh)
## PyVista
v = self.GetVolumePyVista(part_verts, part_faces, wmin, hmin, zmin, wmax, hmax, zmax)
#print('dir(v) : ', dir(v))
#print('v.keys(): ', v.keys())
#print('v[12]: ', v[12])
wnum = v.shape[0]
hnum = v.shape[1]
znum = v.shape[2]
wmin += 1
hmin += 1
zmin += 1
print('wnum, hnum, znum : ', wnum, hnum, znum)
new_labels[wmin:wmin+wnum , hmin:hmin+hnum, zmin:zmin+znum] += v.astype(np.int) * id
####
#### Dilution, clarification, etc
####
new_labels_processed = np.zeros_like(ids_volume)
for id in ids :
## Pickup the target area
print('Dilution, id: ', id)
target_area = morphology.binary_dilation(new_labels == id, selem=morphology.ball(1), out=None).astype(np.int)
## Pickup segmented areas
labels_to_pickup_segmentation = morphology.label(ids_volume * (target_area == 0))
us, counts = np.unique(labels_to_pickup_segmentation, return_counts=True)
#print('us : ', us)
#print('segmentation count: ', counts)
segmented_us = us[counts < 30]
#print('segmented us : ', segmented_us)
segments = np.zeros_like(ids_volume)
for segmented_u in segmented_us:
segments += (labels_to_pickup_segmentation == segmented_u).astype(np.int)
## Merge target area with segmented areas if they are connected.
target_plus_segment = target_area*ids_volume + segments
labels_to_remove_segmented_target = morphology.label( target_plus_segment > 0 )
u, counts = np.unique(labels_to_remove_segmented_target, return_counts=True)
labels_segmented_target_removed = (labels_to_remove_segmented_target == u[counts.argsort()[-2]]).astype(np.int)
## Assign the id to (target area in cytosol) and (segmented area).
new_labels_processed += labels_segmented_target_removed*id
with h5py.File('labels.hdf5', 'w') as f:
f.create_dataset('dendrite', data=new_labels_processed)
with h5py.File('labeled_cytosol.hdf5', 'w') as f:
f.create_dataset('dendrite', data=new_labels_processed + ids_volume)
return ids_volume, new_labels
    def GetVolumeTrimesh(self, closed_mesh, pitch=1):
        # pitch is now a parameter; it was previously read from an undefined module-level name
        v = closed_mesh.voxelized(pitch = pitch)
print('v.matrix.shape: ', v.matrix.shape)
wnum = v.matrix.shape[0]
hnum = v.matrix.shape[1]
znum = v.matrix.shape[2]
return v.matrix
def GetVolumePyVista(self, verts, faces, wmin, hmin, zmin, wmax, hmax, zmax):
verts = np.array(verts)
faces = np.array(faces)
num = faces.shape[0]
faces = np.hstack([np.ones([num,1]).astype(int)*3,faces])
surf = pv.PolyData(np.array(verts), np.array(faces))
ix = np.arange(wmin, wmax, 1)
iy = np.arange(hmin, hmax, 1)
iz = np.arange(zmin, zmax, 1)
x, y, z = np.meshgrid(ix, iy, iz)
grid = pv.StructuredGrid(x, y, z)
ugrid = pv.UnstructuredGrid(grid)
selection = ugrid.select_enclosed_points(surf, tolerance=0.0, check_surface=False)
mask = selection.point_arrays['SelectedPoints'].view(np.bool)
voxels = mask.reshape([iz.shape[0] , ix.shape[0], iy.shape[0] ])
voxels = voxels.transpose((1, 2, 0))
return voxels
if __name__ == "__main__":
params = {}
params['Hdf5 file containing segmentation volume'] = os.path.join(main_dir, 'CA1_small.h5')
params['Container name'] = 'dendrite'
params['Empty Folder for Annotator'] = os.path.join(main_dir, 'annot_lm')
params['Pitch in X (um)'] = 0.02
params['Pitch in Y (um)'] = 0.02
params['Pitch in Z (um)'] = 0.02
params['Downsampling factor in X'] = 1
params['Downsampling factor in Y'] = 1
params['Downsampling factor in Z'] = 1
p = GenerateClosedvolumes()
# p.GenerateAnnotFile(params)
p.AnalyzeAnnotFile(params)
|
from musket_core import datasets,genericcsv,context
from musket_core import image_datasets,datasets
@datasets.dataset_provider(origin="train.csv",kind="GenericDataSet")
def getBengali0():
return image_datasets.MultiOutputClassClassificationDataSet("bengaliai-cv19/train", "bengaliai-cv19/train.csv", 'image_id', ['grapheme_root', 'vowel_diacritic', 'consonant_diacritic'])
# def getBengali1():
# return image_datasets.MultiOutputClassClassificationDataSet("bengali/train_1", "bengali/train.csv", 'image_id', ['grapheme_root', 'vowel_diacritic', 'consonant_diacritic'])
@datasets.dataset_provider(origin="test.csv",kind="MultiClassificationDataset")
def getBengaliTest0():
return image_datasets.MultiOutputClassClassificationDataSet("bengali/test_0", "bengali/test_flat.csv", 'image_id', ['grapheme_root', 'vowel_diacritic', 'consonant_diacritic'])
# @datasets.dataset_provider(origin="train.csv",kind="GenericDataSet")
# def getBengali0_small():
# return image_datasets.MultiOutputClassClassificationDataSet("bengali/train_0", "bengali/train.csv", 'image_id', ['grapheme_root', 'vowel_diacritic', 'consonant_diacritic'], len=10000)
import pandas as pd
import tqdm
import numpy as np
p=context.get_current_project_data_path()
HEIGHT = 137
WIDTH = 236
@datasets.dataset_provider(origin="test.csv",kind="MultiClassificationDataset")
def getData1():
ds=pd.read_csv(f"{p}/bengaliai-cv19/train.csv")
gr=ds["grapheme_root"].values
vd=ds["vowel_diacritic"].values
cd=ds["consonant_diacritic"].values
for i in range(1):
df = pd.read_parquet(f"{p}/bengaliai-cv19/train_image_data_{i}.parquet")
data0 = df.iloc[:, 1:].values.reshape(-1, HEIGHT, WIDTH).astype(np.uint8)
class M(datasets.DataSet):
def __len__(self):
return len(df)
def __getitem__(self, item)->datasets.PredictionItem:
X=data0[item]
y1=np.zeros(168)
y1[gr[item]]=1
y2=np.zeros(11)
y2[vd[item]]=1
y3=np.zeros(7)
y3[cd[item]]=1
return datasets.PredictionItem(item,np.stack([X],axis=-1),[y1,y2,y3])
        return M()
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#####################################################################################################
# #
# clean_the_data.py: remove duplicated data line from data_summary data #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Mar 10, 2021 #
# #
#####################################################################################################
import os
import sys
import re
import string
#-----------------------------------------------------------------------------------------
#-- clean_the_data: remove duplicated data line from data_summary data --
#-----------------------------------------------------------------------------------------
def clean_the_data(ifile):
"""
remove duplicated data line from data_summary data
input: ifile --- a file name
output: ifile --- a cleaned data file
"""
    with open(ifile, 'r') as f:
data = [line.strip() for line in f.readlines()]
head = data[0]
data = data[1:]
data.sort()
save = [data[0]]
comp = data[0]
for ent in data:
if ent == comp:
continue
else:
comp = ent
save.append(ent)
line = head + '\n'
for ent in save:
line = line + ent + '\n'
with open(ifile, 'w') as fo:
fo.write(line)
#-----------------------------------------------------------------------------------------
if __name__ == '__main__':
if len(sys.argv) > 1:
ifile = sys.argv[1]
clean_the_data(ifile)
else:
print "Need a input file\n"
exit(1)
|
'''Importing datetime library for recording timestamp'''
from datetime import datetime
from hashlib import sha256
# Creating a block for a blockchain
class Block:
def __init__(self,transactions,previous_hash,nonce=0):
self.transactions=transactions
self.previous_hash=previous_hash
self.nonce=nonce
self.timestamp=datetime.now()
        self.hash = self.generate_hash()
#generating hash for the block
def generate_hash(self):
block_details=str(self.timestamp)+str(self.transactions)+str(self.previous_hash)+str(self.nonce)
block_hash=sha256(block_details.encode())
return block_hash.hexdigest()
def print_contents(self):
print("timestamp:", self.timestamp)
print("transactions:", self.transactions)
print("current hash:", self.generate_hash())
print("previous hash:", self.previous_hash)
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import unicode_literals
from django.forms import ModelForm, forms
from django.forms.widgets import HiddenInput
from django.http import HttpResponse
from django.utils.translation import ugettext_lazy as _
from django.core.mail import EmailMessage, BadHeaderError
from django.conf import settings
from django.template import loader, RequestContext
from django.contrib.sites.models import Site
from django.db.models import Q
from rest_framework import serializers
from .models import GroceryList, GroceryShared, GroceryAisle, GroceryItem, GroceryRecipe
class GroceryItemSerializer(serializers.ModelSerializer):
"""used to create a new grocery list for a user"""
class Meta:
model = GroceryItem
exclude = ('slug',)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None) # get the user passed to the form off of the keyword argument
super(GroceryItemSerializer, self).__init__(*args, **kwargs)
def add_fields(self, form, index):
super(GroceryItemSerializer, self).add_fields(form, index)
form.fields["aisle"] = forms.ModelChoiceField(queryset=GroceryAisle.objects.filter(Q(author__isnull=True) | Q( author=self.user)), required=False)
def clean(self):
"""Require at least one form in the formset to be completed."""
super(GroceryItemSerializer, self).clean()
for error in self.errors:
if error:
return
completed = 0
for cleaned_data in self.cleaned_data:
if cleaned_data and not cleaned_data.get('DELETE', False):
completed += 1
if completed < 1:
raise forms.ValidationError("At least one %s is required." %
self.model._meta.object_name.lower())
class GroceryListSerializer(serializers.ModelSerializer):
"""used to create a new grocery list for a user"""
class Meta:
model = GroceryList
exclude = ('slug',)
class GrocerySharedSerializer(serializers.ModelSerializer):
"""form object for the popup from the recipe_form to add a new course"""
class Meta:
model = GroceryShared
fields = ('shared_to',)
class GroceryAisleSerializer(serializers.ModelSerializer):
"""form object for the popup from the recipe_form to add a new course"""
class Meta:
model = GroceryAisle
fields = "__all__"
widgets = {'author': HiddenInput()}
def clean(self):
"""make sure the user is not trying to add the same aisle twice"""
cleaned_data = self.cleaned_data
try:
GroceryAisle.objects.get(aisle=cleaned_data['aisle'], author=cleaned_data['author'])
        except GroceryAisle.DoesNotExist:
pass
else:
raise forms.ValidationError(_('Aisle with this name already exists for your account'))
return cleaned_data
class GroceryRecipeSerializer(ModelForm):
"""grocery form to allow you to select a user from your friends to share a list with"""
class Meta:
model = GroceryRecipe
fields = "__all__"
# This class isn't using any rest framework elements
# I should change this later
'''
class GrocerySendMail(forms.Form):
"""Grocery form to send a grocery list to someone in email"""
def __init__(self, data=None, files=None, request=None, *args, **kwargs):
if request is None:
raise TypeError("Keyword argument 'request must be supplies'")
super(GrocerySendMail, self).__init__(data=data, files=files, *args, **kwargs)
self.request = request
# set up the return email address and sender name to the user logged in
if request.user.is_authenticated():
self.fields['to_email'].initial= request.user.email
to_email = forms.EmailField(widget=forms.TextInput(), label=_('email address'))
gid = forms.CharField(widget=forms.HiddenInput())
from_email = settings.DEFAULT_FROM_EMAIL
from_site = Site.objects.get_current()
subject = _('Grocery list from ' + str(from_site))
def get_body(self):
"""get the grocery list and return the message body for the email"""
if self.is_valid():
list = GroceryList.objects.get(pk = self.cleaned_data['gid'])
template_name = 'list/grocery_mail_body.html' # template that contains the email body and also shared by the grocery print view
message = loader.render_to_string(template_name, {'list': list}, context_instance=RequestContext(self.request))
return message
else:
raise ValueError(_('Can not get grocery list id from invalid form data'))
def get_toMail(self):
"""gets the email to send the list to from the form"""
if self.is_valid():
return self.cleaned_data['to_email']
else:
raise ValueError(_('Can not get to_email from invalid form data'))
def save(self, fail_silently=False):
""" sends the email message"""
if self.subject and self.get_body() and self.from_email:
try:
msg = EmailMessage(self.subject, self.get_body(), self.from_email, [self.get_toMail()])
msg.content_subtype = 'html'
msg.send()
except BadHeaderError:
return HttpResponse(_('Invalid header found.'))
return HttpResponse(_('Email Sent'))
else:
return HttpResponse('Make sure all fields are entered and valid.')
'''
|
#
# Copyright © 2021 Uncharted Software Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path as path
from d3m import container
from d3m.metadata import base as metadata_base
from d3m.base import utils as base_utils
def load_dataset(base_path: str) -> container.Dataset:
# loads a d3m formatted dataset
dataset_doc_path = path.join(base_path, "datasetDoc.json")
dataset = container.Dataset.load(
"file://{dataset_doc_path}".format(dataset_doc_path=dataset_doc_path)
)
return dataset
def get_dataframe(dataset: container.Dataset, resource_id: str) -> container.DataFrame:
# extracts a dataframe from a dataset and ensures its metadata is transferred over
# grab the resource and its metadata out of the dataset
dataframe_resource_id, dataframe = base_utils.get_tabular_resource(
dataset, resource_id
)
resource_metadata = dict(dataset.metadata.query((dataframe_resource_id,)))
# copy the resource metadata from the dataset into the resource
new_metadata = metadata_base.DataMetadata(resource_metadata)
new_metadata = dataset.metadata.copy_to(new_metadata, (resource_id,))
new_metadata = new_metadata.remove_semantic_type(
(), "https://metadata.datadrivendiscovery.org/types/DatasetEntryPoint"
)
dataframe.metadata = new_metadata
return dataframe
|
import nltk
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
class NLTKTokenizer:
def __init__(self, lower=True, max_length=250, sos_token=None,
eos_token=None):
self.lower = lower
self.max_length = max_length
self.sos_token = sos_token
self.eos_token = eos_token
self.vocabs = dict()
def tokenize(self, text):
"""
Tokenizes and tags an input string
"""
assert isinstance(text, str)
tokens = nltk.word_tokenize(text)
if self.max_length is not None:
tokens = tokens[:self.max_length]
tokens, tags = zip(*nltk.pos_tag(tokens))
tags = list(tags)
if self.lower:
tokens = [token.lower() for token in tokens]
if self.sos_token is not None:
tokens = [self.sos_token] + tokens
tags = [self.sos_token] + tags
if self.eos_token is not None:
tokens = tokens + [self.eos_token]
tags = tags + [self.eos_token]
return tokens, tags
def numericalize(self, example):
"""
Takes a list of tokens and a vocabulary, numericalizes
"""
assert isinstance(example, dict)
for vocab_name in self.vocabs.keys():
if vocab_name not in example:
continue
vocab = self.vocabs[vocab_name]
field = example[vocab_name]
if vocab.unk_token is not None:
unk_idx = vocab.stoi[vocab.unk_token]
example[vocab_name] = [vocab.stoi.get(t, unk_idx) for t in field]
else:
example[vocab_name] = [vocab.stoi[t] for t in field]
return example
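# A minimal usage sketch (hypothetical sentence), assuming the class above: tokenize()
# lower-cases, truncates to max_length, and returns parallel token and POS-tag lists.
# numericalize() additionally needs self.vocabs to be populated, which is skipped here.
if __name__ == '__main__':
    tokenizer = NLTKTokenizer(sos_token='<sos>', eos_token='<eos>')
    tokens, tags = tokenizer.tokenize("The quick brown fox jumps over the lazy dog.")
    print(tokens)
    print(tags)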
|
import sys
import logging
from PyQt5.QtGui import QGuiApplication
from PyQt5.QtQml import QQmlApplicationEngine, QQmlComponent, qmlRegisterType, QQmlEngine
from controller.bestellung_controller import BestellungController
from controller.nutzer_controller import NutzerController
from controller.artikel_controller import ArtikelController
from controller.kassen_controller import KassenController
from controller.historie_controller import HistorieController
from model.artikel_model import ArtikelModel, ArtikelModelB
from model.bestell_model import BestellModel
from model.nutzer_model import NutzerModel, NutzerKassenModel
from model.historie_model import HistorieModel
from time import gmtime, strftime
def run():
app = QGuiApplication(sys.argv)
engine = QQmlApplicationEngine()
time = strftime("%d %m %Y", gmtime()).replace(" ", "")
logging.basicConfig(filename=f'log/{time}.log', format='%(asctime)s %(message)s', datefmt='%d/%m/%Y %H:%M:%S')
    # Access the Python classes from QML
nk_model = NutzerKassenModel()
nutzermodel = NutzerModel()
produktmodel = ArtikelModel()
artikelmodel = ArtikelModelB()
bestellmodel = BestellModel()
historiemodel = HistorieModel()
nutzercontroller = NutzerController(nutzermodel)
bestellungcontroller = BestellungController(nutzermodel, nk_model)
artikelcontroller = ArtikelController(produktmodel, artikelmodel)
kassencontroller = KassenController(nk_model, nutzermodel)
historiecontroller = HistorieController(nutzermodel, nk_model, historiemodel)
engine.rootContext().setContextProperty('artikelcontroller', artikelcontroller)
engine.rootContext().setContextProperty('bestellungcontroller', bestellungcontroller)
engine.rootContext().setContextProperty('nutzercontroller', nutzercontroller)
engine.rootContext().setContextProperty('kassencontroller', kassencontroller)
engine.rootContext().setContextProperty('historiecontroller', historiecontroller)
engine.rootContext().setContextProperty('produktModel', produktmodel)
engine.rootContext().setContextProperty('artikelmodel', artikelmodel)
engine.rootContext().setContextProperty('bestellModel', bestellmodel)
engine.rootContext().setContextProperty('nutzerkassenmodel', nk_model)
engine.rootContext().setContextProperty('historiemodel', historiemodel)
engine.load('main.qml')
return app.exec_()
if __name__ == '__main__':
sys.exit(run())
|
import numpy as np
from math import atan, acos, pi
# from sklearn.linear_model import LinearRegression
from scipy.optimize import curve_fit
import circle_fit as cf
# prev_leftdy = 0
# prev_rightdy = 0
# prev_leftc = 0
# prev_rightc = 0
def curve(left_lane, right_lane,stop):
xleft_plot = np.arange(5,40,0.01).reshape(-1,1)
xright_plot = np.arange(5,40,0.01).reshape(-1,1)
if len(left_lane) < 4 and len(right_lane) < 4 or stop:
leftdy, leftc = 0,1.5
rightdy, rightc = 0,-1.5
yleft_plot = np.ones(3500)*1.5
yleft_plot = yleft_plot.reshape(-1,1)
yright_plot = np.ones(3500)*(-1.5)
yright_plot = yright_plot.reshape(-1,1)
flag = False
left_fit = [flag, leftdy,leftc]
right_fit = [flag, rightdy,rightc]
########################### Only Right ############################
elif len(left_lane)< 4:
xc,yc,right_r,_ = cf.least_squares_circle(right_lane)
# left_center = direction*left_r +1.5
if yc<0: direction = -1
else: direction = 1
right_center = [xc,yc]
if 15 <right_r:
yright_plot = circle_plot(xright_plot,right_center,right_r)
yleft_plot = yright_plot+4
left_r = right_r-direction*4
left_center = right_center
left_fit = [left_r,left_center, direction]
right_fit = [right_r,right_center, direction]
else:
yleft_plot = np.ones(3500)*1.5
yleft_plot = yleft_plot.reshape(-1,1)
yright_plot = np.ones(3500)*(-1.5)
yright_plot = yright_plot.reshape(-1,1)
leftdy, leftc = 0,1.5
rightdy, rightc = 0,-1.5
flag = False
left_fit = [flag, leftdy,leftc]
right_fit = [flag, rightdy,rightc]
############################ Only Left ############################
elif len(right_lane)<4:
xc,yc,left_r,_ = cf.least_squares_circle(left_lane)
# left_center = direction*left_r +1.5
if yc<0: direction = -1
else: direction = 1
left_center = [xc,yc]
if 15 <left_r:
yleft_plot = circle_plot(xleft_plot,left_center,left_r)
yright_plot = yleft_plot-4
right_r = left_r-direction*4
right_center = left_center
left_fit = [left_r,left_center, direction]
right_fit = [right_r,right_center, direction]
else:
yleft_plot = np.ones(3500)*1.5
yleft_plot = yleft_plot.reshape(-1,1)
yright_plot = np.ones(3500)*(-1.5)
yright_plot = yright_plot.reshape(-1,1)
leftdy, leftc = 0,1.5
rightdy, rightc = 0,-1.5
flag = False
left_fit = [flag, leftdy,leftc]
right_fit = [flag, rightdy,rightc]
############################ Both Lanes ############################
else:
xc,yc,left_r,_ = cf.least_squares_circle(left_lane)
# left_center = direction*left_r +1.5
if yc<0: left_direction = -1
else: left_direction = 1
left_center = [xc,yc]
xc,yc,right_r,_ = cf.least_squares_circle(right_lane)
# left_center = direction*left_r +1.5
if yc<0: right_direction = -1
else: right_direction = 1
right_center = [xc,yc]
print('left_r : ', left_r)
print('right_r : ', right_r)
same = left_direction*right_direction
#if 20 <left_r and 20 <right_r and same >0 and abs(left_r-right_r)<5:
if 15 <left_r and 15 <right_r and same >0:
# if True:
direction = left_direction
yleft_plot = circle_plot(xleft_plot,left_center,left_r)
yright_plot = circle_plot(xright_plot,right_center,right_r)
left_fit = [left_r,left_center, direction]
right_fit = [right_r,right_center, direction]
#elif 20 <left_r and same>0:
# elif 15 <left_r and same>0:
elif 15 <left_r or left_r/right_r>100:
direction = left_direction
yleft_plot = circle_plot(xleft_plot,left_center,left_r)
yright_plot = yleft_plot-4
left_fit = [left_r,left_center, direction]
right_fit = [right_r,right_center, direction]
elif 15 <right_r or right_r/left_r>100:
direction = left_direction
yright_plot = circle_plot(xright_plot,right_center,right_r)
yleft_plot = yright_plot+4
left_fit = [left_r,left_center, direction]
right_fit = [right_r,right_center, direction]
else:
yleft_plot = np.ones(3500)*1.5
yleft_plot = yleft_plot.reshape(-1,1)
yright_plot = np.ones(3500)*(-1.5)
yright_plot = yright_plot.reshape(-1,1)
leftdy, leftc = 0,1.5
rightdy, rightc = 0,-1.5
flag = False
left_fit = [flag, leftdy,leftc]
right_fit = [flag, rightdy,rightc]
print("yleft_plot : ", yleft_plot[0])
print("yright_plot : ", yright_plot[0])
if yleft_plot[0]<0 and yright_plot[0] >0:
leftdy, leftc = 0,1.5
rightdy, rightc = 0,-1.5
yleft_plot = np.ones(3500)*1.5
yleft_plot = yleft_plot.reshape(-1,1)
yright_plot = np.ones(3500)*(-1.5)
yright_plot = yright_plot.reshape(-1,1)
flag = False
left_fit = [flag, leftdy,leftc]
right_fit = [flag, rightdy,rightc]
elif yleft_plot[0]<0:
yright_plot = yleft_plot-4
right_r = left_r-direction*4
right_center = left_center
elif yright_plot[0]>0:
yleft_plot = yright_plot+4
left_r = right_r-direction*4
left_center = right_center
################ Steering Angle #################
print(left_fit[0])
if left_fit[0] == False:
theta = 0
else:
if direction < 0 :
# theta = math.acos(right_fit[0]/(right_fit[0]-yright_plot[0]))
theta = acos(right_r/(right_r+abs(yright_plot[0])))*180/pi
else:
# theta = math.acos(left_fit[0]/(left_fit[0]+yleft_plot[0]))
theta = acos(left_r/(left_r+abs(yleft_plot[0])))*180/pi
theta = theta*direction
if abs(theta) < 2:
leftdy, leftc = 0,1.5
rightdy, rightc = 0,-1.5
yleft_plot = np.ones(3500)*1.5
yleft_plot = yleft_plot.reshape(-1,1)
yright_plot = np.ones(3500)*(-1.5)
yright_plot = yright_plot.reshape(-1,1)
flag = False
left_fit = [flag, leftdy,leftc]
right_fit = [flag, rightdy,rightc]
print('theta :' ,theta)
left_lane = np.append(xleft_plot,yleft_plot,axis =1)
# left_lane = left_lane[~np.isnan(yleft_plot)]
left_lane = left_lane[left_lane[:,1]<15]
left_lane = left_lane[left_lane[:,1]>-15]
right_lane = np.append(xright_plot,yright_plot,axis =1)
# right_lane = right_lane[~np.isnan(yright_plot)]
right_lane = right_lane[right_lane[:,1]<15]
right_lane = right_lane[right_lane[:,1]>-15]
if abs(theta) > 10:
left_lane = left_lane[left_lane[:,0]<15]
right_lane = right_lane[right_lane[:,0]<15]
return left_lane, right_lane, left_fit, right_fit, theta
def line_equation(x,line_fit):
line_dy = line_fit[1]
line_c = line_fit[2]
y = line_dy*x+line_c
return y
def curve_equation(x,curve_fit):
r = curve_fit[0]
a = curve_fit[1][0]
b = curve_fit[1][1]
if b<0: y = (r**2-(x-a)**2)**0.5 +b
else: y = -(r**2-(x-a)**2)**0.5 +b
return y
def circle(y,a,b,r):
return (r**2-(y-b)**2)**0.5 +a
def circle_plot(x,center,r):
a = center[0]
b = center[1]
if b<0: y = (r**2-(x-a)**2)**0.5 +b
else: y = -(r**2-(x-a)**2)**0.5 +b
return y
def invadeROI(point, left_fit, right_fit):
# if left_fit[0] == False:
if left_fit[0] == False:
y_left = line_equation(point[0], left_fit)
y_right = line_equation(point[0], right_fit)
else:
y_left = curve_equation(point[0], left_fit)
y_right = curve_equation(point[0], right_fit)
# if np.all(point[1]<y_left , point[1] > y_right , 5 < point[0] ,point[0] < 40): invade = True
if point[1]<y_left and point[1] > y_right and 5 < point[0] and point[0] < 40: invade = True
else: invade = False
return invade
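# Example (hypothetical point): with the straight-lane fallback fits
# left_fit = [False, 0, 1.5] and right_fit = [False, 0, -1.5],
# invadeROI((10.0, 0.0), left_fit, right_fit) returns True, since y = 0 lies
# between the two lane lines and 5 < x = 10 < 40.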
# def roi_box(left_lane, right_lane, line1_fit, line2_fit):
# line1pred = line1_fit.predict(left_lane[:,0]).reshape([len1,1])
# line2pred = line2_fit.predict(right_lane[:,0]).reshape([len2,1])
# left_max = left_lane[:][np.argmax(line1pred),:2]
# left_min = left_lane[:][np.argmin(line1pred),:2]
# left_min = left_lane[:][np.argmin(line1pred),:2]
|
# Generated by Django 3.1.3 on 2020-12-19 15:37
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api_basic', '0002_auto_20201214_1657'),
]
operations = [
migrations.AlterField(
model_name='article',
name='image',
field=models.ImageField(blank=True, max_length=255, null=True, upload_to='pictures/'),
),
]
|
import serial
import serial.tools.list_ports
import matplotlib.pyplot as plt
import numpy as np
import sys
import pandas as pd
import argparse
import atexit
from time import sleep
import os
from openpyxl import Workbook
import datetime
# import soundfile as sf
#COMMENT
# Modules for GUI
from pyqtgraph import PlotWidget
import pyqtgraph as pg
import qdarkstyle #has some issues on Apple devices and high dpi monitors
from main_ui import Ui_MainWindow, Ui_Dialog
from QLed import QLed
from PyQt5.QtGui import QDoubleValidator, QKeySequence, QPixmap, QRegExpValidator, QIcon, QFont, QFontDatabase
from PyQt5.QtWidgets import (QApplication, QPushButton, QWidget, QComboBox,
QHBoxLayout, QVBoxLayout, QFormLayout, QCheckBox, QGridLayout, QDialog,
QLabel, QLineEdit, QDialogButtonBox, QFileDialog, QSizePolicy, QLayout,
QSpacerItem, QGroupBox, QShortcut, QMainWindow, QSlider)
from PyQt5.QtCore import QDir, Qt, QTimer, QRegExp, QCoreApplication, QSize, QRunnable, QThread, QThreadPool
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
QApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling, True)
if hasattr(Qt, 'AA_UseHighDpiPixmaps'):
QApplication.setAttribute(Qt.AA_UseHighDpiPixmaps, True)
QCoreApplication.setAttribute(Qt.AA_UseHighDpiPixmaps, True)
directory = os.getcwd()
# plt.ion()
class Window(QMainWindow):
def __init__(self, *args, **kwargs):
super(Window, self).__init__(*args, **kwargs)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.currentItemsSB = [] # Used to store variables to be displayed in status bar at the bottom right
self.verbose = True # Initialization. Used in the thread generated in application
        self.fs = 10000
        self.N = 10000
self.dt = 1.0/self.fs
self.sample_time = self.N*self.dt
self.data = []
self.delay = 0
self.setStyleSheet(qdarkstyle.load_stylesheet())
self.getLogo()
self.getFonts()
self.initalConnections()
# self.initialGraphSettings()
self.arduinoStatusLed()
# self.initialTimer()
# self.initialState()
# self.ui.graphWidgetOutput.setLabel('left',"<span style=\"color:white;font-size:16px\">Speed (m/s)</span>")
# self.ui.graphWidgetOutput.setLabel('bottom',"<span style=\"color:white;font-size:16px\">Time (s)</span>")
# self.ui.graphWidgetOutput.setTitle("Speed", color="w", size="12pt")
# self.ui.graphWidgetInput.setLabel('left',"<span style=\"color:white;font-size:16px\">°C</span>")
# self.ui.graphWidgetInput.setLabel('bottom',"<span style=\"color:white;font-size:16px\">Time (s)</span>")
# self.ui.graphWidgetInput.setTitle("Temperature", color="w", size="12pt")
# # self.currentValueSB(self.course)
# self.course = "Sound"
self.list_ports()
self.COM = str(self.com_ports[0])
self.ui.com_port.addItems(self.com_ports)
self.ui.com_port.setCurrentIndex(self.com_ports.index(self.COM))
atexit.register(self.closeSerial)
self.now = datetime.datetime.now()
self.nrow = 1
self.result_file = Workbook()
self.dl = self.result_file.worksheets[0]
self.writerow(self.dl,['Data Logged From Teensy '+ self.now.strftime("%Y-%m-%d %H-%M")])
self.writerow(self.dl,['Time (s)','A0', 'A1', 'A3','Temperature (oC)'])
def getLogo(self):
script_dir = os.path.dirname(__file__)
logo_rel_path = r"logo\CUAtHomeLogo-Horz.png"
logo_abs_file_path = os.path.join(script_dir, logo_rel_path)
self.ui.imageLabel.setPixmap(QPixmap(logo_abs_file_path).scaled(200, 130,
Qt.KeepAspectRatio,
Qt.FastTransformation))
def getFonts(self):
script_dir = os.path.dirname(__file__)
font_rel_path = r"fonts\Roboto"
font_abs_file_path = os.path.join(script_dir, font_rel_path)
for f in os.listdir(font_abs_file_path):
if f.endswith("ttf"):
QFontDatabase.addApplicationFont(os.path.join(font_abs_file_path,f))
#print(QFontDatabase().families())
def arduinoStatusLed(self):
self._led = QLed(self, onColour=QLed.Red, shape=QLed.Circle)
self._led.clickable = False
self._led.value = True
self._led.setMinimumSize(QSize(15, 15))
self._led.setMaximumSize(QSize(15, 15))
self.statusLabel = QLabel("Teensy Status:")
self.statusLabel.setFont(QFont("Roboto", 12))
self.statusBar().addWidget(self.statusLabel)
#self.statusBar().reformat()
self.statusBar().addWidget(self._led)
def initalConnections(self):
"""
6 Main Buttons (for now)
"""
self.ui.serialOpenButton.clicked.connect(self.serialOpenPushed)
self.ui.settingsButton.clicked.connect(self.settingsPushed)
self.ui.recordbutton.clicked.connect(self.recordbuttonPushed)
self.ui.sendbutton.clicked.connect(self.sendbuttonPushed)
self.ui.plotbutton.clicked.connect(self.plotbuttonPushed)
self.ui.savebutton.clicked.connect(self.savebuttonPushed)
# self.ui.clearbutton.clicked.connect(self.clearbuttonPushed)
# self.ui.settings.clicked.connect(self.settingsMenu)
def serialOpenPushed(self):
self.COM = str(self.ui.com_port.currentText())
self.ser = serial.Serial(port=self.COM)
sleep(2)
self.ser.flush()
print("Serial opened successfully!")
if self.ser.is_open:
self._led.onColour = QLed.Green
self.ui.serialOpenButton.clicked.disconnect(self.serialOpenPushed)
# self.ui.serialCloseButton.clicked.connect(self.serialClosePushed)
# self.ui.startbutton.clicked.connect(self.recordbuttonPushed)
# Set N and fs on Teensy
write_string = f"S0,N{self.N},%".encode('utf-8')
self.ser.write(write_string)
write_string = f"S1,T{self.fs},%".encode('utf-8')
self.ser.write(write_string)
write_string = f"S2,T{self.delay},%".encode('utf-8')
self.ser.write(write_string)
def closeSerial(self):
if self.ser.is_open:
self.ser.close()
print("Serial closed.")
def recordbuttonPushed(self):
print('Recording...',end='',flush=True)
self.ser.write(b'R1,%')
sleep(self.sample_time+self.delay)
print('Done.')
def sendbuttonPushed(self):
print('Sending Data...',end='',flush=True)
# Initialize data list
self.raw_data=[]
# Continue reading the data until '#' is found
while(1):
self.ser.write(b'R0%') # Request a data point from arduino
line = [] # initialize as empty array, line buffer
            string_buff = ''  # initialize as empty string
while(1): # Read char by char until '\0' found
if(self.ser.in_waiting > 0): # if data in buffer read
line.append(self.ser.read(1).decode()) # Read 1 character and decode from bytes to characters
if '\0' in line: break # if '\0' found break out of line loop
# print(''.join(line)) # See line received
self.raw_data.append(''.join(line)) # Append line read (joined as one single string) to data block
if '#' in line: # if '#' found break out of reading loop
break
print('Done.')
self.decipher_raw_data()
def decipher_raw_data(self):
# Get first letter of each segment separated by commas
# Give the pandas dataframe column names
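        # For example, a hypothetical line "T0.001,A2048,B1024,C3000,\x00" would yield
        # columns ['T', 'A', 'B', 'C'] and values [0.001, 2048.0, 1024.0, 3000.0].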
col_name = [i[0] for i in self.raw_data[0].split(',')[:-1]]
# Initialize data buffers
buff=[]
for row in self.raw_data:
# Get all but first character in each segment separated by commas
# Get all the numbers in the data into a 2d list
new_data = row.split(',')[:-1]
# print(new_data)
buff.append([float(j[1:]) for j in new_data])
# print(col_name)
self.data = pd.DataFrame(buff, columns=col_name)
        # Conversion to temperature: R_therm = 10k*exp(K*(1/T-1/T0))
# and ADC = R/(R+10k)*(2^12-1)
T0 = 25+273.15 # K
K = 3950 # K
self.data['Temp'] = 1.0/(1.0/T0+np.log(self.data['C']/((2**12-1)-self.data['C']))/K)-273.15 # oC
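        # Derivation (from the comments above): ADC = R/(R+10k)*4095 gives
        # R/10k = ADC/(4095-ADC); with R = 10k*exp(K*(1/T-1/T0)) this inverts to
        # 1/T = 1/T0 + ln(ADC/(4095-ADC))/K, converted to Celsius by subtracting 273.15.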
# def initialGraphSettings(self):
# self.ui.graphWidgetOutput.showGrid(x=True, y=True, alpha=None)
# self.ui.graphWidgetInput.showGrid(x=True, y=True, alpha=None)
# self.ui.graphWidgetOutput.setBackground((0, 0, 0))
# self.ui.graphWidgetInput.setBackground((0, 0, 0))
def plotbuttonPushed(self):
# self.ui.graphWidgetOutput.clear()
# self.ui.graphWidgetInput.clear()
# self.legendOutput.clear()
# self.legendInput.clear()
self.time = self.data['T'].to_numpy()
        self.A = self.data['A'].to_numpy()*3.3/(2**12-1)
        self.B = self.data['B'].to_numpy()*3.3/(2**12-1)
self.A = self.A-self.A.mean()
self.B = self.B-self.B.mean()
self.sliderDialog = QDialog()
self.sliderDialog.setStyleSheet(qdarkstyle.load_stylesheet())
self.sliderDialog.setWindowTitle("Sample Slider")
self.slider_layout = QVBoxLayout(self.sliderDialog)
self.plot_button = QPushButton()
self.plot_button.setText("Plot")
self.plot_button.clicked.connect(self.plot_it)
self.slider_layout.addWidget(self.plot_button)
self.sliderDialog.setLayout(self.slider_layout)
self.sliderDialog.resize(900,75)
self.sliderDialog.exec_()
def plot_it(self):
# self.time = np.array([0,1,2,3,4,5,6,7,8,9])
# self.A = np.array([0,0,0,1,0,0,0,0,0,0])
# self.B = np.array([0,0,0,0,0,0,0,1,0,0])
self.slider_layout.removeWidget(self.plot_button)
self.plot_button.deleteLater()
self.plot_button.setParent(None)
self.slider_pos = 0
self.slider_label = QLabel(str(self.slider_pos))
self.slider_label.setAlignment(Qt.AlignCenter)
self.slider_layout.addWidget(self.slider_label)
self.slider = QSlider(Qt.Horizontal)
self.slider.setMinimum(-500)
self.slider.setMaximum(500)
        self.slider.setValue(self.slider_pos)
self.slider.setSingleStep(1)
self.slider_layout.addWidget(self.slider)
self.slider.valueChanged.connect(self.sliderValueChanged)
self.slider.sliderReleased.connect(self.sliderChanged)
self.fig = plt.figure()
ax = self.fig.add_subplot(111)
ax.plot(self.time, self.A)
self.update_line, = ax.plot(self.time,self.B)
self.fig.canvas.draw()
self.fig.canvas.flush_events()
plt.show()
def sliderValueChanged(self):
self.slider_pos = self.slider.value()
self.slider_label.setText(str(self.slider_pos))
def sliderChanged(self):
self.B2 = np.roll(self.B,self.slider_pos)
self.update_line.set_ydata(self.B2)
self.fig.canvas.draw()
self.fig.canvas.flush_events()
def savebuttonPushed(self):
print('Saving...',end='',flush=True)
self.now = datetime.datetime.now()
for row in self.data.to_numpy():
self.writerow(self.dl,row)
self.result_file.save(directory+'\\data\\Experiment Data '+ self.now.strftime("%Y-%m-%d %H-%M") +'.xlsx')
print('Done.')
def writerow(self,ws,output=[]):
for dd, data in enumerate(output):
ws.cell(row = self.nrow,column = dd+1).value = data
self.nrow = self.nrow+1
def settingsPushed(self):
self.settingsDialog = QDialog()
self.settingsDialog.setStyleSheet(qdarkstyle.load_stylesheet())
self.settingsDialog.setWindowTitle("Settings")
layout = QVBoxLayout(self.settingsDialog)
# com_widget = QComboBox(self.settingsDialog)
# self.list_ports()
# com_widget.addItems(self.com_ports)
# com_widget.setCurrentIndex(self.com_ports.index(self.COM))
# layout.addWidget(com_widget)
fs_layout = QHBoxLayout()
fs_label = QLabel()
fs_label.setText("Sampling Rate (fs)")
fs_widget = QLineEdit(self.settingsDialog)
fs_widget.setText(str(self.fs))
fs_layout.addWidget(fs_label)
fs_layout.addWidget(fs_widget)
layout.addLayout(fs_layout)
N_layout = QHBoxLayout()
N_label = QLabel()
N_label.setText("Number of Samples (N)")
N_widget = QLineEdit(self.settingsDialog)
N_widget.setText(str(self.N))
N_layout.addWidget(N_label)
N_layout.addWidget(N_widget)
layout.addLayout(N_layout)
delay_layout = QHBoxLayout()
delay_label = QLabel()
delay_label.setText("Record Delay (s)")
delay_widget = QLineEdit(self.settingsDialog)
delay_widget.setText(str(self.delay))
delay_layout.addWidget(delay_label)
delay_layout.addWidget(delay_widget)
layout.addLayout(delay_layout)
buttonBox = QDialogButtonBox(QDialogButtonBox.Save
| QDialogButtonBox.Cancel)
buttonBox.accepted.connect(self.settingsDialog.accept)
buttonBox.rejected.connect(self.settingsDialog.reject)
layout.addWidget(buttonBox)
self.settingsDialog.setLayout(layout)
if self.settingsDialog.exec_() == QDialog.Accepted:
self.fs = int(fs_widget.text())
self.N = int(N_widget.text())
self.dt = 1.0/self.fs
self.sample_time = self.N*self.dt
self.delay = int(delay_widget.text())
write_string = f"S0,N{self.N},%".encode('utf-8')
self.ser.write(write_string)
write_string = f"S1,T{self.fs},%".encode('utf-8')
self.ser.write(write_string)
write_string = f"S2,T{self.delay},%".encode('utf-8')
self.ser.write(write_string)
print('Settings saved.')
else:
print('Settings NOT saved.')
# print(f"Current fs is {self.fs}.")
# self.fs = int(input('Specify Sampling Rate (fs): '));
# print(f"Current N is {self.N}.")
# self.N = int(input('Specify Number of Samples (N): '));
# self.dt = 1.0/self.fs;
# self.sample_time = self.N*self.dt
# write_string = f"S0,N{self.N},%".encode('utf-8')
# self.ser.write(write_string)
# write_string = f"S1,T{self.fs},%".encode('utf-8')
# self.ser.write(write_string)
# print ('Settings saved.')
def list_ports(self):
self.com_ports = [
p.device
for p in serial.tools.list_ports.comports()
            if 'Arduino' in p.description or 'tty' in p.description
]
if not self.com_ports:
raise IOError("No COM ports found. Replug in USB cable and try again.")
self.com_ports.sort(key=lambda s: (s[:-2], int(s[-2:])) if s[-2] in '0123456789' else (s[:-1], int(s[-1:])))
def main():
app = QApplication(sys.argv)
main = Window()
main.show()
#app.aboutToQuit.connect(main.cleanUp) #See Window.cleanUp()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
# def main(c):
# global raw_data
# global sample_time
# global N
# global fs
# # Start recording of data on arduino
# if c == 'r':
# print('Recording...',end='',flush=True)
# ser.write(b'R1,%')
# sleep(sample_time)
# print('Done.')
# # Loop through saved data on arduino
# # Each data point line ends with \0
# # End of data ends with '#'
# if c == 's':
# print('Sending Data...',end='',flush=True)
# # Initialize data list
# raw_data=[]
# # Continue reading the data until '#' is found
# while(1):
# ser.write(b'R0%') # Request a data point from arduino
# line = [] # initialize as empty array, line buffer
# string_buff='' # intiailze as empty string
# while(1): # Read char by char until '\0' found
# if(ser.in_waiting > 0): # if data in buffer read
# line.append(ser.read(1).decode()) # Read 1 character and decode from bytes to characters
# if '\0' in line: break # if '\0' found break out of line loop
# # print(''.join(line)) # See line received
# raw_data.append(''.join(line)) # Append line read (joined as one single string) to data block
# if '#' in line: # if '#' found break out of reading loop
# break
# print('Done.')
# if c =='p':
# #TODO: Fix plotting, y-axis is upside down
# plot_data = decipher_raw_data(raw_data)
# # fig = plt.figure(1)
# # plt.clf()
# # dnp = plot_data.to_numpy()
# time = plot_data['T']
# A = plot_data['A']
# B = plot_data['B']
# # print(time)
# # print(A)
# #setup plotting
# #TODO: clean up figure and plotting
# # plt.xticks([])
# plt.plot(time, A)
# plt.plot(time,B)
# plt.ylim((0,2**12-1))
# plt.show()
# if c == 't':
# fs = int(input('Specify Sampling Rate (fs): '));
# N = int(input('Specify Number of Samples (N): '));
# dt = 1/fs;
# sample_time = N*dt;
# write_string = f"S0,N{N},%".encode('utf-8')
# ser.write(write_string)
# write_string = f"S1,T{fs},%".encode('utf-8')
# ser.write(write_string)
# # Convert each line of data into pandas array
# def decipher_raw_data(d:list):
# # Get first letter of each segment separated by commas
# # Give the pandas dataframe column names
# col_name = [i[0] for i in d[0].split(',')[:-1]]
# # Initialize data buffers
# buff=[]
# for row in d:
# # Get all but first character in each segment separated by commas
# # Get all the numbers in the data into a 2d list
# new_data = row.split(',')[:-1]
# # print(new_data)
# buff.append([float(j[1:]) for j in new_data])
# # print(col_name)
# df = pd.DataFrame(buff, columns=col_name)
# # print(df)
# return df
# def on_quit():
# if ser.is_open:
# ser.close()
# print("Serial closed.")
# atexit.register(on_quit)
# if __name__ == "__main__":
# parser = argparse.ArgumentParser(description="COM PORT")
# parser.add_argument('--port', dest='port', required=True)
# args = parser.parse_args()
# port = args.port
# # port = "COM3"
# ser = serial.Serial(port=port)
# ser.flush()
# fs = 1000;
# N = 1000;
# dt = 1/fs;
# sample_time = N*dt;
# write_string = f"S0,N{N},%".encode('utf-8')
# ser.write(write_string)
# write_string = f"S1,T{fs},%".encode('utf-8')
# ser.write(write_string)
# # write_string = f"S0,T{dt},%".encode()
# # ser.write(write_string)
# global raw_data
# raw_data=[]
# line=[]
# buff = []
# d = ''
# # df: pd.DataFrame
# while(1):
# try:
# uin = input('r: record, s: send, p: plot, t: change settings, q: quit\n')
# if(uin=='q'):break
# main(uin)
# except Exception as e:
# print(e)
|
class Movie:
def __init__(self, title, seeds=0):
self.title = title
self.seeds = seeds
def __hash__(self):
return hash(self.title)
def __eq__(self, other):
return self.title == other.title
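# A minimal sketch (hypothetical titles) of what __hash__/__eq__ buy us: two Movie
# objects with the same title collapse to one entry in a set, regardless of seeds.
if __name__ == '__main__':
    movies = {Movie('Alien', seeds=10), Movie('Alien', seeds=3), Movie('Blade Runner')}
    print(len(movies))  # 2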
|
from nltk.tokenize import word_tokenize
import numpy as np
def data_stream():
"""Stream the data in 'leipzig100k.txt' """
with open('leipzig100k.txt', 'r') as f:
for line in f:
for w in word_tokenize(line):
if w.isalnum():
yield w
def bloom_filter_set():
"""Stream the data in 'Proper.txt' """
with open('Proper.txt', 'r') as f:
for line in f:
yield line.strip()
############### DO NOT MODIFY ABOVE THIS LINE #################
# Implement a universal hash family of functions below: each function from the
# family should be able to hash a word from the data stream to a number in the
# appropriate range needed.
def uhf(rng):
"""Returns a hash function that can map a word to a number in the range
0 - rng
"""
pass
###############
################### Part 1 ######################
# Implement a universal hash family of functions below: each function from the
# family should be able to hash a word from the data stream to a number in the
# appropriate range needed.
def uhf(p, m):
    """Returns a hash function that maps a word (as an integer) to a number in
    the range 0 - m, using h(x) = ((a*x + b) % p) % m with p a prime.
    """
    a = np.random.randint(1, p)
    b = np.random.randint(0, p)
    return lambda x: ((a * x + b) % p) % m
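# For distinct keys x != y (both smaller than the prime p), this construction is a
# universal family: Pr[h(x) == h(y)] <= 1/m over the random choice of a and b.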
###############
################### Part 1 ######################
import nltk
nltk.download('punkt')
from bitarray import bitarray
size = 2**18 # size of the filter
a=uhf(2059859,262144)
b=uhf(2059861,262144)
c=uhf(2059879,262144)
d=uhf(2059891,262144)
e=uhf(2059913,262144)
hash_fns = [a,b,c,d,e] # place holder for hash functions
bit_array=bitarray(size)
bit_array.setall(0)
bloom_filter = bit_array
k=[]
for i in data_stream():
k.append(i)
N=[]
for i in bloom_filter_set():
N.append(i)
num_words = len(k) # number in data stream = 2059856
num_words_in_set = len(N) # number in Bloom filter's set = 32657
for word in bloom_filter_set():  # add the word to the filter by hashing etc.
    for h in hash_fns:
        num = h(int(word, 36))
        bloom_filter[num] = 1
print(len(bloom_filter))
M=[]
for word in data_stream():  # check for membership in the Bloom filter
    for h in hash_fns:
        if bloom_filter[h(int(word, 36))] == 0:
            break
    else:
        # only reached if no hash position was 0: the filter reports the word as present
        M.append(word)
print(len(M))
FP=len(set(M)-set(N))
FP
print('Total number of words in stream = %s'%(num_words,))
print("Total number of words in the Bloom filter's set = %s"%(num_words_in_set,))
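# Sanity check: with m = 2**18 bits, k = 5 hash functions and roughly n = 32657 inserted
# words, the usual approximation (1 - exp(-k*n/m))**k gives a false-positive rate of about 2%.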
################### Part 2 ######################
import random
num_features=2059856
def findPrime(n):
"""Returns a prime number larger than n
"""
    def isPrime(k):
        import math
        for divisor in range(2, int(math.sqrt(k)) + 1):
            if k % divisor == 0:
                return False
        return True
    if n % 2 == 0:
        candidate = n + 1
    else:
        candidate = n + 2
while not isPrime(candidate):
candidate += 2
return candidate
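# A quick sanity check (hypothetical inputs): findPrime(10) returns 11 and findPrime(20)
# returns 23, i.e. the next odd prime above the input.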
hash_range = 24 # number of bits in the range of the hash functions
fm_hash_functions = [None]*35 # Create the appropriate hashes here
p=findPrime(num_features)
def uhf(p, m):
a = random.randint(1,p-1)
b = random.randint(0,p-1)
return lambda x: ((a*x+b)%p)%m
hash_range = 24 # number of bits in the range of the hash functions
num_hashes = 35
'''
h1=[np.vectorize(uhf(p,2**hash_range))(range(num_features))]
len(h1[0])
'''
hashes=[np.vectorize(uhf(p,2**hash_range))(range(num_features)) for
_ in range(num_hashes)]
len(hashes)
def binary(n):
return bin(n)
binary_hashes=[]
for i in range(len(hashes)):
vf= (np.vectorize(binary)(hashes[i]))
binary_hashes.append(vf)
print(len(binary_hashes))
print(binary_hashes[0])
'''
fm_hash_functions = [np.vectorize(uhf(p,2**hash_range))(range(num_features))]*35 # Create the appropriate hashes here
fm_hash_functions[2]
'''
def num_trailing_bits(n):
"""Returns the number of trailing zeros in bin(n)
n: integer
"""
return len(n)- len(n.rstrip('0'))
#num_trailing_bits('1001000') #returns 3
trailing_zeros=[]
for i in range(len(binary_hashes)):
vf= (np.vectorize(num_trailing_bits)(binary_hashes[i]))
trailing_zeros.append(vf)
print(trailing_zeros[:5])
A=np.vstack(trailing_zeros)
A[2,:]
max_zeros=np.amax(A,axis = 1)
max_zeros.shape
#estimate of distinct elements = 2^20 =1048576
# One common choice: average the max trailing-zero counts R over the 35 hashes and use 2**R.
num_distinct = 2 ** int(np.round(np.mean(max_zeros)))
print("Estimate of number of distinct elements = %s"%(num_distinct,))
################### Part 3 ######################
sample=np.random.choice(2059856,513)
#sample
#variables=[]
d={}
for i in sample:
d[i]=k[i]
print(len(d))
'''
for i in sample:
variables.append(k[i])
print(len(variables))
sample
'''
var_reservoir = [0]*512
second_moment = 0
third_moment = 0
d1={}
for key,value in d.items():
L1 = k[key:]
d1[key]=L1.count(value)
print(len(d1))
estimates = []
for v in d1.values():
    i = num_features * (2 * v - 1)  # AMS estimator: n * (2*count - 1) from one sampled variable
    estimates.append(i)
print(len(estimates))
second_moment = float(np.mean(estimates))  # average the per-variable AMS estimates
# You can use numpy.random's API for maintaining the reservoir of variables
#for word in data_stream(): # Implement the AMS algorithm here
# pass
print("Estimate of second moment = %s"%(second_moment,))
print("Estimate of third moment = %s"%(third_moment,))
|
from asl_test_recognizer import TestRecognize
import unittest
suite = unittest.TestLoader().loadTestsFromModule(TestRecognize())
unittest.TextTestRunner().run(suite)
|
# -*- coding: utf-8 -*-
from setuptools import setup
from distutils.extension import Extension
from distutils.version import LooseVersion
import platform
import sys
import warnings
if "--without-cseabreeze" in sys.argv:
sys.argv.remove("--without-cseabreeze") # this is a hack...
# user requests to not install cython wrapper
_extensions = []
else:
# default to building the cython wrapper
try:
# try to import cython
import Cython
# We require at least version 0.18
if LooseVersion(Cython.__version__) < LooseVersion("0.18"):
raise ImportError("Cython version < 0.18")
except ImportError:
# if not installed or too old fall back to the provided C file
cythonize = lambda x: x
fn_ext = "c"
else:
from Cython.Build import cythonize
fn_ext = "pyx"
# The windows version of the cython wrapper depends on winusb
if platform.system() == "Windows":
libs = ['seabreeze', 'winusb']
elif platform.system() == "Darwin":
libs = ['seabreeze']
else:
libs = ['seabreeze', 'usb']
# define extension
try:
import numpy
except ImportError:
warnings.warn("Installation of cseabreeze backend requires numpy.")
exit(1)
extensions = [Extension('seabreeze.cseabreeze.wrapper',
['./seabreeze/cseabreeze/wrapper.%s' % fn_ext],
include_dirs=[numpy.get_include()],
libraries=libs,
)]
_extensions = cythonize(extensions)
setup(
name='seabreeze',
version='0.4.3',
author='Andreas Poehlmann',
author_email='mail@andreaspoehlmann.de',
packages=['seabreeze',
'seabreeze.cseabreeze',
'seabreeze.pyseabreeze',
'seabreeze.pyseabreeze.interfaces'],
scripts=['scripts/seabreeze-compare'],
description=('Python interface module for oceanoptics spectrometers. '
'This software is not associated with Ocean Optics. '
'Use it at your own risk.'),
long_description=open('README.md').read(),
requires=['python (>= 2.7)', 'pyusb (>= 1.0)', 'numpy'],
ext_modules=_extensions,
)
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
import numpy
extensions = [
Extension(
"_voidfinder_cython",
["_voidfinder_cython.pyx"],
include_dirs=[numpy.get_include()+"/numpy"],
libraries=["m"],
#extra_compile_args=['-fopenmp'],
#extra_link_args=['-fopenmp']
),
Extension(
"_voidfinder_cython_find_next",
["_voidfinder_cython_find_next.pyx"],
include_dirs=[numpy.get_include()+"/numpy"],
libraries=["m"],
#extra_compile_args=['-fopenmp'],
#extra_link_args=['-fopenmp']
)
]
setup(
name = 'voidfinder',
cmdclass = {'build_ext': build_ext},
ext_modules = extensions,
include_dirs=[numpy.get_include(), numpy.get_include()+"/numpy"]
)
'''
Extension(
"lgamma",
["lgamma.pyx"],
include_dirs=[numpy.get_include()+"/numpy"],
libraries=["m"],
#extra_compile_args=['-fopenmp'],
#extra_link_args=['-fopenmp']
),
Extension(
"typedefs",
["typedefs.pyx"],
include_dirs=[numpy.get_include()+"/numpy"],
libraries=["m"],
#extra_compile_args=['-fopenmp'],
#extra_link_args=['-fopenmp']
),
Extension(
"dist_metrics",
["dist_metrics.pyx"],
include_dirs=[numpy.get_include()+"/numpy"],
libraries=["m"],
#extra_compile_args=['-fopenmp'],
#extra_link_args=['-fopenmp']
),
Extension(
"kd_tree",
["kd_tree.pyx"],
include_dirs=[numpy.get_include()+"/numpy"],
libraries=["m"],
#extra_compile_args=['-fopenmp'],
#extra_link_args=['-fopenmp']
),
'''
|
#!/usr/bin/env python
# Script by Steven Grove (@sigwo)
# www.sigwo.com
# Reference http://stackoverflow.com/questions/3160699/python-progress-bar/15860757#15860757
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Date: 09-24-13
import sys

def update_progress(progress):
    barLength = 10  # Modify this to change the length of the progress bar
status = ""
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
status = "error: progress var must be float\r\n"
if progress < 0:
progress = 0
status = "Halt...\r\n"
if progress >= 1:
progress = 1
status = "Done...\r\n"
block = int(round(barLength*progress))
text = "\rPercent: [{0}] {1}% {2}".format( "#"*block + "-"*(barLength-block), progress*100, status)
sys.stdout.write(text)
sys.stdout.flush()
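# A minimal usage sketch (hypothetical loop), driving the bar from 0% to 100%:
if __name__ == '__main__':
    import time
    for i in range(11):
        update_progress(i / 10.0)
        time.sleep(0.1)
    sys.stdout.write('\n')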
|
#!/usr/bin/env python3
"""mem2log"""
from argparse import ArgumentParser
from ctypes import CDLL
from signal import SIGHUP, SIGINT, SIGQUIT, SIGTERM, signal
from sys import exit
from time import sleep
def log(msg):
"""
"""
print(msg)
if separate_log:
logging.info(msg)
def mlockall():
"""
"""
MCL_CURRENT = 1
MCL_FUTURE = 2
MCL_ONFAULT = 4
libc = CDLL(None, use_errno=True)
result = libc.mlockall(MCL_CURRENT | MCL_FUTURE | MCL_ONFAULT)
if result != 0:
result = libc.mlockall(MCL_CURRENT | MCL_FUTURE)
if result != 0:
log('WARNING: cannot lock process memory: [Errno {}]'.format(
result))
else:
log('Process memory locked with MCL_CURRENT | MCL_FUTURE')
else:
log('Process memory locked with MCL_CURRENT | MCL_FUTURE | MCL_'
'ONFAULT')
def check_meminfo():
"""
"""
with open('/proc/meminfo') as f:
for n, line in enumerate(f):
# MA
if n == mem_available_index:
mem_available = int(line.split(':')[1][:-4])
continue
# MF
if n == mem_free_index:
mem_free = int(line.split(':')[1][:-4])
continue
# BU
if n == buffers_index:
buffers = int(line.split(':')[1][:-4])
continue
# CA
if n == cached_index:
cached = int(line.split(':')[1][:-4])
continue
# AA
if n == active_anon_index:
active_anon = int(line.split(':')[1][:-4])
continue
# IA
if n == inactive_anon_index:
inactive_anon = int(line.split(':')[1][:-4])
continue
# AF
if n == active_file_index:
active_file = int(line.split(':')[1][:-4])
continue
# IF
if n == inactive_file_index:
inactive_file = int(line.split(':')[1][:-4])
continue
# ST
if n == swap_total_index:
swap_total = int(line.split(':')[1][:-4])
continue
# SF
if n == swap_free_index:
swap_free = int(line.split(':')[1][:-4])
continue
# DI
if n == dirty_index:
dirty = int(line.split(':')[1][:-4])
continue
# SH
if n == shmem_index:
shmem = int(line.split(':')[1][:-4])
continue
# SR
if n == sreclaimable_index:
sreclaimable = int(line.split(':')[1][:-4])
continue
return (
mem_available,
mem_free,
buffers,
cached,
active_anon,
inactive_anon,
active_file,
inactive_file,
swap_total,
swap_free,
dirty,
shmem,
sreclaimable)
def percent(num):
"""Interprete num as percentage."""
return round(num * 100, 1)
def signal_handler(signum, frame):
"""
"""
def signal_handler_inner(signum, frame):
pass
for i in sig_list:
signal(i, signal_handler_inner)
log('--')
log('Got the {} signal'.format(
sig_dict[signum]))
log('Peak values:')
log(' MA: min {}, max {}'.format(
round(min_dict['MA'] / 1024, 1),
round(max_dict['MA'] / 1024, 1)))
log(' MF: min {}, max {}'.format(
round(min_dict['MF'] / 1024, 1),
round(max_dict['MF'] / 1024, 1)))
if mode == '1':
log(' A: min {}, max {}'.format(
round(min_dict['A'] / 1024, 1),
round(max_dict['A'] / 1024, 1)))
log(' F: min {}, max {}'.format(
round(min_dict['F'] / 1024, 1),
round(max_dict['F'] / 1024, 1)))
log(' AF: min {}, max {}'.format(
round(min_dict['AF'] / 1024, 1),
round(max_dict['AF'] / 1024, 1)))
log(' IF: min {}, max {}'.format(
round(min_dict['IF'] / 1024, 1),
round(max_dict['IF'] / 1024, 1)))
log(' SF: min {}, max {}'.format(
round(min_dict['SF'] / 1024, 1),
round(max_dict['SF'] / 1024, 1)))
else:
log(' BU: min {}, max {}'.format(
round(min_dict['BU'] / 1024, 1),
round(max_dict['BU'] / 1024, 1)))
log(' CA: min {}, max {}'.format(
round(min_dict['CA'] / 1024, 1),
round(max_dict['CA'] / 1024, 1)))
log(' AA: min {}, max {}'.format(
round(min_dict['AA'] / 1024, 1),
round(max_dict['AA'] / 1024, 1)))
log(' IA: min {}, max {}'.format(
round(min_dict['IA'] / 1024, 1),
round(max_dict['IA'] / 1024, 1)))
log(' AF: min {}, max {}'.format(
round(min_dict['AF'] / 1024, 1),
round(max_dict['AF'] / 1024, 1)))
log(' IF: min {}, max {}'.format(
round(min_dict['IF'] / 1024, 1),
round(max_dict['IF'] / 1024, 1)))
log(' SF: min {}, max {}'.format(
round(min_dict['SF'] / 1024, 1),
round(max_dict['SF'] / 1024, 1)))
log(' SU: min {}, max {}'.format(
round(min_dict['SU'] / 1024, 1),
round(max_dict['SU'] / 1024, 1)))
log(' DI: min {}, max {}'.format(
round(min_dict['DI'] / 1024, 1),
round(max_dict['DI'] / 1024, 1)))
log(' CF: min {}, max {}'.format(
round(min_dict['CF'] / 1024, 1),
round(max_dict['CF'] / 1024, 1)))
log(' SH: min {}, max {}'.format(
round(min_dict['SH'] / 1024, 1),
round(max_dict['SH'] / 1024, 1)))
log(' SR: min {}, max {}'.format(
round(min_dict['SR'] / 1024, 1),
round(max_dict['SR'] / 1024, 1)))
log('Exit.')
exit()
parser = ArgumentParser()
parser.add_argument(
'-i',
'--interval',
help="""interval in sec""",
default=2,
type=float
)
parser.add_argument(
'-l',
'--log',
help="""path to log file""",
default=None,
type=str
)
parser.add_argument(
'-m',
'--mode',
help="""mode (1 or 2)""",
default='1',
type=str
)
args = parser.parse_args()
interval = args.interval
log_file = args.log
mode = args.mode
if log_file is None:
separate_log = False
else:
separate_log = True
import logging
if log_file is not None:
logstring = 'log file: {}, '.format(log_file)
else:
logstring = 'log file is not set, '
if separate_log:
try:
logging.basicConfig(
filename=log_file,
level=logging.INFO,
format="%(asctime)s: %(message)s")
except Exception as e:
print(e)
exit(1)
if interval <= 0:
log('error: argument -i/--interval: the value must be greater than 0')
exit(1)
with open('/proc/meminfo') as f:
mem_list = f.readlines()
mem_list_names = []
for s in mem_list:
mem_list_names.append(s.split(':')[0])
mem_available_index = mem_list_names.index('MemAvailable')
mem_free_index = mem_list_names.index('MemFree')
buffers_index = mem_list_names.index('Buffers')
cached_index = mem_list_names.index('Cached')
active_anon_index = mem_list_names.index('Active(anon)')
inactive_anon_index = mem_list_names.index('Inactive(anon)')
active_file_index = mem_list_names.index('Active(file)')
inactive_file_index = mem_list_names.index('Inactive(file)')
swap_total_index = mem_list_names.index('SwapTotal')
swap_free_index = mem_list_names.index('SwapFree')
dirty_index = mem_list_names.index('Dirty')
shmem_index = mem_list_names.index('Shmem')
sreclaimable_index = mem_list_names.index('SReclaimable')
mem_total = int(mem_list[0].split(':')[1][:-4])
swap_total = int(mem_list[swap_total_index].split(':')[1][:-4])
sig_list = [SIGTERM, SIGINT, SIGQUIT, SIGHUP]
sig_dict = {
SIGINT: 'SIGINT',
SIGQUIT: 'SIGQUIT',
SIGHUP: 'SIGHUP',
SIGTERM: 'SIGTERM'
}
for i in sig_list:
signal(i, signal_handler)
if not (mode == '1' or mode == '2'):
print('ERROR: invalid mode. Valid values are 1 and 2. Exit.')
exit(1)
log('Starting mem2log with interval {}s, mode: {}'.format(interval, mode))
if separate_log:
log('Log file: {}'.format(log_file))
mlockall()
log('All values are in mebibytes')
log(
'MemTotal: {}, SwapTotal: {}'.format(
round(mem_total / 1024, 1),
round(swap_total / 1024, 1)
)
)
log('--')
min_dict = dict()
max_dict = dict()
if mode == '2':
log(
'MA is MemAvailable, '
'MF is MemFree, '
'BU is Buffers, '
'CA is Cached'
)
log(
'AA is Active(anon), '
'IA is Inactive(anon), '
'AF is Active(file), '
'IF is Inactive(file)'
)
log(
'SF is SwapFree, '
'SU is `SwapTotal - SwapFree`, '
'DI is Dirty, '
'CF is Clean File (`AF + IF - DI`), '
'SH is Shmem, '
'SR is SReclaimable'
)
log('--')
while True:
(mem_available, mem_free, buffers, cached, active_anon, inactive_anon,
active_file, inactive_file, swap_total, swap_free, dirty, shmem,
sreclaimable) = check_meminfo()
swap_used = swap_total - swap_free
clean_file = max(0, (active_file + inactive_file - dirty))
if ('MA' not in max_dict or mem_available > max_dict['MA']):
max_dict['MA'] = mem_available
if ('MA' not in min_dict or mem_available < min_dict['MA']):
min_dict['MA'] = mem_available
if ('MF' not in max_dict or mem_free > max_dict['MF']):
max_dict['MF'] = mem_free
if ('MF' not in min_dict or mem_free < min_dict['MF']):
min_dict['MF'] = mem_free
if ('BU' not in max_dict or buffers > max_dict['BU']):
max_dict['BU'] = buffers
if ('BU' not in min_dict or buffers < min_dict['BU']):
min_dict['BU'] = buffers
if ('CA' not in max_dict or cached > max_dict['CA']):
max_dict['CA'] = cached
if ('CA' not in min_dict or cached < min_dict['CA']):
min_dict['CA'] = cached
if ('AA' not in max_dict or active_anon > max_dict['AA']):
max_dict['AA'] = active_anon
if ('AA' not in min_dict or active_anon < min_dict['AA']):
min_dict['AA'] = active_anon
if ('IA' not in max_dict or inactive_anon > max_dict['IA']):
max_dict['IA'] = inactive_anon
if ('IA' not in min_dict or inactive_anon < min_dict['IA']):
min_dict['IA'] = inactive_anon
if ('AF' not in max_dict or active_file > max_dict['AF']):
max_dict['AF'] = active_file
if ('AF' not in min_dict or active_file < min_dict['AF']):
min_dict['AF'] = active_file
if ('IF' not in max_dict or inactive_file > max_dict['IF']):
max_dict['IF'] = inactive_file
if ('IF' not in min_dict or inactive_file < min_dict['IF']):
min_dict['IF'] = inactive_file
if ('SF' not in max_dict or swap_free > max_dict['SF']):
max_dict['SF'] = swap_free
if ('SF' not in min_dict or swap_free < min_dict['SF']):
min_dict['SF'] = swap_free
if ('SU' not in max_dict or swap_used > max_dict['SU']):
max_dict['SU'] = swap_used
if ('SU' not in min_dict or swap_used < min_dict['SU']):
min_dict['SU'] = swap_used
if ('DI' not in max_dict or dirty > max_dict['DI']):
max_dict['DI'] = dirty
if ('DI' not in min_dict or dirty < min_dict['DI']):
min_dict['DI'] = dirty
if ('CF' not in max_dict or clean_file > max_dict['CF']):
max_dict['CF'] = clean_file
if ('CF' not in min_dict or clean_file < min_dict['CF']):
min_dict['CF'] = clean_file
if ('SH' not in max_dict or shmem > max_dict['SH']):
max_dict['SH'] = shmem
if ('SH' not in min_dict or shmem < min_dict['SH']):
min_dict['SH'] = shmem
if ('SR' not in max_dict or sreclaimable > max_dict['SR']):
max_dict['SR'] = sreclaimable
if ('SR' not in min_dict or sreclaimable < min_dict['SR']):
min_dict['SR'] = sreclaimable
log(
'MA {}, MF {}, BU {}, CA {}, AA {}, IA {}, AF {}, IF {}, '
'SF {}, SU {}, DI {}, CF {}, SH {}, SR {}'.format(
round(mem_available / 1024),
round(mem_free / 1024),
round(buffers / 1024),
round(cached / 1024),
round(active_anon / 1024),
round(inactive_anon / 1024),
round(active_file / 1024),
round(inactive_file / 1024),
round(swap_free / 1024),
round(swap_used / 1024),
round(dirty / 1024),
round(clean_file / 1024),
round(shmem / 1024),
round(sreclaimable / 1024)
))
sleep(interval)
log(
'MA is MemAvailable, '
'MF is MemFree, '
'A is Anon, '
'F is File'
)
log(
'AF is Active(file), '
'IF is Inactive(file), '
'SF is SwapFree'
)
log('--')
while True:
(mem_available, mem_free, _, _, active_anon, inactive_anon, active_file,
inactive_file, swap_total, swap_free, _, _, _) = check_meminfo()
swap_used = swap_total - swap_free
anon = active_anon + inactive_anon
filez = active_file + inactive_file
if ('MA' not in max_dict or mem_available > max_dict['MA']):
max_dict['MA'] = mem_available
if ('MA' not in min_dict or mem_available < min_dict['MA']):
min_dict['MA'] = mem_available
if ('MF' not in max_dict or mem_free > max_dict['MF']):
max_dict['MF'] = mem_free
if ('MF' not in min_dict or mem_free < min_dict['MF']):
min_dict['MF'] = mem_free
if ('A' not in max_dict or anon > max_dict['A']):
max_dict['A'] = anon
if ('A' not in min_dict or anon < min_dict['A']):
min_dict['A'] = anon
if ('F' not in max_dict or filez > max_dict['F']):
max_dict['F'] = filez
if ('F' not in min_dict or filez < min_dict['F']):
min_dict['F'] = filez
if ('AF' not in max_dict or active_file > max_dict['AF']):
max_dict['AF'] = active_file
if ('AF' not in min_dict or active_file < min_dict['AF']):
min_dict['AF'] = active_file
if ('IF' not in max_dict or inactive_file > max_dict['IF']):
max_dict['IF'] = inactive_file
if ('IF' not in min_dict or inactive_file < min_dict['IF']):
min_dict['IF'] = inactive_file
if ('SF' not in max_dict or swap_free > max_dict['SF']):
max_dict['SF'] = swap_free
if ('SF' not in min_dict or swap_free < min_dict['SF']):
min_dict['SF'] = swap_free
if ('SU' not in max_dict or swap_used > max_dict['SU']):
max_dict['SU'] = swap_used
if ('SU' not in min_dict or swap_used < min_dict['SU']):
min_dict['SU'] = swap_used
log(
'MA={} ({}%), MF={}, A={}, F={} (AF={}, IF={}), SF={} ({}%)'.format(
round(mem_available / 1024),
round(mem_available / mem_total * 100),
round(mem_free / 1024),
round(anon / 1024),
round(filez / 1024),
round(active_file / 1024),
round(inactive_file / 1024),
round(swap_free / 1024),
round(swap_free / (swap_total + 1) * 100)
))
sleep(interval)
|
def outer(func):
def inner(name, age):
if age < 0:
age = 0
        func(name, age)  # call the function that was passed in as an argument
return inner
# Applying the @ symbol to a function is equivalent to wrapping it with the decorator by hand
@outer  # equivalent to: myprint = outer(myprint)
def myprint(name, age):
print("{} is {} years old".format(name, age))
# myprint = outer(myprint)
myprint("xiaoming", -12)
|
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 7 16:17:46 2017
@author: zx621293
"""
from time import time
import matplotlib.pyplot as plt
from sklearn import manifold

def myIsomap(X, color, n_neighbors, n_components=2):
t0 = time()
    Y = manifold.Isomap(n_neighbors=n_neighbors, n_components=n_components).fit_transform(X)
t1 = time()
fig = plt.figure(figsize=(15, 8))
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap with %i neighbors considered for each data point (%.2g sec)" % (n_neighbors, t1 - t0))
plt.axis('tight')
plt.xlabel("first principal component",fontsize=14)
plt.ylabel("second principal component",fontsize=14)
plt.show()
    return
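# A hedged usage sketch (assumption: scikit-learn's S-curve toy data stands in for
# the author's dataset; any dataset with matching shapes would do).
from sklearn import datasets

X_demo, color_demo = datasets.make_s_curve(n_samples=1000, random_state=0)
myIsomap(X_demo, color_demo, n_neighbors=10)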
|
# Generated by Django 3.1.1 on 2020-09-17 03:12
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('attend', '0011_auto_20200917_0842'),
]
operations = [
migrations.RemoveField(
model_name='face',
name='created',
),
]
|
"""方法1:使用main方式执行单个测试类的全部用例"""
__author__ = 'slyang'
import unittest
from count import Count
# Create a test class; it must inherit from unittest.TestCase
class MyTest(unittest.TestCase):
    # Case 1 -- integer addition
    def test_b_add1(self):
        # test steps
        count = Count()
        result = count.add(1, 2)
        # actual result
        actual_result = result
        # expected result
        expect_result = 3
        # assertion
        self.assertEqual(expect_result, actual_result)

    # Case 2 -- decimal addition
    def test_a_add2(self):
        # test steps
        count = Count()
        result = count.add(1.1, 2.13)
        # actual result
        actual_result = float(result)
        # expected result
        expect_result = 3.23
        # assertion (floating-point addition is not exact, so compare approximately)
        self.assertAlmostEqual(expect_result, actual_result)

    # Case 3 -- integer subtraction
    def test_B_sub1(self):
        pass

    # Case 4 -- decimal subtraction
    def test_A_sub2(self):
        pass
if __name__ == '__main__':
    # Approach 1 -- no guaranteed order (methods run alphabetically)
    # unittest.main()
    # Approach 2 -- run the cases added to a suite
    suit = unittest.TestSuite()
    suit.addTest(MyTest("test_b_add1"))
    suit.addTest(MyTest("test_a_add2"))
    runner = unittest.TextTestRunner()
    runner.run(suit)
    # Approach 3 -- one file per component under test; that file holds all of the component's test cases
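    # A hedged sketch of Approach 3 (the 'tests' directory name is an assumption,
    # not part of the original): discover every test_*.py file and run them as one suite.
    import os
    if os.path.isdir('tests'):
        suite3 = unittest.defaultTestLoader.discover('tests', pattern='test_*.py')
        unittest.TextTestRunner().run(suite3)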
|
# encoding=utf8
"""
Author: 'jdwang'
Date: 'create date: 2017-01-13'; 'last updated date: 2017-01-13'
Email: '383287471@qq.com'
Describe: test the API deployed on the server
"""
from __future__ import print_function
import requests
__version__ = '1.3'
TAG_URL = 'http://119.29.81.170:10545/id_detection/regex/rawInput=%s'
# sentences to test, one per line
sentences = list(open('test_sentences.txt'))
for sentence in sentences:
sentence = sentence.strip()
r = requests.get(TAG_URL % sentence)
    if r.status_code == 200:
        if r.json()['is_id']:
            print('[%s] contains valid semantics' % sentence)
        else:
            print('[%s] contains no valid semantics' % sentence)
    else:
        print('The website could not be reached!')
|
"""
Creates hierarchical data format files with complex frequency spectrograms for audio files in a given folder.
"""
__author__ = 'David Flury'
__email__ = "david@flury.email"
import os
import sys
import glob
import h5py
import time
import librosa
import warnings
import argparse
import numpy as np
import multiprocessing
import skimage.io as io
from os.path import basename
from joblib import Parallel, delayed
from matplotlib.cm import get_cmap
import audioread
audio_extensions = ['.wav', '.mp3']
error_count = 0
def generate_container(file, destination, fft_window, target_sample_rate, channels, generate_image):
global error_count
try:
stereo = channels > 1
audio, sample_rate = librosa.load(file, mono=not stereo, sr=target_sample_rate if target_sample_rate > 0 else None)
spectrograms = []
real_stereo = isinstance(audio[0], (np.ndarray))
if stereo and real_stereo:
            spectrograms.append(stft_to_complex_spectrogram(generate_spectrogram(file, audio[0], '0-stereo_left', fft_window, sample_rate, generate_image)))
            spectrograms.append(stft_to_complex_spectrogram(generate_spectrogram(file, audio[1], '1-stereo_right', fft_window, sample_rate, generate_image)))
else:
spectrograms.append(stft_to_complex_spectrogram(generate_spectrogram(file, audio, '0-mono', fft_window, sample_rate, generate_image)))
configuration = 'fft-window=%d_sample-rate=%d_channels=%d-%s' % (fft_window, sample_rate, channels, "stereo" if stereo else "mono")
song = os.path.basename(os.path.dirname(file))
collection = os.path.basename(os.path.dirname(os.path.dirname(file)))
folder = os.path.join(destination, configuration, collection, song)
if not os.path.exists(folder):
try:
os.makedirs(folder)
except:
pass
path = os.path.join(folder, "%s-spectrogram_%s" % (os.path.basename(file), configuration))
save_spectrogram_data(spectrograms, path, fft_window, sample_rate, channels, real_stereo, song, collection)
print('Generated spectrogram %s' % path)
except Exception as e:
print('Error while generating spectrogram for %s: %s' % (file, str(e)))
error_count += 1
pass
def generate_spectrogram(file, audio, part, fft_window, sample_rate, generate_image):
stft = librosa.stft(audio, fft_window)
if generate_image:
save_spectrogram_image(stft_to_real_spectrogram(stft), file, part, fft_window, sample_rate)
return stft
def stft_to_real_spectrogram(stft):
spectrogram = np.log1p(np.abs(stft))
return np.array(spectrogram)[:, :, np.newaxis]
def stft_to_complex_spectrogram(stft):
real_part = np.real(stft)
imag_part = np.imag(stft)
spectrogram = np.zeros((stft.shape[0], stft.shape[1], 2))
spectrogram[:, :, 0] = real_part
spectrogram[:, :, 1] = imag_part
return spectrogram
def save_spectrogram_image(spectrogram, file, part, fft_window, sample_rate):
file_name = '%s_spectrogram_%s_fft-window[%d]_sample-rate[%d].png' % (file, part, fft_window, sample_rate)
real_part = spectrogram[:, :, 0]
cm_hot = get_cmap('plasma')
image = np.clip((real_part - np.min(real_part)) / (np.max(real_part) - np.min(real_part)), 0, 1)
with warnings.catch_warnings():
image = cm_hot(image)
warnings.simplefilter('ignore')
io.imsave(file_name, image)
def save_spectrogram_data(spectrograms, file, fft_window, sample_rate, channels, real_stereo, song, collection):
h5f = h5py.File(file + '.h5', 'w')
if len(spectrograms) > 1:
h5f.create_dataset('spectrogram_left', data=spectrograms[0])
h5f.create_dataset('spectrogram_right', data=spectrograms[1])
else:
h5f.create_dataset('spectrogram', data=spectrograms[0])
dimensions = spectrograms[0].shape
h5f.create_dataset('height', data=dimensions[0])
h5f.create_dataset('width', data=dimensions[1])
h5f.create_dataset('depth', data=dimensions[2])
h5f.create_dataset('fft_window', data=fft_window)
h5f.create_dataset('sample_rate', data=sample_rate)
h5f.create_dataset('channels', data=channels)
h5f.create_dataset('stereo', data=real_stereo)
h5f.create_dataset('song', data=song)
h5f.create_dataset('collection', data=collection)
h5f.create_dataset('file', data=os.path.basename(file))
h5f.close()
def build_destination(file, path, destination):
return file.replace(path, destination)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Creates hierarchical data format files including complex frequency spectrograms for audio files in a given folder.')
parser.add_argument('--path', default='U:\\3_filter\\musdb18\\', type=str, help='Working path')
parser.add_argument('--destination', default='D:\\Data\\unmix.io\\4_training\\', type=str, help='Destination path')
parser.add_argument('--fft_window', default=1536, type=int, help='Size [Samples] of FFT windows')
parser.add_argument('--sample_rate', default=11025, type=int, help='Optional target samplerate [Hz] for the audiofiles')
parser.add_argument('--channels', default=1, type=int, help='1 (Mono) or 2 (Stereo)')
parser.add_argument('--generate_image', default='true', type=str, help='If spectrogram image should be generated and saved')
parser.add_argument('--job_count', default=int(multiprocessing.cpu_count()), type=int, help='Maximum number of concurrently running jobs')
args = parser.parse_args()
# Arguments cleanup
if args.channels > 2:
args.channels = 2
if args.channels < 1:
args.channels = 1
if args.generate_image.lower() in ('no', 'false', 'f', 'n', '0'):
args.generate_image = False
else:
args.generate_image = True
print('Arguments:', str(args))
files = [] # Load all files into list
print('Load all music files...')
for file in glob.iglob(os.path.join(args.path, '**/*'), recursive=True):
extension = os.path.splitext(file)[1].lower()
if extension in audio_extensions:
files.append(file)
print('Found %d music files' % len(files))
start = time.time()
print('Generate spectrograms with maximum %d jobs...' % args.job_count)
#generate_container(files[0], args.destination, args.fft_window, args.sample_rate, args.channels, args.generate_image)
Parallel(n_jobs=args.job_count)(delayed(generate_container)(file, args.destination, args.fft_window, args.sample_rate, args.channels, args.generate_image) for file in files)
end = time.time()
print('Finished processing in %d [s] with %d errors.' % ((end - start), error_count))
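# A hedged read-back sketch (not part of the original script): how one of the .h5
# containers written above could be loaded again; the path argument is a placeholder
# for a file this script has actually produced.
def load_container(path):
    with h5py.File(path, 'r') as h5f:
        key = 'spectrogram_left' if 'spectrogram_left' in h5f else 'spectrogram'
        spectrogram = np.array(h5f[key])
        meta = {name: h5f[name][()] for name in ('fft_window', 'sample_rate', 'channels')}
    return spectrogram, meta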
|
from itertools import izip_longest
def solution(s):
return [''.join(a) for a in izip_longest(s[::2], s[1::2], fillvalue='_')]
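# A hedged Python 3 note: itertools.izip_longest (used above) was renamed to
# zip_longest, so the same pairing trick reads as follows;
# solution_py3("abcde") -> ['ab', 'cd', 'e_'].
from itertools import zip_longest

def solution_py3(s):
    return [''.join(pair) for pair in zip_longest(s[::2], s[1::2], fillvalue='_')]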
|
"""
A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.
Find the largest palindrome made from the product of two 3-digit numbers.
"""
def isPalindrome(number):
reverse = 0
n = number
while n != 0:
reverse = reverse * 10
reverse = reverse + n % 10
n = int(n / 10)
if number == reverse:
return True
else:
return False
product = 0
maxPali = 0
for i in range(100, 1000):  # 3-digit factors only, as the problem statement requires
    for j in range(i, 1000):  # start at i to avoid checking symmetric pairs twice
product = i * j
if product > maxPali and isPalindrome(product) == True:
maxPali = product
print(maxPali)
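# A hedged alternative (not in the original): the digit-reversal check above can
# also be written by comparing the decimal string with its reverse.
def is_palindrome_str(number):
    s = str(number)
    return s == s[::-1]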
|
import re
import os
import pandas as pd
import time
DIR=os.getcwd()
logfile=DIR+'/log-count.txt'
sp_names=[]
chr_names=['chr1','chr2','chr3','chr4',
'chr5','chr6','chr7','chr8',
'chr9','chr10','chr11','chr12',
'chr13','chr14','chr15','chr16',
'chr17','chr18','chr19','chr20',
'chr21','chr22']
sp=open('species_names.txt')
for line in sp:
sp_names.append(line.strip())
contexts=[ c1+'.'+c2 for c1 in ['A','T','G','C'] for c2 in ['A','T','G','C']]
context_counts=dict.fromkeys(contexts,0)
context_abundance=pd.DataFrame()
context_abundance['Context']=contexts
log=open(logfile,'w')
log.write(str(sp_names)+'\n')
log.close()
for name in sp_names:
log=open(logfile,'a')
log.write('Start time \n'+str(time.asctime(time.localtime()))+'\n')
log.write('Start '+name+'\n')
log.close()
context_counts=dict.fromkeys(contexts,0)
for CHR in chr_names: #['chr1']:
log=open(logfile,'a')
log.write('chr '+CHR+'\n')
log.close()
file_path=CHR+'/'+name+'.fasta'
f=open(file_path,'r')
f.readline()
first = f.read(1).upper()
second= f.read(1).upper()
third = f.read(1).upper()
while third!='' and third!=' ' and third!=None:
if third.isalpha() and first.isalpha() and ('N' not in third+first) and ('n' not in third+first):
context_counts[first+'.'+third]+=1
first =second
second=third
third =f.read(1).upper()
f.close()
    context_abundance[name] = list(context_counts.values())
context_counts=dict.fromkeys(context_counts,0)
log=open(logfile,'a')
log.write('End time \n'+str(time.asctime(time.localtime()))+'\n')
log.close()
context_abundance.to_csv('context_abundance.csv',sep='\t')
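# A hedged sketch (assuming the same one-sequence-per-file FASTA layout as above):
# reading the whole sequence once and pairing bases two positions apart is much
# faster than the single-character reads used in the loop above.
def count_contexts(fasta_path):
    counts = dict.fromkeys(contexts, 0)
    with open(fasta_path) as handle:
        handle.readline()  # skip the FASTA header line
        seq = handle.read().replace('\n', '').upper()
    for left, right in zip(seq, seq[2:]):  # bases two positions apart (X . Y)
        key = left + '.' + right
        if key in counts:  # silently skips N and other ambiguity codes
            counts[key] += 1
    return counts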
|
# Generated by Django 2.2.6 on 2019-10-20 20:33
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('checkout', '0008_promotioncode'),
]
operations = [
migrations.AddField(
model_name='promotioncode',
name='percentage_discount',
field=models.FloatField(default='0', validators=[django.core.validators.MinValueValidator(0.0), django.core.validators.MaxValueValidator(1.0)]),
preserve_default=False,
),
]
|
# This file is part of beets.
# Copyright 2016, Thomas Scholtes.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import unittest
from test.helper import TestHelper
from mediafile import MediaFile
from beets.util import displayable_path
class InfoTest(unittest.TestCase, TestHelper):
def setUp(self):
self.setup_beets()
self.load_plugins('info')
def tearDown(self):
self.unload_plugins()
self.teardown_beets()
def test_path(self):
path = self.create_mediafile_fixture()
mediafile = MediaFile(path)
mediafile.albumartist = 'AAA'
mediafile.disctitle = 'DDD'
mediafile.genres = ['a', 'b', 'c']
mediafile.composer = None
mediafile.save()
out = self.run_with_output('info', path)
self.assertIn(displayable_path(path), out)
self.assertIn('albumartist: AAA', out)
self.assertIn('disctitle: DDD', out)
self.assertIn('genres: a; b; c', out)
self.assertNotIn('composer:', out)
self.remove_mediafile_fixtures()
def test_item_query(self):
item1, item2 = self.add_item_fixtures(count=2)
item1.album = 'xxxx'
item1.write()
item1.album = 'yyyy'
item1.store()
out = self.run_with_output('info', 'album:yyyy')
self.assertIn(displayable_path(item1.path), out)
self.assertIn('album: xxxx', out)
self.assertNotIn(displayable_path(item2.path), out)
def test_item_library_query(self):
item, = self.add_item_fixtures()
item.album = 'xxxx'
item.store()
out = self.run_with_output('info', '--library', 'album:xxxx')
self.assertIn(displayable_path(item.path), out)
self.assertIn('album: xxxx', out)
def test_collect_item_and_path(self):
path = self.create_mediafile_fixture()
mediafile = MediaFile(path)
item, = self.add_item_fixtures()
item.album = mediafile.album = 'AAA'
item.tracktotal = mediafile.tracktotal = 5
item.title = 'TTT'
mediafile.title = 'SSS'
item.write()
item.store()
mediafile.save()
out = self.run_with_output('info', '--summarize', 'album:AAA', path)
self.assertIn('album: AAA', out)
self.assertIn('tracktotal: 5', out)
self.assertIn('title: [various]', out)
self.remove_mediafile_fixtures()
def test_custom_format(self):
self.add_item_fixtures()
out = self.run_with_output('info', '--library', '--format',
'$track. $title - $artist ($length)')
self.assertEqual('02. tïtle 0 - the artist (0:01)\n', out)
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
import traceback
from colorama import Fore, init
init()
class Logger(object):
def __init__(self):
pass
def _print(self, color, s):
print("%s[infrabox] %s%s" % (color, s, Fore.RESET))
def log(self, s, print_header=True):
print("%s%s" % ("[infrabox] " if print_header else "", s))
def info(self, s):
self._print(Fore.BLUE, s)
def warn(self, s):
self._print(Fore.YELLOW, s)
def error(self, s):
self._print(Fore.RED, s)
def exception(self):
msg = traceback.format_exc()
self.error(msg)
logger = Logger()
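# A hedged usage sketch of the module-level logger defined above.
if __name__ == '__main__':
    logger.log('plain message with the [infrabox] header')
    logger.info('starting build')
    logger.warn('cache is cold')
    try:
        1 / 0
    except ZeroDivisionError:
        logger.exception()  # logs the full traceback in red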
|
from hypothesis import given,example
import hypothesis.strategies as st
import numpy as np
from gInteg import Integration_GaussLegendre, calculate, solve,get_param
@given(n = st.floats(1.0,5.0), flg = st.booleans())
@example(1.0,True)
def test_calculate(n,flg):
f = "1/(x**2+4)"
result = calculate(f,n,flg)
x = 0
y = 0
if flg:
x = 1/n
y = eval(f) *(-1/(n*n))
else:
x = n
y = eval(f)
assert isinstance(result, float)
assert result == y
@given(n = st.integers(1,10))
@example(2)
def test_get_param(n):
if n != 2 and n !=3:
assert get_param(n) == "error"
else:
w,x = get_param(n)
assert len(w) != 0
@given(n = st.floats(1.0,5.0), a = st.floats(1.0,5.0), b = st.floats(1.0,5.0))
@example(2,2,1e9)
def test_Integration_GaussLegendre(n,a,b):
f = "1/(x**2+4)"
result = Integration_GaussLegendre(f,n,a,b)
assert isinstance(result,float)
if __name__=="__main__":
test_get_param()
test_calculate()
test_Integration_GaussLegendre()
|
import pandas as pd
import numpy as np
import scipy.sparse
import sklearn
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction import DictVectorizer
from sklearn.linear_model import Ridge
"""
Load the data on job descriptions and the corresponding yearly salaries from salary-train.csv
(or its zipped version salary-train.zip).
Preprocess it:
Lower-case the texts (text.lower()).
Replace everything except letters and digits with spaces -- this makes the later splitting of the text into words easier.
For such a replacement in a string text the following call works: re.sub('[^a-zA-Z0-9]', ' ', text).
You can also use the DataFrame replace method to transform all the texts at once.
Apply TfidfVectorizer to turn the texts into feature vectors. Keep only the words
that occur in at least 5 objects (the min_df parameter of TfidfVectorizer).
Replace the missing values in the LocationNormalized and ContractTime columns with the special string 'nan'.
The code for this was given above.
Apply DictVectorizer to one-hot encode the LocationNormalized and ContractTime features.
Combine all obtained features into a single object-feature matrix. Note that the matrices for the texts and
the categorical features are sparse; to stack their columns use
scipy.sparse.hstack.
3. Train a ridge regression with the parameters alpha=1 and random_state=241.
The target variable is stored in the SalaryNormalized column.
4. Make predictions for the two examples from salary-test-mini.csv.
The predicted values are the answer to the assignment. Write them separated by a space.
"""
data = pd.read_csv('/2019/data-out/salary-train.csv')
data_test = pd.read_csv('/data-out/salary-test-mini.csv')
data['FullDescription'] = data['FullDescription'].str.lower().replace('[^a-zA-Z0-9]', ' ', regex=True)
data['LocationNormalized'].fillna('nan', inplace=True)
data['ContractTime'].fillna('nan', inplace=True)
y_train = data['SalaryNormalized']
x_train = data[['FullDescription', 'LocationNormalized', 'ContractTime']]
y_test = data_test['SalaryNormalized']
x_test = data_test[['FullDescription', 'LocationNormalized', 'ContractTime']]
vectorizer = TfidfVectorizer(min_df=5)  # keep only words that appear in at least 5 descriptions
scaled = vectorizer.fit_transform(data['FullDescription'])
test_scaled = vectorizer.transform(data_test['FullDescription'])
enc = DictVectorizer()
X_train_categ = enc.fit_transform(data[['LocationNormalized', 'ContractTime']].to_dict('records'))
X_test_categ = enc.transform(data_test[['LocationNormalized', 'ContractTime']].to_dict('records'))
train_m = scipy.sparse.hstack((scaled, X_train_categ))
test_m = scipy.sparse.hstack((test_scaled, X_test_categ))
clf = Ridge(random_state=241, alpha=1)
clf.fit(train_m, y_train)
anw = clf.predict(test_m)
print(anw)
anw = '{:.2f} {:.2f}'.format(anw[0], anw[1])  # the two test-set predictions, space-separated
with open('/data-out/LinearRegression.txt', 'w') as f:
    f.write(anw)
|
import util  # must be in the same directory to be imported
print("1 inch = ", util.INCH, "cm") #1 inch = 2.54 cm
print("1 ~ 10까지의 합계:", util.sum(10)) #1 ~ 10까지의 합계: 55
import sys
print(sys.path)  # the module search directories, as strings
"""
['c:\\workspace\\Day0605',
'C:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python38-32\\python38.zip',
'C:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python38-32\\DLLs',
'C:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python38-32\\lib',
'C:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python38-32',
'C:\\Users\\user\\AppData\\Roaming\\Python\\Python38\\site-packages',
'C:\\Users\\user\\AppData\\Local\\Programs\\Python\\Python38-32\\lib\\site-packages']
c:\\workspace\\Day0605 shows up as the first path
"""
sys.path.append("C:\\workspace\\Day0604") #다른 경로에 있는 모듈 가져오기
# import Test02
"""
1591330666.4857507
Fri Jun 5 13:17:46 2020
time.struct_time(tm_year=2020, tm_mon=6, tm_mday=5, tm_hour=13, tm_min=17, tm_sec=46, tm_wday=4, tm_yday=157, tm_isdst=0)
time.struct_time(tm_year=2020, tm_mon=6, tm_mday=5, tm_hour=4, tm_min=17, tm_sec=46, tm_wday=4, tm_yday=157, tm_isdst=0)
"""
#Test02.__name__: Test02
sys.path.append("C:\\workspace\\Day0605\\testpack")
import testpack.dir1.mod1 as mod1
mod1.func1()  # I am mod1.
from testpack.dir2 import mod2  # import from a specific directory
mod2.func2()  # I am mod2.
|
print("quelle année ? : ", end="")
annee = int(input())
if annee % 4 == 0 :
print("année bissextile")
if annee % 100 == 0:
print("année normale")
if annee % 400 == 0:
print("année bissextile")
else:
print("année normale")
|
from multiaddr.address import *
|
"""
MIT License
Copyright (c) 2018 Max Planck Institute of Molecular Physiology
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import setuptools # type: ignore
CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(CURRENT_DIR, 'README.md'), encoding='utf-8') as f:
LONG_DESCRIPTION = f.read()
setuptools.setup(
name='transphire_transform',
version='0.0.3',
    description='Utilities to convert between SPHIRE and RELION formats and to modify the data',
long_description=LONG_DESCRIPTION,
url='https://github.com/MPI-Dortmund/transphire_transform',
author='Markus Stabrin',
author_email='markus.stabrin@tu-dortmund.de',
license='MIT',
packages=setuptools.find_packages(exclude=[]),
include_package_data=True,
entry_points={
'console_scripts': [
]
},
install_requires=[
'pandas>=0.23.0,<0.24.0',
'mrcfile>=1.0.4,<1.1.0',
],
python_requires='~=3.7',
classifiers=(
'Programming Language :: Python :: 3.7',
'License :: OSI Approved',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Development Status :: 4 - Beta'
),
)
|
import cv2
import docdetect
import numpy as np
import sys
import urllib.request
url='http://172.16.50.57:8080/shot.jpg'
# video = cv2.VideoCapture(video_path)
cv2.startWindowThread()
cv2.namedWindow('output')
model = sys.argv[2]
# print (model)
edge_detection = cv2.ximgproc.createStructuredEdgeDetection(model)
while True:
imgResp=urllib.request.urlopen(url)
imgNp=np.array(bytearray(imgResp.read()),dtype=np.uint8)
frame=cv2.imdecode(imgNp,-1)
# ret, frame = video.read()
rects = docdetect.process(frame, edge_detection)
frame = docdetect.draw(rects, frame)
cv2.imshow('output', frame)
cv2.waitKey(1)
# video.release()  # no VideoCapture is open; frames come from the IP-camera URL above
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from dataclasses import dataclass
from pants.backend.go.subsystems.gotest import GoTestSubsystem
from pants.backend.go.util_rules import coverage_html
from pants.backend.go.util_rules.coverage import GoCoverageData
from pants.backend.go.util_rules.coverage_html import (
RenderGoCoverageProfileToHtmlRequest,
RenderGoCoverageProfileToHtmlResult,
)
from pants.core.goals.test import CoverageDataCollection, CoverageReports, FilesystemCoverageReport
from pants.core.util_rules import distdir
from pants.core.util_rules.distdir import DistDir
from pants.engine.engine_aware import EngineAwareParameter
from pants.engine.fs import CreateDigest, DigestContents, FileContent
from pants.engine.internals.native_engine import Digest, Snapshot
from pants.engine.internals.selectors import Get, MultiGet
from pants.engine.rules import collect_rules, rule
from pants.engine.unions import UnionRule
from pants.util.logging import LogLevel
class GoCoverageDataCollection(CoverageDataCollection):
element_type = GoCoverageData
@dataclass(frozen=True)
class RenderGoCoverageReportRequest(EngineAwareParameter):
raw_report: GoCoverageData
def debug_hint(self) -> str | None:
return self.raw_report.import_path
@dataclass(frozen=True)
class RenderGoCoverageReportResult:
coverage_report: FilesystemCoverageReport
html_report: FilesystemCoverageReport | None = None
@rule
async def go_render_coverage_report(
request: RenderGoCoverageReportRequest,
distdir_value: DistDir,
go_test_subsystem: GoTestSubsystem,
) -> RenderGoCoverageReportResult:
output_dir = go_test_subsystem.coverage_output_dir(
distdir=distdir_value,
address=request.raw_report.pkg_target_address,
import_path=request.raw_report.import_path,
)
snapshot, digest_contents = await MultiGet(
Get(Snapshot, Digest, request.raw_report.coverage_digest),
Get(DigestContents, Digest, request.raw_report.coverage_digest),
)
html_coverage_report: FilesystemCoverageReport | None = None
if go_test_subsystem.coverage_html:
html_result = await Get(
RenderGoCoverageProfileToHtmlResult,
RenderGoCoverageProfileToHtmlRequest(
raw_coverage_profile=digest_contents[0].content,
description_of_origin=f"Go package with import path `{request.raw_report.import_path}`",
sources_digest=request.raw_report.sources_digest,
sources_dir_path=request.raw_report.sources_dir_path,
),
)
html_report_snapshot = await Get(
Snapshot,
CreateDigest(
[
FileContent(
path="coverage.html",
content=html_result.html_output,
)
]
),
)
html_coverage_report = FilesystemCoverageReport(
coverage_insufficient=False,
result_snapshot=html_report_snapshot,
directory_to_materialize_to=output_dir,
report_file=output_dir / "coverage.html",
report_type="go_cover_html",
)
coverage_report = FilesystemCoverageReport(
coverage_insufficient=False,
result_snapshot=snapshot,
directory_to_materialize_to=output_dir,
report_file=output_dir / "cover.out",
report_type="go_cover",
)
return RenderGoCoverageReportResult(
coverage_report=coverage_report,
html_report=html_coverage_report,
)
@rule(desc="Merge Go coverage data", level=LogLevel.DEBUG)
async def go_gather_coverage_reports(
raw_coverage_reports: GoCoverageDataCollection,
) -> CoverageReports:
coverage_report_results = await MultiGet(
Get(
RenderGoCoverageReportResult,
RenderGoCoverageReportRequest(
raw_report=raw_coverage_report,
),
)
for raw_coverage_report in raw_coverage_reports
)
coverage_reports = []
for result in coverage_report_results:
coverage_reports.append(result.coverage_report)
if result.html_report:
coverage_reports.append(result.html_report)
return CoverageReports(reports=tuple(coverage_reports))
def rules():
return (
*collect_rules(),
*coverage_html.rules(),
*distdir.rules(),
UnionRule(CoverageDataCollection, GoCoverageDataCollection),
)
|