def addi():
    a = int(input("Bitte die erste Zahl eingeben: "))
    b = int(input("Bitte die zu addierende Zahl eingeben: "))
    c = a + b
    print(c)


def subbi():
    a = int(input("Bitte die Zahl eingeben von der subtrahiert werden soll: "))
    b = int(input("Bitte die Zahl eingeben die subtrahiert werden soll: "))
    c = a - b
    print(c)


def multipli():
    a = int(input("Bitte die erste Zahl eingeben: "))
    b = int(input("Bitte die Zahl eingeben mit der multipliziert werden soll: "))
    c = a * b
    print(c)


def dividi():
    a = int(input("Bitte die Zahl eingeben die geteilt werden soll: "))
    b = int(input("Bitte die Zahl eingeben mit der geteilt werden soll: "))
    c = a / b
    print(c)


def rechner():
    print("Hallo Michaela :-) ")
    print("Herzlich Willkommen zum Taschenrechner. Bitte wähle aus was Du rechnen lassen willst: ")
    print("Gebe addition, subtraktion, multiplikation oder dividieren ein: ")
    eingabe = input()
    if eingabe == "addition":
        addi()
    elif eingabe == "subtraktion":
        subbi()
    elif eingabe == "multiplikation":
        multipli()
    elif eingabe == "dividieren":
        dividi()
    else:
        print("Du Vollhohnk kannst ja nicht mal eine der Grundrechenarten richtig schreiben ;-P")


playAgain = "ja"
while playAgain == "ja" or playAgain == "j":
    rechner()
    print("Möchtest Du noch etwas rechnen: ja oder nein?")
    playAgain = input()
|
from django.contrib import admin

from .models import Post, PostTag, TaggedPost

# Register your models here.
admin.site.register(Post)
admin.site.register(TaggedPost)
admin.site.register(PostTag)
|
# Friends, our master assignment is this:
# In a loop, generate two random 10x10 matrices and take their difference.
# Repeat until every diagonal element of the difference matrix lies between -0.1 and 0.1.
# When the desired matrix is found, stop the program and print the total number of
# iterations, how long it took, and the matrix itself.
# Tip: finding a 10x10 matrix can take hours, so first test your algorithm on smaller
# matrices such as 4x4 or 5x5 and only then scale up to 10x10.
# You can generate the matrices with np.random.random((a, b)) (up to you). (edited)
import numpy as np
import datetime as dt

n1 = dt.datetime.now()
sayac = 0
while True:
    a = np.round(np.random.rand(10, 10), 1)
    b = np.round(np.random.rand(10, 10), 1)
    fark = a - b
    print("------------------------------------------------------------------")
    print("a matrisi")
    print(a)
    print("b matrisi")
    print(b)
    print("fark matrisi")
    print(fark)
    diagonal = np.diag(fark)
    print("diagonal matris")
    print(diagonal)
    sayac += 1
    # Stop once every diagonal element of the difference lies between -0.1 and 0.1.
    if (np.abs(diagonal) <= 0.1).all():
        print("---------------------ISTENEN MATRIS----------------------------")
        print(fark)
        print(sayac, "turda tamamlandi")
        n2 = dt.datetime.now()
        print(n2 - n1, "zamanda tamamlandi")
        break
|
import time
start = time.time()
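# A minimal sketch of how this timing fragment is typically completed; the
# do_work() placeholder below is hypothetical and not part of the original snippet.
def do_work():
    pass  # the operation being timed would go here

do_work()
elapsed = time.time() - start
print(f"elapsed: {elapsed:.3f} s")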
|
"""
# 今までの書き方
from dataclasses import *
@dataclass
class Card:
suit: str
rank: int
def print_card(card):
print(f"{card.suit}の{card.rank}")
card = Card("heart", 10)
print_card(card)
"""
"""
# メソッドを用いた書き方
from dataclasses import *
@dataclass
class Card:
suit: str
rank: int
def print_card(self):
print(f"{self.suit}の{self.rank}")
card = Card("heart", 10)
card.print_card()
"""
"""
# printなど、もともとある関数名を使えるようになる
from dataclasses import *
@dataclass
class Card:
suit: str
rank: int
def print(self):
print(f"{self.suit}の{self.rank}")
card = Card("heart", 10)
card.print()
"""
"""
# 引数が1つで書ける
from dataclasses import *
@dataclass
class Card:
suit: str
rank: int
def print(self,count):
for x in range(count):
print(f"{self.suit}の{self.rank}")
card = Card("heart", 10)
card.print(10)
"""
"""
# コンストラクタ
class Card:
def __init__(self,suit,rank):
self.suit = suit
self.rank = rank
def print_card(card):
print(f"{card.suit}の{card.rank}")
card = Card("haert",10)
card.print_card()
"""
"""
# ポリモーフィズム
from dataclasses import *
@dataclass
class Square:
width: float
height:float
def area(self):
return self.width * self.height
@dataclass
class Circle:
radius: float
def area(self):
return 3.14 * self.radius * self.radius
shapes = [Square(3,4),Circle(5)]
for shape in shapes:
print(f"{shape}の面積:{shape.area()}")
"""
# プロトコル
from dataclasses import *
@dataclass
class Card:
suit: str
rank: int
def __str__(self):
return f"{self.suit}の{self.rank}"
card = Card("spade",1)
print(f"カード:{card}")
#
|
from __future__ import absolute_import
default_app_config = 'app.groups.apps.GroupsConfig'
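# A minimal sketch (assumed, not part of this file) of the apps.py module that the
# dotted path above points to; the verbose_name value is hypothetical.
#
# from django.apps import AppConfig
#
# class GroupsConfig(AppConfig):
#     name = 'app.groups'
#     verbose_name = 'Groups'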
|
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 22 08:52:13 2021

@author: User
"""
import matplotlib.pyplot as plt
# plt.style.use('ggplot')
from glob import glob
import os
import numpy as np
import pandas as pd

os.chdir(r'C:\Users\User\Documents\08_publications\20210220_ijms\figure4')
datadir = '../data/figure4/'

#%%%%%%%%%% Common variables %%%%%%%%%%%%%%
def generate_clist(NUM_COLORS):
    import pylab
    cm = pylab.get_cmap('cool')
    color = [cm(1 - 1. * i / NUM_COLORS) for i in range(NUM_COLORS)]
    return color

#%% Figure 4A 10 POPE
spectra = ["20210302_TK_BLAC_30_10_MS_0_ms.txt",
           "20210302_TK_C8E4_15_POPE_BLAC_30_10_NB_M2_0_ms.txt",
           "20210302_TK_LDAO_15_POPE_BLAC_30_10_NB_M1_0_ms.txt",
           "20210302_TK_OG_15_POPE_BLAC_30_10_1CMC_NB_M1_0_ms.txt"]
colors = generate_clist(len(spectra))
fig, axs = plt.subplots(len(spectra), 1, figsize=(6, 4), squeeze=True, frameon=False, sharex=True)
for ib, s in enumerate(spectra):
    mz, intens = np.genfromtxt(datadir + s, delimiter=' ', unpack=True)
    intens = 100 * intens / intens[mz > 1700].max()  # relative intensity
    l = axs[ib].plot(mz, intens, color=colors[ib], label=s)
    axs[ib].legend()
    axs[ib].set_ylim([0, 110])
    axs[ib].set_xlim([1700, 4500])
plt.savefig(os.path.join('figure4.pdf'))
plt.show()

#%% Figure 4B: 20 POPE
spectra = ["20210302_TK_BLAC_30_10_MS_0_ms.txt",
           "20210302_TK_C8E4_25_POPE_BLAC_30_10_NB_M2_0_ms.txt",
           "20210302_TK_LDAO_25_POPE_BLAC_30_10_M1_0_ms.txt",
           "20210302_TK_OG_25_POPE_BLAC_30_10_1CMC_NB_M1_0_ms.txt"]
colors = generate_clist(len(spectra))
fig, axs = plt.subplots(len(spectra), 1, figsize=(6, 4), squeeze=True, frameon=False, sharex=True)
for ib, s in enumerate(spectra):
    mz, intens = np.genfromtxt(datadir + s, delimiter=' ', unpack=True)
    intens = 100 * intens / intens[mz > 1700].max()  # relative intensity
    l = axs[ib].plot(mz, intens, color=colors[ib], label=s)
    axs[ib].legend()
    axs[ib].set_ylim([0, 110])
    axs[ib].set_xlim([1700, 4500])
plt.savefig(os.path.join('figure4B.pdf'))
plt.show()

#%% Figure 4C part1
fig, ax = plt.subplots(1, 1, figsize=(2, 2))
spectra = ["20210302_TK_C8E4_10_POPE_BLAC_30_10_NB_MS2_0_ms.txt",
           "20210302_TK_C8E4_15_POPE_BLAC_30_10_NB_M2_0_ms.txt",
           "20210302_TK_C8E4_25_POPE_BLAC_30_10_NB_M2_0_ms.txt",
           "20210302_TK_C8E4_50_POPE_BLAC_30_10_NB_M2_0_ms.txt"]
for ib, s in enumerate(spectra):
    mz, intens = np.genfromtxt(datadir + s, delimiter=' ', unpack=True)
    intens = 100 * intens / intens[mz > 3500].max()  # relative intensity
    l = ax.plot(mz, intens, color=colors[ib], label=s)
    ax.set_ylim([0, 110])
    ax.set_xlim([3500, 4500])
plt.savefig(os.path.join('figure4C-1.pdf'))
plt.show()

#%% Figure 4C part2
fig, ax = plt.subplots(1, 1, figsize=(2, 2))
spectra = ["20210302_TK_LDAO_10_POPE_BLAC_30_10_NB_M1_0_ms.txt",
           "20210302_TK_LDAO_15_POPE_BLAC_30_10_NB_M1_0_ms.txt",
           "20210302_TK_LDAO_25_POPE_BLAC_30_10_NB_M1_0_ms.txt",
           "20210302_TK_LDAO_50_POPE_BLAC_30_10_M2_0_ms.txt"]
for ib, s in enumerate(spectra):
    mz, intens = np.genfromtxt(datadir + s, delimiter=' ', unpack=True)
    intens = 100 * intens / intens[mz > 3500].max()  # relative intensity
    l = ax.plot(mz, intens, color=colors[ib], label=s)
    ax.set_ylim([0, 110])
    ax.set_xlim([3500, 4500])
plt.savefig(os.path.join('figure4C-2.pdf'))
plt.show()

#%% Figure 4C part3
fig, ax = plt.subplots(1, 1, figsize=(2, 2))
spectra = ["20210302_TK_OG_10_POPE_BLAC_30_10_1CMC_NB_M1_0_ms.txt",
           "20210302_TK_OG_15_POPE_BLAC_30_10_1CMC_NB_M1_0_ms.txt",
           "20210302_TK_OG_25_POPE_BLAC_30_10_1CMC_NB_M1_0_ms.txt",
           "20210302_TK_OG_50_POPE_BLAC_30_10_1cmc_NB_M1_0_ms.txt"]
for ib, s in enumerate(spectra):
    mz, intens = np.genfromtxt(datadir + s, delimiter=' ', unpack=True)
    intens = 100 * intens / intens[mz > 3500].max()  # relative intensity
    l = ax.plot(mz, intens, color=colors[ib], label=s)
    ax.set_ylim([0, 110])
    ax.set_xlim([3500, 4500])
plt.savefig(os.path.join('figure4C-3.pdf'))
plt.show()
|
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the command-line interface.
"""
import os
import shutil
import re
import subprocess
import platform
import sys
import unittest
from unittest.mock import patch, Mock
from test import _common
from test.helper import capture_stdout, has_program, TestHelper, control_stdin
from beets import library
from beets import ui
from beets.ui import commands
from beets import autotag
from beets.autotag.match import distance
from mediafile import MediaFile
from beets import config
from beets import plugins
from confuse import ConfigError
from beets import util
from beets.util import syspath, MoveOperation
class ListTest(unittest.TestCase):
def setUp(self):
self.lib = library.Library(':memory:')
self.item = _common.item()
self.item.path = 'xxx/yyy'
self.lib.add(self.item)
self.lib.add_album([self.item])
def _run_list(self, query='', album=False, path=False, fmt=''):
with capture_stdout() as stdout:
commands.list_items(self.lib, query, album, fmt)
return stdout
def test_list_outputs_item(self):
stdout = self._run_list()
self.assertIn('the title', stdout.getvalue())
def test_list_unicode_query(self):
self.item.title = 'na\xefve'
self.item.store()
self.lib._connection().commit()
stdout = self._run_list(['na\xefve'])
out = stdout.getvalue()
self.assertTrue('na\xefve' in out)
def test_list_item_path(self):
stdout = self._run_list(fmt='$path')
self.assertEqual(stdout.getvalue().strip(), 'xxx/yyy')
def test_list_album_outputs_something(self):
stdout = self._run_list(album=True)
self.assertGreater(len(stdout.getvalue()), 0)
def test_list_album_path(self):
stdout = self._run_list(album=True, fmt='$path')
self.assertEqual(stdout.getvalue().strip(), 'xxx')
def test_list_album_omits_title(self):
stdout = self._run_list(album=True)
self.assertNotIn('the title', stdout.getvalue())
def test_list_uses_track_artist(self):
stdout = self._run_list()
self.assertIn('the artist', stdout.getvalue())
self.assertNotIn('the album artist', stdout.getvalue())
def test_list_album_uses_album_artist(self):
stdout = self._run_list(album=True)
self.assertNotIn('the artist', stdout.getvalue())
self.assertIn('the album artist', stdout.getvalue())
def test_list_item_format_artist(self):
stdout = self._run_list(fmt='$artist')
self.assertIn('the artist', stdout.getvalue())
def test_list_item_format_multiple(self):
stdout = self._run_list(fmt='$artist - $album - $year')
self.assertEqual('the artist - the album - 0001',
stdout.getvalue().strip())
def test_list_album_format(self):
stdout = self._run_list(album=True, fmt='$genre')
self.assertIn('the genre', stdout.getvalue())
self.assertNotIn('the album', stdout.getvalue())
class RemoveTest(_common.TestCase, TestHelper):
def setUp(self):
super().setUp()
self.io.install()
self.libdir = os.path.join(self.temp_dir, b'testlibdir')
os.mkdir(syspath(self.libdir))
# Copy a file into the library.
self.lib = library.Library(':memory:', self.libdir)
self.item_path = os.path.join(_common.RSRC, b'full.mp3')
self.i = library.Item.from_path(self.item_path)
self.lib.add(self.i)
self.i.move(operation=MoveOperation.COPY)
def test_remove_items_no_delete(self):
self.io.addinput('y')
commands.remove_items(self.lib, '', False, False, False)
items = self.lib.items()
self.assertEqual(len(list(items)), 0)
self.assertExists(self.i.path)
def test_remove_items_with_delete(self):
self.io.addinput('y')
commands.remove_items(self.lib, '', False, True, False)
items = self.lib.items()
self.assertEqual(len(list(items)), 0)
self.assertNotExists(self.i.path)
def test_remove_items_with_force_no_delete(self):
commands.remove_items(self.lib, '', False, False, True)
items = self.lib.items()
self.assertEqual(len(list(items)), 0)
self.assertExists(self.i.path)
def test_remove_items_with_force_delete(self):
commands.remove_items(self.lib, '', False, True, True)
items = self.lib.items()
self.assertEqual(len(list(items)), 0)
self.assertNotExists(self.i.path)
def test_remove_items_select_with_delete(self):
i2 = library.Item.from_path(self.item_path)
self.lib.add(i2)
i2.move(operation=MoveOperation.COPY)
for s in ('s', 'y', 'n'):
self.io.addinput(s)
commands.remove_items(self.lib, '', False, True, False)
items = self.lib.items()
self.assertEqual(len(list(items)), 1)
# There is probably no guarantee that the items are queried in any
# specific order, thus just ensure that exactly one was removed.
# To improve upon this, self.io would need to have the capability to
# generate input that depends on previous output.
num_existing = 0
num_existing += 1 if os.path.exists(syspath(self.i.path)) else 0
num_existing += 1 if os.path.exists(syspath(i2.path)) else 0
self.assertEqual(num_existing, 1)
def test_remove_albums_select_with_delete(self):
a1 = self.add_album_fixture()
a2 = self.add_album_fixture()
path1 = a1.items()[0].path
path2 = a2.items()[0].path
items = self.lib.items()
self.assertEqual(len(list(items)), 3)
for s in ('s', 'y', 'n'):
self.io.addinput(s)
commands.remove_items(self.lib, '', True, True, False)
items = self.lib.items()
self.assertEqual(len(list(items)), 2) # incl. the item from setUp()
# See test_remove_items_select_with_delete()
num_existing = 0
num_existing += 1 if os.path.exists(syspath(path1)) else 0
num_existing += 1 if os.path.exists(syspath(path2)) else 0
self.assertEqual(num_existing, 1)
class ModifyTest(unittest.TestCase, TestHelper):
def setUp(self):
self.setup_beets()
self.album = self.add_album_fixture()
[self.item] = self.album.items()
def tearDown(self):
self.teardown_beets()
def modify_inp(self, inp, *args):
with control_stdin(inp):
self.run_command('modify', *args)
def modify(self, *args):
self.modify_inp('y', *args)
# Item tests
def test_modify_item(self):
self.modify("title=newTitle")
item = self.lib.items().get()
self.assertEqual(item.title, 'newTitle')
def test_modify_item_abort(self):
item = self.lib.items().get()
title = item.title
self.modify_inp('n', "title=newTitle")
item = self.lib.items().get()
self.assertEqual(item.title, title)
def test_modify_item_no_change(self):
title = "Tracktitle"
item = self.add_item_fixture(title=title)
self.modify_inp('y', "title", f"title={title}")
item = self.lib.items(title).get()
self.assertEqual(item.title, title)
def test_modify_write_tags(self):
self.modify("title=newTitle")
item = self.lib.items().get()
item.read()
self.assertEqual(item.title, 'newTitle')
def test_modify_dont_write_tags(self):
self.modify("--nowrite", "title=newTitle")
item = self.lib.items().get()
item.read()
self.assertNotEqual(item.title, 'newTitle')
def test_move(self):
self.modify("title=newTitle")
item = self.lib.items().get()
self.assertIn(b'newTitle', item.path)
def test_not_move(self):
self.modify("--nomove", "title=newTitle")
item = self.lib.items().get()
self.assertNotIn(b'newTitle', item.path)
def test_no_write_no_move(self):
self.modify("--nomove", "--nowrite", "title=newTitle")
item = self.lib.items().get()
item.read()
self.assertNotIn(b'newTitle', item.path)
self.assertNotEqual(item.title, 'newTitle')
def test_update_mtime(self):
item = self.item
old_mtime = item.mtime
self.modify("title=newTitle")
item.load()
self.assertNotEqual(old_mtime, item.mtime)
self.assertEqual(item.current_mtime(), item.mtime)
def test_reset_mtime_with_no_write(self):
item = self.item
self.modify("--nowrite", "title=newTitle")
item.load()
self.assertEqual(0, item.mtime)
def test_selective_modify(self):
title = "Tracktitle"
album = "album"
original_artist = "composer"
new_artist = "coverArtist"
for i in range(0, 10):
self.add_item_fixture(title=f"{title}{i}",
artist=original_artist,
album=album)
self.modify_inp('s\ny\ny\ny\nn\nn\ny\ny\ny\ny\nn',
title, f"artist={new_artist}")
original_items = self.lib.items(f"artist:{original_artist}")
new_items = self.lib.items(f"artist:{new_artist}")
self.assertEqual(len(list(original_items)), 3)
self.assertEqual(len(list(new_items)), 7)
def test_modify_formatted(self):
for i in range(0, 3):
self.add_item_fixture(title=f"title{i}",
artist="artist",
album="album")
items = list(self.lib.items())
self.modify("title=${title} - append")
for item in items:
orig_title = item.title
item.load()
self.assertEqual(item.title, f"{orig_title} - append")
# Album Tests
def test_modify_album(self):
self.modify("--album", "album=newAlbum")
album = self.lib.albums().get()
self.assertEqual(album.album, 'newAlbum')
def test_modify_album_write_tags(self):
self.modify("--album", "album=newAlbum")
item = self.lib.items().get()
item.read()
self.assertEqual(item.album, 'newAlbum')
def test_modify_album_dont_write_tags(self):
self.modify("--album", "--nowrite", "album=newAlbum")
item = self.lib.items().get()
item.read()
self.assertEqual(item.album, 'the album')
def test_album_move(self):
self.modify("--album", "album=newAlbum")
item = self.lib.items().get()
item.read()
self.assertIn(b'newAlbum', item.path)
def test_album_not_move(self):
self.modify("--nomove", "--album", "album=newAlbum")
item = self.lib.items().get()
item.read()
self.assertNotIn(b'newAlbum', item.path)
def test_modify_album_formatted(self):
item = self.lib.items().get()
orig_album = item.album
self.modify("--album", "album=${album} - append")
item.load()
self.assertEqual(item.album, f"{orig_album} - append")
# Misc
def test_write_initial_key_tag(self):
self.modify("initial_key=C#m")
item = self.lib.items().get()
mediafile = MediaFile(syspath(item.path))
self.assertEqual(mediafile.initial_key, 'C#m')
def test_set_flexattr(self):
self.modify("flexattr=testAttr")
item = self.lib.items().get()
self.assertEqual(item.flexattr, 'testAttr')
def test_remove_flexattr(self):
item = self.lib.items().get()
item.flexattr = 'testAttr'
item.store()
self.modify("flexattr!")
item = self.lib.items().get()
self.assertNotIn("flexattr", item)
@unittest.skip('not yet implemented')
def test_delete_initial_key_tag(self):
item = self.lib.items().get()
item.initial_key = 'C#m'
item.write()
item.store()
mediafile = MediaFile(syspath(item.path))
self.assertEqual(mediafile.initial_key, 'C#m')
self.modify("initial_key!")
mediafile = MediaFile(syspath(item.path))
self.assertIsNone(mediafile.initial_key)
def test_arg_parsing_colon_query(self):
(query, mods, dels) = commands.modify_parse_args(["title:oldTitle",
"title=newTitle"])
self.assertEqual(query, ["title:oldTitle"])
self.assertEqual(mods, {"title": "newTitle"})
def test_arg_parsing_delete(self):
(query, mods, dels) = commands.modify_parse_args(["title:oldTitle",
"title!"])
self.assertEqual(query, ["title:oldTitle"])
self.assertEqual(dels, ["title"])
def test_arg_parsing_query_with_exclaimation(self):
(query, mods, dels) = commands.modify_parse_args(["title:oldTitle!",
"title=newTitle!"])
self.assertEqual(query, ["title:oldTitle!"])
self.assertEqual(mods, {"title": "newTitle!"})
def test_arg_parsing_equals_in_value(self):
(query, mods, dels) = commands.modify_parse_args(["title:foo=bar",
"title=newTitle"])
self.assertEqual(query, ["title:foo=bar"])
self.assertEqual(mods, {"title": "newTitle"})
class WriteTest(unittest.TestCase, TestHelper):
def setUp(self):
self.setup_beets()
def tearDown(self):
self.teardown_beets()
def write_cmd(self, *args):
return self.run_with_output('write', *args)
def test_update_mtime(self):
item = self.add_item_fixture()
item['title'] = 'a new title'
item.store()
item = self.lib.items().get()
self.assertEqual(item.mtime, 0)
self.write_cmd()
item = self.lib.items().get()
self.assertEqual(item.mtime, item.current_mtime())
def test_non_metadata_field_unchanged(self):
"""Changing a non-"tag" field like `bitrate` and writing should
have no effect.
"""
# An item that starts out "clean".
item = self.add_item_fixture()
item.read()
# ... but with a mismatched bitrate.
item.bitrate = 123
item.store()
output = self.write_cmd()
self.assertEqual(output, '')
def test_write_metadata_field(self):
item = self.add_item_fixture()
item.read()
old_title = item.title
item.title = 'new title'
item.store()
output = self.write_cmd()
self.assertTrue(f'{old_title} -> new title'
in output)
class MoveTest(_common.TestCase):
def setUp(self):
super().setUp()
self.io.install()
self.libdir = os.path.join(self.temp_dir, b'testlibdir')
os.mkdir(syspath(self.libdir))
self.itempath = os.path.join(self.libdir, b'srcfile')
shutil.copy(
syspath(os.path.join(_common.RSRC, b'full.mp3')),
syspath(self.itempath),
)
# Add a file to the library but don't copy it in yet.
self.lib = library.Library(':memory:', self.libdir)
self.i = library.Item.from_path(self.itempath)
self.lib.add(self.i)
self.album = self.lib.add_album([self.i])
# Alternate destination directory.
self.otherdir = os.path.join(self.temp_dir, b'testotherdir')
def _move(self, query=(), dest=None, copy=False, album=False,
pretend=False, export=False):
commands.move_items(self.lib, dest, query, copy, album, pretend,
export=export)
def test_move_item(self):
self._move()
self.i.load()
self.assertTrue(b'testlibdir' in self.i.path)
self.assertExists(self.i.path)
self.assertNotExists(self.itempath)
def test_copy_item(self):
self._move(copy=True)
self.i.load()
self.assertTrue(b'testlibdir' in self.i.path)
self.assertExists(self.i.path)
self.assertExists(self.itempath)
def test_move_album(self):
self._move(album=True)
self.i.load()
self.assertTrue(b'testlibdir' in self.i.path)
self.assertExists(self.i.path)
self.assertNotExists(self.itempath)
def test_copy_album(self):
self._move(copy=True, album=True)
self.i.load()
self.assertTrue(b'testlibdir' in self.i.path)
self.assertExists(self.i.path)
self.assertExists(self.itempath)
def test_move_item_custom_dir(self):
self._move(dest=self.otherdir)
self.i.load()
self.assertTrue(b'testotherdir' in self.i.path)
self.assertExists(self.i.path)
self.assertNotExists(self.itempath)
def test_move_album_custom_dir(self):
self._move(dest=self.otherdir, album=True)
self.i.load()
self.assertTrue(b'testotherdir' in self.i.path)
self.assertExists(self.i.path)
self.assertNotExists(self.itempath)
def test_pretend_move_item(self):
self._move(dest=self.otherdir, pretend=True)
self.i.load()
self.assertIn(b'srcfile', self.i.path)
def test_pretend_move_album(self):
self._move(album=True, pretend=True)
self.i.load()
self.assertIn(b'srcfile', self.i.path)
def test_export_item_custom_dir(self):
self._move(dest=self.otherdir, export=True)
self.i.load()
self.assertEqual(self.i.path, self.itempath)
self.assertExists(self.otherdir)
def test_export_album_custom_dir(self):
self._move(dest=self.otherdir, album=True, export=True)
self.i.load()
self.assertEqual(self.i.path, self.itempath)
self.assertExists(self.otherdir)
def test_pretend_export_item(self):
self._move(dest=self.otherdir, pretend=True, export=True)
self.i.load()
self.assertIn(b'srcfile', self.i.path)
self.assertNotExists(self.otherdir)
class UpdateTest(_common.TestCase):
def setUp(self):
super().setUp()
self.io.install()
self.libdir = os.path.join(self.temp_dir, b'testlibdir')
# Copy a file into the library.
self.lib = library.Library(':memory:', self.libdir)
item_path = os.path.join(_common.RSRC, b'full.mp3')
item_path_two = os.path.join(_common.RSRC, b'full.flac')
self.i = library.Item.from_path(item_path)
self.i2 = library.Item.from_path(item_path_two)
self.lib.add(self.i)
self.lib.add(self.i2)
self.i.move(operation=MoveOperation.COPY)
self.i2.move(operation=MoveOperation.COPY)
self.album = self.lib.add_album([self.i, self.i2])
# Album art.
artfile = os.path.join(self.temp_dir, b'testart.jpg')
_common.touch(artfile)
self.album.set_art(artfile)
self.album.store()
util.remove(artfile)
def _update(self, query=(), album=False, move=False, reset_mtime=True,
fields=None):
self.io.addinput('y')
if reset_mtime:
self.i.mtime = 0
self.i.store()
commands.update_items(self.lib, query, album, move, False,
fields=fields)
def test_delete_removes_item(self):
self.assertTrue(list(self.lib.items()))
util.remove(self.i.path)
util.remove(self.i2.path)
self._update()
self.assertFalse(list(self.lib.items()))
def test_delete_removes_album(self):
self.assertTrue(self.lib.albums())
util.remove(self.i.path)
util.remove(self.i2.path)
self._update()
self.assertFalse(self.lib.albums())
def test_delete_removes_album_art(self):
artpath = self.album.artpath
self.assertExists(artpath)
util.remove(self.i.path)
util.remove(self.i2.path)
self._update()
self.assertNotExists(artpath)
def test_modified_metadata_detected(self):
mf = MediaFile(syspath(self.i.path))
mf.title = 'differentTitle'
mf.save()
self._update()
item = self.lib.items().get()
self.assertEqual(item.title, 'differentTitle')
def test_modified_metadata_moved(self):
mf = MediaFile(syspath(self.i.path))
mf.title = 'differentTitle'
mf.save()
self._update(move=True)
item = self.lib.items().get()
self.assertTrue(b'differentTitle' in item.path)
def test_modified_metadata_not_moved(self):
mf = MediaFile(syspath(self.i.path))
mf.title = 'differentTitle'
mf.save()
self._update(move=False)
item = self.lib.items().get()
self.assertTrue(b'differentTitle' not in item.path)
def test_selective_modified_metadata_moved(self):
mf = MediaFile(syspath(self.i.path))
mf.title = 'differentTitle'
mf.genre = 'differentGenre'
mf.save()
self._update(move=True, fields=['title'])
item = self.lib.items().get()
self.assertTrue(b'differentTitle' in item.path)
self.assertNotEqual(item.genre, 'differentGenre')
def test_selective_modified_metadata_not_moved(self):
mf = MediaFile(syspath(self.i.path))
mf.title = 'differentTitle'
mf.genre = 'differentGenre'
mf.save()
self._update(move=False, fields=['title'])
item = self.lib.items().get()
self.assertTrue(b'differentTitle' not in item.path)
self.assertNotEqual(item.genre, 'differentGenre')
def test_modified_album_metadata_moved(self):
mf = MediaFile(syspath(self.i.path))
mf.album = 'differentAlbum'
mf.save()
self._update(move=True)
item = self.lib.items().get()
self.assertTrue(b'differentAlbum' in item.path)
def test_modified_album_metadata_art_moved(self):
artpath = self.album.artpath
mf = MediaFile(syspath(self.i.path))
mf.album = 'differentAlbum'
mf.save()
self._update(move=True)
album = self.lib.albums()[0]
self.assertNotEqual(artpath, album.artpath)
self.assertIsNotNone(album.artpath)
def test_selective_modified_album_metadata_moved(self):
mf = MediaFile(syspath(self.i.path))
mf.album = 'differentAlbum'
mf.genre = 'differentGenre'
mf.save()
self._update(move=True, fields=['album'])
item = self.lib.items().get()
self.assertTrue(b'differentAlbum' in item.path)
self.assertNotEqual(item.genre, 'differentGenre')
def test_selective_modified_album_metadata_not_moved(self):
mf = MediaFile(syspath(self.i.path))
mf.album = 'differentAlbum'
mf.genre = 'differentGenre'
mf.save()
self._update(move=True, fields=['genre'])
item = self.lib.items().get()
self.assertTrue(b'differentAlbum' not in item.path)
self.assertEqual(item.genre, 'differentGenre')
def test_mtime_match_skips_update(self):
mf = MediaFile(syspath(self.i.path))
mf.title = 'differentTitle'
mf.save()
# Make in-memory mtime match on-disk mtime.
self.i.mtime = os.path.getmtime(syspath(self.i.path))
self.i.store()
self._update(reset_mtime=False)
item = self.lib.items().get()
self.assertEqual(item.title, 'full')
def test_multivalued_albumtype_roundtrip(self):
# https://github.com/beetbox/beets/issues/4528
# albumtypes is empty for our test fixtures, so populate it first
album = self.album
correct_albumtypes = ["album", "live"]
# Setting albumtypes does not set albumtype, currently.
# Using x[0] mirrors https://github.com/beetbox/mediafile/blob/057432ad53b3b84385e5582f69f44dc00d0a725d/mediafile.py#L1928 # noqa: E501
correct_albumtype = correct_albumtypes[0]
album.albumtype = correct_albumtype
album.albumtypes = correct_albumtypes
album.try_sync(write=True, move=False)
album.load()
self.assertEqual(album.albumtype, correct_albumtype)
self.assertEqual(album.albumtypes, correct_albumtypes)
self._update()
album.load()
self.assertEqual(album.albumtype, correct_albumtype)
self.assertEqual(album.albumtypes, correct_albumtypes)
class PrintTest(_common.TestCase):
def setUp(self):
super().setUp()
self.io.install()
def test_print_without_locale(self):
lang = os.environ.get('LANG')
if lang:
del os.environ['LANG']
try:
ui.print_('something')
except TypeError:
self.fail('TypeError during print')
finally:
if lang:
os.environ['LANG'] = lang
def test_print_with_invalid_locale(self):
old_lang = os.environ.get('LANG')
os.environ['LANG'] = ''
old_ctype = os.environ.get('LC_CTYPE')
os.environ['LC_CTYPE'] = 'UTF-8'
try:
ui.print_('something')
except ValueError:
self.fail('ValueError during print')
finally:
if old_lang:
os.environ['LANG'] = old_lang
else:
del os.environ['LANG']
if old_ctype:
os.environ['LC_CTYPE'] = old_ctype
else:
del os.environ['LC_CTYPE']
class ImportTest(_common.TestCase):
def test_quiet_timid_disallowed(self):
config['import']['quiet'] = True
config['import']['timid'] = True
self.assertRaises(ui.UserError, commands.import_files, None, [],
None)
def test_parse_paths_from_logfile(self):
if os.path.__name__ == 'ntpath':
logfile_content = (
"import started Wed Jun 15 23:08:26 2022\n"
"asis C:\\music\\Beatles, The\\The Beatles; C:\\music\\Beatles, The\\The Beatles\\CD 01; C:\\music\\Beatles, The\\The Beatles\\CD 02\n" # noqa: E501
"duplicate-replace C:\\music\\Bill Evans\\Trio '65\n"
"skip C:\\music\\Michael Jackson\\Bad\n"
"skip C:\\music\\Soulwax\\Any Minute Now\n"
)
expected_paths = [
"C:\\music\\Beatles, The\\The Beatles",
"C:\\music\\Michael Jackson\\Bad",
"C:\\music\\Soulwax\\Any Minute Now",
]
else:
logfile_content = (
"import started Wed Jun 15 23:08:26 2022\n"
"asis /music/Beatles, The/The Beatles; /music/Beatles, The/The Beatles/CD 01; /music/Beatles, The/The Beatles/CD 02\n" # noqa: E501
"duplicate-replace /music/Bill Evans/Trio '65\n"
"skip /music/Michael Jackson/Bad\n"
"skip /music/Soulwax/Any Minute Now\n"
)
expected_paths = [
"/music/Beatles, The/The Beatles",
"/music/Michael Jackson/Bad",
"/music/Soulwax/Any Minute Now",
]
logfile = os.path.join(self.temp_dir, b"logfile.log")
with open(logfile, mode="w") as fp:
fp.write(logfile_content)
actual_paths = list(commands._paths_from_logfile(logfile))
self.assertEqual(actual_paths, expected_paths)
@_common.slow_test()
class ConfigTest(unittest.TestCase, TestHelper, _common.Assertions):
def setUp(self):
self.setup_beets()
# Don't use the BEETSDIR from `helper`. Instead, we point the home
# directory there. Some tests will set `BEETSDIR` themselves.
del os.environ['BEETSDIR']
self._old_home = os.environ.get('HOME')
os.environ['HOME'] = util.py3_path(self.temp_dir)
# Also set APPDATA, the Windows equivalent of setting $HOME.
self._old_appdata = os.environ.get('APPDATA')
os.environ['APPDATA'] = \
util.py3_path(os.path.join(self.temp_dir, b'AppData', b'Roaming'))
self._orig_cwd = os.getcwd()
self.test_cmd = self._make_test_cmd()
commands.default_commands.append(self.test_cmd)
# Default user configuration
if platform.system() == 'Windows':
self.user_config_dir = os.path.join(
self.temp_dir, b'AppData', b'Roaming', b'beets'
)
else:
self.user_config_dir = os.path.join(
self.temp_dir, b'.config', b'beets'
)
os.makedirs(syspath(self.user_config_dir))
self.user_config_path = os.path.join(self.user_config_dir,
b'config.yaml')
# Custom BEETSDIR
self.beetsdir = os.path.join(self.temp_dir, b'beetsdir')
os.makedirs(syspath(self.beetsdir))
self._reset_config()
self.load_plugins()
def tearDown(self):
commands.default_commands.pop()
os.chdir(syspath(self._orig_cwd))
if self._old_home is not None:
os.environ['HOME'] = self._old_home
if self._old_appdata is None:
del os.environ['APPDATA']
else:
os.environ['APPDATA'] = self._old_appdata
self.unload_plugins()
self.teardown_beets()
def _make_test_cmd(self):
test_cmd = ui.Subcommand('test', help='test')
def run(lib, options, args):
test_cmd.lib = lib
test_cmd.options = options
test_cmd.args = args
test_cmd.func = run
return test_cmd
def _reset_config(self):
# Config should read files again on demand
config.clear()
config._materialized = False
def write_config_file(self):
return open(self.user_config_path, 'w')
def test_paths_section_respected(self):
with self.write_config_file() as config:
config.write('paths: {x: y}')
self.run_command('test', lib=None)
key, template = self.test_cmd.lib.path_formats[0]
self.assertEqual(key, 'x')
self.assertEqual(template.original, 'y')
def test_default_paths_preserved(self):
default_formats = ui.get_path_formats()
self._reset_config()
with self.write_config_file() as config:
config.write('paths: {x: y}')
self.run_command('test', lib=None)
key, template = self.test_cmd.lib.path_formats[0]
self.assertEqual(key, 'x')
self.assertEqual(template.original, 'y')
self.assertEqual(self.test_cmd.lib.path_formats[1:],
default_formats)
def test_nonexistant_db(self):
with self.write_config_file() as config:
config.write('library: /xxx/yyy/not/a/real/path')
with self.assertRaises(ui.UserError):
self.run_command('test', lib=None)
def test_user_config_file(self):
with self.write_config_file() as file:
file.write('anoption: value')
self.run_command('test', lib=None)
self.assertEqual(config['anoption'].get(), 'value')
def test_replacements_parsed(self):
with self.write_config_file() as config:
config.write("replace: {'[xy]': z}")
self.run_command('test', lib=None)
replacements = self.test_cmd.lib.replacements
repls = [(p.pattern, s) for p, s in replacements] # Compare patterns.
self.assertEqual(repls, [('[xy]', 'z')])
def test_multiple_replacements_parsed(self):
with self.write_config_file() as config:
config.write("replace: {'[xy]': z, foo: bar}")
self.run_command('test', lib=None)
replacements = self.test_cmd.lib.replacements
repls = [(p.pattern, s) for p, s in replacements]
self.assertEqual(repls, [
('[xy]', 'z'),
('foo', 'bar'),
])
def test_cli_config_option(self):
config_path = os.path.join(self.temp_dir, b'config.yaml')
with open(config_path, 'w') as file:
file.write('anoption: value')
self.run_command('--config', config_path, 'test', lib=None)
self.assertEqual(config['anoption'].get(), 'value')
def test_cli_config_file_overwrites_user_defaults(self):
with open(self.user_config_path, 'w') as file:
file.write('anoption: value')
cli_config_path = os.path.join(self.temp_dir, b'config.yaml')
with open(cli_config_path, 'w') as file:
file.write('anoption: cli overwrite')
self.run_command('--config', cli_config_path, 'test', lib=None)
self.assertEqual(config['anoption'].get(), 'cli overwrite')
def test_cli_config_file_overwrites_beetsdir_defaults(self):
os.environ['BEETSDIR'] = util.py3_path(self.beetsdir)
env_config_path = os.path.join(self.beetsdir, b'config.yaml')
with open(env_config_path, 'w') as file:
file.write('anoption: value')
cli_config_path = os.path.join(self.temp_dir, b'config.yaml')
with open(cli_config_path, 'w') as file:
file.write('anoption: cli overwrite')
self.run_command('--config', cli_config_path, 'test', lib=None)
self.assertEqual(config['anoption'].get(), 'cli overwrite')
# @unittest.skip('Difficult to implement with optparse')
# def test_multiple_cli_config_files(self):
# cli_config_path_1 = os.path.join(self.temp_dir, b'config.yaml')
# cli_config_path_2 = os.path.join(self.temp_dir, b'config_2.yaml')
#
# with open(cli_config_path_1, 'w') as file:
# file.write('first: value')
#
# with open(cli_config_path_2, 'w') as file:
# file.write('second: value')
#
# self.run_command('--config', cli_config_path_1,
# '--config', cli_config_path_2, 'test', lib=None)
# self.assertEqual(config['first'].get(), 'value')
# self.assertEqual(config['second'].get(), 'value')
#
# @unittest.skip('Difficult to implement with optparse')
# def test_multiple_cli_config_overwrite(self):
# cli_config_path = os.path.join(self.temp_dir, b'config.yaml')
# cli_overwrite_config_path = os.path.join(self.temp_dir,
# b'overwrite_config.yaml')
#
# with open(cli_config_path, 'w') as file:
# file.write('anoption: value')
#
# with open(cli_overwrite_config_path, 'w') as file:
# file.write('anoption: overwrite')
#
# self.run_command('--config', cli_config_path,
# '--config', cli_overwrite_config_path, 'test')
# self.assertEqual(config['anoption'].get(), 'cli overwrite')
# FIXME: fails on windows
@unittest.skipIf(sys.platform == 'win32', 'win32')
def test_cli_config_paths_resolve_relative_to_user_dir(self):
cli_config_path = os.path.join(self.temp_dir, b'config.yaml')
with open(cli_config_path, 'w') as file:
file.write('library: beets.db\n')
file.write('statefile: state')
self.run_command('--config', cli_config_path, 'test', lib=None)
self.assert_equal_path(
util.bytestring_path(config['library'].as_filename()),
os.path.join(self.user_config_dir, b'beets.db')
)
self.assert_equal_path(
util.bytestring_path(config['statefile'].as_filename()),
os.path.join(self.user_config_dir, b'state')
)
def test_cli_config_paths_resolve_relative_to_beetsdir(self):
os.environ['BEETSDIR'] = util.py3_path(self.beetsdir)
cli_config_path = os.path.join(self.temp_dir, b'config.yaml')
with open(cli_config_path, 'w') as file:
file.write('library: beets.db\n')
file.write('statefile: state')
self.run_command('--config', cli_config_path, 'test', lib=None)
self.assert_equal_path(
util.bytestring_path(config['library'].as_filename()),
os.path.join(self.beetsdir, b'beets.db')
)
self.assert_equal_path(
util.bytestring_path(config['statefile'].as_filename()),
os.path.join(self.beetsdir, b'state')
)
def test_command_line_option_relative_to_working_dir(self):
config.read()
os.chdir(syspath(self.temp_dir))
self.run_command('--library', 'foo.db', 'test', lib=None)
self.assert_equal_path(config['library'].as_filename(),
os.path.join(os.getcwd(), 'foo.db'))
def test_cli_config_file_loads_plugin_commands(self):
cli_config_path = os.path.join(self.temp_dir, b'config.yaml')
with open(cli_config_path, 'w') as file:
file.write('pluginpath: %s\n' % _common.PLUGINPATH)
file.write('plugins: test')
self.run_command('--config', cli_config_path, 'plugin', lib=None)
self.assertTrue(plugins.find_plugins()[0].is_test_plugin)
def test_beetsdir_config(self):
os.environ['BEETSDIR'] = util.py3_path(self.beetsdir)
env_config_path = os.path.join(self.beetsdir, b'config.yaml')
with open(env_config_path, 'w') as file:
file.write('anoption: overwrite')
config.read()
self.assertEqual(config['anoption'].get(), 'overwrite')
def test_beetsdir_points_to_file_error(self):
beetsdir = os.path.join(self.temp_dir, b'beetsfile')
open(beetsdir, 'a').close()
os.environ['BEETSDIR'] = util.py3_path(beetsdir)
self.assertRaises(ConfigError, self.run_command, 'test')
def test_beetsdir_config_does_not_load_default_user_config(self):
os.environ['BEETSDIR'] = util.py3_path(self.beetsdir)
with open(self.user_config_path, 'w') as file:
file.write('anoption: value')
config.read()
self.assertFalse(config['anoption'].exists())
def test_default_config_paths_resolve_relative_to_beetsdir(self):
os.environ['BEETSDIR'] = util.py3_path(self.beetsdir)
config.read()
self.assert_equal_path(
util.bytestring_path(config['library'].as_filename()),
os.path.join(self.beetsdir, b'library.db')
)
self.assert_equal_path(
util.bytestring_path(config['statefile'].as_filename()),
os.path.join(self.beetsdir, b'state.pickle')
)
def test_beetsdir_config_paths_resolve_relative_to_beetsdir(self):
os.environ['BEETSDIR'] = util.py3_path(self.beetsdir)
env_config_path = os.path.join(self.beetsdir, b'config.yaml')
with open(env_config_path, 'w') as file:
file.write('library: beets.db\n')
file.write('statefile: state')
config.read()
self.assert_equal_path(
util.bytestring_path(config['library'].as_filename()),
os.path.join(self.beetsdir, b'beets.db')
)
self.assert_equal_path(
util.bytestring_path(config['statefile'].as_filename()),
os.path.join(self.beetsdir, b'state')
)
class ShowModelChangeTest(_common.TestCase):
def setUp(self):
super().setUp()
self.io.install()
self.a = _common.item()
self.b = _common.item()
self.a.path = self.b.path
def _show(self, **kwargs):
change = ui.show_model_changes(self.a, self.b, **kwargs)
out = self.io.getoutput()
return change, out
def test_identical(self):
change, out = self._show()
self.assertFalse(change)
self.assertEqual(out, '')
def test_string_fixed_field_change(self):
self.b.title = 'x'
change, out = self._show()
self.assertTrue(change)
self.assertTrue('title' in out)
def test_int_fixed_field_change(self):
self.b.track = 9
change, out = self._show()
self.assertTrue(change)
self.assertTrue('track' in out)
def test_floats_close_to_identical(self):
self.a.length = 1.00001
self.b.length = 1.00005
change, out = self._show()
self.assertFalse(change)
self.assertEqual(out, '')
def test_floats_different(self):
self.a.length = 1.00001
self.b.length = 2.00001
change, out = self._show()
self.assertTrue(change)
self.assertTrue('length' in out)
def test_both_values_shown(self):
self.a.title = 'foo'
self.b.title = 'bar'
change, out = self._show()
self.assertTrue('foo' in out)
self.assertTrue('bar' in out)
class ShowChangeTest(_common.TestCase):
def setUp(self):
super().setUp()
self.io.install()
self.items = [_common.item()]
self.items[0].track = 1
self.items[0].path = b'/path/to/file.mp3'
self.info = autotag.AlbumInfo(
album='the album', album_id='album id', artist='the artist',
artist_id='artist id', tracks=[
autotag.TrackInfo(title='the title', track_id='track id',
index=1)
]
)
def _show_change(self, items=None, info=None,
cur_artist='the artist', cur_album='the album',
dist=0.1):
"""Return an unicode string representing the changes"""
items = items or self.items
info = info or self.info
mapping = dict(zip(items, info.tracks))
config['ui']['color'] = False
album_dist = distance(items, info, mapping)
album_dist._penalties = {'album': [dist]}
commands.show_change(
cur_artist,
cur_album,
autotag.AlbumMatch(album_dist, info, mapping, set(), set()),
)
return self.io.getoutput().lower()
def test_null_change(self):
msg = self._show_change()
self.assertTrue('similarity: 90' in msg)
self.assertTrue('tagging:' in msg)
def test_album_data_change(self):
msg = self._show_change(cur_artist='another artist',
cur_album='another album')
self.assertTrue('correcting tags from:' in msg)
def test_item_data_change(self):
self.items[0].title = 'different'
msg = self._show_change()
self.assertTrue('different -> the title' in msg)
def test_item_data_change_with_unicode(self):
self.items[0].title = 'caf\xe9'
msg = self._show_change()
self.assertTrue('caf\xe9 -> the title' in msg)
def test_album_data_change_with_unicode(self):
msg = self._show_change(cur_artist='caf\xe9',
cur_album='another album')
self.assertTrue('correcting tags from:' in msg)
def test_item_data_change_title_missing(self):
self.items[0].title = ''
msg = re.sub(r' +', ' ', self._show_change())
self.assertTrue('file.mp3 -> the title' in msg)
def test_item_data_change_title_missing_with_unicode_filename(self):
self.items[0].title = ''
self.items[0].path = '/path/to/caf\xe9.mp3'.encode()
msg = re.sub(r' +', ' ', self._show_change())
self.assertTrue('caf\xe9.mp3 -> the title' in msg or
'caf.mp3 ->' in msg)
@patch('beets.library.Item.try_filesize', Mock(return_value=987))
class SummarizeItemsTest(_common.TestCase):
def setUp(self):
super().setUp()
item = library.Item()
item.bitrate = 4321
item.length = 10 * 60 + 54
item.format = "F"
self.item = item
def test_summarize_item(self):
summary = commands.summarize_items([], True)
self.assertEqual(summary, "")
summary = commands.summarize_items([self.item], True)
self.assertEqual(summary, "F, 4kbps, 10:54, 987.0 B")
def test_summarize_items(self):
summary = commands.summarize_items([], False)
self.assertEqual(summary, "0 items")
summary = commands.summarize_items([self.item], False)
self.assertEqual(summary, "1 items, F, 4kbps, 10:54, 987.0 B")
# make a copy of self.item
i2 = self.item.copy()
summary = commands.summarize_items([self.item, i2], False)
self.assertEqual(summary, "2 items, F, 4kbps, 21:48, 1.9 KiB")
i2.format = "G"
summary = commands.summarize_items([self.item, i2], False)
self.assertEqual(summary, "2 items, F 1, G 1, 4kbps, 21:48, 1.9 KiB")
summary = commands.summarize_items([self.item, i2, i2], False)
self.assertEqual(summary, "3 items, G 2, F 1, 4kbps, 32:42, 2.9 KiB")
class PathFormatTest(_common.TestCase):
def test_custom_paths_prepend(self):
default_formats = ui.get_path_formats()
config['paths'] = {'foo': 'bar'}
pf = ui.get_path_formats()
key, tmpl = pf[0]
self.assertEqual(key, 'foo')
self.assertEqual(tmpl.original, 'bar')
self.assertEqual(pf[1:], default_formats)
@_common.slow_test()
class PluginTest(_common.TestCase, TestHelper):
def test_plugin_command_from_pluginpath(self):
config['pluginpath'] = [_common.PLUGINPATH]
config['plugins'] = ['test']
self.run_command('test', lib=None)
@_common.slow_test()
class CompletionTest(_common.TestCase, TestHelper):
def test_completion(self):
# Load plugin commands
config['pluginpath'] = [_common.PLUGINPATH]
config['plugins'] = ['test']
# Do not load any other bash completion scripts on the system.
env = dict(os.environ)
env['BASH_COMPLETION_DIR'] = os.devnull
env['BASH_COMPLETION_COMPAT_DIR'] = os.devnull
# Open a `bash` process to run the tests in. We'll pipe in bash
# commands via stdin.
cmd = os.environ.get('BEETS_TEST_SHELL', '/bin/bash --norc').split()
if not has_program(cmd[0]):
self.skipTest('bash not available')
tester = subprocess.Popen(cmd, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, env=env)
# Load bash_completion library.
for path in commands.BASH_COMPLETION_PATHS:
if os.path.exists(syspath(path)):
bash_completion = path
break
else:
self.skipTest('bash-completion script not found')
try:
with open(util.syspath(bash_completion), 'rb') as f:
tester.stdin.writelines(f)
except OSError:
self.skipTest('could not read bash-completion script')
# Load completion script.
self.io.install()
self.run_command('completion', lib=None)
completion_script = self.io.getoutput().encode('utf-8')
self.io.restore()
tester.stdin.writelines(completion_script.splitlines(True))
# Load test suite.
test_script_name = os.path.join(_common.RSRC, b'test_completion.sh')
with open(test_script_name, 'rb') as test_script_file:
tester.stdin.writelines(test_script_file)
out, err = tester.communicate()
if tester.returncode != 0 or out != b'completion tests passed\n':
print(out.decode('utf-8'))
self.fail('test/test_completion.sh did not execute properly')
class CommonOptionsParserCliTest(unittest.TestCase, TestHelper):
"""Test CommonOptionsParser and formatting LibModel formatting on 'list'
command.
"""
def setUp(self):
self.setup_beets()
self.item = _common.item()
self.item.path = b'xxx/yyy'
self.lib.add(self.item)
self.lib.add_album([self.item])
self.load_plugins()
def tearDown(self):
self.unload_plugins()
self.teardown_beets()
def test_base(self):
l = self.run_with_output('ls')
self.assertEqual(l, 'the artist - the album - the title\n')
l = self.run_with_output('ls', '-a')
self.assertEqual(l, 'the album artist - the album\n')
def test_path_option(self):
l = self.run_with_output('ls', '-p')
self.assertEqual(l, 'xxx/yyy\n')
l = self.run_with_output('ls', '-a', '-p')
self.assertEqual(l, 'xxx\n')
def test_format_option(self):
l = self.run_with_output('ls', '-f', '$artist')
self.assertEqual(l, 'the artist\n')
l = self.run_with_output('ls', '-a', '-f', '$albumartist')
self.assertEqual(l, 'the album artist\n')
def test_format_option_unicode(self):
l = self.run_with_output(b'ls', b'-f',
'caf\xe9'.encode(util.arg_encoding()))
self.assertEqual(l, 'caf\xe9\n')
def test_root_format_option(self):
l = self.run_with_output('--format-item', '$artist',
'--format-album', 'foo', 'ls')
self.assertEqual(l, 'the artist\n')
l = self.run_with_output('--format-item', 'foo',
'--format-album', '$albumartist',
'ls', '-a')
self.assertEqual(l, 'the album artist\n')
def test_help(self):
l = self.run_with_output('help')
self.assertIn('Usage:', l)
l = self.run_with_output('help', 'list')
self.assertIn('Usage:', l)
with self.assertRaises(ui.UserError):
self.run_command('help', 'this.is.not.a.real.command')
def test_stats(self):
l = self.run_with_output('stats')
self.assertIn('Approximate total size:', l)
# # Need to have more realistic library setup for this to work
# l = self.run_with_output('stats', '-e')
# self.assertIn('Total size:', l)
def test_version(self):
l = self.run_with_output('version')
self.assertIn('Python version', l)
self.assertIn('no plugins loaded', l)
# # Need to have plugin loaded
# l = self.run_with_output('version')
# self.assertIn('plugins: ', l)
class CommonOptionsParserTest(unittest.TestCase, TestHelper):
def setUp(self):
self.setup_beets()
def tearDown(self):
self.teardown_beets()
def test_album_option(self):
parser = ui.CommonOptionsParser()
self.assertFalse(parser._album_flags)
parser.add_album_option()
self.assertTrue(bool(parser._album_flags))
self.assertEqual(parser.parse_args([]), ({'album': None}, []))
self.assertEqual(parser.parse_args(['-a']), ({'album': True}, []))
self.assertEqual(parser.parse_args(['--album']),
({'album': True}, []))
def test_path_option(self):
parser = ui.CommonOptionsParser()
parser.add_path_option()
self.assertFalse(parser._album_flags)
config['format_item'].set('$foo')
self.assertEqual(parser.parse_args([]), ({'path': None}, []))
self.assertEqual(config['format_item'].as_str(), '$foo')
self.assertEqual(parser.parse_args(['-p']),
({'path': True, 'format': '$path'}, []))
self.assertEqual(parser.parse_args(['--path']),
({'path': True, 'format': '$path'}, []))
self.assertEqual(config['format_item'].as_str(), '$path')
self.assertEqual(config['format_album'].as_str(), '$path')
def test_format_option(self):
parser = ui.CommonOptionsParser()
parser.add_format_option()
self.assertFalse(parser._album_flags)
config['format_item'].set('$foo')
self.assertEqual(parser.parse_args([]), ({'format': None}, []))
self.assertEqual(config['format_item'].as_str(), '$foo')
self.assertEqual(parser.parse_args(['-f', '$bar']),
({'format': '$bar'}, []))
self.assertEqual(parser.parse_args(['--format', '$baz']),
({'format': '$baz'}, []))
self.assertEqual(config['format_item'].as_str(), '$baz')
self.assertEqual(config['format_album'].as_str(), '$baz')
def test_format_option_with_target(self):
with self.assertRaises(KeyError):
ui.CommonOptionsParser().add_format_option(target='thingy')
parser = ui.CommonOptionsParser()
parser.add_format_option(target='item')
config['format_item'].set('$item')
config['format_album'].set('$album')
self.assertEqual(parser.parse_args(['-f', '$bar']),
({'format': '$bar'}, []))
self.assertEqual(config['format_item'].as_str(), '$bar')
self.assertEqual(config['format_album'].as_str(), '$album')
def test_format_option_with_album(self):
parser = ui.CommonOptionsParser()
parser.add_album_option()
parser.add_format_option()
config['format_item'].set('$item')
config['format_album'].set('$album')
parser.parse_args(['-f', '$bar'])
self.assertEqual(config['format_item'].as_str(), '$bar')
self.assertEqual(config['format_album'].as_str(), '$album')
parser.parse_args(['-a', '-f', '$foo'])
self.assertEqual(config['format_item'].as_str(), '$bar')
self.assertEqual(config['format_album'].as_str(), '$foo')
parser.parse_args(['-f', '$foo2', '-a'])
self.assertEqual(config['format_album'].as_str(), '$foo2')
def test_add_all_common_options(self):
parser = ui.CommonOptionsParser()
parser.add_all_common_options()
self.assertEqual(parser.parse_args([]),
({'album': None, 'path': None, 'format': None}, []))
class EncodingTest(_common.TestCase):
"""Tests for the `terminal_encoding` config option and our
`_in_encoding` and `_out_encoding` utility functions.
"""
def out_encoding_overridden(self):
config['terminal_encoding'] = 'fake_encoding'
self.assertEqual(ui._out_encoding(), 'fake_encoding')
def in_encoding_overridden(self):
config['terminal_encoding'] = 'fake_encoding'
self.assertEqual(ui._in_encoding(), 'fake_encoding')
def out_encoding_default_utf8(self):
with patch('sys.stdout') as stdout:
stdout.encoding = None
self.assertEqual(ui._out_encoding(), 'utf-8')
def in_encoding_default_utf8(self):
with patch('sys.stdin') as stdin:
stdin.encoding = None
self.assertEqual(ui._in_encoding(), 'utf-8')
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
|
#!/usr/bin/env python3
#
# Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.

# Script to convert snapshot files to a C++ file which can be compiled and
# linked together with the VM binary.

import getopt
import optparse
import string
import subprocess
import sys
import utils

HOST_OS = utils.GuessOS()
HOST_CPUS = utils.GuessCpus()


def BuildOptions():
    result = optparse.OptionParser()
    result.add_option(
        "--vm_input_bin",
        action="store",
        type="string",
        help="input file name of the vm isolate snapshot in binary form")
    result.add_option(
        "--input_bin",
        action="store",
        type="string",
        help="input file name of the isolate snapshot in binary form")
    result.add_option(
        "--input_cc",
        action="store",
        type="string",
        help="input file name which contains the C buffer template")
    result.add_option(
        "--output",
        action="store",
        type="string",
        help="output file name into which snapshot in C buffer form is generated"
    )
    result.add_option(
        "-v",
        "--verbose",
        help='Verbose output.',
        default=False,
        action="store_true")
    return result


def ProcessOptions(options):
    if not options.vm_input_bin:
        sys.stderr.write('--vm_input_bin not specified\n')
        return False
    if not options.input_bin:
        sys.stderr.write('--input_bin not specified\n')
        return False
    if not options.input_cc:
        sys.stderr.write('--input_cc not specified\n')
        return False
    if not options.output:
        sys.stderr.write('--output not specified\n')
        return False
    return True


def WriteBytesAsText(out, input_file):
    """Writes byte contents of the input_file into out file as text.

    Output is formatted as a list of comma separated integer values - one value
    for each byte.
    """
    with open(input_file, 'rb') as input:
        lineCounter = 0
        line = ' '
        for byte in input.read():
            # Iterating over bytes yields ints in Python 3, so the value can be
            # formatted directly.
            line += ' %d,' % byte
            lineCounter += 1
            if lineCounter == 10:
                out.write(line + '\n')
                line = ' '
                lineCounter = 0
        if lineCounter != 0:
            out.write(line + '\n')


def GenerateFileFromTemplate(output_file, input_cc_file, vm_isolate_input_file,
                             isolate_input_file):
    """Generates a C++ file based on an input_cc_file template and two binary files.

    Template is expected to have two %s placeholders which would be filled
    with binary contents of the given files, each formatted as a comma separated
    list of integers.
    """
    snapshot_cc_text = open(input_cc_file).read()
    chunks = snapshot_cc_text.split("%s")
    if len(chunks) != 3:
        raise Exception("Template %s should contain exactly two %%s occurrences"
                        % input_cc_file)
    with open(output_file, 'w') as out:
        out.write(chunks[0])
        WriteBytesAsText(out, vm_isolate_input_file)
        out.write(chunks[1])
        WriteBytesAsText(out, isolate_input_file)
        out.write(chunks[2])


def Main():
    # Parse options.
    parser = BuildOptions()
    (options, args) = parser.parse_args()
    if not ProcessOptions(options):
        parser.print_help()
        return 1

    # If there are additional arguments, report error and exit.
    if args:
        parser.print_help()
        return 1

    GenerateFileFromTemplate(options.output, options.input_cc,
                             options.vm_input_bin, options.input_bin)
    return 0


if __name__ == '__main__':
    sys.exit(Main())
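# Example invocation (a sketch only; the script and file names below are
# hypothetical, the options are the ones defined in BuildOptions above):
#   python3 create_snapshot_file.py \
#       --vm_input_bin vm_isolate_snapshot.bin \
#       --input_bin isolate_snapshot.bin \
#       --input_cc snapshot_in.cc \
#       --output snapshot_gen.cc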
|
from blackjack import total
from random import choice
def dealer_strategy(hand, playerhand, turn, difficulty='normal'):
if difficulty == 'easy':
if turn == 1:
if total(hand) < 7:
return 'd'
elif 7 <= total(hand) <= 11:
return 'd'
elif 12 <= total(hand) <= 15:
return 'h'
elif total(hand) > 15:
return 's'
else:
if total(hand) <= 11:
return 'h'
elif 12 <= total(hand) <= 15:
return choice(['h', 's'])
elif total(hand) > 15:
return 's'
if difficulty == 'normal':
# soft
if 'A' in hand:
newhand = [11 if x == 'A' else x for x in hand]
if turn == 1:
if 'J' in hand or 'Q' in hand or 'K' in hand:
return 's'
elif sum(newhand) == 20:
return 's'
elif sum(newhand) == 19:
if 6 not in playerhand:
return 's'
else:
return 'd'
elif sum(newhand) == 18:
elsestop_18 = [2, 3, 4, 5, 6]
hit_18 = [9, 10, 11]
d_switch = None
for i in elsestop_18:
if i in playerhand:
d_switch = 'd'
for i in hit_18:
if i in playerhand:
d_switch = 'h'
if d_switch is not None:
return d_switch
else:
return 's'
                elif sum(newhand) == 17:
                    # the original left this branch unfinished; assume the dealer hits soft 17
                    return 'h'
elif sum(newhand) >= 15:
newhand = [1 if x==11 else x for x in newhand]
if sum(newhand) < 11:
return 'd'
elif sum(newhand) >= 11:
return 'h'
else:
return 'h'
elif turn > 1:
if 'J' in hand or 'Q' in hand or 'K' in hand:
newhand = [10 if x == 'J' or x == 'Q' or x == 'K' else x for x in newhand]
if sum(newhand) > 19:
return 's'
elif sum(newhand) <= 18:
newhand = [1 if x==11 else x for x in newhand]
if sum(newhand) <= 14:
return 'h'
else:
return 's'
else:
return 's'
elif sum(newhand) > 19:
return 's'
elif sum(newhand) <= 18:
newhand = [1 if x==11 else x for x in newhand]
if sum(newhand) <= 14:
return 'h'
else:
return 's'
# hard
else:
if 'J' in hand or 'Q' in hand or 'K' in hand:
newhand = [10 if x == 'J' or x == 'Q' or x == 'K' else x for x in hand]
if sum(newhand) >= 17:
return 's'
elif 13 <= sum(newhand) <= 16:
hard_hit = [7, 8, 9, 10]
d_switch = None
for i in hard_hit:
if i in playerhand:
d_switch = 'h'
if d_switch is not None:
return d_switch
else:
return 's'
elif sum(newhand) == 12:
stand = [4,5,6]
d_switch = None
for i in stand:
if i in playerhand:
d_switch = 's'
if d_switch is not None:
return d_switch
else:
return 'h'
elif sum(newhand) == 11:
return 'h'
elif sum(newhand) == 10:
double = [2,3,4,5,6,7,8,9]
d_switch = None
for i in double:
if i in playerhand:
d_switch = 'd'
if d_switch is not None:
return d_switch
else:
return 'h'
elif sum(newhand) == 9:
double = [3, 4, 5, 6]
d_switch = None
for i in double:
if i in playerhand:
d_switch = 'd'
if d_switch is not None:
return d_switch
else:
return 'h'
elif sum(newhand) <= 8:
return 'h'
elif sum(hand) >= 17:
return 's'
elif 13 <= sum(hand) <= 16:
hard_hit = [6, 8, 9, 10]
d_switch = None
for i in hard_hit:
if i in playerhand:
d_switch = 'h'
if d_switch is not None:
return d_switch
else:
return 's'
elif sum(hand) == 12:
stand = [4,5,6]
d_switch = None
for i in stand:
if i in playerhand:
d_switch = 's'
if d_switch is not None:
return d_switch
else:
return 'h'
elif sum(hand) == 11:
return 'h'
elif sum(hand) == 10:
double = [2,3,4,5,6,7,8,9]
d_switch = None
for i in double:
if i in playerhand:
d_switch = 'd'
if d_switch is not None:
return d_switch
else:
return 'h'
elif sum(hand) == 9:
double = [3, 4, 5, 6]
d_switch = None
for i in double:
if i in playerhand:
d_switch = 'd'
if d_switch is not None:
return d_switch
else:
return 'h'
elif sum(hand) <= 8:
return 'h'
if difficulty == 'cheater': #TODO
pass
|
# -*- coding:utf-8 -*-
from datetime import datetime
from django.db import models
# Create your models here.
class City(models.Model):
    name = models.CharField(max_length=20, verbose_name=u"City name")
    desc_city = models.CharField(max_length=200, verbose_name=u"City description")
    add_time = models.DateTimeField(default=datetime.now, verbose_name=u"Added time")
    class Meta:
        verbose_name = u"City"
verbose_name_plural = verbose_name
def __str__(self):
return self.name
class CourseOrganization(models.Model):
    name = models.CharField(max_length=50, verbose_name=u"Organization name")
    desc_organization = models.TextField(verbose_name=u"Organization description")
    tag = models.CharField(max_length=10, verbose_name=u"Organization tag", default="Well-known")
    location = models.CharField(max_length=150, verbose_name=u"Organization address")
    category = models.CharField(default="pxjg", verbose_name=u"Organization category", max_length=20, choices=(("pxjg", "Training institution"), ("gx", "University"), ("gr", "Individual")))
    fav_num = models.IntegerField(default=0, verbose_name=u"Favorite count")
    image = models.ImageField(upload_to="org/%Y/%m", verbose_name=u"logo", max_length=200)
    click_num = models.IntegerField(default=0, verbose_name=u"Click count")
    # address = location = models.CharField(max_length=150, verbose_name=u"Organization address", blank=True, null=True)
    students = models.IntegerField(default=0, verbose_name=u"Number of students")
    course_nums = models.IntegerField(default=0, verbose_name=u"Number of courses")
    city = models.ForeignKey(City, verbose_name=u"City of the organization")
    add_time = models.DateTimeField(default=datetime.now, verbose_name=u"Added time")
    class Meta:
        verbose_name = u"Course organization"
verbose_name_plural = verbose_name
def __str__(self):
return self.name
def get_teacher_num(self):
return self.teacher_set.all().count()
class Teacher(models.Model):
    age = models.IntegerField(default=27, verbose_name=u"Instructor age")
    points = models.CharField(max_length=50, default="Rigorous teaching", verbose_name=u"Teaching characteristics")
    org = models.ForeignKey(CourseOrganization, verbose_name=u"Affiliated organization")
    name = models.CharField(max_length=20, verbose_name=u"Instructor")
    work_year = models.IntegerField(default=0, verbose_name=u"Years of work experience")
    work_company = models.CharField(max_length=50, verbose_name=u"Company")
    work_position = models.CharField(max_length=50, verbose_name=u"Position")
    fav_num = models.IntegerField(default=0, verbose_name=u"Favorite count")
    click_num = models.IntegerField(default=0, verbose_name=u"Click count")
    add_time = models.DateTimeField(default=datetime.now, verbose_name=u"Added time")
    image = models.ImageField(upload_to="teacher/%Y/%m", verbose_name=u"Avatar", max_length=100, default="")
    class Meta:
        verbose_name = u"Instructor"
verbose_name_plural = verbose_name
def __str__(self):
return self.name
def get_course_num(self):
return self.course_set.all().count()
|
class Solution:
def cipher(self, input, key):
result = ""
new_key = key%26
for i in input:
if i.isalpha():
if 65 <= ord(i) <= 90:
if new_key+ord(i) > 90:
decode = chr(ord(i)-(26-new_key))
result += decode
else:
decode = chr(ord(i)+new_key)
result += decode
elif 97 <= ord(i) <= 122:
if new_key+ord(i) > 122:
decode = chr(ord(i)-(26-new_key))
result += decode
else:
decode = chr(ord(i)+new_key)
result += decode
else:
result += i
else:
result += i
return(result)
|
#Ikhventi race file
import RaceBP, Buildables, Ships, Technology, Soldiers, Colonies
class ikhventiRace(RaceBP.baseRace):
def __init__(self):
self.name = "The Ikhventi"
self.traits = ()
def createStartingColonies(self):
pass
#List of ikhventi structures
class ikhventi_Habitat(Buildables.baseStructure):
def __init__(self):
self.built = 0
self.buildtime = 4
self.name = "Basic Habitat"
#List of ikhventi ships
class ikhventi_Colonyship(Ships.shipBase):
def __init__(self):
self.name = "Inhabiter"
self.type = "Corvette"
self.health = 10
buildables_list = {'structures':{'habitat':ikhventi_Habitat()},
'vessels':{'colony ship':ikhventi_Colonyship()}}
|
from collections import Counter
with open('orc.txt', 'r') as myfile:
data = myfile.read()
sList = list(data)
print(Counter(sList))
# The rare characters (each occurring exactly once) are [t, y, i, a, l, e, q, u], but not in the right order.
# Looking up where each of these letters appears in the text file gives the order in which they occur in the garbled message.
# The decoded result is "equality".
# Method 2
s = ''.join([line.rstrip() for line in open('orc.txt')])  # rstrip() removes trailing whitespace, including the newline
OCCURRENCES = {}
for c in s: OCCURRENCES[c] = OCCURRENCES.get(c, 0) + 1
avgOC = len(s) // len(OCCURRENCES)
print(''.join([c for c in s if OCCURRENCES[c] < avgOC]))
|
from functions import isPrime
def main():
num = 2
index = 1
while True:
if isPrime(num):
print(index, '-->',num)
index+=1
if index==10002:
break
num+=1
if __name__ == '__main__':
main()
|
import uvloop
import asyncio
import logging
from colorlog import ColoredFormatter
from tonga.models.structs.persistency_type import PersistencyType
from tonga.stores.local_store import LocalStore, StoreKeyNotFound
def setup_logger():
"""Return a logger with a default ColoredFormatter."""
formatter = ColoredFormatter(
"%(log_color)s[%(asctime)s]%(levelname)s: %(name)s/%(module)s/%(funcName)s:%(lineno)d"
" (%(thread)d) %(blue)s%(message)s",
datefmt=None,
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
)
logger = logging.getLogger('tonga')
handler = logging.StreamHandler()
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
return logger
async def main() -> None:
local_store = LocalStore(db_type=PersistencyType.MEMORY, loop=loop)
print(loop)
await local_store.__getattribute__('_build_set').__call__('test1', b'value1')
await local_store.__getattribute__('_build_set').__call__('test2', b'value2')
await local_store.__getattribute__('_build_set').__call__('test3', b'value3')
await local_store.__getattribute__('_build_delete').__call__('test2')
local_store.get_persistency().__getattribute__('_set_initialize').__call__()
assert local_store.get_persistency().is_initialize()
assert await local_store.get('test1') == b'value1'
assert await local_store.get('test3') == b'value3'
try:
await local_store.delete('toto')
except StoreKeyNotFound:
print("Logic n'est ce pas ? ")
await local_store.delete('test3')
try:
        print('BEFORE THE LAST GET')
await local_store.get('test3')
except StoreKeyNotFound:
print("2 Logic n'est ce pas ? ")
loop.stop()
if __name__ == '__main__':
logger = setup_logger()
loop = uvloop.new_event_loop()
asyncio.set_event_loop(loop)
asyncio.ensure_future(main(), loop=loop)
try:
# Runs forever
loop.run_forever()
except Exception:
# If an exception was raised loop was stopped
loop.stop()
|
# coding=utf-8
# @Author: wjn
from config import choice_environment
class MpApp():
# @property
def mp_app(self):
'''小程序打开接口'''
domain = choice_environment.current_url
api = '/mp-app'
url = str(domain) + api
return url
|
from test_data.login_credentials import LoginCredentials
class Login():
"""
Login Valid user
"""
# Step: Enter Username
def enter_username(self, username):
find_element("username_field_element").send_keys(username)
# Enter Password
def enter_password(self, password):
find_element("password_field_element").send_keys(password)
# Login User
def user_login(self, username=LoginCredentials.username, password=LoginCredentials.password):
self.enter_username(username)
self.enter_password(password)
self.click_submit_button()
|
''' \
Usage:
python b.py -n <file.fasta> -o <output.tsv> -c <coverage>'''
import sys
...
inputs = sys.argv
if '-n' not in inputs or '-o' not in inputs or '-c' not in inputs:
print (__doc__)
else:
f_in = inputs[inputs.index('-n') + 1]
f_out = inputs[inputs.index('-o') + 1]
cov = int(inputs[inputs.index("-c") + 1])
def gc_analyzer():
f_open = open(f_in, "r")
f_read = f_open.readlines()
f_write = open(f_out, "w+")
head = ""
seq = ""
for line in f_read:
if ">" in line[0:2]:
head = line
elif ">" not in line[0:2]:
seq += line.strip()
for i in range(0, len(seq), cov):
seq_frag = seq[i:i+cov]
gc = ("%.2f" %(((seq_frag.count("G")+seq_frag.count("C"))/len(seq_frag))*100))
f_write.write(str((i+cov)/1000000)+"\t"+str(gc)+"\n")
gc_analyzer()
...
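# Example invocation (illustrative file names), assuming -c sets the fragment length,
# in bases, used for each GC value:
#   python b.py -n genome.fasta -o gc_windows.tsv -c 100000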
|
class Solution(object):
def mergeKLists(self, lists):
self.nodes = []
head = point = ListNode(0)
for l in lists:
while l:
self.nodes.append(l.val)
l = l.next
for x in sorted(self.nodes):
point.next = ListNode(x)
point = point.next
return head.next
from operator import attrgetter
class Solution(object):
def mergeKLists(self, lists):
sorted_list = []
for head in lists:
curr = head
while curr is not None:
sorted_list.append(curr)
curr = curr.next
sorted_list = sorted(sorted_list, key=attrgetter('val'))
for i, node in enumerate(sorted_list):
try:
node.next = sorted_list[i + 1]
except:
node.next = None
if sorted_list:
return sorted_list[0]
else:
return None
|
import sys
sys.path.append('../500_common')
import lib
import lib_ss
if True:
images = lib.get_images("data/result.html")
else:
soup = lib_ss.main("/Users/nakamurasatoru/git/d_genji/genji_curation/src/500_common/Chrome31", "Profile 3", 10)
images = lib.get_images_by_soup(soup)
collectionUrl = "https://utda.github.io/genji/iiif/ndl-2610937/top.json"
areas = ["3200,250,2800,2600", "430,250,2800,2600"]
countMax = 10
# token = lib.get_token("../token.yml")
token = "eyJhbGciOiJSUzI1NiIsImtpZCI6IjA4MGU0NWJlNGIzMTE4MzA5M2RhNzUyYmIyZGU5Y2RjYTNlNmU4ZTciLCJ0eXAiOiJKV1QifQ.eyJuYW1lIjoi5Lit5p2R6KaaIiwicGljdHVyZSI6Imh0dHBzOi8vbGgzLmdvb2dsZXVzZXJjb250ZW50LmNvbS8tWHQ1NENUT1pEdVEvQUFBQUFBQUFBQUkvQUFBQUFBQUFBQUEvQUFLV0pKTjg3RWs3MVZqeTZyWTNpeTh6bmFFR0FqeFlpdy9waG90by5qcGciLCJpc3MiOiJodHRwczovL3NlY3VyZXRva2VuLmdvb2dsZS5jb20vY29kaC04MTA0MSIsImF1ZCI6ImNvZGgtODEwNDEiLCJhdXRoX3RpbWUiOjE2MDkzMzYyMDgsInVzZXJfaWQiOiJvZ2Z0UkpaeGxDZzZIRDZMelNPWGZ4ZlBXYUEzIiwic3ViIjoib2dmdFJKWnhsQ2c2SEQ2THpTT1hmeGZQV2FBMyIsImlhdCI6MTYwOTM5MTAyMSwiZXhwIjoxNjA5Mzk0NjIxLCJlbWFpbCI6InNhLnRvcnUuYTQxNjIzQGdtYWlsLmNvbSIsImVtYWlsX3ZlcmlmaWVkIjp0cnVlLCJmaXJlYmFzZSI6eyJpZGVudGl0aWVzIjp7Imdvb2dsZS5jb20iOlsiMTA0ODEzNDEzMzM0OTI0ODM4NDQzIl0sImVtYWlsIjpbInNhLnRvcnUuYTQxNjIzQGdtYWlsLmNvbSJdfSwic2lnbl9pbl9wcm92aWRlciI6Imdvb2dsZS5jb20ifX0.DQc2txuMzPl7tGdKIRfdIYGfrN9XLchy_QzwwcGa9psT6dA3AXSCwSd-wO45rRslevlOjcJe7cZVHl4iHo9rOPUYLmWhdhRycOhdYRAk0VvibOpcO8KgfJvKl7dGnD_5D1H9Mpyvarw7s7APmTKchnFJeo71uJrdAtDhq51Utg4UNVpGTfG8arjFedptkld2ocmxKgtyj5TQ7-5Dnho_Et0tD2_Ua3FNBo0JvNtUyRtnFML1jetyuLnTRBX30inF_bXh7cEPKtigRnQkFIdMWikeapevB_3HR0c5QbLC5ymo9iNAod34PMg1uabUoRID0letiHtGbM__7dGNZPiQ8w"
lib.post(collectionUrl, areas, countMax, token, images, "Collection")
|
mobile_number = input("Enter mobile number:")
joined = ''
split_array = list(mobile_number)
for digit in split_array:
print(digit)
if digit == '1':
joined +="ONE "
elif digit == '2':
joined += "TWO "
elif digit == '3':
joined += "THREE "
elif digit == '4':
joined += "FOUR "
elif digit == '5':
joined += "FIVE "
elif digit == '6':
joined += "SIX "
elif digit == '7':
joined += "SEVEN "
elif digit == '8':
joined += "EIGHT "
elif digit == '9':
joined += "NINE "
elif digit == '0':
joined += "ZERO "
print(joined)
|
import gc
import glob
import os
import shutil
import time
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.callbacks import ModelCheckpoint
from keras.models import load_model
from sklearn.model_selection import KFold, StratifiedKFold, train_test_split
from .utils import copytree
class KerasDirectoryFlowPipeline(object):
"""Creates Keras pipeline with .flow_from_directory method for
out-of-memory training with data augmentation.
# Arguments
model_name: Name of model based on .py file with models definitions.
model_params: Dict, parameters provided to the model according to it's
definitions as specified in .py models file.
predict_test: Boolean, whether to predict on test set.
n_bags: Int, number of bags to use in bagging run.
n_folds: Int, number of folds to use in KFold/SKFold run.
split_size: Float, size of validation split, percent of training data size.
stratify: Boolean, whether to stratify target classes in KFold run.
shuffle: Boolean, whether to shuffle data during training & data split.
user_split: Boolean, whether validation data (X and y) is provided by user.
seed: Int, random seed number for splits.
verbose: Boolean, whether to print information about the run.
number_epochs: Int, number of epochs to train the model for.
batch_size: Int, batch size for model training and prediction.
callbacks: List, list of callbacks for the model.
run_save_name: String, name of run used during checkpoint & run statistics
saving.
save_statistics: Boolean, whether to save run statistics.
save_model: Boolean, whether to save model checkpoints, by default in src_dir + 'checkpoints/'.
output_statistics: Boolean, whether to show run statistics.
src_dir: String, working directory for model training & default checkpoints location.
full_train_dir: String, directory containing full (original) training dataset.
train_dir: String, directory containing training split data.
valid_dir: String, directory containing validation split data.
test_dir: String, directory containing test split data.
image_size: Tuple, containing image width and height, e.g. (299, 299)
classes: List, list of names of classes in the data,
e.g. ['Shark', 'Tuna', 'Whale']
train_datagen: ImageDataGenerator object specifying data augmentation
parameters for training set.
valid_datagen: ImageDataGenerator object specifying data augmentation
parameters for validation set.
test_datagen: ImageDataGenerator object specifying data augmentation
            parameters for test set.
number_train_samples: Int, number of samples in training set,
given to Keras generator
number_validation_samples: Int, number of samples in validation set,
given to Keras generator
number_test_samples: Int, number of samples in test set,
given to Keras generator
number_test_augmentations: Int, number of data augmentations to perform
during test data prediction.
"""
def __init__(self, model_name, model_params=None,
predict_test=False,
n_bags=2, n_folds=5, split_size=0.2,
stratify=False, shuffle=True,
user_split=False,
seed=None, verbose=True,
number_epochs=1, batch_size=1, callbacks=None,
run_save_name=None, save_statistics=False, save_model=True,
output_statistics=True,
src_dir=None, full_train_dir=None,
train_dir=None, valid_dir=None, test_dir=None,
image_size=None,
classes=None,
train_datagen=None, valid_datagen=None, test_datagen=None,
number_train_samples=0, number_validation_samples=0, number_test_samples=0,
number_test_augmentations=0):
self.model_name = model_name
self.model_params = model_params if model_params is not None else {}
self.predict_test = predict_test
self.n_bags = n_bags
self.n_folds = n_folds
self.split_size = split_size
self.stratify = stratify
self.shuffle = shuffle
self.user_split = user_split
self.seed = seed
self.verbose = verbose
self.number_epochs = number_epochs
self.batch_size = batch_size
self.callbacks = callbacks if callbacks is not None else []
self.src_dir = src_dir if src_dir is not None else os.getcwd()
self.full_train_dir = full_train_dir
self.train_dir = train_dir
self.valid_dir = valid_dir
self.test_dir = test_dir
self.image_size = image_size
self.classes = classes if classes is not None else []
self.train_datagen = train_datagen
self.valid_datagen = valid_datagen
self.test_datagen = test_datagen
self.number_train_samples = number_train_samples
self.number_validation_samples = number_validation_samples
self.number_test_samples = number_test_samples
self.number_test_augmentations = number_test_augmentations
self.run_save_name = run_save_name
self.save_statistics = save_statistics if run_save_name is not None else False
self.save_model = save_model if run_save_name is not None else False
self.output_statistics = output_statistics
self.oof_train = None
self.oof_test = None
self.i = 1
self.start_time = time.time()
self.checkpoints_dst = self.src_dir + '/checkpoints/'
self.predictions_valid = []
self.predictions_test = []
self.loss_history = []
self.min_losses = []
def bag_flow_run(self, split_every_bag=False):
"""Runs Keras bagged run with out-of-memory training with data augmentation.
# Arguments
split_every_bag: Boolean, whether to create random training/validation
split every bag.
# Returns
A trained model.
"""
for bag in range(self.n_bags):
print('Training on bag:', self.i, '\n')
model = self.model_name(self.model_params)
if self.save_statistics:
os.makedirs('{}{}'.format(
self.checkpoints_dst, self.run_save_name), exist_ok=True)
if self.save_model:
self.callbacks.append(ModelCheckpoint('{}{}/{}_bag{}.h5'.format(self.checkpoints_dst,
self.run_save_name, self.run_save_name,
self.i),
monitor='val_loss',
verbose=0, save_best_only=True))
if split_every_bag:
self.perform_random_validation_split()
train_generator = self.train_datagen.flow_from_directory(
self.train_dir,
target_size=self.image_size,
batch_size=self.batch_size,
seed=self.seed,
shuffle=self.shuffle,
classes=self.classes,
class_mode='categorical')
validation_generator = self.valid_datagen.flow_from_directory(
self.valid_dir,
target_size=self.image_size,
batch_size=self.batch_size,
seed=self.seed,
shuffle=self.shuffle,
classes=self.classes,
class_mode='categorical')
history = model.fit_generator(
train_generator,
steps_per_epoch=self.number_train_samples / self.batch_size,
epochs=self.number_epochs,
validation_data=validation_generator,
validation_steps=self.number_validation_samples / self.batch_size,
callbacks=self.callbacks)
validation_loss = history.history['val_loss']
self.loss_history.append(validation_loss)
self.min_losses.append(np.min(validation_loss))
self.i += 1
if self.output_statistics:
self.output_run_statistics()
if self.predict_test:
self.predictions_test, test_image_names = self.predict_test_augment()
return model, self.predictions_test, test_image_names
return model
def predict_test_augment(self):
"""Runs Keras bagged model test data prediction with data augmentation.
# Returns
2 objects: test data predictions, test filenames
"""
print('Predicting set from directory: {}'.format(self.test_dir))
predictions_test_bags = []
for bag in range(self.n_bags):
print('Predicting crops for bag: {}'.format(bag + 1))
model = load_model('{}{}/{}_bag{}.h5'.format(self.checkpoints_dst,
self.run_save_name,
self.run_save_name, bag + 1))
print('Model loaded.', '\n')
for augment in range(self.number_test_augmentations):
print('Augmentation number: {}'.format(augment + 1))
test_generator = self.test_datagen.flow_from_directory(
self.test_dir,
target_size=self.image_size,
batch_size=self.batch_size,
seed=self.seed,
shuffle=False,
classes=None,
class_mode='categorical')
test_image_names = test_generator.filenames
if augment == 0:
predictions_test = model.predict_generator(test_generator,
self.number_test_samples / self.batch_size)
else:
predictions_test += model.predict_generator(test_generator,
self.number_test_samples / self.batch_size)
predictions_test /= self.number_test_augmentations
predictions_test_bags.append(predictions_test)
self.predictions_test = np.array(predictions_test_bags).mean(axis=0)
print('Predictions on test data with augmentation done.')
return self.predictions_test, test_image_names
def perform_random_validation_split(self):
"""Performs random split into training and validation sets.
"""
print('Performing random split with split size: {}'.format(self.split_size))
os.chdir(self.train_dir)
os.chdir('../')
shutil.rmtree(self.train_dir)
shutil.rmtree(self.valid_dir)
os.makedirs(self.train_dir, exist_ok=True)
os.makedirs(self.valid_dir, exist_ok=True)
copytree(self.full_train_dir, self.train_dir)
os.chdir(self.train_dir)
for _class in glob.glob('*'):
os.mkdir(self.valid_dir + _class)
train_images_names, valid_images_names = train_test_split(glob.glob(self.train_dir + '*/*.*'),
test_size=self.split_size, random_state=self.seed)
print('Number of training set images: {}, validation set images: {}'.format(len(train_images_names),
len(valid_images_names)))
for i in range(len(valid_images_names)):
os.rename(valid_images_names[i], '{}/{}'.format(self.valid_dir,
'/'.join(valid_images_names[i].split('/')[-2:])))
return
def output_run_statistics(self):
if self.verbose:
print('Loss statistics for best epoch in current run: \n',
'Mean: {}'.format(np.mean(self.min_losses)), '\n',
'Minimum: {}'.format(np.min(self.min_losses)), '\n',
'Maximum: {}'.format(np.max(self.min_losses)), '\n',
'Standard Deviation: {}'.format(np.std(self.min_losses)), '\n')
if self.save_statistics:
with open('{}{}/{}_stats.txt'.format(self.checkpoints_dst,
self.run_save_name, self.run_save_name), 'w') as text_file:
text_file.write(
'Loss statistics for best epoch in current run: \n')
text_file.write('Minimum: {} \n'.format(
np.min(self.min_losses)))
text_file.write('Maximum: {} \n'.format(
np.max(self.min_losses)))
text_file.write('Mean: {} \n'.format(
np.mean(self.min_losses)))
text_file.write('Standard Deviation: {} \n'.format(
np.std(self.min_losses)))
text_file.write('Seconds it took to train the model: {} \n'.format(
time.time() - self.start_time))
return
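# A minimal usage sketch (all names, paths and numbers below are illustrative assumptions,
# not part of this module): build the data generators, point the pipeline at the directory
# layout described in the class docstring and run a bagged training.
#
#   from keras.preprocessing.image import ImageDataGenerator
#   from models import build_cnn  # hypothetical factory: takes a params dict, returns a compiled model
#
#   pipeline = KerasDirectoryFlowPipeline(
#       model_name=build_cnn,
#       n_bags=3,
#       number_epochs=10,
#       batch_size=32,
#       run_save_name='cnn_bagged',
#       full_train_dir='data/full_train/', train_dir='data/train/', valid_dir='data/valid/',
#       image_size=(299, 299),
#       classes=['Shark', 'Tuna', 'Whale'],
#       train_datagen=ImageDataGenerator(rescale=1. / 255, horizontal_flip=True),
#       valid_datagen=ImageDataGenerator(rescale=1. / 255),
#       number_train_samples=8000, number_validation_samples=2000)
#   model = pipeline.bag_flow_run(split_every_bag=True)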
|
from vpython import sphere, canvas, vector, color, material
from math import sin, cos
class Planet(object):
def __init__(self, radius, s_pos, material = None, color = None):
pass
class Star(object):
    def __init__(self, radius, s_pos, color = color.yellow):
pass
class Comet(object):
def __init__(self, close_approach_object):
self.data = close_approach_object.get_frame()
pass
|
import re
script_1 = open("abc.txt", "r")
lines = script_1.readlines()
# print(lines[0])
# lab = int(lines[0],10)
# print(lab)
a = []
b = []
c = []
aflag = 0
bflag = 0
cflag = 0
for line in lines:
if re.match('JavaScript', line):
a = line.split()
aflag = 1
if re.match('HTML', line):
b = line.split()
bflag = 1
if re.match('CSS', line):
c = line.split()
cflag = 1
x = []
if aflag:
x.append(a[0])
x.append(a[-1])
if bflag:
x.append(b[0])
x.append(b[-1])
if cflag:
x.append(c[0])
x.append(c[-1])
# print(x)
|
""" Mixin with computed along horizon geological attributes. """
# pylint: disable=too-many-statements
import numpy as np
from cv2 import dilate
from scipy.signal import hilbert, ricker
from scipy.ndimage import convolve
from scipy.ndimage.morphology import binary_fill_holes, binary_erosion, binary_dilation
from skimage.measure import label
from sklearn.decomposition import PCA
from ..functional import smooth_out, special_convolve
from ..utils import transformable, lru_cache
class AttributesMixin:
""" Geological attributes along horizon:
- scalars computed from its depth map only: number of holes, perimeter, coverage
- matrices computed from its depth map only: presence matrix, gradients along directions, etc
- properties of a carcass
- methods to cut data from the cube along horizon
- matrices derived from amplitudes along horizon: instant amplitudes/phases, decompositions, etc.
    Also changes the `__getattr__` of a horizon by allowing the `full_` prefix to apply `:meth:~.matrix_put_on_full`.
    For example, `full_binary_matrix` returns the result of `binary_matrix`, wrapped with `:meth:~.matrix_put_on_full`.
Method for getting desired attributes is `load_attribute`. It works with nested keys, i.e. one can get attributes
of horizon subsets. Address method documentation for further details.
"""
#pylint: disable=unexpected-keyword-arg
def __getattr__(self, key):
if key.startswith('full_'):
key = key.replace('full_', '')
matrix = getattr(self, key)
return self.matrix_put_on_full(matrix)
raise AttributeError(key)
# Modify computed matrices
def _dtype_to_fill_value(self, dtype):
if dtype == np.int32:
fill_value = self.FILL_VALUE
elif dtype == np.float32:
fill_value = np.nan
elif np.issubdtype(dtype, np.bool):
fill_value = False
else:
raise TypeError(f'Incorrect dtype: `{dtype}`')
return fill_value
def matrix_set_dtype(self, matrix, dtype):
""" Change the dtype and fill_value to match it. """
mask = (matrix == self.FILL_VALUE) | np.isnan(matrix)
matrix = matrix.astype(dtype)
matrix[mask] = self._dtype_to_fill_value(dtype)
return matrix
def matrix_put_on_full(self, matrix):
""" Convert matrix from being horizon-shaped to cube-shaped. """
if matrix.shape[:2] != self.field.spatial_shape:
background = np.full(self.field.spatial_shape, self._dtype_to_fill_value(matrix.dtype), dtype=matrix.dtype)
background[self.i_min:self.i_max + 1, self.x_min:self.x_max + 1] = matrix
else:
background = matrix
return background
def matrix_fill_to_num(self, matrix, value):
""" Change the matrix values at points where horizon is absent to a supplied one. """
if matrix.dtype == np.int32:
mask = (matrix == self.FILL_VALUE)
elif matrix.dtype == np.float32:
mask = np.isnan(matrix)
elif np.issubdtype(matrix.dtype, np.bool):
mask = ~matrix
matrix[mask] = value
return matrix
def matrix_num_to_fill(self, matrix, value):
""" Mark points equal to value as absent ones. """
if value is np.nan:
mask = np.isnan(matrix)
else:
mask = (matrix == value)
matrix[mask] = self._dtype_to_fill_value(matrix.dtype)
return matrix
def matrix_normalize(self, matrix, mode):
""" Normalize matrix values.
Parameters
----------
mode : bool, str, optional
If `min-max` or True, then use min-max scaling.
If `mean-std`, then use mean-std scaling.
If False, don't scale matrix.
"""
values = matrix[self.presence_matrix]
        if mode in ['min-max', True]:
            min_, max_ = np.nanmin(values), np.nanmax(values)
            matrix = (matrix - min_) / (max_ - min_)
        elif mode == 'mean-std':
            mean, std = np.nanmean(values), np.nanstd(values)
            matrix = (matrix - mean) / std
        elif mode is False:
            pass  # leave the matrix unscaled, as documented above
        else:
            raise ValueError(f'Unknown normalization mode `{mode}`.')
return matrix
def matrix_smooth_out(self, matrix, kernel=None, kernel_size=7, sigma=2., margin=5, iters=1):
""" Smooth the depth matrix to produce floating point numbers. """
smoothed = smooth_out(matrix, kernel=kernel, kernel_size=kernel_size, sigma=sigma,
margin=margin, fill_value=self.FILL_VALUE, preserve=True, iters=iters)
return smoothed
def matrix_enlarge(self, matrix, width=3):
""" Increase visibility of a sparse carcass metric. Should be used only for visualization purposes. """
if matrix.ndim == 3 and matrix.shape[-1] != 1:
return matrix
# Convert all the nans to a number, so that `dilate` can work with it
matrix = matrix.copy().astype(np.float32).squeeze()
matrix[np.isnan(matrix)] = self.FILL_VALUE
# Apply dilations along both axis
structure = np.ones((1, 3), dtype=np.uint8)
dilated1 = dilate(matrix, structure, iterations=width)
dilated2 = dilate(matrix, structure.T, iterations=width)
# Mix matrices
matrix = np.full_like(matrix, np.nan)
matrix[dilated1 != self.FILL_VALUE] = dilated1[dilated1 != self.FILL_VALUE]
matrix[dilated2 != self.FILL_VALUE] = dilated2[dilated2 != self.FILL_VALUE]
mask = (dilated1 != self.FILL_VALUE) & (dilated2 != self.FILL_VALUE)
matrix[mask] = (dilated1[mask] + dilated2[mask]) / 2
# Fix zero traces
matrix[np.isnan(self.field.std_matrix)] = np.nan
return matrix
@staticmethod
def pca_transform(data, n_components=3, **kwargs):
""" Reduce number of channels along the depth axis. """
flattened = data.reshape(-1, data.shape[-1])
mask = np.isnan(flattened).any(axis=-1)
pca = PCA(n_components, **kwargs)
transformed = pca.fit_transform(flattened[~mask])
n_components = transformed.shape[-1]
result = np.full((*data.shape[:2], n_components), np.nan).reshape(-1, n_components)
result[~mask] = transformed
result = result.reshape(*data.shape[:2], n_components)
return result
# Technical matrices
@property
def binary_matrix(self):
""" Boolean matrix with `true` values at places where horizon is present and `false` everywhere else. """
return (self.matrix > 0).astype(np.bool)
@property
def presence_matrix(self):
""" A convenient alias for binary matrix in cubic coordinate system. """
return self._presence_matrix()
@lru_cache(maxsize=1)
def _presence_matrix(self):
""" A method for getting binary matrix in cubic coordinates. Allows for introspectable cache. """
return self.full_binary_matrix
# Scalars computed from depth map
@property
def coverage(self):
""" Ratio between number of present values and number of good traces in cube. """
return len(self) / (np.prod(self.field.spatial_shape) - np.sum(self.field.zero_traces))
@property
def number_of_holes(self):
""" Number of holes inside horizon borders. """
holes_array = self.filled_matrix != self.binary_matrix
_, num = label(holes_array, connectivity=2, return_num=True, background=0)
return num
@property
def perimeter(self):
""" Number of points in the borders. """
return np.sum((self.borders_matrix == 1).astype(np.int32))
@property
def solidity(self):
""" Ratio of area covered by horizon to total area inside borders. """
return len(self) / np.sum(self.filled_matrix)
# Matrices computed from depth map
@property
def borders_matrix(self):
""" Borders of horizons (borders of holes inside are not included). """
filled_matrix = self.filled_matrix
structure = np.ones((3, 3))
eroded = binary_erosion(filled_matrix, structure, border_value=0)
return filled_matrix ^ eroded # binary difference operation
@property
def boundaries_matrix(self):
""" Borders of horizons (borders of holes inside included). """
binary_matrix = self.binary_matrix
structure = np.ones((3, 3))
eroded = binary_erosion(binary_matrix, structure, border_value=0)
return binary_matrix ^ eroded # binary difference operation
@property
def filled_matrix(self):
""" Binary matrix with filled holes. """
structure = np.ones((3, 3))
filled_matrix = binary_fill_holes(self.binary_matrix, structure)
return filled_matrix
def grad_along_axis(self, axis=0):
""" Change of heights along specified direction. """
grad = np.diff(self.matrix, axis=axis, prepend=np.int32(0))
grad[np.abs(grad) > self.h_min] = self.FILL_VALUE
grad[self.matrix == self.FILL_VALUE] = self.FILL_VALUE
return grad
@property
def grad_i(self):
""" Change of heights along iline direction. """
return self.grad_along_axis(0)
@property
def grad_x(self):
""" Change of heights along xline direction. """
return self.grad_along_axis(1)
# Carcass properties: should be used only if the horizon is a carcass
@property
def is_carcass(self):
""" Check if the horizon is a sparse carcass. """
return len(self) / self.filled_matrix.sum() < 0.5
@property
def carcass_ilines(self):
""" Labeled inlines in a carcass. """
uniques, counts = np.unique(self.points[:, 0], return_counts=True)
return uniques[counts > 256]
@property
def carcass_xlines(self):
""" Labeled xlines in a carcass. """
uniques, counts = np.unique(self.points[:, 1], return_counts=True)
return uniques[counts > 256]
# Retrieve data from seismic along horizon
@lru_cache(maxsize=1, apply_by_default=False, copy_on_return=True)
@transformable
def get_cube_values(self, window=1, offset=0, chunk_size=256, **_):
""" Get values from the cube along the horizon.
Parameters
----------
window : int
Width of data slice along the horizon.
offset : int
Offset of data slice with respect to horizon heights matrix.
chunk_size : int
Size of data along height axis processed at a time.
"""
low = window // 2
high = max(window - low, 0)
chunk_size = min(chunk_size, self.h_max - self.h_min + window)
background = np.zeros((self.field.ilines_len, self.field.xlines_len, window), dtype=np.float32)
for h_start in range(max(low, self.h_min), self.h_max + 1, chunk_size):
h_end = min(h_start + chunk_size, self.h_max + 1)
# Get chunk from the cube (depth-wise)
location = (slice(None), slice(None),
slice(h_start - low, min(h_end + high, self.field.depth)))
data_chunk = self.field.geometry.load_crop(location, use_cache=False)
# Check which points of the horizon are in the current chunk (and present)
idx_i, idx_x = np.asarray((self.matrix != self.FILL_VALUE) &
(self.matrix >= h_start) &
(self.matrix < h_end)).nonzero()
heights = self.matrix[idx_i, idx_x]
# Convert spatial coordinates to cubic, convert height to current chunk local system
idx_i += self.i_min
idx_x += self.x_min
heights -= (h_start - offset)
# Subsequently add values from the cube to background, then shift horizon 1 unit lower
for j in range(window):
background[idx_i, idx_x, np.full_like(heights, j)] = data_chunk[idx_i, idx_x, heights]
heights += 1
mask = heights < data_chunk.shape[2]
idx_i = idx_i[mask]
idx_x = idx_x[mask]
heights = heights[mask]
background[~self.presence_matrix] = np.nan
return background
def get_array_values(self, array, shifts=None, grid_info=None, width=5, axes=(2, 1, 0)):
""" Get values from an external array along the horizon.
Parameters
----------
array : np.ndarray
A data-array to make a cut from.
shifts : tuple or None
an offset defining the location of given array with respect to the horizon.
If None, `grid_info` with key `range` must be supplied.
grid_info : dict
Whenever passed, must contain key `range`.
Used for infering shifts of the array with respect to horizon.
width : int
required width of the resulting cut.
axes : tuple
if not None, axes-transposition with the required axes-order is used.
"""
if shifts is None and grid_info is None:
raise ValueError('Either shifts or dataset with filled grid_info must be supplied!')
if shifts is None:
shifts = [grid_info['range'][i][0] for i in range(3)]
shifts = np.array(shifts)
horizon_shift = np.array((self.bbox[0, 0], self.bbox[1, 0]))
if axes is not None:
array = np.transpose(array, axes=axes)
# compute start and end-points of the ilines-xlines overlap between
# array and matrix in horizon and array-coordinates
horizon_shift, shifts = np.array(horizon_shift), np.array(shifts)
horizon_max = horizon_shift[:2] + np.array(self.matrix.shape)
array_max = np.array(array.shape[:2]) + shifts[:2]
overlap_shape = np.minimum(horizon_max[:2], array_max[:2]) - np.maximum(horizon_shift[:2], shifts[:2])
overlap_start = np.maximum(0, horizon_shift[:2] - shifts[:2])
heights_start = np.maximum(shifts[:2] - horizon_shift[:2], 0)
# recompute horizon-matrix in array-coordinates
slc_array = [slice(l, h) for l, h in zip(overlap_start, overlap_start + overlap_shape)]
slc_horizon = [slice(l, h) for l, h in zip(heights_start, heights_start + overlap_shape)]
overlap_matrix = np.full(array.shape[:2], fill_value=self.FILL_VALUE, dtype=np.float32)
overlap_matrix[slc_array] = self.matrix[slc_horizon]
overlap_matrix -= shifts[-1]
# make the cut-array and fill it with array-data located on needed heights
result = np.full(array.shape[:2] + (width, ), np.nan, dtype=np.float32)
iterator = [overlap_matrix + shift for shift in range(-width // 2 + 1, width // 2 + 1)]
for i, surface_level in enumerate(np.array(iterator)):
mask = (surface_level >= 0) & (surface_level < array.shape[-1]) & (surface_level !=
self.FILL_VALUE - shifts[-1])
mask_where = np.where(mask)
result[mask_where[0], mask_where[1], i] = array[mask_where[0], mask_where[1],
surface_level[mask_where].astype(np.int)]
return result
# Generic attributes loading
ATTRIBUTE_TO_ALIAS = {
# Properties
'full_matrix': ['full_matrix', 'heights', 'depths'],
'full_binary_matrix': ['full_binary_matrix', 'presence_matrix', 'masks'],
# Created by `get_*` methods
'amplitudes': ['amplitudes', 'cube_values'],
'metric': ['metric', 'metrics'],
'instant_phases': ['instant_phases', 'iphases'],
'instant_amplitudes': ['instant_amplitudes', 'iamplitudes'],
'fourier_decomposition': ['fourier', 'fourier_decomposition'],
'wavelet_decomposition': ['wavelet', 'wavelet_decomposition'],
'median_diff': ['median_diff', 'mdiff'],
'grad': ['grad', 'gradient'],
'spikes': ['spikes'],
}
ALIAS_TO_ATTRIBUTE = {alias: name for name, aliases in ATTRIBUTE_TO_ALIAS.items() for alias in aliases}
ATTRIBUTE_TO_METHOD = {
'amplitudes' : 'get_cube_values',
'metric' : 'get_metric',
'instant_phases' : 'get_instantaneous_phases',
'instant_amplitudes' : 'get_instantaneous_amplitudes',
'fourier_decomposition' : 'get_fourier_decomposition',
'wavelet_decomposition' : 'get_wavelet_decomposition',
'median_diff': 'get_median_diff_map',
'grad': 'get_gradient_map',
'spikes': 'get_spikes_map',
}
def load_attribute(self, src, location=None, use_cache=True, enlarge=False, **kwargs):
""" Load horizon attribute values at requested location.
        This is the intended interface for loading matrices along the horizon and should be preferred in all scenarios.
To retrieve the attribute, we either use `:meth:~.get_property` or `:meth:~.get_*` methods: as all of them are
wrapped with `:func:~.transformable` decorator, you can use its arguments to modify the behaviour.
Parameters
----------
src : str
Key of the desired attribute. Valid attributes are either properties or aliases, defined
by `ALIAS_TO_ATTRIBUTE` mapping, for example:
- 'cube_values' or 'amplitudes': cube values;
- 'depths' or 'full_matrix': horizon depth map in cubic coordinates;
- 'metrics': random support metrics matrix.
- 'instant_phases': instantaneous phase;
- 'instant_amplitudes': instantaneous amplitude;
- 'fourier' or 'fourier_decomposition': fourier transform with optional PCA;
            - 'wavelet' or 'wavelet_decomposition': wavelet transform with optional PCA;
- 'masks' or 'full_binary_matrix': mask of horizon;
location : sequence of 3 slices
First two slices are used as `iline` and `xline` ranges to cut crop from.
Last 'depth' slice is not used, since points are sampled exactly on horizon.
If None, `src` is returned uncropped.
        enlarge : bool, optional
            Whether to enlarge carcass maps; it only takes effect when the horizon is a carcass.
            Should be used only for visualization purposes.
kwargs :
Passed directly to attribute-evaluating methods from :attr:`.ATTRIBUTE_TO_METHOD` depending on `src`.
Examples
--------
Load 'depths' attribute for whole horizon:
>>> horizon.load_attribute('depths')
Load 'cube_values' attribute for requested slice of fixed width:
>>> horizon.load_attribute('cube_values', (x_slice, i_slice, 1), window=10)
Load 'metrics' attribute with specific evaluation parameter and following normalization.
>>> horizon.load_attribute('metrics', metric='local_corrs', normalize='min-max')
"""
src = self.ALIAS_TO_ATTRIBUTE.get(src, src)
enlarge = enlarge and self.is_carcass
if src in self.ATTRIBUTE_TO_METHOD:
method = self.ATTRIBUTE_TO_METHOD[src]
data = getattr(self, method)(use_cache=use_cache, enlarge=enlarge, **kwargs)
else:
data = self.get_property(src, enlarge=enlarge, **kwargs)
# TODO: Someday, we would need to re-write attribute loading methods
# so they use locations not to crop the loaded result, but to load attribute only at location.
if location is not None:
i_slice, x_slice, _ = location
data = data[i_slice, x_slice]
return data
# Specific attributes loading
@lru_cache(maxsize=1, apply_by_default=False, copy_on_return=True)
@transformable
def get_property(self, src, **_):
""" Load a desired instance attribute. Decorated to allow additional postprocessing steps. """
data = getattr(self, src, None)
if data is None:
aliases = list(self.ALIAS_TO_ATTRIBUTE.keys())
raise ValueError(f'Unknown `src` {src}. Expected a matrix-property or one of {aliases}.')
return data
@lru_cache(maxsize=1, apply_by_default=False, copy_on_return=True)
@transformable
def get_instantaneous_amplitudes(self, window=23, depths=None, **kwargs):
""" Calculate instantaneous amplitude along the horizon.
Parameters
----------
window : int
Width of cube values cutout along horizon to use for attribute calculation.
depths : slice, sequence of int or None
Which depth channels of resulted array to return.
If slice or sequence of int, used for slicing calculated attribute along last axis.
If None, infer middle channel index from 'window' and slice at it calculated attribute along last axis.
kwargs :
Passed directly to :meth:`.get_cube_values`.
Notes
-----
        Keep in mind that the Hilbert transform produces artifacts at signal start and end. Therefore, if you want to get
        an attribute with `N` channels along the depth axis, you should provide a `window` broader than `N`. E.g. in the call
        `label.get_instantaneous_amplitudes(depths=range(10, 21), window=41)` the attribute is first calculated
        on an array of `(xlines, ilines, 41)` shape and then the slice `[..., ..., 10:21]` of it is returned.
"""
depths = [window // 2] if depths is None else depths
amplitudes = self.get_cube_values(window, use_cache=False, **kwargs)
result = np.abs(hilbert(amplitudes)).astype(np.float32)[:, :, depths]
return result
@lru_cache(maxsize=1, apply_by_default=False, copy_on_return=True)
@transformable
def get_instantaneous_phases(self, window=23, depths=None, **kwargs):
""" Calculate instantaneous phase along the horizon.
Parameters
----------
window : int
Width of cube values cutout along horizon to use for attribute calculation.
depths : slice, sequence of int or None
Which depth channels of resulted array to return.
If slice or sequence of int, used for slicing calculated attribute along last axis.
If None, infer middle channel index from 'window' and slice at it calculated attribute along last axis.
kwargs :
Passed directly to :meth:`.get_cube_values`.
Notes
-----
        Keep in mind that the Hilbert transform produces artifacts at signal start and end. Therefore, if you want to get
        an attribute with `N` channels along the depth axis, you should provide a `window` broader than `N`. E.g. in the call
        `label.get_instantaneous_phases(depths=range(10, 21), window=41)` the attribute is first calculated
        on an array of `(xlines, ilines, 41)` shape and then the slice `[..., ..., 10:21]` of it is returned.
"""
depths = [window // 2] if depths is None else depths
amplitudes = self.get_cube_values(window, use_cache=False, **kwargs)
result = np.angle(hilbert(amplitudes)).astype(np.float32)[:, :, depths]
return result
@lru_cache(maxsize=1, apply_by_default=False, copy_on_return=True)
@transformable
def get_metric(self, metric='support_corrs', supports=50, agg='nanmean', **kwargs):
""" Cached metrics calcucaltion with disabled plotting option.
Parameters
----------
metric, supports, agg :
Passed directly to :meth:`.HorizonMetrics.evaluate`.
kwargs :
Passed directly to :meth:`.HorizonMetrics.evaluate`.
"""
metrics = self.metrics.evaluate(metric=metric, supports=supports, agg=agg,
enlarge=False, plot=False, savepath=None, **kwargs)
return metrics
@lru_cache(maxsize=1, apply_by_default=False, copy_on_return=True)
@transformable
def get_fourier_decomposition(self, window=50, **_):
""" Cached fourier transform calculation follower by dimensionaluty reduction via PCA.
Parameters
----------
window : int
Width of amplitudes slice to calculate fourier transform on.
"""
amplitudes = self.load_attribute('amplitudes', window=window)
result = np.abs(np.fft.rfft(amplitudes))
return result
@lru_cache(maxsize=1, apply_by_default=False, copy_on_return=True)
@transformable
def get_wavelet_decomposition(self, widths=range(1, 14, 3), window=50, **_):
""" Cached wavelet transform calculation followed by dimensionaluty reduction via PCA.
Parameters
----------
widths : list of numbers
Widths of wavelets to calculate decomposition for.
window : int
Width of amplitudes slice to calculate wavelet transform on.
"""
amplitudes = self.load_attribute('amplitudes', window=window)
result_shape = *amplitudes.shape[:2], len(widths)
result = np.empty(result_shape, dtype=np.float32)
for idx, width in enumerate(widths):
wavelet = ricker(window, width).reshape(1, 1, -1)
result[:, :, idx] = convolve(amplitudes, wavelet, mode='constant')[:, :, window // 2]
return result
def get_zerocrossings(self, side, window=15):
""" Get matrix of depths shifted to nearest point of sign change in cube values.
Parameters
----------
side : -1 or 1
Whether to look for sign change above the horizon (-1) or below (1).
window : positive int
Width of data slice above/below the horizon made along its surface.
"""
values = self.get_cube_values(window=window, offset=window // 2 * side, fill_value=0)
# reverse array along depth axis for invariance
values = values[:, :, ::side]
sign = np.sign(values)
        # a value of 2 in the array below marks a sign change of cube values along the depth axis
cross = np.abs(np.diff(sign, axis=-1))
# put 2 at points, where cube values are precisely equal to zero
zeros = sign[:, :, :-1] == 0
cross[zeros] = 2
        # obtain the index of the first sign change occurrence for every trace
        # if a trace doesn't change sign, its corresponding index is 0
cross_indices = np.argmax(cross == 2, axis=-1)
# get cube values before sign change
start_points = self.matrix_to_points(cross_indices).T
start_values = values[tuple(start_points)]
# get cube values after sign change
stop_points = start_points + np.array([[0], [0], [1]])
stop_values = values[tuple(stop_points)]
# calculate additional float shifts towards true zero-crossing point
float_shift = start_values - stop_values
# do not perform division at points, where both 'start' and 'stop' values are 0
np.divide(start_values, float_shift, out=float_shift, where=float_shift != 0)
# treat obtained indices as shifts for label depths matrix
shift = cross_indices.astype(np.float32)
# apply additional float shifts to shift matrix
shift += float_shift.reshape(shift.shape)
# account for shift matrix sign change
shift *= side
result = self.full_matrix + shift
return result
# Despiking maps
@lru_cache(maxsize=1, apply_by_default=False, copy_on_return=True)
@transformable
def get_median_diff_map(self, convolve_mode='m', kernel_size=11, kernel=None, margin=0, iters=2, threshold=2, **_):
""" Compute difference between depth map and its median filtered counterpart. """
convolved = special_convolve(self.full_matrix, mode=convolve_mode, kernel=kernel, kernel_size=kernel_size,
margin=margin, iters=iters, fill_value=self.FILL_VALUE)
spikes = self.full_matrix - convolved
if threshold is not None:
spikes[np.abs(spikes) < threshold] = 0
spikes[self.field.zero_traces == 1] = np.nan
return spikes
@lru_cache(maxsize=1, apply_by_default=False, copy_on_return=True)
@transformable
def get_gradient_map(self, threshold=0, **_):
""" Compute combined gradient map along both directions.
Parameters
----------
threshold : number
Threshold to consider a difference to be a spike.
"""
grad_i = self.load_attribute('grad_i', on_full=True, dtype=np.float32, use_cache=False)
grad_x = self.load_attribute('grad_x', on_full=True, dtype=np.float32, use_cache=False)
grad_i[np.abs(grad_i) <= threshold] = 0
grad_x[np.abs(grad_x) <= threshold] = 0
grad = grad_i + grad_x
grad[self.field.zero_traces == 1] = np.nan
return grad
@lru_cache(maxsize=1, apply_by_default=False, copy_on_return=True)
@transformable
def get_spikes_map(self, spikes_mode='median', threshold=1., dilation=5,
kernel_size=11, kernel=None, margin=0, iters=2, **_):
""" Locate spikes on a horizon.
Parameters
----------
        spikes_mode : str
If 'gradient', then use gradient map to locate spikes.
If 'median', then use median diffs to locate spikes.
threshold : number
Threshold to consider a difference to be a spike.
dilation : int
Number of iterations for binary dilation algorithm to increase the spikes.
kernel_size, kernel, margin, iters
Parameters for median differences computation.
"""
if spikes_mode.startswith('m'):
spikes = self.load_attribute('median_diff', mode='m', kernel=kernel, kernel_size=kernel_size,
margin=margin, iters=iters, threshold=threshold)
elif spikes_mode.startswith('g'):
spikes = self.load_attribute('gradient', threshold=threshold)
else:
raise ValueError(f'Wrong mode passed, {spikes_mode}')
if dilation:
spikes = np.nan_to_num(spikes)
spikes = binary_dilation(spikes, iterations=dilation).astype(np.float32)
spikes[self.field.zero_traces == 1] = np.nan
return spikes
|
from struct import Struct
def write_records(records, format, f):
record_struct = Struct(format)
for r in records:
f.write(record_struct.pack(*r))
def unpack_records(format, data):
record_struct = Struct(format)
return (record_struct.unpack_from(data, offset) for offset in range(0, len(data), record_struct.size))
if __name__ == '__main__':
records = [(1, 2.3, 4.5),
(6, 7.8, 9.0),
(12, 13.4, 56.7)]
with open('data.b', 'wb') as f:
write_records(records, '<idd', f)
from collections import namedtuple
Record = namedtuple('Record', ['kind', 'x', 'y'])
with open('data.b', 'rb') as f:
data = f.read()
for rec in unpack_records('<idd', data):
record = Record(*rec)
        print(record)
|
from controllers.FakeController import FakeController
from lib.Vehicle import Vehicle
class FakeVehicle(Vehicle):
def __init__(self):
controller = FakeController()
super().__init__(controller)
if __name__ == '__main__':
vehicle = FakeVehicle()
vehicle.listen()
|
import unittest
import coc_package
class TestSubtractFunction(unittest.TestCase):
def test_add_for_ints(self):
self.assertEqual(coc_package.subtract(3, 5), 3 - 5)
def test_add_error(self):
with self.assertRaises(AttributeError):
coc_package.subtract(3, "5")
if __name__ == '__main__':
unittest.main()
|
from django.db import models
from django.db.models.deletion import CASCADE
# Create your models here.
class Location(models.Model):
name = models.CharField(max_length=200)
address = models.CharField(max_length=300)
def __str__(self):
return self.address
class Participant(models.Model):
username = models.CharField(max_length=100)
email = models.EmailField(unique=True)
def __str__(self):
return self.username
class Meetup(models.Model):
title = models.CharField(max_length=200)
slug = models.SlugField(unique=True)
description = models.TextField()
organizer_email = models.EmailField()
date = models.DateField()
image = models.ImageField(upload_to='images')
location = models.ForeignKey(Location, on_delete=models.CASCADE)
participant = models.ManyToManyField(Participant, blank=True, null=True)
def __str__(self):
return self.title
|
import cx_Oracle as ora
username = 'test'
password = 'abcd1234'
ip = '192.168.194.103'
port = '1521'
srvnm = 'orcl'
tnsnm = ora.makedsn(ip, port, service_name=srvnm)
conn = ora.connect(username, password, dsn=tnsnm)
print('Connection Success!')
curs = conn.cursor()
# sqlid = input("请输入sqlid: ")
sqlid = 'b7ghr8z9mm79s'
sqlplan = "select * from table(dbms_xplan.display_cursor('{}'))".format(sqlid)
# print(df)
rr = curs.execute(sqlplan)
for result in rr:
if ''.join(result) != ' ':
print(''.join(result))
|
import pygame
import os
from Card import Card
from Player import Player
class PlayerSprite(pygame.sprite.Sprite):
def __init__(self, name, room, uniqueID, hand):
super(PlayerSprite, self).__init__()
self.ID = uniqueID
self.name = name
self.room = room
self.surf = pygame.Surface((30, 30))
self.hand = hand
self.charImage = None
myfont = pygame.font.SysFont("monospace", 15)
if (name == "PLUM"):
#self.surf.fill((103, 58, 183))
#self.label = myfont.render("P" + str(uniqueID), 1, (45, 0, 125))
self.charImage = pygame.image.load(os.path.abspath("images/spr_plum.png"))
elif (name == "GREEN"):
#self.surf.fill((76, 175, 80))
#self.label = myfont.render("P" + str(uniqueID), 1, (6, 95, 10))
self.charImage = pygame.image.load(os.path.abspath("images/spr_green.png"))
elif (name == "SCARLET"):
#self.surf.fill((244, 67, 54))
#self.label = myfont.render("P" + str(uniqueID), 1, (150, 13, 0))
self.charImage = pygame.image.load(os.path.abspath("images/spr_scarlet.png"))
elif (name == "MUSTARD"):
#self.surf.fill((255, 193, 7))
#self.label = myfont.render("P" + str(uniqueID), 1, (185, 123, 0))
self.charImage = pygame.image.load(os.path.abspath("images/spr_mustard.png"))
elif (name == "WHITE"):
#self.surf.fill((250, 250, 250))
#self.label = myfont.render("P" + str(uniqueID), 1, (160, 160, 160))
self.charImage = pygame.image.load(os.path.abspath("images/spr_white.png"))
elif (name == "PEACOCK"):
#self.surf.fill((33, 150, 243))
#self.label = myfont.render("P" + str(uniqueID), 1, (0, 100, 173))
self.charImage = pygame.image.load(os.path.abspath("images/spr_peacock.png"))
self.rect = self.surf.get_rect()
#self.labelRect = self.label.get_rect()
self.charImage = pygame.transform.scale(self.charImage, (30, 30))
self.surf.blit(self.charImage, (0, 0))
# for initial position only
self.rect.y = 50 + (36 * uniqueID)
self.rect.x = 50
def draw(self, win):
win.blit(self.surf, self.rect)
#win.blit(self.label, [self.rect.x + 5, self.rect.y + 6])
#Getting possible moves, not handling shortcuts right now.
def get_possible_moves(self):
if self.room == "H1":
return [["w","e"],[0,2]]
elif self.room == "H2":
return [["w","e"],[2,4]]
elif self.room == "H6":
return [["w","e"],[8,10]]
elif self.room == "H7":
return [["w","e"],[10,12]]
elif self.room == "H11":
return [["w","e"],[16,18]]
elif self.room == "H12":
return [["w","e"],[18,20]]
elif self.room == "H3":
return [["n","s"],[0,8]]
elif self.room == "H4":
return [["n","s"],[2,10]]
elif self.room == "H5":
return [["n","s"],[4,12]]
elif self.room == "H8":
return [["n","s"],[8,16]]
elif self.room == "H9":
return [["n","s"],[10,18]]
elif self.room == "H10":
return [["n","s"],[12,20]]
elif self.room == "STUDY":
return [["e","s","se"],[1,5,10]]
elif self.room == "HALL":
return [["w","e","s"],[1,3,6]]
elif self.room == "LOUNGE":
return [["w","s","sw"],[3,7,10]]
elif self.room == "LIBRARY":
return [["n","e","s"],[5,9,13]]
elif self.room == "BILLIARD":
return [["w","n","e","s"],[9,6,11,14]]
elif self.room == "DINING":
return [["w","n","s"],[11,7,15]]
elif self.room == "CONSERVATORY":
return [["n","e","ne"],[13,17,10]]
elif self.room == "BALL":
return [["w","n","e"],[17,14,19]]
elif self.room == "KITCHEN":
return [["w","n","nw"],[19,15,10]]
def create_player_obj(self):
return Player(self.get_room_num(),self.ID,self.hand)
def get_room_num(self):
roomNum = 0
if self.room == "H1":
roomNum = 1
elif self.room == "H2":
roomNum = 3
elif self.room == "H6":
roomNum = 9
elif self.room == "H7":
roomNum = 11
elif self.room == "H11":
roomNum = 17
elif self.room == "H12":
roomNum = 19
elif self.room == "H3":
roomNum = 5
elif self.room == "H4":
roomNum = 6
elif self.room == "H5":
roomNum = 7
elif self.room == "H8":
roomNum = 13
elif self.room == "H9":
roomNum = 14
elif self.room == "H10":
roomNum = 15
elif self.room == "STUDY":
roomNum = 0
elif self.room == "HALL":
roomNum = 2
elif self.room == "LOUNGE":
roomNum = 4
elif self.room == "LIBRARY":
roomNum = 8
elif self.room == "BILLIARD":
roomNum = 10
elif self.room == "DINING":
roomNum = 12
elif self.room == "CONSERVATORY":
roomNum = 16
elif self.room == "BALL":
roomNum = 18
elif self.room == "KITCHEN":
roomNum = 20
return roomNum
|
# Task: Find the last ten digits of the number: 28433 * 2 ^ (7830457) + 1
# Take the last ten digits of the result of the expression
# C * E^B + D
def takeDigitFromExpression(C, E, B, D, numDigit):
myMod = 10 ** numDigit
return (((C % myMod) * pow(E, B, myMod)) % myMod + D % myMod) % myMod
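# Usage for the task stated above: C=28433, E=2, B=7830457, D=1, keeping the last ten digits.
if __name__ == '__main__':
    print(takeDigitFromExpression(28433, 2, 7830457, 1, 10))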
|
from django.conf.urls import include, url
from . import views
urlpatterns = [
url(r'^$', views.game_index, name='game_index'),
url(r'^(?P<id>[0-9]+)/$', views.game, name='game'),
url(r'^(?P<id>[0-9]+)/(?P<res>.+)$', views.res, name='res'),
]
|
__author__ = 'brianmendoza'
from Bio import Entrez, SeqIO
import webbrowser
import re
import os
class GenBankFile:
def __init__(self, organism):
Entrez.email = "bmendoz1@vols.utk.edu"
self.directory = "/Users/brianmendoza/Desktop/GenBank_files/"
self.org = organism
def setOrg(self, org):
self.org = org
def setDirectory(self, path, org):
self.directory = path
self.setOrg(org)
def convertToFasta(self):
orgfile = self.directory + self.org + ".gbff"
output = "/Users/brianmendoza/Desktop/GenBank_files/FASTAs/" + self.org + ".fna"
SeqIO.convert(orgfile, "genbank", output, "fasta")
def parseAnnotation(self):
gb_file = self.directory + self.org + ".gbff"
records = SeqIO.parse(open(gb_file,"r"), "genbank")
# create table for multi-targeting reference
table = {}
count = 0
for record in records:
count += 1
chrmnumber = str(count)
table[chrmnumber] = []
for feature in record.features:
if feature.type == 'CDS': # hopefully gene and CDS are the same
# getting the location...
loc = str(feature.location)
out = re.findall(r"[\d]+", loc)
start = out[0]
end = out[1]
if len(out) > 2: # to account for "joined" domains
end = out[3]
# locus_tag and product...
if 'locus_tag' in feature.qualifiers:
ltag = feature.qualifiers['locus_tag']
elif 'gene' in feature.qualifiers:
ltag = feature.qualifiers['gene']
if 'product' not in feature.qualifiers:
prod = feature.qualifiers['note']
else:
prod = feature.qualifiers['product']
# adding it all up...
tup = (start, end, ltag, prod)
table[chrmnumber].append(tup)
return table
def getChromSequence(self, index):
gb_file = self.directory + self.org + ".gbff"
records = SeqIO.parse(open(gb_file,"r"), "genbank")
count = 0
for record in records:
count += 1
if count == index:
cstr = record.seq
return cstr
class GffFile:
def __init__(self, organism):
self.directory = "/Users/brianmendoza/Desktop/GenBank_Files/"
self.org = organism
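# Hedged usage sketch (the organism name is a placeholder, and the hard-coded directory
# must already contain "<organism>.gbff"):
# gb = GenBankFile("example_organism")
# gb.convertToFasta()
# annotation_table = gb.parseAnnotation()
# first_chromosome = gb.getChromSequence(1)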
|
import urllib
from BeautifulSoup import *
url = raw_input('Enter - ')
html = urllib.urlopen(url).read()
soup = BeautifulSoup(html)
#Retrieve all of the span tags
tags = soup('span')
num3 = []
for tag in tags:
# each span's first content item holds the number; convert it and collect it once
num3.append(int(tag.contents[0]))
print sum(num3)
|
from functools import reduce
def f(x):
return x*x
r = map(f,[1,2,3,4,5,6,7,8,9])
print(list(r))
print(list(map(str, [1, 2, 3, 4, 5, 6, 7, 8, 9])))
def fn(x,y):
return x*10+y
print(reduce(fn,[1,3,5,7,9]))
def not_empty(s):
return s and s.strip()
print(list(filter(not_empty,['A',' ','',None,'B',' ','C'])))
def _odd_iter():
n = 1
while True:
n = n + 2
yield n
def _not_divisible(n):
return lambda x: x % n >0
def primes():
yield 2
it = _odd_iter()
while True:
n = next(it)
yield n
it = filter(_not_divisible(n),it)
# Print the primes below 1000:
for n in primes():
if n < 1000:
print(n)
else:
break
def is_palindrome(n):
a=str(n)[::-1]
return int(a)==n
output = filter(is_palindrome, range(1, 1000))
print('1~1000:', list(output))
if list(filter(is_palindrome, range(1, 200))) == [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 22, 33, 44, 55, 66, 77, 88, 99, 101, 111, 121, 131, 141, 151, 161, 171, 181, 191]:
print('测试成功!')
else:
print('测试失败!')
print( sorted(['bob', 'about', 'Zoo', 'Credit'], key=str.lower, reverse=True))
def by_name(t):
return t[0].lower()
def by_score(t):
return t[1]
Lsorted = [('Bob', 75), ('Adam', 92), ('Bart', 66), ('Lisa', 88)]
print(sorted(Lsorted,key=lambda x:x[0]))
print(sorted(Lsorted,key=lambda x:x[1]))
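# The helper key functions defined above can be used in place of the lambdas, e.g.:
print(sorted(Lsorted, key=by_name))
print(sorted(Lsorted, key=by_score, reverse=True))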
|
# -*- extra stuff goes here -*-
import permissions
# make permissions available for GenericSetup
permissions
def initialize(context):
"""Initializer called when used as a Zope 2 product."""
|
# http://www.practicepython.org/exercise/2014/04/25/12-list-ends.html
def principioFinal(lista):
listares = [item for item in lista if (item == lista[0] or item == lista[-1])]
return listares
a = [5, 10, 15, 20, 25]
print(a)
print (principioFinal(a))
|
import model
from args import get_args
from data import DataLoader
from data import DcardDataset
from data import customed_collate_fn
import pickle
from utils import load_training_args
import torch
def infer(
total_test,
test_id,
test_x,
sentence_length,
my_model,
batch_size,
output_filename,
use_cuda=True):
if use_cuda:
my_model = my_model.cuda()
dcard_test_dataset = DcardDataset(
total_test, test_x, test_id, sentence_length)
test_loader = torch.utils.data.DataLoader(
dataset=dcard_test_dataset,
batch_size=batch_size,
shuffle=False,
collate_fn=customed_collate_fn
)
preds = None
with torch.no_grad():
for (x, x_id, length) in test_loader:
if use_cuda:
x, x_id, length = x.cuda(), x_id.cuda(), length.cuda()
pred = my_model.forward(x, length)
pred[pred >= 0.5] = 1
pred[pred < 0.5] = 0
pred = pred[torch.sort(x_id)[1]]
if preds is None:
preds = pred
else:
preds = torch.cat([preds, pred])
with open(output_filename, 'w') as f:
f.write('id,label\n')
for i, ele in enumerate(preds):
f.write('%d,%d\n' % (i, ele))
def get_test_data(filename, word_dict_filename):
dl = DataLoader(
create_word_dict=False,
word_dict_filename=word_dict_filename)
test_x = dl.load_data_x(filename)
sentence_length = dl.get_sentence_length()
return test_x, sentence_length
def load_model(model_name, model_filename, args_filename):
try:
model_class_object = getattr(model, model_name)
except AttributeError:
raise Exception('Model %s not found in model.py' % model_name)
training_args = load_training_args(args_filename)
training_args['load_model_filename'] = model_filename
my_model = model_class_object(training_args, train=False)
return my_model
if __name__ == '__main__':
args = get_args(train=False)
use_cuda = not args.no_cuda
test_x, sentence_length = \
get_test_data(args.test_x_filename, args.word_dict_filename)
total_test = len(test_x)
test_id = [i for i in range(total_test)]
my_model = load_model(args.model, args.model_filename, args.args_filename)
my_model.eval()
print('Start infering...')
infer(
total_test=total_test,
test_id=test_id,
test_x=test_x,
sentence_length=sentence_length,
my_model=my_model,
batch_size=args.batch_size,
output_filename=args.output,
use_cuda=use_cuda)
|
#!/usr/bin/env python
import sys
import numpy as np
import scipy.stats
import logging
import codecs
from numpy import float64
from scipy.sparse import dok_matrix, csr_matrix, coo_matrix
from sklearn.preprocessing import normalize
logging.basicConfig(
format="[ %(levelname)-10s %(module)-8s %(asctime)s %(relativeCreated)-10d ] %(message)s",
datefmt="%H:%M:%S:%m",
level=logging.DEBUG)
vocab_file = "/scratch/01813/roller/corpora/webko/TermDoc/target-labels.txt"
assoc_file = "/home/01813/roller/tmp/imsgrounded/data/associations/big_assoc_vectors.txt"
nn_file = "/home/01813/roller/tmp/imsgrounded/data/nn.txt"
assoc_counts = {}
resp_counts = {}
dots_to_compute = set()
nns = set([l.strip() for l in codecs.getreader('utf-8')(open(nn_file)).readlines()])
logging.info("reading vocab")
with codecs.getreader('utf-8')(open(vocab_file)) as f:
lines = f.readlines()
lines = (l.strip() for l in lines)
lines = (l.split("\t")[1] for l in lines if l)
lines = list(lines)
lines = (l[:l.rindex("/")] for l in lines)
vocab = set(lines)
logging.info("reading assoc")
num_ignored = 0
num_oov = 0
with codecs.getreader('utf-8')(open(assoc_file)) as f:
for line in f:
line = line.strip()
if not line: continue
cue, resp, count = line.split("\t")
assert (cue, resp) not in assoc_counts
if cue not in nns:
#logging.info("ignoring b/c not in list of NN compounds.")
num_ignored += 1
continue
if cue == resp:
logging.info( "ignoring line b/c the response is the cue...")
num_ignored += 1
continue
if False and resp not in vocab:
#logging.info( "ignoring line b/c response (%s) is OOV..." % line)
num_oov += 1
continue
dots_to_compute.add(cue)
assoc_counts[(cue, resp)] = int(count)
resp_counts[resp] = resp_counts.get(resp, 0) + int(count)
logging.info("reading vs (pass 1)")
# get ids for dimensions and shape of matrix
targets = dict()
dims = dict()
R = 75678
C = 1038883
data, rows, cols = [], [], []
from collections import defaultdict
fast_target_lookup = defaultdict(list)
with codecs.getreader('utf-8')(sys.stdin) as vs:
#with codecs.getreader('utf-8')(open(sys.argv[1])) as vs:
for line in vs:
line = line.strip()
#dim, target, weight = line.split("\t")
#weight = float(weight)
target, dim, freq, lmi, pmi = line.split("\t")
lmi = float(freq) * float(pmi)
#weight = float(freq)
weight = lmi
if target not in targets:
fast_target_lookup[target[:target.rindex('/')]].append(len(targets))
targets[target] = len(targets)
t = targets[target]
if dim not in dims:
dims[dim] = len(dims)
d = dims[dim]
data.append(weight)
rows.append(t)
cols.append(d)
coo = coo_matrix((data, (rows, cols)))
del data, rows, cols
sp_vectors = coo.tocsr()
del coo
# now we need to get out only the vectors for which we need ranked cosines:
logging.info("Looking up vectors we need ranks for")
row_ids = []
row_names = []
baddies = []
for word in dots_to_compute:
if word + '/NN' in targets:
row_ids.append(targets[word + '/NN'])
row_names.append(word)
else:
baddies.append(word)
from itertools import count, izip
row_lookup = dict(izip(row_names, count()))
logging.warning("bads: %d / goods: %d" % (len(baddies), len(row_ids)))
logging.warning("baddies: %s" % baddies)
logging.info("extracting just nn vectors.")
nn_vectors = sp_vectors[row_ids]
# pairwise cosine
from sklearn.metrics.pairwise import cosine_similarity
logging.info("computing pairwise cosines")
coses = cosine_similarity(nn_vectors, sp_vectors)
del sp_vectors
def percentile_ranked(similarities):
return np.ceil(scipy.stats.rankdata(similarities)) / len(similarities)
logging.info("ranking...")
ranked_coses = np.array(map(percentile_ranked, coses))
# okay, got all our cosines. let's add up the averages with assocs
logging.info("Now computing ranked averages.")
hits = []
num_foundhigher = 0
for (cue, resp), cnt in assoc_counts.iteritems():
if cue not in row_lookup:
continue
rightids = fast_target_lookup[resp]
if not rightids:
continue
leftid = row_lookup[cue]
sims = ranked_coses[leftid,:]
options = sims[rightids]
num_foundhigher += len(options) - 1
hits += [max(options)] * cnt
print "num skipped: %d" % num_ignored
print "num oov: %d" % num_oov
print "num higher: %d" % num_foundhigher
print "Average sim of located associations: %f" % np.average(hits)
print "Std dev of located associations: %f" % np.std(hits)
print "Percentiles [.05, .10, .25, .5, .75, .90, .95] ="
print " [%.8f, %.8f, %.8f, %.8f, %.8f, %.8f, %.8f]" % tuple([scipy.stats.scoreatpercentile(hits, p) for p in [5, 10, 25, 50, 75, 90, 95]])
mean_ctr95, var_ctr, std_ctr = scipy.stats.bayes_mvs(hits, alpha=0.95)
mean_ctr50, var_ctr, std_ctr = scipy.stats.bayes_mvs(hits, alpha=0.50)
(m, (lb, ub)) = mean_ctr95
(m, (mlb, mub)) = mean_ctr50
print "bayes_mvs = [%.8f %.8f %.8f %.8f %.8f]" % (lb, mlb, m, mub, ub)
#print "Average sim of all allocations: %f" % (sum(hits) / sum(assoc_counts.values()))
|
myFloat = 2.0
# string (a string of characters)
myString = "Bacon Pancakes"
print (myFloat)
print (myString)
|
# Exercise 5.15 - from the book
total = 0
while True:
preco = 0
invalido = False
cod = int(input('Código do produto (0 para sair): '))
if cod == 0:
print('=-=' * 10)
break
elif cod == 1:
preco = 0.5
elif cod == 2:
preco = 1
elif cod == 3:
preco = 4
elif cod == 5:
preco = 7
elif cod == 9:
preco = 8
else:
invalido = True
print('Produto inválido!')
if not invalido:
qtd = int(input('Quantidade de vendas: '))
total += (qtd * preco)
print('=-=' * 10)
print(f'Total: R${total:.2f}')
|
from evidence_retrieval.data_source import DataSource
from evidence_retrieval.data_source import ChineseTokenizer
import os
a = DataSource
print(a.get_evidence('牙疼'))
# ..
print(os.pardir)
# path of the current file's directory
print(os.path.dirname(__file__))
# parent directory path
print(os.path.abspath(os.path.dirname(__file__)+os.path.sep+os.pardir))
# /
print(os.path.sep)
print(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir))
# grandparent directory path
print(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)))
|
# import psutil
import os
from os.path import join
from time import time, ctime
import pygame, sys, platform, psutil
import pygame.display
# Pygame initialization
pygame.init()
pygame.font.init()
# Define and show the screen
largura_tela = 1376
altura_tela = 800
tela = pygame.display.set_mode((largura_tela, altura_tela))
pygame.display.init()
# Window title and screen refresh cycle
pygame.display.set_caption("Gerenciador de tarefas.")
clock = pygame.time.Clock()
count = 60
# Font size configuration
font = pygame.font.Font(None, 32)
# Surface definitions for the bars
s1 = pygame.surface.Surface((largura_tela, altura_tela / 5))
s2 = pygame.surface.Surface((largura_tela, altura_tela / 5))
s3 = pygame.surface.Surface((largura_tela, altura_tela / 5))
s4 = pygame.surface.Surface((largura_tela, altura_tela / 5))
s5 = pygame.surface.Surface((largura_tela, altura_tela / 5))
# Colors used on the screen
cordofundo = (0, 0, 0)
cordabarra = (0, 0, 255)
cordoindice = (255, 0, 0)
cordafonte = (255, 255, 255)
def getDiretorio():
try:
diretorio = input("Entre com o caminho do diretório : ")
arquivos = [f for f in os.listdir(diretorio)]
arq = []
for i in arquivos:
caminho = join(diretorio, i)
# the file "type" recorded here is simply its extension
tipo = os.path.splitext(i)[1]
obj = {
"Nome Arquivo": i,
"Tamanho": os.path.getsize(caminho),
"Diretório": caminho,
"Data de criação": ctime(os.path.getctime(caminho)),
"Data de Modificação": ctime(os.path.getmtime(caminho)),
"Tipo": tipo
}
arq.append(obj)
return arq
except Exception as erro:
print(str(erro))
def mostraMem():
mem = psutil.virtual_memory()
return mem
def mostraCPU():
cpu = psutil.cpu_percent(interval=0)
return cpu
def mostraDisco():
disco = psutil.disk_usage('.')
return disco
def desenha_barra_mem():
mem = psutil.virtual_memory()
larg = largura_tela - 2 * 20
s1.fill(cordofundo)
pygame.draw.rect(s1, cordabarra, (20, 50, larg, 70))
tela.blit(s1, (0, 0))
larg = larg * mem.percent / 100
pygame.draw.rect(s1, cordoindice, (20, 50, larg, 70))
tela.blit(s1, (0, 0))
total = round(mem.total / (1024 * 1024 * 1024), 2)
texto_barra = "Uso de Memória (Total: " + str(total) + "GB) (Utilizando: " + str(mem.percent) + " %):"
text = font.render(texto_barra, 1, cordafonte)
tela.blit(text, (20, 10))
def desenha_barra_cpu():
cpu = mostraCPU()
largura = largura_tela - 2 * 20
s2.fill(cordofundo)
pygame.draw.rect(s2, cordabarra, (20, 50, largura, 70))
tela.blit(s2, (0, altura_tela / 4))
largura = largura * cpu / 100
pygame.draw.rect(s2, cordoindice, (20, 50, largura, 70))
tela.blit(s2, (0, altura_tela / 4))
texto_barra = "Uso de CPU: (" + str(cpu) + " %):"
texto_proc = "Cpu: (" + str(platform.processor()) + "):"
text = font.render(texto_barra, 1, cordafonte)
text_proc = font.render(texto_proc, 1, cordafonte)
tela.blit(text, (20, (altura_tela / 4)))
tela.blit(text_proc, (20, (altura_tela / 4) + 25))
def desenha_uso_hd():
disco = mostraDisco()
largura = largura_tela - 2 * 20
s3.fill(cordofundo)
pygame.draw.rect(s3, cordabarra, (20, 50, largura, 70))
tela.blit(s3, (0, 2 * altura_tela / 4))
largura = largura * disco.percent / 100
pygame.draw.rect(s3, cordoindice, (20, 50, largura, 70))
tela.blit(s3, (0, 2 * altura_tela / 4))
texto_barra = "Uso de Disco: (" + str(disco.percent) + " %):"
text = font.render(texto_barra, 1, cordafonte)
tela.blit(text, (20, (2 * altura_tela / 4)))
def desenha_uso_hd2():
disco = mostraDisco()
largura = largura_tela - 2 * 20
s4.fill(cordofundo)
pygame.draw.rect(s4, cordabarra, (20, 50, largura, 70))
tela.blit(s4, (0, 3 * altura_tela / 4))
largura = largura * disco.percent / 100
pygame.draw.rect(s4, cordoindice, (20, 50, largura, 70))
tela.blit(s4, (0, 3 * altura_tela / 4))
texto_barra = "Uso de Disco: (" + str(disco.percent) + " %):"
text = font.render(texto_barra, 1, cordafonte)
tela.blit(text, (20, (3 * altura_tela / 4)))
# Screen loop
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
if count == 60:
desenha_barra_cpu()
desenha_barra_mem()
desenha_uso_hd()
desenha_uso_hd2()
count = 0
pygame.display.update()
clock.tick(60)
count += 1
|
import os, subprocess, logging
"""
Copied from the network.
"""
def amr2mp3(amr_path, mp3_path=None):
""" convert amr to mp3 just amr file to mp3 file
"""
path, name = os.path.split(amr_path)
if name.split('.')[-1] != 'amr':
print('not a amr file')
return 0
if mp3_path is None or mp3_path.split('.')[-1] != 'mp3':
mp3_path = os.path.join(path, name + '.mp3')
error = subprocess.call(['ffmpeg', '-i', amr_path, mp3_path])
if error:
logging.error('[Convert Error]:Convert file-%s to mp3 failed' % amr_path)
return 0
return mp3_path
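# Hedged usage sketch (the path is hypothetical and ffmpeg must be available on PATH):
# converted = amr2mp3('/path/to/recording.amr')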
|
#!/usr/bin/env python3
# DECORATOR --
# def vermelho(retorno_funcao):
# def modificaCor(retorno_funcao):
# return f'\033[91m{retorno_funcao}\033[0m'
# return modificaCor
#
# @vermelho # Decorator - Recebe o retorno da funcao abaixo e executa
# def texto(texto):
# return texto
#
#print(texto('Uma palavra'))
# import requests as r
#
# data = {
# "name":"Abner"
# }
#
# res = r.get('http://gen-net.herokuapp.com/api/users', params=data)
# print(res.json())
import requests
res = requests.post('http://127.0.0.1/api?id=12')
print(res)
# res = res.json()
# print(res['nome'])
|
def is_pal(n):
return n == n[::-1]
def is_skjult(n):
return is_pal(str(int(n) + int(n[::-1])))
count = 0
for i in range(123454322):
n = str(i)
if is_pal(n):
continue
elif is_skjult(n):
count += i
print(count)
|
from argparse import ArgumentParser
from microstrategy_api.microstrategy_com import MicroStrategyCom
import logging
log = logging.getLogger(__name__)
def main():
parser = ArgumentParser(description="Reset a users password")
parser.add_argument('server', type=str,)
parser.add_argument('admin_user', type=str,)
parser.add_argument('admin_password', type=str,)
parser.add_argument('user', type=str,)
args = parser.parse_args()
new_password = input("New password:")
with MicroStrategyCom(server=args.server,
user_id=args.admin_user,
password=args.admin_password) as com_server:
log.debug("MSTR connect good")
com_server.reset_password(user_id=args.user,
new_password=new_password)
if __name__ == '__main__':
main()
|
# I, Michael Catania, agree to the Stevens Honor Code
# This program determines whether a certain date is real
def main():
date = input("Enter a date in month/day/year form:")
m,d,y = date.split('/')
mo = int(m)
day = int(d)
ye = int(y)
if(mo==1 or mo==3 or mo==5 or mo==7 or mo==8 or mo==10 or mo==12):
end=31
elif(mo==4 or mo==6 or mo==9 or mo==11):
end=30
else:
end=28
if(mo < 1 or mo > 12):
print("Date is invalid")
elif(day < 1 or day > end):
print("Date is invalid")
else:
print("Date is valid")
main()
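# Example runs: "2/30/2021" prints "Date is invalid" (February is capped at 28 days here,
# so leap years are not handled), while "12/31/2021" prints "Date is valid".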
|
# A sequence A of N integers and an integer X are given.
# Write a program that prints every number in A that is smaller than X.
# Read N and X
N,X = map(int, input().split())
# Read the N integers that make up sequence A
A = list(map(int, input().split()))
for i in range(N):
if A[i]<X:
print(A[i], end=' ')
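# Example: with input "5 3" and "1 5 2 9 0", the program prints "1 2 0".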
|
#!/usr/bin/env python
import webapp2
import jinja2
import os
from objects.usermeta import UserMeta
from objects.player import *
from objects.team import Team
from utilities import *
from google.appengine.api import users
from google.appengine.ext import db
jinja_environment = jinja2.Environment(loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
jinja_environment.globals['logout']=users.create_logout_url('/')
jinja_environment.globals['page']='team'
class SelectHandler(webapp2.RequestHandler):
def get(self):
user=get_meta()
if not user:
self.redirect("/")
team=Team.all().filter("user =",user).get()
if team:
self.redirect("/team/edit")
players=Player.all().filter("active =",True).fetch(100)
template_values={'user_meta':user,'players':players,'budget':user.budget}
template=jinja_environment.get_template('templates/select_team.html')
self.response.out.write(template.render(template_values))
def post(self):
user_meta=get_meta()
if not user_meta:
self.redirect("/")
team=Team.all().filter('user =',user_meta).get()
if team:
self.redirect("/team/edit")
team = Team(user=user_meta,game=next_game())
budget=user_meta.budget
selected_batsmen=self.request.get_all('batting')
selected_bowlers=self.request.get_all('bowling')
selected_fieldsmen=self.request.get_all('fielding')
if len(selected_batsmen) > 5 or len(selected_bowlers) > 3 or len(selected_fieldsmen) > 3:
self.response.out.write("Error: too many selected")
return
if not self.request.get('captain'):
self.response.out.write("Error: no captain selected")
return
captain=self.request.get('captain').split(':',1)
if captain[0] not in ['bat','bowl','field']:
self.response.out.write("Error: invalid captain selection")
return
if captain[0] == 'bat' and captain[1] not in selected_batsmen:
self.response.out.write("Error: captain must be a member of team")
return
if captain[0] == 'bowl' and captain[1] not in selected_bowlers:
self.response.out.write("Error: captain must be a member of team")
return
if captain[0] == 'field' and captain[1] not in selected_fieldsmen:
self.response.out.write("Error: captain must be a member of team")
return
team.captain_type=db.Category(captain[0])
team.captain=db.get(captain[1])
for key in selected_batsmen:
player=db.get(key)
budget-=player.batting_price
team.batsmen.append(player.key())
for key in selected_bowlers:
player=db.get(key)
budget-=player.bowling_price
team.bowlers.append(player.key())
for key in selected_fieldsmen:
player=db.get(key)
budget-=player.fielding_price
team.fielders.append(player.key())
if budget < 0:
self.response.out.write("You went over budget")
return
#if team.game.round > 1:
# team1=Team(user=team.user,game=Game.all().filter('round =',1).get(),batsmen=team.batsmen,bowlers=team.bowlers,fielders=team.fielders,captain=team.captain,captain_type=team.captain_type)
# team1.put()
team.put()
user_meta.budget = budget
save_user(user_meta)
self.redirect('/team')
class ViewHandler(webapp2.RequestHandler):
def view_game(self,team_id=None):
user_meta=get_meta()
if not user_meta:
self.redirect("/")
team_user=None
if team_id:
team_user=UserMeta.get_by_id(int(team_id))
else:
team_user=get_meta()
round=int(self.request.get('round'))
game=Game.all().filter("round =",round).get()
team=get_team(team_user,game)
if not team:
self.response.out.write("Error: team not found")
return
batsmen=[]
dnbat=[]
bowlers=[]
dnbowl=[]
fielders=[]
dnf=[]
batsmen=PlayerGame.all().filter("game =",game).filter("player IN",team.batsmen).run()
bowlers=PlayerGame.all().filter("game =",game).filter("player IN",team.bowlers).run()
fielders=PlayerGame.all().filter("game =",game).filter("player IN",team.fielders).run()
template_values={'user_meta':user_meta,'lockout':lockout(),'current_round':current_round(),'batsmen':batsmen,'bowlers':bowlers,'fielders':fielders,'team':team,'game':game}
if check_mobile():
template=jinja_environment.get_template('templates/mobile/game.html')
else:
template=jinja_environment.get_template('templates/game.html')
self.response.out.write(template.render(template_values))
def get(self,team_id=None):
user_meta=get_meta()
if not user_meta:
self.redirect("/")
if self.request.get('round'):
self.view_game(team_id)
return
team_user=None
if team_id:
team_user=UserMeta.get_by_id(int(team_id))
else:
team_user = get_meta()
game=next_game()
round=current_round()-1
if round < 0:
round = 5
if not game:
game=Game.all().filter("round =", round).get()
team=get_team(team_user,game)
if not team:
if team_user.key()!=user_meta.key():
self.response.out.write("Error: team not found")
else:
self.redirect('/team/select')
return
batsmen=[]
bowlers=[]
fielders=[]
for key in team.batsmen:
batsmen.append(Player.get(key))
for key in team.bowlers:
bowlers.append(Player.get(key))
for key in team.fielders:
fielders.append(Player.get(key))
template_values={'user_meta':user_meta,'lockout': lockout(),'current_round':current_round(),'batsmen':batsmen,'bowlers':bowlers,'fielders':fielders,'team_user':team_user,'team':team}
if check_mobile():
template=jinja_environment.get_template('templates/mobile/view_team.html')
else:
template=jinja_environment.get_template('templates/view_team.html')
self.response.out.write(template.render(template_values))
class EditHandler(webapp2.RequestHandler):
def get(self):
if lockout():
self.response.out.write('Lockout in place')
return
user = get_meta()
if not user:
self.redirect("/")
game = next_game()
team=get_team(user,game)
template_values={'user_meta':user,'lockout':lockout(),'budget':user.budget,'round_trades':user.round_trades,'total_trades':user.total_trades}
template_values['selected_batsmen'], template_values['available_batsmen'] = selected_available(team.batsmen)
template_values['selected_bowlers'], template_values['available_bowlers'] = selected_available(team.bowlers)
template_values['selected_fielders'], template_values['available_fielders'] = selected_available(team.fielders)
template_values['captain_type']=team.captain_type
template_values['captain']=team.captain
template=jinja_environment.get_template('templates/edit_team.html')
self.response.out.write(template.render(template_values))
def post(self):
if lockout():
self.response.out.write('Lockout in place')
return
user = get_meta()
if not user:
self.redirect("/")
game = next_game()
team=get_team(user,game)
# batsmen
dropped_batsmen = self.request.get_all('dropped_batsmen')
picked_batsmen = self.request.get_all('picked_batsmen')
dropped_bowlers = self.request.get_all('dropped_bowlers')
picked_bowlers = self.request.get_all('picked_bowlers')
dropped_fielders = self.request.get_all('dropped_fielders')
picked_fielders = self.request.get_all('picked_fielders')
trades = len(picked_batsmen)+len(picked_bowlers)+len(picked_fielders)
if trades > user.round_trades:
self.response.out.write("Error: insufficient trades remaining")
return
user.round_trades -= trades
user.total_trades -= trades
captain=self.request.get('captain')
if(captain):
tokens=captain.split(':',1)
captain_player=Player.get_by_id(int(tokens[1]))
if tokens[0] not in ['bat','bowl','field']:
self.response.out.write("Error: invalid captain selection")
return
if tokens[0] == 'bat' and (tokens[1] in dropped_batsmen or captain_player.key() not in team.batsmen):
self.response.out.write("Error: captain must be a member of team")
return
if tokens[0] == 'bowl' and (tokens[1] in dropped_bowlers or captain_player.key() not in team.bowlers):
self.response.out.write("Error: captain must be a member of team")
return
if tokens[0] == 'field' and (tokens[1] in dropped_fielders or captain_player.key() not in team.fielders):
self.response.out.write("Error: captain must be a member of team")
return
team.captain_type=db.Category(tokens[0])
team.captain=captain_player
else:
team.captain=None
team.captain_type=None
budget=user.budget
for p in dropped_batsmen:
player = Player.get_by_id(int(p))
if team.drop_player(player,'batsman'):
budget += player.batting_price
else:
self.response.out.write("Error: dropped player not in team")
return
for p in dropped_bowlers:
player = Player.get_by_id(int(p))
if team.drop_player(player,'bowler'):
budget += player.bowling_price
else:
self.response.out.write("Error: dropped player not in team")
return
for p in dropped_fielders:
player = Player.get_by_id(int(p))
if team.drop_player(player,'fielder'):
budget += player.fielding_price
else:
self.response.out.write("Error: dropped player not in team")
return
for p in picked_batsmen:
player = Player.get_by_id(int(p))
if team.pick_player(player,'batsman'):
budget -= player.batting_price
else:
self.response.out.write("Error")
return
for p in picked_bowlers:
player = Player.get_by_id(int(p))
if team.pick_player(player,'bowler'):
budget -= player.bowling_price
else:
self.response.out.write("Error")
return
for p in picked_fielders:
player = Player.get_by_id(int(p))
if team.pick_player(player,'fielder'):
budget -= player.fielding_price
else:
self.response.out.write("Error")
return
if budget < 0:
self.response.out.write("Error: budget exceeded")
return
user.budget = budget
save_user(user)
team.put()
self.redirect('/team')
class LadderHandler(webapp2.RequestHandler):
def get(self):
user_meta=get_meta()
if not user_meta:
self.redirect("/")
teams=UserMeta.all().order('-total_points')
template_values={'user_meta':user_meta,'page':'ladder','teams':teams}
if check_mobile():
template=jinja_environment.get_template('templates/mobile/ladder.html')
else:
template=jinja_environment.get_template('templates/ladder.html')
self.response.out.write(template.render(template_values))
app = webapp2.WSGIApplication([('/team/ladder',LadderHandler),
('/team/select',SelectHandler),
('/team',ViewHandler),
('/team/edit',EditHandler),
webapp2.Route('/team/<team_id>',handler=ViewHandler)],debug=True)
|
n = int(input())
a = list(map(int, input().split()))
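# i + i%2 - 1 maps each value to the largest odd number not exceeding it:
# even values drop by one, odd values stay the same.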
a = [i + i%2 - 1 for i in a]
print(*a)
|
from collections import defaultdict
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn.metrics import accuracy_score
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from .. import nets
from ..analysis.searchstims import compute_d_prime
from ..engine.abstract_trainer import AbstractTrainer
from ..transforms.functional import tile
class VOCAssayer:
"""class for running "behavioral assay" of models using Pascal VOC / Visual Search Difficulty dataset"""
NUM_WORKERS = 4
def __init__(self,
net_name,
model,
loss_func,
testset,
restore_path,
mode='classify',
batch_size=64,
sigmoid_threshold=0.5,
device='cuda',
num_workers=NUM_WORKERS,
data_parallel=False,
):
"""
Parameters
----------
net_name : str
name of convolutional neural net architecture to train.
One of {'alexnet', 'VGG16'}
model : torch.nn.Module
actual instance of network.
loss_func : str
that represents loss function and target that should be used with it.
Used to determine targets for computing loss, and for metrics to use
when determining whether to stop early due to metrics computed on
validation set.
testset : torch.Dataset or torchvision.Visiondataset
test data, represented as a class.
restore_path : Path
path to directory where checkpoints and train models were saved
mode : str
training mode. One of {'classify', 'detect'}.
'classify' is standard image classification.
'detect' trains to detect whether specified target is present or absent.
Default is 'classify'.
batch_size : int
number of training samples per batch
sigmoid_threshold : float
threshold to use when converting sigmoid outputs to binary vectors.
Only used for VSD dataset, where multi-label outputs are expected.
device : str
One of {'cpu', 'cuda'}
num_workers : int
Number of workers used when loading data in parallel. Default is 4.
data_parallel : bool
if True, use torch.nn.dataparallel to train network on multiple GPUs. Default is False.
"""
self.net_name = net_name
self.data_parallel = data_parallel
if data_parallel:
model = nn.DataParallel(model)
self.restore_path = restore_path
best_ckpt_path = restore_path.parent.joinpath(
restore_path.name + AbstractTrainer.BEST_VAL_ACC_CKPT_SUFFIX
)
if not best_ckpt_path.exists():
ckpt_path = restore_path.parent.joinpath(
restore_path.name + AbstractTrainer.DEFAULT_CKPT_SUFFIX)
if not ckpt_path.exists():
raise ValueError(
f'did not find a checkpoint file in restore path: {restore_path}.\n'
f'Looked for a checkpoint saved upon best val accuracy: {best_ckpt_path.name} \n'
f'and for a checkpoint saved during or at the end of training: {ckpt_path.name}'
)
self.ckpt_path_loaded_from = ckpt_path
else:
self.ckpt_path_loaded_from = best_ckpt_path
checkpoint = torch.load(self.ckpt_path_loaded_from)
model.load_state_dict(checkpoint['model'])
model.to(device)
self.model = model
self.device = device
self.testset = testset
self.test_loader = DataLoader(self.testset, batch_size=batch_size,
shuffle=False, num_workers=num_workers,
pin_memory=True)
self.mode = mode
self.batch_size = batch_size
self.sigmoid_threshold = sigmoid_threshold
self.sigmoid_activation = torch.nn.Sigmoid()
self.softmax_activation = torch.nn.Softmax(dim=1)
self.loss_func = loss_func
@classmethod
def from_config(cls,
net_name,
num_classes,
loss_func,
testset,
mode='classify',
embedding_n_out=512,
**kwargs):
"""factory function that creates instance of VOCAssayer from options specified in config.ini file
Parameters
----------
net_name : str
name of neural network architecture. Used when restoring model, checkpoints, etc.
num_classes : int
number of classes. Default is 2 (target present, target absent).
loss_func : str
type of loss function to use. One of {'CE', 'InvDPrime', 'triplet'}. Default is 'CE',
the standard cross-entropy loss. 'InvDPrime' is inverse D prime. 'triplet' is triplet loss
used in face recognition and biometric applications.
testset : torch.Dataset or torchvision.Visiondataset
split of dataset for testing model, represented as a class.
mode : str
training mode. One of {'classify', 'detect'}.
'classify' is standard image classification.
'detect' trains to detect whether specified target is present or absent.
Default is 'classify'.
embedding_n_out : int
for DetectNet, number of output features from input embedding.
I.e., the output size of the linear layer that accepts the
one hot vector querying whether a specific class is present as input.
Default is 512.
kwargs : keyword arguments to VOCAssayer
Returns
-------
assayer : VOCAssayer
instance of class, initialized with passed attributes.
"""
if net_name == 'alexnet':
model = nets.alexnet.build(pretrained=False, progress=False, num_classes=num_classes)
elif net_name == 'VGG16':
model = nets.vgg16.build(pretrained=False, progress=False, num_classes=num_classes)
elif 'cornet' in net_name.lower():
model = nets.cornet.build(model_name=net_name, pretrained=False, num_classes=num_classes)
else:
raise ValueError(
f'invalid value for net_name: {net_name}'
)
if mode == 'detect':
# remove final output layer, will replace
if net_name == 'alexnet' or net_name == 'VGG16':
model.classifier = model.classifier[:-1]
elif 'cornet' in net_name.lower():
# for CORnet models, also need to remove 'output' layer (just an Identity)
model.decoder = model.decoder[:-2]
a_sample = next(iter(testset))
tmp_img = a_sample['img'].unsqueeze(0) # add batch dim
tmp_out = model(tmp_img)
vis_sys_n_features_out = tmp_out.shape[-1] # (batch, n features)
model = nets.detectnet.DetectNet(vis_sys=model,
num_classes=num_classes,
vis_sys_n_out=vis_sys_n_features_out,
embedding_n_out=embedding_n_out)
if loss_func in {'CE', 'CE-largest', 'CE-random'}:
criterion = nn.CrossEntropyLoss()
elif loss_func == 'BCE':
criterion = nn.BCEWithLogitsLoss()
else:
raise ValueError(
f'invalid value for loss function: {loss_func}'
)
return cls(net_name=net_name,
model=model,
loss_func=loss_func,
mode=mode,
testset=testset,
**kwargs)
def assay(self):
"""assay behavior of trained model
Returns
-------
results : dict
with following key-value pairs:
arrays : dict
inputs / outputs to networks as numpy arrays
y_true_onehot : numpy.ndarray
true classes present in image, encoded as "one" hot vectors
(actually can be more than one class present)
out : numpy.ndarray
output of network after non-linear activation has been applied,
sigmoid if trained with binary cross entropy loss,
or softmax if trained with cross entropy
y_pred : numpy.ndarray
prediction after thresholding (for sigmoid)
or finding argmax (softmax) of output
df_test : pandas.DataFrame
where each row is a sample from the dataset
"""
self.model.eval()
total = int(np.ceil(len(self.testset) / self.batch_size))
pbar = tqdm(self.test_loader)
# lists of numpy arrays that get concatenated at the end,
# save for further analysis if required
arrays = defaultdict(list)
# will use with Pandas.DataFrame.from_records()
# to make dataframe of test results, where each row is one sample from test set
image_records = defaultdict(list)
trial_records = defaultdict(list)
with torch.no_grad():
for i, batch in enumerate(pbar):
pbar.set_description(f'batch {i} of {total}')
if self.mode == 'classify':
# ---- get outputs ----
x_batch, y_true_onehot_batch = batch['img'].to(self.device), batch['target'].to(self.device)
batch_size, n_classes = y_true_onehot_batch.shape # used for np.split below
arrays['y_true_onehot'].append(y_true_onehot_batch.cpu().numpy())
out_batch = self.model(x_batch)
# ---- pass outputs through activation ----
if self.loss_func == 'BCE':
out_batch = self.sigmoid_activation(out_batch)
y_pred_batch = (out_batch > self.sigmoid_threshold).float()
elif self.loss_func == 'CE-largest' or self.loss_func == 'CE-random':
out_batch = self.softmax_activation(out_batch)
_, y_pred_batch = torch.max(out_batch.data, 1)
# -- convert to a one-hot representation to compute TP, FP, d prime, accuracy, etc. --
# make tensor below for every batch, in case it changes size (e.g. for last batch)
y_pred_softmax_onehot_batch = torch.FloatTensor(y_true_onehot_batch.shape).to(
y_true_onehot_batch.device # just copy this tensor so it's the same shape / dtype
)
y_pred_softmax_onehot_batch.zero_() # but then zero out
y_pred_softmax_onehot_batch.scatter_(1, torch.unsqueeze(y_pred_batch, 1), 1)
# note we make 'y_pred_batch' name point at the tensor we just made
y_pred_batch = y_pred_softmax_onehot_batch
# ---- save outputs to concatenate and return with results
arrays['out'].append(out_batch.cpu().numpy()) # raw output of network
arrays['y_pred'].append(y_pred_batch.cpu().numpy())
# ---- compute true positive, false positive, true negative, false negative
TP = ((y_true_onehot_batch == 1) & (y_pred_batch == 1)).sum(dim=1).cpu().numpy()
FP = ((y_true_onehot_batch == 0) & (y_pred_batch == 1)).sum(dim=1).cpu().numpy()
TN = ((y_true_onehot_batch == 0) & (y_pred_batch == 0)).sum(dim=1).cpu().numpy()
FN = ((y_true_onehot_batch == 1) & (y_pred_batch == 0)).sum(dim=1).cpu().numpy()
elif self.mode == 'detect':
img_batch, target_batch = batch['img'], batch['target']
batch_size, n_classes = target_batch.shape # used for this code block and np.split below
img_batch = tile(img_batch, dim=0, n_tile=n_classes) # repeat each img n_classes number of times
# make a diagonal so we can query for every possible class in the image
query_expanded = torch.cat(batch_size * [torch.diag(torch.ones(n_classes, ))])
target_batch = target_batch.flatten()
target_batch = target_batch.unsqueeze(1) # add back non-batch ind, so target matches output shape
img_batch = img_batch.to(self.device)
query_batch = query_expanded.to(self.device)
target_batch = target_batch.to(self.device)
# --- split into batches, keeping same batch size in case for same reason there's batch size fx
# for "assay", need to re-assemble outputs with same shape as we would get for 'classify'
out_batch = []
y_pred_batch = []
y_true_onehot_batch = []
for img, query, target in zip(
torch.split(img_batch, self.batch_size),
torch.split(query_batch, self.batch_size),
torch.split(target_batch, self.batch_size)
):
y_true_onehot_batch.append(target.cpu().numpy())
out = self.model(img, query)
out = self.sigmoid_activation(out)
out_batch.append(out.cpu().numpy())
y_pred = (out > self.sigmoid_threshold).float()
y_pred_batch.append(y_pred.cpu().numpy())
out_batch = np.concatenate(out_batch).reshape(-1, n_classes)
y_pred_batch = np.concatenate(y_pred_batch).reshape(-1, n_classes)
y_true_onehot_batch = np.concatenate(y_true_onehot_batch).reshape(-1, n_classes)
for key, val in zip(
('out', 'y_pred', 'y_true_onehot'),
(out_batch, y_pred_batch, y_true_onehot_batch)
):
arrays[key].append(val)
TP = ((y_true_onehot_batch == 1) & (y_pred_batch == 1)).sum(axis=1)
FP = ((y_true_onehot_batch == 0) & (y_pred_batch == 1)).sum(axis=1)
TN = ((y_true_onehot_batch == 0) & (y_pred_batch == 0)).sum(axis=1)
FN = ((y_true_onehot_batch == 1) & (y_pred_batch == 0)).sum(axis=1)
# now loop through each sample in batch to add to records; these will be rows in dataframe
index_batch = batch['index']
img_indices_list = index_batch.cpu().numpy().tolist()
img_paths = [
Path(self.testset.images[idx]) for idx in img_indices_list
]
img_names = [img_path.name for img_path in img_paths]
n_items = arrays['y_true_onehot'][-1].sum(axis=1).astype(int)
vsd_score_batch = batch['vsd_score']
zip_for_loop = zip(
torch.unbind(index_batch),
img_paths,
img_names,
torch.unbind(vsd_score_batch),
TP.tolist(),
FP.tolist(),
TN.tolist(),
FN.tolist(),
n_items.tolist(),
# below, by zipping these arrays, we get one row for each step in iteration
arrays['out'][-1],
arrays['y_pred'][-1],
arrays['y_true_onehot'][-1],
)
for sample_tuple in zip_for_loop:
image_row = dict(zip(
['voc_test_index', 'img_path', 'img_name', 'vsd_score',
'TP', 'FP', 'TN', 'FN', 'n_items'],
sample_tuple[:-3])
)
for key in image_row.keys():
value = image_row[key]
if isinstance(value, torch.Tensor):
value = value.cpu().numpy().item()
image_row[key] = value
image_records[key].append(value)
# now treat each class as a trial, regardless of mode, and add as a row to trial_records
probs, preds, target_present_vec = sample_tuple[-3:]
for class_ind, (prob, pred, target_present) in enumerate(zip(
probs.tolist(), preds.tolist(), target_present_vec.tolist())):
for key, value in image_row.items():
trial_records[key].append(value)
trial_records['class'].append(class_ind)
trial_records['prob'].append(prob)
trial_records['pred'].append(pred)
trial_records['target_present'].append(target_present)
arrays = {k: np.concatenate(v) for k, v in arrays.items()}
images_df = pd.DataFrame.from_records(image_records)
trials_df = pd.DataFrame.from_records(trial_records)
y_pred_all = arrays['y_pred'].ravel()
y_true_all = arrays['y_true_onehot'].ravel()
acc = accuracy_score(y_pred=y_pred_all, y_true=y_true_all)
_, _, d_prime = compute_d_prime(y_pred=y_pred_all, y_true=y_true_all)
return {
'arrays': arrays,
'images_df': images_df,
'trials_df': trials_df,
'acc': acc,
'd_prime': d_prime,
}
|
import random
import turtle
# the shared turtle used by all of the drawing helpers below
t = turtle.Turtle()
def triangle():
side_length = random.randint(5,50)
for x in range(1,4):
t.forward(side_length)
t.left(360/3)
t.up()
t.forward(side_length+10)
t.down()
def circle():
radius = random.randint(5,18)
t.circle(radius)
t.up()
t.forward(radius*2+5)
t.down()
def square():
side_length = random.randint(5,50)
for x in range(1,5):
t.forward(side_length)
t.left(90)
t.up()
t.forward(side_length+10)
t.down()
def draw_random():
quantity = random.randint(1,16)
# choose one of the drawing functions (the function object, not a call) and run it
ans = random.choice([circle,square,triangle])
for x in range(quantity):
ans()
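# Hedged usage sketch (requires a graphical display for the turtle window):
# draw_random()
# turtle.done()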
|
"""This takes a dictionary, adds values, changes them and displays those chages."""
Dict = {"name": "Chris", "city": "Seattle", "cake": "chocolate"}
print Dict
del Dict["cake"]
print Dict
Dict["fruit"] = "Mango"
print Dict.keys()
print Dict.values()
print "cake" in Dict
print "Mango" in Dict
print "Mango" in Dict.values()
Dict2 = range(16)
Dict3 = []
for i in Dict2:
Dict3.append(hex(i))
Dict4 = dict(zip(Dict2, Dict3))
print Dict4
Dict5 = {}
for key, val in Dict.items():
Dict5[key] = val.count('a')
print Dict5
s2 = set()
s3 = set()
s4 = set()
for i in range(21):
if i % 2 == 0:
s2.add(i)
if i % 3 == 0:
s3.add(i)
if i % 4 == 0:
s4.add(i)
print s2
print s3
print s4
print s3.issubset(s2)
print s4.issubset(s2)
Set1 = set("Python")
Set1.add('i')
print Set1
Set2 = frozenset("marathon")
print Set1.union(Set2)
print Set1.intersection(Set2)
|
import torch
from torch.utils.data import Dataset
import string
translator = str.maketrans('', '', string.punctuation)
import random
import glob
from PIL import Image
import numpy as np
from torch.nn.utils.rnn import pad_sequence
import pickle
DATA_DIR = '../../data/'
FRAMES_DIR = '../../data/processed-frames/'
ENVS_DIR = '../../data/envs/'
TRAIN_VIDEOS = list(range(80))
VALID_VIDEOS = list(range(80, 100))
TEST_VIDEOS = [50, 92, 95]
class PadBatch:
def __init__(self):
pass
def __call__(self, batch):
traj_right_batch, traj_left_batch, traj_center_batch, \
lang_batch, lang_enc_batch, traj_len_batch, lang_len_batch, labels_batch, obj_batch, \
env_batch, weight_batch = zip(*batch)
traj_right_batch = pad_sequence(traj_right_batch, batch_first=True)
traj_left_batch = pad_sequence(traj_left_batch, batch_first=True)
traj_center_batch = pad_sequence(traj_center_batch, batch_first=True)
lang_enc_batch = pad_sequence(lang_enc_batch, batch_first=True)
lang_enc_batch = torch.from_numpy(np.array(lang_enc_batch))
traj_len_batch = torch.Tensor(traj_len_batch)
lang_len_batch = torch.Tensor(lang_len_batch)
weight_batch = torch.Tensor(weight_batch)
labels_batch = torch.Tensor(labels_batch)
return traj_right_batch, traj_left_batch, traj_center_batch, \
lang_batch, lang_enc_batch, traj_len_batch, lang_len_batch, \
labels_batch, obj_batch, env_batch, weight_batch
class Data(Dataset):
def __init__(self, mode, repeat=10):
self.vocab = pickle.load(open('{}/vocab_train.pkl'.format(DATA_DIR), 'rb'))
self.descriptions = self.load_descriptions(mode)
self.video_ids = self.get_video_ids(mode)
self.N_OBJ = 13
self.N_ENV = len(self.video_ids)
self.repeat = repeat
def get_video_ids(self, mode):
if mode == 'train':
video_ids = TRAIN_VIDEOS
elif mode == 'valid':
video_ids = VALID_VIDEOS
else:
raise NotImplementedError('Invalid mode!')
for vid in TEST_VIDEOS:
try:
video_ids.remove(vid)
except ValueError:
pass
return video_ids
def __len__(self):
return 2 * self.N_OBJ * self.N_ENV * self.repeat
def encode_description(self, descr):
result = []
for w in descr.split():
try:
t = self.vocab.index(w)
except ValueError:
t = self.vocab.index('<unk>')
result.append(t)
return torch.Tensor(result)
def load_descriptions(self, mode):
descriptions = pickle.load(open('{}/{}_descr.pkl'.format(DATA_DIR, mode), 'rb'))
result = {}
for i in descriptions.keys():
descr_list = descriptions[i]
result[i] = [(d, self.encode_description(d)) for d in descr_list]
return result
def load_env_objects(self, obj, env):
result = []
with open('{}/obj{}-env{}.txt'.format(ENVS_DIR, obj, env)) as f:
for line in f.readlines():
line = line.replace('(', '').replace(',', '').replace(')', '')
parts = line.split()
x = eval(parts[0])
y = eval(parts[1])
obj = eval(parts[2])
result.append(obj)
return result
def load_frames(self, obj, env):
frames_r = torch.from_numpy(torch.load(open('{}/obj{}-env{}-right-50x50.pt'.format(FRAMES_DIR, obj, env), 'rb')))
frames_l = torch.from_numpy(torch.load(open('{}/obj{}-env{}-left-50x50.pt'.format(FRAMES_DIR, obj, env), 'rb')))
frames_c = torch.from_numpy(torch.load(open('{}/obj{}-env{}-center-50x50.pt'.format(FRAMES_DIR, obj, env), 'rb')))
n_frames_total = len(frames_r)
n_frames = np.random.randint(1, (n_frames_total+1))
weight = (n_frames / n_frames_total)
frames_r = frames_r[:n_frames]
frames_l = frames_l[:n_frames]
frames_c = frames_c[:n_frames]
while True:
selected = np.random.random(n_frames) > 0.9
if np.sum(selected) > 0:
break
frames_r = frames_r[selected]
frames_l = frames_l[selected]
frames_c = frames_c[selected]
return frames_r, frames_l, frames_c, n_frames_total, weight
def get_descr(self, obj, label, env_objects):
if label == 1:
t = np.random.randint(0, len(self.descriptions[obj]))
descr, descr_enc = self.descriptions[obj][t]
else:
# select an alternate language
tt = np.random.random()
alt_obj = env_objects[1:]
if len(alt_obj) == 0:
alt_obj = list(range(0,obj)) + list(range(obj+1,self.N_OBJ))
obj_ = np.random.choice(alt_obj)
t = np.random.randint(0, len(self.descriptions[obj_])-1)
descr, descr_enc = self.descriptions[obj_][t]
return descr, descr_enc
def __getitem__(self, index):
if index >= len(self) // 2:
label = 0
index -= len(self) // 2
else:
label = 1
obj = index // (self.repeat * self.N_ENV)
env = self.video_ids[index % self.N_ENV]
env_objects = self.load_env_objects(obj, env)
frames_r, frames_l, frames_c, n_frames_total, weight = self.load_frames(obj, env)
descr, descr_enc = self.get_descr(obj, label, env_objects)
label = 2*label - 1
return frames_r, frames_l, frames_c, descr, descr_enc, len(frames_r), len(descr_enc), label, obj, env, weight
|
import imported
# only works if no __init__.py in this dir
def test_doit():
assert imported.doit() == 999
|
from tkinter import *
from tkinter import messagebox
from tkinter import Menu
from tkinter import filedialog, Tk
import os
class ventana:
def __init__(self, inter):
self.interfaz = inter
self.interfaz.geometry("1020x720")
self.interfaz.title(" Bitxelart")
self.crearVentana()
def crearVentana(self):
Button(self.interfaz, command=self.original, text="Original", font='arial 14', bg='white', fg='blue').place(
x=50, y=120, width=130, height=30)
Button(self.interfaz, command=self.original, text="Mirror X", font='arial 14', bg='white', fg='blue').place(
x=50, y=230, width=130, height=30)
Button(self.interfaz, command=self.original, text="Mirror Y", font='arial 14', bg='white', fg='blue').place(
x=50, y=320, width=130, height=30)
Button(self.interfaz, command=self.original, text="Double Mirror", font='arial 14', bg='white',
fg='blue').place(x=50, y=420, width=130, height=30)
barraMenu = Menu(self.interfaz)
self.interfaz.config(menu=barraMenu, width=300, height=300)
cargarMenu = Menu(barraMenu)
analizarMenu = Menu(barraMenu)
reportesMenu = Menu(barraMenu)
salirMenu = Menu(barraMenu)
barraMenu.add_cascade(label='Cargar', menu=cargarMenu)
barraMenu.add_cascade(label='Analizar', menu=analizarMenu)
barraMenu.add_cascade(label='Reportes', menu=reportesMenu)
barraMenu.add_cascade(label='Salir', menu=salirMenu)
cargarMenu.add_command(label='Seleccionar archivo', command = self.abrir)
analizarMenu.add_command(label='Analizando')
salirMenu.add_command(label='Salir del programa', command = self.salir)
def original(self):
print("Original")
def abrir(self):
Tk().withdraw()
archivo = filedialog.askopenfile(
title="Seleccionar un archivo LFP",
initialdir="./",
filetypes=(
("Archivos PXLA", "*.pxla"),
("Todos los archivos", "*.*")
)
)
if archivo is None:
print("Error de lectura")
return None
else:
texto = archivo.read()
archivo.close()
print("Lectura Exitosa")
return texto
def salir(self):
self.interfaz.destroy()
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Given a string S and a string T,
# find the minimum window in S which will contain all the characters in T in complexity O(n).
# For example,
# S = "ADOBECODEBANC"
# T = "ABC"
# Minimum window is "BANC".
# Note:
# If there is no such window in S that covers all characters in T, return the empty string "".
# If there are multiple such windows, you are guaranteed that there will always be only one unique minimum window in S.
# 268 / 268 test cases passed.
# Status: Accepted
# Runtime: 222 ms
# Your runtime beats 44.98 % of python submissions.
import collections
class Solution(object):
def minWindow(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
need = collections.Counter(t)
missing = len(t)
i = I = J = 0
for j, v in enumerate(s, 1):
missing -= need[v] > 0 # Equivalent to if need[v] > 0: missing -= 1
need[v] -= 1
if not missing:
while i < j and need[s[i]] < 0:
need[s[i]] += 1
i += 1
if not J or j - i <= J - I:
J = j
I = i
return s[I:J]
if __name__ == '__main__':
# print(Solution().minWindow(
# "ADOBECODEBANC",
# "ABC",
# ))
print(Solution().minWindow(
"A",
"A",
))
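# For reference, minWindow("ADOBECODEBANC", "ABC") returns "BANC" (as in the problem
# statement above), and the call above with ("A", "A") prints "A".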
|
import socket
import struct
import time
import numpy as np
import datetime
from multiprocessing import Process
from threading import Thread
import cv2
import math
from src.utils.templates.workerprocess import WorkerProcess
from simple_pid import PID
class LaneKeeping(WorkerProcess):
pid = PID(Ki = 0.05, Kd = 0.01)
# ===================================== INIT =========================================
def __init__(self, inPs, outPs):
"""Process used for sending images over the network. UDP protocol is used. The
image is compressed before it is send.
Used for visualizing your raspicam from PC.
Parameters
----------
inPs : list(Pipe)
List of input pipes, only the first pipe is used to transfer the captured frames.
outPs : list(Pipe)
List of output pipes, the first pipe is used to send the computed steering value.
"""
super(LaneKeeping,self).__init__(inPs, outPs)
# ===================================== RUN ==========================================
def run(self):
"""Apply the initializing methods and start the threads.
"""
super(LaneKeeping,self).run()
# ===================================== INIT THREADS =================================
def _init_threads(self):
"""Initialize the sending thread.
"""
if self._blocker.is_set():
return
streamTh = Thread(name='StreamSending',target = self._the_thread, args= (self.inPs[0], self.outPs[0], ))
streamTh.daemon = True
self.threads.append(streamTh)
# ===================================== SEND THREAD ==================================
def _the_thread(self, inP, outP):
"""Sending the frames received thought the input pipe to remote client by using a socket.
Parameters
----------
inP : Pipe
Input pipe to read the frames from other process.
"""
def laneKeeping(img):
height = 480
width = 640
img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
img = img[(int(height/1.8)):height, 0:width]
img = cv2.GaussianBlur(img, (7,7), 0)
img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 21, -8)
total = 0.0
lines = cv2.HoughLinesP(img, rho=6, theta=np.pi/60, threshold=160, lines=np.array([]), minLineLength=40, maxLineGap=25)
for line in lines:
for x1, y1, x2, y2 in line:
if y2 != y1:
total = total + (x2 - x1) / (y2 - y1)
return total
while True:
try:
stamps, img = inP.recv()
val = laneKeeping(img)
val /= 3.5
val = self.pid(val)
print(val)
outP.send(val)
except Exception as e:
print("Lane keeping error:")
print(e)
|
import vcr
from sirepo_bluesky import SirepoBluesky
@vcr.use_cassette('vcr_cassettes/test_smoke_sirepo.yml')
def test_smoke_sirepo():
sim_id = '87XJ4oEb'
sb = SirepoBluesky('http://10.10.10.10:8000')
data, schema = sb.auth('srw', sim_id)
assert 'beamline' in data['models']
@vcr.use_cassette('vcr_cassettes/test_sirepo_flyer.yml')
def test_sirepo_flyer():
from re_config import RE, db, ROOT_DIR
from sirepo_flyer import SirepoFlyer
import bluesky.plans as bp
params_to_change = []
for i in range(1, 5 + 1):
key1 = 'Aperture'
parameters_update1 = {'horizontalSize': i * .1, 'verticalSize': (6 - i) * .1}
key2 = 'Lens'
parameters_update2 = {'horizontalFocalLength': i + 10}
params_to_change.append({key1: parameters_update1,
key2: parameters_update2})
sirepo_flyer = SirepoFlyer(sim_id='87XJ4oEb', server_name='http://10.10.10.10:8000',
root_dir=ROOT_DIR, params_to_change=params_to_change,
watch_name='W60', run_parallel=False)
RE(bp.fly([sirepo_flyer]))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 21 10:15:26 2017
@author: Rafael Rocha
"""
import os
import numpy as np
# import my_utils as ut
from glob import glob
from skimage.transform import resize
from skimage.io import imread
from skimage.exposure import equalize_hist
from skimage import img_as_ubyte, img_as_float
# from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold, StratifiedShuffleSplit
img_dir = '/Users/pma009/Documents/Banco_de_imagens/MeliponasImageDataStore/'
dir_prefix = '*/'
file_prefix = '*.jpeg'
# img_rows, img_cols = 128, 256
# x, y = ut.extract_2(img_dir, dir_prefix, file_prefix)
dir_list = glob(os.path.join(img_dir, dir_prefix))
img_list = []
labels_list = []
lin = 256
col = 512
# print(range(np.size(dir_list)))
for i in range(np.size(dir_list)):
for filename in glob(os.path.join(dir_list[i], file_prefix)):
im = imread(filename)
im = resize(im, [lin, col, 3])
im = equalize_hist(im)
im = img_as_ubyte(im)
im = img_as_float(im)
img_list.append(im)
train_len = len(dir_list[i])
labels_list.append(i)#dir_list[i][train_len - 2])#y_true[i])#
# print(i)
# var_perm = np.random.permutation(np.size(labels_list))
X = np.array(img_list, dtype=np.float64)
y = np.array(labels_list, dtype=np.uint8)
# x, x_test, y, y_test = train_test_split(x, y, test_size=.2, shuffle=False)
#
# skf = StratifiedKFold(n_splits=5, shuffle=False)
#
# for train_index, test_index in skf.split(x, y):
# print("TRAIN:", train_index, "TEST:", test_index)
# x_test, y_test = x[test_index], y[test_index]
# x, y = x, y#x[train_index], y[train_index]
# break
skf = StratifiedKFold(n_splits=10, shuffle=True,random_state=42)
shufflesplit = StratifiedShuffleSplit(n_splits=2, random_state=42)
skf.get_n_splits(X, y)
print(skf) # doctest: +NORMALIZE_WHITESPACE
shufflesplit.get_n_splits(X, y)
print(shufflesplit)
# StratifiedKFold(n_splits=2, random_state=None, shuffle=False)
# for train_index, test_index in skf.split(X, y):
# # print("TRAIN:", train_index, "TEST:", test_index)
# X_train, X_test = X[train_index], X[test_index]
# y_train, y_test = y[train_index], y[test_index]
# print(y_test)
for train_index, test_index in shufflesplit.split(X, y):
print("TRAIN:", train_index, "TEST:", test_index)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
print(y_test)
def sample(y):
d_c = np.bincount(y)
s = int(d_c.min() * 0.8)
a1 = np.random.choice(np.arange(d_c[0]), size=s, replace=False)
a2 = np.random.choice(
np.arange(d_c[0], d_c[0] + d_c[1]), size=s, replace=False)
a3 = np.random.choice(
np.arange(d_c[0] + d_c[1], sum(d_c)), size=s, replace=False)
a = np.concatenate([a1, a2, a3])
print(d_c)
return a
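# Note: sample() assumes exactly three classes (it only draws a1, a2 and a3 from the
# bincount), so it would need to be generalised if more species are present.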
samples = []
for i in range(5):
samples.append(sample(y))
np.savez('train_test_splits_Meliponas8especies256x512_2', samples=samples,
         x=X, y=y, x_test=X_test, y_test=y_test, train_index=train_index, test_index=test_index)
|
# Created by: David Wertenteil
from Crypto.Cipher import AES
import os
# --------------- Functions -------------------------------------
def SIZE():
return 16
def div_16(o):
r = []
for i in range(0, len(o), SIZE()):
r.append(o[i:i + SIZE()])
return r
def xor(o1, o2):
return [ord(a) ^ ord(b) for a, b in zip(o1, o2)]
def cut(o):
c = int(o[-1], SIZE())
return o[:SIZE() - c]
def n_of_clip(str):
t = str.replace("oceans_aes-audio=65000-video=370000-", "")
return int(t)
def iv(i):
if i > 16:
return None
return str(bin(i)[2:].zfill(16))
# ----------------------- The CBC Section ------------------------------
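# CBC decryption recovers each plaintext block as P_i = D_K(C_i) XOR C_{i-1},
# with the IV standing in for C_0 on the first block; the loop below applies this
# relation, deriving the IV from the clip number encoded in the file name.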
# -------------- Getting the key: ------------------
key = open("oceans.key", "rb")
key1 = key.read()
key.close()
obj = AES.new(key1, AES.MODE_ECB)
# --------------- Making list of .ts files: --------
t = os.listdir("./")
ts = []
for i in t:
[temp, ex] = os.path.splitext(i)
if ex == '.ts' and 'new' not in temp:
ts.append(i)
# --------- Decrypting each file: ---------------------
for i in ts:
[num, ex] = os.path.splitext(i)
f = open(i, 'rb')
c = f.read()
f.close()
parts = div_16(c)
dec = []
    # First block: decrypt and XOR with the IV derived from the clip number
temp = obj.decrypt(parts[0])
dec.append(xor(iv(n_of_clip(num)), temp))
for p in range(1, len(parts)):
dec.append(xor(parts[p - 1], obj.decrypt(parts[p])))
# Removing the padding:
for i in range(dec[-1][-1]):
dec[-1].pop()
name = 'new'+str(n_of_clip(num))+".ts"
print(name)
new = open(name,'wb')
for i1 in dec:
for i2 in i1:
new.write(chr(i2))
new.close()
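# Note: this script calls ord()/chr() on the raw file contents, so it targets Python 2;
# under Python 3 the bytes objects would need to be indexed directly and written as bytes.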
|
# -*- coding: utf-8 -*-
from django.conf.urls import include, url
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^$', views.home),
url(r'^user/login$', views.user_login),
url(r'^user/create$', views.user_create),
url(r'^user/logout$', views.user_logout),
]
|
# Now we use the while loop to build a number-guessing game
# We hide a number and the player gets 3 attempts to find it
# If they guess the hidden number within 3 tries they win; otherwise they lose
guess_count = 0    # start counting attempts from 0
guess_limit = 3    # the player gets 3 attempts
secret_number = 8  # the hidden number the player has to find
while guess_count < guess_limit:
    guess = int(input("Guess:"))      # read the player's guess
    guess_count = guess_count + 1     # use up one attempt
    if guess == secret_number:
        print("You won ! :) ")
        break  # stop looping once the number has been guessed
    else:
        print("Sorry, you failed! :(")
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase, LiveServerTestCase
from main.views import _split_message
from main.models import Wall
from django.contrib.auth.models import User
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core import management
from os import environ
from selenium import webdriver
from main.management.commands._listen_for_tweets import TwitterListener
import random
import tweepy
import time
class SMSTest(TestCase):
def setUp(self):
self.user = User.objects.create_user('Bob', None, 'Bob')
self.user.save()
self.wall = Wall(hashtag="#abc", phone_number="+11112223333",
user=self.user)
self.wall2 = Wall(hashtag="#qwe", phone_number="+11234567891",
user=self.user)
self.wall.save()
def test_single_wall(self):
print "Single wall"
self.assertEquals(_split_message('This is a sentence', ''), (self.wall.hashtag, 'This is a sentence'))
print "Done"
def test_phone_number(self):
print "Phone number"
self.wall2.save()
self.assertEquals(_split_message('Test message', '+12223334444'), (None, None))
self.assertEquals(_split_message('Hello world',
self.wall.phone_number), (self.wall.hashtag, 'Hello world'))
self.assertEquals(_split_message('Hello world',
self.wall.phone_number), (self.wall.hashtag, 'Hello world'))
self.assertEquals(_split_message('Hello world', '+12223334444'), (None, None))
print "Done"
#def test_purchase_phone_number(self):
class WallTest(TestCase):
def setUp(self):
self.user = User.objects.create_user('Bob', None, 'Bob')
self.user.save()
self.wall = Wall(hashtag="#abc", phone_number="+11112223333",
user=self.user)
self.wall.save()
#def test_display_wall(self):
class AcceptanceTest(LiveServerTestCase):
def setUp(self):
print "Starting browser"
self.browser = webdriver.Firefox()
auth = tweepy.OAuthHandler(settings.TWITTER_CONSUMER_KEY,
settings.TWITTER_CONSUMER_SECRET)
auth.set_access_token(settings.TWITTER_ACCESS_TOKEN,
settings.TWITTER_ACCESS_TOKEN_SECRET)
self.twitter = tweepy.API(auth)
self.twitter_listener = TwitterListener()
def tearDown(self):
self.browser.close()
self.twitter_listener.exit()
def test_send_tweet(self):
print "Browser test"
self.browser.get(self.live_server_url + reverse('main.views.index'))
self.assertEqual(self.browser.title, "Texting Wall")
self.browser.get(self.live_server_url + reverse('main.views.create_account'))
self.assertEqual(self.browser.title, "Create account")
self.browser.find_element_by_id("id_username").send_keys("test")
self.browser.find_element_by_id("id_password1").send_keys("test")
self.browser.find_element_by_id("id_password2").send_keys("test")
self.browser.find_element_by_id("id_password2").submit()
self.assertEqual(self.browser.current_url, self.live_server_url +
reverse('main.views.index'))
self.browser.get(self.live_server_url + reverse('main.views.new_wall'))
twitter_hashtag = "#" + str(random.random())
message = "%s %s" % (twitter_hashtag, random.random())
hashtag = self.browser.find_element_by_id("inputHashtag")
hashtag.send_keys(twitter_hashtag)
hashtag.submit()
self.twitter_listener.update()
print message
self.twitter.update_status(message)
time.sleep(.5)
self.browser.find_element_by_xpath("//p[contains(text(), '%s')]" %
message)
print "Done"
|
from sqlcompletion import suggest_type
def test_empty_string_suggests_keywords():
suggestion = suggest_type('', len(''))
assert suggestion == (['keywords'], [''])
def test_select_suggests_cols_with_table_scope():
suggestion = suggest_type('SELECT FROM tabl', len('SELECT '))
assert suggestion == ('columns-and-functions', ['tabl'])
def test_where_suggests_columns_functions():
suggestion = suggest_type('SELECT * FROM tabl WHERE ', len('SELECT * FROM tabl WHERE '))
assert suggestion == ('columns-and-functions', ['tabl'])
def test_lparen_suggests_cols():
suggestion = suggest_type('SELECT MAX( FROM tbl', len('SELECT MAX('))
assert suggestion == ('columns', ['tbl'])
def test_select_suggests_cols_and_funcs():
suggestion = suggest_type('SELECT ', len('SELECT '))
assert suggestion == ('columns-and-functions', [])
def test_from_suggests_tables():
suggestion = suggest_type('SELECT * FROM ', len('SELECT * FROM '))
assert suggestion == ('tables', [])
def test_distinct_suggests_cols():
suggestion = suggest_type('SELECT DISTINCT ', len('SELECT DISTINCT '))
assert suggestion == ('columns', [])
def test_col_comma_suggests_cols():
suggestion = suggest_type('SELECT a, b, FROM tbl', len('SELECT a, b,'))
assert suggestion == ('columns-and-functions', ['tbl'])
def test_table_comma_suggests_tables():
suggestion = suggest_type('SELECT a, b FROM tbl1, ', len('SELECT a, b FROM tbl1, '))
assert suggestion == ('tables', [])
def test_into_suggests_tables():
suggestion = suggest_type('INSERT INTO ', len('INSERT INTO '))
assert suggestion == ('tables', [])
def test_insert_into_lparen_suggests_cols():
suggestion = suggest_type('INSERT INTO abc (', len('INSERT INTO abc ('))
assert suggestion == ('columns', ['abc'])
def test_insert_into_lparen_partial_text_suggests_cols():
suggestion = suggest_type('INSERT INTO abc (i', len('INSERT INTO abc (i'))
assert suggestion == ('columns', ['abc'])
def test_insert_into_lparen_comma_suggests_cols():
suggestion = suggest_type('INSERT INTO abc (id,', len('INSERT INTO abc (id,'))
assert suggestion == ('columns', ['abc'])
def test_partially_typed_col_name_suggests_col_names():
suggestion = suggest_type('SELECT * FROM tabl WHERE col_n', len('SELECT * FROM tabl WHERE col_n'))
assert suggestion == ('columns-and-functions', ['tabl'])
def test_dot_suggests_cols_of_a_table():
suggestion = suggest_type('SELECT tabl. FROM tabl', len('SELECT tabl.'))
assert suggestion == ('columns', ['tabl'])
def test_dot_suggests_cols_of_an_alias():
suggestion = suggest_type('SELECT t1. FROM tabl1 t1, tabl2 t2', len('SELECT t1.'))
assert suggestion == ('columns', ['tabl1'])
def test_dot_col_comma_suggests_cols():
suggestion = suggest_type('SELECT t1.a, t2. FROM tabl1 t1, tabl2 t2', len('SELECT t1.a, t2.'))
assert suggestion == ('columns', ['tabl2'])
def test_sub_select_suggests_keyword():
suggestion = suggest_type('SELECT * FROM (', len('SELECT * FROM ('))
assert suggestion == ('keywords', [])
def test_sub_select_table_name_completion():
suggestion = suggest_type('SELECT * FROM (SELECT * FROM ', len('SELECT * FROM (SELECT * FROM '))
assert suggestion == ('tables', [])
def test_sub_select_col_name_completion():
suggestion = suggest_type('SELECT * FROM (SELECT FROM abc', len('SELECT * FROM (SELECT '))
assert suggestion == ('columns-and-functions', ['abc'])
def test_sub_select_multiple_col_name_completion():
suggestion = suggest_type('SELECT * FROM (SELECT a, FROM abc', len('SELECT * FROM (SELECT a, '))
assert suggestion == ('columns-and-functions', ['abc'])
def test_sub_select_dot_col_name_completion():
suggestion = suggest_type('SELECT * FROM (SELECT t. FROM tabl t', len('SELECT * FROM (SELECT t.'))
assert suggestion == ('columns', ['tabl'])
|
import unittest
from katas.kyu_7.counting_occurrence_of_digits import List
class ListTestCase(unittest.TestCase):
def setUp(self):
self.lst = List()
def test_equals(self):
self.assertEqual(self.lst.count_spec_digits(
[1, 1, 2, 3, 1, 2, 3, 4], [1, 3]), [(1, 3), (3, 2)])
def test_equals_2(self):
self.assertEqual(self.lst.count_spec_digits(
[-18, -31, 81, -19, 111, -888], [1, 8, 4]),
[(1, 7), (8, 5), (4, 0)])
def test_equals_3(self):
self.assertEqual(self.lst.count_spec_digits(
[-77, -65, 56, -79, 6666, 222], [1, 8, 4]),
[(1, 0), (8, 0), (4, 0)])
def test_equals_4(self):
self.assertEqual(self.lst.count_spec_digits(
[], [1, 8, 4]), [(1, 0), (8, 0), (4, 0)])
|
import Addmodule as ad
Key = input("Enter Developer name \t")
ad.yashdictionary(Key)
|
#!/usr/bin/env python3
from autobahn.twisted.websocket import WebSocketServerProtocol, \
WebSocketServerFactory
from twisted.python import log
from twisted.internet import reactor
import threading, sys, codecs, os, random, time, queue
CODIGO = '-1::Salir'.encode('ASCII', 'ignore')
LONGITUD = 207
conexiones = []
class WebSocketServer(WebSocketServerProtocol):
    '''
    * @brief Reports that a client connected
    * @details Prints to standard output every time a new client connects
    * @param[in] request: Provides client-specific information such as its IP address
    * @param[out] N/A
    * @remark N/A
    * @return N/A
    * @exception N/A
    * @author Jeisson Hidalgo
    * @date 28-09-20
    '''
def onConnect(self, request):
print(f"Client connecting: {request.peer}")
conexiones.append(self)
    '''
    * @brief Sends this node's own information and that of its neighbors
    * @details Prints a confirmation message to standard output and sends
    * the web page this node's ID and the IDs of its neighbors
    * @param[in] N/A
    * @param[out] N/A
    * @remark N/A
    * @return N/A
    * @exception N/A
    * @author Jeisson Hidalgo
    * @date 28-09-20
    '''
def onOpen(self):
print("WebSocket connection open.")
self.sendMessage(f"id\t{ids[0]}".encode('utf8'), False)
print("Envié # nodo")
idVecinos = 'vecinos'
for i in range(1, len(ids)):
idVecinos += ' ' + ids[i]
self.sendMessage(f"{idVecinos}".encode('utf8'), False)
print("Envié # vecinos")
    '''
    * @brief Prints incoming messages to standard output
    * @details This routine prints information about the messages arriving
    * from the web socket and destined for the blue agent, which are then
    * forwarded to their destination node
    * @param[in] [1] payload: The body of the message
    *            [2] isBinary: Whether the message is binary or not
    * @param[out] N/A
    * @remark N/A
    * @return N/A
    * @exception N/A
    * @author Jeisson Hidalgo
    * @date 28-09-20
    '''
def onMessage(self, payload, isBinary):
if isBinary:
print(f"Binary message received: {len(payload)} bytes")
else:
print(f"Text message received: {payload.decode('utf8')}")
salidas.put(payload)
    '''
    * @brief Prints to standard output when a connection is closed
    * @details This routine prints a message when a client closes the browser
    * @remark N/A
    * @return N/A
    * @exception N/A
    * @author Jeisson Hidalgo
    * @date 28-9-20
    '''
def onClose(self, wasClean, code, reason):
print(f"WebSocket connection closed: {reason}")
conexiones.pop(0)
salidas.put(CODIGO)
    '''
    * @brief Sends messages to the web page
    * @details This routine sends messages to the web page through the
    * web socket so that the user can see them
    * @param[in] [1] mensaje: The message sent to the web page for the
    * user to see
    * @param[out] N/A
    * @remark N/A
    * @return N/A
    * @exception N/A
    * @author Jeisson Hidalgo
    * @date 15-11-20
    '''
def enviarMensaje(self, mensaje):
mensaje = mensaje.decode('utf8')
self.sendMessage(f'{mensaje}'.encode('utf8'), False)
'''
* @brief Routine that sends messages to the blue agent
* @details This routine uses one of the pipes to send messages to the
* blue agent, which passes them on to the green node; these messages
* arrive from the user through the web socket
* @param[in] N/A
* @param[out] N/A
* @pre This method is run by a thread and must keep running for the
* whole execution
* @remark Must sleep so that the pipe does not fail
* @return N/A
* @exception N/A
* @author Johel Phillips Ugalde B75821
* @date 01-12-20
'''
def enviar():
fd = os.open(pipeSalida, os.O_WRONLY)
while True:
mensaje = salidas.get(block = True)
salidas.task_done()
if len(mensaje) >= LONGITUD:
mensaje = mensaje[:LONGITUD-1]
os.write(fd, mensaje + '\x00'.encode('ASCII', 'ignore'))
time.sleep(0.01)
if(mensaje.find(CODIGO) != -1):
break
os.close(fd)
'''
* @brief Routine that receives messages from the blue agent and
* forwards them through the web socket to the web page
* @details This routine uses one of the pipes to receive messages
* coming from a user connected to a neighboring node and forwards
* them to the web page, so that the user connected to this node can
* see them
* @param[in] N/A
* @param[out] N/A
* @pre This method is run by a thread and must keep running for the
* whole execution
* @remark Must sleep so that the pipe does not fail
* @return N/A
* @exception N/A
* @author Johel Phillips Ugalde B75821
* @date 01-12-20
'''
def recibir():
final = '\x00'.encode('ASCII', 'ignore')
fd = open(pipeEntrada, 'br')
while True:
mensaje = fd.read(LONGITUD)
time.sleep(0.01)
if(mensaje[2] != "'"):
if(mensaje.find(CODIGO[:3]) != -1):
reactor.callFromThread(reactor.stop)
break
posicion = mensaje.find(final)
mensaje = mensaje[:posicion]
conexiones[0].enviarMensaje(mensaje)
fd.close()
'''
* @brief Routine that stores the node information
* @details This routine splits the data passed as the third program
* argument to obtain this node's own ID and the IDs of its neighbors
* @param[in] [1] ids: The list where the IDs are stored
*            [2] nodos: String containing the node IDs
* @author Johel Phillips Ugalde B75821
* @date 20-11-20
'''
def almacenarIDs(ids, nodos):
len = nodos.count(',')
for i in range(len):
pos = nodos.find(',')
ids.append(nodos[:pos])
nodos = nodos[pos+1:]
if __name__ == '__main__':
pipeEntrada = sys.argv[1]
pipeSalida = sys.argv[2]
nodos = sys.argv[3]
host = sys.argv[4]
port = int(sys.argv[5])
ids = []
salidas = queue.Queue()
emisor = threading.Thread(target = enviar)
receptor = threading.Thread(target = recibir)
almacenarIDs(ids, nodos)
log.startLogging(sys.stdout)
factory = WebSocketServerFactory(f"ws://{host}:{port}")
factory.protocol = WebSocketServer
reactor.listenTCP(port, factory)
emisor.start()
receptor.start()
reactor.run()
emisor.join()
receptor.join()
salidas.join()
|
# -*- coding: utf-8 -*-
"""
@author: xiaoke
@file: lengthOfLIS.py
@time:2020-04-09 14:44
@file_desc:
"""
class Solution(object):
def lengthOfLIS(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
l = len(nums)
if l<=1:
return l
        dp = [1] * l  # dp[i] = length of the longest increasing subsequence ending at index i
for i in range(1, l):
prev = []
for j in range(i):
if nums[j]<nums[i]:
prev.append(dp[j])
if len(prev)>0:
dp[i] = max(prev) + 1
# print(dp)
return max(dp)
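
# For reference, a minimal O(n log n) sketch of the same problem using patience
# sorting with bisect (not part of the original Solution above; the function name is ours):
import bisect

def length_of_lis_nlogn(nums):
    tails = []  # tails[k] = smallest possible tail of an increasing subsequence of length k + 1
    for x in nums:
        i = bisect.bisect_left(tails, x)
        if i == len(tails):
            tails.append(x)  # x extends the longest subsequence found so far
        else:
            tails[i] = x     # x becomes a smaller tail for subsequences of length i + 1
    return len(tails)

# length_of_lis_nlogn([10, 9, 2, 5, 3, 7, 101, 18]) == 4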
if __name__ == '__main__':
nums = [10,9,2,5,3,7,101,18]
    solu = Solution()
    print(solu.lengthOfLIS(nums))
|
# Hint: maintain two arrays; the suffix dp covers cars removed from the right,
# the prefix dp covers cars removed from the left, and combining the two gives the answer
class Solution:
def minimumTime(self, s: str) -> int:
n = len(s)
post, pre = [0] * (n+1), 0
        # build the suffix dp: post[i] = minimum cost to clear the '1's in s[i:]
for i in range(n-1, -1, -1):
if s[i] == "0":
post[i] = post[i+1]
else:
post[i] = min(post[i+1]+2, n-i)
        # build the prefix dp
        # rolling-array optimization (only the previous prefix value is kept)
minv = post[0]
for i in range(n):
pre = min(pre+2, i+1)
minv = min(minv, pre + post[i+1])
return minv
# Removing cars from the middle can be folded into whichever side is cheaper,
# and the prefix dp can be reduced to a single rolling variable inside the same loop
class Solution:
def minimumTime(self, s: str) -> int:
ans = n = len(s)
pre = 0
for i, ch in enumerate(s):
if ch == '1':
pre = min(pre + 2, i + 1)
ans = min(ans, pre + n - 1 - i)
return ans
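# e.g. Solution().minimumTime("1100101") == 5: remove the two leading cars from the left
# (cost 2), the trailing car from the right (cost 1) and the remaining middle '1' (cost 2).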
|
import sys, os
sys.path.append(os.pardir)
import numpy as np
from DeepConvNetwork import DeepConvNet
from dataset.mnist import load_mnist
(a_train, b_train), (a_test, b_test) = load_mnist(flatten=False)
network = DeepConvNet()
network.load_params("deep_convnet_params.pkl")
sampled = 1000
a_test = a_test[:sampled]
b_test = b_test[:sampled]
print("Calculate Accuracy (float64) ... ")
print(network.accuracy(a_test, b_test))
# Convert to float16
a_test = a_test.astype(np.float16)
for param in network.params.values():
param[...] = param.astype(np.float16)
print("Calculate Accuracy (float16) ... ")
print(network.accuracy(a_test, b_test))
|
# Copyright (C) 2020 FireEye, Inc. All Rights Reserved.
import struct
from .. import api
class OleAut32(api.ApiHandler):
name = 'oleaut32'
apihook = api.ApiHandler.apihook
impdata = api.ApiHandler.impdata
def __init__(self, emu):
super(OleAut32, self).__init__(emu)
super(OleAut32, self).__get_hook_attrs__(self)
@apihook('SysAllocString', argc=1)
def SysAllocString(self, emu, argv, ctx={}):
"""
BSTR SysAllocString(
const OLECHAR *psz
);
"""
psz, = argv
alloc_str = self.read_mem_string(psz, 2)
if alloc_str:
argv[0] = alloc_str
alloc_str += '\x00'
ws = alloc_str.encode('utf-16le')
ws_len = len(ws)
# https://docs.microsoft.com/en-us/previous-versions/windows/desktop/automat/bstr
bstr_len = 4 + ws_len
bstr = self.mem_alloc(bstr_len)
bstr_bytes = struct.pack('<I', ws_len - 2) + ws
self.mem_write(bstr, bstr_bytes)
return bstr + 4
return 0
@apihook('SysFreeString', argc=1)
def SysFreeString(self, emu, argv, ctx={}):
"""
void SysFreeString(
BSTR bstrString
);
"""
argv[0] = self.read_wide_string(argv[0])
return
|
from .create import CreateTaskController
from .load import LoadTaskController
from .remove import RemoveTaskController
|
import pickle
import os
from tqdm import tqdm
import sys
import torch
import numpy as np
import random
from torch.utils.data import TensorDataset
import pymongo
sys.path.append('/home/chenxichen/pycharm_remote/pycharm_py3.6torch/paper_one')
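# NOTE: BertTokenizer (used in get_data below) is not imported in this file; it is assumed
# to come from the project's BERT package, e.g. pytorch_pretrained_bert or transformers:
# from pytorch_pretrained_bert import BertTokenizer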
def save_pkl_data(data, filename):
data_pkl = pickle.dumps(data)
with open(filename, 'wb') as fp:
fp.write(data_pkl)
def load_pkl_data(filename):
with open(filename, 'rb') as fp:
data_pkl = fp.read()
return pickle.loads(data_pkl)
class TrainTextExample(object):
def __init__(self, question_id, q_content, pos_ans_id, p_ans_content, neg_ans_id, n_ans_content):
self.question_id = question_id
self.q_content = q_content
self.pos_ans_id = pos_ans_id
self.p_ans_content = p_ans_content
self.neg_ans_id = neg_ans_id
self.n_ans_content = n_ans_content
def __str__(self):
        return self.__repr__()
def __repr__(self):
s = "{"
s += f'question_id: {self.question_id}'
s += f'||q_content: {self.q_content}\n'
s += f'pos_ans_id: {self.pos_ans_id}'
s += f'||p_ans_content: {self.p_ans_content}\n'
s += f'neg_ans_id: {self.neg_ans_id}'
s += f'||n_ans_content: {self.n_ans_content}'
s += "}"
return s
class TrainFeatures(object):
"""A single set of features of data."""
def __init__(
self,
question_id,
pos_ans_id,
neg_ans_id,
q_input_ids,
q_input_mask,
q_segment_ids,
p_a_input_ids,
p_a_input_mask,
p_a_segment_ids,
n_a_input_ids,
n_a_input_mask,
n_a_segment_ids,
):
self.question_id = question_id
self.pos_ans_id = pos_ans_id
self.neg_ans_id = neg_ans_id
self.q_input_ids = q_input_ids
self.q_input_mask = q_input_mask
self.q_segment_ids = q_segment_ids
self.p_a_input_ids = p_a_input_ids
self.p_a_input_mask = p_a_input_mask
self.p_a_segment_ids = p_a_segment_ids
self.n_a_input_ids = n_a_input_ids
self.n_a_input_mask = n_a_input_mask
self.n_a_segment_ids = n_a_segment_ids
class DevTextExample(object):
def __init__(self, question_id, q_content, ans_id, a_content, cnt, label):
self.question_id = question_id
self.q_content = q_content
self.ans_id = ans_id
self.a_content = a_content
self.cnt = cnt
self.label = label
def __str__(self):
        return self.__repr__()
def __repr__(self):
s = "{"
s += f'question_id: {self.question_id}'
s += f'||q_content: {self.q_content}\n'
s += f'ans_id: {self.ans_id}'
s += f'||a_content: {self.a_content}\n'
s += f'cnt: {self.cnt}'
s += f'||label: {self.label}'
s += "}"
return s
class DevFeatures(object):
"""A single set of features of data."""
def __init__(
self,
question_id,
ans_id,
q_input_ids,
q_input_mask,
q_segment_ids,
a_input_ids,
a_input_mask,
a_segment_ids,
cnt,
label,
q_w2v_ids=None,
a_w2v_ids=None,
):
self.question_id = question_id
self.ans_id = ans_id
self.q_input_ids = q_input_ids
self.q_input_mask = q_input_mask
self.q_segment_ids = q_segment_ids
self.a_input_ids = a_input_ids
self.a_input_mask = a_input_mask
self.a_segment_ids = a_segment_ids
self.cnt = cnt
self.label = label
self.q_w2v_ids = q_w2v_ids
self.a_w2v_ids = a_w2v_ids
class TestFeatures(object):
"""A single set of features of data."""
def __init__(
self,
question_id,
ans_id,
q_input_ids,
q_input_mask,
q_segment_ids,
a_input_ids,
a_input_mask,
a_segment_ids,
cnt,
label,
q_w2v_ids=None,
a_w2v_ids=None,
):
self.question_id = question_id
self.ans_id = ans_id
self.q_input_ids = q_input_ids
self.q_input_mask = q_input_mask
self.q_segment_ids = q_segment_ids
self.a_input_ids = a_input_ids
self.a_input_mask = a_input_mask
self.a_segment_ids = a_segment_ids
self.cnt = cnt
self.label = label
self.q_w2v_ids = q_w2v_ids
self.a_w2v_ids = a_w2v_ids
class TestTextExample(object):
def __init__(self, question_id, q_content, ans_id, a_content, cnt, label):
self.question_id = question_id
self.q_content = q_content
self.ans_id = ans_id
self.a_content = a_content
self.cnt = cnt
self.label = label
def __str__(self):
        return self.__repr__()
def __repr__(self):
s = "{"
s += f'question_id: {self.question_id}'
s += f'||q_content: {self.q_content}\n'
s += f'ans_id: {self.ans_id}'
s += f'||a_content: {self.a_content}\n'
s += f'cnt: {self.cnt}'
s += f'||label: {self.label}'
s += "}"
return s
def convert_bert_feature(content, tokenizer, max_seq_length):
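    # Tokenize, truncate to max_seq_length - 1, prepend [CLS], then pad ids, mask and segment ids to max_seq_length.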
content_token = tokenizer.tokenize(content)
if len(content_token) > (max_seq_length - 1):
content_token = content_token[:max_seq_length - 1]
content_token = ["[CLS]"] + content_token
input_ids = tokenizer.convert_tokens_to_ids(content_token)
input_mask = [1] * len(input_ids)
segment_ids = [0] * len(input_ids)
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
return input_ids, input_mask, segment_ids
def convert_w2v_id(content, max_seq_length):
content_ids = [w2v_vocab2id.get(token, w2v_vocab2id['UNK']) for token in content]
if len(content_ids) > max_seq_length:
content_ids = content_ids[:max_seq_length]
padding = [0] * (max_seq_length - len(content_ids))
content_ids += padding
return content_ids
def convert_train_text_to_features(text_examples, tokenizer, max_seq_length):
features = []
with tqdm(total=len(text_examples)) as pbar:
pbar.set_description('convert train text examples to features')
for example in text_examples:
question_id = example.question_id
q_content = example.q_content
pos_ans_id = example.pos_ans_id
p_ans_content = example.p_ans_content
neg_ans_id = example.neg_ans_id
n_ans_content = example.n_ans_content
q_input_ids, q_input_mask, q_segment_ids = convert_bert_feature(
q_content, tokenizer, max_seq_length)
p_a_input_ids, p_a_input_mask, p_a_segment_ids = convert_bert_feature(
p_ans_content, tokenizer, max_seq_length)
n_a_input_ids, n_a_input_mask, n_a_segment_ids = convert_bert_feature(
n_ans_content, tokenizer, max_seq_length)
features.append(
TrainFeatures(
question_id=int(question_id),
pos_ans_id=int(pos_ans_id),
neg_ans_id=int(neg_ans_id),
q_input_ids=q_input_ids,
q_input_mask=q_input_mask,
q_segment_ids=q_segment_ids,
p_a_input_ids=p_a_input_ids,
p_a_input_mask=p_a_input_mask,
p_a_segment_ids=p_a_segment_ids,
n_a_input_ids=n_a_input_ids,
n_a_input_mask=n_a_input_mask,
n_a_segment_ids=n_a_segment_ids,
)
)
pbar.update(1)
return features
def convert_train_text_to_features_mongo(text_examples, tokenizer, max_seq_length):
sample_index = []
with tqdm(total=len(text_examples)) as pbar:
pbar.set_description('convert train text examples to features')
for index, example in enumerate(text_examples):
question_id = example.question_id
q_content = example.q_content
pos_ans_id = example.pos_ans_id
p_ans_content = example.p_ans_content
neg_ans_id = example.neg_ans_id
n_ans_content = example.n_ans_content
q_input_ids, q_input_mask, q_segment_ids = convert_bert_feature(
q_content, tokenizer, max_seq_length)
p_a_input_ids, p_a_input_mask, p_a_segment_ids = convert_bert_feature(
p_ans_content, tokenizer, max_seq_length)
n_a_input_ids, n_a_input_mask, n_a_segment_ids = convert_bert_feature(
n_ans_content, tokenizer, max_seq_length)
data = {
'index': index,
'question_id': int(question_id),
'pos_ans_id': int(pos_ans_id),
'neg_ans_id': int(neg_ans_id),
'q_input_ids': q_input_ids,
'q_input_mask': q_input_mask,
'q_segment_ids': q_segment_ids,
'p_a_input_ids': p_a_input_ids,
'p_a_input_mask': p_a_input_mask,
'p_a_segment_ids': p_a_segment_ids,
'n_a_input_ids': n_a_input_ids,
'n_a_input_mask': n_a_input_mask,
'n_a_segment_ids': n_a_segment_ids,
}
collection.insert(data)
sample_index.append(index)
pbar.update(1)
return sample_index
def convert_train_text_to_features_mongo_w2v(text_examples, tokenizer, max_seq_length):
sample_index = []
with tqdm(total=len(text_examples)) as pbar:
pbar.set_description('convert train text examples to features')
for index, example in enumerate(text_examples):
question_id = example.question_id
q_content = example.q_content
pos_ans_id = example.pos_ans_id
p_ans_content = example.p_ans_content
neg_ans_id = example.neg_ans_id
n_ans_content = example.n_ans_content
q_w2v_ids = convert_w2v_id(q_content, max_seq_length)
p_a_w2v_ids = convert_w2v_id(p_ans_content, max_seq_length)
n_a_w2v_ids = convert_w2v_id(n_ans_content, max_seq_length)
q_input_ids, q_input_mask, q_segment_ids = convert_bert_feature(
q_content, tokenizer, max_seq_length)
p_a_input_ids, p_a_input_mask, p_a_segment_ids = convert_bert_feature(
p_ans_content, tokenizer, max_seq_length)
n_a_input_ids, n_a_input_mask, n_a_segment_ids = convert_bert_feature(
n_ans_content, tokenizer, max_seq_length)
data = {
'index': index,
'question_id': int(question_id),
'pos_ans_id': int(pos_ans_id),
'neg_ans_id': int(neg_ans_id),
'q_input_ids': q_input_ids,
'q_input_mask': q_input_mask,
'q_segment_ids': q_segment_ids,
'p_a_input_ids': p_a_input_ids,
'p_a_input_mask': p_a_input_mask,
'p_a_segment_ids': p_a_segment_ids,
'n_a_input_ids': n_a_input_ids,
'n_a_input_mask': n_a_input_mask,
'n_a_segment_ids': n_a_segment_ids,
'q_w2v_ids': q_w2v_ids,
'p_a_w2v_ids': p_a_w2v_ids,
'n_a_w2v_ids': n_a_w2v_ids,
}
collection.insert(data)
sample_index.append(index)
pbar.update(1)
return sample_index
def convert_dev_text_to_features(text_examples, tokenizer, max_seq_length):
features = []
with tqdm(total=len(text_examples)) as pbar:
pbar.set_description('convert dev text examples to features')
for example in text_examples:
question_id = example.question_id
q_content = example.q_content
ans_id = example.ans_id
a_content = example.a_content
cnt = example.cnt
label = example.label
q_input_ids, q_input_mask, q_segment_ids = convert_bert_feature(
q_content, tokenizer, max_seq_length)
a_input_ids, a_input_mask, a_segment_ids = convert_bert_feature(
a_content, tokenizer, max_seq_length)
features.append(
DevFeatures(
question_id=int(question_id),
ans_id=int(ans_id),
q_input_ids=q_input_ids,
q_input_mask=q_input_mask,
q_segment_ids=q_segment_ids,
a_input_ids=a_input_ids,
a_input_mask=a_input_mask,
a_segment_ids=a_segment_ids,
cnt=int(cnt),
label=int(label),
)
)
pbar.update(1)
return features
def convert_dev_text_to_features_w2v(text_examples, tokenizer, max_seq_length):
features = []
with tqdm(total=len(text_examples)) as pbar:
pbar.set_description('convert dev text examples to features')
for example in text_examples:
question_id = example.question_id
q_content = example.q_content
ans_id = example.ans_id
a_content = example.a_content
cnt = example.cnt
label = example.label
q_w2v_ids = convert_w2v_id(q_content, max_seq_length)
a_w2v_ids = convert_w2v_id(a_content, max_seq_length)
q_input_ids, q_input_mask, q_segment_ids = convert_bert_feature(
q_content, tokenizer, max_seq_length)
a_input_ids, a_input_mask, a_segment_ids = convert_bert_feature(
a_content, tokenizer, max_seq_length)
features.append(
DevFeatures(
question_id=int(question_id),
ans_id=int(ans_id),
q_input_ids=q_input_ids,
q_input_mask=q_input_mask,
q_segment_ids=q_segment_ids,
a_input_ids=a_input_ids,
a_input_mask=a_input_mask,
a_segment_ids=a_segment_ids,
q_w2v_ids=q_w2v_ids,
a_w2v_ids=a_w2v_ids,
cnt=int(cnt),
label=int(label),
)
)
pbar.update(1)
return features
def convert_test_text_to_features(text_examples, tokenizer, max_seq_length):
features = []
with tqdm(total=len(text_examples)) as pbar:
pbar.set_description('convert test text examples to features')
for example in text_examples:
question_id = example.question_id
q_content = example.q_content
ans_id = example.ans_id
a_content = example.a_content
cnt = example.cnt
label = example.label
q_input_ids, q_input_mask, q_segment_ids = convert_bert_feature(
q_content, tokenizer, max_seq_length)
a_input_ids, a_input_mask, a_segment_ids = convert_bert_feature(
a_content, tokenizer, max_seq_length)
features.append(
TestFeatures(
question_id=int(question_id),
ans_id=int(ans_id),
q_input_ids=q_input_ids,
q_input_mask=q_input_mask,
q_segment_ids=q_segment_ids,
a_input_ids=a_input_ids,
a_input_mask=a_input_mask,
a_segment_ids=a_segment_ids,
cnt=int(cnt),
label=int(label),
)
)
pbar.update(1)
return features
def convert_test_text_to_features_w2v(text_examples, tokenizer, max_seq_length):
features = []
with tqdm(total=len(text_examples)) as pbar:
pbar.set_description('convert test text examples to features')
for example in text_examples:
question_id = example.question_id
q_content = example.q_content
ans_id = example.ans_id
a_content = example.a_content
cnt = example.cnt
label = example.label
q_w2v_ids = convert_w2v_id(q_content, max_seq_length)
a_w2v_ids = convert_w2v_id(a_content, max_seq_length)
q_input_ids, q_input_mask, q_segment_ids = convert_bert_feature(
q_content, tokenizer, max_seq_length)
a_input_ids, a_input_mask, a_segment_ids = convert_bert_feature(
a_content, tokenizer, max_seq_length)
features.append(
TestFeatures(
question_id=int(question_id),
ans_id=int(ans_id),
q_input_ids=q_input_ids,
q_input_mask=q_input_mask,
q_segment_ids=q_segment_ids,
a_input_ids=a_input_ids,
a_input_mask=a_input_mask,
a_segment_ids=a_segment_ids,
cnt=int(cnt),
label=int(label),
q_w2v_ids=q_w2v_ids,
a_w2v_ids=a_w2v_ids,
)
)
pbar.update(1)
return features
def get_data(args, file_class, save_mode='local', use_w2v=False):
tokenizer = BertTokenizer(vocab_file=args.vocab_file, do_lower_case=True)
# load features
features = ''
if file_class == 'train':
if save_mode == 'local':
save_dir = args.train_text_data.split('/')
save_dir[-1] = f'train_features_data_{args.max_seq_length}'
save_dir = "/".join(save_dir)
if os.path.exists(save_dir):
features = load_pkl_data(save_dir)
else:
text_examples = load_pkl_data(args.train_text_data)
features = convert_train_text_to_features(text_examples, tokenizer, args.max_seq_length)
save_pkl_data(features, save_dir)
elif save_mode == 'mongo':
save_dir = args.train_text_data.split('/')
if use_w2v:
save_dir[-1] = f'train_sample_index_{args.max_seq_length}_use_w2v'
else:
save_dir[-1] = f'train_sample_index_{args.max_seq_length}'
save_dir = "/".join(save_dir)
if os.path.exists(save_dir):
train_sample_index = load_pkl_data(save_dir)
else:
text_examples = load_pkl_data(args.train_text_data)
if use_w2v:
train_sample_index = convert_train_text_to_features_mongo_w2v(text_examples, tokenizer, args.max_seq_length)
else:
train_sample_index = convert_train_text_to_features_mongo(text_examples, tokenizer, args.max_seq_length)
save_pkl_data(train_sample_index, save_dir)
return train_sample_index
elif file_class == 'dev':
save_dir = args.dev_text_data.split('/')
if use_w2v:
save_dir[-1] = f'dev_features_data_{args.max_seq_length}_use_w2v'
else:
save_dir[-1] = f'dev_features_data_{args.max_seq_length}'
save_dir = "/".join(save_dir)
if os.path.exists(save_dir):
features = load_pkl_data(save_dir)
else:
text_examples = load_pkl_data(args.dev_text_data)
if use_w2v:
features = convert_dev_text_to_features_w2v(text_examples, tokenizer, args.max_seq_length)
else:
features = convert_dev_text_to_features(text_examples, tokenizer, args.max_seq_length)
save_pkl_data(features, save_dir)
elif file_class == 'test':
save_dir = args.test_text_data.split('/')
if use_w2v:
save_dir[-1] = f'test_features_data_{args.max_seq_length}_use_w2v'
else:
save_dir[-1] = f'test_features_data_{args.max_seq_length}'
save_dir = "/".join(save_dir)
if os.path.exists(save_dir):
features = load_pkl_data(save_dir)
else:
text_examples = load_pkl_data(args.test_text_data)
if use_w2v:
features = convert_test_text_to_features_w2v(text_examples, tokenizer, args.max_seq_length)
else:
features = convert_test_text_to_features(text_examples, tokenizer, args.max_seq_length)
save_pkl_data(features, save_dir)
return features
def seed_everything(seed=1029):
    '''
    Set the random seed for the whole development environment
    :param seed:
    :return:
    '''
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# some cudnn methods can be random even after fixing the seed
# unless you tell it to be deterministic
torch.backends.cudnn.deterministic = True
def get_tensor_dataset(args, file_class, save_mode='local', reload=True, use_w2v=False):
tensor_dataset = ''
if file_class == 'train':
if save_mode == 'local':
save_dir = args.train_text_data.split('/')
save_dir[-1] = f'train_features_tensor_dataset_{args.max_seq_length}'
save_dir = "/".join(save_dir)
if os.path.exists(save_dir):
tensor_dataset = load_pkl_data(save_dir)
else:
print('building train features tensor dataset')
print("get_data(args, file_class='train')")
train_features = get_data(args, file_class='train')
print('building tensor')
question_id_tensor = torch.tensor([feature.question_id for feature in train_features])
pos_ans_id_tensor = torch.tensor([feature.pos_ans_id for feature in train_features])
neg_ans_id_tensor = torch.tensor([feature.neg_ans_id for feature in train_features])
q_input_ids_tensor = torch.tensor([feature.q_input_ids for feature in train_features])
q_input_mask_tensor = torch.tensor([feature.q_input_mask for feature in train_features])
q_segment_ids_tensor = torch.tensor([feature.q_segment_ids for feature in train_features])
p_a_input_ids_tensor = torch.tensor([feature.p_a_input_ids for feature in train_features])
p_a_input_mask_tensor = torch.tensor([feature.p_a_input_mask for feature in train_features])
p_a_segment_ids_tensor = torch.tensor([feature.p_a_segment_ids for feature in train_features])
n_a_input_ids_tensor = torch.tensor([feature.n_a_input_ids for feature in train_features])
n_a_input_mask_tensor = torch.tensor([feature.n_a_input_mask for feature in train_features])
n_a_segment_ids_tensor = torch.tensor([feature.n_a_segment_ids for feature in train_features])
tensor_dataset = TensorDataset(
question_id_tensor,
pos_ans_id_tensor,
neg_ans_id_tensor,
q_input_ids_tensor,
q_input_mask_tensor,
q_segment_ids_tensor,
p_a_input_ids_tensor,
p_a_input_mask_tensor,
p_a_segment_ids_tensor,
n_a_input_ids_tensor,
n_a_input_mask_tensor,
n_a_segment_ids_tensor,
)
print('save data')
save_pkl_data(tensor_dataset, save_dir)
elif save_mode == 'mongo':
save_dir = args.train_text_data.split('/')
if use_w2v:
save_dir[-1] = f'train_sample_index_dataset_{args.max_seq_length}_use_w2v'
else:
save_dir[-1] = f'train_sample_index_dataset_{args.max_seq_length}'
save_dir = "/".join(save_dir)
if os.path.exists(save_dir):
tensor_dataset = load_pkl_data(save_dir)
else:
print('building train features tensor dataset')
print("get_data(args, file_class='train')")
train_sample_index = get_data(args, file_class='train', save_mode=save_mode, use_w2v=use_w2v)
sample_index_tensor = torch.tensor([sample_index for sample_index in train_sample_index])
tensor_dataset = TensorDataset(sample_index_tensor)
save_pkl_data(tensor_dataset, save_dir)
elif file_class == 'dev':
save_dir = args.dev_text_data.split('/')
if use_w2v:
save_dir[-1] = f'dev_features_tensor_dataset_{args.max_seq_length}_use_w2v'
else:
save_dir[-1] = f'dev_features_tensor_dataset_{args.max_seq_length}'
save_dir = "/".join(save_dir)
if os.path.exists(save_dir) and reload:
tensor_dataset = load_pkl_data(save_dir)
else:
print('building dev features tensor dataset')
dev_features = get_data(args, file_class='dev', use_w2v=use_w2v)
question_id_tensor = torch.tensor([feature.question_id for feature in dev_features])
ans_id_tensor = torch.tensor([feature.ans_id for feature in dev_features])
q_input_ids_tensor = torch.tensor([feature.q_input_ids for feature in dev_features])
q_input_mask_tensor = torch.tensor([feature.q_input_mask for feature in dev_features])
q_segment_ids_tensor = torch.tensor([feature.q_segment_ids for feature in dev_features])
a_input_ids_tensor = torch.tensor([feature.a_input_ids for feature in dev_features])
a_input_mask_tensor = torch.tensor([feature.a_input_mask for feature in dev_features])
a_segment_ids_tensor = torch.tensor([feature.a_segment_ids for feature in dev_features])
cnt_tensor = torch.tensor([feature.cnt for feature in dev_features])
label_tensor = torch.tensor([feature.label for feature in dev_features])
q_w2v_ids_tensor = torch.tensor([feature.q_w2v_ids for feature in dev_features])
a_w2v_ids_tensor = torch.tensor([feature.a_w2v_ids for feature in dev_features])
tensor_dataset = TensorDataset(
question_id_tensor,
ans_id_tensor,
q_input_ids_tensor,
q_input_mask_tensor,
q_segment_ids_tensor,
a_input_ids_tensor,
a_input_mask_tensor,
a_segment_ids_tensor,
cnt_tensor,
label_tensor,
q_w2v_ids_tensor,
a_w2v_ids_tensor,
)
save_pkl_data(tensor_dataset, save_dir)
elif file_class == 'test':
save_dir = args.test_text_data.split('/')
if use_w2v:
save_dir[-1] = f'test_features_tensor_dataset_{args.max_seq_length}_use_w2v'
else:
save_dir[-1] = f'test_features_tensor_dataset_{args.max_seq_length}'
save_dir = "/".join(save_dir)
if os.path.exists(save_dir):
tensor_dataset = load_pkl_data(save_dir)
else:
print('building test features tensor dataset')
test_features = get_data(args, file_class='test', use_w2v=use_w2v)
question_id_tensor = torch.tensor([feature.question_id for feature in test_features])
ans_id_tensor = torch.tensor([feature.ans_id for feature in test_features])
q_input_ids_tensor = torch.tensor([feature.q_input_ids for feature in test_features])
q_input_mask_tensor = torch.tensor([feature.q_input_mask for feature in test_features])
q_segment_ids_tensor = torch.tensor([feature.q_segment_ids for feature in test_features])
a_input_ids_tensor = torch.tensor([feature.a_input_ids for feature in test_features])
a_input_mask_tensor = torch.tensor([feature.a_input_mask for feature in test_features])
a_segment_ids_tensor = torch.tensor([feature.a_segment_ids for feature in test_features])
cnt_tensor = torch.tensor([feature.cnt for feature in test_features])
label_tensor = torch.tensor([feature.label for feature in test_features])
q_w2v_ids_tensor = torch.tensor([feature.q_w2v_ids for feature in test_features])
a_w2v_ids_tensor = torch.tensor([feature.a_w2v_ids for feature in test_features])
tensor_dataset = TensorDataset(
question_id_tensor,
ans_id_tensor,
q_input_ids_tensor,
q_input_mask_tensor,
q_segment_ids_tensor,
a_input_ids_tensor,
a_input_mask_tensor,
a_segment_ids_tensor,
cnt_tensor,
label_tensor,
q_w2v_ids_tensor,
a_w2v_ids_tensor,
)
save_pkl_data(tensor_dataset, save_dir)
return tensor_dataset
if __name__ == '__main__':
from config import Config
args = Config().args
client = pymongo.MongoClient("mongodb://localhost:27017")
db = client['cmcqa_epilepsy']
use_w2v = True
if use_w2v:
collection = db['train_use_w2v']
w2v_vocab2id = load_pkl_data(args.w2v_vocab2id)
else:
collection = db['train']
#print("get_data(args, file_class='train')")
#_ = get_data(args, file_class='train')
#print("get_data(args, file_class='dev')")
#_ = get_data(args, file_class='dev')
#print("get_data(args, file_class='test')")
#_ = get_data(args, file_class='test')
#print("get_tensor_dataset(args, file_class='train')")
_ = get_tensor_dataset(args, file_class='train', save_mode='mongo', use_w2v=use_w2v)
#print("get_tensor_dataset(args, file_class='dev')")
_ = get_tensor_dataset(args, file_class='dev', reload=False, use_w2v=use_w2v)
#print("get_tensor_dataset(args, file_class='test')")
_ = get_tensor_dataset(args, file_class='test', reload=False, use_w2v=use_w2v)
print('end')
|
import os
def renaming():
#variables
savedPath = os.getcwd()
print("Current Directory " + savedPath)
# 1) get the file name and open it
fileList = os.listdir("/Users/jamekaechols/Desktop/PythonShenanigans/HiringManagersOpenME")
print(fileList)
os.chdir("/Users/jamekaechols/Desktop/PythonShenanigans/HiringManagersOpenME")
# 2) get the files and rename them
for fileNames in fileList:
remove = "0123456789"
table = str.maketrans('','',remove)
os.rename(fileNames,fileNames.translate(table))
print(fileNames)
renaming()
|
import heapq
def topKFrequent(nums, k):
obj = {}
for i in nums:
if i in obj:
obj[i] += 1
else:
obj[i] = 1
maxH = []
for key in obj:
heapq.heappush(maxH, (obj[key], key))
if len(maxH) > k:
heapq.heappop(maxH)
res = []
while len(maxH) > 0:
res.append(maxH[0][1])
heapq.heappop(maxH)
return res
print(topKFrequent([1, 1, 1, 2, 2, 3], 2))
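
# A shorter equivalent sketch using the standard library (tie order may differ):
# from collections import Counter
# def top_k_frequent(nums, k):
#     return [x for x, _ in Counter(nums).most_common(k)]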
|
#!/proj/sot/ska3/flight/bin/python
#####################################################################################
# #
# run_dea_perl_script.py: run DEA related perl scripts #
# THIS MUST BE RUN ON R2D2-V OR C3PO-V WHERE /dsops/ IS VISIBLE #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Feb 02, 2021 #
# #
#####################################################################################
import os
import sys
import re
import getpass
import subprocess
#
#--- reading directory list
#
path = '/data/mta/Script/MTA_limit_trends/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
data = [line.strip() for line in f.readlines()]
for ent in data:
atemp = re.split(':', ent)
var = atemp[1].strip()
line = atemp[0].strip()
exec("%s = %s" %(var, line))
#
#--- append path to a private folder
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
dea_dir = bin_dir + '/DEA/'
infile = dea_dir + 'past_dump_list'
infile2 = dea_dir + 'past_dump_list~'
ofile = dea_dir + 'today_dump_files'
repository = dea_dir + 'RDB/'
input_dir = '/dsops/GOT/input/'
#
#--- just in case ACISTOOLSDIR is not set: set it and re-exec so the change takes effect
#
if "ACISTOOLSDIR" not in os.environ:
    os.environ['ACISTOOLSDIR'] = dea_dir
    try:
        os.execv(sys.argv[0], sys.argv)
    except Exception as exc:
        print('Failed re-exec:', exc)
        sys.exit(1)
#------------------------------------------------------------------------------------
#-- run_dea_perl_script: run dea extraction perl scripts --
#------------------------------------------------------------------------------------
def run_dea_perl_script():
"""
run dea extraction perl scripts
input: none, but read from /dsops/GOT/input/. must be run on r2d2-v or c3po-v
output: <repository>/deahk_<temp/elec>.rdb
"""
data_list = find_new_dump()
run_dea_perl(data_list)
#------------------------------------------------------------------------------------
#-- find_new_dump: create a list of new dump data files --
#------------------------------------------------------------------------------------
def find_new_dump():
"""
create a list of new dump data files
input: none, but read from /dsops/GOT/input/
output: dlist --- a list of new data file names
"""
#
#--- read the list of the data already processed
#
with open(infile, 'r') as f:
plist = [line.strip() for line in f.readlines()]
#
#--- find the last entry
#
last_entry = plist[-1]
cmd = ' mv ' + infile + ' ' + infile2
os.system(cmd)
#
#--- create the current data list
#
cmd = 'ls -rt /dsops/GOT/input/*Dump_EM*.gz > ' + infile
os.system(cmd)
if os.stat(infile).st_size == 0:
cmd = 'cp -f ' + infile2 + ' ' + infile
os.system(cmd)
with open(infile, 'r') as f:
data = [line.strip() for line in f.readlines()]
#
#---- find the data which are not processed yet and print out
#
chk = 0
dlist = []
line = ''
for ent in data:
if chk == 0:
if ent == last_entry:
chk = 1
continue
else:
dlist.append(ent)
return dlist
#------------------------------------
#-- tail: functions like tail command
#------------------------------------
def tail(f, n=10):
proc = subprocess.Popen(['tail', '-n', str(n), f], stdout=subprocess.PIPE)
lines = list(proc.stdout.readlines())
lines = [x.decode() for x in lines]
if len(lines) == 1:
return lines[0]
elif len(lines) == 0:
return ''
else:
return lines
#---------------------
#-- smart_append: appends a processed data file onto an existing data set without repeating time entries.
#-- Note: designed for this project's rdb files, where time is recorded as the first text entry in each line; it does not work in general.
#-----------------------
def smart_append(file, append):
if os.path.isfile(file) == False:
cmd = f"cp {append} {file}"
os.system(cmd)
return
else:
endtime = float(tail(file,n=1).strip().split()[0])
with open(append,'r') as f:
for line in f:
data = line.strip()
if data != '':
chk = 0
if float(data.split()[0]) > endtime:
with open(file,'a+') as f:
f.write(line)
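# Usage sketch (hypothetical file names): smart_append(f"{repository}/deahk_temp_week2021.rdb", "deahk_temp.rdb")
# appends only the lines whose leading time value is newer than the target file's last entry.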
#------------------------------------------------------------------------------------
#-- run_dea_perl: run perl scripts to extract data from dump data --
#------------------------------------------------------------------------------------
def run_dea_perl(dlist):
"""
run perl scripts to extract data from dump data
input: dlist --- a list of dump data file names
output: <repository>/deahk_<temp/elec>.rdb
"""
for ifile in dlist:
atemp = re.split('\/', ifile)
btemp = re.split('_', atemp[-1])
year = str(btemp[0])
#
#--- following is Peter Ford script to extract data from dump data
#
cmd = '/bin/gzip -dc ' + ifile + ' | ' + dea_dir + 'getnrt -O | ' + dea_dir + 'deahk.pl'
os.system(cmd)
cmd = dea_dir + 'out2in.pl deahk_temp.tmp deahk_temp_in.tmp ' + year
os.system(cmd)
cmd = dea_dir + 'out2in.pl deahk_elec.tmp deahk_elec_in.tmp ' + year
os.system(cmd)
#
#--- 5 min resolution
#
cmd = dea_dir + 'average1.pl -i deahk_temp_in.tmp -o deahk_temp.rdb'
os.system(cmd)
smart_append(f"{repository}/deahk_temp_week{year}.rdb","deahk_temp.rdb")
cmd = dea_dir + 'average1.pl -i deahk_elec_in.tmp -o deahk_elec.rdb'
os.system(cmd)
smart_append(f"{repository}/deahk_elec_week{year}.rdb","deahk_elec.rdb")
#
#--- one hour resolution
#
cmd = dea_dir + 'average2.pl -i deahk_temp_in.tmp -o deahk_temp.rdb'
os.system(cmd)
smart_append(f"{repository}/deahk_temp_short.rdb","deahk_temp.rdb")
cmd = dea_dir + 'average2.pl -i deahk_elec_in.tmp -o deahk_elec.rdb'
os.system(cmd)
smart_append(f"{repository}/deahk_elec_short.rdb","deahk_elec.rdb")
#
#--- clean up
#
cmd = 'rm -rf deahk_*.tmp deahk_*.rdb '
os.system(cmd)
#------------------------------------------------------------------------------------
if __name__ == "__main__":
#
#--- Create a lock file and exit strategy in case of race conditions
#
name = os.path.basename(__file__).split(".")[0]
user = getpass.getuser()
if os.path.isfile(f"/tmp/{user}/{name}.lock"):
sys.exit(f"Lock file exists as /tmp/{user}/{name}.lock. Process already running/errored out. Check calling scripts/cronjob/cronlog.")
else:
os.system(f"mkdir -p /tmp/{user}; touch /tmp/{user}/{name}.lock")
run_dea_perl_script()
#
#--- Remove lock file once process is completed
#
os.system(f"rm /tmp/{user}/{name}.lock")
|
"""
Fabric deployment script.
"""
from fabric import task
def dotenv(git_ref):
return f"APP_COMMIT={git_ref}"
def archive(c):
ref = c.local('git rev-parse --short HEAD', hide=True).stdout.strip()
c.local(f"git archive -o {ref}.tar.gz HEAD")
return ref
def build(c):
c.local('yarn build-prod')
@task
def deploy(c):
git_ref = archive(c)
archive_filename = git_ref + '.tar.gz'
with c.cd('/srv/app'):
c.run(f"mkdir {git_ref}")
with c.cd(f"/srv/app/{git_ref}"):
c.put(archive_filename, remote=f"/srv/app/{git_ref}")
c.run(f"tar xzvf {archive_filename}", hide=True)
c.run('ln -s ../store.db store.db')
c.run(f"echo '{dotenv(git_ref)}' > .env")
build(c)
c.local(f"rsync -rz public {c.user}@{c.host}:/srv/app/{git_ref}")
with c.cd(f"/srv/app/{git_ref}"):
c.run('./install.sh')
c.run('./stop.sh')
c.run('./start.sh')
with c.cd('/srv/app'):
c.run(f"ln -sfn {git_ref} current")
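
# Usage sketch (host and user are assumed to be supplied on the command line):
#   fab -H deploy@example.com deploy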
|
import tensorflow as tf
from lazy_property import lazy_property
'''
using structure from https://danijar.com/structuring-your-tensorflow-models/
'''
class Model:
def __init__(self, feature, label):
self.feature = feature
self.label = label
self.prediction
self.optimize
self.error
@lazy_property
def prediction(self):
feature_size = 6
label_size = 4
layer_1_size = 2
with tf.variable_scope('layer_1') as scope:
layer_1_weights = tf.Variable(tf.random_uniform(shape=[feature_size, layer_1_size], minval = 0.001, maxval = 0.01), name="weights")
#layer_1_weights = tf.Variable(tf.constant(0.0, shape=[feature_size, layer_1_size]), name="weights")
layer_1_biases = tf.Variable(tf.constant(0.0, shape = [layer_1_size]), name = "biases")
layer_1 = tf.nn.relu(tf.matmul(self.feature, layer_1_weights) + layer_1_biases)
with tf.variable_scope('layer_output') as scope:
output_weights = tf.Variable(tf.random_uniform(shape = [layer_1_size, label_size], minval = 0.001, maxval = 0.01), name = "weights")
output_biases = tf.Variable(tf.constant(0.0, shape = [label_size]), name = "biases")
output = tf.matmul(layer_1, output_weights) + output_biases
return output
@lazy_property
def optimize(self):
learning_rate = 0.1
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.prediction, labels=self.label))
#return tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(cost), cost
return tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(cost), cost
@lazy_property
def error(self):
correct_prediction = tf.equal(tf.argmax(self.prediction, 1), tf.argmax(self.label, 1))
return tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
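
# Usage sketch (assumed one-hot labels; shapes follow the sizes hard-coded in prediction):
#   feature = tf.placeholder(tf.float32, [None, 6])
#   label = tf.placeholder(tf.float32, [None, 4])
#   model = Model(feature, label)
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       _, cost = sess.run(model.optimize, feed_dict={feature: batch_x, label: batch_y})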
|
import unittest
from Pyskell.Language.EnumList import L
from Pyskell.Language.TypeClasses import *
class HLTest(unittest.TestCase):
def test_haskell_list(self):
l1 = L[1, 2, ...]
l2 = L[[1]]
self.assertTrue(l1 > l2)
self.assertFalse(l1 < l2)
l3 = L[1, 3, ...]
for i in L[1, 3, ...]:
if i > 20:
break
self.assertTrue(i % 2 == 1)
self.assertEqual(29, l3[14])
self.assertTrue(show % (3 ^ (2 ^ l2)), "L[3, 2, 1]")
self.assertTrue(l2 != l3)
@TS(C / [int] >> int)
def summer(_var):
return sum(_var)
self.assertEqual(summer % L[1, ..., 10], 55)
|
import sys
filename = sys.argv[1]
file = open(filename)
numbers = {}
fixes = {}
for line in file.xreadlines():
parts = line.split("\t")
length = len(parts)
if length == 2: continue
if length != 8 and length != 9 and length != 10: print length, line.replace("\t", "|")
time = parts[0][1:]; lat = parts[1][3:]; lon = parts[2][3:]
alt = parts[3][1:]; speed = parts[4][1:]; course = parts[5][1:]
fix = parts[6][1:]; num = parts[7][1:]
fix = ord(fix)
if len(num) == 0: num = 0
num = ord(num)
|
import requests
cdo_token = 'davQIOzciXPWdFXJzJLAZXGfCdyrOEiq'
header = {'token': cdo_token}
base_url = 'https://www.ncdc.noaa.gov/cdo-web/api/v2'
stations_endpoint = '/stations'
params = {'limit': 1000, 'datasetid': 'lcd',
'startdate': '2019-12-01', 'enddate': '2019-12-31'}
# response = requests.get(base_url + stations_endpoint, params=params, headers=header)
response = requests.get(base_url + '/datasets', headers=header)
print('Status: ' + str(response.status_code))
print(response.text)
|
n = int(input())
arr = list(map(int,input().strip().split()))[:n]
stor = []
past = 0
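# Kadane's algorithm: 'past' holds the best subarray sum ending at the current index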
for i in range(n):
if past + arr[i] >= arr[i]:
past = past + arr[i]
else:
past = arr[i]
stor.append(past)
ans = max(stor)
print(ans)
|
#!/usr/bin/python
def StrToInt(data):
return ((ord(data[0]) << 24) +
(ord(data[1]) << 16) +
(ord(data[2]) << 8) +
(ord(data[3])))
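# (Equivalent to struct.unpack('>I', data[:4])[0] for a 4-byte big-endian value.)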
def IntToStr(num):
result = ''
for _ in range(4):
char = chr(num & 255)
result = char + result
num >>= 8
return result
def ReadByteArray(data):
return map(ord, data)
def ReadIntArray(data):
result = []
length = len(data)
assert length % 4 == 0
for k in range(length / 4):
result.append(StrToInt(data[k*4:k*4 + 4]))
return result
def ReadFile(filename):
in_file = open(filename, 'r')
content = in_file.read()
in_file.close()
return content
def Int32ToStr(num):
result = ''
assert num < 2**32
assert num >= 0
for _ in range(4):
char = chr(num & 255)
result = char + result
num >>= 8
return result
def Int16ToStr(num):
assert num >= 0
assert num < 2**16
result = ''
for _ in range(2):
char = chr(num & 255)
result = char + result
num >>= 8
return result
def EncodeString(string):
assert type(string) == unicode
return Int32ToStr(len(string)) + ''.join(map(Int16ToStr, map(ord, string)))
def EncodeJsonStr(string):
result = ''
for ch in string:
if ch in ['"']:
result += '\\' + ch
else:
result += ch
return '"' + result + '"'
def EncodeJson(value):
if type(value) == int:
return str(value)
if type(value) == long:
return str(value)
if type(value) == float:
return str(value)
if type(value) == str:
return EncodeJsonStr(value)
if type(value) == unicode:
return EncodeJsonStr(value.encode('utf8'))
if type(value) == list:
return EncodeJson(dict(enumerate(value)))
if type(value) == dict:
result = []
for k, v in value.iteritems():
result.append(EncodeJson(k) + ':' + EncodeJson(v))
return '{' + ', '.join(result) + '}'
raise Exception('Value %s has unsupported type %s' % (value, type(value)))
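# Note: lists are encoded as objects keyed by integer index (e.g. [2, 3] -> {0: 2, 1: 3}),
# and key order follows dict iteration order, which is not guaranteed on Python 2.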
|
#just try try again
import os
import numpy as np
import random
from PIL import Image
import matplotlib.pyplot as plt
import struct
import time
import logging
logging.basicConfig(level = logging.INFO)
#function: read images from MNIST that represent specific digits and load them into a matrix
#input: file_image/file_label -> paths to the MNIST image and label files;
#       lists -> the digits to keep; num -> the number of images to read
#output: imageMatrix -> a matrix whose columns are the pixel values of each image
#        labelMatrix -> a row vector containing the digit each image represents
#author: turk zhou
def formMatrix(file_image, file_label, lists, num):
#read image&label binary flow
f_image = open(file_image, 'rb')#'b' to the mode for binary files;'r' read only
f_label = open(file_label, 'rb')
index_image = 0
index_label = 0
buf_image = f_image.read()
buf_label = f_label.read()
f_image.close()
f_label.close()
#read head from the binary flow
magic, images, rows, columns = struct.unpack_from('>IIII', buf_image, index_image)
megic, labels = struct.unpack_from('>II', buf_label, index_label)
index_image += struct.calcsize('>IIII')
index_label += struct.calcsize('>II')
imageArr = np.zeros((rows*columns, num))
labelArr = [0]*num
assert(num < images)
pushValue = 0
for i in xrange(num):
image = Image.new('L', (columns, rows))
value = int(struct.unpack_from('>B', buf_label, index_label)[0])
if value in lists:
labelArr[pushValue] = value
imageArr[:,pushValue] = (np.array(struct.unpack_from('>784B', buf_image, index_image)).T)/(255.0)
index_test = index_image
#for x in xrange(rows):
#for y in xrange(columns):
#image.putpixel((y,x), int(struct.unpack_from('>B', buf_image, index_test)[0]))
#index_test += struct.calcsize('>B')
#image.save('test' + str(pushValue) + '.bmp')
pushValue = pushValue+1
index_image += struct.calcsize('>784B')
index_label += struct.calcsize('>B')
imageMatrix = np.mat(imageArr)
labelMatrix = np.mat(labelArr)
return imageMatrix[:, :pushValue], labelMatrix[:, :pushValue]
############################# network parameters ################################
global w1_C
global b1
global iteraNum
iteraNum = 1000
global debug
debug = 1
############################# network parameters ################################
#train the network, generate the parameters
#input: trainMatrix->the matrix which is formed by train datas
# trainY->the label of each train sample
#output: none
#author: turk zhou
def train_Net(trainMatrix, trainY_R):
#initilize parameters
global w1_C
global b1
global debug
matRnum = trainMatrix.shape[0] #parameter dimension
matCnum = trainMatrix.shape[1]
w1_C = np.mat( np.random.rand(matRnum, 1) )/matRnum
b1 = random.random()
rate = 0.01
decent = np.mat(np.random.rand(matCnum, iteraNum))
for time in xrange(iteraNum):
#forward propagation
z1_R = w1_C.T*trainMatrix + b1
a1_R = 1.0/(1 + np.exp(-z1_R))
#backword propagation
dz_R = a1_R - trainY_R#real value
dw_C = (1.0/matCnum)*trainMatrix*dz_R.T
db = (1.0/matCnum)*np.sum(dz_R)
decent[:,time] = a1_R.T
if debug:
if time <= -1:
#print w1_C
print z1_R
print a1_R
print trainY_R
print '######################'
w1_C = w1_C - rate*dw_C
b1 = b1 - rate*db
if debug:#plot a gradient descent procedure
decent = decent.getA()
#plt.plot(decent[10])
#plt.plot(decent[11])
#plt.plot(decent[12])
#plt.plot(decent[13])
#plt.show()
#test the network, return the correct rate
#input: trainMatrix->the matrix which is formed by train datas
# trainY->the label of each train sample
#output: none
#author: turk zhou
def test_Net(testMatrix, testY_R):
Value_R = w1_C.T*testMatrix + b1
testValue_R = 1.0/(1 + np.exp(-Value_R))
testValue_R = testValue_R.getA()
testY_R = testY_R.getA()
print testValue_R.shape
print testY_R.shape
plt.plot(testValue_R[0],'.')
plt.plot(testY_R[0],'.')
plt.show()
print '######################'
imagepath = '../project/train-images-idx3-ubyte/train-images.idx3-ubyte'
labelpath = '../project/train-labels-idx1-ubyte/train-labels.idx1-ubyte'
imagepathTest = '../project/t10k-images-idx3-ubyte/t10k-images.idx3-ubyte'
labelpathTest = '../project/t10k-labels-idx1-ubyte/t10k-labels.idx1-ubyte'
lists = [0,1]
def main():
X, Y_R = formMatrix(imagepath, labelpath, lists, 4000)
train_Net(X, Y_R)
test_X, test_Y_R = formMatrix(imagepathTest, labelpathTest, lists, 9999)
test_Net(test_X, test_Y_R)
if __name__ == '__main__':
main()
|
n1 = int(input('Type a number: '))
d = n1 * 2
t = n1 * 3
r = n1 ** 0.5
print('You typed {}, the double is {}, the triple is {} and the square root is {}'.format(n1, d, t, r))
|
from PySide2.QtWidgets import QApplication, QWidget, QVBoxLayout, QLabel, QFileDialog, QPushButton
from PySide2.QtGui import QPixmap
from PySide2.QtCore import Qt
from GPSPhoto import gpsphoto
import webbrowser
import os
import sys
import time
class Window(QWidget):
def __init__(self):
super().__init__()
self.fname = ""
self.setWindowTitle("GENEPIX Geolocalisation")
self.setGeometry(300, 300, 800, 600)
self.main_layout = QVBoxLayout()
self.lbl_logo = QLabel(self)
self.lbl_logo.setPixmap(QPixmap(os.path.join("img", "genepix_localizer.png")))
self.lbl_browsed_img = QLabel(self)
self.lbl_browsed_img.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
self.setStyleSheet("background-color: #222; color: #fff; font-weight: bold;")
self.btn_browse = QPushButton("Browse photo...")
self.btn_browse.setStyleSheet("background-color: #00f2ec; color: #222; padding: 15px; border-radius: 3px;")
self.btn_browse.clicked.connect(self.browse)
self.btn_search = QPushButton("Geolocaliser")
self.btn_search.setStyleSheet("background-color: #ff074b; padding: 15px; border-radius: 3px;")
self.btn_search.clicked.connect(self.geoloc)
self.main_layout.addWidget(self.lbl_logo)
self.main_layout.addWidget(self.lbl_browsed_img)
self.main_layout.addWidget(self.btn_browse)
self.main_layout.addWidget(self.btn_search)
self.setLayout(self.main_layout)
def geoloc(self):
print(self.fname)
if self.fname == "":
print("Please browse for a photo first")
return
data = gpsphoto.getGPSData(self.fname)
if "Latitude" not in data.keys() or "Longitude" not in data.keys():
print(f"Couldn't get gps data from exif on this image : {self.fname}")
return
print(data['Latitude'], data['Longitude'])
try:
webbrowser.open(f"http://maps.google.com/?q={data['Latitude']},{data['Longitude']}")
except Exception as e:
print(f"Error while trying to open webbrowser")
return
def browse(self):
file_output = QFileDialog.getOpenFileName(self, 'Open file', 'c:\\', "Image files (*.jpg *.gif)")
        if not file_output[0]:  # user cancelled the dialog
return
self.fname = file_output[0]
browsed_img = QPixmap(self.fname).scaled(300, 300, Qt.KeepAspectRatio)
self.lbl_browsed_img.setPixmap(browsed_img)
if __name__ == '__main__':
myApp = QApplication()
window = Window()
window.show()
    sys.exit(myApp.exec_())
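# Note: gpsphoto.getGPSData(self.fname) returns a plain dict; for a photo that
# carries GPS EXIF tags it should hold at least 'Latitude' and 'Longitude' as
# decimal degrees, e.g. {'Latitude': 48.8584, 'Longitude': 2.2945}
# (illustrative values), which is exactly what geoloc() checks for above.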
|
# coding= UTF-8
#
# Author: Fing
# Date : 2017-12-03
#
import numpy as np
import scipy
import sys
sys.path.append('/home/suhas/Desktop/Sem2/audio-classification/data_try/libsvm-3.24/python/')
from svmutil import *
import sklearn
from sklearn.model_selection import train_test_split
# Load data from numpy file
X_1 = np.load('db1.npy',allow_pickle = True)
X_1 = X_1[::,1:50:]
#X = X[::,1:10:1]
X_2 = np.load('db4energy.npy')
X_2 = X_2[::,1:6:1]
#X_3 = np.load('feat_mel.npy')
X = np.concatenate((X_1,X_2),axis=1)
y = np.load('labels.npy').ravel()
#X = np.load('db4_6.npy')
#y = np.load('db4_labels.npy').ravel()
# Split data into training and test subsets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# Read data in LIBSVM format
m = svm_train(y_train, X_train, '-c 4')
p_label, p_acc, p_val = svm_predict(y_test, X_test, m)
svm_save_model('libsvm.model', m)
#m = svm_load_model('libsvm.model')
print(p_acc)
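# Notes on the LIBSVM calls above: '-c 4' sets the soft-margin cost C to 4
# (with the default RBF kernel), and svm_predict returns p_acc as a tuple of
# (accuracy in %, mean squared error, squared correlation coefficient), so
# print(p_acc) shows all three numbers.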
|
import sys, os, time
from gevent_zeromq import zmq
import monitor
ctx = zmq.Context()
sub_addr, router_addr = sys.argv[1:]
sub_sock = ctx.socket(zmq.SUB)
sub_sock.setsockopt(zmq.SUBSCRIBE, '')
sub_sock.connect(sub_addr)
req_sock = ctx.socket(zmq.REQ)
req_sock.connect(router_addr)
class Plugin(object):
last_heartbeat = 0
def _send(self, msg):
req_sock.send_json(msg)
return req_sock.recv_json()
def __init__(self):
self.handlers = []
self.raw_handlers = []
self.tickers = []
self.monitor = monitor.Monitor()
def addHandler(self, exp, handler):
self.handlers.append((exp, handler))
def addRawHandler(self, exp, handler):
self.raw_handlers.append((exp, handler))
def addTicker(self, ticker):
self.tickers.append(ticker)
def run(self):
last_monitor_check = 0
last_ticker = 0
while True:
now = time.time()
if now - last_monitor_check > 5:
if self.monitor.check_ages():
sys.exit(0)
last_monitor_check = now
if now - self.last_heartbeat > 10:
self._send({"action": "heartbeat", "pid": os.getpid()})
self.last_heartbeat = now
# Call tickers
if now - last_ticker > 10:
for t in self.tickers:
t(self)
last_ticker = now
if 0 == sub_sock.poll(10000):
continue
msg = sub_sock.recv_json()
if 'privmsg' in msg:
for exp, handler in self.handlers:
text = msg['privmsg']['text']
m = exp.match(text)
if m:
handler(self, msg, m)
for exp, handler in self.raw_handlers:
m = exp.match(msg['data'])
if m:
handler(self, msg, m)
def call(self, method, args=None, kwargs=None):
msg = {"action": "call", "method": method}
if args is not None:
msg['args'] = args
if kwargs is not None:
msg['kwargs'] = kwargs
return self._send(msg)
def log(self, msg):
return self.call("log", args=(msg,))
def send(self, msg):
return self.call("send", kwargs={"data": msg})
def reply(self, msg, channel=None, nick=None):
return self.call("reply", kwargs={"msg": msg, "channel": channel, "nick": nick})
|
import os
import time
import numpy as np
import pandas as pd
import scipy.io as sio
from IPython.display import display
import matplotlib.pyplot as plt
import pywt
import scipy.stats
import datetime as dt
from collections import defaultdict, Counter
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import AdaBoostClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
dict_classifiers = {
"Gradient Boosting Classifier": GradientBoostingClassifier(),
"Random Forest": RandomForestClassifier(),
"Logistic Regression": LogisticRegression(),
"Nearest Neighbors": KNeighborsClassifier(),
"Decision Tree": DecisionTreeClassifier(),
"Linear SVM": SVC(),
"Neural Net": MLPClassifier(alpha = 1),
"Naive Bayes": GaussianNB(),
"AdaBoost": AdaBoostClassifier(),
"Gaussian Process": GaussianProcessClassifier()
}
def batch_classify(X_train, Y_train, X_test, Y_test, no_classifiers = 5, verbose = True):
"""
This method, takes as input the X, Y matrices of the Train and Test set.
And fits them on all of the Classifiers specified in the dict_classifier.
Usually, the SVM, Random Forest and Gradient Boosting Classifier take quiet some time to train.
So it is best to train them on a smaller dataset first and
decide whether you want to comment them out or not based on the test accuracy score.
"""
dict_models = {}
for classifier_name, classifier in list(dict_classifiers.items())[:no_classifiers]:
        t_start = time.perf_counter()  # time.clock() was removed in Python 3.8
        classifier.fit(X_train, Y_train)
        t_end = time.perf_counter()
t_diff = t_end - t_start
train_score = classifier.score(X_train, Y_train)
test_score = classifier.score(X_test, Y_test)
dict_models[classifier_name] = {'model': classifier, 'train_score': train_score, 'test_score': test_score, 'train_time': t_diff}
if verbose:
print("trained {c} in {f:.2f} s".format(c=classifier_name, f=t_diff))
return dict_models
def get_train_test(df, y_col, x_cols, ratio):
    # this method splits the dataframe into a train set and a test set
mask = np.random.rand(len(df)) < ratio
df_train = df[mask]
df_test = df[~mask]
Y_train = df_train[y_col].values
Y_test = df_test[y_col].values
X_train = df_train[x_cols].values
X_test = df_test[x_cols].values
return df_train, df_test, X_train, Y_train, X_test, Y_test
def display_dict_models(dict_models, sort_by='test_score'):
cls = [key for key in dict_models.keys()]
test_s = [dict_models[key]['test_score'] for key in cls]
training_s = [dict_models[key]['train_score'] for key in cls]
training_t = [dict_models[key]['train_time'] for key in cls]
df_ = pd.DataFrame(data=np.zeros(shape=(len(cls),4)), columns = ['classifier', 'train_score', 'test_score', 'train_time'])
for ii in range(0,len(cls)):
df_.loc[ii, 'classifier'] = cls[ii]
df_.loc[ii, 'train_score'] = training_s[ii]
df_.loc[ii, 'test_score'] = test_s[ii]
df_.loc[ii, 'train_time'] = training_t[ii]
display(df_.sort_values(by=sort_by, ascending=False))
def calculate_entropy(list_values):
counter_values = Counter(list_values).most_common()
probabilities = [elem[1]/len(list_values) for elem in counter_values]
entropy=scipy.stats.entropy(probabilities)
return entropy
def calculate_statistics(list_values):
n5 = np.nanpercentile(list_values, 5)
n25 = np.nanpercentile(list_values, 25)
n75 = np.nanpercentile(list_values, 75)
n95 = np.nanpercentile(list_values, 95)
median = np.nanpercentile(list_values, 50)
mean = np.nanmean(list_values)
std = np.nanstd(list_values)
var = np.nanvar(list_values)
    rms = np.sqrt(np.nanmean(np.square(list_values)))  # true root-mean-square
return [n5, n25, n75, n95, median, mean, std, var, rms]
def calculate_crossings(list_values):
zero_crossing_indices = np.nonzero(np.diff(np.array(list_values) > 0))[0]
no_zero_crossings = len(zero_crossing_indices)
mean_crossing_indices = np.nonzero(np.diff(np.array(list_values) > np.nanmean(list_values)))[0]
no_mean_crossings = len(mean_crossing_indices)
return [no_zero_crossings, no_mean_crossings]
def get_features(list_values):
entropy = calculate_entropy(list_values)
crossings = calculate_crossings(list_values)
statistics = calculate_statistics(list_values)
return [entropy] + crossings + statistics
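# Each call to get_features therefore yields 12 numbers per wavelet
# coefficient array: 1 entropy value, 2 crossing counts (zero crossings and
# mean crossings) and the 9 statistics returned by calculate_statistics.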
def get_uci_har_features(dataset, labels, waveletname):
uci_har_features = []
for signal_no in range(0, len(dataset)):
features = []
for signal_comp in range(0,dataset.shape[2]):
signal = dataset[signal_no, :, signal_comp]
list_coeff = pywt.wavedec(signal, waveletname)
for coeff in list_coeff:
features += get_features(coeff)
uci_har_features.append(features)
X = np.array(uci_har_features)
Y = np.array(labels)
return X, Y
# load the dataset
activities_description = {
1: 'walking',
2: 'walking upstairs',
3: 'walking downstairs',
4: 'sitting',
5: 'standing',
6: 'laying'
}
def read_signals(filename):
with open(filename, 'r') as fp:
data = fp.read().splitlines()
data = map(lambda x: x.rstrip().lstrip().split(), data)
data = [list(map(float, line)) for line in data]
return data
def read_labels(filename):
with open(filename, 'r') as fp:
activities = fp.read().splitlines()
activities = list(map(int, activities))
return activities
def randomize(dataset, labels):
permutation = np.random.permutation(labels.shape[0])
shuffled_dataset = dataset[permutation, :, :]
shuffled_labels = labels[permutation]
return shuffled_dataset, shuffled_labels
INPUT_FOLDER_TRAIN = './data/UCI_HAR/train/InertialSignals/'
INPUT_FOLDER_TEST = './data/UCI_HAR/test/InertialSignals/'
INPUT_FILES_TRAIN = ['body_acc_x_train.txt', 'body_acc_y_train.txt', 'body_acc_z_train.txt',
'body_gyro_x_train.txt', 'body_gyro_y_train.txt', 'body_gyro_z_train.txt',
'total_acc_x_train.txt', 'total_acc_y_train.txt', 'total_acc_z_train.txt']
INPUT_FILES_TEST = ['body_acc_x_test.txt', 'body_acc_y_test.txt', 'body_acc_z_test.txt',
'body_gyro_x_test.txt', 'body_gyro_y_test.txt', 'body_gyro_z_test.txt',
'total_acc_x_test.txt', 'total_acc_y_test.txt', 'total_acc_z_test.txt']
LABELFILE_TRAIN = './data/UCI_HAR/train/y_train.txt'
LABELFILE_TEST = './data/UCI_HAR/test/y_test.txt'
train_signals, test_signals = [], []
for input_file in INPUT_FILES_TRAIN:
signal = read_signals(INPUT_FOLDER_TRAIN + input_file)
train_signals.append(signal)
train_signals = np.transpose(np.array(train_signals), (1, 2, 0))
for input_file in INPUT_FILES_TEST:
signal = read_signals(INPUT_FOLDER_TEST + input_file)
test_signals.append(signal)
test_signals = np.transpose(np.array(test_signals), (1, 2, 0))
train_labels = read_labels(LABELFILE_TRAIN)
test_labels = read_labels(LABELFILE_TEST)
[no_signals_train, no_steps_train, no_components_train] = np.shape(train_signals)
[no_signals_test, no_steps_test, no_components_test] = np.shape(test_signals)
no_labels = len(np.unique(train_labels[:]))
print("The train dataset contains {} signals, each one of length {} and {} components ".format(no_signals_train, no_steps_train, no_components_train))
print("The test dataset contains {} signals, each one of length {} and {} components ".format(no_signals_test, no_steps_test, no_components_test))
print("The train dataset contains {} labels, with the following distribution:\n {}".format(np.shape(train_labels)[0], Counter(train_labels[:])))
print("The test dataset contains {} labels, with the following distribution:\n {}".format(np.shape(test_labels)[0], Counter(test_labels[:])))
uci_har_signals_train, uci_har_labels_train = randomize(train_signals, np.array(train_labels))
uci_har_signals_test, uci_har_labels_test = randomize(test_signals, np.array(test_labels))
# generate wavelet features for the UCI-HAR dataset
waveletname = 'rbio3.1'
X_train, Y_train = get_uci_har_features(uci_har_signals_train, uci_har_labels_train, waveletname)
X_test, Y_test = get_uci_har_features(uci_har_signals_test, uci_har_labels_test, waveletname)
# classify the train and test sets
models = batch_classify(X_train, Y_train, X_test, Y_test)
display_dict_models(models)
|
# Dijkstra's algorithm for shortest paths
# Adapted from David Eppstein, UC Irvine, 4 April 2002
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/117228
from priodict import priorityDictionary
from Node2D import Node2DGraph
def Dijkstra(graph,vertexCompare,start,end=None):
"""
Find shortest paths from the start vertex to all
vertices nearer than or equal to the end.
The input graph G is assumed to have the following
representation: A vertex can be any object that can
be used as an index into a dictionary. G is a
dictionary, indexed by vertices. For any vertex v,
G[v] is itself a dictionary, indexed by the neighbors
of v. For any edge v->w, G[v][w] is the length of
the edge. This is related to the representation in
<http://www.python.org/doc/essays/graphs.html>
where Guido van Rossum suggests representing graphs
as dictionaries mapping vertices to lists of neighbors,
however dictionaries of edges have many advantages
over lists: they can store extra information (here,
the lengths), they support fast existence tests,
and they allow easy modification of the graph by edge
insertion and removal. Such modifications are not
needed here but are important in other graph algorithms.
Since dictionaries obey iterator protocol, a graph
represented as described here could be handed without
modification to an algorithm using Guido's representation.
Of course, G and G[v] need not be Python dict objects;
they can be any other object that obeys dict protocol,
for instance a wrapper in which vertices are URLs
and a call to G[v] loads the web page and finds its links.
The output is a pair (D,P) where D[v] is the distance
from start to v and P[v] is the predecessor of v along
the shortest path from s to v.
Dijkstra's algorithm is only guaranteed to work correctly
when all edge lengths are positive. This code does not
verify this property for all edges (only the edges seen
before the end vertex is reached), but will correctly
compute shortest paths even for some graphs with negative
edges, and will raise an exception if it discovers that
a negative edge has caused it to make a mistake.
"""
'''Original:
final_distances = {} # dictionary of final distances
predecessors = {} # dictionary of predecessors
estimated_distances = priorityDictionary() # est.dist. of non-final vert.
estimated_distances[start] = 0
for vertex in estimated_distances:
final_distances[vertex] = estimated_distances[vertex]
if vertex == end: break
for neighbor in vertex.neighbors:
path_distance = final_distances[vertex] + graph[vertex][edge]
if edge in final_distances:
if path_distance < final_distances[edge]:
raise ValueError, \
"Dijkstra: found better path to already-final vertex"
elif edge not in estimated_distances or path_distance < estimated_distances[edge]:
estimated_distances[edge] = path_distance
predecessors[edge] = vertex
return (final_distances,predecessors)
'''
final_distances = {} # dictionary of final distances
predecessors = {} # dictionary of predecessors
estimated_distances = priorityDictionary() # est.dist. of non-final vert.
estimated_distances[start] = 0
endVertex = ''
for vertex in estimated_distances:
#vertex is a graph node, with chord and neighbors
final_distances[vertex] = estimated_distances[vertex]
if vertexCompare(vertex.chord, end):
endVertex = vertex
break
for neighbor in vertex.neighbors:
#neighbor is a list of the neighbor's [x,y] indices @[0] and its distance @[1]
nIndX = neighbor[0][0]
nIndY = neighbor[0][1]
neighborVertex = graph[nIndX][nIndY]
path_distance = final_distances[vertex] + neighbor[1]#graph[vertex][edge]
#if edge in final_distances:
#if chordToString(neighbor.chord) in [chordToString(i.chord) for i in final_distances]:
if neighborVertex in final_distances:
if path_distance < final_distances[neighborVertex]:#probably won't trigger for dupe chords, because their objects will differ
raise ValueError, \
"Dijkstra: found better path to already-final vertex"
#elif edge not in estimated_distances or path_distance < estimated_distances[edge]:
elif neighborVertex not in estimated_distances or path_distance < estimated_distances[neighborVertex]:
estimated_distances[neighborVertex] = path_distance
predecessors[neighborVertex] = vertex
if endVertex == '':
print 'could not find it'
return (final_distances,predecessors,endVertex)
def shortestPath(graph,vertexCompare,start,end):
"""
Find a single shortest path from the given start vertex
to the given end vertex.
The input has the same conventions as Dijkstra().
The output is a list of the vertices in order along
the shortest path.
"""
#print 'finding path from start: ' + start.nodeToString() + ' to ' + str(end)
final_distances,predecessors,endVertex = Dijkstra(graph,vertexCompare,start,end)
if endVertex =='':
return -1
path = []
while 1:
path.append(endVertex)
if vertexCompare(endVertex.chord, start.chord): break
endVertex = predecessors[endVertex]
path.reverse()
return path
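# Illustrative example of the dict-of-dicts graph representation described in
# the Dijkstra() docstring above (this adapted version instead walks a
# Node2DGraph grid through each vertex's .neighbors list): every vertex maps
# to a dict of neighbour -> edge length.
#
#   G = {'a': {'b': 2, 'c': 5},
#        'b': {'c': 1},
#        'c': {}}
#
# With that layout the distance from 'a' to 'c' comes out as 3, via a -> b -> c.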
|
# Copyright (c) 2016-2023 Knuth Project developers.
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
{
"targets": [
{
"target_name": "<(module_name)",
'product_dir': '<(module_path)',
"sources": [ "src/kth-native.cpp",
"src/node.cpp",
"src/string_list.cpp",
"src/chain/chain.cpp",
"src/chain/header.cpp",
"src/chain/block.cpp",
"src/chain/merkle_block.cpp",
"src/chain/point.cpp",
"src/chain/transaction.cpp",
"src/chain/input.cpp",
"src/chain/output.cpp",
"src/chain/output_point.cpp",
"src/chain/tools.cpp",
"src/chain/script.cpp",
"src/chain/input_list.cpp",
"src/chain/output_list.cpp",
"src/chain/transaction_list.cpp",
"src/chain/block_list.cpp",
"src/chain/history_compact_list.cpp",
"src/chain/history_compact.cpp",
"src/chain/stealth_compact.cpp",
"src/chain/stealth_compact_list.cpp",
"src/config/authority.cpp",
"src/config/blockchain_settings.cpp",
"src/config/checkpoint.cpp",
"src/config/database_settings.cpp",
"src/config/endpoint.cpp",
"src/config/network_settings.cpp",
"src/config/node_settings.cpp",
"src/config/settings.cpp",
"src/wallet/ec_private.cpp",
"src/wallet/ec_public.cpp",
"src/wallet/elliptic_curve.cpp",
"src/wallet/hd_private.cpp",
"src/wallet/hd_public.cpp",
"src/wallet/payment_address.cpp",
"src/wallet/wallet.cpp",
],
'variables': {
'install_py': '<(DEPTH)/install.py',
},
"xcode_settings": {
'MACOSX_DEPLOYMENT_TARGET': '10.15',
'OTHER_CFLAGS': [
"-std=c++17",
],
},
'actions': [
{
'action_name': 'Install',
'inputs': [
'>(install_py)',
],
'outputs': [''],
# 'action': ['<!(node -p "process.env.npm_config_python || \\"python\\"")','>@(_inputs)', '<(DEPTH)', "<(target_arch)"]
'action': ['<!(node -p "process.env.npm_config_python || \\"python\\"")','>@(_inputs)', '<(DEPTH)']
},
],
'defines': [
'KTH_LIB_STATIC',
'KTH_LOG_LIBRARY_SPDLOG',
'KTH_CURRENCY_BCH',
],
# https://docs.microsoft.com/en-us/dotnet/api/microsoft.visualstudio.vcprojectengine.runtimelibraryoption?view=visualstudiosdk-2019
'configurations': {
'Debug': {
'msvs_settings': {
'VCCLCompilerTool': {
'RuntimeLibrary': '1', # /MTd
# 'RuntimeLibrary': '3', # /MDd
'AdditionalOptions': [ '/std:c++17' ]
},
},
},
'Release': {
'msvs_settings': {
'VCCLCompilerTool': {
'RuntimeLibrary': '0', # /MT
# 'RuntimeLibrary': '2', # /MD
'AdditionalOptions': [ '/std:c++17' ]
},
},
},
},
'conditions': [
['OS=="linux"', {
"include_dirs": ["<!(node -e \"require('nan')\")", "./deps/include", "../deps/include", "./include", "../include"],
"cflags": [
"-std=c++17",
"-Wno-deprecated-declarations",
"-Wno-unused-result",
"-Wno-cast-function-type",
""
],
"cflags_cc": [
"-std=c++17",
"-Wno-deprecated-declarations",
"-Wno-unused-result",
"-Wno-cast-function-type",
""
],
'libraries': [
"-L<(module_root_dir)/deps/lib/",
'-lc-api',
'-lnode',
'-lblockchain',
'-lnetwork',
'-lconsensus',
'-ldatabase',
'-ldomain',
'-linfrastructure',
'-llmdb',
'-lboost_date_time',
'-lboost_iostreams',
'-lboost_locale',
'-lboost_program_options',
'-lboost_system',
'-lboost_thread',
'-lsecp256k1',
'-lbz2',
'-lgmp',
'-lz',
],
}],
['OS=="mac"', {
"cflags": [
"-std=c++17",
""
],
"cflags_cc": [
"-std=c++17",
""
],
"include_dirs": ["<!(node -e \"require('nan')\")", "./deps/include", "../deps/include", "./include", "../include"],
'libraries': [
"-L<(module_root_dir)/deps/lib/",
'-lc-api',
'-lnode',
'-lblockchain',
'-lnetwork',
'-lconsensus',
'-ldatabase',
'-ldomain',
'-linfrastructure',
'-llmdb',
# '-llmdbd',
'-lboost_date_time',
'-lboost_iostreams',
'-lboost_locale',
'-lboost_program_options',
'-lboost_system',
'-lboost_thread',
'-lsecp256k1',
'-lbz2',
'-lgmp',
'-lz',
],
}],
['OS=="win"', {
"include_dirs": ["<!(node -e \"require('nan')\")", "<(module_root_dir)/deps/include", "<(module_root_dir)/include"],
'libraries': [
'<(module_root_dir)/deps/lib/c-api.lib',
'<(module_root_dir)/deps/lib/node.lib',
'<(module_root_dir)/deps/lib/blockchain.lib',
'<(module_root_dir)/deps/lib/network.lib',
'<(module_root_dir)/deps/lib/consensus.lib',
'<(module_root_dir)/deps/lib/database.lib',
'<(module_root_dir)/deps/lib/domain.lib',
'<(module_root_dir)/deps/lib/infrastructure.lib',
'<(module_root_dir)/deps/lib/lmdb.lib',
'<(module_root_dir)/deps/lib/libboost_date_time.lib',
'<(module_root_dir)/deps/lib/libboost_iostreams.lib',
'<(module_root_dir)/deps/lib/libboost_locale.lib',
'<(module_root_dir)/deps/lib/libboost_program_options.lib',
'<(module_root_dir)/deps/lib/libboost_system.lib',
'<(module_root_dir)/deps/lib/libboost_thread.lib',
'<(module_root_dir)/deps/lib/secp256k1.lib',
'<(module_root_dir)/deps/lib/mpir.lib',
]
}]
],
}
]
}
|
import os
from tacotron.synthesizer import Synthesizer
import tensorflow as tf
def tacotron_synthesize(sentences):
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # ignore warnings https://stackoverflow.com/questions/47068709/
output_dir = 'A'
checkpoint_path = tf.train.get_checkpoint_state('trained_model').model_checkpoint_path
print('####### checkpoint_path', checkpoint_path)
synth = Synthesizer()
synth.load(checkpoint_path)
os.makedirs(output_dir, exist_ok=True)
for i, text in enumerate(sentences):
synth.synthesize(text, i + 1, output_dir, None)
print('Results at: {}'.format(output_dir))
sentences = [
'San Pablo Catholic University',
'Final Career Project',
'I like to study computer science',
'in being comparatively modern',
'has never been surpassed'
]
tacotron_synthesize(sentences)
|
from collections import Counter
def solution(k, tangerine):
answer = 0
T = Counter(tangerine)
for i in sorted(T.values(), reverse=True):
if k > 0:
k -= i
answer += 1
return answer
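# Worked example (illustrative input): solution(6, [1, 3, 2, 5, 4, 5, 2, 3])
# counts the sizes as {1: 1, 3: 2, 2: 2, 5: 2, 4: 1}; taking the most frequent
# sizes first (counts 2, 2, 2) already covers k = 6, so the answer is 3
# distinct sizes.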
|
from itertools import product
import numpy as np
import pandas as pd
from ROOT import RooRealVar, RooCategory
class ConfigurationError(Exception):
pass
class FitParameters(dict):
class RealVar():
def __init__(self, *args):
self.var = RooRealVar(args[0], args[0], *args[1:])
@property
def min(self):
return self.var.getMin()
@min.setter
def min(self, val):
self.var.setMin(val)
@property
def val(self):
return self.var.getVal()
@property
def max(self):
return self.var.getMax()
@max.setter
def max(self, val):
self.var.setMax(val)
def __getattr__(self, name):
return getattr(self.var, name)
def __repr__(self):
ret = f'RooRealVar {self.var.GetName()} = {self.var.getVal()}'
if not self.var.isConstant():
ret += f' [{self.min}, {self.max}]'
return ret
def __init__(self):
self.param_list = set()
def __setattr__(self, name, value):
if hasattr(self, name):
if np.isreal(value):
getattr(self, name).setVal(value)
return
else:
print(f'warning: overriding "{name}"')
super().__setattr__(name, value)
def __setitem__(self, key, value):
self.__setattr__(key, value)
def __getitem__(self, key):
return self.__getattribute__(key)
@property
def r(self):
class bag(dict):
pass
params = bag()
for name in self.param_list:
param = getattr(self, name)
if type(param) == FitParameters.RealVar:
setattr(params, name, param.var)
params[name] = param.var
elif type(param) == RooCategory:
setattr(params, name, param)
params[name] = param
return params
def add_param(self, name, *args):
param = FitParameters.RealVar(name, *args)
self.param_list.add(name)
setattr(self, name, param)
def expand_params(self, template, *args, **kwargs):
names = kwargs.keys()
values = kwargs.values()
for parameter_values in product(*values):
name = template.format(
**{n: v
for n, v in zip(names, parameter_values)})
self.add_param(name, *args)
def add_observable(self, name, *args, values=None):
if values is None and not args:
raise ConfigurationError('Need to define either values or default '
'RooRealVar args.')
if values is not None:
min, max = np.min(values), np.max(values)
self.add_param(name, min, max)
else:
self.add_param(name, *args)
def add_category(self, name, values):
cat = RooCategory(name, name)
for val in pd.unique(values):
cat.defineType(str(val), int(val))
self.param_list.add(name)
setattr(self, name, cat)
def pop(self, key, default_value=None):
if key in self.param_list:
self.param_list.remove(key)
return self.__dict__.pop(key, default_value)
def glob(self, expr):
from fnmatch import fnmatch
return [getattr(self, p) for p in self.param_list if fnmatch(p, expr)]
def __repr__(self):
string = f'Parameter collection\n{len(self.param_list)} parameters:'
for p in sorted(list(self.param_list)):
parameter = getattr(self, p)
string += f'\n{p}'
if parameter.isConstant():
string += '\tconst'
elif type(parameter) == RooRealVar:
string += f'\t{getattr(self, p).getMin():.2g}'
string += f'\t{getattr(self, p).getMax():.2g}'
return string
class Plotter:
def __init__(self, sampling_steps=1000):
self.sampling_steps = sampling_steps
def sample_pdf(self, pdf, variable, norm=None):
        """
        For some reason, pdf.plotOn yields the correct pdf while scanning the
        variable directly does not work as expected, so the curve produced by
        plotOn is sampled and interpolated instead.
        """
        import ROOT as R
        from scipy.integrate import trapz
xs = []
ys = []
curve = pdf.plotOn(variable.frame(),
R.RooFit.Precision(1e-5)).getCurve()
for x in np.linspace(variable.getMin(), variable.getMax(),
self.sampling_steps):
variable.setVal(x)
xs.append(x)
# still need to interpolate to get the same shape everywhere
ys.append(curve.interpolate(x))
xs, ys = np.array([xs, ys])
if norm is not None:
integral = trapz(ys, xs)
ys /= integral
ys *= norm
return xs, ys
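# Minimal usage sketch for FitParameters (hypothetical parameter names,
# assuming a working ROOT/RooFit installation): add_param wraps RooRealVar
# creation, expand_params stamps out one variable per combination of the
# template values, and .r exposes the underlying RooFit objects.
#
#   params = FitParameters()
#   params.add_observable('mass', values=[5200.0, 5400.0])   # range taken from data
#   params.add_param('mean', 5280.0, 5200.0, 5400.0)
#   params.expand_params('sigma_{i}', 20.0, 1.0, 100.0, i=[1, 2])
#   params.add_category('year', [2016, 2017, 2018])
#   print(params)           # __repr__ lists every registered parameter
#   roofit_vars = params.r  # dict-like bag of the raw RooRealVar/RooCategory objects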
|
# Generated by Django 2.2.6 on 2019-10-09 21:56
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('gtin', '0002_gtinbasedata'),
]
operations = [
migrations.DeleteModel(
name='GTINBaseData',
),
]
|
from django.contrib import admin
from . models import *
class PostAdmin(admin.ModelAdmin):
search_fields = ('name', 'email', 'body')
admin.site.register(Post, PostAdmin)
admin.site.register(Category)
admin.site.register(Tag)
admin.site.register(Slider)
admin.site.register(Studlife)
admin.site.register(Ads)
admin.site.register(Idiom)
admin.site.register(Competition)
admin.site.register(Abiturient)
admin.site.register(PhotoGallery)
admin.site.register(Structure)
admin.site.register(StructureCategory)
|