from django.contrib import admin
from myblog.models import Board
admin.site.register(Board)
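
# Optional variant of the registration above (a sketch, not required): a
# ModelAdmin leaves room to tune the changelist later. "id" and "__str__"
# are safe defaults; any other columns would be assumptions about Board,
# and the register() call is left commented out to avoid registering twice.
class BoardAdmin(admin.ModelAdmin):
    list_display = ("id", "__str__")

# admin.site.register(Board, BoardAdmin)  # use instead of the bare register() above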
|
n = 0

def Epsilon(n):
    # Recursive search: keep halving 2**-n until adding the next smaller
    # value to 1 is indistinguishable from adding 2**-n; that 2**-n
    # approximates the machine epsilon.
    if (1 + 2**-n) == (1 + 2**(-n - 1)):
        return 2**-n
    else:
        return Epsilon(n + 1)

print(Epsilon(n))

def EpsilonConCiclo(n):
    # The same search written with a loop ("con ciclo") instead of recursion.
    while (1 + 2**-n) != (1 + 2**(-n - 1)):
        n += 1
    return 2**-n

print(EpsilonConCiclo(n))
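
# Sanity check (a small addition, not part of the original exercise): the
# search above should land within a factor of 2 of the epsilon Python reports.
import sys
print(sys.float_info.epsilon)  # 2**-52 for IEEE-754 doubles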
|
from django.views.generic import FormView
from django.urls import reverse_lazy
from django.contrib import messages
from django.utils.translation import gettext as _
from django.utils import translation
from .models import Service, Employee, Feature, Plan, Client
from .forms import ContactForm
class IndexView(FormView):
template_name = 'index.html'
form_class = ContactForm
success_url = reverse_lazy('index')
def get_context_data(self, **kwargs):
context = super(IndexView, self).get_context_data(**kwargs)
lang = translation.get_language()
context['services'] = Service.objects.order_by('?').all()
context['employees'] = Employee.objects.order_by('?').all()
        # Shuffle once and slice, so the two feature columns cannot repeat an item.
        features = list(Feature.objects.order_by('?')[:6])
        context['featurescolumn1'] = features[:3]
        context['featurescolumn2'] = features[3:6]
context['plans'] = Plan.objects.all()
context['clients'] = Client.objects.order_by('?').all()
context['lang'] = lang
translation.activate(lang)
return context
def form_valid(self, form, *args, **kwargs):
form.send_mail()
messages.success(self.request, _('E-mail enviado com sucesso!'))
return super(IndexView, self).form_valid(form, *args, **kwargs)
def form_invalid(self, form, *args, **kwargs):
messages.error(self.request, _('Erro ao enviar o e-mail.'))
return super(IndexView, self).form_invalid(form, *args, **kwargs)
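
# The view above calls form.send_mail(); a minimal sketch of what that
# ContactForm might look like (field names and recipient are assumptions,
# and this would normally live in forms.py rather than here):
from django import forms
from django.conf import settings
from django.core.mail import send_mail as django_send_mail


class ContactForm(forms.Form):
    name = forms.CharField(max_length=100)
    email = forms.EmailField()
    message = forms.CharField(widget=forms.Textarea)

    def send_mail(self):
        # Relay the submitted message to the site's default address.
        django_send_mail(
            subject=f"Contact from {self.cleaned_data['name']} <{self.cleaned_data['email']}>",
            message=self.cleaned_data['message'],
            from_email=settings.DEFAULT_FROM_EMAIL,
            recipient_list=[settings.DEFAULT_FROM_EMAIL],
        )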
|
# -*- coding: utf-8 -*-
from pyspark import SparkContext
from pyspark.sql import *
from pyspark.sql.types import *
import dateutil.parser as date
import json
from pymongo import MongoClient
spark = SparkSession\
.builder\
.master("spark://stack-02:7077")\
.config("spark.cores.max", 2)\
.appName("SocialDataService")\
.getOrCreate()
sc = spark.sparkContext
def getSocialDataByStartAndEnd(start, end):
socialDataParquet = "hdfs://stack-02:9000/SocialDataRepository/SOCIALDATA.parquet"
socialDataDF = spark.read.parquet(socialDataParquet)
socialDataDF = socialDataDF.sort(socialDataDF.created_at.desc())
    # Keep rows whose created_at falls between start and end (the original
    # comparison had the first bound reversed).
    socialData = socialDataDF.where(socialDataDF.created_at >= start).where(socialDataDF.created_at <= end).collect()
sd_list = []
for sd in socialData:
sd_list.append(sd.asDict())
return sd_list
def getTweetDataByStartAndEnd(start, end):
client = MongoClient('mongodb://10.0.1.3:27017/')
db = client['SocialData']
tweet_collection = db.tweet
tweets = tweet_collection.find({"created_at": {"$gte": date.parse(start), "$lte": date.parse(end)}})
tw_list = []
for tw in tweets:
del tw['_id']
tw_list.append(tw)
return tw_list
def getAllSocialData():
socialDataParquet = "hdfs://stack-02:9000/SocialDataRepository/SOCIALDATA.parquet"
socialDataDF = spark.read.parquet(socialDataParquet)
socialData = socialDataDF.collect()
sd_list = []
for sd in socialData:
sd_list.append(sd.asDict())
return sd_list
def getAllQuery():
queryParquet = "hdfs://stack-02:9000/SocialDataRepository/QUERY.parquet"
queryDF = spark.read.parquet(queryParquet)
queries = queryDF.collect()
q_list = []
for q in queries:
q_list.append(q.asDict())
return q_list
def getPlaceById(place_id):
# placeParquet = "../SocialDataRepository/PLACE.parquet"
placeParquet = "hdfs://stack-02:9000/SocialDataRepository/PLACE.parquet"
placeDF = spark.read.parquet(placeParquet)
place = placeDF.where(placeDF.id == place_id).collect()
place = place[0].asDict()
return place
def get_predicted():
client = MongoClient('mongodb://10.0.1.3:27017/')
db = client['SocialData']
predicted_collection = db.predicted
predicted = predicted_collection.find().sort("_id", -1).limit(1)
for p in predicted:
predicted = p
del predicted['_id']
return predicted
def save_predicted(predicted):
client = MongoClient('mongodb://10.0.1.3:27017/')
db = client['SocialData']
predicted_collection = db.predicted
result = predicted_collection.insert_one({'id': predicted['id'], 'predicted': predicted['predicted']}).inserted_id
return result
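
# Hypothetical driver for the helpers above; the date range is only an
# example, its format is assumed to match the created_at column, and the
# Spark/Mongo services are assumed to be reachable from this host.
if __name__ == "__main__":
    rows = getSocialDataByStartAndEnd("2019-11-01", "2019-11-02")
    print("social data rows:", len(rows))
    print("latest prediction:", get_predicted())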
|
#! python3
import csv  # DictWriter/reader below come from the stdlib csv module

from pandas import read_csv
data = [{
"name": "Bob",
"gender": "male",
"birthday": "1992-10-08"
}, {
"name": "Kavey",
"gender": "female",
"birthday": "1995-05-12"
}]
with open('data.csv', 'a', newline='') as csvfile:
fieldnames = ['name', 'gender', 'birthday']
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames)  # csv.DictWriter, not a pandas attribute
    # writer.writeheader()  # uncomment on the first run so the file starts with a header row
writer.writerows(data)
with open('data.csv', 'r') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
print(row)
df = read_csv('data.csv', encoding='gbk')
print(df)
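
# Alternative (a sketch, not in the original): since pandas is already
# imported, the same rows can be written in one call. Note this overwrites
# data.csv instead of appending to it.
from pandas import DataFrame
DataFrame(data).to_csv('data.csv', index=False, columns=fieldnames)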
|
import logging
import logging.config
import os
import subprocess
from . import DockerWrapper
from .PlatformClient import PlatformClient
from . import PlatformStructs as Pstruct
import dotenv
import time
import traceback
class Mediator(PlatformClient):
def __init__(self):
super().__init__()
self.logger = logging.getLogger("Mediator")
# logging.config.fileConfig(os.path.dirname(__file__)+'/Modicum-log.conf')
self.logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
formatter = logging.Formatter("---%(name)s---: \n%(message)s\n\r")
ch.setFormatter(formatter)
self.logger.addHandler(ch)
self.job_offers = {}
self.resource_offers = {}
self.registered = False
self.myMatches = {}
path = dotenv.find_dotenv('.env', usecwd=True)
dotenv.load_dotenv(path)
def register(self, account, arch, instructionPrice, bandwidthPrice, dockerBandwidthPrice):
self.logger.info("A: Registering")
self.account = account
self.contract.registerMediator(self.account, arch, instructionPrice, bandwidthPrice, True, dockerBandwidthPrice)
def getJob(self, tag, matchID, JID, execute, ijoid):
_DIRIP_ = os.environ.get('DIRIP')
_DIRPORT_ = os.environ.get('DIRPORT')
_KEY_ = os.environ.get('pubkey')
_SSHKEY_ = os.environ.get('sshkey')
_SSHPORT_ = os.environ.get('SSHPORT')
_WORKPATH_ = os.environ.get('WORKPATH')
statusJob=0
cpuTime=0
endStatus="Completed"
try :
self.logger.info("L: Requesting Permission to get job")
            # The original referenced `msg` before it was assigned; request
            # permission with the directory address from the environment instead.
            msg = self.DC.getPermission(_DIRIP_, _DIRPORT_, ijoid, tag, _KEY_)
self.user = msg['user']
self.groups = msg['groups']
self.logger.info("L: permission granted? : %s" % (msg['exitcode'] == 0))
if msg['exitcode'] == 0:
remote_user = self.DC.getUsername(_DIRIP_, _DIRPORT_, JID)
self.logger.info("Check job size")
result = self.DC.getSize(_DIRIP_, _SSHPORT_, self.user, remote_user, tag, _SSHKEY_)
self.logger.info("Job size is: \n %s" %result)
                input_exists = False
                image_exists = False
                lines = result.split("\n")
                for line in lines:
                    self.logger.info(line)
                    if "json" in line and "input" in line:
                        input_exists = True
                    elif "tar" in line:
                        image_exists = True
                        # size = line.split("\t")[0]
                    elif "total" in line:
                        size = line.split("\t")[0]
                        self.logger.info(size)
                if image_exists:
                    if input_exists:
remotePath = tag
self.logger.info([_DIRIP_,_SSHPORT_,self.user, JID, tag,_WORKPATH_ ,_SSHKEY_])
localPath = "%s/%s/" %(_WORKPATH_, tag)
if os.path.isfile("%s/%s:latest.tar" %(localPath,tag)): #HACK
self.logger.info("image exists, skip downloading it.")
localPath = "%s/input" %localPath
remotePath = "%s/input" %tag
os.makedirs(localPath, exist_ok=True) #HACK
self.logger.info("localPath: %s" %localPath)
self.logger.info("remotePath: %s" %remotePath)
self.logger.info("K: get job = %s" %ijoid)
self.DC.getData(_DIRIP_,_SSHPORT_, self.user, remote_user, remotePath, localPath,_SSHKEY_)
self.logger.info("K: got job = %s" %ijoid)
else:
statusJob=3
endStatus="JobNotFound"
self.logger.info("Input does not exist")
else:
statusJob=3
endStatus="JobNotFound"
self.logger.info("Image does not exist")
else:
self.logger.info("Done.. but permission denied")
statusJob=9
endStatus="DirectoryUnavailable"
except :
self.logger.info(traceback.format_exc())
statusJob=3
endStatus="JobNotFound"
try:
if execute and statusJob==0:
images = self.dockerClient.images.list(name=tag)
self.logger.info(images)
if not images:
self.logger.info("Image not loaded. loading image... ")
DockerWrapper.loadImage(self.dockerClient, "%s/%s/%s:latest.tar" %(_WORKPATH_, tag,tag))
self.logger.info("Image is loaded")
jobname = "mm_%s" %matchID
input = "%s/%s/input" %(_WORKPATH_,tag)
output = "%s/%s/output" %(_WORKPATH_,tag)
appinput = "/app/input"
appoutput = "/app/output"
self.logger.info("Starting Docker for job = %s" %ijoid)
container = DockerWrapper.runContainer(self.dockerClient, tag, jobname, input, output,appinput,appoutput)
# container.reload()
lid = container.attrs["Id"]
self.logger.info("container ID for job %s: %s" %(lid, ijoid))
self.logger.info("G: running job = %s" %ijoid)
cpu_old = -1
stopping = False
cmd = "cat /sys/fs/cgroup/cpuacct/docker/%s/cpuacct.stat | grep -oP '(?<=user ).*'" %lid
self.logger.info(container.status)
while container.status != "running":
time.sleep(1)
container.reload()
self.logger.info(container.status)
while container.status == "running":
try:
completedprocess = subprocess.getoutput(cmd) #HACK the internet says something else should be used
cpuTime = int(completedprocess) * 10
except ValueError as err:
self.logger.info("Process is done... probably")
self.logger.info("error is : %s" %err)
self.logger.info("error type is : %s" %type(err))
self.logger.info("G: %s to run job = %s" %(cpuTime, ijoid))
self.logger.info("Stopping Docker for job = %s" %ijoid)
stopping = True
# if lid in err:
# self.logger.info("Process is done")
startReload = time.time()
container.reload()
reloadDuration = time.time() - startReload
self.logger.info("reload took: %s" %reloadDuration)
self.logger.info("Container is : %s" %container.status)
self.logger.info("duration: %s ms" %cpuTime)
if cpu_old == cpuTime or container.status != "running":
if not stopping:
self.logger.info("G: %s to run job = %s" %(cpuTime, ijoid))
self.logger.info("Stopping Docker for job = %s" %ijoid)
stopping = True
else:
cpu_old = cpuTime
time.sleep(1)
self.logger.info("Docker stopped for job = %s" %ijoid)
# self.logger.info("J: Send result to DIRECTORY for job = %s" %ijoid)
# self.DC.publishData(_DIRIP_, _SSHPORT_, self.user,tag,output,_SSHKEY_)
# self.logger.info("J: Data sent for job = %s" %ijoid)
if self.account:
#TODO resultHash
resultHash = "b599cff993a602c14e6d45beab7a48c25e6753b7106cd6173488e843a7158060"
resultHash_int = int(resultHash, 16)
# #TODO FAILURE HANDLING
self.logger.info("M: post mediation result: %s, RP at fault" %endStatus)
                    if self.myMatches[matchID]['resHash'] == resultHash_int:
                        self.contract.postMediationResult(self.account, True,
                            matchID, endStatus, tag, resultHash_int, cpuTime, 0, 0, 'CorrectResults', 'JobCreator')
else:
self.contract.postMediationResult(self.account, True,
matchID, endStatus, tag, resultHash_int, cpuTime, 0, 0,
'WrongResults', 'ResourceProvider')
self.logger.info("Done")
return 0
except :
self.logger.info(traceback.format_exc())
statusJob=8
endStatus="ExceptionOccured"
if statusJob!=0:
if self.account:
#TODO resultHash
resultHash = "b599cff993a602c14e6d45beab7a48c25e6753b7106cd6173488e843a7158060"
resultHash_int = int(resultHash, 16)
# #TODO FAILURE HANDLING
self.logger.info("M: Post Mediation result: %s, JC at fault" %endStatus)
self.contract.postMediationResult(self.account, True,
matchID, endStatus, tag, resultHash_int, cpuTime, 0, 0, 'InvalidResultStatus', 'JobCreator')
def getJob_old(self, tag, matchID, JID, execute):
_DIRIP_ = os.environ.get('DIRIP')
_DIRPORT_ = os.environ.get('DIRPORT')
_KEY_ = os.environ.get('pubkey')
_SSHKEY_ = os.environ.get('sshkey')
_SSHPORT_ = os.environ.get('SSHPORT')
_WORKPATH_ = os.environ.get('WORKPATH')
statusJob=0
cpuTime=0
endStatus="Completed"
self.logger.info("Requesting Permission to get job")
msg = self.DC.getPermission(_DIRIP_, _DIRPORT_,self.account,tag,_KEY_)
self.user = msg['user']
self.groups = msg['groups']
self.logger.info("permission granted? : %s" %(msg['exitcode'] == 0))
if msg['exitcode'] == 0:
remotePath = tag
self.logger.info([_DIRIP_,_SSHPORT_,self.user, JID, tag,_WORKPATH_ ,_SSHKEY_])
localPath = "%s/%s/" %(_WORKPATH_, tag)
if os.path.isfile("%s/%s:latest.tar" %(localPath,tag)): #HACK
self.logger.info("image exists, skip downloading it.")
localPath = "%s/input" %localPath
remotePath = "%s/input" %tag
os.makedirs(localPath, exist_ok=True) #HACK
self.logger.info(localPath)
self.logger.info("get job")
self.DC.getData(_DIRIP_,_DIRPORT_,_SSHPORT_, self.user, JID, remotePath, localPath,_SSHKEY_)
if execute:
images = self.dockerClient.images.list(name=tag)
self.logger.info(images)
if not images:
self.logger.info("Image not loaded. loading image... ")
DockerWrapper.loadImage(self.dockerClient, "%s/%s/%s:latest.tar" %(_WORKPATH_, tag,tag))
self.logger.info("Image is loaded")
self.logger.info("running job")
jobname = "mm_%s" %matchID
input = "%s/%s/input" %(_WORKPATH_,tag)
output = "%s/%s/output" %(_WORKPATH_,tag)
appinput = "/app/input"
appoutput = "/app/output"
container = DockerWrapper.runContainer(self.dockerClient, tag, jobname, input, output,appinput,appoutput)
container.reload()
lid = container.attrs["Id"]
self.logger.info("container ID: %s" %lid)
while container.status == "running":
self.logger.info("Container is : %s" %container.status)
cmd = "cat /sys/fs/cgroup/cpuacct/docker/%s/cpuacct.stat | grep -oP '(?<=user ).*'" %lid
completedprocess = subprocess.getoutput(cmd) #HACK the internet says something else should be used
cpuTime = int(completedprocess) * 10
print("duration: %s ms" %cpuTime)
time.sleep(1)
container.reload()
self.logger.info("Done running")
self.logger.info("Send result to DIRECTORY")
self.DC.publishData(_DIRIP_, _SSHPORT_, self.user,tag,output,_SSHKEY_)
if self.account:
#TODO resultHash
resultHash = "b599cff993a602c14e6d45beab7a48c25e6753b7106cd6173488e843a7158060"
resultHash_int = int(resultHash, 16)
# #TODO FAILURE HANDLING
self.logger.info("M: post mediation result")
if self.myMatches[matchID]['resHash'] == resultHash_int:
self.contract.postMediationResult(self.account, True,
matchID, 'Completed', tag, resultHash_int, cpuTime, 0, 0, 'CorrectResults', 'JobCreator')
else:
self.contract.postMediationResult(self.account, True,
matchID, 'Completed', tag, resultHash_int, cpuTime, 0, 0,
'WrongResults', 'ResourceProvider')
self.logger.info("Done")
return 0
else:
return 0
else:
self.logger.info("Done.. but permission denied")
            return msg  # `message` was undefined here; return the permission reply instead
def CLIListener(self):
active = True
while active:
pass
def platformListener(self):
self.active = True
while self.active:
events = self.contract.poll_events()
# self.logger.info("poll contract events")
for event in events:
params = event['params']
name = event['name']
if name == "MediatorRegistered" :
self.logger.info("A: %s" %name)
elif name == "ResourceOfferPosted":
self.logger.info(name)
offer = Pstruct.ResourceOffer(
params['offerId'], params['addr'],
params['instructionPrice'], params['instructionCap'],
params['memoryCap'], params['localStorageCap'],
params['bandwidthCap'], params['bandwidthPrice'],
params['dockerBandwidthCap'], params['dockerBandwidthPrice'], params['deposit'])
self.resource_offers[params['offerId']] = offer
elif name == "JobOfferPosted":
self.logger.info(name)
offer = Pstruct.JobOffer(
params['offerId'], params['jobCreator'], params['size'],
params['arch'], params['instructionLimit'], params['ramLimit'],
params['localStorageLimit'], params['bandwidthLimit'],
params['instructionMaxPrice'], params['bandwidthMaxPrice'],
params['dockerBandwidthMaxPrice'], params['completionDeadline'], params['deposit'])
self.job_offers[params['offerId']] = offer
elif name == "Matched":
self.logger.info(name)
joid = params['jobOfferId']
roid = params['resourceOfferId']
mid = params['mediator']
matchID = params['matchId']
if mid == self.account:
self.myMatches[matchID] = {
'joid': joid,
'roid': roid
}
self.logger.info('I was matched: %s' % params['matchId'])
elif name == "ResultPosted":
self.logger.info(name)
if params['matchId'] in self.myMatches:
self.myMatches[params['matchId']]['resHash'] = params['hash']
elif name == "JobAssignedForMediation":
self.logger.info(name)
if params['matchId'] in self.myMatches:
matchID = params['matchId']
joid = self.myMatches[matchID]['joid']
JID = self.job_offers[joid].jobCreator
tag = self.job_offers[joid].uri
ijoid = self.job_offers[joid].ijoid
self.getJob(tag, matchID, JID, True, ijoid)
elif name == "MediationResultPosted":
self.logger.info("M: %s" %name)
elif name == "DebugString":
self.logger.info(params["str"])
self.wait()
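
# Hypothetical entry point showing how the class above is meant to be driven;
# the account address, architecture and prices are placeholders, and the
# surrounding platform (contract, directory) is assumed to be configured via
# the .env file loaded in __init__.
if __name__ == "__main__":
    mediator = Mediator()
    mediator.register(account="0x0", arch="amd64",
                      instructionPrice=1, bandwidthPrice=1,
                      dockerBandwidthPrice=1)
    mediator.platformListener()  # poll contract events until stopped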
|
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import itertools
import logging
from abc import ABC, ABCMeta
from dataclasses import dataclass
from enum import Enum
from pathlib import PurePath
from typing import Any, ClassVar, Iterable, Optional, Sequence, TypeVar, cast
from pants.base.deprecated import deprecated
from pants.core.goals.multi_tool_goal_helper import SkippableSubsystem
from pants.core.goals.package import BuiltPackage, EnvironmentAwarePackageRequest, PackageFieldSet
from pants.core.subsystems.debug_adapter import DebugAdapterSubsystem
from pants.core.util_rules.distdir import DistDir
from pants.core.util_rules.environments import (
ChosenLocalEnvironmentName,
EnvironmentName,
SingleEnvironmentNameRequest,
)
from pants.core.util_rules.partitions import (
PartitionerType,
PartitionMetadataT,
Partitions,
_BatchBase,
_PartitionFieldSetsRequestBase,
)
from pants.engine.addresses import Address, UnparsedAddressInputs
from pants.engine.collection import Collection
from pants.engine.console import Console
from pants.engine.desktop import OpenFiles, OpenFilesRequest
from pants.engine.engine_aware import EngineAwareReturnType
from pants.engine.env_vars import EnvironmentVars, EnvironmentVarsRequest
from pants.engine.fs import EMPTY_FILE_DIGEST, Digest, FileDigest, MergeDigests, Snapshot, Workspace
from pants.engine.goal import Goal, GoalSubsystem
from pants.engine.internals.session import RunId
from pants.engine.process import (
FallibleProcessResult,
InteractiveProcess,
InteractiveProcessResult,
ProcessResultMetadata,
)
from pants.engine.rules import Effect, Get, MultiGet, collect_rules, goal_rule, rule
from pants.engine.target import (
FieldSet,
FieldSetsPerTarget,
FieldSetsPerTargetRequest,
IntField,
NoApplicableTargetsBehavior,
SourcesField,
SpecialCasedDependencies,
StringField,
StringSequenceField,
TargetRootsToFieldSets,
TargetRootsToFieldSetsRequest,
Targets,
ValidNumbers,
parse_shard_spec,
)
from pants.engine.unions import UnionMembership, UnionRule, distinct_union_type_per_subclass, union
from pants.option.option_types import BoolOption, EnumOption, IntOption, StrListOption, StrOption
from pants.util.collections import partition_sequentially
from pants.util.docutil import bin_name
from pants.util.logging import LogLevel
from pants.util.memo import memoized, memoized_property
from pants.util.meta import classproperty
from pants.util.strutil import help_text, softwrap
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class TestResult(EngineAwareReturnType):
# A None exit_code indicates a backend that performs its own test discovery/selection
# (rather than delegating that to the underlying test tool), and discovered no tests.
exit_code: int | None
stdout_bytes: bytes
stdout_digest: FileDigest
stderr_bytes: bytes
stderr_digest: FileDigest
addresses: tuple[Address, ...]
output_setting: ShowOutput
# A None result_metadata indicates a backend that performs its own test discovery/selection
    # and either discovered no tests, or encountered an error, such as a compilation error, in
# the attempt.
result_metadata: ProcessResultMetadata | None
partition_description: str | None = None
coverage_data: CoverageData | None = None
# TODO: Rename this to `reports`. There is no guarantee that every language will produce
# XML reports, or only XML reports.
xml_results: Snapshot | None = None
# Any extra output (such as from plugins) that the test runner was configured to output.
extra_output: Snapshot | None = None
# True if the core test rules should log that extra output was written.
log_extra_output: bool = False
# Prevent this class from being detected by pytest as a test class.
__test__ = False
@staticmethod
def no_tests_found(address: Address, output_setting: ShowOutput) -> TestResult:
"""Used when we do test discovery ourselves, and we didn't find any."""
return TestResult(
exit_code=None,
stdout_bytes=b"",
stderr_bytes=b"",
stdout_digest=EMPTY_FILE_DIGEST,
stderr_digest=EMPTY_FILE_DIGEST,
addresses=(address,),
output_setting=output_setting,
result_metadata=None,
)
@staticmethod
def no_tests_found_in_batch(
batch: TestRequest.Batch[_TestFieldSetT, Any], output_setting: ShowOutput
) -> TestResult:
"""Used when we do test discovery ourselves, and we didn't find any."""
return TestResult(
exit_code=None,
stdout_bytes=b"",
stderr_bytes=b"",
stdout_digest=EMPTY_FILE_DIGEST,
stderr_digest=EMPTY_FILE_DIGEST,
addresses=tuple(field_set.address for field_set in batch.elements),
output_setting=output_setting,
result_metadata=None,
partition_description=batch.partition_metadata.description,
)
@staticmethod
def from_fallible_process_result(
process_result: FallibleProcessResult,
address: Address,
output_setting: ShowOutput,
*,
coverage_data: CoverageData | None = None,
xml_results: Snapshot | None = None,
extra_output: Snapshot | None = None,
log_extra_output: bool = False,
) -> TestResult:
return TestResult(
exit_code=process_result.exit_code,
stdout_bytes=process_result.stdout,
stdout_digest=process_result.stdout_digest,
stderr_bytes=process_result.stderr,
stderr_digest=process_result.stderr_digest,
addresses=(address,),
output_setting=output_setting,
result_metadata=process_result.metadata,
coverage_data=coverage_data,
xml_results=xml_results,
extra_output=extra_output,
log_extra_output=log_extra_output,
)
@staticmethod
def from_batched_fallible_process_result(
process_result: FallibleProcessResult,
batch: TestRequest.Batch[_TestFieldSetT, Any],
output_setting: ShowOutput,
*,
coverage_data: CoverageData | None = None,
xml_results: Snapshot | None = None,
extra_output: Snapshot | None = None,
log_extra_output: bool = False,
) -> TestResult:
return TestResult(
exit_code=process_result.exit_code,
stdout_bytes=process_result.stdout,
stdout_digest=process_result.stdout_digest,
stderr_bytes=process_result.stderr,
stderr_digest=process_result.stderr_digest,
addresses=tuple(field_set.address for field_set in batch.elements),
output_setting=output_setting,
result_metadata=process_result.metadata,
coverage_data=coverage_data,
xml_results=xml_results,
extra_output=extra_output,
log_extra_output=log_extra_output,
partition_description=batch.partition_metadata.description,
)
@memoized_property
@deprecated(
removal_version="2.19.0.dev0", hint="Use `TestResult.stdout_bytes` instead of `stdout`."
)
def stdout(self) -> str:
return self.stdout_bytes.decode(errors="replace")
@memoized_property
@deprecated(
removal_version="2.19.0.dev0", hint="Use `TestResult.stderr_bytes` instead of `stderr`."
)
def stderr(self) -> str:
return self.stderr_bytes.decode(errors="replace")
@property
def description(self) -> str:
if len(self.addresses) == 1:
return self.addresses[0].spec
return f"{self.addresses[0].spec} and {len(self.addresses)-1} other files"
@property
def path_safe_description(self) -> str:
if len(self.addresses) == 1:
return self.addresses[0].path_safe_spec
return f"{self.addresses[0].path_safe_spec}+{len(self.addresses)-1}"
def __lt__(self, other: Any) -> bool:
"""We sort first by exit code, then alphanumerically within each group."""
if not isinstance(other, TestResult):
return NotImplemented
if self.exit_code == other.exit_code:
return self.description < other.description
if self.exit_code is None:
return True
if other.exit_code is None:
return False
return abs(self.exit_code) < abs(other.exit_code)
def artifacts(self) -> dict[str, FileDigest | Snapshot] | None:
output: dict[str, FileDigest | Snapshot] = {
"stdout": self.stdout_digest,
"stderr": self.stderr_digest,
}
if self.xml_results:
output["xml_results"] = self.xml_results
return output
def level(self) -> LogLevel:
if self.exit_code is None:
return LogLevel.DEBUG
return LogLevel.INFO if self.exit_code == 0 else LogLevel.ERROR
def message(self) -> str:
if self.exit_code is None:
return "no tests found."
status = "succeeded" if self.exit_code == 0 else f"failed (exit code {self.exit_code})"
message = f"{status}."
if self.partition_description:
message += f"\nPartition: {self.partition_description}"
if self.output_setting == ShowOutput.NONE or (
self.output_setting == ShowOutput.FAILED and self.exit_code == 0
):
return message
output = ""
if self.stdout_bytes:
output += f"\n{self.stdout_bytes.decode(errors='replace')}"
if self.stderr_bytes:
output += f"\n{self.stderr_bytes.decode(errors='replace')}"
if output:
output = f"{output.rstrip()}\n\n"
return f"{message}{output}"
def metadata(self) -> dict[str, Any]:
return {"addresses": [address.spec for address in self.addresses]}
def cacheable(self) -> bool:
"""Is marked uncacheable to ensure that it always renders."""
return False
class ShowOutput(Enum):
"""Which tests to emit detailed output for."""
ALL = "all"
FAILED = "failed"
NONE = "none"
@dataclass(frozen=True)
class TestDebugRequest:
process: InteractiveProcess
# Prevent this class from being detected by pytest as a test class.
__test__ = False
class TestDebugAdapterRequest(TestDebugRequest):
"""Like TestDebugRequest, but launches the test process using the relevant Debug Adapter server.
The process should be launched waiting for the client to connect.
"""
@union
@dataclass(frozen=True)
class TestFieldSet(FieldSet, metaclass=ABCMeta):
"""The fields necessary to run tests on a target."""
sources: SourcesField
__test__ = False
_TestFieldSetT = TypeVar("_TestFieldSetT", bound=TestFieldSet)
@union
class TestRequest:
"""Base class for plugin types wanting to be run as part of `test`.
Plugins should define a new type which subclasses this type, and set the
appropriate class variables.
E.g.
class DryCleaningRequest(TestRequest):
tool_subsystem = DryCleaningSubsystem
field_set_type = DryCleaningFieldSet
Then register the rules which tell Pants about your plugin.
E.g.
def rules():
return [
*collect_rules(),
*DryCleaningRequest.rules(),
]
"""
tool_subsystem: ClassVar[type[SkippableSubsystem]]
field_set_type: ClassVar[type[TestFieldSet]]
partitioner_type: ClassVar[PartitionerType] = PartitionerType.DEFAULT_ONE_PARTITION_PER_INPUT
supports_debug: ClassVar[bool] = False
supports_debug_adapter: ClassVar[bool] = False
__test__ = False
@classproperty
def tool_name(cls) -> str:
return cls.tool_subsystem.options_scope
@distinct_union_type_per_subclass(in_scope_types=[EnvironmentName])
class PartitionRequest(_PartitionFieldSetsRequestBase[_TestFieldSetT]):
def metadata(self) -> dict[str, Any]:
return {"addresses": [field_set.address.spec for field_set in self.field_sets]}
@distinct_union_type_per_subclass(in_scope_types=[EnvironmentName])
class Batch(_BatchBase[_TestFieldSetT, PartitionMetadataT]):
@property
def single_element(self) -> _TestFieldSetT:
"""Return the single element of this batch.
NOTE: Accessing this property will raise a `TypeError` if this `Batch` contains
>1 elements. It is only safe to be used by test runners utilizing the "default"
one-input-per-partition partitioner type.
"""
if len(self.elements) != 1:
description = ""
if self.partition_metadata.description:
description = f" from partition '{self.partition_metadata.description}'"
raise TypeError(
f"Expected a single element in batch{description}, but found {len(self.elements)}"
)
return self.elements[0]
@property
def description(self) -> str:
if self.partition_metadata and self.partition_metadata.description:
return f"test batch from partition '{self.partition_metadata.description}'"
return "test batch"
def debug_hint(self) -> str:
if len(self.elements) == 1:
return self.elements[0].address.spec
return f"{self.elements[0].address.spec} and {len(self.elements)-1} other files"
def metadata(self) -> dict[str, Any]:
return {
"addresses": [field_set.address.spec for field_set in self.elements],
"partition_description": self.partition_metadata.description,
}
@classmethod
def rules(cls) -> Iterable:
yield from cls.partitioner_type.default_rules(cls, by_file=False)
yield UnionRule(TestFieldSet, cls.field_set_type)
yield UnionRule(TestRequest, cls)
yield UnionRule(TestRequest.PartitionRequest, cls.PartitionRequest)
yield UnionRule(TestRequest.Batch, cls.Batch)
if not cls.supports_debug:
yield from _unsupported_debug_rules(cls)
if not cls.supports_debug_adapter:
yield from _unsupported_debug_adapter_rules(cls)
class CoverageData(ABC):
"""Base class for inputs to a coverage report.
Subclasses should add whichever fields they require - snapshots of coverage output, XML files,
etc.
"""
_CD = TypeVar("_CD", bound=CoverageData)
@union(in_scope_types=[EnvironmentName])
class CoverageDataCollection(Collection[_CD]):
element_type: ClassVar[type[_CD]] # type: ignore[misc]
@dataclass(frozen=True)
class CoverageReport(ABC):
"""Represents a code coverage report that can be materialized to the terminal or disk."""
# Some coverage systems can determine, based on a configurable threshold, whether coverage
# was sufficient or not. The test goal will fail the build if coverage was deemed insufficient.
coverage_insufficient: bool
def materialize(self, console: Console, workspace: Workspace) -> PurePath | None:
"""Materialize this code coverage report to the terminal or disk.
:param console: A handle to the terminal.
:param workspace: A handle to local disk.
:return: If a report was materialized to disk, the path of the file in the report one might
open first to start examining the report.
"""
...
def get_artifact(self) -> tuple[str, Snapshot] | None:
return None
@dataclass(frozen=True)
class ConsoleCoverageReport(CoverageReport):
"""Materializes a code coverage report to the terminal."""
report: str
def materialize(self, console: Console, workspace: Workspace) -> None:
console.print_stderr(f"\n{self.report}")
return None
@dataclass(frozen=True)
class FilesystemCoverageReport(CoverageReport):
"""Materializes a code coverage report to disk."""
result_snapshot: Snapshot
directory_to_materialize_to: PurePath
report_file: PurePath | None
report_type: str
def materialize(self, console: Console, workspace: Workspace) -> PurePath | None:
workspace.write_digest(
self.result_snapshot.digest, path_prefix=str(self.directory_to_materialize_to)
)
console.print_stderr(
f"\nWrote {self.report_type} coverage report to `{self.directory_to_materialize_to}`"
)
return self.report_file
def get_artifact(self) -> tuple[str, Snapshot] | None:
return f"coverage_{self.report_type}", self.result_snapshot
@dataclass(frozen=True)
class CoverageReports(EngineAwareReturnType):
reports: tuple[CoverageReport, ...]
@property
def coverage_insufficient(self) -> bool:
"""Whether to fail the build due to insufficient coverage."""
return any(report.coverage_insufficient for report in self.reports)
def materialize(self, console: Console, workspace: Workspace) -> tuple[PurePath, ...]:
report_paths = []
for report in self.reports:
report_path = report.materialize(console, workspace)
if report_path:
report_paths.append(report_path)
return tuple(report_paths)
def artifacts(self) -> dict[str, Snapshot | FileDigest] | None:
artifacts: dict[str, Snapshot | FileDigest] = {}
for report in self.reports:
artifact = report.get_artifact()
if not artifact:
continue
artifacts[artifact[0]] = artifact[1]
return artifacts or None
class TestSubsystem(GoalSubsystem):
name = "test"
help = "Run tests."
# Prevent this class from being detected by pytest as a test class.
__test__ = False
@classmethod
def activated(cls, union_membership: UnionMembership) -> bool:
return TestRequest in union_membership
class EnvironmentAware:
extra_env_vars = StrListOption(
help=softwrap(
"""
Additional environment variables to include in test processes.
Entries are strings in the form `ENV_VAR=value` to use explicitly; or just
`ENV_VAR` to copy the value of a variable in Pants's own environment.
"""
),
)
debug = BoolOption(
default=False,
help=softwrap(
"""
Run tests sequentially in an interactive process. This is necessary, for
example, when you add breakpoints to your code.
"""
),
)
# See also `run.py`'s same option
debug_adapter = BoolOption(
default=False,
help=softwrap(
"""
Run tests sequentially in an interactive process, using a Debug Adapter
(https://microsoft.github.io/debug-adapter-protocol/) for the language if supported.
The interactive process used will be immediately blocked waiting for a client before
continuing.
This option implies `--debug`.
"""
),
)
force = BoolOption(
default=False,
help="Force the tests to run, even if they could be satisfied from cache.",
)
output = EnumOption(
default=ShowOutput.FAILED,
help="Show stdout/stderr for these tests.",
)
use_coverage = BoolOption(
default=False,
help="Generate a coverage report if the test runner supports it.",
)
open_coverage = BoolOption(
default=False,
help=softwrap(
"""
If a coverage report file is generated, open it on the local system if the
system supports this.
"""
),
)
report = BoolOption(default=False, advanced=True, help="Write test reports to `--report-dir`.")
default_report_path = str(PurePath("{distdir}", "test", "reports"))
_report_dir = StrOption(
default=default_report_path,
advanced=True,
help="Path to write test reports to. Must be relative to the build root.",
)
shard = StrOption(
default="",
help=softwrap(
"""
A shard specification of the form "k/N", where N is a positive integer and k is a
non-negative integer less than N.
If set, the request input targets will be deterministically partitioned into N disjoint
subsets of roughly equal size, and only the k'th subset will be used, with all others
discarded.
Useful for splitting large numbers of test files across multiple machines in CI.
For example, you can run three shards with `--shard=0/3`, `--shard=1/3`, `--shard=2/3`.
Note that the shards are roughly equal in size as measured by number of files.
No attempt is made to consider the size of different files, the time they have
taken to run in the past, or other such sophisticated measures.
"""
),
)
timeouts = BoolOption(
default=True,
help=softwrap(
"""
Enable test target timeouts. If timeouts are enabled then test targets with a
`timeout=` parameter set on their target will time out after the given number of
seconds if not completed. If no timeout is set, then either the default timeout
is used or no timeout is configured.
"""
),
)
timeout_default = IntOption(
default=None,
advanced=True,
help=softwrap(
"""
The default timeout (in seconds) for a test target if the `timeout` field is not
set on the target.
"""
),
)
timeout_maximum = IntOption(
default=None,
advanced=True,
help="The maximum timeout (in seconds) that may be used on a test target.",
)
batch_size = IntOption(
"--batch-size",
default=128,
advanced=True,
help=softwrap(
"""
The target maximum number of files to be included in each run of batch-enabled
test runners.
Some test runners can execute tests from multiple files in a single run. Test
implementations will return all tests that _can_ run together as a single group -
and then this may be further divided into smaller batches, based on this option.
This is done:
1. to avoid OS argument length limits (in processes which don't support argument files)
2. to support more stable cache keys than would be possible if all files were operated \
on in a single batch
3. to allow for parallelism in test runners which don't have internal \
parallelism, or -- if they do support internal parallelism -- to improve scheduling \
behavior when multiple processes are competing for cores and so internal parallelism \
cannot be used perfectly
In order to improve cache hit rates (see 2.), batches are created at stable boundaries,
and so this value is only a "target" max batch size (rather than an exact value).
NOTE: This parameter has no effect on test runners/plugins that do not implement support
for batched testing.
"""
),
)
def report_dir(self, distdir: DistDir) -> PurePath:
return PurePath(self._report_dir.format(distdir=distdir.relpath))
class Test(Goal):
subsystem_cls = TestSubsystem
environment_behavior = Goal.EnvironmentBehavior.USES_ENVIRONMENTS
__test__ = False
class TestTimeoutField(IntField, metaclass=ABCMeta):
"""Base field class for implementing timeouts for test targets.
    Each test target that wants to implement a timeout needs to provide its own concrete field
class extending this one.
"""
alias = "timeout"
required = False
valid_numbers = ValidNumbers.positive_only
help = help_text(
"""
A timeout (in seconds) used by each test file belonging to this target.
If unset, will default to `[test].timeout_default`; if that option is also unset,
then the test will never time out. Will never exceed `[test].timeout_maximum`. Only
applies if the option `--test-timeouts` is set to true (the default).
"""
)
def calculate_from_global_options(self, test: TestSubsystem) -> Optional[int]:
if not test.timeouts:
return None
if self.value is None:
if test.timeout_default is None:
return None
result = test.timeout_default
else:
result = self.value
if test.timeout_maximum is not None:
return min(result, test.timeout_maximum)
return result
class TestExtraEnvVarsField(StringSequenceField, metaclass=ABCMeta):
alias = "extra_env_vars"
help = help_text(
"""
Additional environment variables to include in test processes.
Entries are strings in the form `ENV_VAR=value` to use explicitly; or just
`ENV_VAR` to copy the value of a variable in Pants's own environment.
This will be merged with and override values from `[test].extra_env_vars`.
"""
)
def sorted(self) -> tuple[str, ...]:
return tuple(sorted(self.value or ()))
class TestsBatchCompatibilityTagField(StringField, metaclass=ABCMeta):
alias = "batch_compatibility_tag"
@classmethod
def format_help(cls, target_name: str, test_runner_name: str) -> str:
return f"""
An arbitrary value used to mark the test files belonging to this target as valid for
batched execution.
It's _sometimes_ safe to run multiple `{target_name}`s within a single test runner process,
and doing so can give significant wins by allowing reuse of expensive test setup /
teardown logic. To opt into this behavior, set this field to an arbitrary non-empty
string on all the `{target_name}` targets that are safe/compatible to run in the same
process.
If this field is left unset on a target, the target is assumed to be incompatible with
all others and will run in a dedicated `{test_runner_name}` process.
If this field is set on a target, and its value is different from the value on some
other test `{target_name}`, then the two targets are explicitly incompatible and are guaranteed
to not run in the same `{test_runner_name}` process.
If this field is set on a target, and its value is the same as the value on some other
`{target_name}`, then the two targets are explicitly compatible and _may_ run in the same
test runner process. Compatible tests may not end up in the same test runner batch if:
* There are "too many" compatible tests in a partition, as determined by the \
`[test].batch_size` config parameter, or
* Compatible tests have some incompatibility in Pants metadata (i.e. different \
`resolve`s or `extra_env_vars`).
When tests with the same `batch_compatibility_tag` have incompatibilities in some other
Pants metadata, they will be automatically split into separate batches. This way you can
set a high-level `batch_compatibility_tag` using `__defaults__` and then have tests
continue to work as you tweak BUILD metadata on specific targets.
"""
async def _get_test_batches(
core_request_types: Iterable[type[TestRequest]],
targets_to_field_sets: TargetRootsToFieldSets,
local_environment_name: ChosenLocalEnvironmentName,
test_subsystem: TestSubsystem,
) -> list[TestRequest.Batch]:
def partitions_get(request_type: type[TestRequest]) -> Get[Partitions]:
partition_type = cast(TestRequest, request_type)
field_set_type = partition_type.field_set_type
applicable_field_sets: list[TestFieldSet] = []
for target, field_sets in targets_to_field_sets.mapping.items():
if field_set_type.is_applicable(target):
applicable_field_sets.extend(field_sets)
partition_request = partition_type.PartitionRequest(tuple(applicable_field_sets))
return Get(
Partitions,
{
partition_request: TestRequest.PartitionRequest,
local_environment_name.val: EnvironmentName,
},
)
all_partitions = await MultiGet(
partitions_get(request_type) for request_type in core_request_types
)
return [
request_type.Batch(
cast(TestRequest, request_type).tool_name, tuple(batch), partition.metadata
)
for request_type, partitions in zip(core_request_types, all_partitions)
for partition in partitions
for batch in partition_sequentially(
partition.elements,
key=lambda x: str(x.address) if isinstance(x, FieldSet) else str(x),
size_target=test_subsystem.batch_size,
size_max=2 * test_subsystem.batch_size,
)
]
async def _run_debug_tests(
batches: Iterable[TestRequest.Batch],
environment_names: Sequence[EnvironmentName],
test_subsystem: TestSubsystem,
debug_adapter: DebugAdapterSubsystem,
) -> Test:
debug_requests = await MultiGet(
(
Get(
TestDebugRequest,
{batch: TestRequest.Batch, environment_name: EnvironmentName},
)
if not test_subsystem.debug_adapter
else Get(
TestDebugAdapterRequest,
{batch: TestRequest.Batch, environment_name: EnvironmentName},
)
)
for batch, environment_name in zip(batches, environment_names)
)
exit_code = 0
for debug_request, environment_name in zip(debug_requests, environment_names):
if test_subsystem.debug_adapter:
logger.info(
softwrap(
f"""
Launching debug adapter at '{debug_adapter.host}:{debug_adapter.port}',
which will wait for a client connection...
"""
)
)
debug_result = await Effect(
InteractiveProcessResult,
{
debug_request.process: InteractiveProcess,
environment_name: EnvironmentName,
},
)
if debug_result.exit_code != 0:
exit_code = debug_result.exit_code
return Test(exit_code)
@goal_rule
async def run_tests(
console: Console,
test_subsystem: TestSubsystem,
debug_adapter: DebugAdapterSubsystem,
workspace: Workspace,
union_membership: UnionMembership,
distdir: DistDir,
run_id: RunId,
local_environment_name: ChosenLocalEnvironmentName,
) -> Test:
if test_subsystem.debug_adapter:
goal_description = f"`{test_subsystem.name} --debug-adapter`"
no_applicable_targets_behavior = NoApplicableTargetsBehavior.error
elif test_subsystem.debug:
goal_description = f"`{test_subsystem.name} --debug`"
no_applicable_targets_behavior = NoApplicableTargetsBehavior.error
else:
goal_description = f"The `{test_subsystem.name}` goal"
no_applicable_targets_behavior = NoApplicableTargetsBehavior.warn
shard, num_shards = parse_shard_spec(test_subsystem.shard, "the [test].shard option")
targets_to_valid_field_sets = await Get(
TargetRootsToFieldSets,
TargetRootsToFieldSetsRequest(
TestFieldSet,
goal_description=goal_description,
no_applicable_targets_behavior=no_applicable_targets_behavior,
shard=shard,
num_shards=num_shards,
),
)
request_types = union_membership.get(TestRequest)
test_batches = await _get_test_batches(
request_types,
targets_to_valid_field_sets,
local_environment_name,
test_subsystem,
)
environment_names = await MultiGet(
Get(
EnvironmentName,
SingleEnvironmentNameRequest,
SingleEnvironmentNameRequest.from_field_sets(batch.elements, batch.description),
)
for batch in test_batches
)
if test_subsystem.debug or test_subsystem.debug_adapter:
return await _run_debug_tests(
test_batches, environment_names, test_subsystem, debug_adapter
)
results = await MultiGet(
Get(TestResult, {batch: TestRequest.Batch, environment_name: EnvironmentName})
for batch, environment_name in zip(test_batches, environment_names)
)
# Print summary.
exit_code = 0
if results:
console.print_stderr("")
for result in sorted(results):
if result.exit_code is None:
# We end up here, e.g., if we implemented test discovery and found no tests.
continue
if result.exit_code != 0:
exit_code = result.exit_code
if result.result_metadata is None:
# We end up here, e.g., if compilation failed during self-implemented test discovery.
continue
console.print_stderr(_format_test_summary(result, run_id, console))
if result.extra_output and result.extra_output.files:
path_prefix = str(distdir.relpath / "test" / result.path_safe_description)
workspace.write_digest(
result.extra_output.digest,
path_prefix=path_prefix,
)
if result.log_extra_output:
logger.info(
f"Wrote extra output from test `{result.addresses[0]}` to `{path_prefix}`."
)
if test_subsystem.report:
report_dir = test_subsystem.report_dir(distdir)
merged_reports = await Get(
Digest,
MergeDigests(result.xml_results.digest for result in results if result.xml_results),
)
workspace.write_digest(merged_reports, path_prefix=str(report_dir))
console.print_stderr(f"\nWrote test reports to {report_dir}")
if test_subsystem.use_coverage:
# NB: We must pre-sort the data for itertools.groupby() to work properly, using the same
# key function for both. However, you can't sort by `types`, so we call `str()` on it.
all_coverage_data = sorted(
(result.coverage_data for result in results if result.coverage_data is not None),
key=lambda cov_data: str(type(cov_data)),
)
coverage_types_to_collection_types = {
collection_cls.element_type: collection_cls # type: ignore[misc]
for collection_cls in union_membership.get(CoverageDataCollection)
}
coverage_collections = []
for data_cls, data in itertools.groupby(all_coverage_data, lambda data: type(data)):
collection_cls = coverage_types_to_collection_types[data_cls]
coverage_collections.append(collection_cls(data))
# We can create multiple reports for each coverage data (e.g., console, xml, html)
coverage_reports_collections = await MultiGet(
Get(
CoverageReports,
{
coverage_collection: CoverageDataCollection,
local_environment_name.val: EnvironmentName,
},
)
for coverage_collection in coverage_collections
)
coverage_report_files: list[PurePath] = []
for coverage_reports in coverage_reports_collections:
report_files = coverage_reports.materialize(console, workspace)
coverage_report_files.extend(report_files)
if coverage_report_files and test_subsystem.open_coverage:
open_files = await Get(
OpenFiles, OpenFilesRequest(coverage_report_files, error_if_open_not_found=False)
)
for process in open_files.processes:
_ = await Effect(
InteractiveProcessResult,
{process: InteractiveProcess, local_environment_name.val: EnvironmentName},
)
for coverage_reports in coverage_reports_collections:
if coverage_reports.coverage_insufficient:
logger.error(
softwrap(
"""
Test goal failed due to insufficient coverage.
See coverage reports for details.
"""
)
)
# coverage.py uses 2 to indicate failure due to insufficient coverage.
# We may as well follow suit in the general case, for all languages.
exit_code = 2
return Test(exit_code)
_SOURCE_MAP = {
ProcessResultMetadata.Source.MEMOIZED: "memoized",
ProcessResultMetadata.Source.RAN: "ran",
ProcessResultMetadata.Source.HIT_LOCALLY: "cached locally",
ProcessResultMetadata.Source.HIT_REMOTELY: "cached remotely",
}
def _format_test_summary(result: TestResult, run_id: RunId, console: Console) -> str:
"""Format the test summary printed to the console."""
assert (
result.result_metadata is not None
), "Skipped test results should not be outputted in the test summary"
if result.exit_code == 0:
sigil = console.sigil_succeeded()
status = "succeeded"
else:
sigil = console.sigil_failed()
status = "failed"
environment = result.result_metadata.execution_environment.name
environment_type = result.result_metadata.execution_environment.environment_type
source = result.result_metadata.source(run_id)
source_str = _SOURCE_MAP[source]
if environment:
preposition = "in" if source == ProcessResultMetadata.Source.RAN else "for"
source_desc = (
f" ({source_str} {preposition} {environment_type} environment `{environment}`)"
)
elif source == ProcessResultMetadata.Source.RAN:
source_desc = ""
else:
source_desc = f" ({source_str})"
elapsed_print = ""
total_elapsed_ms = result.result_metadata.total_elapsed_ms
if total_elapsed_ms is not None:
elapsed_secs = total_elapsed_ms / 1000
elapsed_print = f"in {elapsed_secs:.2f}s"
suffix = f" {elapsed_print}{source_desc}"
return f"{sigil} {result.description} {status}{suffix}."
@dataclass(frozen=True)
class TestExtraEnv:
env: EnvironmentVars
@rule
async def get_filtered_environment(test_env_aware: TestSubsystem.EnvironmentAware) -> TestExtraEnv:
return TestExtraEnv(
await Get(EnvironmentVars, EnvironmentVarsRequest(test_env_aware.extra_env_vars))
)
@memoized
def _unsupported_debug_rules(cls: type[TestRequest]) -> Iterable:
"""Returns a rule that implements TestDebugRequest by raising an error."""
@rule(_param_type_overrides={"request": cls.Batch})
async def get_test_debug_request(request: TestRequest.Batch) -> TestDebugRequest:
raise NotImplementedError("Testing this target with --debug is not yet supported.")
return collect_rules(locals())
@memoized
def _unsupported_debug_adapter_rules(cls: type[TestRequest]) -> Iterable:
"""Returns a rule that implements TestDebugAdapterRequest by raising an error."""
@rule(_param_type_overrides={"request": cls.Batch})
async def get_test_debug_adapter_request(request: TestRequest.Batch) -> TestDebugAdapterRequest:
raise NotImplementedError(
"Testing this target type with a debug adapter is not yet supported."
)
return collect_rules(locals())
# -------------------------------------------------------------------------------------------
# `runtime_package_dependencies` field
# -------------------------------------------------------------------------------------------
class RuntimePackageDependenciesField(SpecialCasedDependencies):
alias = "runtime_package_dependencies"
help = help_text(
f"""
Addresses to targets that can be built with the `{bin_name()} package` goal and whose
resulting artifacts should be included in the test run.
Pants will build the artifacts as if you had run `{bin_name()} package`.
It will include the results in your test's chroot, using the same name they would normally
have, but without the `--distdir` prefix (e.g. `dist/`).
You can include anything that can be built by `{bin_name()} package`, e.g. a `pex_binary`,
`python_aws_lambda_function`, or an `archive`.
"""
)
class BuiltPackageDependencies(Collection[BuiltPackage]):
pass
@dataclass(frozen=True)
class BuildPackageDependenciesRequest:
field: RuntimePackageDependenciesField
@rule(desc="Build runtime package dependencies for tests", level=LogLevel.DEBUG)
async def build_runtime_package_dependencies(
request: BuildPackageDependenciesRequest,
) -> BuiltPackageDependencies:
unparsed_addresses = request.field.to_unparsed_address_inputs()
if not unparsed_addresses:
return BuiltPackageDependencies()
tgts = await Get(Targets, UnparsedAddressInputs, unparsed_addresses)
field_sets_per_tgt = await Get(
FieldSetsPerTarget, FieldSetsPerTargetRequest(PackageFieldSet, tgts)
)
packages = await MultiGet(
Get(BuiltPackage, EnvironmentAwarePackageRequest(field_set))
for field_set in field_sets_per_tgt.field_sets
)
return BuiltPackageDependencies(packages)
def rules():
return [
*collect_rules(),
]
|
'''
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from __future__ import print_function
from apiclient import discovery
from httplib2 import Http
from oauth2client import file, client, tools
SCOPES = 'https://www.googleapis.com/auth/drive.readonly.metadata'
store = file.Storage('storage.json')
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('client_id.json', SCOPES)
creds = tools.run_flow(flow, store)
DRIVE = discovery.build('drive', 'v3', http=creds.authorize(Http()))
files = DRIVE.files().list().execute().get('files', [])
for f in files:
print(f['name'], f['mimeType'])
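
# The single list() call above returns only the first page of results. A
# sketch of paging through everything; the page size and field mask are
# choices made here, not part of the original sample.
all_files, page_token = [], None
while True:
    resp = DRIVE.files().list(
        pageSize=100,
        fields='nextPageToken, files(name, mimeType)',
        pageToken=page_token,
    ).execute()
    all_files.extend(resp.get('files', []))
    page_token = resp.get('nextPageToken')
    if not page_token:
        break
print(len(all_files), 'files total')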
|
from random import randint, choice
from glm import vec3
from game.base.entity import Entity
from game.constants import CLOUD_IMAGE_PATHS
class Cloud(Entity):
    # Note: this branch runs once, at class-definition time, so the heading
    # (drift direction) is picked once and shared by every Cloud in the process.
    if randint(0, 10) <= 5:
        hdg = -1
    else:
        hdg = 1
def __init__(self, app, scene, pos: vec3, z_vel: float):
vel = vec3(randint(0, 15) * Cloud.hdg, 0, z_vel)
super().__init__(
app, scene, choice(CLOUD_IMAGE_PATHS), position=pos, velocity=vel, scale=4
)
|
from django.shortcuts import render
# Create your views here.
from rest_framework.viewsets import GenericViewSet
from rest_framework.mixins import CreateModelMixin,ListModelMixin,DestroyModelMixin,RetrieveModelMixin
from . import models
from . import serializers
from rest_framework_jwt.authentication import JSONWebTokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
class DiseaseView(GenericViewSet,ListModelMixin):
queryset = models.disease.objects.filter(is_delete=False,is_show=True)
serializer_class = serializers.DiseaseSerializer
class DiseaseDetailView(GenericViewSet,RetrieveModelMixin):
queryset = models.disease.objects.filter(is_delete=False,is_show=True)
    serializer_class = serializers.DiseaseDetailSerializer  # was misspelled `serializers_class`
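
# Hypothetical URL wiring for the two viewsets above with a DRF router
# (this would normally live in urls.py); the route prefixes and basenames
# are made up for illustration.
from rest_framework.routers import DefaultRouter

router = DefaultRouter()
router.register('diseases', DiseaseView, basename='disease')
router.register('disease-detail', DiseaseDetailView, basename='disease-detail')
urlpatterns = router.urls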
|
# Generated by Django 2.0 on 2018-01-30 16:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('hamyar', '0011_auto_20180130_1624'),
('madadju', '0008_report'),
('madadkar', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Receipt',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_receive', models.DateField()),
('date_send', models.DateField()),
('content', models.CharField(max_length=500)),
('hamyar', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hamyar.Hamyar')),
('madadju', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='madadju.Madadju')),
('madadkar', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='madadkar.Madadkar')),
],
),
]
|
# Generated by Django 3.1.4 on 2020-12-27 12:42
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Player',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('user_id', models.CharField(max_length=1024)),
('machine', models.BinaryField(blank=True, max_length=1048576)),
('asd', models.CharField(default='1', max_length=1)),
],
),
]
|
import sys
import csv
import os, random
from flask import Flask, render_template, url_for, request, Markup, redirect
app = Flask(__name__)
@app.route('/')
def main():
return render_template('base.html')
@app.route('/analysis/<lat>/<lng>', methods=['GET'])
def ana(lat, lng):
# getData gets a 2D array
# data[i][0] = url
# data[i][1] = lat
# data[i][2] = lng
if request.method == 'GET':
data, heatmap = getData(lat, lng)
links = ""
for datapoint in data:
links = links + datapoint[0] + "|||"
links = links + "000|||"
for datapoint in data:
links = links + datapoint[1] + "|||"
links = links + "000|||"
for datapoint in data:
links = links + datapoint[2] + "|||"
links = links + "000|||"
for datap in heatmap:
links = links + datap[0] + "|||"
links = links + "000|||"
for datap in heatmap:
links = links + datap[1] + "|||"
links = links + "000|||"
for datap in heatmap:
links = links + datap[2] + "|||"
print(links)
return links
def getData(lat, lng):
#data = getMartinsData(lat,lng)
data = []
data.append(['https://twitter.com/cnnbrk/status/1190272548887498753','-119.068','34.3558','2019-11-02 04:55:17'])
    data.append(['https://twitter.com/SuaveHabibi/status/1188803056667770886','-120.088','34.6119','2019-11-02 02:24:50'])
for i in range(30):
data.append(["https://twitter.com/tweeter/status/489879052157595649?ref_src=twsrc%5Etfw", str(float(lat) - 0.5 + random.random()), str(float(lng) - 0.5 + random.random()), i])
heatmap = ericMethod(data)
return data, heatmap
def ericMethod(data):
    return [['34.36930492706232', '-119.10185276526958', '1'], ['34.368365960097414', '-119.135715155373', '2'], ['34.36631810673867', '-119.16958717304676', '3'], ['34.36915429839143', '-119.2034688210281', '4']]
#heatmap = []
#for x in range(3):
# for line in data:
# heatmap.append([str(float(line[1]) - 0.05 + random.random()/10), str(float(line[2]) - 0.05 + random.random()/10), str(line[1] + line[2])])
#return
def getMartinsData(lat, lng):
return [['https://twitter.com/cnnbrk/status/1190272548887498753',
'-119.068',
'34.3558',
'2019-11-02 04:55:17'],
['https://twitter.com/SuaveHabibi/status/1188803056667770886',
'-120.088',
'34.6119',
'2019-11-02 02:24:50'],
['https://twitter.com/SuaveHabibi/status/1188803056667770886',
'-119.068',
'34.3558',
'2019-11-02 00:29:21'],
['https://twitter.com/i/web/status/1190354874724241409',
'-119.068',
'34.3558',
'2019-11-01 19:48:12'],
['https://www.instagram.com/p/B4V6x1enFuf/?igshid=1l3iqpsfixb8y',
'-119.07023461',
'34.21165938',
'2019-11-02 00:12:01'],
['https://twitter.com/i/web/status/1190403872247955456',
'-119.068',
'34.3558',
'2019-11-01 23:02:54'],
['https://twitter.com/i/web/status/1190391756384280576',
'-119.068',
'34.3558',
'2019-11-01 22:14:45'],
['https://www.instagram.com/p/B4VdNP4AOD9/?igshid=1xib49vf776ms',
'-119.068',
'34.3558',
'2019-11-01 19:55:07'],
['https://www.instagram.com/p/B4VWXDflRYN/?igshid=15zv3b3bw58mk',
'-119.0391',
'34.2231',
'2019-11-01 18:54:32'],
['https://twitter.com/i/web/status/1190317838306828288',
'-119.13599',
'34.38263',
'2019-11-01 17:21:02'],
['https://twitter.com/i/web/status/1190308350015131650',
'-119.0391',
'34.2231',
'2019-11-01 16:43:20'],
['https://twitter.com/i/web/status/1190277558857682945',
'-118.997115',
'34.302212',
'2019-11-01 14:40:58'],
['https://twitter.com/i/web/status/1190245740569694208',
'-118.997115',
'34.302212',
'2019-11-01 12:34:32'],
['https://twitter.com/i/web/status/1190219423614820352',
'-119.068',
'34.3558',
'2019-11-01 10:49:58'],
['https://twitter.com/i/web/status/1190206073430392833',
'-119.182',
'34.1913',
'2019-11-01 09:56:55'],
['https://twitter.com/i/web/status/1190198568826658816',
'-119.0391',
'34.2231',
'2019-11-01 09:27:06'],
['https://twitter.com/i/web/status/1190143035616612352',
'-118.997115',
'34.302212',
'2019-11-01 05:46:26'],
['https://twitter.com/i/web/status/1190119129048465410',
'-118.997115',
'34.302212',
'2019-11-01 04:11:26'],
['https://www.instagram.com/p/B4Tm8Nql9PN/?igshid=1wi67my54b55w',
'-119.0391',
'34.2231',
'2019-11-01 02:40:17']];
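# Added usage note (not in the original file): a conventional way to serve this app
# locally for development. The debug flag and default host/port are illustrative
# assumptions, not taken from any original deployment setup.
if __name__ == '__main__':
    app.run(debug=True)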
|
# Generated by Django 2.0.7 on 2019-06-19 16:09
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('storage', '0003_auto_20190619_1536'),
]
operations = [
migrations.AddField(
model_name='data',
name='name',
field=models.CharField(default='', max_length=30),
),
]
|
# -*- coding: utf-8 -*-
import hashlib
import logging
import pickle
import re
import urllib
import warnings
import sys
import tormysql
import tornado
from tornado import gen
from tornado.concurrent import is_future
from tornado.options import options as opts
def query_finish(result):
return result
class DB():
def __init__(self, _host, _port, _db, _db_user, _db_password):
self.pool = None
self.logger = logging.getLogger('database')
self.host = _host
self.port = _port
self.db = _db
self.db_user = _db_user
self.db_password = _db_password
self.init()
def init(self):
self.pool = tormysql.ConnectionPool(
max_connections=20, # max open connections
            idle_seconds=7200,  # connection idle timeout; 0 means no timeout
wait_connection_timeout=3, # wait connection timeout
host=self.host,
user=self.db_user,
passwd=self.db_password,
db=self.db,
charset="utf8",
cursorclass=tormysql.cursor.OriginDictCursor
)
@tornado.gen.engine
def query(self, sql, params, callback):
# sql = self.clean_sql(sql,params)
try:
if self.pool.closed:
self.init()
with (yield self.pool.Connection()) as conn:
with conn.cursor() as cursor:
yield cursor.execute(sql)
datas = cursor.fetchall()
# print datas
callback(query_finish(datas))
yield self.pool.close()
except:
callback(None)
@tornado.gen.engine
def get(self, sql, params, callback):
print callback.__name__
sql = self.clean_sql(sql,params)
print sql
try:
if self.pool.closed:
self.init()
with (yield self.pool.Connection()) as conn:
with conn.cursor() as cursor:
yield cursor.execute(sql)
datas = cursor.fetchone()
if isinstance(datas,list):
datas = datas[0]
callback(query_finish(datas))
yield self.pool.close()
except:
callback(None)
@tornado.gen.engine
def update_by_dict(self, tablename, idname, rowdict, callback):
if self.pool.closed:
self.init()
try:
with (yield self.pool.Connection()) as conn:
with conn.cursor() as cursor:
yield cursor.execute("describe %s" % tablename)
field_list = cursor.fetchall()
allowed_keys = set(row["Field"] for row in field_list)
keys = allowed_keys.intersection(rowdict)
# if len(rowdict) > len(keys):
# unknown_keys = set(rowdict) - allowed_keys
# logging.error("skipping keys: %s", ", ".join(unknown_keys))
update_list = []
for key in keys:
if(key == idname):continue
update_list.append("%s ='%s'" % (key, self.to_string(rowdict[key])))
update_str = ", ".join(update_list)
print update_str
sql = "Update %s set %s where %s = '%s'" %(tablename,update_str,idname,rowdict[idname])
print sql
yield cursor.execute(sql)
except:
yield conn.rollback()
else:
yield conn.commit()
callback(None)
def escape_str(self, str):
str = tornado.escape.utf8(str)
str = str.replace(" or ", "")
str = str.replace(" and ", "")
str = str.replace("'","\\'")
return str.replace('%', '%%')
def clean_sql(self,sql, _params):
if not _params:
return sql
if isinstance(_params, str):
_params = "'%s'" % self.to_string(_params)
sql = sql % _params
else:
params = []
values = []
for param in _params:
params.append(self.to_string(param))
print params
if values:
params = ', '.join(values)
sql = sql % params[1:-1]
else:
sql = sql % tuple(params)
return sql
def to_string(self,temp):
if isinstance(temp, basestring):
return self.escape_str(temp)
else:
return str(temp)
|
"""
# Circular queue implemented on top of a linked list
"""
import os
import logging
logger = logging.getLogger(__name__)
class Node(object):
"""创建链表节点数据结构"""
def __init__(self, value, next=None):
self.value = value
self.next = next
class Error(Exception):
"""异常处理"""
def __init__(self, msg='empty'):
super().__init__(self)
self.msg = msg
def __str__(self):
return "ErrorMsg: ".format(self.msg)
class CircularQueueByLinked(object):
"""基于链表实现循环队列"""
def __init__(self, capacity=3):
        self.tail: Node = None  # the queue keeps only one pointer (the tail); the tail's next points to the head
self.size = 0
        self.capacity = capacity
def is_empty(self):
return self.size == 0
def __len__(self):
return self.size
def enqueue(self, value):
"""入列"""
        if self.size >= self.capacity:
raise Error(msg="队列容量已满,不能插入新的元素。")
new_node = Node(value)
if self.is_empty():
            # empty queue
            new_node.next = new_node  # next points to itself
        else:
            # append after the current tail
            new_node.next = self.tail.next  # the new node points to the head (the tail's next is always the head)
            self.tail.next = new_node
        self.tail = new_node  # the tail now points to the newly added node
self.size += 1
def dequeue(self):
"""出列"""
if self.is_empty():
raise Error(msg="队列没有数据。")
head = self.tail.next
if self.size == 1:
            self.tail = None  # only one element; the queue becomes empty after removal
else:
sec_head = head.next
            self.tail.next = sec_head  # the tail now points to the new head, i.e. the removed node's next
self.size -= 1
return head
def get_head(self):
if self.is_empty():
raise Error()
        return self.tail.next  # tail.next is the head
if __name__ == '__main__':
logging.basicConfig(format="[%(asctime)s %(filename)s:%(lineno)s] %(message)s",
level=logging.INFO,
filename=None,
filemode="a")
logger.info("Start")
S = CircularQueueByLinked()
S.enqueue(1)
logger.info("{} - {} - {}".format(S.get_head(), len(S), S.size))
logger.info(S.dequeue())
S.enqueue(2)
logger.info("{} - {}".format(S.get_head(), len(S)))
logger.info(S.dequeue())
S.enqueue(3)
logger.info("{} - {}".format(S.get_head(), len(S)))
logger.info(S.dequeue())
S.enqueue(4)
logger.info("{} - {}".format(S.get_head(), len(S)))
S.enqueue(5)
S.enqueue(6)
logger.info('len: {}'.format(len(S)))
|
# import unittest
#
# from selenium import webdriver
# from selenium.webdriver.chrome.webdriver import WebDriver
#
# from webdriver_manager.chrome import ChromeDriverManager
#
# from tests.testCancelRequest import CancelChangeRequest
# from tests.testCloseRequest import CloseChangeRequests
# from tests.testCreateRequest import CreateChangeRequest
#
#
# class Test_Base_Case(unittest.TestCase):
# driver: WebDriver = None
#
# @classmethod
# def setUpClass(cls):
# cls.driver = webdriver.Chrome(ChromeDriverManager().install())
#
# @classmethod
# def tearDownClass(cls) -> None:
# cls.driver.close()
# cls.driver.quit()
# del cls
#
#
# class Test_CancelChangeRequest(Test_Base_Case):
#
# @classmethod
# def setUpClass(cls) -> None:
# super().setUpClass()
#
# def test_cancel_change(self):
# self.cancel_change = CancelChangeRequest(self.driver)
# self.cancel_change.test_cancel_change()
#
#
# class Test_CloseChangeRequests(Test_Base_Case):
#
# @classmethod
# def setUpClass(cls) -> None:
# super().setUpClass()
#
# def test_close_request(self):
# self.closeRequest = CloseChangeRequests(self.driver)
# self.closeRequest.test_close_requests()
#
#
# class Test_CreateChangeRequest(Test_Base_Case):
#
# @classmethod
# def setUpClass(cls) -> None:
# super().setUpClass()
#
# def test_create_change(self):
# self.createChange = CreateChangeRequest(self.driver)
# self.createChange.test_create_change()
#
#
# if __name__ == "__main__":
# unittest.main(warnings='ignore')
|
class TreeNode(object):
""" Definition of a binary tree node."""
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
    # max depth via bottom-up recursion
def mD(self, root):
return max(self.mD(root.left), self.mD(root.right)) + 1 if root else 0
# Tree Node
# 3
# / \
# 9 20
# / \ / \
# 7 6 15 17
# / /
# 11 14
# \
# 2
root = TreeNode(3)
root.left = TreeNode(9)
root.left.left = TreeNode(7)
root.left.right = TreeNode(6)
root.left.right.left = TreeNode(11)
root.right = TreeNode(20)
root.right.left = TreeNode(15)
root.right.right = TreeNode(17)
root.right.right.left = TreeNode(14)
root.right.right.left.right = TreeNode(2)
result = Solution().mD(root)
print(result)
# Should be 5
|
#!/usr/bin/env python
"""
Takes a list of bibcodes, fetches the ADS data as a LaTeX list of items, and writes it out to a file.
You can get all your bibcodes by going to this URL (replacing 'Birnstiel,+T.'
with your name and initial):
http://adsabs.harvard.edu/cgi-bin/nph-abs_connect?author=Birnstiel,+T.&jou_pick=NO&data_type=Custom&sort=NDATE&format=%5C%25R
select those bibcodes you want and copy them into the array below.
"""
LASTNAME='Birnstiel'
bibcodes = [
'2014ApJ...780..153B',
'2013Sci...340.1199V',
'2013A&A...550L...8B',
'2012A&A...545A..81P',
'2012A&A...544L..16W',
'2012A&A...540A..73W',
'2012A&A...539A.148B',
'2012A&A...538A.114P',
'2010A&A...516L..14B',
'2010A&A...513A..79B',
'2009A&A...503L...5B']
import urllib2
FILE = 'pub_list.tex'
format = '\\\\item %\\13M: \\\\textit{%\\t}, %\\j (%\\Y) vol. %\\V, %\\p%-\\P\\.'
bib_split = '\r\n'
URL = r'http://adsabs.harvard.edu/cgi-bin/nph-abs_connect?db_key=ALL&warnings=YES&version=1&bibcode=%s&nr_to_return=1000&start_nr=1&data_type=Custom&sort=NDATE&format=%s'%(urllib2.quote(bib_split.join(bibcodes)),urllib2.quote(format))
#
# get the data
#
response = urllib2.urlopen(URL)
html = response.read()
response.close()
#
# split in lines
#
html = html.split('\n')
#
# cut the header
#
while '\item' not in html[0]: del html[0]
while '' in html: html.remove('')
pubs = []
pub = []
for j,i in enumerate(html):
if '\\item' in i:
#
# when a new publication starts
#
pubs+= [pub]
pub = [i]
else:
#
# else: keep adding the line
#
pub += [i]
#
#
#
#
# remove empty entries
#
while [] in pubs: pubs.remove([])
#
# write results
#
f = open(FILE,'w')
# write header if necessary
#f.write(r'\begin{supertabular}{p{\textwidth}}'+'\n')
for pub in pubs:
line = ''.join(pub)
line = line.replace('&','\&')
line = line.replace(LASTNAME,'\\textbf{'+LASTNAME+'}')
# write line ending if necessary
#ending = '\\\\[-0.2cm] \n'
ending = '\n'
if 'Thesis' in line: line=line.replace(' vol. ','')
f.write(line+ending)
# write footer if necessary
#f.write(r'\end{supertabular}')
f.close()
|
#!/usr/bin/env python3
# coding: utf-8
#
"""Print the commands in some ("my") directories of the ones listed in PATH
These "my" directories are determined as:
(1) Directories beginning with my home directory (for something like
/home/me/bin) if this directory is not listed with a '-' sign at
the beginning in the environment variable MYCMDSPATH
(2) Additional directories explicitly specified in the environment
variable MYCMDSPATH
"""
import sys
import os
import os.path as osp
import fnmatch
import shutil
import argh
HOME = 'HOME'
PATH = 'PATH'
MYCMDSPATH = 'MYCMDSPATH'
def dirs_starting_with_HOME(mycmdspath):
"""
find all directories in PATH which contain HOME
:return: list of directories
"""
try:
path = os.environ[PATH]
home = os.environ[HOME]
excluded_paths = [ d[1:] for d in mycmdspath.split(':') if d.startswith('-')]
python_dir = osp.dirname(sys.executable)
excluded_paths.append(python_dir)
return [d for d in path.split(':') if d.startswith(home) and not d in excluded_paths]
except KeyError:
return []
def dirs_from_MYCMDSPATH(mycmdspath):
"""
find all directories in MYCMDSPATH
:return: list of directories
"""
try:
path = os.environ[PATH]
included_paths = [ d for d in mycmdspath.split(':') if not d.startswith('-')]
return [d for d in path.split(':') if d in included_paths]
except KeyError:
return []
def add_new_to_master_list(new, master):
"""
add elements of a new list to a master list if an element is new,
i.e. does not exist in the master list
:param new: list with new elements
:param master: list the elements are added to
:return: combined list
"""
res = master
for el in new:
if not el in res:
res.append(el)
return res
class ColumnPrinter():
"""
    A class that collects print entries and issues
    a print line to stdout once all columns are filled
"""
def __init__(self, col_num, full_width):
self.cols_in_row = col_num
self.col_width = full_width // col_num
# setup
self.entries_in_row = []
self.entry_format_str = '{0:%s}' % self.col_width
def print(self, entry=None, flush=False):
"""
Print an entry. None entries print nothing.
flush=True enforces leftover entries are printed
"""
# add entry
if entry is not None:
self.entries_in_row.append(entry)
# possibly print entries and reset
if flush or len(self.entries_in_row) >= self.cols_in_row:
printed = False
for (i,e) in enumerate(self.entries_in_row):
if i < len(self.entries_in_row) - 1:
# add spacing
e = self.entry_format_str.format(e)
print(e, end='')
printed = True
if printed:
print()
# clear entries_in_row for start of a new row
self.entries_in_row = []
def print_if_match(self, fname, patterns):
"""
Print a command if it matches one of some patterns
:param fname: the command to print
:param patterns: list of patterns to match against
"""
for p in patterns:
q = p if '*' in p else ('*%s*' % p)
if fnmatch.fnmatch(fname, q):
self.print(fname)
break # one match is enough!
@argh.arg('patterns', nargs='*', metavar='PATTERN',
help="(file glob) pattern to filter commands on")
@argh.arg('-a', '--all-files',
help="list not only executable commands, but all files")
@argh.arg('-1', '--single-column',
help="list one command per line")
@argh.arg('-d', '--list-dirs',
help="list directories which are searched")
@argh.arg('-e', '--list-env',
help="show environment variables used")
def listmycmds(patterns,
all_files=False,
single_column=False,
list_dirs=False,
list_env=False):
# Create list of dirs to search through
try:
mycmdspath = os.environ[MYCMDSPATH]
except KeyError:
mycmdspath = ''
dirs = add_new_to_master_list(dirs_starting_with_HOME(mycmdspath), [])
dirs = add_new_to_master_list(dirs_from_MYCMDSPATH(mycmdspath), dirs)
# list/show settings only
if list_dirs or list_env:
if list_dirs:
for d in dirs:
print(d)
if list_env:
for v in [HOME, PATH, MYCMDSPATH]:
print(f"${v} = {os.getenv(v)}")
sys.exit(9)
# pattern default
if patterns == []:
patterns.append('*')
# col_num default
col_num = 2
if single_column or not sys.stdout.isatty():
col_num = 1
# width default
full_width = 80 # print never wider than this
term_size = shutil.get_terminal_size(fallback=(full_width, 24))
# get_terminal_size fails sometimes... TODO: try some other way to determine?
if term_size.columns == 0:
# Special case when no columns could be determined - act like no tty
col_num = 1
full_width = min(term_size.columns, full_width)
# setup column printing
printer = ColumnPrinter(col_num, full_width)
for d in dirs:
try:
names_in_dir = os.listdir(d)
except FileNotFoundError:
names_in_dir = []
for fname in names_in_dir:
fpath = osp.join(d, fname)
if os.path.isfile(fpath):
if all_files or os.access(fpath, os.X_OK):
printer.print_if_match(fname, patterns)
printer.print(flush=True)
if __name__ == '__main__':
argh.dispatch_command(listmycmds)
|
import unittest
from katas.beta.get_number_from_string import get_number_from_string
class GetNumberFromStringTestCase(unittest.TestCase):
def test_equal_1(self):
self.assertEqual(get_number_from_string('1'), 1)
def test_equal_2(self):
self.assertEqual(get_number_from_string('123'), 123)
def test_equal_3(self):
self.assertEqual(get_number_from_string('this is number: 7'), 7)
def test_equal_4(self):
self.assertEqual(get_number_from_string('$100 000 000'), 100000000)
def test_equal_5(self):
self.assertEqual(get_number_from_string('hell5o wor6ld'), 56)
def test_equal_6(self):
self.assertEqual(
get_number_from_string('one1 two2 three3 four4 five5'), 12345
)
|
#!/usr/bin/python3
import socket
import math
import time
import pigpio # For managing any I/O
import threading
import queue
import xml.etree.ElementTree as ET
# pifighterinit does some of the initialisation for the program - getting mode set up, etc.
from pifighterinit import *
import pifighterstrip as strip
import pifightercomms as comms
import pifightermatrix as Matrix
import pifighteraccel as Accel
import queue
# Brightness settings for showing accelerations
Brightness = [0,0,2,2,4,4,15,15]
strip.InitStrip() # Get the Neopixel strip set up.
# Max acceleration rate in the display period
MaxAccel = [0,0,0]
SampleNum = 0
Matrix.Setup()
Matrix.Intro()
# If workout mode, then run through sequences of attacks.
if (int(Mode) == WorkoutMode):
# Setting up and starting the Attack thread.
print ("Setting Up Attack Seq")
PunchThread = strip.AttackSeqThread(1, "Attack Thread", 1)
PunchThread.start()
# Connect to server and send data
#data = "<User>pi-fighter User is {} </User>" .format(UserName)
# Send data to the server
#ServerSocket.sendall(bytes( data + "\n", "utf-8"))
# Receive data from the server and shut down
#received = str(sock.recv(1024), "utf-8")
#print("Sent: {}".format(data))
#print("Received: {}".format(received))
elif (int(Mode) == KickButtMode):
# Get list of Opponents
SendToServer("<OpponentList></OpponentList>", "utf-8")
time.sleep(3)
ServerData = ServerSocket.recv(10*1024)
# Decode to ASCII so it can be processed.
ServerStr = ServerData.decode('ascii')
print (ServerStr)
# Put the data into an XML Element Tree
ServerElement = ET.fromstring(ServerStr)
# Processing of Opponent Information - create a list of opponents
OpponentList=[]
if (ServerElement.tag == 'OpponentList'):
for Child in ServerElement:
#print (Child.tag + Child.text)
if (Child.tag=='Opponent'):
OpponentInfo = Child.text
OpponentList.append(OpponentInfo)
i = 0
for Opponent in OpponentList:
print ("{:d}. {}" .format(i,Opponent))
i += 1
SelectedOpponent = input("Select the number of the Opponent you would most like to spar with")
print ("{} is your selected opponent - calling them to the ring" .format(OpponentList[int(SelectedOpponent)]))
SelectedOppStr = "<SelectedOpponent>{}</SelectedOpponent>".format(OpponentList[int(SelectedOpponent)])
print (SelectedOppStr)
while(1):
# Grab Accelerometer Data
ScaledAccel = Accel.GetScaledAccelValues() # X,Y,Z acceleration
#print (ScaledAccel)
date_string = datetime.datetime.now().date()
time_string = datetime.datetime.now().time()
AttackInfo = "<Attack><Date>{}</Date><Time>{}</Time><XAccel>{:2.3}</XAccel><YAccel>{:2.3f}</YAccel><ZAccel>{:2.3f}</ZAccel></Attack>" .format(date_string, time_string, ScaledAccel[0], ScaledAccel[1], ScaledAccel[2])
logging.info(AttackInfo)
# Update Max Accel if needed - loop through XYZ acceleration
for i in range(3):
        # Track the maximum acceleration for this period
if (abs(ScaledAccel[i]) > MaxAccel[i]):
MaxAccel[i] = abs(ScaledAccel[i])
# Check to see if this cycle is one in which we need to report the max accel - configurable.
if (SampleNum % DISPLAY_FREQ == 0):
print (SampleNum, MaxAccel)
MaxAccelInfo = "<Attack><Date>{}</Date><Time>{}</Time><XAccel>{:2.3}</XAccel><YAccel>{:2.3f}</YAccel><ZAccel>{:2.3f}</ZAccel></Attack>".format(date_string, time_string, MaxAccel[0], MaxAccel[1], MaxAccel[2])
# print(MaxAccelInfo)
#ServerSocket.sendall(bytes(MaxAccelInfo, "utf-8"))
# Put the attack information into the queue for processing. Only do this if the Z Accel > 1.5 - no
# processing otherwise. The Queue sends it on to the server.
if (MaxAccel[2] > 1.5):
CommQueue.put_nowait(MaxAccelInfo)
        # Draw a pattern based on the z accel (into the chip) - scale the acceleration to a pattern/brightness index by dividing by 2.1
Matrix.DrawPattern(int(MaxAccel[2]/2.1), Brightness[int(MaxAccel[2]/2.1)])
MaxAccel[0] = 0
MaxAccel[1] = 0
MaxAccel[2] = 0
time.sleep(SAMPLE_SLEEP/1000)
SampleNum +=1
'''
# Put the data into an XML Element Tree
try:
#print ("DEBUG")
#ServerElement = ET.fromstring(ServerStr)
# Check to see if there is anything from the server
# self.request is the TCP socket connected to the client
ServerData = ServerSocket.recv(10*1024)
# Decode to ASCII so it can be processed.
ServerStr = ServerData.decode('ascii')
print (ServerStr)
# Put the data into an XML Element Tree
ServerElement = ET.fromstring(ServerStr)
# Processing of Opponent Information
if (ServerElement.tag == 'OpponentInfo'):
for Child in ServerElement:
print (Child.tag + Child.text)
if (Child.tag=='HealthPoints'):
OpponentHealthPoints = float(Child.text)
IndicateHealthPoints(OpponentHealthPoints, 1)
# Processing of an Attack from the opponent
elif (ServerElement.tag == 'Attack'):
for Child in ServerElement:
# ZAccel does the damage - ignore if less than 2g
if (Child.tag == 'ZAccel'):
Damage = float(Child.text)
if (Damage >2):
UserHealthPoints = UserHealthPoints - Damage
print (UserHealthPoints)
IndicateHealthPoints (UserHealthPoints, 0)
# Determine if Opponent is Defeated
if (UserHealthPoints < 0):
print ("Your opponent kicked your butt")
print (Child.tag + Child.text)
if (Child.tag=='HealthPoints'):
OpponentHealthPoints = float(Child.text)
IndicateHealthPoints(OpponentHealthPoints, 1)
else:
print("Unable to process messsage from server: " + ServerStr)
except BlockingIOError:
0 # Do nothing - expecting blocking issues if nothing to receive
except:
print ("Unexpected Error")
raise
# If Opponent Information Received.
if (ClientElement.tag == 'Attack'):
# Read through all the information
for Child in ClientElement:
#print (Child.tag)
# ZAccel does the damage - ignore if less than 2g
if (Child.tag == 'ZAccel'):
#print(Child.text)
Damage = float(Child.text)
if (Damage >2):
HealthPoints = HealthPoints - Damage
print (HealthPoints)
# Determine if Opponent is Defeated
if (HitPoints < 0):
if (OppoonentDefeated == FALSE):
print ("That dude is finished")
OpponentDefeated = True
# Send Opponent Information to the Client for display or other usage.
SendOpponentInfo()
'''
'''
except KeyboardInterrupt:
print ("Fine - quit see if I care - jerk")
exit()
except:
#rint("Unexpected error:", sys.exc_info()[0])
raise
'''
|
# Generated by Django 3.0.5 on 2020-11-22 15:00
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auth', '0011_update_proxy_permissions'),
('user', '0002_customuser_alias'),
]
operations = [
migrations.AddField(
model_name='customuser',
name='group',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='auth.Group'),
),
]
|
class Tree(object):
def __init__(self):
self.attribute=""
self.children={}
self.prediction=""
self.informationGain=0
def setPrediction(self,label):
self.prediction=label
def getPrediction(self):
return self.prediction
def setAttribute(self,attr):
self.attribute=attr
def getAttribute(self):
return self.attribute
|
n = int(input("Digite um numero: "))
if n < 0:
    print("Numero invalido. Digite apenas valores positivos ")
elif n == 0 or n == 1:
    print(f"{n} é um caso especial.")
else:
if n == 2:
print("2 é primo")
elif n % 2 == 0:
print (f"{n} não é primo, pois 2 é o único numero par primo")
else:
x = 3
while x < n:
if n % x == 0:
break
x = x + 2
if x == n:
print(f"{n} é Primo")
else:
print(f"{n} não é Primo, pois é divisível por {x}")
|
# unreal.AssetToolsHelpers
# https://api.unrealengine.com/INT/PythonAPI/class/AssetToolsHelpers.html
# unreal.AssetTools
# https://api.unrealengine.com/INT/PythonAPI/class/AssetTools.html
# unreal.EditorAssetLibrary
# https://api.unrealengine.com/INT/PythonAPI/class/EditorAssetLibrary.html
# All operations can be slow. The editor should not be in Play In Editor mode. These operations will not work on assets of type Level.
# Possible Directory Paths:
# '/Game/MyFolder'
# '/Game/MyFolder/'
# Possible Asset Paths:
# '/Game/MyFolder/MyAsset.MyAsset'
# '/Game/MyFolder/MyAsset'
# unreal.AssetRenameData
# https://api.unrealengine.com/INT/PythonAPI/class/AssetRenameData.html
# unreal.Package
# https://api.unrealengine.com/INT/PythonAPI/class/Package.html
# unreal.EditorLoadingAndSavingUtils
# https://api.unrealengine.com/INT/PythonAPI/class/EditorLoadingAndSavingUtils.html
# unreal.AssetImportTask
# https://api.unrealengine.com/INT/PythonAPI/class/AssetImportTask.html
# unreal.AssetTools
# https://api.unrealengine.com/INT/PythonAPI/class/AssetTools.html
# unreal.FbxImportUI
# https://api.unrealengine.com/INT/PythonAPI/class/FbxImportUI.html
# unreal.FbxMeshImportData
# https://api.unrealengine.com/INT/PythonAPI/class/FbxMeshImportData.html
# unreal.FbxStaticMeshImportData
# https://api.unrealengine.com/INT/PythonAPI/class/FbxStaticMeshImportData.html
# unreal.FbxSkeletalMeshImportData
# https://api.unrealengine.com/INT/PythonAPI/class/FbxSkeletalMeshImportData.html
# unreal.FbxAssetImportData
# https://api.unrealengine.com/INT/PythonAPI/class/FbxAssetImportData.html
# unreal.FbxAnimSequenceImportData
# https://api.unrealengine.com/INT/PythonAPI/class/FbxAnimSequenceImportData.html
# unreal.FBXAnimationLengthImportType
# https://api.unrealengine.com/INT/PythonAPI/class/FBXAnimationLengthImportType.html
# unreal.LinearColor
# https://api.unrealengine.com/INT/PythonAPI/class/LinearColor.html
# unreal.Factory
# https://api.unrealengine.com/INT/PythonAPI/class/Factory.html
import unreal
# asset_path: str : Path of asset to create
# unique_name: bool : If True, will add a number at the end of the asset name until unique
# asset_class: obj unreal.Class : The asset class
# asset_factory: obj unreal.Factory : The associated factory of the class.
# return: obj : The created asset
def createGenericAsset(asset_path='', unique_name=True, asset_class=None, asset_factory=None):
if unique_name:
asset_path, asset_name = unreal.AssetToolsHelpers.get_asset_tools().create_unique_asset_name(base_package_name=asset_path, suffix='')
if not unreal.EditorAssetLibrary.does_asset_exist(asset_path=asset_path):
path = asset_path.rsplit('/', 1)[0]
name = asset_path.rsplit('/', 1)[1]
return unreal.AssetToolsHelpers.get_asset_tools().create_asset(asset_name=name, package_path=path, asset_class=asset_class, factory=asset_factory)
return unreal.load_asset(asset_path)
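# Illustrative usage (an addition, not part of the original snippet): create a Material
# asset with createGenericAsset(). The '/Game/MyFolder/MyMaterial' path is a placeholder,
# and unreal.Material / unreal.MaterialFactoryNew are assumed to be available as in stock
# editor builds.
# my_material = createGenericAsset(
#     asset_path='/Game/MyFolder/MyMaterial',
#     unique_name=True,
#     asset_class=unreal.Material,
#     asset_factory=unreal.MaterialFactoryNew())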
# paths: List of str : Asset paths
def showAssetsInContentBrowser(paths=[]):
unreal.EditorAssetLibrary.sync_browser_to_objects(asset_paths=paths)
# paths: List of str : Asset paths
def openAssets(paths=[]):
loaded_assets = [getPackageFromPath(x) for x in paths]
unreal.AssetToolsHelpers.get_asset_tools().open_editor_for_assets(assets=loaded_assets)
# path: str : Directory path
# return: bool : True if the operation succeeds
def createDirectory(path=''):
return unreal.EditorAssetLibrary.make_directory(directory_path=path)
# from_dir: str : Directory path to duplicate
# to_dir: str : Duplicated directory path
# return: bool : True if the operation succeeds
def duplicateDirectory(from_dir='', to_dir=''):
return unreal.EditorAssetLibrary.duplicate_directory(source_directory_path=from_dir, destination_directory_path=to_dir)
# path: str : Directory path
# return: bool : True if the operation succeeds
def deleteDirectory(path=''):
return unreal.EditorAssetLibrary.delete_directory(directory_path=path)
# path: str : Directory path
# return: bool : True if the directory exists
def directoryExist(path=''):
return unreal.EditorAssetLibrary.does_directory_exist(directory_path=path)
# from_dir: str : Directory path to rename
# to_dir: str : Renamed directory path
# return: bool : True if the operation succeeds
def renameDirectory(from_dir='', to_dir=''):
return unreal.EditorAssetLibrary.rename_directory(source_directory_path=from_dir, destination_directory_path=to_dir)
# from_path str : Asset path to duplicate
# to_path: str : Duplicated asset path
# return: bool : True if the operation succeeds
def duplicateAsset(from_path='', to_path=''):
return unreal.EditorAssetLibrary.duplicate_asset(source_asset_path=from_path, destination_asset_path=to_path)
# path: str : Asset path
# return: bool : True if the operation succeeds
def deleteAsset(path=''):
return unreal.EditorAssetLibrary.delete_asset(asset_path_to_delete=path)
# path: str : Asset path
# return: bool : True if the asset exists
def assetExist(path=''):
return unreal.EditorAssetLibrary.does_asset_exist(asset_path=path)
# from_path: str : Asset path to rename
# to_path: str : Renamed asset path
# return: bool : True if the operation succeeds
def renameAsset(from_path='', to_path=''):
return unreal.EditorAssetLibrary.rename_asset(source_asset_path=from_path, destination_asset_path=to_path)
# Note: This function will also work on assets of type Level (but might be really slow if the level is huge)
# from_path: str : Asset path to duplicate
# to_path: str : Duplicate asset path
# show_dialog: bool : True if you want to show the confirm pop-up
# return: bool : True if the operation succeeds
def duplicateAssetDialog(from_path='', to_path='', show_dialog=True):
splitted_path = to_path.rsplit('/', 1)
asset_path = splitted_path[0]
asset_name = splitted_path[1]
if show_dialog:
return unreal.AssetToolsHelpers.get_asset_tools().duplicate_asset_with_dialog(asset_name=asset_name, package_path=asset_path, original_object=getPackageFromPath(from_path))
else:
        return unreal.AssetToolsHelpers.get_asset_tools().duplicate_asset(asset_name=asset_name, package_path=asset_path, original_object=getPackageFromPath(from_path))
# Note: This function will also work on assets of type Level (but might be really slow if the level is huge)
# from_path: str : Asset path to rename
# to_path: str : Renamed asset path
# show_dialog: bool : True if you want to show the confirm pop-up
# return: bool : True if the operation succeeds
def renameAssetDialog(from_path='', to_path='', show_dialog=True):
splitted_path = to_path.rsplit('/', 1)
asset_path = splitted_path[0]
asset_name = splitted_path[1]
rename_data = unreal.AssetRenameData(asset=getPackageFromPath(from_path), new_package_path=asset_path, new_name=asset_name)
if show_dialog:
return unreal.AssetToolsHelpers.get_asset_tools().rename_assets_with_dialog(assets_and_names=[rename_data])
else:
return unreal.AssetToolsHelpers.get_asset_tools().rename_assets(assets_and_names=[rename_data])
# path: str : Asset path
# return: bool : True if the operation succeeds
def saveAsset(path='', force_save=True):
return unreal.EditorAssetLibrary.save_asset(asset_to_save=path, only_if_is_dirty = not force_save)
# path: str : Directory path
# return: bool : True if the operation succeeds
def saveDirectory(path='', force_save=True, recursive=True):
return unreal.EditorAssetLibrary.save_directory(directory_path=path, only_if_is_dirty=not force_save, recursive=recursive)
# path: str : Asset path
# return: obj : The loaded asset
def getPackageFromPath(path):
return unreal.load_package(name=path)
# return: obj List : The assets that need to be saved
def getAllDirtyPackages():
packages = []
for x in unreal.EditorLoadingAndSavingUtils.get_dirty_content_packages():
packages.append(x)
for x in unreal.EditorLoadingAndSavingUtils.get_dirty_map_packages():
packages.append(x)
return packages
# show_dialog: bool : True if you want to see the confirm pop-up
# return: bool : True if the operation succeeds
def saveAllDirtyPackages(show_dialog=False):
if show_dialog:
return unreal.EditorLoadingAndSavingUtils.save_dirty_packages_with_dialog(save_map_packages=True, save_content_packages=True)
else:
return unreal.EditorLoadingAndSavingUtils.save_dirty_packages(save_map_packages=True, save_content_packages=True)
# show_dialog: bool : True if you want to see the confirm pop-up
# return: bool : True if the operation succeeds
def savePackages(packages=[], show_dialog=False):
    # Note: only_dirty=True does not appear to work properly at the moment, so always pass False here.
    if show_dialog:
        return unreal.EditorLoadingAndSavingUtils.save_packages_with_dialog(packages_to_save=packages, only_dirty=False)
    else:
        return unreal.EditorLoadingAndSavingUtils.save_packages(packages_to_save=packages, only_dirty=False)
# filename: str : Windows file fullname of the asset you want to import
# destination_path: str : Asset path
# options: obj : Import option object. Can be None for assets that do not usually show an import pop-up (e.g. Sound, Texture, etc.)
# return: obj : The import task object
def buildImportTask(filename='', destination_path='', options=None):
task = unreal.AssetImportTask()
task.set_editor_property('automated', True)
task.set_editor_property('destination_name', '')
task.set_editor_property('destination_path', destination_path)
task.set_editor_property('filename', filename)
task.set_editor_property('replace_existing', True)
task.set_editor_property('save', True)
task.set_editor_property('options', options)
return task
# tasks: obj List : The import tasks object. You can get them from buildImportTask()
# return: str List : The paths of successfully imported assets
def executeImportTasks(tasks=[]):
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks(tasks)
imported_asset_paths = []
for task in tasks:
for path in task.get_editor_property('imported_object_paths'):
imported_asset_paths.append(path)
return imported_asset_paths
# return: obj : Import option object. The basic import options for importing a static mesh
def buildStaticMeshImportOptions():
options = unreal.FbxImportUI()
# unreal.FbxImportUI
options.set_editor_property('import_mesh', True)
options.set_editor_property('import_textures', False)
options.set_editor_property('import_materials', False)
options.set_editor_property('import_as_skeletal', False) # Static Mesh
# unreal.FbxMeshImportData
options.static_mesh_import_data.set_editor_property('import_translation', unreal.Vector(0.0, 0.0, 0.0))
options.static_mesh_import_data.set_editor_property('import_rotation', unreal.Rotator(0.0, 0.0, 0.0))
options.static_mesh_import_data.set_editor_property('import_uniform_scale', 1.0)
# unreal.FbxStaticMeshImportData
options.static_mesh_import_data.set_editor_property('combine_meshes', True)
options.static_mesh_import_data.set_editor_property('generate_lightmap_u_vs', True)
options.static_mesh_import_data.set_editor_property('auto_generate_collision', True)
return options
# return: obj : Import option object. The basic import options for importing a skeletal mesh
def buildSkeletalMeshImportOptions():
options = unreal.FbxImportUI()
# unreal.FbxImportUI
options.set_editor_property('import_mesh', True)
options.set_editor_property('import_textures', True)
options.set_editor_property('import_materials', True)
options.set_editor_property('import_as_skeletal', True) # Skeletal Mesh
# unreal.FbxMeshImportData
options.skeletal_mesh_import_data.set_editor_property('import_translation', unreal.Vector(0.0, 0.0, 0.0))
options.skeletal_mesh_import_data.set_editor_property('import_rotation', unreal.Rotator(0.0, 0.0, 0.0))
options.skeletal_mesh_import_data.set_editor_property('import_uniform_scale', 1.0)
# unreal.FbxSkeletalMeshImportData
options.skeletal_mesh_import_data.set_editor_property('import_morph_targets', True)
options.skeletal_mesh_import_data.set_editor_property('update_skeleton_reference_pose', False)
return options
# skeleton_path: str : Skeleton asset path of the skeleton that will be used to bind the animation
# return: obj : Import option object. The basic import options for importing an animation
def buildAnimationImportOptions(skeleton_path=''):
options = unreal.FbxImportUI()
# unreal.FbxImportUI
options.set_editor_property('import_animations', True)
options.skeleton = unreal.load_asset(skeleton_path)
# unreal.FbxMeshImportData
options.anim_sequence_import_data.set_editor_property('import_translation', unreal.Vector(0.0, 0.0, 0.0))
options.anim_sequence_import_data.set_editor_property('import_rotation', unreal.Rotator(0.0, 0.0, 0.0))
options.anim_sequence_import_data.set_editor_property('import_uniform_scale', 1.0)
# unreal.FbxAnimSequenceImportData
options.anim_sequence_import_data.set_editor_property('animation_length', unreal.FBXAnimationLengthImportType.FBXALIT_EXPORTED_TIME)
options.anim_sequence_import_data.set_editor_property('remove_redundant_keys', False)
return options
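# Illustrative end-to-end import sketch (an addition, not from the original snippet):
# build an import task for a static mesh FBX and execute it. The source file path and
# destination folder below are placeholder assumptions.
if __name__ == '__main__':
    example_task = buildImportTask(
        filename='C:/Temp/SM_Example.fbx',       # placeholder FBX on disk
        destination_path='/Game/StaticMeshes',   # placeholder content folder
        options=buildStaticMeshImportOptions())
    print(executeImportTasks([example_task]))    # prints the imported asset paths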
|
import torch
import torch.nn as nn
import math
from IPython import embed
class ReconstructionLoss(torch.nn.Module):
def __init__(self):
super(ReconstructionLoss, self).__init__()
self.tanh = nn.Tanh()
def forward(self, ori_embeds, model_embeds, embed_l):
# (B, L, E)
temp = torch.norm(model_embeds - self.tanh(ori_embeds), dim=2) ** 2
lrec = temp.sum(1) / embed_l
return lrec.mean() # combine the loss across the batch
class TransformationLoss(torch.nn.Module):
def __init__(self, dim, l, use_cuda=False):
super(TransformationLoss, self).__init__()
self.eye = torch.eye(dim, device='cuda' if use_cuda else 'cpu')
self.l = l
def forward(self, W):
temp = self.l * torch.norm(W - self.eye) ** 2
return temp
class AdvBasicLoss(torch.nn.Module):
def __init__(self, trans_dim, trans_param, num_no_adv=None, tot_epochs=20, rho_adv=False, gamma=10,
rec_weight=1, semi_sup=False, use_cuda=False):
super(AdvBasicLoss, self).__init__()
self.rec_loss = ReconstructionLoss()
self.trans_loss = TransformationLoss(dim=trans_dim, l=trans_param, use_cuda=use_cuda)
self.adv_param = 0. # start with the adversary weight set to 0
self.semi_sup = semi_sup
if self.semi_sup:
self.stance_loss = nn.CrossEntropyLoss(ignore_index=3)
else:
self.stance_loss = nn.CrossEntropyLoss()
self.topic_loss = nn.CrossEntropyLoss()
        # The adversary is not used for the first num_no_adv epochs
self.use_adv = num_no_adv == 0
self.num_no_adv = num_no_adv
self.tot_epochs = tot_epochs
self.rec_weight = rec_weight
self.i = 0
self.rho_adv = rho_adv
self.gamma = gamma
self.use_cuda = use_cuda
def update_param_using_p(self, epoch):
if epoch >= self.num_no_adv:
self.use_adv = True
tot_epochs_for_calc = self.tot_epochs - self.num_no_adv
epoch_for_calc = epoch - self.num_no_adv
p = epoch_for_calc/tot_epochs_for_calc
self.adv_param = 2/(1 + math.exp(-self.gamma*p)) - 1
else:
self.use_adv = False
def forward(self, pred_info, labels, compute_adv_loss=True, print_=False):
lrec = self.rec_weight * self.rec_loss(ori_embeds=pred_info['text'], model_embeds=pred_info['recon_embeds'],
embed_l=pred_info['text_l'])
lrec_topic = self.rec_weight * self.rec_loss(ori_embeds=pred_info['topic'], model_embeds=pred_info['topic_recon_embeds'],
embed_l=pred_info['topic_l'])
ltrans = self.trans_loss(W=pred_info['W'])
llabel = self.stance_loss(pred_info['stance_pred'], labels)
ladv = torch.tensor(0)
adversarial_loss = torch.tensor(0)
if self.use_cuda:
ladv = ladv.to('cuda')
adversarial_loss = adversarial_loss.to('cuda')
        if compute_adv_loss:  # ladv is computed only on the training set; otherwise it is left at 0.
ladv = self.topic_loss(pred_info['adv_pred'], pred_info['topic_i'])
if self.rho_adv:
adversarial_loss = self.adv_param * self.topic_loss(pred_info['adv_pred_'], pred_info['topic_i'])
else:
adversarial_loss = self.topic_loss(pred_info['adv_pred_'], pred_info['topic_i'])
if print_:
print("lrec - {}, lrec_topic - {}, ltrans - {}, llabel - {}, ladv - {}".format(lrec, lrec_topic, ltrans, llabel, ladv))
self.i += 1
if self.use_adv:
if self.i % 100 == 0:
print("loss: {:.4f} + {:.4f} + {:.4f} - {:.4f}; adv: {:.4f}".format(lrec.item(), ltrans.item(), llabel.item(),
(self.adv_param * ladv).item(), ladv))
return lrec + lrec_topic + ltrans + llabel - self.adv_param * ladv, adversarial_loss
else:
if self.i % 100 == 0:
print("loss: {:.4f} + {:.4f} + {:.4f}; adv: {:.4f}".format(lrec.item(), ltrans.item(), llabel.item(),
ladv))
return lrec + lrec_topic + ltrans + llabel, adversarial_loss
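# Minimal shape sanity check (an addition, not part of the original module). The sizes
# are illustrative: embeddings are (B, L, E) tensors and embed_l holds per-example
# sequence lengths, matching how the losses above index their inputs.
if __name__ == '__main__':
    B, L, E = 2, 5, 8
    ori = torch.randn(B, L, E)                             # "original" embeddings
    recon = torch.randn(B, L, E)                           # reconstructed embeddings
    lengths = torch.full((B,), float(L))                   # per-example lengths
    print(ReconstructionLoss()(ori, recon, lengths))       # scalar reconstruction loss
    print(TransformationLoss(dim=E, l=0.1)(torch.eye(E)))  # zero when W is the identity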
|
"""
Line plot:
2.1 Note:
    1. You can provide only the y data, or several x, y pairs.
"""
from matplotlib import pyplot as plt
import numpy as np
x = np.arange(10)
# Plotting:
fig = plt.figure()
ax = fig.add_subplot()
ax.plot(x, x**2, 'r-.', x, 2*x, 'b-')
plt.show()
|
from django.test import TestCase
from django.contrib.auth.models import User
from django.utils import timezone
from django.dispatch import receiver
from projects.models import (
Dependency, ProjectDependency, ProjectBuild, generate_projectbuild_id,
ProjectBuildDependency, projectbuild_finished)
from .factories import (
ProjectFactory, DependencyFactory, ProjectBuildFactory)
from jenkins.tests.factories import JobFactory, BuildFactory, ArtifactFactory
class DependencyTest(TestCase):
def test_instantiation(self):
"""We can create Dependencies."""
job = JobFactory.create()
Dependency.objects.create(
name="My Dependency", job=job)
def test_get_current_build(self):
"""
Dependency.get_current_build should return the most recent build that
has completed and was SUCCESSful.
"""
build1 = BuildFactory.create()
build2 = BuildFactory.create(
phase="FINISHED", status="SUCCESS", job=build1.job)
dependency = DependencyFactory.create(job=build1.job)
self.assertEqual(build2, dependency.get_current_build())
def test_get_current_build_with_no_builds(self):
"""
If there are no current builds for a given dependency, then we should
get None.
"""
dependency = DependencyFactory.create()
self.assertEqual(None, dependency.get_current_build())
def test_get_parameters(self):
"""
Dependency.get_build_parameters should return a dictionary parsed from
the parameters property.
"""
dependency = DependencyFactory.create(
parameters="THISVALUE=testing\nTHATVALUE=55")
self.assertEqual(
{"THISVALUE": "testing", "THATVALUE": "55"},
dependency.get_build_parameters())
def test_get_parameters_with_no_parameters(self):
"""
Dependency.get_build_parameters should None if there are no build
parameters.
"""
dependency = DependencyFactory.create(parameters=None)
self.assertIsNone(dependency.get_build_parameters())
class ProjectDependencyTest(TestCase):
def test_instantiation(self):
"""We can create ProjectDependency objects."""
project = ProjectFactory.create()
dependency = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency)
self.assertEqual(
set([dependency]), set(project.dependencies.all()))
def test_auto_track_build(self):
"""
If we create a new build for a dependency of a Project, and the
ProjectDependency is set to auto_track then the current_build should be
updated to reflect the new build.
"""
build1 = BuildFactory.create()
dependency = DependencyFactory.create(job=build1.job)
project = ProjectFactory.create()
project_dependency = ProjectDependency.objects.create(
project=project, dependency=dependency)
project_dependency.current_build = build1
project_dependency.save()
build2 = BuildFactory.create(job=build1.job)
# Reload the project dependency
project_dependency = ProjectDependency.objects.get(
pk=project_dependency.pk)
self.assertEqual(build2, project_dependency.current_build)
def test_new_build_with_no_auto_track_build(self):
"""
If we create a new build for a dependency of a Project, and the
ProjectDependency is not set to auto_track then the current_build
should not be updated.
"""
build1 = BuildFactory.create()
dependency = DependencyFactory.create(job=build1.job)
project = ProjectFactory.create()
project_dependency = ProjectDependency.objects.create(
project=project, dependency=dependency, auto_track=False)
project_dependency.current_build = build1
project_dependency.save()
BuildFactory.create(job=build1.job)
# Reload the project dependency
project_dependency = ProjectDependency.objects.get(
pk=project_dependency.pk)
self.assertEqual(build1, project_dependency.current_build)
class ProjectTest(TestCase):
def test_get_current_artifacts(self):
"""
Project.get_current_artifacts returns the current set of artifacts
for this project.
"""
project = ProjectFactory.create()
job = JobFactory.create()
dependency = DependencyFactory.create(job=job)
ProjectDependency.objects.create(
project=project, dependency=dependency)
build1 = BuildFactory.create(job=job)
build2 = BuildFactory.create(job=job)
ArtifactFactory.create(build=build1)
artifact2 = ArtifactFactory.create(build=build2)
self.assertEqual([artifact2], list(project.get_current_artifacts()))
class ProjectBuildTest(TestCase):
def setUp(self):
self.project = ProjectFactory.create()
self.user = User.objects.create_user("testing")
def test_generate_projectbuild_id(self):
"""
generate_projectbuild_id should generate an id using the date and the
sequence of builds on that date.
e.g. 20140312.1 is the first build on the 12th March 2014
"""
build1 = ProjectBuildFactory.create()
expected_build_id = timezone.now().strftime("%Y%m%d.1")
self.assertEqual(expected_build_id, generate_projectbuild_id(build1))
build2 = ProjectBuildFactory.create(project=build1.project)
expected_build_id = timezone.now().strftime("%Y%m%d.2")
self.assertEqual(expected_build_id, generate_projectbuild_id(build2))
def test_instantiation(self):
"""
We can create ProjectBuilds.
"""
projectbuild = ProjectBuild.objects.create(
project=self.project, requested_by=self.user)
self.assertEqual(self.user, projectbuild.requested_by)
self.assertIsNotNone(projectbuild.requested_at)
self.assertIsNone(projectbuild.ended_at)
self.assertEqual("UNKNOWN", projectbuild.status)
self.assertEqual("UNKNOWN", projectbuild.phase)
def test_build_id(self):
"""
When we create a project build, we should create a unique id for the
build.
"""
projectbuild = ProjectBuildFactory.create()
expected_build_id = timezone.now().strftime("%Y%m%d.0")
self.assertEqual(expected_build_id, projectbuild.build_id)
def test_projectbuild_updates_when_build_created(self):
"""
If we have a ProjectBuild with a dependency, which is associated with a
job, and we get a build from that job, then if the build_id is correct,
we should associate the build dependency with that build.
"""
project = ProjectFactory.create()
dependency1 = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency1)
dependency2 = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency2)
from projects.helpers import build_project
projectbuild = build_project(project, queue_build=False)
build1 = BuildFactory.create(
job=dependency1.job, build_id=projectbuild.build_id)
build_dependencies = ProjectBuildDependency.objects.filter(
projectbuild=projectbuild)
self.assertEqual(2, build_dependencies.count())
dependency = build_dependencies.get(dependency=dependency1)
self.assertEqual(build1, dependency.build)
dependency = build_dependencies.get(dependency=dependency2)
self.assertIsNone(dependency.build)
def test_project_build_status_when_all_dependencies_have_builds(self):
"""
When we have FINISHED builds for all the dependencies, the projectbuild
state should be FINISHED.
"""
project = ProjectFactory.create()
dependency1 = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency1)
dependency2 = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency2)
from projects.helpers import build_project
projectbuild = build_project(project, queue_build=False)
for job in [dependency1.job, dependency2.job]:
BuildFactory.create(
job=job, build_id=projectbuild.build_id, phase="FINISHED")
projectbuild = ProjectBuild.objects.get(pk=projectbuild.pk)
self.assertEqual("SUCCESS", projectbuild.status)
self.assertEqual("FINISHED", projectbuild.phase)
self.assertIsNotNone(projectbuild.ended_at)
def test_project_build_sends_finished_signal(self):
"""
When we set the projectbuild status to finished, we should signal this.
"""
@receiver(projectbuild_finished, sender=ProjectBuild)
def handle_signal(sender, projectbuild, **kwargs):
self.projectbuild = projectbuild
project = ProjectFactory.create()
dependency1 = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency1)
dependency2 = DependencyFactory.create()
ProjectDependency.objects.create(
project=project, dependency=dependency2)
from projects.helpers import build_project
projectbuild = build_project(project, queue_build=False)
for job in [dependency1.job, dependency2.job]:
BuildFactory.create(
job=job, build_id=projectbuild.build_id, phase="FINISHED")
self.assertEqual(projectbuild, self.projectbuild)
|
from abc import ABCMeta, abstractmethod
import config
class Response(metaclass=ABCMeta):
@property
@abstractmethod
def _message(self):
pass
@property
def message(self):
message_ = self._message + config.MESSAGE_DELIMINATOR
print(message_)
return message_.encode("ascii")
class Ack(Response):
@property
def _message(self):
return "HELLO"
class AuthYes(Response):
@property
def _message(self):
return "AUTHYES"
class AuthNo(Response):
@property
def _message(self):
return "AUTHNO"
class SignIn(Response):
def __init__(self, user):
self.user = user
@property
def _message(self):
return "SIGNIN:" + self.user
class SignOff(Response):
def __init__(self, user):
self.user = user
@property
def _message(self):
return "SIGNOFF:" + self.user
class UserList(Response):
def __init__(self, clients):
self.clients = clients
@property
def _message(self):
return ", ".join([client.username for client in self.clients])
class UserMessage(Response):
def __init__(self, from_client, message):
self.from_client = from_client
self.user_message = message
@property
def _message(self):
return "From:" + self.from_client + ":" + self.user_message
class UserExists(Response):
@property
def _message(self):
return "UNIQNO"
class Info(Response):
def __init__(self, info):
self.info = info
@property
def _message(self):
return "# " + self.info
|
#!/usr/bin/python3
"""
MODEL_CITY MODULE
Provides the class and methods to interact with the cities table in the DB.
"""
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class City(Base):
"""Represents a City in the DB"""
__tablename__ = 'cities'
id = Column(Integer, autoincrement="auto",
nullable=False, primary_key=True)
name = Column(String(128), nullable=False)
state_id = Column(Integer, ForeignKey('states.id'), nullable=False)
|
class Solution(object):
def maxProfit(self, prices):
if prices is None or len(prices)==0: return 0
#Min, Max
max_profits = [[0,0], [0,0]]
latest = max_profits[0]
#Find min-max ranges in array
for i in range(len(prices)):
if prices[i] <= prices[latest[0]]:
latest = [i, i]
max_profits.append(latest)
elif prices[i] > prices[latest[1]]:
latest[1] = i
return max(map(lambda a: prices[a[1]]-prices[a[0]], max_profits))
print(Solution().maxProfit([7, 1, 5, 3, 6, 4]))#5
print(Solution().maxProfit([7, 6, 4, 3, 1]))#0
print(Solution().maxProfit([]))#0
print(Solution().maxProfit([2,4,1]))#2
print(Solution().maxProfit([4,7,2,1]))#3
|
#!/usr/bin/env python
import json
import sys
import urllib2
twitter_url = 'http://search.twitter.com/search.json?q=from:Hashtag_Fresno'
response = urllib2.urlopen(twitter_url)
data = json.loads(response.read())
for tweet in data['results'][:5]:
print tweet['created_at']
print tweet['text']
print '' # blank line between tweets
|
#%%
import requests
from bs4 import BeautifulSoup
import smtplib
import time
URL = r"https://www.bestbuy.com/site/acer-s271hl-27-led-fhd-monitor-black/6051018.p?skuId=6051018"
headers = {"User-Agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.117 Safari/537.36'}
page = requests.get(url=URL, headers= headers)
soup = BeautifulSoup(page.content, 'html.parser')
title = soup.find("div", {"class": "shop-product-title"}).get_text()
title = title.split("if ('")[0]
price = soup.find("div", {"class": "priceView-hero-price priceView-customer-price"}).get_text()
price = float(price[1:4])
price_list = []
price_list.append(price)
def check_price():
page = requests.get(url=URL, headers= headers)
soup = BeautifulSoup(page.content, 'html.parser')
title = soup.find("div", {"class": "shop-product-title"}).get_text()
title = title.split("if ('")[0]
price = soup.find("div", {"class": "priceView-hero-price priceView-customer-price"}).get_text()
price = float(price[1:4])
if price != price_list[-1]:
price_list.append(price)
if(price < price_list[-2]):
send_email()
print("email sent!")
else:
print('No Price Update!')
def send_email():
server = smtplib.SMTP('smtp.gmail.com',587)
server.ehlo()
server.starttls()
server.ehlo()
server.login('*******@gmail.com','**********')
subject = "Acer Monitor Price Fell Down!"
body = f"Acer monitor's price in Best Buy fell down. New price is {price_list[-1]}"
msg = f"Subject: {subject}\n\n{body}"
server.sendmail(
'**********@gmail.com',
'********@hotmail.com',
msg
)
print("Email sent")
server.quit()
while(True):
check_price()
time.sleep(60*60)
|
import flask
import multiprocessing
import os
import tempfile
from six.moves import cStringIO as StringIO
from smqtk.utils import SmqtkObject
from smqtk.utils import file_utils
script_dir = os.path.dirname(os.path.abspath(__file__))
class FileUploadMod (SmqtkObject, flask.Blueprint):
"""
Flask blueprint for file uploading.
"""
def __init__(self, name, parent_app, working_directory, url_prefix=None):
"""
Initialize uploading module
:param parent_app: Parent Flask app
:type parent_app: smqtk.Web.search_app.base_app.search_app
:param working_directory: Directory for temporary file storage during
upload up to the time a user takes control of the file.
:type working_directory: str
"""
super(FileUploadMod, self).__init__(
name, __name__,
static_folder=os.path.join(script_dir, 'static'),
url_prefix=url_prefix
)
# TODO: Thread safety
self.parent_app = parent_app
self.working_dir = working_directory
# TODO: Move chunk storage to database for APACHE multiprocessing
# File chunk aggregation
# Top level key is the file ID of the upload. The dictionary
# underneath that is the index ID'd chunks. When all chunks are
# present, the file is written and the entry in this map is removed.
#: :type: dict of (str, dict of (int, StringIO))
self._file_chunks = {}
# Lock per file ID so as to not collide when uploading multiple chunks
#: :type: dict of (str, RLock)
self._fid_locks = {}
# FileID to temporary path that a completed file is located at.
self._completed_files = {}
#
# Routing
#
@self.route('/upload_chunk', methods=["POST"])
@self.parent_app.module_login.login_required
def upload_file():
"""
Handle arbitrary file upload to OS temporary file storage, recording
file upload completions.
"""
form = flask.request.form
self._log.debug("POST form contents: %s" % str(flask.request.form))
fid = form['flowIdentifier']
current_chunk = int(form['flowChunkNumber'])
total_chunks = int(form['flowTotalChunks'])
filename = form['flowFilename']
#: :type: FileStorage
chunk_data = flask.request.files['file']
with self._fid_locks.setdefault(fid, multiprocessing.RLock()):
# Create new entry in chunk map / add to existing entry
# - Need to explicitly copy the buffered data as the file object
# closes between chunk messages.
self._file_chunks.setdefault(fid, {})[current_chunk] \
= StringIO(chunk_data.read())
message = "Uploaded chunk #%d of %d for file '%s'" \
% (current_chunk, total_chunks, filename)
if total_chunks == len(self._file_chunks[fid]):
self._log.debug("[%s] Final chunk uploaded",
filename+"::"+fid)
                    # have all chunks in memory now
try:
# Combine chunks into single file
file_ext = os.path.splitext(filename)[1]
file_saved_path = self._write_file_chunks(
self._file_chunks[fid], file_ext
)
self._log.debug("[%s] saved from chunks: %s",
filename+"::"+fid, file_saved_path)
# now in file, free up dict memory
self._completed_files[fid] = file_saved_path
message = "[%s] Completed upload" % (filename+"::"+fid)
except IOError as ex:
self._log.debug("[%s] Failed to write combined chunks",
filename+"::"+fid)
message = "Failed to write out combined chunks for " \
"file %s: %s" % (filename, str(ex))
raise RuntimeError(message)
except NotImplementedError as ex:
message = "Encountered non-implemented code path: %s" \
% str(ex)
raise RuntimeError(message)
finally:
# remove chunk map entries
del self._file_chunks[fid]
del self._fid_locks[fid]
# Flow only displays return as a string, so just returning the
# message component.
return message
@self.route("/completed_uploads")
@self.parent_app.module_login.login_required
def completed_uploads():
return flask.jsonify(self._completed_files)
def upload_post_url(self):
"""
:return: The url string to give to the JS upload zone for POSTing file
chunks.
:rtype: str
"""
return self.url_prefix + '/upload_chunk'
def get_path_for_id(self, file_unique_id):
"""
Get the path to the temp file that was uploaded.
        It is the user's responsibility to remove this file when it is done
        being used, or move it elsewhere.
:param file_unique_id: Unique ID of the uploaded file
:type file_unique_id: str
:return: The path to the complete uploaded file.
"""
return self._completed_files[file_unique_id]
def clear_completed(self, file_unique_id):
"""
Clear the completed file entry in our cache. This should be called after
taking responsibility for an uploaded file.
This does NOT delete the file.
:raises KeyError: If the given unique ID does not correspond to an
entry in our completed cache.
:param file_unique_id: Unique ID of an uploaded file to clear from the
completed cache.
:type file_unique_id: str
"""
del self._completed_files[file_unique_id]
# noinspection PyMethodMayBeStatic
def _write_file_chunks(self, chunk_map, file_extension=''):
"""
Given a mapping of chunks, write their contents to a temporary file,
returning the path to that file.
Returned file path should be manually removed by the user.
:param chunk_map: Mapping of integer index to file-like chunk
:type chunk_map: dict of (int, StringIO)
:param file_extension: String extension to suffix the temporary file
with
:type file_extension: str
:raises OSError: OS problems creating temporary file or writing it out.
:return: Path to temporary combined file
:rtype: str
"""
# Make sure write dir exists...
if not os.path.isdir(self.working_dir):
file_utils.safe_create_dir(self.working_dir)
tmp_fd, tmp_path = tempfile.mkstemp(file_extension,
dir=self.working_dir)
self._log.debug("Combining chunks into temporary file: %s", tmp_path)
tmp_file = open(tmp_path, 'wb')
for idx, chunk in sorted(chunk_map.items(), key=lambda p: p[0]):
data = chunk.read()
tmp_file.write(data)
tmp_file.close()
return tmp_path
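# Hedged wiring sketch (added for illustration; the app object, directory and
# prefix below are assumptions, not part of this module): the blueprint is
# constructed with the parent Flask app and then registered on it, after which
# flow.js-style clients POST their chunks to upload_post_url().
#
#   upload_mod = FileUploadMod('upload', app, '/tmp/uploads', url_prefix='/upload')
#   app.register_blueprint(upload_mod)
#   post_url = upload_mod.upload_post_url()   # -> '/upload/upload_chunk'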
|
# Copyright (c) 2020 Adam Souzis
# SPDX-License-Identifier: MIT
from __future__ import absolute_import
import os.path
import six
from six.moves.configparser import ConfigParser
from supervisor import xmlrpc
try:
from xmlrpc.client import ServerProxy, Fault
except ImportError:
from xmlrpclib import Server as ServerProxy
from xmlrpclib import Fault
from ..configurator import Configurator
from ..support import abspath
# support unix domain socket connections
# (we want to connect the same way as supervisorctl does for security and to automatically support multiple instances)
def getServerProxy(serverurl=None, username=None, password=None):
# copied from https://github.com/Supervisor/supervisor/blob/b52f49cff287c4d821c2c54d7d1afcd397b699e5/supervisor/options.py#L1718
return ServerProxy(
# dumbass ServerProxy won't allow us to pass in a non-HTTP url,
# so we fake the url we pass into it and always use the transport's
# 'serverurl' to figure out what to attach to
"http://127.0.0.1",
transport=xmlrpc.SupervisorTransport(username, password, serverurl),
)
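# Illustrative use (added; the socket path is an assumption): connect through
# the same unix domain socket that supervisord exposes, exactly as
# supervisorctl would, and query the daemon state.
#
#   proxy = getServerProxy("unix:///tmp/supervisor.sock")
#   print(proxy.supervisor.getState())  # e.g. {'statecode': 1, 'statename': 'RUNNING'}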
def _reloadConfig(server, name):
result = server.supervisor.reloadConfig()
return any((name in changed) for changed in result[0])
class SupervisorConfigurator(Configurator):
def run(self, task):
host = task.vars["HOST"]
# if homeDir is a relative path it will be relative to the baseDir of the host instance
# which might be different from the current directory if host is an external instance
confDir = abspath(host.context, host["homeDir"])
conf = host["conf"]
name = task.vars["SELF"]["name"]
confPath = os.path.join(confDir, "programs", name + ".conf")
if six.PY3:
parser = ConfigParser(inline_comment_prefixes=(";", "#"), strict=False)
parser.read_string(conf)
else:
parser = ConfigParser()
parser.readfp(six.StringIO(conf))
serverConfig = dict(parser.items("supervisorctl", vars=dict(here=confDir)))
serverConfig.pop("here", None)
server = getServerProxy(**serverConfig)
error = None
op = task.configSpec.operation
modified = False
try:
if op == "start":
server.supervisor.startProcess(name)
modified = True
elif op == "stop":
server.supervisor.stopProcess(name)
modified = True
elif op == "delete":
if os.path.exists(confPath):
os.remove(confPath)
modified = _reloadConfig(server, name)
elif op == "configure":
program = task.vars["SELF"]["program"]
programDir = os.path.dirname(confPath)
task.logger.debug("writing %s", confPath)
if not os.path.isdir(programDir):
os.makedirs(programDir)
with open(confPath, "w") as conff:
conf = "[program:%s]\n" % name
conf += "\n".join("%s= %s" % (k, v) for k, v in program.items())
if "environment" not in program:
conf += "\nenvironment= "
conf += ",".join(
'%s="%s"' % (k, v.replace("%", "%%"))
for (k, v) in task.getEnvironment(True).items()
)
conff.write(conf)
modified = _reloadConfig(server, name)
server.supervisor.addProcessGroup(name)
except Fault as err:
if (
not (op == "start" and err.faultCode == 60) # ok, 60 == ALREADY_STARTED
and not (op == "stop" and err.faultCode == 70) # ok, 70 == NOT_RUNNING
and not ( # ok, 90 == ALREADY_ADDED
op == "configure" and err.faultCode == 90
)
):
error = "supervisor error: " + str(err)
yield task.done(success=not error, modified=modified, result=error)
|
#!/usr/bin/env python
import rospy
from geometry_msgs.msg import Twist
from turtlesim.msg import Pose
from math import pow, sqrt, atan2
import numpy
x= 0
y= 0
theta = 0
velocityPublisher = rospy.Publisher('/turtle1/cmd_vel', Twist, queue_size=10)
def callback(data):
    rospy.loginfo('My coordinates before receiving the new ones are x= %.9f && y= %.9f' %(x, y))
rospy.loginfo(rospy.get_caller_id() + 'I heard coordinates x= %.9f && y= %.9f' %(data.x, data.y))
vel_msg = Twist()
    #First publish to cmd_vel
vel_msg.linear.x = 0
vel_msg.linear.y = 0
vel_msg.linear.z = 0
vel_msg.angular.x = 0
vel_msg.angular.y = 0
zAngle = getAngle(x, y, data.x, data.y) - theta
    rospy.loginfo('resulting angle = %f' % zAngle)
vel_msg.angular.z = zAngle
velocityPublisher.publish(vel_msg)
rospy.loginfo(vel_msg)
    #The process sleeps so the turtle has time to rotate
rospy.sleep(1.5)
    #Second publish to cmd_vel
vel_msg.linear.x = euclidean_distance(x, y, data.x, data.y)
vel_msg.linear.y = 0
vel_msg.linear.z = 0
vel_msg.angular.x = 0
vel_msg.angular.y = 0
vel_msg.angular.z = 0
velocityPublisher.publish(vel_msg)
rospy.loginfo(vel_msg)
def euclidean_distance(x1, y1, x2, y2):
    return sqrt(pow(x2-x1,2) + pow(y2-y1,2)) # pow(x, y) is the same as x**y
def getAngle(x1, y1, x2, y2):
xDiff = x2 - x1
yDiff = y2 - y1
return atan2(yDiff, xDiff)
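# Worked example (added, for illustration only): with the turtle at (0, 0),
# theta = 0, and a target at (1, 1), getAngle returns atan2(1, 1) ~= 0.785 rad
# (45 degrees) and euclidean_distance returns sqrt(2) ~= 1.414; those values
# become the angular and linear commands published in callback above.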
#The turtle's pose must be read continuously so that the theta angle is
#taken into account at all times.
def obtainGlobalParams(data):
global x
global y
global theta
x = data.x
y = data.y
theta = data.theta
def listener():
rospy.init_node('coordinates_receiver_and_turtle_pusher', anonymous=True)
rospy.Subscriber('turtle1/pose', Pose, obtainGlobalParams)
rospy.Subscriber('turtleAutoMove', Pose, callback)
# spin() simply keeps python from exiting until this node is stopped
rospy.spin()
if __name__ == '__main__':
listener()
|
import unittest
import numpy as np
from core.field_transformer import FieldTransformer
class FieldTransformerTest(unittest.TestCase):
def test_sets_mark_to_coordinates(self):
field = np.matrix('0 0 0; 0 0 0; 0 0 0')
field_transformer = FieldTransformer(field, 1, (0, 0))
assert (field_transformer.transform() == np.matrix('1 0 0; 0 0 0; 0 0 0')).all()
    def test_does_not_mutate_field(self):
        field = np.matrix('0 0 0; 0 0 0; 0 0 0')
        field_transformer = FieldTransformer(field, 1, (0, 0))
        field_transformer.transform()
        assert (field == np.matrix('0 0 0; 0 0 0; 0 0 0')).all()
|
def divisors(n):
div = []
for i in range(1, n + 1):
if n % i == 0:
div.append(i)
return len(div)
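# Small self-check (added for illustration, not part of the original function).
if __name__ == "__main__":
    assert divisors(12) == 6  # 1, 2, 3, 4, 6, 12
    assert divisors(7) == 2   # primes have exactly two divisors
    print(divisors(12), divisors(7))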
|
# -*- coding: utf8 -*-
from flask import Blueprint, render_template, request, send_file, Response
import config
from pathlib import Path
import os
from natsort import natsorted, ns
from os import listdir
from os.path import isfile, join, isdir
import get_image_size
from thumbnail import create_thumbnail
from flask import abort
import sys
import re
prefix = "/video"
video_bp = Blueprint("video", __name__, url_prefix=prefix)
@video_bp.after_request
def after_request(response):
response.headers.add("Accept-Ranges", "bytes")
return response
@video_bp.route("/<name>")
@video_bp.route("/<path:name>")
# @auth.login_required
def folder(name=""):
print(name, file=sys.stdout)
response = {"videos": prepare_video(name)}
agent = request.headers.get("User-Agent")
response["type"] = (
"mobile" if ("iphone" or "android") in agent.lower() else "desktop"
)
return render_template("video.html", response=response)
@video_bp.route("/")
def video():
response = {"videos": prepare_video()}
agent = request.headers.get("User-Agent")
response["type"] = (
"mobile" if ("iphone" or "android") in agent.lower() else "desktop"
)
return render_template("video.html", response=response)
def prepare_video(sub_path=""):
result = {"videos": [], "folders": []}
video_path = config.VIDEO_PATH
thumbnail_path = config.THUMBNAIL_PATH
if sub_path:
video_path += sub_path
abs_path = os.path.abspath(video_path)
if not (os.path.exists(video_path) and abs_path.find(config.VIDEO_PATH) == 0):
abort(404)
name = video_path.replace(config.VIDEO_PATH, "").split("/")[:-1]
    # Build the parent path: if this is a nested folder, include it in the URL
name = "/video/" + "/".join(name)
result["folders"].append(
{
"name": name,
"icon": "/static/images/back.png",
"width": 60,
"height": 60,
}
)
template_path = "{}/{}"
if os.path.isdir(video_path):
for f in sorted(listdir(video_path)):
result_path = join(video_path, f)
if isfile(result_path) and f.split(".")[-1].lower() in config.VIDEO_EXT:
filename_without_ext = f.split(".")[0]
template_thumbnail_file = create_thumbnail(
result_path, thumbnail_path, filename_without_ext
)
result["videos"].append(
{
"name": f,
"path": config.URL_STATIC_VIDEO
+ template_path.format(sub_path, f)
if sub_path
else config.URL_STATIC_VIDEO + f,
"ext": f.split(".")[-1].lower(),
"thumbnail": config.URL_STATIC_THUMBNAIL
+ template_thumbnail_file,
}
)
else:
f = join(video_path, f).replace(config.VIDEO_PATH, "")
result["folders"].append(
{
"name": f,
"path": config.URL_STATIC_VIDEO + f,
"icon": "/static/images/folder.png",
"ext": f.split(".")[-1].lower(),
"width": 60,
"height": 60,
}
)
return result
def get_chunk(full_path="", byte1=None, byte2=None):
file_size = os.stat(full_path).st_size
start = 0
length = 102400
if byte1 < file_size:
start = byte1
if byte2:
length = byte2 + 1 - byte1
else:
length = file_size - start
with open(full_path, "rb") as f:
f.seek(start)
chunk = f.read(length)
return chunk, start, length, file_size
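# Illustrative mapping (added note), assuming a 1000-byte file on disk:
#   "Range: bytes=0-499" arrives as byte1=0, byte2=499 and yields a 500-byte
#   chunk starting at offset 0; "Range: bytes=500-" arrives as byte1=500,
#   byte2=None and yields the remaining 500 bytes starting at offset 500.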
@video_bp.route("/get_thumbnail/<name>")
def get_thumbnail(name):
try:
print(config.URL_STATIC_THUMBNAIL + name)
return send_file(config.THUMBNAIL_PATH + name)
except FileNotFoundError:
abort(404)
@video_bp.route("/get_video/<name>")
@video_bp.route("/get_video/<path:varargs>/<name>")
def get_video(name, varargs=""):
if varargs:
varargs += "/"
file_path = config.VIDEO_PATH + varargs + name
if not os.path.exists(file_path):
abort(404)
range_header = request.headers.get("Range", None)
byte1, byte2 = 0, None
if range_header:
match = re.search(r"(\d+)-(\d*)", range_header)
groups = match.groups()
if groups[0]:
byte1 = int(groups[0])
if groups[1]:
byte2 = int(groups[1])
chunk, start, length, file_size = get_chunk(file_path, byte1, byte2)
resp = Response(
chunk,
206,
mimetype="video/mp4",
content_type="video/mp4",
direct_passthrough=True,
)
resp.headers.add(
"Content-Range",
"bytes {0}-{1}/{2}".format(start, start + length - 1, file_size),
)
return resp
|
entrada = input().split(" ")
a = int(entrada[0])
b = int(entrada[1])
while( a != 0 and b != 0):
resultado = a + b
resultado = str(resultado)
resultado = resultado.replace('0','')
print(resultado)
entrada = input().split(" ")
a = int(entrada[0])
b = int(entrada[1])
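# Worked example (added): for the input line "316 84" the sum is 400, and
# stripping the '0' digits leaves "4", which is what gets printed.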
|
import serial
from serial import Serial
from time import sleep
import time
import sys
import py222
import solver
import numpy as np
COM_PORT = 'COM4' # change this to your serial port name
BAUD_RATES = 9615
ser = serial.Serial(COM_PORT, BAUD_RATES,bytesize=8, timeout=2)
try:
    # read the user's input and convert it to lowercase
while True:
        choice = input('Enter "help" to get a cube-solving hint, or "e" to exit this program\n').lower()
        # 5 white, 4 green, 3 cyan, 2 orange, 1 blue, 0 red
if choice == 'help':
            print('Sending the scan command')
ser.write('os'.encode())
while 1:
if(ser.in_waiting):
                    mcu_feedback = ser.readline().decode() # receive the response message and decode it
s = mcu_feedback.split(' ')
s.remove('')
print(s)
arr = np.array([s[18],s[17],s[19],s[16],s[13],s[12],s[14],s[15],s[1],s[0],s[2],s[3],s[20],s[23],s[21],s[22],s[5],s[4],s[6],s[7],s[9],s[8],s[10],s[11]])
arr = arr.astype(int)
solver.solveCube(arr)
break
elif choice == 'e':
ser.close()
            print('Goodbye!')
sys.exit()
else:
            print('Invalid command...')
except KeyboardInterrupt:
ser.close()
    print('Goodbye!')
|
"""
Crea un tabla de multiplicar pero invertida, dando los resultados desde el 10 hasta el 1
"""
numero=int(input("Escribe un número: "))
aux=9
for a in range(1,11):
a+=aux
aux-=2
resultado=numero*a
print("{}*{}={}".format(numero,a,resultado))
|
'''
This program provides 3 functions:
1. convert a pcd map from the UTM frame to the ENU frame
2. merge each small map into a global map
3. downsample the map uniformly
Required libraries:
    numpy
    open3d
    tqdm
'''
import os
import numpy as np
import open3d
from tqdm import tqdm
# prefixes is the list of maps to combine
prefixes=[
"Track_A_20201223_231551_Profiler_zfs_0_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_1_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_2_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_3_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_4_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_5_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_6_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_7_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_8_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_9_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_10_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_11_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_12_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_13_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_14_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_15_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_16_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_17_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_18_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_19_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_20_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_21_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_22_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_23_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_24_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_25_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_26_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_27_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_28_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_29_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_30_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_31_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_32_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_33_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_34_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_35_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_36_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_37_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_38_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_39_m_colorized",
"Track_A_20201223_231551_Profiler_zfs_40_m_colorized"
]
def UTMtoLL(UTMNorthing, UTMEasting, UTMZone):
m_RAD2DEG = 180/np.pi
m_DEG2RAD = np.pi/180
k0 = 0.9996
a = 6378137.0
WGS84_E = 0.0818191908
eccSquared = (WGS84_E*WGS84_E)
e1 = (1 - np.sqrt(1 - eccSquared)) / (1 + np.sqrt(1 - eccSquared))
x = UTMEasting - 500000.0
y = UTMNorthing
ZoneNumber = 52
ZoneLetter = 'N'
# if((ZoneLetter - 'N') < 0):
# y = y - 10000000.0
LongOrigin = (ZoneNumber - 1) * 6 - 180 + 3
eccPrimeSquared = (eccSquared) / (1 - eccSquared)
M = y / k0
mu = M / (a*(1 - eccSquared / 4 - 3 * eccSquared*eccSquared / 64 - 5 * eccSquared*eccSquared*eccSquared / 256))
phi1Rad = mu + ((3 * e1 / 2 - 27 * e1*e1*e1 / 32)*np.sin(2 * mu) + (21 * e1*e1 / 16 - 55 * e1*e1*e1*e1 / 32)*np.sin(4 * mu) + (151 * e1*e1*e1 / 96)*np.sin(6 * mu))
N1 = a / np.sqrt(1 - eccSquared*np.sin(phi1Rad)*np.sin(phi1Rad))
T1 = np.tan(phi1Rad)*np.tan(phi1Rad)
C1 = eccPrimeSquared*np.cos(phi1Rad)*np.cos(phi1Rad)
R1 = a*(1 - eccSquared) / ((1 - eccSquared*np.sin(phi1Rad)*np.sin(phi1Rad))**1.5)
D = x / (N1*k0)
Lat = phi1Rad - ((N1*np.tan(phi1Rad) / R1) *(D*D / 2 - (5 + 3 * T1 + 10 * C1 - 4 * C1*C1 - 9 * eccPrimeSquared)*D*D*D*D / 24 + (61 + 90 * T1 + 298 * C1 + 45 * T1*T1 - 252 * eccPrimeSquared - 3 * C1*C1)*D*D*D*D*D*D / 720))
Lat = Lat * m_RAD2DEG
Long = ((D - (1 + 2 * T1 + C1)*D*D*D / 6 + (5 - 2 * C1 + 28 * T1 - 3 * C1*C1 + 8 * eccPrimeSquared + 24 * T1*T1)*D*D*D*D*D / 120) / np.cos(phi1Rad))
Long = LongOrigin + Long * m_RAD2DEG
return ([Lat, Long])
def FnKappaLat(ref_lat, height):
Geod_a = 6378137.0
Geod_e2 = 0.00669437999014
m_RAD2DEG = 180/np.pi
m_DEG2RAD = np.pi/180
# dKappaLat=0.
# Denominator = 0.
# dM = 0.
# estimate the meridional radius
Denominator = np.sqrt(1 - Geod_e2 * (np.sin(ref_lat * m_DEG2RAD))**2)
dM = Geod_a * (1 - Geod_e2) / (Denominator**3)
# Curvature for the meridian
dKappaLat = 1 / (dM + height) * m_RAD2DEG;
return dKappaLat
def FnKappaLon(ref_lat, height):
Geod_a = 6378137.0
Geod_e2 = 0.00669437999014
m_RAD2DEG = 180/np.pi
m_DEG2RAD = np.pi/180
dKappaLon = 0
Denominator = 0
dN = 0
# estimate the normal radius
Denominator = np.sqrt(1 - Geod_e2 * (np.sin(ref_lat * m_DEG2RAD))**2)
dN = Geod_a / Denominator
# Curvature for the meridian
dKappaLon = 1 / ((dN + height) * np.cos(ref_lat * m_DEG2RAD)) * m_RAD2DEG
return dKappaLon
def llh2enu(lat, lon, height, ref_lat, ref_lon, ref_height):
Geod_a = 6378137.0
Geod_e2 = 0.00669437999014
m_RAD2DEG = 180/np.pi
m_DEG2RAD = np.pi/180
dKLat = 0.
dKLon = 0.
    m_dRefLatitude_deg = ref_lat
m_dRefLongitude_deg = ref_lon
height = height - ref_height
# estimate the meridional radius
Denominator = np.sqrt(1 - Geod_e2 * (np.sin(m_dRefLatitude_deg * m_DEG2RAD))**2)
dM = Geod_a * (1 - Geod_e2) / (Denominator**3)
# Curvature for the meridian
dKappaLat = 1 / (dM + height) * m_RAD2DEG;
dKLon = 0
Denominator = 0
dN = 0
# estimate the normal radius
Denominator = np.sqrt(1 - Geod_e2 * (np.sin(m_dRefLatitude_deg * m_DEG2RAD))**2)
dN = Geod_a / Denominator
# Curvature for the meridian
dKLon = 1 / ((dN + height) * np.cos(m_dRefLatitude_deg * m_DEG2RAD)) * m_RAD2DEG
east_m = (lon-m_dRefLongitude_deg)/dKLon
    north_m = (lat-m_dRefLatitude_deg)/dKappaLat
return [east_m, north_m, height]
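# Hedged self-check (added for illustration; the reference values are arbitrary):
# one millidegree of latitude north of the reference should land roughly 111 m
# north in the local ENU frame, since one degree of latitude spans about 111 km.
if __name__ == "__main__":
    _e, _n, _h = llh2enu(37.3972733, 127.1066872, 60.37,
                         37.3962733, 127.1066872, 60.37)
    print("north offset for 0.001 deg of latitude: %.1f m" % _n)  # ~111.0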
color_map = np.array( ### bgr
[[0, 0, 0], ### unlabeled
[0, 10, 255], ### outlier
[245, 150, 100], ### car
[245, 230, 100], ### bicycle
[250, 80, 100], ### bus
[150, 60, 30], ### motorcycle
[255, 0, 0], ### on rails
[180, 30, 80], ### truck
[200, 40, 255], ### bicyclist
[90, 30, 150], ### motorcyclist
[255, 0, 255], ### road
[255, 150, 255], ### parking
[75, 0, 75], ### sidewalk
[75, 0, 175], ### other-ground
[0, 200, 255], ### building
[50, 120, 255], ### fence
[0, 150, 255], ### other-structure
[170, 255, 150], ### lane-marking
[0, 175, 0], ### vegetation
[0, 60, 135], ### trunk
[80, 240, 150], ### terrain
[150, 240, 255], ### pole
[0, 0, 255], ### traffic-sign
[255, 255, 50], ### other-object
[184, 249, 7], ### other-building
[142, 135, 31], ### kerb
[255, 0, 0], ### traffic-light
[80, 100, 0], ### tunnel-fan
[137, 95, 174], ### tunnel-emergency-light
[255, 0, 171]] ### tunnel-hydrant
)
data_path = "/home/jiwon/pcd_colorized_sy"
is_first_step = True
pcd_npy_edit = np.array([0]);
cnt = 0
for prefix in prefixes:
print("colorizing", prefix)
prefix = os.path.join(data_path, prefix)
pcd_file = prefix + ".pcd"
# label_file = prefix + ".labels"
saved_file = prefix + "_global2.pcd"
pcd = open3d.io.read_point_cloud(pcd_file) #### load .pcd file
pcd_down = pcd.uniform_down_sample(every_k_points=20)
pcd_npy = np.asarray(pcd_down.points)
pcd_cor = np.asarray(pcd_down.colors)
# print(pcd_npy)
for idx in tqdm(range(pcd_npy.shape[0])):
[lat, lon] = UTMtoLL(pcd_npy[idx,1],pcd_npy[idx,0],"52N")
# Set reference position using first position of first file
if is_first_step == True:
# reference_lat = reference point latitude of map
reference_lat = 37.3962732790
# reference_lon = reference point longitude of map
reference_lon = 127.1066872418
# reference_height = reference point height of map
reference_height = 60.37
is_first_step = False
pcd_npy[idx,0:3] = llh2enu(lat, lon, pcd_npy[idx,2], reference_lat, reference_lon, reference_height)
if cnt ==0:
pcd_sum = np.concatenate((pcd_npy,pcd_cor),axis=1)
else:
tmp_pcd_sum = np.concatenate((pcd_npy,pcd_cor),axis=1)
pcd_sum = np.concatenate((pcd_sum,tmp_pcd_sum),axis=0)
cnt = cnt + 1
pcd_final = open3d.geometry.PointCloud()
pcd_final.points = open3d.utility.Vector3dVector(pcd_sum[:, 0:3])
pcd_final.colors = open3d.utility.Vector3dVector(pcd_sum[:, 3:6])
open3d.io.write_point_cloud(saved_file, pcd_final)
print("\nreference_lat : ")
print(reference_lat)
print("\nreference_lon : ")
print(reference_lon)
print("\nreference_Height : ")
print(reference_height)
f = open(data_path+"/configuration.ini",'w')
f.write("[configuration]\n")
f.write("RefLatitude_deg = %f \n"%reference_lat)
f.write("RefLongitude_deg = %f \n"%reference_lon)
f.write("RefHeight_m = %f"%reference_height)
|
#!/usr/bin/env python3
import math
import queue
from functools import cmp_to_key
import fileinput
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def __ne__(self, other):
return not self == other
    def __lt__(self, other):
        if self.x == other.x:
            return self.y < other.y
        return self.x < other.x
    def __le__(self, other):
        if self.x == other.x:
            return self.y <= other.y
        return self.x <= other.x
def __gt__(self, other):
return not self <= other
def __ge__(self, other):
return not self < other
def __repr__(self):
return f"{self.x}\t{self.y}"
class Edge:
def __init__(self, p1, p2):
self.p1 = p1
self.p2 = p2
def angle(self):
y_d = self.p2.y - self.p1.y
x_d = self.p2.x - self.p1.x
return math.atan(y_d / (x_d + 0.000000001))
def left_kink(p1, p2, p3):
d1 = Edge(p1, p2).angle()
d2 = Edge(p2, p3).angle()
return d2 > d1
def right_kink(p1, p2, p3):
return not left_kink(p1, p2, p3)
def get_hull(points, upper=True):
def upper_cmp(p1, p2):
if p1.x == p2.x:
if p1.y == p2.y:
return 0
if p1.y > p2.y:
return -1
return 1
return p1.x - p2.x
points = sorted(points)
p1, p2 = points[0:2]
last = points[-1]
rest = points[2:]
if upper:
rest = sorted(rest, key=cmp_to_key(upper_cmp))
Q = queue.LifoQueue()
Q.put(p1)
Q.put(p2)
for i in rest:
q3 = i
q2 = Q.get()
q1 = Q.get()
if upper:
kink = left_kink(q1, q2, q3)
else:
kink = right_kink(q1, q2, q3)
if kink:
Q.put(q1)
Q.put(q3)
else:
Q.put(q1)
Q.put(q2)
Q.put(q3)
if i == last:
break
hull = []
while not Q.empty():
hull.insert(0, Q.get())
return hull
def graham_scan(points):
upper_hull = get_hull(points, upper=True)
lower_hull = get_hull(points, upper=False)
# remove duplicate start/end points
lower_hull = lower_hull[1:-1]
return upper_hull + lower_hull
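# Illustrative check (added note): for the four corners of a square plus an
# interior point, e.g. (0,0), (10,0), (10,10), (0,10) and (5,5), graham_scan
# returns only the four corners: the upper hull followed by the lower hull with
# its duplicated endpoints removed, so (5,5) never appears in the output.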
def io_graham_scan(points_txt):
if isinstance(points_txt, str):
points_txt = points_txt.split('\n')
points = []
for line in points_txt:
        line = line.strip()
        if not line:
            continue
        x, y = map(int, line.split())
points.append(Point(x, y))
return '\n'.join([str(p) for p in graham_scan(points)])
if __name__ == "__main__":
print(io_graham_scan(fileinput.input()))
|
# -*- coding: utf-8 -*-
class UF:
    def __init__(self, N):  # initialize N sites with integer names
        self.id, self.cnt = list(range(N)), N
    def union(self, p, q):  # merge the components containing p and q
        rp, rq = self.find(p), self.find(q)
        if rp != rq:
            self.id[rp], self.cnt = rq, self.cnt - 1
    def find(self, p):  # return component identifier for p
        while p != self.id[p]:
            p = self.id[p]
        return p
    def connected(self, p, q):  # return true if p and q are in the same component
        return self.find(p) == self.find(q)
    def count(self):  # number of components
        return self.cnt
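# Minimal usage sketch (added for illustration; assumes the implementation above):
if __name__ == "__main__":
    uf = UF(5)
    uf.union(0, 1)
    uf.union(3, 4)
    print(uf.connected(0, 1))  # True
    print(uf.count())          # 3 components remain: {0, 1}, {2}, {3, 4}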
|
from typing import Tuple, List, Optional, Dict, Any, Set
from . import Verb, WorkloadExceededError
from .interface import Block
from itertools import islice
def build_node_tree(message : str) -> List['Interpreter.Node']:
"""
build_node_tree will take a message and get every possible match
"""
nodes = []
previous = r""
starts = []
for i, ch in enumerate(message):
if ch == "{" and previous != r'\\':
starts.append(i)
if ch == "}" and previous != r'\\':
if len(starts) == 0:
continue
coords = (starts.pop(), i)
n = Interpreter.Node(coords)
nodes.append(n)
previous = ch
return nodes
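# Illustrative note (added; derived from the loop above): for the message
# "a {b {c}} d" the inner pair closes first, so the returned list contains a
# node with coordinates (5, 7) for "{c}" followed by one with (2, 8) for
# "{b {c}}"; a brace preceded by a backslash is treated as escaped and skipped.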
class Interpreter(object):
def __init__(self, blocks : List[Block]):
self.blocks : List[Block] = blocks
class Node(object):
def __init__(self, coordinates : Tuple[int,int], ver : Verb = None):
self.output : Optional[str] = None
self.verb : Verb = ver
self.coordinates : Tuple[int,int] = coordinates
def __str__(self):
return str(self.verb)+" at "+str(self.coordinates)
class Context(object):
"""
Interpreter.Context is a simple packaged class that makes it
convenient to make Blocks have a small method signature.
        `self.verb` will be the verb's context; it holds all 3 parts of a verb:
        the payload (the main data), the declaration (the name it is calling),
        and the parameter (settings and modifiers).
`self.original_message` will contain the entire message before
it was edited. This is convenient for various post and pre
processes.
`self.interpreter` is the reference to the `Interpreter` object
that is currently handling the process. Use this reference to get
        and store variables that need to persist across processes. This is
        useful for caching heavy calculations.
"""
def __init__(self, verb : Verb, res : 'Interpreter.Response', inter : 'Interpreter', og : str):
self.verb : Verb = verb
self.original_message : str = og
self.interpreter : 'Interpreter' = inter
self.response : 'Interpreter.Response' = res
class Response(object):
"""
Interpreter.Response is another packaged class that contains data
        relevant only to the current process, and should not leak out
        into the interpretation of other tags. This is also what is handed
        back once the response is finished.
`self.actions` is a dict of recommended actions to take with the
response. Think of these as headers in HTTP.
`self.variables` is a dict intended to be shared between all the
blocks. For example if a variable is shared here, any block going
forward can look for it.
`self.body` is the finished, cleaned message with all verbs
interpreted.
"""
def __init__(self):
from .interface import Adapter
self.body : str = None
self.actions : Dict[str, Any] = {}
self.variables : Dict[str, Adapter] = {}
def solve(self, message : str, node_ordered_list, response, charlimit):
final = message
total_work = 0
for i, n in enumerate(node_ordered_list):
# Get the updated verb string from coordinates and make the context
n.verb = Verb(final[n.coordinates[0]:n.coordinates[1]+1])
ctx = Interpreter.Context(n.verb, response, self, message)
# Get all blocks that will attempt to take this
acceptors : List[Block] = [b for b in self.blocks if b.will_accept(ctx)]
for b in acceptors:
value = b.process(ctx)
if value != None: # Value found? We're done here.
n.output = value
break
if n.output == None:
continue # If there was no value output, no need to text deform.
if(charlimit is not None):
total_work = total_work + len(n.output) # Record how much we've done so far, for the rate limit
if(total_work > charlimit):
raise WorkloadExceededError("The TSE interpreter had its workload exceeded. The total characters attempted were " + str(total_work) + "/" + str(charlimit))
start, end = n.coordinates
message_slice_len = (end+1) - start
replacement_len = len(n.output)
differential = replacement_len - message_slice_len # The change in size of `final` after the change is applied
if "TSE_STOP" in response.actions:
return final[:start]+n.output
final = final[:start]+n.output+final[end+1:]
# if each coordinate is later than `start` then it needs the diff applied.
for future_n in islice(node_ordered_list, i+1, None):
new_start = None
new_end = None
if future_n.coordinates[0] > start:
new_start = future_n.coordinates[0] + differential
else:
new_start = future_n.coordinates[0]
if future_n.coordinates[1] > start:
new_end = future_n.coordinates[1] + differential
else:
new_end = future_n.coordinates[1]
future_n.coordinates = (new_start, new_end)
return final
def process(self, message : str, seed_variables : Dict[str, Any] = None, charlimit : Optional[int] = None) -> 'Interpreter.Response':
response = Interpreter.Response()
message_input = message
# Apply variables fed into `process`
if seed_variables is not None:
response.variables = {**response.variables, **seed_variables}
node_ordered_list = build_node_tree(message_input)
output = self.solve(message_input, node_ordered_list, response, charlimit)
# Dont override an overridden response.
if response.body == None:
response.body = output.strip("\n ")
else:
response.body = response.body.strip("\n ")
return response
|
'''Task 1'''
nums = [14, 21, 565, 18, 33, 20, 102, 108, 167, 891, 400]
for number in nums[:]:  # iterate over a copy so removing items is safe
    if number % 2 == 0:
        nums.remove(number)
print(nums)
'''Task 2'''
sentence = ['The', 'quick', 'brown', 'fox', 'jumps',
'over', 'the', 'lazy', 'dog']
longer_than_4 = [word for word in sentence if len(word) >= 4]
l_4 = []
print(longer_than_4)
for word in sentence:
if len(word) >= 4:
l_4.append(word)
print(l_4)
'''Task 3'''
kvadrati = []
for number in range(1, 31):
kvadrati.append(number ** 2)
print(kvadrati)
print('First 5 elements: ', kvadrati[:5])
print('Last 5 elements: ', kvadrati[25:])
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from textwrap import dedent
import pytest
from pants.backend.java.compile.javac import rules as javac_rules
from pants.backend.java.dependency_inference.rules import (
InferJavaSourceDependencies,
JavaInferredDependencies,
JavaInferredDependenciesAndExportsRequest,
JavaSourceDependenciesInferenceFieldSet,
)
from pants.backend.java.dependency_inference.rules import rules as dep_inference_rules
from pants.backend.java.target_types import (
JavaSourceField,
JavaSourcesGeneratorTarget,
JunitTestsGeneratorTarget,
)
from pants.backend.java.target_types import rules as java_target_rules
from pants.core.util_rules import config_files, source_files
from pants.engine.addresses import Address, Addresses, UnparsedAddressInputs
from pants.engine.target import (
Dependencies,
DependenciesRequest,
ExplicitlyProvidedDependencies,
InferredDependencies,
Targets,
)
from pants.jvm.jdk_rules import rules as java_util_rules
from pants.jvm.resolve import jvm_tool
from pants.jvm.strip_jar import strip_jar
from pants.jvm.target_types import JvmArtifactTarget
from pants.jvm.test.junit import rules as junit_rules
from pants.jvm.testutil import maybe_skip_jdk_test
from pants.jvm.util_rules import rules as util_rules
from pants.testutil.rule_runner import PYTHON_BOOTSTRAP_ENV, QueryRule, RuleRunner
from pants.util.ordered_set import FrozenOrderedSet
@pytest.fixture
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
*config_files.rules(),
*jvm_tool.rules(),
*dep_inference_rules(),
*java_target_rules(),
*java_util_rules(),
*strip_jar.rules(),
*javac_rules(),
*junit_rules(),
*source_files.rules(),
*util_rules(),
QueryRule(Addresses, [DependenciesRequest]),
QueryRule(ExplicitlyProvidedDependencies, [DependenciesRequest]),
QueryRule(InferredDependencies, [InferJavaSourceDependencies]),
QueryRule(JavaInferredDependencies, [JavaInferredDependenciesAndExportsRequest]),
QueryRule(Targets, [UnparsedAddressInputs]),
],
target_types=[JavaSourcesGeneratorTarget, JunitTestsGeneratorTarget, JvmArtifactTarget],
)
rule_runner.set_options(args=[], env_inherit=PYTHON_BOOTSTRAP_ENV)
return rule_runner
@maybe_skip_jdk_test
def test_infer_java_imports_same_target(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"BUILD": dedent(
"""\
java_sources(
name = 't',
)
"""
),
"A.java": dedent(
"""\
package org.pantsbuild.a;
public class A {}
"""
),
"B.java": dedent(
"""\
package org.pantsbuild.b;
public class B {}
"""
),
}
)
target_a = rule_runner.get_target(Address("", target_name="t", relative_file_path="A.java"))
target_b = rule_runner.get_target(Address("", target_name="t", relative_file_path="B.java"))
assert rule_runner.request(
InferredDependencies,
[InferJavaSourceDependencies(JavaSourceDependenciesInferenceFieldSet.create(target_a))],
) == InferredDependencies([])
assert rule_runner.request(
InferredDependencies,
[InferJavaSourceDependencies(JavaSourceDependenciesInferenceFieldSet.create(target_b))],
) == InferredDependencies([])
@maybe_skip_jdk_test
def test_infer_java_imports(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"BUILD": dedent(
"""\
java_sources(
name = 'a',
)
"""
),
"A.java": dedent(
"""\
package org.pantsbuild.a;
import org.pantsbuild.b.B;
public class A {}
"""
),
"sub/BUILD": dedent(
"""\
java_sources(
name = 'b',
)
"""
),
"sub/B.java": dedent(
"""\
package org.pantsbuild.b;
public class B {}
"""
),
}
)
target_a = rule_runner.get_target(Address("", target_name="a", relative_file_path="A.java"))
target_b = rule_runner.get_target(Address("sub", target_name="b", relative_file_path="B.java"))
assert rule_runner.request(
InferredDependencies,
[InferJavaSourceDependencies(JavaSourceDependenciesInferenceFieldSet.create(target_a))],
) == InferredDependencies([target_b.address])
assert rule_runner.request(
InferredDependencies,
[InferJavaSourceDependencies(JavaSourceDependenciesInferenceFieldSet.create(target_b))],
) == InferredDependencies([])
@maybe_skip_jdk_test
def test_infer_java_imports_with_cycle(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"BUILD": dedent(
"""\
java_sources(
name = 'a',
)
"""
),
"A.java": dedent(
"""\
package org.pantsbuild.a;
import org.pantsbuild.b.B;
public class A {}
"""
),
"sub/BUILD": dedent(
"""\
java_sources(
name = 'b',
)
"""
),
"sub/B.java": dedent(
"""\
package org.pantsbuild.b;
import org.pantsbuild.a.A;
public class B {}
"""
),
}
)
target_a = rule_runner.get_target(Address("", target_name="a", relative_file_path="A.java"))
target_b = rule_runner.get_target(Address("sub", target_name="b", relative_file_path="B.java"))
assert rule_runner.request(
InferredDependencies,
[InferJavaSourceDependencies(JavaSourceDependenciesInferenceFieldSet.create(target_a))],
) == InferredDependencies([target_b.address])
assert rule_runner.request(
InferredDependencies,
[InferJavaSourceDependencies(JavaSourceDependenciesInferenceFieldSet.create(target_b))],
) == InferredDependencies([target_a.address])
@maybe_skip_jdk_test
def test_infer_java_imports_ambiguous(rule_runner: RuleRunner, caplog) -> None:
ambiguous_source = dedent(
"""\
package org.pantsbuild.a;
public class A {}
"""
)
rule_runner.write_files(
{
"a_one/BUILD": "java_sources()",
"a_one/A.java": ambiguous_source,
"a_two/BUILD": "java_sources()",
"a_two/A.java": ambiguous_source,
"b/BUILD": "java_sources()",
"b/B.java": dedent(
"""\
package org.pantsbuild.b;
import org.pantsbuild.a.A;
public class B {}
"""
),
"c/BUILD": dedent(
"""\
java_sources(
dependencies=["!a_two/A.java"],
)
"""
),
"c/C.java": dedent(
"""\
package org.pantsbuild.c;
import org.pantsbuild.a.A;
public class C {}
"""
),
}
)
target_b = rule_runner.get_target(Address("b", relative_file_path="B.java"))
target_c = rule_runner.get_target(Address("c", relative_file_path="C.java"))
# Because there are two sources of `org.pantsbuild.a.A`, neither should be inferred for B. But C
# disambiguates with a `!`, and so gets the appropriate version.
caplog.clear()
assert rule_runner.request(
InferredDependencies,
[InferJavaSourceDependencies(JavaSourceDependenciesInferenceFieldSet.create(target_b))],
) == InferredDependencies([])
assert len(caplog.records) == 1
assert (
"The target b/B.java imports `org.pantsbuild.a.A`, but Pants cannot safely" in caplog.text
)
assert rule_runner.request(
InferredDependencies,
[InferJavaSourceDependencies(JavaSourceDependenciesInferenceFieldSet.create(target_c))],
) == InferredDependencies([Address("a_one", relative_file_path="A.java")])
@maybe_skip_jdk_test
def test_infer_java_imports_unnamed_package(rule_runner: RuleRunner) -> None:
# A source file without a package declaration lives in the "unnamed package", but may still be
# consumed (but not `import`ed) by other files in the unnamed package.
rule_runner.write_files(
{
"BUILD": dedent(
"""\
java_sources(name = 'a')
"""
),
"Main.java": dedent(
"""\
public class Main {
public static void main(String[] args) throws Exception {
Lib l = new Lib();
}
}
"""
),
"Lib.java": dedent(
"""\
public class Lib {}
"""
),
}
)
target_a = rule_runner.get_target(Address("", target_name="a", relative_file_path="Main.java"))
assert rule_runner.request(
InferredDependencies,
[InferJavaSourceDependencies(JavaSourceDependenciesInferenceFieldSet.create(target_a))],
) == InferredDependencies([Address("", target_name="a", relative_file_path="Lib.java")])
@maybe_skip_jdk_test
def test_infer_java_imports_same_target_with_cycle(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"BUILD": dedent(
"""\
java_sources(
name = 't',
)
"""
),
"A.java": dedent(
"""\
package org.pantsbuild.a;
import org.pantsbuild.b.B;
public class A {}
"""
),
"B.java": dedent(
"""\
package org.pantsbuild.b;
import org.pantsbuild.a.A;
public class B {}
"""
),
}
)
target_a = rule_runner.get_target(Address("", target_name="t", relative_file_path="A.java"))
target_b = rule_runner.get_target(Address("", target_name="t", relative_file_path="B.java"))
assert rule_runner.request(
InferredDependencies,
[InferJavaSourceDependencies(JavaSourceDependenciesInferenceFieldSet.create(target_a))],
) == InferredDependencies([target_b.address])
assert rule_runner.request(
InferredDependencies,
[InferJavaSourceDependencies(JavaSourceDependenciesInferenceFieldSet.create(target_b))],
) == InferredDependencies([target_a.address])
@maybe_skip_jdk_test
def test_dependencies_from_inferred_deps(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"BUILD": dedent(
"""\
java_sources(
name = 't',
)
"""
),
"A.java": dedent(
"""\
package org.pantsbuild.a;
import org.pantsbuild.b.B;
public class A {}
"""
),
"B.java": dedent(
"""\
package org.pantsbuild.b;
public class B {}
"""
),
}
)
target_t = rule_runner.get_target(Address("", target_name="t"))
target_a = rule_runner.get_target(Address("", target_name="t", relative_file_path="A.java"))
target_b = rule_runner.get_target(Address("", target_name="t", relative_file_path="B.java"))
assert (
rule_runner.request(
ExplicitlyProvidedDependencies, [DependenciesRequest(target_a[Dependencies])]
).includes
== FrozenOrderedSet()
)
    # Neither //:t nor any of its source subtargets has explicitly provided deps
assert (
rule_runner.request(
ExplicitlyProvidedDependencies, [DependenciesRequest(target_t.get(Dependencies))]
).includes
== FrozenOrderedSet()
)
assert (
rule_runner.request(
ExplicitlyProvidedDependencies, [DependenciesRequest(target_a.get(Dependencies))]
).includes
== FrozenOrderedSet()
)
assert (
rule_runner.request(
ExplicitlyProvidedDependencies, [DependenciesRequest(target_b.get(Dependencies))]
).includes
== FrozenOrderedSet()
)
# //:t has an automatic dependency on each of its subtargets
assert rule_runner.request(
Addresses, [DependenciesRequest(target_t.get(Dependencies))]
) == Addresses(
[
target_a.address,
target_b.address,
]
)
# A.java has an inferred dependency on B.java
assert rule_runner.request(
Addresses, [DependenciesRequest(target_a.get(Dependencies))]
) == Addresses([target_b.address])
# B.java does NOT have a dependency on A.java, as it would if we just had subtargets without
# inferred dependencies.
assert (
rule_runner.request(Addresses, [DependenciesRequest(target_b.get(Dependencies))])
== Addresses()
)
@maybe_skip_jdk_test
def test_package_private_dep(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"BUILD": dedent(
"""\
java_sources(
name = 't',
)
"""
),
"A.java": dedent(
"""\
package org.pantsbuild.example;
import org.pantsbuild.example.C;
public class A {
public static void main(String[] args) throws Exception {
C c = new C();
}
}
"""
),
"B.java": dedent(
"""\
package org.pantsbuild.example;
public class B {}
class C {}
"""
),
}
)
target_a = rule_runner.get_target(Address("", target_name="t", relative_file_path="A.java"))
target_b = rule_runner.get_target(Address("", target_name="t", relative_file_path="B.java"))
# A.java has an inferred dependency on B.java
assert rule_runner.request(
Addresses, [DependenciesRequest(target_a[Dependencies])]
) == Addresses([target_b.address])
# B.java does NOT have a dependency on A.java, as it would if we just had subtargets without
# inferred dependencies.
assert (
rule_runner.request(Addresses, [DependenciesRequest(target_b[Dependencies])]) == Addresses()
)
@maybe_skip_jdk_test
def test_junit_test_dep(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"BUILD": dedent(
"""\
java_sources(
name = 'lib',
)
junit_tests(
name = 'tests',
)
"""
),
"FooTest.java": dedent(
"""\
package org.pantsbuild.example;
import org.pantsbuild.example.C;
public class FooTest {
public static void main(String[] args) throws Exception {
C c = new C();
}
}
"""
),
"Foo.java": dedent(
"""\
package org.pantsbuild.example;
public class Foo {}
class C {}
"""
),
}
)
lib = rule_runner.get_target(Address("", target_name="lib", relative_file_path="Foo.java"))
tests = rule_runner.get_target(
Address("", target_name="tests", relative_file_path="FooTest.java")
)
    # FooTest.java has an inferred dependency on Foo.java
assert rule_runner.request(Addresses, [DependenciesRequest(tests[Dependencies])]) == Addresses(
[lib.address]
)
    # Foo.java does NOT have a dependency on FooTest.java, as it would if we just had subtargets
    # without inferred dependencies.
assert rule_runner.request(Addresses, [DependenciesRequest(lib[Dependencies])]) == Addresses()
@maybe_skip_jdk_test
def test_exports(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"BUILD": dedent(
"""\
java_sources(
name = 't',
)
"""
),
"A.java": dedent(
"""\
package org.pantsbuild.a;
import org.pantsbuild.b.B;
public class A {}
"""
),
"B.java": dedent(
"""\
package org.pantsbuild.b;
import org.pantsbuild.c.C;
import org.pantsbuild.d.D;
public class B extends C {}
"""
),
"C.java": dedent(
"""\
package org.pantsbuild.c;
public class C {}
"""
),
"D.java": dedent(
"""\
package org.pantsbuild.d;
public class D {}
"""
),
}
)
target_a = rule_runner.get_target(Address("", target_name="t", relative_file_path="A.java"))
target_b = rule_runner.get_target(Address("", target_name="t", relative_file_path="B.java"))
# B should depend on C and D, but only export C
assert rule_runner.request(
JavaInferredDependencies,
[JavaInferredDependenciesAndExportsRequest(target_b[JavaSourceField])],
) == JavaInferredDependencies(
dependencies=FrozenOrderedSet(
[
Address("", target_name="t", relative_file_path="C.java"),
Address("", target_name="t", relative_file_path="D.java"),
]
),
exports=FrozenOrderedSet(
[
Address("", target_name="t", relative_file_path="C.java"),
]
),
)
# A should depend on B, but not B's dependencies or export types
assert rule_runner.request(
JavaInferredDependencies,
[JavaInferredDependenciesAndExportsRequest(target_a[JavaSourceField])],
) == JavaInferredDependencies(
dependencies=FrozenOrderedSet(
[
Address("", target_name="t", relative_file_path="B.java"),
]
),
exports=FrozenOrderedSet([]),
)
|
from sys import stdin, exit
from random import choice
import Player, Colonies
player_list = []
game = True
class gameSetup(object):
def __init__(self):
self.max_players = 0
self.player_limit = 12
self.starting_era = 0
self.players = 0
def setupGame(self):
self.decision = raw_input("Do you wish to start a new game or load one? \n>")
if 'new game' in self.decision or 'New game' in self.decision or 'New Game' in self.decision:
self.createPlayer()
else:
print 'That is invalid.'
def createPlayer(self):
global player_list
self.input = raw_input("Please enter your desired player name: \n>")
vars()[self.input] = Player.Player(self.input)
self.input_race = raw_input("""Please choose a race to play:
1. Humans
2. Ikhventi \n>""").lower()
if self.input_race in Player.races:
vars()[self.input].race = Player.races[self.input_race]
print vars()[self.input].race.name
vars()[self.input].owned_colonies.append(Colonies.Colony("earth city", vars()[self.input]))
for self.i in vars()[self.input].owned_colonies:
print "You are starting with %s" % self.i.name
print self.i.owner
else:
print "Race not found. Try again." #Not resetting to race selection
player_list.append(vars()[self.input])
print "Do you wish to include another?"
self.input = raw_input("> ")
if self.input == "yes":
self.createPlayer()
else:
print "Then we shall set up the A.I."
self.createAI()
def createAI(self):
global player_list
self.i = 0
self.ai_names = ['bob', 'bill', 'jebediah',
'john', 'is', 'kill', 'tom']
print "How many A.I players do you want?"
self.max_players = raw_input("> ")
self.max = int(self.max_players)
if self.max < self.player_limit:
print "Setting up A.I."
while self.i < self.max:
vars()[self.i] = Player.Player(choice(self.ai_names))
vars()[self.i].is_ai = True
player_list.append(vars()[self.i])
self.i += 1
print player_list
print "Begin game? (Y/N)"
self.startGame()
else:
print "Over the player limit. Try again."
self.createAI()
def startGame(self):
self.input = raw_input("> ")
if self.input == "yes" or self.input == "y":
            turnStructure()
elif self.input == "no" or self.input == "n":
exit(0)
else:
print "Wut?"
class turnStructure(object):
def __init__(self):
global player_list
global game
while game:
for self.current_player in player_list:
self.turnStart()
def turnStart(self):
self.current_player.turn += 1
self.current_player.buildAdvance()
print "----------------"
print "It is now %s's turn %i." % (self.current_player.name, self.current_player.turn)
print "Beginning %s's turn. \n" % self.current_player.name
if self.current_player.is_ai == False:
#Calculate new resources, building queues. Echo updates.
self.turnMid()
else:
#Calculate new resources, building queues. Echo updates.
self.ai_turnMid()
def turnMid(self):
self.input = raw_input("Choose what to do. \n> ")
if "end turn" in self.input:
self.turnEnd()
elif "build" in self.input:
self.choice = raw_input("Do you wish to build a vessel or a structure? \n>")
if self.choice == "structure":
self.current_player.buildOrderStructure()
elif self.choice == "vessel":
self.current_player.buildOrderVessel()
self.turnMid()
elif "colonize" in self.input:
self.col_location = raw_input("Choose a location to found the colony at. \n>")
for self.i in starsystem.bodylist: #Requires edits
if self.col_location == starsystem.bodylist.name: #This too
self.col_location = self.i
self.col_name = raw_input("Now choose a name for the colony. \n>")
self.new_colony = Colonies.Colony(self.col_name, self.current_player, self.col_location)
self.current_player.owned_colonies.append(self.new_colony)
else:
print "This planet does not exist."
self.turnMid()
self.turnMid()
elif "cancel" in self.input:
self.turnMid()
else:
print "u wot m8?"
self.turnMid()
def ai_turnMid(self):
print "I am an A.I named %s. \n" % self.current_player.name
print "----------------"
self.turnEnd()
def turnEnd(self):
#Resolve remaining resources and queued moves.
pass
|
__author__ = 'Sebastian Bernasek'
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
from .base import Base
from .settings import *
from .palettes import Palette, line_colors
class Expression(Base):
"""
Object for plotting expression dynamics.
Attributes:
experiment (data.experiments.Experiment)
colorer (figures.palettes.Palette) - color palette for cell types
greys (figures.palettes.Palette) - color palette for progenitors
Inherited attributes:
fig (matplotlib.figure.Figure)
"""
# set class path and name
path = 'graphics/expression'
def __init__(self, experiment, **kwargs):
"""
Instantiate object for plotting expression dynamics.
Args:
experiment (data.experiments.Experiment)
include_neurons (bool) - if True, compile young R cell data for annotating time windows
kwargs: keyword arguments for early R cell selection
"""
# store experiment
self.experiment = experiment
# assign color palettes
self.colorer = Palette()
self.greys = Palette({'grey': 'grey'})
# initialize figure
self.fig = None
def render(self, channel,
cell_types=None,
scatter=False,
interval=False,
marker_kw={},
line_kw={},
interval_kw={},
ma_kw={},
shading=None,
figsize=(2, 1),
ax_kw={},
**kwargs):
"""
Plot expression dynamics for single channel.
Progenitor expression dynamics are shown by default. Additional cell types may be added via the cell_types argument.
Args:
channel (str) - fluorescence channel
cell_types (list) - included cell types to be added
scatter (bool) - if True, add markers for each measurement
interval - if True, add confidence interval for moving average
marker_kw (dict) - keyword arguments for marker formatting
line_kw (dict) - keyword arguments for line formatting
interval_kw (dict) - keyword arguments for interval formatting
ma_kw (dict) - keyword arguments for interval construction
shading (str) - color used to shade time window of young cells
fig_size (tuple) - figure size
ax_kw (dict) - keyword arguments for axis formatting
kwargs: keyword arguments for plot function
"""
# create figure
fig, ax = plt.subplots(figsize=figsize)
self.fig = fig
# plot expression dynamics
self.plot(ax, channel,
cell_types=cell_types,
scatter=scatter,
interval=interval,
marker_kw=marker_kw,
line_kw=line_kw,
interval_kw=interval_kw,
ma_kw=ma_kw,
shading=shading,
**kwargs)
# format axis labels
ax.set_ylabel('')
ax.set_xlabel('Time (h)')
# format axis
self._format_ax(ax, **ax_kw)
def render_all_channels(self,
cell_types=None,
scatter=False,
interval=False,
marker_kw={},
line_kw={},
interval_kw={},
ma_kw={},
shading=None,
figsize=(2.5, 4.5),
xlim=(-25, 55),
**kwargs):
"""
Plot stacked Pnt/Yan/Ratio expression dynamics.
Progenitor expression dynamics are shown by default. Additional cell types may be added via the cell_types argument.
Args:
cell_types (list) - included cell types to be added
scatter (bool) - if True, add markers for each measurement
interval - if True, add confidence interval for moving average
marker_kw (dict) - keyword arguments for marker formatting
line_kw (dict) - keyword arguments for line formatting
interval_kw (dict) - keyword arguments for interval formatting
ma_kw (dict) - keyword arguments for interval construction
shading (str) - color used to shade time window of young cells
fig_size (tuple) - figure size
xlim (tuple) - bounds for x-axis
kwargs: keyword arguments for plot function
"""
# set formatting
kw = dict(cell_types=cell_types,
scatter=scatter,
interval=interval,
marker_kw=marker_kw,
line_kw=line_kw,
interval_kw=interval_kw,
ma_kw=ma_kw,
shading=shading,
**kwargs)
# create figure
fig, axes = plt.subplots(nrows=3, sharex=True, figsize=figsize)
(ax0, ax1, ax2) = axes
self.fig = fig
# plot pnt dynamics
self.plot(ax0, 'ch1_normalized', **kw)
# plot yan dynamics
self.plot(ax1, 'ch2_normalized', **kw)
# plot ratio dynamics
self.plot(ax2, 'logratio', **kw)
# format axis labels
ax0.set_xlabel('')
ax1.set_xlabel('')
ax2.set_xlabel('Time (h)')
ax0.set_ylabel('Pnt (a.u.)')
ax1.set_ylabel('Yan (a.u.)')
ax2.set_ylabel('Ratio')
# format axes (good defaults for wildtype data)
self._format_ax(ax0, xlim, ylim=(0.1,2.1), yticks=np.arange(.2,2.2,.3))
self._format_ax(ax1, xlim, ylim=(0.1,2.3), yticks=np.arange(.2,2.4,.3))
self._format_ax(ax2, xlim, ylim=(-2, 2.1), yticks=np.arange(-2,2.1,.5))
ax2.spines['bottom'].set_position(('outward', 0))
ax2.yaxis.set_major_formatter(FormatStrFormatter('%.1f'))
# adjust spacing
plt.subplots_adjust(hspace=0.15)
def plot(self, ax, channel,
cell_types=None,
scatter=False,
interval=False,
marker_kw={},
line_kw={},
interval_kw={},
ma_kw={},
shading=None,
**kwargs):
"""
Plot expression dynamics for each cell type.
Args:
ax (plt.Axis instance) - if None, create axes
channel (str) - fluorescence channel
cell_types (list) - reference cell types to be added
scatter (bool) - if True, add markers for each measurement
interval - if True, add confidence interval for moving average
marker_kw (dict) - keyword arguments for marker formatting
line_kw (dict) - keyword arguments for line formatting
interval_kw (dict) - keyword arguments for interval formatting
ma_kw (dict) - keyword arguments for interval construction
shading (str) - color used to shade time window of young cells
kwargs: keyword arguments for Cells.plot_dynamics
Returns:
ax (plt.Axis instance)
"""
# select progenitors
pre = self.experiment.get_cells('pre')
# format markers
grey_shade = 4
marker_kw['color'] = self.greys('grey', 'light', grey_shade)
marker_kw['s'] = 0.5
marker_kw['rasterized'] = True
# format line
line_kw['color'] = 'black'
interval_kw['color'] = 'black'
# set window size
if channel[-4:] == 'flux':
ma_kw['window_size'] = 500
else:
ma_kw['window_size'] = 250
# plot progenitor expression
pre.plot_dynamics(channel, ax,
scatter=scatter,
interval=interval,
marker_kw=marker_kw,
line_kw=line_kw,
interval_kw=interval_kw,
ma_kw=ma_kw)
# add neuron expression
if cell_types is None:
cell_types = []
for types in cell_types:
# format markers and lines
marker_kw['color'] = self.colorer(types[0])
marker_kw['s'] = 2
marker_kw['rasterized'] = True
# if scatter is True, use line_colors for line/interval
if scatter:
line_color = line_colors[types[0]]
else:
line_color = self.colorer(types[0])
line_kw['color'] = line_color
interval_kw['color'] = line_color
# set moving average resolution
# set window size
if channel[-4:] == 'flux':
ma_kw['window_size'] = 150
else:
ma_kw['window_size'] = 75
ma_kw['resolution'] = 5
# select cells
cells = self.experiment.get_cells(types)
# plot dynamics
cells.plot_dynamics(channel, ax,
scatter=scatter,
interval=interval,
marker_kw=marker_kw,
line_kw=line_kw,
interval_kw=interval_kw,
ma_kw=ma_kw,
**kwargs)
# shade early R cell region
if shading is not None:
if channel == 'logratio':
self.shade_window(ax, types, color=shading, ymin=-2.75, ymax=2.75, alpha=0.25)
else:
self.shade_window(ax, types, color=shading, alpha=0.25)
return ax
def shade_window(self, ax, reference,
color='orange',
alpha=0.5,
ymin=-2.5,
ymax=2.5):
"""
Shade time window corresponding to first ten cells of reference type.
Args:
ax (plt.axis instance)
reference (list) - reference cell type
color (str) - shading color
ymin, ymax (float) - shading boundaries
"""
# select reference cells
data = self.experiment.select_by_concurrency(reference, 10, 0, 1)
data = data[data.label.isin(reference)]
# shade time window
tmin, tmax = data.t.min(), data.t.max()
        ax.fill_between([tmin, tmax],
                        [ymin, ymin],
                        [ymax, ymax],
                        color=color,
                        alpha=alpha,
                        zorder=0)
@staticmethod
def _format_ax(ax,
xlim=(-25, 55),
xticks=None,
ylim=(0, 2.5),
yticks=None,
yspine_lim=None):
"""
Format axis limits, spine limits, and tick positions.
Args:
ax (plt.axis instance)
xlim (tuple) - limits for x-axis
xticks (array like) - tick positions for x-axis
ylim (tuple) - limits for y-axis
yticks (array like) - tick positions for y-axis
yspine_lim (tuple) - limits for y-axis spines
"""
# format x axis
ax.set_xlim(*xlim)
if xticks is None:
xticks = np.arange(xlim[0]+5, xlim[1]+5, 10)
ax.set_xticks(xticks)
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_bounds(*xlim)
# format y axis
ax.set_ylim(*ylim)
if yticks is None:
yticks = np.arange(0., 2.75, .5)
ax.set_yticks(yticks)
ax.yaxis.set_ticks_position('left')
if yspine_lim is None:
yspine_lim = ylim
ax.spines['left'].set_bounds(*yspine_lim)
# format spines
ax.spines['left'].set_position(('outward', 0))
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
class DualExpression(Expression):
"""
Object for comparing expression dynamics between two experimental conditions.
Attributes:
experiments (dict) - {condition: data.experiments.Experiment} pairs for the control and perturbation conditions.
colors (figures.palettes.Palette) - color palettes for control and perturbation conditions.
Inherited attributes:
fig (matplotlib.figure.Figure)
"""
def __init__(self, control, perturbation,
control_color='k',
perturbation_color='r'):
"""
Instantiate object for comparing expression dynamics between a control and perturbation condition.
Args:
control (data.experiments.Experiment) - control data
perturbation (data.experiments.Experiment) - perturbation data
control_color (str) - control color
perturbation_color (str) - perturbation color
"""
# define experiments
self.experiments = {'control': control,
'perturbation': perturbation}
# define color palettes
self.colors = Palette({'control': control_color,
'perturbation': perturbation_color})
# initialize figure
self.fig = None
def render(self,
channel,
cell_types=['pre'],
figsize=(2, 1),
ax_kw={},
**kwargs):
"""
Render expression timeseries comparison figure.
Args:
channel (str) - fluorescence channel
cell_types (list) - cell types to be included
figsize (tuple) - figure size
            ax_kw (dict) - keyword arguments for format_axis
kwargs: keyword arguments for plotting
"""
# create figure
self.fig = self.create_figure(figsize=figsize)
ax = self.fig.axes[0]
# plot expression for each experiment
for exp in self.experiments.keys():
self.plot(ax, exp, channel, cell_types=cell_types, **kwargs)
# format axis
self._format_ax(ax, **ax_kw)
def plot(self, ax, exp, channel,
cell_types=['pre'],
scatter=False,
interval=False,
marker_kw={},
line_kw={},
interval_kw={},
ma_kw={},
**kwargs):
"""
Plot expression dynamics for single experiment, channel, and cell type.
Args:
ax (plt.Axis instance) - if None, create axes
exp (str) - experiment key
channel (str) - fluorescence channel
cell_types (list) - cell types to be included
scatter (bool) - if True, add markers for each measurement
            interval (bool) - if True, add confidence interval for moving average
marker_kw (dict) - keyword arguments for marker formatting
line_kw (dict) - keyword arguments for line formatting
interval_kw (dict) - keyword arguments for interval formatting
ma_kw (dict) - keyword arguments for interval construction
kwargs: keyword arguments for Cells.plot_dynamics
Returns:
ax (plt.Axis instance)
"""
# select cells of specified type
cells = self.experiments[exp].get_cells(cell_types)
# define linestyle
        if exp in ('perturbation', 'wildtype'):
            line_kw['linestyle'] = 'dashed'
        else:
            line_kw['linestyle'] = 'solid'
# define colors
marker_kw['color'] = self.colors[exp]
line_kw['color'] = 'k'
interval_kw['color'] = self.colors[exp]
ma_kw['window_size'] = 250
# plot expression dynamics
cells.plot_dynamics(channel, ax,
scatter=scatter,
interval=interval,
marker_kw=marker_kw,
line_kw=line_kw,
interval_kw=interval_kw,
ma_kw=ma_kw)
ax.set_xlabel('Time (h)')
return ax
class MultiExpression(DualExpression):
"""
Object for comparing expression dynamics between multiple experimental conditions.
Inherited attributes:
experiments (dict) - {condition: data.experiments.Experiment} pairs for all conditions
colors (figures.palettes.Palette) - color palette for all conditions
fig (matplotlib.figure.Figure)
"""
def __init__(self, *experiments):
"""
Instantiate object for comparing expression dynamics between multiple conditions.
Args:
experiments (*tuple) - list of (measurement data, color) pairs
"""
        # unpack the (experiment, color) pairs into parallel tuples
        experiments, colors = zip(*experiments)
self.experiments = {k: v for k, v in enumerate(experiments)}
self.colors = {k: v for k, v in enumerate(colors)}
self.fig = None
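# Hedged usage sketch (added for illustration, not from the original source).
# Only the (experiment, color) pairing is implied by the constructor; the
# Experiment construction and the channel name below are placeholders:
#
#   control = Experiment(...)    # data.experiments.Experiment instances
#   mutant = Experiment(...)
#   comparison = MultiExpression((control, 'k'), (mutant, 'r'))
#   comparison.render('green', cell_types=['pre'])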
|
class Bag:
    # initialize instance attributes: the bag holds a maximum of 8 units and each size maps to a unit cost
def __init__(self):
self.items = []
self.space_remaining = 8
self.size_values = {"small": 1, "medium": 2, "large": 3}
# adds item to bag if space remaining
def add_item(self, item):
if self.space_remaining >= self.size_values[item["size"]]:
self.items.append(item)
self.space_remaining -= self.size_values[item["size"]]
return True
return False
# stringify bag output
def __str__(self):
output_string = ""
for item in self.items:
output_string = output_string + item["name"] + "\n"
return output_string
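# Minimal usage sketch (added for illustration, not part of the original file):
# exercise add_item and the 8-unit capacity rule described above.
if __name__ == "__main__":
    bag = Bag()
    print(bag.add_item({"name": "apple", "size": "small"}))    # True, 7 units left
    print(bag.add_item({"name": "melon", "size": "large"}))    # True, 4 units left
    print(bag.add_item({"name": "pumpkin", "size": "large"}))  # True, 1 unit left
    print(bag.add_item({"name": "loaf", "size": "medium"}))    # False, not enough space
    print(bag)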
|
# -*- coding: utf-8 -*-
"""
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import unicode_literals, absolute_import
from .authorization_code import AuthorizationCodeGrant
from .implicit import ImplicitGrant
from .resource_owner_password_credentials import ResourceOwnerPasswordCredentialsGrant
from .client_credentials import ClientCredentialsGrant
from .refresh_token import RefreshTokenGrant
from .openid_connect import OpenIDConnectBase
from .openid_connect import OpenIDConnectAuthCode
from .openid_connect import OpenIDConnectImplicit
from .openid_connect import OpenIDConnectHybrid
from .openid_connect import OIDCNoPrompt
from .openid_connect import AuthCodeGrantDispatcher
|
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import *
urlpatterns = patterns('Avaliacao.Questao.views',
url(r'^responderQuestao/(?P<questao_id>\S+)/$', 'responderQuestao', name='responderQuestao'),
url(r'^corrigirQuestao/(?P<questao_id>\S+)/$', 'corrigirQuestao', name='corrigirQuestao'),
url(r'^retorno_ajax/(?P<questao_id>\S+)/$', 'ajax_retorno_correcao', name='ajax_retorno_correcao'),
url(r'^retorno_gabarito_ajax/(?P<questao_id>\S+)/$', 'ajax_retorno_correcao_gabarito', name='ajax_retorno_correcao_gabarito'),
url(r'^gabarito/(?P<questao_id>\S+)/$', 'gabaritoQuestao', name='gabaritoQuestao'),
url(r'^criar/$', 'criar_questao', name='criar_questao'),
url(r'^editar/(?P<questao_id>\S+)/$', 'editar_questao', name='editar_questao'),
url(r'^criarTipo/$', 'criar_tipo', name='criar_tipo'),
url(r'^exibirQuestao/(?P<questao_id>\S+)/$', 'exibirQuestao', name='exibirQuestao'),
url(r'^exibirFonte/(?P<fonte_id>\S+)/$', 'exibir_arquivo_fonte', name='exibir_arquivo_fonte'),
url(r'^exibirFonteGabarito/(?P<fonte_id>\S+)/$', 'exibir_arquivo_fonte_gabarito', name='exibir_arquivo_fonte_gabarito'),
url(r'^listar/$', 'listar_questoes', name='listar_questoes'),
)
|
# Generated by Django 2.2 on 2022-06-10 02:55
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0015_memberinvite_player'),
]
operations = [
migrations.AlterField(
model_name='group',
name='admin',
field=models.ManyToManyField(blank=True, related_name='admin_groups', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='group',
name='members',
field=models.ManyToManyField(blank=True, related_name='member_groups', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='group',
name='points_to_win',
field=models.CharField(choices=[('11', '11'), ('30', '30'), ('other', 'Other')], default='other', max_length=30, null=True),
),
]
|
import argparse
import gc
import pickle
import re
import time
from functools import partial, reduce
from glob import glob
from itertools import *
from operator import add, iadd
from os import path, remove, stat, walk
import numpy as np
from joblib import Parallel, delayed, dump, load
# from multiprocess import Pool
from pathos.multiprocessing import ProcessingPool as Pool
import parallel_utils3 as par
import utils as u
from sql_utils import (make_session_kw, prepare_heterotachy, prepare_one_rate, psycopg2,
select)
schema = 'sim4'
outgroup_name = '4'
def main(args):
"""example arguments:
leafnames=['1', '2', '3', '4'],
length=250, ndsets=100
outdir='/N/dc2/projects/bkrosenz/deep_ils/results/train_data/',
outfile='test.hdf5', overwrite=True,
procs=4, ngenes=5000)"""
outpath = path.join(args.outdir, args.outfile)
if path.exists(outpath):
if args.overwrite:
remove(outpath)
else:
raise IOError(
"file {} exists. use --overwrite to overwrite".format(outpath)
)
tree_config = u.TreeConfig(
leafnames=args.leafnames,
outgroup=4,
include_outgroup=True,
subtree_sizes=[4]
)
labels = u.leaves2labels(args.leafnames, diag=False)
label_mapper = {
"pdist": partial(u.triu, labels=labels, diag=False)
}
with open('/N/u/bkrosenz/BigRed3/.ssh/db.pwd') as f:
password = f.read().strip()
if args.debug:
print('connecting...')
session, conn = make_session_kw(
username='bkrosenz_root',
password=password,
database='bkrosenz',
schema=schema,
statement_prepare=prepare_one_rate if args.table == 'one_rate' else prepare_heterotachy,
port=5444,
host='10.79.161.8',
with_metadata=False # sasrdspp02.uits.iu.edu'
)
if args.debug:
print('getting species trees...')
ngenes = args.ngenes
n_samps = ngenes * args.ndsets
species_trees = u.pd.read_sql_query(
f'execute sids({args.length},{ngenes})',
con=conn,
index_col='sid'
)
if args.ebl:
species_trees = species_trees[species_trees.ebl == args.ebl]
if species_trees.empty:
print('no species trees found for', args)
exit(1)
mapper = {
"pdist": partial(
u.triu, labels=labels,
diag=False
)
}
def summarize(df):
df2 = par.apply_mapping(df, mapper=mapper)
return par.summarize_chunk(df2, group_cols=['tid'])
results = []
ix = []
if args.debug:
print(species_trees, 'making pool...')
with Pool(args.procs) as pool:
n = 0
for stree in species_trees.itertuples():
if args.debug:
print('stree', stree)
try:
query = f'execute sample_sid({stree.Index},{args.length},{n_samps})'
x = u.pd.read_sql_query(
query, con=conn) if n_samps > 0 else u.pd.DataFrame()
except psycopg2.Error as e:
print(e, 'reconnecting...')
try:
session, conn = make_session_kw(
username='bkrosenz_root',
password=password,
database='bkrosenz',
schema=schema,
statement_prepare=prepare_heterotachy,
port=5444,
host='10.79.161.8',
with_metadata=False # sasrdspp02.uits.iu.edu'
)
x = u.pd.read_sql_query(query,
con=conn)
except:
print("couldn't reconnect")
continue
x.fillna(value=np.nan, inplace=True)
if args.debug:
print("xquery:", query, 'x', x, '\nnum strees:', n)
res = pool.map(summarize, u.chunker(x, args.ngenes))
ix.extend((stree.ebl, stree.ibl, i)
for i in range(len(res)))
results.extend(res)
if args.debug:
if not x.empty:
n += 1
if n > 2:
break
if not results:
print('no records found for', args)
exit(1)
if args.debug:
print(results)
u.write_results(results, ix, outpath)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="""get summary stats for
classification/regression"""
)
parser.add_argument(
"--procs",
"-p",
type=int,
help="num procs",
default=4
)
parser.add_argument(
"--table",
type=str,
help="table to query",
default="heterotachy"
)
parser.add_argument(
"--ndsets",
"-d",
type=int,
help="num dataset",
default=100
)
parser.add_argument(
"--ngenes",
"-g",
type=int,
help="""num genes in each dataset.
All genes must share the same heterotachy multipliers.""",
default=250
)
parser.add_argument(
"--length",
"-l",
type=int,
help="""length of each gene.
If not specified will sample from all lengths in the DB.""",
default=0
)
parser.add_argument(
"--ebl",
type=float,
help="set ebl"
)
parser.add_argument(
"--rfrac",
type=float,
help="""fraction of recombinant
sequences in each dset.
If rfrac is not set and nblocks==1, all seqs will be recombinant"""
)
parser.add_argument(
"--overwrite",
action="store_true",
help="overwrite"
)
# TODO: make these 2 into a mutually exclusive required group
parser.add_argument(
"--outdir",
help="directory to store results files",
default="/N/dc2/projects/bkrosenz/deep_ils/results",
)
parser.add_argument(
"--outfile",
help="output hdf5 name",
default="covs_trio.hdf5"
)
parser.add_argument(
"--leafnames",
nargs="+",
default=(*map(str, range(1, 5)),),
help="taxa names - list or filename",
)
parser.add_argument(
"--outgroup",
help="taxa names - list or filename")
parser.add_argument(
"--debug",
action="store_true",
help="debug")
args = parser.parse_args()
print("\n----\nArguments: {}\n".format(args))
main(args)
print("finished\n")
|
num1 = int(input("Informe o número 1: "))
num2 = int(input("Informe o número 2: "))
print(f'{num1} + {num2} = {num1+num2}')
|
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
import sqlite3
class DeleteL(QDialog):
def __init__(self, *args, **kwargs):
super(DeleteL, self).__init__(*args, **kwargs)
self.QBtn = QPushButton()
self.QBtn.setText("Удалить")
self.setWindowIcon(QIcon("icon/deleteL.png"))
self.setWindowTitle("Удалить работника")
self.setFixedWidth(300)
self.setFixedHeight(100)
self.QBtn.clicked.connect(self.deleteWorker)
layout = QVBoxLayout()
self.deleteinput = QLineEdit()
self.onlyInt = QIntValidator()
self.deleteinput.setValidator(self.onlyInt)
self.deleteinput.setPlaceholderText("№")
layout.addWidget(self.deleteinput)
layout.addWidget(self.QBtn)
self.setLayout(layout)
def deleteWorker(self):
delrol = self.deleteinput.text()
try:
self.conn = sqlite3.connect("database.db")
self.c = self.conn.cursor()
self.c.execute("DELETE from Late WHERE roll="+str(delrol))
self.conn.commit()
self.c.close()
self.conn.close()
QMessageBox.information(QMessageBox(),'Successful','Работник удален')
self.close()
except Exception:
QMessageBox.warning(QMessageBox(), 'Error', 'Не удалось удалить работника')
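# Illustrative launcher (an assumption, not in the original file): the dialog
# needs a running QApplication and a local "database.db" with a Late table.
if __name__ == "__main__":
    import sys
    app = QApplication(sys.argv)
    dialog = DeleteL()
    dialog.show()
    sys.exit(app.exec_())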
|
"""
Category: The Analects of Confucius (Lunyu)
"""
import sqlite3
import os
import json
def make_db(db, path):
sql = '''
CREATE TABLE IF NOT EXISTS "lunyu" (
"id" INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
"chapter" TEXT,
"paragraphs" TEXT
);
'''
print('\r\n论语 正在初始化...')
try:
conn = sqlite3.connect(db)
cur = conn.cursor()
cur.execute(sql)
conn.commit()
lunyu_data = os.path.join(path, 'lunyu.json')
        if not os.path.exists(lunyu_data):
            print('论语 数据文件不存在')  # the Analects data file does not exist
return
print('\t', lunyu_data)
with open(lunyu_data, 'r', encoding='UTF-8') as f:
data_dict = json.load(f)
items = [(str(item['chapter']), str(item['paragraphs']))
for item in data_dict]
cur.executemany(
"insert into lunyu(chapter, paragraphs) values (?,?)", items)
conn.commit()
print('论语 数据处理完毕.')
except Exception as e:
print(e)
conn.rollback()
finally:
conn.close()
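# Hedged usage sketch (added; not from the original source). The JSON layout is
# inferred from the keys read above ('chapter', 'paragraphs'); the database and
# data-directory paths below are placeholders:
#
#   lunyu.json: [{"chapter": "...", "paragraphs": ["...", "..."]}, ...]
#
# if __name__ == '__main__':
#     make_db('guwen.db', './data')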
|
def handle_sheet(*args):
# TODO
pass
|
n = int(input('Digite um número: '))
s1 = n * 2
s2 = n * 3
s3 = n ** (1/2)
print(f'O dobro de {n} vale {s1}')
print(f'O triplo de {n} vale {s2}')
print(f'A raiz quadrada de {n} é igual a {s3:.2f}')
|
# speedtest.py: FFT benchmark
import sys
import time
import math
import numpy as np
from scipy.fftpack import fft
from streamtools import *
def time_fft(secs=10):
print "Capturing for", secs, "seconds"
stream = InStream()
nblocks = seconds_to_blocks(stream, secs)
avg_read = 0 # should be 44100 Hz
avg_fft = 0 # should be faster than 44100 Hz
for i in range(nblocks):
read_begin = time.time()
block = stream.read_block()
fft_begin = time.time()
spec = np.absolute( fft(block) )
end = time.time()
avg_read += end - read_begin
avg_fft += end - fft_begin
avg_read /= nblocks
avg_fft /= nblocks
avg_read = stream.block_size / avg_read
avg_fft = stream.block_size / avg_fft
print "Read: %.1lf Hz" % avg_read
print "FFT: %.1lf Hz" % avg_fft
time_fft(30)
|
from math import ceil, floor, trunc, pow, sqrt
import emoji
print(emoji.emojize("Ráiz Quadrada :earth_americas:", use_aliases=True))
num = int(input('Digite um número: '))
raiz = sqrt(num)
print('A raíz de {} é igual a {}'.format(num, raiz))
|
# -*- coding: utf-8 -*-
"""
ytelapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class NumberTypeEnum(object):
"""Implementation of the 'NumberType' enum.
The capability supported by the number.Number type either SMS,Voice or
all
Attributes:
ALL: TODO: type description here.
VOICE: TODO: type description here.
SMS: TODO: type description here.
"""
ALL = 'all'
VOICE = 'voice'
SMS = 'sms'
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import json
from textwrap import dedent
from typing import Callable, Optional
import pytest
from pants.backend.python.target_types import PexExecutionMode, PexLayout
from pants.testutil.pants_integration_test import PantsResult, run_pants, setup_tmpdir
def run_generic_test(
*,
entry_point: str = "app.py",
execution_mode: Optional[PexExecutionMode] = None,
include_tools: bool = False,
layout: Optional[PexLayout] = None,
venv_site_packages_copies: bool = False,
) -> Callable[..., PantsResult]:
sources = {
"src_root1/project/app.py": dedent(
"""\
import sys
from utils.strutil import my_file
from codegen.hello_pb2 import Hi
def main():
print("Hola, mundo.", file=sys.stderr)
print(my_file())
sys.exit(23)
if __name__ == "__main__":
main()
"""
),
"src_root1/project/BUILD": dedent(
f"""\
python_sources(name='lib')
pex_binary(
name="binary",
entry_point={entry_point!r},
execution_mode={execution_mode.value if execution_mode is not None else None!r},
include_tools={include_tools!r},
layout={layout.value if layout is not None else None!r},
venv_site_packages_copies={venv_site_packages_copies!r},
)
"""
),
"src_root2/utils/strutil.py": dedent(
"""\
def my_file():
return __file__
"""
),
"src_root2/utils/BUILD": "python_sources()",
"src_root2/codegen/hello.proto": 'syntax = "proto3";\nmessage Hi {{}}',
"src_root2/codegen/BUILD": dedent(
"""\
protobuf_sources()
python_requirement(name='protobuf', requirements=['protobuf'])
"""
),
}
def run(*extra_args: str, **extra_env: str) -> PantsResult:
with setup_tmpdir(sources) as tmpdir:
args = [
"--backend-packages=pants.backend.python",
"--backend-packages=pants.backend.codegen.protobuf.python",
f"--source-root-patterns=['/{tmpdir}/src_root1', '/{tmpdir}/src_root2']",
"--pants-ignore=__pycache__",
"--pants-ignore=/src/python",
"run",
f"{tmpdir}/src_root1/project:binary",
*extra_args,
]
return run_pants(args, extra_env=extra_env)
result = run()
assert "Hola, mundo.\n" in result.stderr
file = result.stdout.strip()
assert "src_root2" not in file
assert file.endswith("utils/strutil.py")
if layout == PexLayout.LOOSE:
# Loose PEXs execute their own code directly
assert "pants-sandbox-" in file
else:
assert "pants-sandbox-" not in file
assert result.exit_code == 23
return run
@pytest.mark.parametrize("entry_point", ["app.py", "app.py:main"])
def test_entry_point(
entry_point: str,
):
run_generic_test(entry_point=entry_point)
@pytest.mark.parametrize("execution_mode", [None, PexExecutionMode.VENV])
@pytest.mark.parametrize("include_tools", [True, False])
def test_execution_mode_and_include_tools(
execution_mode: Optional[PexExecutionMode],
include_tools: bool,
):
run = run_generic_test(
execution_mode=execution_mode,
include_tools=include_tools,
)
if include_tools:
result = run("--", "info", PEX_TOOLS="1")
assert result.exit_code == 0, result.stderr
pex_info = json.loads(result.stdout)
assert (execution_mode is PexExecutionMode.VENV) == pex_info["venv"]
assert ("prepend" if execution_mode is PexExecutionMode.VENV else "false") == pex_info[
"venv_bin_path"
]
assert pex_info["strip_pex_env"]
@pytest.mark.parametrize("layout", PexLayout)
def test_layout(
layout: Optional[PexLayout],
):
run_generic_test(layout=layout)
def test_no_strip_pex_env_issues_12057() -> None:
sources = {
"src/app.py": dedent(
"""\
import os
import sys
if __name__ == "__main__":
exit_code = os.environ.get("PANTS_ISSUES_12057")
if exit_code is None:
os.environ["PANTS_ISSUES_12057"] = "42"
os.execv(sys.executable, [sys.executable, *sys.argv])
sys.exit(int(exit_code))
"""
),
"src/BUILD": dedent(
"""\
python_sources(name="lib")
pex_binary(
name="binary",
entry_point="app.py"
)
"""
),
}
with setup_tmpdir(sources) as tmpdir:
args = [
"--backend-packages=pants.backend.python",
f"--source-root-patterns=['/{tmpdir}/src']",
"run",
f"{tmpdir}/src:binary",
]
result = run_pants(args)
assert result.exit_code == 42, result.stderr
def test_local_dist() -> None:
sources = {
"foo/bar.py": "BAR = 'LOCAL DIST'",
"foo/setup.py": dedent(
"""\
from setuptools import setup
# Double-brace the package_dir to avoid setup_tmpdir treating it as a format.
setup(name="foo", version="9.8.7", packages=["foo"], package_dir={{"foo": "."}},)
"""
),
"foo/main.py": "from foo.bar import BAR; print(BAR)",
"foo/BUILD": dedent(
"""\
python_sources(name="lib", sources=["bar.py", "setup.py"])
python_sources(name="main_lib", sources=["main.py"])
python_distribution(
name="dist",
dependencies=[":lib"],
provides=python_artifact(name="foo", version="9.8.7"),
sdist=False,
generate_setup=False,
)
pex_binary(
name="bin",
entry_point="main.py",
# Force-exclude any dep on bar.py, so the only way to consume it is via the dist.
dependencies=[":main_lib", ":dist", "!!:lib"])
"""
),
}
with setup_tmpdir(sources) as tmpdir:
args = [
"--backend-packages=pants.backend.python",
f"--source-root-patterns=['/{tmpdir}']",
"run",
f"{tmpdir}/foo:bin",
]
result = run_pants(args)
assert result.stdout == "LOCAL DIST\n"
def test_run_script_from_3rdparty_dist_issue_13747() -> None:
sources = {
"src/BUILD": dedent(
"""\
python_requirement(name="cowsay", requirements=["cowsay==4.0"])
pex_binary(name="test", script="cowsay", dependencies=[":cowsay"])
"""
),
}
with setup_tmpdir(sources) as tmpdir:
SAY = "moooo"
args = [
"--backend-packages=pants.backend.python",
f"--source-root-patterns=['/{tmpdir}/src']",
"run",
f"{tmpdir}/src:test",
"--",
SAY,
]
result = run_pants(args)
result.assert_success()
assert SAY in result.stdout.strip()
|
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
import tensorflow as tf
# Model
x = tf.placeholder(tf.float32, [None, 784], name="x")
x_image = tf.reshape(x, [-1,28,28,1])
tf.summary.image('input', x_image, 3)
W = tf.Variable(tf.zeros([784,10]), name="W")
b = tf.Variable(tf.zeros([10]), name="b")
logits = tf.matmul(x, W) + b
y = tf.nn.softmax(logits)
y_ = tf.placeholder(tf.float32, shape=[None, 10], name="label")
tf.summary.histogram('weights', W)
tf.summary.histogram('biases', b)
# Loss func
with tf.name_scope("cross_entropy"):
    # pass the raw logits (not the softmax output) to the cross-entropy op
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=logits))
tf.summary.scalar('cross_entropy',cross_entropy)
# Training
with tf.name_scope("train"):
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
writer = tf.summary.FileWriter("summary/10")
writer.add_graph(sess.graph)
with tf.name_scope("accuracy"):
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('accuracy',accuracy)
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_:mnist.test.labels}))
merged_summary = tf.summary.merge_all()
for i in range(1000):
batch_xs, batch_ys = mnist.train.next_batch(100)
if(i % 5 == 0):
[test_accury, s] = sess.run([accuracy, merged_summary], feed_dict={x:batch_xs, y_:batch_ys})
writer.add_summary(s,i)
sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
|
import xml.dom.minidom
def getxml(value=None):
"""获取单节点的数据内容"""
xmlFile = xml.dom.minidom.parse('data.xml')
db = xmlFile.documentElement
itemList = db.getElementsByTagName(value)
item = itemList[0]
return item.firstChild.data
def getUser(parent=None, child=None):
"""获取单节点的数据内容"""
xmlFile = xml.dom.minidom.parse('movies.xml')
db = xmlFile.documentElement
itemList = db.getElementsByTagName(parent)
item = itemList[0]
return item.getAttribute(child)
print(getxml('yan'))
print(getUser('type', 'nick'))
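# Example input shapes (assumptions inferred from the lookups above, not taken
# from the original repository):
#
#   data.xml   -> <root><yan>some text</yan></root>
#   movies.xml -> <root><type nick="drama">...</type></root>
#
# getxml('yan') returns the text of the first <yan> element, and
# getUser('type', 'nick') returns the value of its 'nick' attribute.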
|
"""return true if there is no e in 'word', else false"""
def has_no_e(word):
"""return true if there is e in 'word', else false"""
def has_e(word):
"""return true if word1 contains only letters from word2, else false"""
def uses_only(word1, word2):
"""return true if word1 uses all the letters in word2, else false"""
def uses_all(word1, word2):
"""true/false is the word in alphabetical order?"""
def is_abecedarian(word):
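# Minimal self-check (added as an illustrative sketch, not from the original
# exercise): exercise each predicate on a couple of words.
if __name__ == "__main__":
    print(has_no_e("sky"))                 # True
    print(has_e("hello"))                  # True
    print(uses_only("banana", "ban"))      # True
    print(uses_all("banana", "ab"))        # True
    print(is_abecedarian("abbot"))         # True
    print(is_abecedarian("zebra"))         # False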
|
'''
@Description: DFS
@Date: 2020-05-31 11:45:50
@Author: Wong Symbol
@LastEditors: Wong Symbol
@LastEditTime: 2020-06-02 11:30:09
'''
'''
DFS
'''
class Graph():
def __init__(self):
        # use a dict to model an adjacency-list structure
self.data = {
'a' : ['b', 'c'],
'b' : ['a', 'c', 'd'],
'c' : ['a','b', 'd','e'],
'd' : ['b' , 'c', 'e', 'f'],
'e' : ['c', 'd'],
'f' : ['d']
}
track = []
g = Graph()
visited = set()
res = []
def backtrack(track, choices):
res.extend(track)
for item in choices:
if item not in visited:
print('select num is :', item)
track.append(item)
visited.add(item)
backtrack(track, g.data[item])
track.remove(item)
backtrack(track, g.data['a'])
print(set(res))
# Version adapted from a Zhihu answer
# Recursive version
def DFS(graph, s, queue=None):
    # avoid the mutable default-argument pitfall
    if queue is None:
        queue = []
    queue.append(s)
for i in graph[s]:
if i not in queue:
DFS(graph, i, queue)
return queue
# Iterative version, using a list as a stack
def DFS(graph, s):
    # implemented with a plain list acting as the stack
stack = []
stack.append(s)
visited = set()
visited.add(s)
while len(stack) > 0:
        vertex = stack.pop()  # pop from the end (LIFO) for depth-first order; pop(0) would make this BFS
nodes = graph[vertex]
for node in nodes:
if node not in visited:
stack.append(node)
visited.add(node)
print(vertex)
g = {
'a' : ['b', 'c'],
'b' : ['a', 'c', 'd'],
'c' : ['a','b', 'd','e'],
'd' : ['b' , 'c', 'e', 'f'],
'e' : ['c', 'd'],
'f' : ['d']
}
print(DFS(g, 'a'))
|
'''
Your function should take in a single parameter (a string `word`)
Your function should return a count of how many occurrences of ***"th"*** occur within `word`. Case matters.
Your function must utilize recursion. It cannot contain any loops.
'''
def count_th(word):
# base case:
# if the length of the word is below 2 then
# return 0
if len(word) < 2:
return 0
    # if the first letter is "t" and the second is "h",
    # count 1 plus the occurrences in the rest of the
    # word starting from the 3rd letter
elif word[0] == "t" and word[1] == "h":
return 1 + count_th(word[2:])
    # otherwise, recurse starting from the 2nd letter
else:
return count_th(word[1:])
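# Quick sanity check (added as an illustrative sketch, not part of the original
# exercise): "th" appears twice in "the throne", and case matters.
if __name__ == "__main__":
    print(count_th("the throne"))  # 2
    print(count_th("Thistle"))     # 0
    print(count_th(""))            # 0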
|
#!/usr/bin/env python3
from memory_profiler import profile
NUM = 5
TOTS = 100000000
lrange = lambda size : list(range(size))
def fun_flat():
alist = lrange(TOTS)
alen = len(alist)
print(alen)
blist = lrange(TOTS)
blen = len(blist)
print(blen)
clist = lrange(TOTS)
clen = len(clist)
print(clen)
dlist = lrange(TOTS)
dlen = len(dlist)
print(dlen)
elist = lrange(TOTS)
elen = len(elist)
print(elen)
def fun_nested():
def nested():
xlist = lrange(TOTS)
xlen = len(xlist)
return xlen
alen = nested()
print(alen)
blen = nested()
print(blen)
clen = nested()
print(clen)
dlen = nested()
print(dlen)
elen = nested()
print(elen)
def fun_loop():
pass
from timeit import timeit
"""
garbage_collector.collect()
print(timeit(fun_flat, number=1))
garbage_collector.collect()
print(timeit(fun_nested, number=1))
garbage_collector.collect()
print(timeit(fun_loop, number=1))
"""
|
name = 'gitoo'
description = 'Odoo third party addons installer.'
url = 'https://github.com/numigi/gitoo'
email = 'contact@numigi.com'
author = 'numigi'
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Given a set of candidate numbers (C) (without duplicates) and a target number (T),
# find all unique combinations in C where the candidate numbers sums to T.
# The same repeated number may be chosen from C unlimited number of times.
# Note:
# All numbers (including target) will be positive integers.
# The solution set must not contain duplicate combinations.
# For example, given candidate set [2, 3, 6, 7] and target 7,
# A solution set is:
# [
# [7],
# [2, 2, 3]
# ]
# Don't need to sort, but it's too slow.
class Solution(object):
def combinationSum(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
combinations = []
def helper(index, candidates, target, combination=[]):
for i in range(index, len(candidates)):
if target < 0:
return
if target == 0:
return combinations.append(combination)
else:
helper(i, candidates, target - candidates[i], combination+[candidates[i]])
helper(0, candidates, target)
return combinations
class Solution(object):
def combinationSum(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
if not candidates or not target:
return []
def helper(index, candidates, target, combination=[]):
if target == 0:
return combinations.append(combination)
for i in range(index, len(candidates)):
if candidates[i] > target:
break
# combination.append(candidates[i])
# In[44]: timeit('x += [1]', 'x = []', number=1000000)
# Out[44]: 0.21148269201512448
# In[45]: timeit('x.append(1)', 'x = []', number=1000000)
# Out[45]: 0.1697585310030263
# In[52]: timeit('x += 1,', 'x = []', number=1000000) # += 1,
# Out[52]: 0.12951252100174315
# So, use [].append() will be faster.
helper(i, candidates, target - candidates[i], combination + [candidates[i]])
combinations = []
candidates.sort()
helper(0, candidates, target)
return combinations
if __name__ == '__main__':
print(Solution().combinationSum([5, 2, 3, 6, 7], 7))
print(Solution().combinationSum([], 7))
print(Solution().combinationSum([], ''))
# 168 / 168 test cases passed.
# Status: Accepted
# Runtime: 92 ms
# Your runtime beats 79.71 % of python submissions.
# Here is the fastest solution in Leetcode. runtime: 68ms.
class Solution(object):
def combinationSum(self, candidates, target):
"""
:type candidates: List[int]
:type target: int
:rtype: List[List[int]]
"""
if not candidates or not target:
return []
candidates.sort()
self.res = []
self.dfs(candidates, 0, target, [])
return self.res
def dfs(self, candidates, start, remain_target, ans):
if remain_target == 0:
self.res.append(ans[:]) # Don't append(ans)!
return
for i in range(start, len(candidates)):
if candidates[i] > remain_target:
break
ans.append(candidates[i])
self.dfs(candidates, i, remain_target - candidates[i], ans)
            del ans[-1]  # backtrack in place instead of copying the list (cf. the timing notes above on append vs +=)
|
from django.shortcuts import render
from rest_framework import viewsets, permissions, status
from rest_framework.response import Response
from .serializers import userSerializer, ImageUserSerializer
from django.contrib.auth.models import User
from .models import ImageUser
from rest_framework.decorators import action
from django.shortcuts import get_object_or_404
import os
# Create your views here.
class UsersRolesView(viewsets.ModelViewSet):
serializer_class = userSerializer
permission_classes = [permissions.IsAuthenticated]
def get_queryset(self):
queryset = self.request.user
return queryset
@action(detail = False, methods = ['post',])
def avatar(self, *args, **kwargs):
user = self.get_queryset()
data = self.request.data
try:
avatar = ImageUser.objects.get(user = user)
if os.path.isfile(avatar.image.path):
os.remove(avatar.image.path)
avatar.image = self.request.FILES['avatar']
avatar.save()
print("aquiii bienn")
return Response(status = status.HTTP_200_OK)
except Exception:
print("aqui otro bien")
ImageUser.objects.create(user = user, image = self.request.FILES['avatar'])
return Response(status = status.HTTP_201_CREATED)
return Response(status = status.HTTP_400_BAD_REQUEST)
@avatar.mapping.get
def avatar_get(self, *args, **kwargs):
serializer = ImageUserSerializer
object_serializer = None
try:
image = ImageUser.objects.get(user = self.request.user)
object_serializer = ImageUserSerializer(image)
except Exception:
return Response(status = status.HTTP_404_NOT_FOUND)
return Response(object_serializer.data, status = status.HTTP_200_OK)
@action(detail = False, methods = ['post',])
def names(self, *args, **kwargs):
first = self.request.data.get('first_name', None)
last = self.request.data.get('last_name', None)
if first and last:
user = self.request.user
user.first_name = first
user.last_name = last
user.save()
return Response(status = status.HTTP_200_OK)
return Response(status = status.HTTP_400_BAD_REQUEST)
def list(self, *args, **kwargs):
query = self.get_queryset()
serializer = self.get_serializer_class()
serializer_response = serializer(query)
return Response(serializer_response.data, status = status.HTTP_200_OK)
|
# Generated by Django 2.1.11 on 2020-01-06 12:54
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Jpyutc',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.TextField(verbose_name='time')),
('open_data', models.TextField(verbose_name='open_data')),
('high_data', models.TextField(verbose_name='high_data')),
('row_data', models.TextField(verbose_name='row_data')),
('close_data', models.TextField(verbose_name='close_data')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
|
import mcpi.minecraft as minecraft
import mcpi.block as block
mc = minecraft.Minecraft.create()
# draw a 5 x 8 wall in front of the player
[x,y,z] = mc.player.getPos()
i = 0
while i < 5:
j = 0
while j < 8:
mc.setBlock(x+i,y+j,z+3,block.STONE)
j += 1
i += 1
|
from rply import LexerGenerator
from rply import Token
def build_lexer():
lexer = LexerGenerator()
# Lexer Analysis Rules
lexer.ignore(' ')
lexer.add("WHATEVR", r"WHATEVR")
lexer.add("VISIBLE", r"VISIBLE")
lexer.add("KTHXBAI", r"KTHXBAI")
lexer.add("GIMME", r"GIMME")
lexer.add("MKAY", r"MKAY")
lexer.add("HAS", r"HAS")
lexer.add("HAI", r"HAI")
lexer.add("ITZ", r"ITZ")
lexer.add("OF", r"OF")
lexer.add("BANG", r"!")
lexer.add("BY", r"BY")
lexer.add("AN", r"AN")
lexer.add("A", r"A")
lexer.add("R", r"R")
lexer.add("I", r"I")
lexer.add("MULTI_COMMENT", r"OBTW [.*|\n]TDLR") # Not working at all!
lexer.add("NEWLINE", "\n")
lexer.add("PRIMITIVE_TYPE", r"NUMBR|NUMBAR|LETTR|TROOF")
lexer.add("NUMBAR_LITERAL", r"-?\d+.\d+")
lexer.add("NUMBR_LITERAL", r"-?\d+")
lexer.add("TROOF_LITERAL", r"[WIN|FAIL]")
lexer.add("YARN_LITERAL", r"[\"|\'].*[\"|\']")
lexer.add("MATH_BINARY_OPERATOR", r"SUM|DIFF|PRODUKT|QUOSHUNT|BIGGR|SMALLR")
lexer.add("MATH_UNARY_OPERATOR", r"FLIP|SQUAR")
lexer.add("LOGICAL_BINARY_OPERATOR", r"BOTH|EIHER|WON")
lexer.add("LOGICAL_UNARY_OPERATOR", r"NOT")
lexer.add("LOGICAL_VARIABLE_OPERATOR", r"ALL|ANY")
lexer.add("COMPARISON_BINARY_OPERATOR", r"SAEM|DIFFRINT|FURSTSMALLR|FURSTBIGGR")
lexer.add("ASSIGNMENT_OPERATOR", r"CORRECT_THIS")
lexer.add("SINGLE_COMMENT", r"BTW.*\n") # New line required to be added to tokens list prior!
lexer.add("IDENTIFIER", r"[a-zA-Z][a-zA-Z_]*")
lexer.add("LETTR_LITERAL", r".")
lexer.add("ERROR", r"^[.]*")
return lexer.build()
def tokenize_LOLcode(lolcode_str):
lexer = build_lexer()
tokens = list(lexer.lex(lolcode_str))
return tokens
def test():
tokens = tokenize_LOLcode("""HAI 1.450
I HAS A result ITZ A NUMBR BTW I like apples
result R 14
VISIBLE result
OBTW This is a
multiline comment
TLDR
KTHXBYE""")
expected = [Token('HAI', 'HAI'), Token('NUMBAR_LITERAL', '1.450'), Token('NEWLINE', '\n'), Token('I', 'I'), Token('HAS', 'HAS'), Token('A', 'A'), Token('IDENTIFIER', 'result'), Token('ITZ', 'ITZ'), Token('A', 'A'), Token('PRIMITIVE_TYPE', 'NUMBR'), Token('NEWLINE', '\n'), Token('IDENTIFIER', 'result'), Token('R', 'R'), Token('NUMBR_LITERAL', '14'), Token('NEWLINE', '\n'), Token('NEWLINE', '\n'), Token('VISIBLE', 'VISIBLE'), Token('IDENTIFIER', 'result'), Token('NEWLINE', '\n'), Token('NEWLINE', '\n'), Token('KTHXBYE', 'KTHXBYE')]
print(tokens)
example_token = tokens[1]
print(example_token.gettokentype())
print(example_token.getstr())
print(expected)
assert expected == tokens
import pprint
pprint.pprint(expected)
test()
|
import os
# get the file path from the user and change the path
path = input('enter the file path :')
file_name = input('input file name :')
os.chdir(path)
file_counter = 1
number_of_digits = len(str(len(os.listdir(path))))  # zero-padding width for .zfill
# list all files in the directory and rename them
for file in os.listdir():
old_name, file_ext = os.path.splitext(file)
file_counter_string = str(file_counter).zfill(number_of_digits)
new_name_format ='{}_{}{}'.format(file_counter_string, file_name, file_ext)
print(new_name_format)
os.rename(file, new_name_format)
file_counter = file_counter + 1
|
# coding: utf-8
# # Hypothesis Testing
# This code does the following:
# * Reads the FC files for all the subjects
# * Z-Standardize all the voxel-roi correlation values of each ROI
# * Perform two tailed t-test for each voxel-roi pair correlation across subjects (Autism vs TD)
# In[192]:
import nibabel as nib
import numpy as np
from scipy import stats
from numpy import ma
import scipy.special as special
from statsmodels.stats import multitest
import itertools
import os
from os.path import join as opj
# from nipype.interfaces import afni
import json
import matching
import pandas as pd
# In[193]:
# Paths
# path_cwd = os.getcwd()
# path_split_list = path_cwd.split('/')
# s = path_split_list[0:-1] # for getting to the parent dir of pwd
# s = opj('/',*s) # *s converts list to path, # very important to add '/' in the begining so it is read as directory later
#
#
#
# # In[194]:
#
#
#
# # json_path = opj(data_directory,'task-rest_bold.json')
#
# json_path = 'scripts/json/paths.json'
# with open(json_path, 'rt') as fp:
# task_info = json.load(fp)
# In[195]:
#
# base_directory = opj(s,task_info["base_directory_for_results"])
# motion_correction_bet_directory = task_info["motion_correction_bet_directory"]
# parent_wf_directory = task_info["parent_wf_directory"]
# # functional_connectivity_directory = task_info["functional_connectivity_directory"]
# functional_connectivity_directory = 'temp_fc'
# coreg_reg_directory = task_info["coreg_reg_directory"]
# atlas_resize_reg_directory = task_info["atlas_resize_reg_directory"]
# data_directory = opj(s,task_info["data_directory"])
# datasink_name = task_info["datasink_name"]
# fc_datasink_name = task_info["fc_datasink_name"]
# # fc_datasink_name = 'temp_dataSink'
# atlasPath = opj(s,task_info["atlas_path"])
#
# hypothesis_test_dir = opj(base_directory, task_info["hypothesis_test_dir"])
# In[211]:
# # Now construct a function that takes a list of SUB_ID's and returns the FC Maps
# def get_subject_fc_file(subject_id_list,fc_file_path, bugs):
# import re
#
# return_fc_maps = []
# fc_file_list = np.load(fc_file_path)
# print('Brain files: ',fc_file_list)
# for subject_id in subject_id_list:
# # print("For subject: ",subject_id)
# found = False
# for brain in fc_file_list:
# sub_id_extracted = re.search('.+_subject_id_(\d+)', brain).group(1)
# if str(subject_id) in bugs:
# # print("In Bugs with subject id ",subject_id)
# found = True
# elif (subject_id == int(sub_id_extracted)):
# found = True
# return_fc_maps.append(brain)
# # print("Found for subject: ",subject_id)
# if found == False: # Some subject was not found Problem!
# print ('Unable to locate Subject: ',int(subject_id),'extracted: ',int(sub_id_extracted))
# return 0
# return return_fc_maps
#
# In[212]:
# In[213]:
# In[214]:
# len(autistic_list),len(td_list)
# In[215]:
# # To Stop execution Raise error:
# raise Exception('Execution stops here!')
# In[216]:
# number_of_fcmaps = len(fc_file_list) #184
# In[217]:
# number_of_fcmaps
# In[218]:
# Author Deepak Singla: singlakdeepak5@gmail.com
def div0( a, b ):
    '''
    Element-wise division that ignores places where the denominator
    (e.g. a standard deviation) is zero by suppressing divide warnings.
    Note: div0([-1, 0, 1], 0) -> [0, 0, 0] only if the masking line
    below is re-enabled; as written, zero denominators give inf/NaN.
    '''
with np.errstate(divide='ignore', invalid='ignore'):
c = np.divide( a, b )
# c[ ~ np.isfinite( c )] = 0 # -inf inf NaN
return c
def calc_mean_and_std(ROICorrMaps, n_subjects, ROIAtlasmask, ddof =1, applyFisher = False):
'''
Function for calculating the mean and standard
deviation of the data. At a time, only one of the nii
file is loaded and the elements keep on adding as we
enumerate over the subjects.
'''
mask = nib.load(ROIAtlasmask).get_data()
mask = ma.masked_object(mask,0).mask
if (n_subjects != 0):
f = nib.load(ROICorrMaps[0])
dimensions = f.get_header().get_data_shape()
print(dimensions)
    else:
        raise ValueError('ROICorrMaps is empty')
mask = np.repeat(mask[:, :, :, np.newaxis], dimensions[3], axis=3)
# print(ROICorrMaps)
Sample_mean_Array = np.zeros(dimensions)
Sample_std_Array = np.zeros(dimensions)
Sample_mean_Array = ma.masked_array(Sample_mean_Array,
mask = mask,
fill_value = 0)
Sample_std_Array = ma.masked_array(Sample_std_Array,
mask = mask ,
fill_value = 0)
for count, subject in enumerate(ROICorrMaps):
Corr_data = nib.load(subject).get_data()
Corr_data = ma.masked_array(Corr_data, mask = mask, fill_value = 0)
if applyFisher:
Corr_data = np.arctanh(Corr_data)
Sample_mean_Array += Corr_data
Sample_std_Array += np.square(Corr_data)
print('Done subject ', count+1)
Sample_mean_Array /= n_subjects
# import pdb; pdb.set_trace()
Sample_std_Array = np.sqrt((Sample_std_Array - n_subjects*np.square(Sample_mean_Array))/(n_subjects - ddof))
return Sample_mean_Array,Sample_std_Array
def calc_mean_and_std_if_npy(ROICorrMaps, n_subjects, ddof =1, applyFisher = False):
'''
Function to be used if the file is given in the format
No of ROIs versus All brain voxels in the ROI mapped.
'''
print(ROICorrMaps)
initialize = np.load(ROICorrMaps[0])
initialize = ma.masked_array(initialize)
if applyFisher:
initialize = np.arctanh(initialize)
Sample_mean_Array = ma.masked_array(initialize,
fill_value = 0)
Sample_std_Array = ma.masked_array(np.square(initialize),
fill_value = 0)
del initialize
print('Done subject ', 0)
for count, subject in enumerate(ROICorrMaps[1:]):
Corr_data = np.load(subject)
Corr_data = ma.masked_array(Corr_data)
if applyFisher:
Corr_data = np.arctanh(Corr_data)
Sample_mean_Array += Corr_data
Sample_std_Array += np.square(Corr_data)
print('Done subject ', count+1)
Sample_mean_Array /= n_subjects
Sample_std_Array = np.sqrt((Sample_std_Array - n_subjects*np.square(Sample_mean_Array))/(n_subjects - ddof))
return Sample_mean_Array,Sample_std_Array
def _ttest_1samp(Sample_mean_Array, Sample_std_Array, n_subjects, PopMean = 0.0):
ttest_1samp_for_all = div0((Sample_mean_Array - PopMean) * np.sqrt(n_subjects), Sample_std_Array)
df = n_subjects - 1
# pval = stats.t.sf(np.abs(ttest_1samp_for_all), df)*2
pval = special.betainc(0.5*df, 0.5, df/ (df + ttest_1samp_for_all*ttest_1samp_for_all)).reshape(ttest_1samp_for_all.shape)
# ttest_1samp_for_all, pval = ma.filled(ttest_1samp_for_all), ma.filled(pval)
return ttest_1samp_for_all, pval
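# Explanatory note (added; not part of the original analysis). For each
# voxel/ROI entry the helper above computes the one-sample t statistic
#     t = (sample_mean - PopMean) * sqrt(n) / sample_std,   df = n - 1
# and the two-sided p-value via the regularized incomplete beta function,
#     p = betainc(df/2, 1/2, df / (df + t**2)),
# which equals 2 * (1 - CDF_t(|t|, df)), i.e. the same value the commented-out
# scipy.stats.t.sf line would give.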
def ttest_1samp_for_all_ROIs(ROICorrMaps,
ROIAtlasmask,
PopMean = 0.0,
applyFisher = False):
'''
This is the 1 sample t-test for ROI correlation maps.
df = no of subjects - 1
* ROICorrMaps is the list of filepaths of ROI correlation
maps for a group.
* Each ROI correlation map has the 4th dimension equal to
the number of ROIs.
* It calculates both the ttest as well as the p values.
QUESTIONS???????????????????????????????????????????????
For application of the Fisher transform, I saw that it is
same as the inverse hyperbolic tangent function.
Doubt is regarding the standard deviation of the distribution after
applying Fisher. It was written that the sd is now 1/sqrt(no_of_subjs - 3).
So, that means for each voxel or variable, the sd now becomes this.
Ref: https://docs.scipy.org/doc/numpy/reference/generated/numpy.arctanh.html
https://en.wikipedia.org/wiki/Fisher_transformation
TO BE ASKED!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# The ttest will return t value Inf or NaN where the denom is
# zero. See what to return in these places. Ask tomorrow.
'''
n_subjects = len(ROICorrMaps)
assert (n_subjects>0)
Sample_mean_Array, Sample_std_Array = calc_mean_and_std(ROICorrMaps,
n_subjects,
ROIAtlasmask, ddof =1,
applyFisher = applyFisher)
ttest_1samp_for_all, pval = _ttest_1samp(Sample_mean_Array,
Sample_std_Array,
n_subjects,
PopMean = PopMean)
return ttest_1samp_for_all, pval
def ttest_1samp_ROIs_if_npy(ROICorrMaps,
PopMean = 0.0,
applyFisher = False):
n_subjects = len(ROICorrMaps)
assert (n_subjects>0)
Sample_mean_Array, Sample_std_Array = calc_mean_and_std_if_npy( ROICorrMaps,
n_subjects, ddof =1,
applyFisher = applyFisher)
return _ttest_1samp(Sample_mean_Array,
Sample_std_Array,
n_subjects,
PopMean = PopMean)
def _ttest_ind(Sample_mean_ArrayA, Sample_var_ArrayA, n_subjectsA,
Sample_mean_ArrayB,Sample_var_ArrayB, n_subjectsB,
equal_var = True):
if equal_var:
# force df to be an array for masked division not to throw a warning
df = ma.asanyarray(n_subjectsA + n_subjectsB - 2.0)
svar = ((n_subjectsA-1)*Sample_var_ArrayA+(n_subjectsB-1)*Sample_var_ArrayB)/ df
denom = ma.sqrt(svar*(1.0/n_subjectsA + 1.0/n_subjectsB)) # n-D computation here!
else:
vn1 = Sample_var_ArrayA/n_subjectsA
vn2 = Sample_var_ArrayB/n_subjectsB
df = (vn1 + vn2)**2 / (vn1**2 / (n_subjectsA - 1) + vn2**2 / (n_subjectsB - 1))
# If df is undefined, variances are zero.
# It doesn't matter what df is as long as it is not NaN.
df = np.where(np.isnan(df), 1, df)
denom = ma.sqrt(vn1 + vn2)
with np.errstate(divide='ignore', invalid='ignore'):
ttest_ind = (Sample_mean_ArrayA - Sample_mean_ArrayB) / denom
pvalues = special.betainc(0.5*df, 0.5, df/(df + ttest_ind*ttest_ind)).reshape(ttest_ind.shape)
# ttest_ind, pvalues = ma.filled(ttest_ind), ma.filled(pvalues)
return ttest_ind, pvalues,Sample_mean_ArrayA ,Sample_mean_ArrayB
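# Explanatory note (added; not part of the original analysis). With
# equal_var=True the branch above is the pooled two-sample t-test,
#     s_p^2 = ((nA - 1) * varA + (nB - 1) * varB) / (nA + nB - 2)
#     t = (meanA - meanB) / sqrt(s_p^2 * (1/nA + 1/nB)),
# and with equal_var=False it is Welch's t-test, whose degrees of freedom use
# the Welch-Satterthwaite approximation computed in the else branch.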
def ttest_ind_samples(ROICorrMapsA, ROICorrMapsB, ROIAtlasmask,
equal_var = True, applyFisher = False):
'''
Modified from https://docs.scipy.org/doc/scipy-0.19.1/reference/generated/scipy.stats.ttest_ind.html ,
https://github.com/scipy/scipy/blob/v0.19.1/scipy/stats/stats.py#L3950-L4072
Since it didn't support if the data is large and everything can't be loaded at once. So,
such modification has been made.
'''
n_subjectsA = len(ROICorrMapsA)
n_subjectsB = len(ROICorrMapsB)
assert (n_subjectsA > 0)
assert (n_subjectsB > 0)
Sample_mean_ArrayA, Sample_std_ArrayA = calc_mean_and_std(ROICorrMapsA,
n_subjectsA,
ROIAtlasmask, ddof =1,
applyFisher = applyFisher)
Sample_var_ArrayA = np.square(Sample_std_ArrayA)
del(Sample_std_ArrayA)
# n_subjectsB = len(ROICorrMapsB)
Sample_mean_ArrayB, Sample_std_ArrayB = calc_mean_and_std(ROICorrMapsB,
n_subjectsB,
ROIAtlasmask, ddof =1,
applyFisher = applyFisher)
Sample_var_ArrayB = np.square(Sample_std_ArrayB)
del(Sample_std_ArrayB)
# pvalues = stats.t.sf(np.abs(ttest_ind), df)*2
return _ttest_ind(Sample_mean_ArrayA, Sample_var_ArrayA, n_subjectsA,
Sample_mean_ArrayB, Sample_var_ArrayB, n_subjectsB,
equal_var = equal_var)
def ttest_ind_samples_if_npy(ROICorrMapsA, ROICorrMapsB, equal_var = True, applyFisher = False):
'''
Modified from https://docs.scipy.org/doc/scipy-0.19.1/reference/generated/scipy.stats.ttest_ind.html ,
https://github.com/scipy/scipy/blob/v0.19.1/scipy/stats/stats.py#L3950-L4072
Since it didn't support if the data is large and everything can't be loaded at once. So,
such modification has been made.
'''
n_subjectsA = len(ROICorrMapsA)
n_subjectsB = len(ROICorrMapsB)
assert (n_subjectsA > 0)
assert (n_subjectsB > 0)
Sample_mean_ArrayA, Sample_std_ArrayA = calc_mean_and_std_if_npy(ROICorrMapsA,
n_subjectsA, ddof = 1,
applyFisher = applyFisher)
Sample_var_ArrayA = np.square(Sample_std_ArrayA)
del(Sample_std_ArrayA)
Sample_mean_ArrayB, Sample_std_ArrayB = calc_mean_and_std_if_npy(ROICorrMapsB,
n_subjectsB, ddof =1,
applyFisher = applyFisher)
Sample_var_ArrayB = np.square(Sample_std_ArrayB)
del(Sample_std_ArrayB)
# pvalues = stats.t.sf(np.abs(ttest_ind), df)*2
return _ttest_ind(Sample_mean_ArrayA, Sample_var_ArrayA, n_subjectsA,
Sample_mean_ArrayB, Sample_var_ArrayB, n_subjectsB,
equal_var = equal_var)
def convert_ma_to_np(MaskedArrayObj):
return ma.filled(MaskedArrayObj)
def fdr_correction(pvalues, type='ind_ROIs'):
    '''
    ind_ROIs: FDR applied within each ROI (last axis) separately; all: one family.
    (Minimal completion of the original stub via statsmodels' fdrcorrection.)
    '''
    p = np.asarray(pvalues)
    if type == 'all':
        rej, corr = multitest.fdrcorrection(p.ravel())
        return rej.reshape(p.shape), corr.reshape(p.shape)
    per_roi = [multitest.fdrcorrection(p[..., roi].ravel()) for roi in range(p.shape[-1])]
    rejected = np.stack([r.reshape(p.shape[:-1]) for r, _ in per_roi], axis=-1)
    corrected = np.stack([c.reshape(p.shape[:-1]) for _, c in per_roi], axis=-1)
    return rejected, corrected
# ### Create an MNI 3mm brain mask
#
# In[219]:
# mask = opj(base_directory,parent_wf_directory,motion_correction_bet_directory,coreg_reg_directory,'resample_mni/MNI152_T1_2mm_brain_resample_mask.nii.gz')
# In[220]:
# Author Deepak Singla : singlakdeepak5@gmail.com
def main(autistic_list, td_list, combination, mask, applyFisher, hypothesis_test_dir):
# combination = 'hypothesis_test_' + combination
apply_fisher = applyFisher
list1 = autistic_list
list2 = td_list
Tvals, Pvals, meanC1, meanC2 = ttest_ind_samples(list1,list2,mask,equal_var = False, applyFisher=apply_fisher)
Tvals, Pvals, meanC1, meanC2 = convert_ma_to_np(Tvals), convert_ma_to_np(Pvals), convert_ma_to_np(meanC1), convert_ma_to_np(meanC2)
save_destination = opj(hypothesis_test_dir,combination)
print('Saving files in ',save_destination)
if not os.path.exists(save_destination):
os.makedirs(save_destination) # to create a nested directory structure
Tvals_path = opj(save_destination,'Tvals')
Pvals_path = opj(save_destination,'Pvals')
mean1_path = opj(save_destination,'meanC1')
mean2_path = opj(save_destination,'meanC2')
np.save(Tvals_path,Tvals)
np.save(Pvals_path,Pvals)
np.save(mean1_path,meanC1)
np.save(mean2_path,meanC2)
#
# fc_datasink_name = 'fc_datasink'
# itr = (list(itertools.product([0, 1], repeat=3)))
# (1,1,1),
# itr = [(1,0,0,1)]
# def main(paths, bugs, matching=0, motion_param_regression=0, global_signal_regression=0, band_pass_filtering=0, \
# smoothing=0, num_proc = 7):
# json_path=paths[0]
# base_directory=paths[1]
# motion_correction_bet_directory=paths[2]
# parent_wf_directory=paths[3]
# functional_connectivity_directory=paths[4]
# coreg_reg_directory=paths[5]
# atlas_resize_reg_directory=paths[6]
# subject_list = paths[7]
# datasink_name=paths[8]
# fc_datasink_name=paths[9]
# atlasPath=paths[10]
# brain_path=paths[11]
# mask_path=paths[12]
# atlas_path=paths[13]
# tr_path=paths[14]
# motion_params_path=paths[15]
# func2std_mat_path=paths[16]
# MNI3mm_path=paths[17]
# demographics_file_path = paths[18]
# phenotype_file_path = paths[19]
# hypothesis_test_dir = paths[20]
#
# # Runall:
#
#
# if phenotype_file_path == None:
# phenotype_file_path = '/home1/varunk/data/ABIDE1/RawDataBIDs/composite_phenotypic_file.csv'
#
# df = pd.read_csv(phenotype_file_path) # , index_col='SUB_ID'
#
# df = df.sort_values(['SUB_ID'])
# # df = df.sort_values(['SUB+AF8-ID'])
#
# if bugs == None:
# bugs = ['51232','51233','51242','51243','51244','51245','51246','51247','51270','51310','50045', '51276', '50746', '50727', '51276']
#
# # Bugs:
# # 50045 - ROI Missing
# # 51276, 50746, 50727 - Many in between ROIs Missing
# # 51276 - Many in between ROIs Missing
#
# # '0051242' in bugs
#
#
# # selecting Autistic males(DSM IV) of age <= 18 years
# # df_aut_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] <=18) & (df['DSM_IV_TR'] == 1) ]
# # df_aut_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] <=18) & (df['DSM_IV_TR'] == 1) & (df['EYE_STATUS_AT_SCAN'] == 1)] # eyes open
# # df_aut_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] <=18) & (df['DSM_IV_TR'] == 1) & (df['EYE_STATUS_AT_SCAN'] == 2)] # eyes closed
# # df_aut_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] >=12) & (df['AGE_AT_SCAN'] <=18) & (df['DSM_IV_TR'] == 1) & (df['EYE_STATUS_AT_SCAN'] == 1)] # eyes open age 12-18
# # df_aut_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] >=6) & (df['AGE_AT_SCAN'] <12) & (df['DSM_IV_TR'] == 1) & (df['EYE_STATUS_AT_SCAN'] == 1)] # eyes open age 6 - lt 12
# # df_aut_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] <=18) & (df['DSM_IV_TR'] == 1)] # AGE <= 18
#
#
# # In[205]:
#
#
# # df_aut_lt18_m.shape
#
#
# # In[206]:
#
#
# # df_td_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] <=18) & (df['DSM_IV_TR'] == 0) ]
# # df_td_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] <=18) & (df['DSM_IV_TR'] == 0) & (df['EYE_STATUS_AT_SCAN'] == 1)] # eyes open
# # df_td_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] <=18) & (df['DSM_IV_TR'] == 0) & (df['EYE_STATUS_AT_SCAN'] == 2)] # eyes closed
# # df_td_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] >=12) & (df['AGE_AT_SCAN'] <=18) & (df['DSM_IV_TR'] == 0) & (df['EYE_STATUS_AT_SCAN'] == 1)] # eyes open age 12- 18
# # df_td_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] >=6) & (df['AGE_AT_SCAN'] <12) & (df['DSM_IV_TR'] == 0) & (df['EYE_STATUS_AT_SCAN'] == 1)] # eyes open age 6 - lt 12
# # df_td_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] <=18) & (df['DSM_IV_TR'] == 0)] # AGE <= 18
#
# # df_aut_lt18_m = df.loc[(df['SEX'] == 1) & (df['AGE_AT_SCAN'] <=18) & (df['DSM_IV_TR'] == 0) & (df['EYE_STATUS_AT_SCAN'] == 2)] # TD eyes closed
#
# # In[207]:
#
#
# # df_td_lt18_m.shape
#
#
# # In[208]:
#
#
# # table_males_np = table_males.as_matrix(columns=['SUB_ID','DX_GROUP', 'DSM_IV_TR', 'AGE_AT_SCAN' ,'SEX' ,'EYE_STATUS_AT_SCAN'])
#
#
# # In[209]:
#
#
# # --------------------- Matched data --------------------------------------------
#
# demographics_file_path = '/home1/varunk/Autism-Connectome-Analysis-brain_connectivity/notebooks/demographics.csv'
# phenotype_file_path = '/home1/varunk/data/ABIDE1/RawDataBIDs/composite_phenotypic_file.csv'
# df_demographics = pd.read_csv(demographics_file_path)
# df_phenotype = pd.read_csv(phenotype_file_path)
# df_phenotype = df_phenotype.sort_values(['SUB_ID'])
#
#
#
# # Volume matching
# # print('Volume Matching')
# # volumes_bins = np.array([[0,150],[151,200],[201,250],[251,300]])
# # matched_df_TD = df_phenotype
# # matched_df_AUT = df_phenotype
# # matched_df_TD, matched_df_AUT = matching.volumes_matching(volumes_bins, df_demographics, matched_df_TD, matched_df_AUT)
# #
#
#
#
# # Age 6 - 18
#
# df_td_lt18_m = df_phenotype.loc[(df_phenotype['SEX'] == 1) & (df_phenotype['DX_GROUP'] == 2) \
# & (df_phenotype['EYE_STATUS_AT_SCAN'] == 1) ]
# # & (df_phenotype['DSM_IV_TR'] == 0) \
#
# df_aut_lt18_m = df_phenotype.loc[(df_phenotype['SEX'] == 1) & (df_phenotype['DSM_IV_TR'] == 1) \
# & (df_phenotype['EYE_STATUS_AT_SCAN'] == 1) ]
#
#
# # Age 12 - 18
#
#
# # df_td_lt18_m = matched_df_TD.loc[(matched_df_TD['SEX'] == 1) & (matched_df_TD['DSM_IV_TR'] == 0) \
# # & (matched_df_TD['EYE_STATUS_AT_SCAN'] == 1)
# # & (matched_df_TD['AGE_AT_SCAN'] >= 12 )
# # & (matched_df_TD['AGE_AT_SCAN'] <= 18) ]
# #
# # df_aut_lt18_m = matched_df_AUT.loc[(matched_df_AUT['SEX'] == 1) & (matched_df_AUT['DSM_IV_TR'] == 1) \
# # & (matched_df_AUT['EYE_STATUS_AT_SCAN'] == 1)
# # & (matched_df_AUT['AGE_AT_SCAN'] >= 12 )
# # & (matched_df_AUT['AGE_AT_SCAN'] <= 18) ]
#
# # # TR matching
# print('TR Matching with range (0,2.5]')
#
# df_demographics = df_demographics.drop(df_demographics.index[[7]]) # Deleting OHSU with volumes 82
#
# TR_bins = np.array([[0,2.5]])
# # matched_df_TD = df_phenotype
# # matched_df_AUT = df_phenotype
# df_td_lt18_m, df_aut_lt18_m = matching.tr_matching(TR_bins,df_demographics, df_td_lt18_m, df_aut_lt18_m)
#
#
# # Age Matching
# print('Age Matching')
# age_bins = np.array([[0,9],[9,12],[12,15],[15,18]])
# # matched_df_TD = df_phenotype
# # matched_df_AUT = df_phenotype
# df_td_lt18_m, df_aut_lt18_m = matching.age_matching(age_bins, df_td_lt18_m, df_aut_lt18_m)
#
#
#
#
#
#
# # ----------------------------------------Checking the difference between eyes closed vs open ---------------------------------
#
# # df_td_lt18_m = matched_df_TD.loc[(matched_df_TD['SEX'] == 1) & (matched_df_TD['DSM_IV_TR'] == 0) \
# # & (matched_df_TD['EYE_STATUS_AT_SCAN'] == 1) \
# # & (matched_df_TD['AGE_AT_SCAN'] >= 12 ) \
# # & (matched_df_TD['AGE_AT_SCAN'] <= 18) ]
# #
# # df_aut_lt18_m = matched_df_TD.loc[(matched_df_TD['SEX'] == 1) & (matched_df_TD['DSM_IV_TR'] == 0) \
# # & (matched_df_TD['EYE_STATUS_AT_SCAN'] == 2) \
# # & (matched_df_TD['AGE_AT_SCAN'] >= 12 ) \
# # & (matched_df_TD['AGE_AT_SCAN'] <= 18) ]
#
#
# # -------------------------------------------------------------------------------------------------------------------------
# # import pdb; pdb.set_trace()
# df_aut_subid = df_aut_lt18_m.as_matrix(columns=['SUB_ID'])
# df_td_subid = df_td_lt18_m.as_matrix(columns=['SUB_ID'])
#
# print("Storing the subjects' information used")
# df_td_lt18_m.to_csv('TD_subects.csv')
# print('Saved TD_subects.csv')
# df_aut_lt18_m.to_csv('AUT_subjects.csv')
# print('Saved AUT_subects.csv')
#
#
# # In[210]:
#
#
# # df_aut_subid#, df_td_subid
# for motion_param_regression, band_pass_filtering, global_signal_regression, smoothing in itr:
# combination = 'pearcoff_motionRegress' + str(int(motion_param_regression)) + 'filt' + \
# str(int(band_pass_filtering)) + 'global' + str(int(global_signal_regression)) + \
# 'smoothing' + str(int(smoothing))
#
# print("Combination: ",combination)
# print(motion_param_regression,band_pass_filtering, global_signal_regression, smoothing)
# fc_file_list = opj(base_directory,fc_datasink_name,combination,'fc_map_brain_file_list.npy')
# print('Reading the brain paths from: ',fc_file_list)
# # apply_fisher = True
#
#
# autistic_list = (get_subject_fc_file(df_aut_subid.squeeze(), fc_file_list, bugs))
# print("Number of autistic participants ", len(autistic_list))
#
# td_list = (get_subject_fc_file(df_td_subid.squeeze(), fc_file_list, bugs))
# print("Number of TD participants ", len(td_list))
#
# # participants_considered = min(len(autistic_list), len(td_list))
#
# # participants_considered = 2
#
# # print("Number of participants being Considered per group:", participants_considered)
#
# autistic_list = autistic_list#[0:participants_considered]
# td_list = td_list#[0:participants_considered]
#
# main(autistic_list,td_list, combination)
|
from router_solver import *
import pygame
import game_engine.constants
from game_engine.constants import *
class SpriteSheet(object):
def __init__(self, file_name):
# Load the sprite sheet.
BLACK = (0, 0, 0)
self.sprite_sheet = pygame.image.load(file_name).convert()
self.sprite_sheet.set_colorkey(BLACK)
def get_image(self, x, y, width, height):
"""Grab a single image out of a larger spritesheet
Pass in the x, y location of the sprite
and the width and height of the sprite."""
# Create a new blank image
image = pygame.Surface([width, height]).convert()
# Copy the sprite from the large sheet onto the smaller image
image.blit(self.sprite_sheet, (0, 0), (x, y, width, height))
image.set_colorkey(Constants.BLUE)
# Return the image
return image
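# Illustrative usage (an assumption; the sprite-sheet file name and coordinates
# below are placeholders). pygame needs a display surface before convert() works.
if __name__ == "__main__":
    pygame.init()
    screen = pygame.display.set_mode((320, 240))
    sheet = SpriteSheet("sprites.png")        # hypothetical sprite sheet image
    frame = sheet.get_image(0, 0, 32, 32)     # grab the 32x32 sprite at (0, 0)
    screen.blit(frame, (10, 10))
    pygame.display.flip()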
|
# Generated by Django 2.1.2 on 2018-10-06 03:02
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='NbaNews',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(blank=True)),
('title', models.CharField(blank=True, default='', max_length=100)),
('author', models.CharField(max_length=100)),
('context', models.CharField(blank=True, default='', max_length=1000)),
('photo', models.URLField(blank=True, default='')),
('video', models.URLField(blank=True, default='')),
],
),
]
|
import argparse
# defaults
DEF_MODEL_PATH = "ssdlite_mobilenet_v2_coco_2018_05_09.pb"
DEF_LABEL_PATH = "label/mscoco_label_map.pbtxt"
DEF_CONFIDENCE = 0.5
DEF_CAMERA_ID = 0
DEF_VERBOSE_LOG = False
DEF_TARGET_CLASS = 1 # detect people
DEF_LOST_FRAME = 20
DEF_TRACKING_HISTORY = 10
# argument description
parser_cfg = argparse.ArgumentParser()
parser_cfg.add_argument('--model', dest='model', default=DEF_MODEL_PATH, help='trained model path')
parser_cfg.add_argument('--label', dest='label', default=DEF_LABEL_PATH, help='label definition file path')
parser_cfg.add_argument('--confidence', dest='confidence', default=DEF_CONFIDENCE, help='minimum score of the detection probability')
# todo use 'choice' option
parser_cfg.add_argument('--camera_id', dest='camera_id', default=DEF_CAMERA_ID, help='camera id')
parser_cfg.add_argument('--input_file', '-i', dest='input_file', default="", help='input file path')
parser_cfg.add_argument('--static_image', dest='static_image', default="core/image/blank.png", help='static image file path')
parser_cfg.add_argument('--verbose', '-v', dest='verbose', action='store_true', help='print verbose log if True')
parser_cfg.add_argument('--target', dest='target', default=DEF_TARGET_CLASS, help='class id to be detected')
# parser_cfg.add_argument('--all', '-a', dest='all', action='store_true', help='run all')
parser_cfg.add_argument('--lost_frame', dest='lost_frame', default=DEF_LOST_FRAME, help='remove the tracking point if the object stays missing for this many frames')
parser_cfg.add_argument('--history', type=int, dest='history', default=DEF_TRACKING_HISTORY, help='tracking history')
# read arguments
args_cfg = parser_cfg.parse_args()
MODEL_PATH = args_cfg.model
LABEL_PATH = args_cfg.label
CONFIDENCE = args_cfg.confidence
CAMERA_ID = args_cfg.camera_id
VERBOSE_LOG = args_cfg.verbose
TARGET_CLASS = args_cfg.target
INPUT_FILE = args_cfg.input_file
STATIC_IMAGE = args_cfg.static_image
# RUN_ALL = args_cfg.all
LOST_COUNT = args_cfg.lost_frame
TRACKING_HISTORY = args_cfg.history
COLOR_ORANGE = (24, 89, 207) # blue, green, red
COLOR_YELLOW = (90, 203, 246) # blue, green, red
TEXT_COLOR = (32, 32, 32) # blue, green, red
TEXT_BGCOLOR = (0, 255, 127) # blue, green, red
PASS_COUNT_LEFT_BORDER = 250
PASS_COUNT_RIGHT_BORDER = 390
# debug print
if VERBOSE_LOG:
print("Config")
print(f"\tMODEL_PATH : {MODEL_PATH}")
print(f"\tLABEL_PATH : {LABEL_PATH}")
print(f"\tCONFIDENCE : {CONFIDENCE}")
print(f"\tCAMERA_ID : {CAMERA_ID}")
print(f"\tVERBOSE_LOG : {VERBOSE_LOG}")
print(f"\tTARGET_CLASS : {TARGET_CLASS}")
print(f"\tINPUT_FILE : {INPUT_FILE}")
print(f"\tLOST_COUNT : {LOST_COUNT}")
print(f"\tTRACKING_HISTORY : {TRACKING_HISTORY}")
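# Example invocation (illustrative; the script name and file paths are placeholders):
#   python people_counter.py --model ssdlite_mobilenet_v2_coco_2018_05_09.pb \
#       --label label/mscoco_label_map.pbtxt --confidence 0.5 --target 1 -v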
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from typing import List
import pytest
from pants.backend.project_info.dependents import DependentsGoal
from pants.backend.project_info.dependents import rules as dependent_rules
from pants.engine.target import Dependencies, SpecialCasedDependencies, Target
from pants.testutil.rule_runner import RuleRunner
class SpecialDeps(SpecialCasedDependencies):
alias = "special_deps"
class MockDepsField(Dependencies):
pass
class MockTarget(Target):
alias = "tgt"
core_fields = (MockDepsField, SpecialDeps)
@pytest.fixture
def rule_runner() -> RuleRunner:
runner = RuleRunner(rules=dependent_rules(), target_types=[MockTarget])
runner.write_files(
{
"base/BUILD": "tgt()",
"intermediate/BUILD": "tgt(dependencies=['base'])",
"leaf/BUILD": "tgt(dependencies=['intermediate'])",
}
)
return runner
def assert_dependents(
rule_runner: RuleRunner,
*,
targets: List[str],
expected: List[str],
transitive: bool = False,
closed: bool = False,
) -> None:
args = []
if transitive:
args.append("--transitive")
if closed:
args.append("--closed")
result = rule_runner.run_goal_rule(DependentsGoal, args=[*args, *targets])
assert result.stdout.splitlines() == expected
def test_no_targets(rule_runner: RuleRunner) -> None:
assert_dependents(rule_runner, targets=[], expected=[])
def test_normal(rule_runner: RuleRunner) -> None:
assert_dependents(rule_runner, targets=["base"], expected=["intermediate:intermediate"])
def test_no_dependents(rule_runner: RuleRunner) -> None:
assert_dependents(rule_runner, targets=["leaf"], expected=[])
def test_closed(rule_runner: RuleRunner) -> None:
assert_dependents(rule_runner, targets=["leaf"], closed=True, expected=["leaf:leaf"])
def test_transitive(rule_runner: RuleRunner) -> None:
assert_dependents(
rule_runner,
targets=["base"],
transitive=True,
expected=["intermediate:intermediate", "leaf:leaf"],
)
def test_multiple_specified_targets(rule_runner: RuleRunner) -> None:
# This tests that --output-format=text will deduplicate which dependent belongs to which
# specified target.
assert_dependents(
rule_runner,
targets=["base", "intermediate"],
transitive=True,
# NB: `intermediate` is not included because it's a root and we have `--no-closed`.
expected=["leaf:leaf"],
)
def test_special_cased_dependencies(rule_runner: RuleRunner) -> None:
rule_runner.write_files({"special/BUILD": "tgt(special_deps=['intermediate'])"})
assert_dependents(
rule_runner, targets=["intermediate"], expected=["leaf:leaf", "special:special"]
)
assert_dependents(
rule_runner,
targets=["base"],
transitive=True,
expected=["intermediate:intermediate", "leaf:leaf", "special:special"],
)
|
import PIL
from PIL import ImageFilter
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
from skimage.filters import scharr
from skimage import color,morphology
import numpy as np
import pdb
num_segs = 3
DEBUG = False # this draws the segmentation clustering for illustration
def CNR(nparr_img,method=2):
pilimg = PIL.Image.fromarray(nparr_img.astype("float")).convert('RGB')
im1 = pilimg.filter(ImageFilter.MedianFilter(5))
numpy_img = np.asarray(im1.convert('L'))
p1 = []
height = len(numpy_img)
width = len(numpy_img[0])
for row in range(height):
for column in range(width):
if method == 1:
p1.append([row*0.15,column*0.15,numpy_img[row][column]])
else:
p1.append([numpy_img[row][column]])
#kmeans = KMeans(init=np.array([[np.min(numpy_img)], [int(np.median(numpy_img))],[np.max(numpy_img)]]), n_clusters=num_segs)
kmeans = KMeans(init='k-means++', n_clusters=num_segs)
kmeans.fit(p1)
seg_map = kmeans.labels_.reshape(im1.size[::-1])
if DEBUG:
[_,ax] = plt.subplots(1,3)
ax[0].imshow(color.label2rgb(seg_map,numpy_img))
ax[1].imshow(scharr(numpy_img))
ax[2].imshow(numpy_img)
plt.show()
regions = [[] for k in range(num_segs)]
for row in range(len(numpy_img)):
for col in range(len(numpy_img[0])):
regions[seg_map[row][col]].append(numpy_img[row][col])
means = [ np.mean(arr) for arr in regions]
bg_noise = np.argmin(means)
fg_hard = np.argmax(means)
fg_soft = set(range(num_segs)).difference([bg_noise,fg_hard]).pop()
contrast = (means[fg_hard] - means[fg_soft])
noise = means[bg_noise]
if noise == 0:
CNR = contrast
else:
CNR = contrast / noise
return CNR
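# A minimal sketch of calling CNR on a synthetic image (purely illustrative values):
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    # three horizontal bands (background/noise, soft region, hard region) plus mild noise
    demo = np.concatenate([np.full((20, 60), v) for v in (10.0, 120.0, 240.0)])
    demo += rng.normal(0, 2, demo.shape)
    print("CNR estimate:", CNR(demo))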
|
import pandas as pd
import numpy as np
import tensorflow as tf
import os
from sklearn.model_selection import train_test_split
from data_generation import load_data, get_field_vocab_size
from model import model
from recommendation import final_recommendation, write_recommendation_file
target_df = './dataframe/test_df_2.csv'
model_name = './trained_model1.h5'
if not os.path.exists(target_df):
df = load_data('test', data_num=-1)
print("loading data done")
df.to_csv(target_df, mode='w')
df= pd.read_csv(target_df, index_col=0)
X=df.iloc[:,:-1]
Y=df.iloc[:,-1]
m = tf.keras.models.load_model(model_name)
result = m.evaluate(X,Y)
# change to final dic, user_order list
user_order = []
result_dic = {}
final = final_recommendation(result_dic)
write_recommendation_file(user_order, final)
|
import itertools
import sys
import time
import warnings
from functools import partial
from threading import Event
from typing import Tuple, Union
from ._log import SceneLog
from ._user_namespace import UserNamespace
from ._vis_base import VisModel, Control, Action
from .. import field, math
from ..field import Scene, SampledField
from phiml.math import batch, Tensor
def create_viewer(namespace: UserNamespace,
fields: dict,
name: str,
description: str,
scene: Union[Scene, None],
asynchronous: bool,
controls: tuple,
actions: dict,
log_performance: bool) -> 'Viewer':
cls = AsyncViewer if asynchronous else Viewer
viewer = cls(namespace, fields, name, description, scene, controls, actions, log_performance)
return viewer
class Viewer(VisModel):
"""
Shows variables from the user namespace.
To create a `Viewer`, call `phi.vis.view()` from the top-level Python script or from a notebook.
Use `Viewer.range()` to control the loop execution from the user interface.
Also see the user interface documentation at https://tum-pbs.github.io/PhiFlow/Visualization.html
"""
def __init__(self,
namespace: UserNamespace,
fields: dict,
name: str,
description: str,
scene: Scene,
controls: tuple,
actions: dict,
log_performance: bool,
):
VisModel.__init__(self, name, description, scene=scene)
self.initial_field_values = fields
self._controls = controls
self.namespace = namespace
self.log_performance = log_performance
self._rec = None
self._in_loop = False
self._log = SceneLog(self.scene)
self.log_file = self._log.log_file
self._elapsed = None
self.reset_step = 0
self._actions = {}
custom_reset = False
self.reset_count = 0
for action, function in actions.items():
if action.name == 'reset':
self._actions[action] = partial(self.reset, custom_reset=function)
custom_reset = True
else:
self._actions[action] = function
if not custom_reset:
self._actions[Action('reset', Viewer.reset.__doc__)] = self.reset
def log_scalars(self, reduce=math.mean, **values):
self._log.log_scalars(self.steps, reduce=reduce, **values)
def info(self, message: str): # may be replaced by a different solution later on
"""
Update the status message.
The status message is written to the console and the log file.
Additionally, it may be displayed by the user interface.
See `debug()`.
Args:
message: Message to display
"""
message = str(message)
self.message = message
self._log.log(message)
def __rrshift__(self, other):
self.info(other)
@property
def field_names(self) -> tuple:
return tuple(self.initial_field_values.keys())
def get_field(self, name, dim_selection: dict) -> SampledField:
if name not in self.initial_field_values:
raise KeyError(name)
if self._rec:
value = self._rec[name]
else:
value = self.namespace.get_variable(name)
if callable(value):
value = value()
if isinstance(value, (SampledField, Tensor)):
value = value[dim_selection]
return value
@property
def curve_names(self) -> tuple:
return self._log.scalar_curve_names
def get_curve(self, name: str) -> tuple:
return self._log.get_scalar_curve(name)
@property
def controls(self) -> Tuple[Control]:
return self._controls
def get_control_value(self, name):
return self.namespace.get_variable(name)
def set_control_value(self, name, value):
self.namespace.set_variable(name, value)
@property
def actions(self) -> tuple:
return tuple(self._actions.keys())
def run_action(self, name):
for action, fun in self._actions.items():
if action.name == name:
fun()
return
raise KeyError(name)
def range(self, *args, warmup=0, **rec_dim):
"""
Similarly to `range()`, returns a generator that can be used in a `for` loop.
>>> for step in ModuleViewer().range(100):
>>> print(f'Running step {step}')
However, `Viewer.range()` enables controlling the flow via the user interface.
Each element returned by the generator waits for `progress` to be invoked once.
Note that `step` is always equal to `Viewer.steps`.
This method can be invoked multiple times.
However, do not call this method while one `range` is still active.
Args:
*args: Either no arguments for infinite loop or single `int` argument `stop`.
Must be empty if `rec_dim` is used.
**rec_dim: Can be used instead of `*args` to record values along a new batch dimension of this name.
The recorded values can be accessed as `Viewer.rec.<name>` or `Viewer.rec['<name>']`.
warmup: Number of uncounted loop iterations to perform before `step()` is invoked for the first time.
Yields:
Step count of `Viewer`.
"""
for _ in range(warmup):
yield self.steps
self._in_loop = True
self._call(self.progress_available)
if rec_dim:
assert len(rec_dim) == 1, f"Only one rec_dim allowed but got {rec_dim}"
assert not args, f"No positional arguments are allowed when a rec_dim is specified. {rec_dim}"
rec_dim_name = next(iter(rec_dim.keys()))
size = rec_dim[rec_dim_name]
assert isinstance(size, int)
self._rec = Record(rec_dim_name)
self._rec.append(self.initial_field_values, warn_missing=False)
args = [size]
self.growing_dims = [rec_dim_name]
if len(args) == 0:
def count():
i = 0
while True:
yield i
i += 1
step_source = count()
else:
step_source = range(*args)
try:
for step in step_source:
self.steps = step - self.reset_step
try:
self._pre_step()
t = time.perf_counter()
yield step - self.reset_step
self._elapsed = time.perf_counter() - t
self.steps = step - self.reset_step + 1
if rec_dim:
self._rec.append({name: self.namespace.get_variable(name) for name in self.field_names})
if self.log_performance:
self._log.log_scalars(self.steps, reduce=None, step_time=self._elapsed)
finally:
self._post_step()
finally:
self._in_loop = False
self._call(self.progress_unavailable)
def _pre_step(self):
self._call(self.pre_step)
def _post_step(self):
self._call(self.post_step)
@property
def rec(self) -> 'Record':
"""
Read recorded fields as `viewer.rec.<name>`.
Accessing `rec` without having started a recording using `Viewer.range()` raises an `AssertionError`.
"""
assert self._rec, "Enable recording by calling range() with a dimension name, e.g. 'range(frames=10)'."
return self._rec
def progress(self):
raise AssertionError("progress() not supported by synchronous Viewer.")
@property
def can_progress(self) -> bool:
return self._in_loop
def reset(self, custom_reset=None):
"""
Restores all viewed fields to the states they were in when the viewer was created.
Changes variable values in the user namespace.
"""
if custom_reset:
custom_reset()
for name, value in self.initial_field_values.items():
self.namespace.set_variable(name, value)
self.reset_step += self.steps
self.steps = 0
self.reset_count += 1
class AsyncViewer(Viewer):
def __init__(self, *args):
Viewer.__init__(self, *args)
self.step_exec_event = Event()
self.step_finished_event = Event()
def _pre_step(self):
self.step_exec_event.wait()
self._call(self.pre_step)
def _post_step(self):
self._call(self.post_step)
self.step_exec_event.clear()
self.step_finished_event.set()
def progress(self): # called by the GUI
"""
Allows the generator returned by `ModuleViewer.range()` to advance one element.
In typical scenarios, this will run one loop iteration in the top-level script.
"""
self.step_finished_event.clear()
self.step_exec_event.set()
self.step_finished_event.wait()
def can_progress(self) -> bool:
return True
class Record:
def __init__(self, dim: Union[str, None]):
self.dim = dim
self.history = {}
def append(self, variables: dict, warn_missing=True):
if not self.history:
self.history = {name: [] for name in variables.keys()}
for name, val in variables.items():
self.history[name].append(val)
if val is None and warn_missing:
                warnings.warn(f"None value encountered for variable '{name}'. This value will not show up in the recording.", RuntimeWarning)
@property
def recorded_fields(self):
return tuple(self.history.keys())
def get_snapshot(self, name: str, frame: int):
return self.history[name][frame]
def recording_size(self, name: str):
return len(self.history[name])
def __getattr__(self, item: str):
assert item in self.history, f"No recording available for '{item}'. The following fields were recorded: {self.recorded_fields}"
snapshots = [v for v in self.history[item] if v is not None]
if snapshots:
return field.stack(snapshots, batch(self.dim))
else:
return None
def __getitem__(self, item):
assert isinstance(item, str)
return self.__getattr__(item)
def __repr__(self):
return ", ".join([f"{name} ({len(values)})" for name, values in self.history.items()])
|
## GIS2BIM Library
def GIS2BIM_CreateBoundingBox(CoördinateX,CoördinateY,BoxWidth,BoxHeight,DecimalNumbers):
XLeft = round(CoördinateX-0.5*BoxWidth,DecimalNumbers)
XRight = round(CoördinateX+0.5*BoxWidth,DecimalNumbers)
    YBottom = round(CoördinateY-0.5*BoxHeight,DecimalNumbers)
    YTop = round(CoördinateY+0.5*BoxHeight,DecimalNumbers)
boundingBoxString1 = str(XLeft) + "," + str(YBottom) + "," + str(XRight) + "," + str(YTop)
return boundingBoxString1
a = GIS2BIM_CreateBoundingBox(1000,1000,200,200,0)
print(a)
|
#!/usr/bin/python
import numpy as np
import pylab as py
from COMMON import nanosec,yr,week,grav,msun,light,mpc,hub0,h0,omm,omv
from scipy import integrate
import COMMON as CM
from USEFUL import time_estimate
from matplotlib import colors
from scipy import interpolate as ip
#Input parameters:
zbins=500 #Number of z-bins.
mchbins=500 #Number of pixels of chirp mass.
fbins=500 #Number of f-bins.
finteg=100 #Number of frequency bins to consider for the integral inside the S/N.
snrbins=7 #Number of S/N levels in the plot.
maxreds=500 #Maximum redshift considered for the plots.
minreds=1e-2 #Minimum redshift.
detector='EPTA' #Either 'PPTA' or 'EPTA'. ['ET', 'ALIGO', 'LIGO-L', 'LIGO-H']
if detector in ['EPTA', 'PPTA']:
minmch=np.log10(2e8) #Minimum log10(chirp mass/msun).
maxmch=np.log10(1e12) #Maximum log10(chirp mass/msun).
else:
minmch=np.log10(1e-1)
maxmch=np.log10(1e3)
outputfile='../data/snr_red_'+detector #File to save data.
tobs=10.*yr #Observation time (needed only to see when the binaries are monochromatic).
snrt=8. #S/N threshold.
rotangle=-40 #Angle to rotate the text on the plots (the minimum frequency level).
reds_text=5 #Redshift at which the text with frequency should appear.
factor=1. #Factor by which the S_n(f) sensitivity curve is multiplied (to see what is the result for a hypothetical future array).
#-----------------------------------------------------------------
if factor!=1.:
print 'Beware that the noise has been manually multiplied by a factor of %e !' %factor
print
#Load detector's sensitivity.
if detector=='PPTA':
outputplot='../plots/z_vs_mc_red_PPTA'
#Parameters assumed to infer S_n(f) from h0 upper limits:
tobs_upp=7.*yr
snrt_upp=8.
ifile1='../data/PPTA/LimSen4f.dat' #ZhuEtAl2014 limit.
ul1=np.array(np.loadtxt(ifile1,usecols=(1,2)))
#ul2=np.array(np.loadtxt(ifile1,usecols=(1,3)))
#ul3=np.array(np.loadtxt(ifile1,usecols=(1,4)))
fvecd,hvecd=ul1[:,0],ul1[:,1] #I should check which one is the most appropriate curve to use!
#fvecd,hvecd=ul2[:,0],ul2[:,1]
#fvecd,hvecd=ul3[:,0],ul3[:,1]
sn=hvecd**2.*tobs_upp/snrt_upp**2. #Power spectral density S_n(f) derived from the upper limit (I do not know if this is accurate).
sn*=factor #For testing I can decrease the noise manually.
fbin=1./tobs
fmin=1./tobs
elif detector=='EPTA':
outputplot='../plots/z_vs_mc_red_EPTA'
#Parameters assumed to infer S_n(f) from h0 upper limits:
tobs_upp=7.*yr
snrt_upp=8.
inputdir='../data/EPTA/'
#ifile1='upper_Steve_Varyingnoise.txt' #Fp with sample from Steve noise posterior. Fp
ifile2='upper_fixed_at_maxL_Steve.txt' #Fp with fixed noise to ML values. Fp_ML
#ifile3='upper_Fe-AP_fixed.txt' #Fe from AntoineP. Fe
#ifile4='UppETwBayAL.txt' #Bayes_E
#ifile5='upper_limit_FULL6psr_AL.txt' #Bayes_EP
#ifile6='SensWhole.dat' #Bayes_EP_NoEv
#Load EPTA upper limits data.
#ul1=np.array(np.loadtxt(inputdir+ifile1))
ul2=np.array(np.loadtxt(inputdir+ifile2))
#ul3=np.array(np.loadtxt(inputdir+ifile3,usecols=(0,1))*[nanosec,1.]) #Since frequency (zeroth column) is given in nanosec.
#ul4=10**(np.array(np.loadtxt(inputdir+ifile4))) #Since both columns are log10.
#ul5=10**(np.array(np.loadtxt(inputdir+ifile5))) #Since both columns are log10.
#ul6=np.array(np.loadtxt(inputdir+ifile6))
fvecd,hvecd=ul2[:,0],ul2[:,1]
sn=hvecd**2.*tobs_upp/snrt_upp**2. #Power spectral density S_n(f).
sn*=factor #For testing I can decrease the noise manually.
fbin=1./tobs
fmin=1./tobs
elif detector in ['ET', 'ALIGO', 'LIGO-L', 'LIGO-H']:
#Load GBD upper limits data.
outputplot='../plots/z_vs_mc_red_'+detector
ifile1='../data/ground_based/ground_based.npy' #From Paul.
data=np.load(ifile1)[()][detector] #Observed GW frequency and S_n(f)**(1/2).
fvecd,svecd=data[:,0],data[:,1]**2.*factor
fmin=min(fvecd)
fbin=1.
#fvec=np.logspace(np.log10(fmin),np.log10(fmax),fbin)
#Svec=np.interp(fvec,fvecd,Svecd)
sn=svecd
#sn_f=ip.interp1d(fvecd, sn)
#fmin=min(fvecd)
fmax=max(fvecd)
fvec=np.logspace(np.log10(fmin),np.log10(fmax),fbins)
#fvec=np.arange(fmin,fmax,fbin)
svec=np.interp(fvec,fvecd,sn) #Power spectral density interpolated.
fvec_m=0.5*(fvec[1:]+fvec[:-1]) #Vector of frequencies centred at the arithmetic mean of the bin.
svec_m=np.interp(fvec_m,fvecd,sn) #Power spectral density interpolated at the arithmetic mean of the bin.
#Create vector of chirp mass.
mchvec=np.logspace(minmch, maxmch, mchbins)
#Calculate luminosity distance and similar functions.
reds=np.logspace(np.log10(minreds),np.log10(maxreds),zbins) #Vector of redshifts logarithmically spaced.
reds_m=0.5*(reds[1:]+reds[:-1]) #Vector of redshifts at the arithmetic mean of the bin.
lum_dist=CM.comdist(reds_m)*(1.+reds_m) #Luminosity distance in Mpc.
#Choose plotting options that look optimal for the paper.
fig_width = 3.4039
goldenmean=(np.sqrt(5.)-1.0)/2.0
fig_height = fig_width * goldenmean
sizepoints=8
legendsizepoints=4.5
py.rcParams.update({
'backend': 'ps',
'ps.usedistiller': 'xpdf',
'text.usetex': True,
'figure.figsize': [fig_width, fig_height],
'axes.titlesize': sizepoints,
'axes.labelsize': sizepoints,
'text.fontsize': sizepoints,
'xtick.labelsize': sizepoints,
'ytick.labelsize': sizepoints,
'legend.fontsize': legendsizepoints
})
left, right, top, bottom, cb_fraction=0.13, 0.94, 0.96, 0.16, 0.08 #Borders of the plot.
xmin,xmax=min(mchvec),max(mchvec) #Edges of the x-axis.
ymin,ymax=minreds,maxreds #Edges of the y-axis.
sn_f=ip.interp1d(fvecd,sn)
mch_mat=np.zeros((len(reds_m), len(mchvec)))
z_mat=np.zeros(np.shape(mch_mat))
zisco_mat=np.zeros(np.shape(mch_mat))
fmin_mat=np.zeros(np.shape(mch_mat))
snr_mat=np.zeros(np.shape(mch_mat))
t=time_estimate(len(mchvec)) #A class that prints estimated computation time.
for mchi in xrange(len(mchvec)):
t.display() #Shows the remaining computation time.
t.increase() #Needed to calculate the remaining computation time.
#Calculate S/N for a given physical chirp mass.
#mch_mat[mchi,:], z_mat[mchi, :], zisco_mat[mchi, :], fmin_mat[mchi, :], snr_mat[mchi, :]=CM.snr_mat_f(np.array([mchvec[mchi]]), reds_m, lum_dist, fmin, fmax, fvec_m, finteg, tobs, sn_f)
mch_mati, z_mati, zisco_mati, fmin_mati, snr_mati=CM.snr_mat_f(np.array([mchvec[mchi]]), reds_m, lum_dist, fmin, fmax, fvec_m, finteg, tobs, sn_f)
mch_mat[:,mchi]=mch_mati[:,0]
z_mat[:,mchi]=z_mati[:,0]
zisco_mat[:,mchi]=zisco_mati[:,0]
fmin_mat[:,mchi]=fmin_mati[:,0]
snr_mat[:,mchi]=snr_mati[:,0]
print 'Saving matrices...'
print
dicti={'mch':mch_mat, 'z':z_mat, 'snr':snr_mat, 'tobs':tobs}
np.save(outputfile, dicti)
#mch_mat, z_mat, zisco_mat, fmin_mat, snr_mat=CM.snr_mat_f(mchvec, reds_m, lum_dist, fmin, fmax, fvec_m, finteg, tobs, sn_f)
#snr_mat=np.amax(snr_full_mat, axis=2)
#snr_mat=snr_full_mat[:,:,0]
snr_mat[snr_mat<=0]=np.nan
snr_mat[snr_mat>0.]=np.log10(snr_mat[snr_mat>0.])
#mch_mat=mch_mat[:,:,0,0]
#z_mat=z_mat[:,:,0,0]
#zisco_mat=zisco_mat[:,:,0,0]
#fmin_mat=fmin_mat[:,:,0,0]
#Create an S/N plot.
fig=py.figure()
fig.subplots_adjust(left=left,right=right,top=top,bottom=bottom)
ax=fig.gca()
ax.set_xscale('log')
ax.set_yscale('log')
cmap=py.cm.winter
levels=np.log10(np.logspace(np.log10(snrt), np.amax(snr_mat[np.isnan(snr_mat)==False]), snrbins))
snrmap=ax.contourf(mch_mat, z_mat, snr_mat, origin='lower', interpolation='None', aspect='auto', alpha=0.5, cmap=cmap, levels=levels)
cmap = colors.ListedColormap(['white', 'red'])
snr_mat_t=snr_mat.copy()
snr_mat_t[10**(snr_mat)<snrt]=1
snr_mat_t[10**(snr_mat)>=snrt]=np.nan
ax.contourf(mch_mat, z_mat, snr_mat_t, origin='lower', interpolation='None', aspect='auto', alpha=0.5, cmap=cmap, levels=[0., 5.])
cmap = colors.ListedColormap(['black'])
#ax.contour(f_mat, z_mat, snr_final_mat, origin='lower', interpolation='None', aspect='auto', alpha=0.5, cmap=cmap, levels=[np.log10(snrt)])
ax.contour(mch_mat, z_mat, snr_mat, origin='lower', interpolation='None', aspect='auto', alpha=0.2, cmap=cmap, levels=levels)
ax.contourf(mch_mat, z_mat, zisco_mat, origin='lower', interpolation='None', aspect='auto', alpha=0.5, cmap=cmap, levels=[0., 5.])
if detector in ['EPTA', 'PPTA']:
mchlevels=10**(np.array([9., 10., 11., 12.]))
flevels_vec=np.zeros(len(mchlevels))
for mi in xrange(len(mchlevels)):
indi=abs(mch_mat[0,:]-mchlevels[mi]).argmin()
flevels_vec[mi]=fmin_mat[0, indi]
flevels_vec=np.sort(flevels_vec)
#flevels=5
#flevels_vec=np.logspace(np.log10(fmin), np.log10(fmax), flevels)
#flevels_vec=10**(np.array([-8., -7.]))
flevels_max=1e-7
else:
flevels_vec=10**(np.array([0.,0.97,1., 2., 3.]))
flevels_max=3.
#Create labels for text.
flevels_exp=np.zeros(np.shape(flevels_vec))
flevels_num=np.zeros(np.shape(flevels_vec))
for fi in xrange(len(flevels_vec)):
label=('%.1e' %flevels_vec[fi]).split('e')
exp_label=int(label[1])
num_label=float(label[0])
flevels_exp[fi]=exp_label
flevels_num[fi]=num_label
snr_level=np.zeros(np.shape(snr_mat))
ypix=abs(reds_m-reds_text).argmin() #Number of y-pixel where the text should appear.
rowi=fmin_mat[ypix,:]
for fi in xrange(len(flevels_vec)-1):
selecti=(fmin_mat>=flevels_vec[fi])&(fmin_mat<flevels_vec[fi+1])
if len(selecti[selecti])==0:
continue
snr_level[selecti]=fi+1
xpix=abs(rowi-flevels_vec[fi]).argmin() #Number of x-pixel where the text should appear.
if flevels_vec[fi]<flevels_max:
alittleleft=1.02 #The text should be a bit on the left of the line so that it can be read.
#ax.text(10**(alittleleft*np.log10(mchvec[xpix])), reds_m[ypix], '$%.1e \\mathrm{ Hz}$ '%flevels_vec[fi], horizontalalignment='center', fontsize=6, color='black', rotation=rotangle)
texti='$%.1f \\times 10^{%i} \\mathrm{ Hz}$ '%(flevels_num[fi], flevels_exp[fi])
#texti='$%.1f \\times 10^{%i} - %.1f \\times 10^{%i} \\mathrm{ Hz}$ '%(flevels_num[fi], flevels_exp[fi], flevels_num[fi+1], flevels_exp[fi+1])
ax.text(10**(alittleleft*np.log10(mchvec[xpix])), reds_m[ypix], texti, horizontalalignment='center', fontsize=7, color='black', rotation=rotangle)
#print flevels_vec[fi]
#print texti
#print
snr_level[fmin_mat>=flevels_vec[-1]]=fi+2
snr_level[zisco_mat==1.]=0.
cmap = colors.ListedColormap(['black'])
ax.contour(mch_mat, z_mat, snr_level, cmap=cmap, levels=np.arange(10), alpha=1.)
#ax.contour(mch_mat, z_mat, fmin_mat, cmap=cmap, levels=flevels_vec, alpha=1.)
cb = fig.colorbar(snrmap,fraction=cb_fraction,format='$%i$', ticks=[-2., -1., 0., 1., 2., 3., 4., 5., 6.])
ax.set_xlabel('$\log_{10}(\\mathrm{Physical\ chirp\ mass\ /\ M_{\\odot}})$')
ax.set_ylabel('$\log_{10}(\\mathrm{Redshift})$')
ax.set_xlim(xmin,xmax)
if detector in ['EPTA', 'PPTA']:
ax.set_xticks([1e9, 1e10, 1e11, 1e12])
ax.set_xticklabels(["$9$", "$10$", "$11$", "$12$"])
else:
ax.set_xticks([1e-1, 1e0, 1e1, 1e2, 1e3])
ax.set_xticklabels(["$-1$", "$0$", "$1$", "$2$", "$3$"])
ax.set_ylim(ymin,ymax)
ax.set_yticks([1e-2,1e-1,1e0,1e1,1e2])
ax.set_yticklabels(["$-2$","$-1$","$0$","$1$","$2$"])
cb.set_label('$\\log_{10}(\\mathrm{Optimal\ S/N})$')
fig.savefig(outputplot+'.png', dpi=600)
|
import numpy as np
import matplotlib.pyplot as plt
def analyse(filename):
"""
Reads data from the specified file and plots the average, maximum and minimum along the first axis of the
data.
Parameters
----------
filename : str
Name or path to a file containing data to be plotted. Data should be 2-dimensional and values should be
separated by commas.
Examples
--------
>>> analyse('/path/to/mydata.dat')
"""
data = np.loadtxt(fname=filename, delimiter=',')
fig = plt.figure(figsize=(10.0, 3.0))
axes1 = fig.add_subplot(1, 3, 1)
axes2 = fig.add_subplot(1, 3, 2)
axes3 = fig.add_subplot(1, 3, 3)
axes1.set_ylabel('average')
axes1.plot(np.mean(data, axis=0))
axes2.set_ylabel('max')
axes2.plot(np.max(data, axis=0))
axes3.set_ylabel('min')
axes3.plot(np.min(data, axis=0))
fig.tight_layout()
plt.show()
def detect_problems(filename):
"""
Tests data stored in the specified file for spurious or unexpected values.
Parameters
----------
filename : str
Name or path to a file containing data to tested. Data should be 2-dimensional and values should be
separated by commas.
Examples
--------
    >>> detect_problems('/path/to/mydata.dat')
    """
    data = np.loadtxt(fname=filename, delimiter=',')
if np.max(data, axis=0)[0] == 0 and np.max(data, axis=0)[20] == 20:
print('Suspicious looking maxima!')
elif np.sum(np.min(data, axis=0)) == 0:
print('Minima add up to zero!')
else:
print('Seems OK!')
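# A minimal sketch exercising both helpers on generated data ('demo_data.csv' is a
# hypothetical file written to the current directory purely for illustration):
if __name__ == '__main__':
    demo = np.random.randint(0, 20, size=(10, 40))
    np.savetxt('demo_data.csv', demo, delimiter=',', fmt='%d')
    detect_problems('demo_data.csv')
    analyse('demo_data.csv')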
|
char_1 = input()
char_2 = input()
char_3 = input()
# print(char_1 + char_2 + char_3)
print(f"{char_1}{char_2}{char_3}")
|
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 20 11:26:56 2019
@author: szelagp
"""
from argparse import ArgumentParser
from datetime import datetime
from Config import disable_utf8
from board import Board
from colors import WHITE, BLACK
from display import display_board
from pieces import StraightMover, King, Knight, Bishop
class Game:
def __init__(self):
"""
Initialise the Game
"""
self.board = Board(self)
self.timestamp = datetime.now()
self.player = WHITE
self.turn = 0
self.winner = None
def display(self):
display_board(self.board)
def is_check(self, color):
"""
        Find every opposing piece that currently threatens the king of the given color.
        :param color: the color of the king to check
        :return: the list of threatening pieces (empty if the king is not in check)
"""
king_pos = self.board.kings[color].position
threats = []
for i in self.board.living_pieces[opposite_color(color)]:
if type(i) != King:
if i.can_capture_at(king_pos):
threats = threats+[i]
return threats
    def is_checkmate(self):
        """
        Return True if the current player is checkmated, False otherwise.
        """
threats = self.is_check(self.player)
king = self.board.kings[self.player]
if len(threats) == 0:
return False
# if the king cannot move
if len(king.legal_moves()) != 0:
return False
        # from this point on, the king cannot move
if len(threats) > 1:
return True
# pinned pieces
for p in self.board.living_pieces[opposite_color(self.player)]:
if isinstance(p, StraightMover):
p.pin_targets()
        # check whether a piece can interpose on the threat's path
if isinstance(threats[0], StraightMover):
directions = threats[0].get_directions()
for direction in directions:
if king.position in direction:
counters = direction[:direction.index(king.position)]
for position in counters:
for piece in self.board.living_pieces[self.player]:
if piece.can_play_at(position):
return False
return True
    def lack_of_pieces(self):
        """
        Return True if neither side has enough material left to force checkmate (draw).
        """
lack = False
all_living_pieces = [
piece
for piece in self.board.living_pieces[BLACK] + self.board.living_pieces[WHITE]
if not isinstance(piece, King)
]
# King vs King
if len(all_living_pieces) == 0:
lack = True
# King vs King + (Knight | Bishop)
elif len(all_living_pieces) == 1:
for i in all_living_pieces:
if type(i) == Knight or type(i) == Bishop:
lack = True
# King + Bishop vs King + Bishop (bishops on the same square color)
elif len(all_living_pieces) == 2:
if type(all_living_pieces[0]) == type(all_living_pieces[1]) == Bishop:
if (sum(all_living_pieces[0].position) + sum(all_living_pieces[1].position)) % 2 == 0:
lack = True
if lack:
print("\nDraw due to lack of pieces")
return True
return False
def end_game(self):
"""
Display a summary message at the end of a game
"""
if self.winner == WHITE or self.winner == BLACK:
print("\nThe ",self.winner," player won in ",self.turn," turns",'\nThe game lasted : ',(datetime.now()-self.timestamp))
if self.winner == "draw":
print("\nThe game ended in ",self.turn," turns",'\nThe game lasted : ',(datetime.now()-self.timestamp))
def command_to_pos(self, command):
"""
        Retrieve the player's command input and convert it to movement coordinates
:param command: the input of the player
"""
if command == "break":
return None, None, "break"
if command == "resign":
return None, None, "resign"
piece = target = None
if 64 < ord(command[0]) < 73:
piece = 8 - int(command[1]), ord(command[0])-65
if 96 < ord(command[0]) < 105:
piece = 8 - int(command[1]), ord(command[0])-97
if 64 < ord(command[3]) < 73:
target = 8 - int(command[4]), ord(command[3])-65
if 96 < ord(command[3]) < 105:
target = 8 - int(command[4]), ord(command[3])-97
return piece, target, None
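    # For illustration: with the 8x8 grid used here, command_to_pos("a2 a4") returns
    # ((6, 0), (4, 0), None), i.e. the source and destination squares in (row, column) form.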
def play_turn(self,color, piece, target):
"""
        Check if a movement is valid and perform it if so
:param color: color of the piece to move
:param piece: coordinate of the piece to move
:param target: coordinate of the destination of the movement
"""
if piece.color != self.player:
print("You can only move your pieces !")
return False
if not (piece.can_play_at(target)):
print("This move is illegal !")
return False
else:
piece.move_to(target)
return True
def run(self):
print("\nTo move a piece the format of the command is <letter><number><space><letter><number>")
print("\nYou can abandon a game by typing : resign")
while True:
print('#----------------------------------------#')
print(f'\n{self.player}s are playing\n')
self.display()
if self.lack_of_pieces():
self.winner = "draw"
self.end_game()
break
if self.is_checkmate():
self.winner = opposite_color(self.player)
self.end_game()
break
            command = input('command:')
if command and len(command) > 4:
coord_piece, coord_target, status = self.command_to_pos(command)
if status == "break":
break
elif status == "resign":
self.winner=opposite_color(self.player)
self.end_game()
break
if coord_piece and coord_target:
piece = self.board.grid[coord_piece]
if piece:
if self.play_turn(piece.color, piece, coord_target):
for p in self.board.living_pieces[self.player]:
p.pinned = False
self.turn += 1
# Unpin pieces
for piece in self.board.living_pieces[self.player]:
piece.pinned = False
self.player = opposite_color(self.player)
if self.winner is not None:
self.end_game()
break
else:
print("This square is empty !")
else:
print("Invalid coordinates !")
else:
print("The command is invalid !")
def opposite_color(color):
"""
Give the opposite color
:return: the opposite color
"""
if color == WHITE:
return BLACK
return WHITE
if __name__ == "__main__":
parser = ArgumentParser(prog="PolyChess", description="A chess game in coded in python")
parser.add_argument("--no-utf8", "-n", action='store_true', help="disable chess characters and replace them with "
"letters, for terminal that doesn't support "
"latest unicode characters")
args = parser.parse_args()
if args.no_utf8:
disable_utf8()
game = Game()
game.run()
|
# -*- coding: utf-8 -*-
from django.http import HttpResponse
from django.contrib.auth import login
from django.shortcuts import redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from Aluno.views.utils import aluno_exist
from annoying.decorators import render_to
from django.contrib.auth.models import User
from Avaliacao.models import *
from Aluno.models import *
@render_to('avaliacao/exibir.html')
@aluno_exist
def exibir(request,template_id):
aluno = request.user.aluno_set.get()
avaliacao=Avaliacao.objects.get(pk=template_id)
questoes=avaliacao.questoes.all()
return locals()
|
#!/usr/bin/env python3
#import fire
from fire.core import Fire
from tensor_tracer import ttracer
import sys
class TtracerCmd(object):
"""..."""
def start(self, target_file):
print(sys.argv)
sys.argv = sys.argv[2:]
print(sys.argv)
ttracer.start(target_file)
if __name__ == '__main__':
Fire(TtracerCmd)
|
R = float(input())
L = 2*3.14*R
S = 3.14*R*R
print(L)
print(S)
|
import sys, os
import numpy as np
import spectral
from hylite.hyimage import HyImage
from .headers import matchHeader, makeDirs, loadHeader, saveHeader
# spectral python throws depreciation warnings - ignore these!
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
def loadWithGDAL(path, dtype=np.float32, mask_zero = True):
"""
Load an image using gdal.
*Arguments*:
- path = file path to the image to load
- mask_zero = True if zero values should be masked (replaced with nan). Default is true.
*Returns*:
- a hyImage object
"""
# find GDAL
try:
import osgeo.gdal as gdal
gdal.PushErrorHandler('CPLQuietErrorHandler') # ignore GDAL warnings
except:
assert False, "Error - please install GDAL before using loadWithGDAL(...)"
#parse file format
_, ext = os.path.splitext(path)
if len(ext) == 0 or 'hdr' in ext.lower() or 'dat' in ext.lower() or 'img' in ext.lower(): #load ENVI file?
header, image = matchHeader(path)
elif 'tif' in ext.lower() or 'png' in ext.lower() or 'jpg' in ext.lower(): #standard image formats
image = path
header = None
else:
        print('Warning - %s is an unknown/unsupported file format. Trying to load anyway...' % ext)
#assert False, "Error - %s is an unknown/unsupported file format." % ext
# load header
if not header is None:
header = loadHeader(header)
#load image
assert os.path.exists(image), "Error - %s does not exist." % image
try:
raster = gdal.Open(image) # open image
data = raster.ReadAsArray().T
except:
assert False, "Error - %s could not be read by GDAL." % image
#create image object
assert data is not None, "Error - GDAL could not retrieve valid image data from %s" % path
pj = raster.GetProjection()
gt = raster.GetGeoTransform()
img = HyImage(data, projection=pj, affine=gt, header=header, dtype=dtype)
if mask_zero and img.dtype == np.float:
img.data[img.data == 0] = np.nan #note to self: np.nan is float...
return img
def loadWithSPy( path, dtype=np.float32, mask_zero = True):
"""
    Load an image using spectral python. This works for most envi images, but does not load
georeferencing information (in which case loadWithGDAL(...) should be used).
*Arguments*:
- path = file path to the image to load
- mask_zero = True if zero values should be masked (replaced with nan). Default is true.
*Returns*:
- a hyImage object
"""
assert os.path.exists(path), "Error - %s does not exist." % path
# parse file format
_, ext = os.path.splitext(path)
if len(ext) == 0 or 'hdr' in ext.lower() or 'dat' in ext.lower() or 'img' in ext.lower(): # load ENVI file?
header, image = matchHeader(path)
# load image with SPy
assert os.path.exists(image), "Error - %s does not exist." % image
img = spectral.open_image(header) # load with SPy
data = np.transpose( np.array(img.load()), (1,0,2) )
# load header
if not header is None:
header = loadHeader(header)
elif 'tif' in ext.lower() or 'png' in ext.lower() or 'jpg' in ext.lower(): # standard image formats
# load with matplotlib
import matplotlib.image as mpimg
        data = mpimg.imread(path)
header = None
else:
        print('Warning - %s is an unknown/unsupported file format. Trying to load anyway...' % ext)
#assert False, "Error - %s is an unknown/unsupported file format." % ext
# create image object
assert data is not None, "Error - GDAL could not retrieve valid image data from %s" % path
img = HyImage(data, projection=None, affine=None, header=header, dtype=dtype)
if mask_zero and img.dtype == np.float:
img.data[img.data == 0] = np.nan # note to self: np.nan is float...
return img
# noinspection PyUnusedLocal
def saveWithGDAL(path, image, writeHeader=True, interleave='BSQ'):
"""
Write this image to a file.
*Arguments*:
- path = the path to save to.
- image = the image to write.
- writeHeader = true if a .hdr file will be written. Default is true.
- interleave = data interleaving for ENVI files. Default is 'BSQ', other options are 'BIL' and 'BIP'.
"""
# find GDAL
try:
import osgeo.gdal as gdal
gdal.PushErrorHandler('CPLQuietErrorHandler') # ignore GDAL warnings
except:
assert False, "Error - please install GDAL before using saveWithGDAL(...)"
# make directories if need be
makeDirs( path )
path, ext = os.path.splitext(path)
if "hdr" in str.lower(ext):
ext = ".dat"
#get image driver
driver = 'ENVI'
if '.tif' in str.lower(ext):
driver = 'GTiff'
#todo - add support for png and jpg??
#set byte order
if 'little' in sys.byteorder:
image.header['byte order'] = 0
else:
image.header['byte order'] = 1
#parse data type from image array
data = image.data
dtype = gdal.GDT_Float32
image.header["data type"] = 4
image.header["interleave"] = str.lower(interleave)
if image.data.dtype == np.int or image.data.dtype == np.int32:
dtype = gdal.GDT_Int32
image.header["data type"] = 3
if image.data.dtype == np.int16:
dtype = gdal.GDT_Int16
image.header["data type"] = 2
if image.data.dtype == np.uint8:
data = np.array(image.data, np.dtype('b'))
dtype = gdal.GDT_Byte
image.header["data type"] = 1
if image.data.dtype == np.uint or image.data.dtype == np.uint32:
dtype = gdal.GDT_UInt32
image.header["data type"] = 13
if image.data.dtype == np.uint16:
dtype = gdal.GDT_UInt16
image.header["data type"] = 12
#write
if driver == 'GTiff':
output = gdal.GetDriverByName(driver).Create( path + ext, image.xdim(), image.ydim(), image.band_count(), dtype)
else:
output = gdal.GetDriverByName(driver).Create( path + ext, image.xdim(), image.ydim(), image.band_count(), dtype, ['INTERLEAVE=%s'%interleave] )
#write bands
for i in range(image.band_count()):
rb = output.GetRasterBand(i+1)
rb.WriteArray(data[:, :, i].T)
rb = None #close band
output = None #close file
if writeHeader and not image.header is None: #write .hdr file
image.push_to_header()
saveHeader(path + ".hdr", image.header)
# save geotransform/project information
output = gdal.Open(path + ext, gdal.GA_Update)
output.SetGeoTransform(image.affine)
if not image.projection is None:
output.SetProjection(image.projection.ExportToPrettyWkt())
output = None # close file
def saveWithSPy( path, image, writeHeader=True, interleave='BSQ'):
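    """
    Write this image to an ENVI file using spectral python. Note: unlike saveWithGDAL(...),
    no georeferencing information is written.
    *Arguments*:
     - path = the path to save to.
     - image = the image to write.
     - writeHeader = kept for symmetry with saveWithGDAL(...); the .hdr file is always written here.
     - interleave = kept for symmetry with saveWithGDAL(...); not currently used by this writer.
    """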
# make directories if need be
makeDirs(path)
path, ext = os.path.splitext(path)
# set byte order
if 'little' in sys.byteorder:
image.header['byte order'] = 0
byteorder = 0
else:
image.header['byte order'] = 1
byteorder = 1
image.push_to_header()
spectral.envi.save_image( path + ".hdr", np.transpose(image.data,(1,0,2)),
dtype=image.data.dtype, force=True,
ext='dat', byteorder=byteorder, metadata=image.header)
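# A minimal load/save round-trip sketch (illustrative only; 'scene.hdr' and 'out/scene.hdr'
# are placeholder paths):
#   img = loadWithGDAL('scene.hdr')       # or loadWithSPy('scene.hdr') if GDAL is unavailable
#   saveWithGDAL('out/scene.hdr', img)    # or saveWithSPy('out/scene.hdr', img)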
|
#-*-coding:utf-8-*-
import RPi.GPIO as GPIO
import weather as we
import finedust as dust
import FND
import time
import threading
from multiprocessing import Process
def setGPIO():
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
#----------------------LCD--------------------------
#Define GPIO to LCD mapping
LCD_E = 26
LCD_RS = 23
LCD_RW = 24
LCD_D4 = 17
LCD_D5 = 18
LCD_D6 = 27
LCD_D7 = 22
#Define some device constants
LCD_WIDTH = 16 #Maximum characters per line
LCD_CHR = True
LCD_CMD = False
LCD_LINE_1 = 0x80 #LCD RAM address for the 1st line
LCD_LINE_2 = 0xC0 #LCD RAM address for the 2nd line
#Timing constants
E_PULSE = 0.0005
E_DELAY = 0.0005
# Default window LCD display
def setLCD1():
GPIO.setup(LCD_E, GPIO.OUT)
GPIO.setup(LCD_RS, GPIO.OUT)
#GPIO.setup(LCD_RW, GPIO.OUT)
GPIO.setup(LCD_D4, GPIO.OUT)
GPIO.setup(LCD_D5, GPIO.OUT)
GPIO.setup(LCD_D6, GPIO.OUT)
GPIO.setup(LCD_D7, GPIO.OUT)
#initialise display
lcd_init()
while True:
value10 = dust.display_value10()
#value10 = "Fine dust:10"
value25 = dust.display_value25()
#value25 = "ultra-F.dust:7"
today_temp = we.requestCurrentTemp()
today_prec = we.requestCurrentPrec()
tomorrow_temp = we.requestForecastTomtemp()
tomorrow_prec = we.requestForecastTomprec()
        # Window LCD screen 1 (fine dust readings), held for a few seconds
lcd_string(value10, LCD_LINE_1)
lcd_string(value25, LCD_LINE_2)
time.sleep(5)
        # Window LCD screen 2 (today's weather), held for a few seconds
lcd_string("today",LCD_LINE_1)
lcd_string(today_temp, LCD_LINE_2)
time.sleep(3)
lcd_string("today",LCD_LINE_1)
lcd_string(today_prec, LCD_LINE_2)
time.sleep(2.5)
        # Window LCD screen 3 (tomorrow's weather), held for a few seconds
lcd_string("tomorrow", LCD_LINE_1)
lcd_string(tomorrow_temp, LCD_LINE_2)
time.sleep(3)
lcd_string("tomorrow", LCD_LINE_1)
lcd_string(tomorrow_prec, LCD_LINE_2)
time.sleep(2.5)
# Default LCD display when the window is open
def setLCD2():
GPIO.setup(LCD_E, GPIO.OUT)
GPIO.setup(LCD_RS, GPIO.OUT)
#GPIO.setup(LCD_RW, GPIO.OUT)
GPIO.setup(LCD_D4, GPIO.OUT)
GPIO.setup(LCD_D5, GPIO.OUT)
GPIO.setup(LCD_D6, GPIO.OUT)
GPIO.setup(LCD_D7, GPIO.OUT)
#initialise display
lcd_init()
    # Set the message to display when the window is open
state = dust.grade_state()
#state = "Grade: GOOD"
order = dust.grade_order()
#order = "IT'S A CLEAR DAY"
lcd_string(state, LCD_LINE_1)
lcd_string(order, LCD_LINE_2)
time.sleep(3)
def lcd_init():
lcd_byte(0x33, LCD_CMD)
lcd_byte(0x32, LCD_CMD)
lcd_byte(0x06, LCD_CMD)
lcd_byte(0x0C, LCD_CMD)
lcd_byte(0x28, LCD_CMD)
lcd_byte(0x01, LCD_CMD)
time.sleep(E_DELAY)
def lcd_byte(bits, mode):
GPIO.output(LCD_RS, mode)
#High bits
GPIO.output(LCD_D4, False)
GPIO.output(LCD_D5, False)
GPIO.output(LCD_D6, False)
GPIO.output(LCD_D7, False)
if bits&0x10==0x10:
GPIO.output(LCD_D4, True)
if bits&0x20==0x20:
GPIO.output(LCD_D5, True)
if bits&0x40==0x40:
GPIO.output(LCD_D6, True)
if bits&0x80==0x80:
GPIO.output(LCD_D7, True)
#toggle 'enable' pin
lcd_toggle_enable()
#Low bits
GPIO.output(LCD_D4, False)
GPIO.output(LCD_D5, False)
GPIO.output(LCD_D6, False)
GPIO.output(LCD_D7, False)
if bits&0x01==0x01:
GPIO.output(LCD_D4, True)
if bits&0x02==0x02:
GPIO.output(LCD_D5, True)
if bits&0x04==0x04:
GPIO.output(LCD_D6, True)
if bits&0x08==0x08:
GPIO.output(LCD_D7, True)
#toggle 'enable' pin
lcd_toggle_enable()
def lcd_toggle_enable():
time.sleep(E_DELAY)
GPIO.output(LCD_E, True)
time.sleep(E_PULSE)
GPIO.output(LCD_E, False)
time.sleep(E_DELAY)
def lcd_string(message, line):
message = message.ljust(LCD_WIDTH," ") #left side
lcd_byte(line, LCD_CMD)
for i in range(LCD_WIDTH):
lcd_byte(ord(message[i]),LCD_CHR)
#------------------------------------------------------------
#-------------setUltrasonic&setPiezo-------------------------
GPIO_TRIGGER = 0
GPIO_ECHO = 1
def setUltrasonic():
GPIO.setup(GPIO_TRIGGER, GPIO.OUT)
GPIO.setup(GPIO_ECHO, GPIO.IN, pull_up_down = GPIO.PUD_UP)
while True:
dist = distance()
print "Measured Distance = %.1f cm" %dist
time.sleep(0.5)
        # Distance threshold used to decide that the window is open
if (dist > 10):
            # Fine dust grade
dust_state = dust.grade_value()
#dust_state = '1'
if (dust_state == '1'):
setLCD2()
else:
tha = threading.Thread(target=setPiezo)
thb = threading.Thread(target=setLCD2)
tha.start()
thb.start()
tha.join()
thb.join()
def distance():
GPIO.output(GPIO_TRIGGER, True)
time.sleep(0.00001)
GPIO.output(GPIO_TRIGGER, False)
StartTime = time.time()
StopTime = time.time()
while GPIO.input(GPIO_ECHO) == 1:
StartTime = time.time()
while GPIO.input(GPIO_ECHO) == 0:
StopTime = time.time()
TimeElapsed = StopTime - StartTime
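    # speed of sound is roughly 34300 cm/s; halve the round-trip time to get the one-way distance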
distance = (TimeElapsed * 34300) / 2
return distance
def setPiezo():
GPIO_PIEZO = 13
GPIO.setup(GPIO_PIEZO, GPIO.OUT)
p = GPIO.PWM(GPIO_PIEZO, 100)
p.start(100)
p.ChangeDutyCycle(90)
    # Set how long the alarm sounds while the window is open
for i in range(0, 3, 1):
for j in range(0, 3, 1):
p.ChangeFrequency(392)
time.sleep(0.3)
p.stop()
#----------------------------------------------------
#if __name__=='__main__':
# try:
# setGPIO()
#
# proc = Process(target=setUltrasonic)
# proc.start()
#
# proc_disp = Process(target=setLCD1)
# proc_disp.start()
#
# except KeyboardInterrupt:
# pass
# finally:
# lcd_init()
# GPIO.cleanup()
|
class DATASET_PATH(object):
JULY = "../data/sts/csv/2018_08_05/"
JUNE = "../data/sts/csv/2018_05_04/"
BRT = "../data/shapefiles/brt_lines/brt"
class DATASET(object):
JUNE = 'june'
JULY = 'july'
BRT = 'brt'
BRT_1 = 'brt1'
DATASETS = {
DATASET.JUNE : DATASET_PATH.JUNE,
DATASET.JULY : DATASET_PATH.JULY,
DATASET.BRT : DATASET_PATH.BRT,
DATASET.BRT_1 : DATASET_PATH.BRT
}
class SERVICE(object):
UNKNOWN = 0
MWF = 1
SAT = 2
SUN = 3
SERVICES = [
SERVICE.MWF,
SERVICE.SAT,
SERVICE.SUN,
]
"""
These dicts were obtained from the downloaded TransitRoute files, and include
all the routes.
It looks like each file contains a before and after set of routes.
i.e., the 05_04 after data is the same as the 06_21 before data
Therefore when there are duplicate routes, the one with the higher ID should be
used. This could have been done programmatically, but instead it is done by commenting out
routes in these dicts. This allows other routes (e.g., high school routes) to be
excluded as well
"""
ROUTE_IDS_06_21 = {
# 10089 : "8th Street / City Centre",
10164 : "8th Street / City Centre", # Route 8
# 10120 : "Aden Bowman",
10160 : "Airport / City Centre", # Route 11
10201 : "Arbor Creek / City Centre", # Route 45
# 10115 : "Bedford Feehan & Royal",
# 10124 : "Bishop Murray",
# 10154 : "Briarwood / Centre Mall",
10229 : "Briarwood / Centre Mall", # Route 84
# 10087 : "Broadway / Market Mall",
10162 : "Broadway / Market Mall", # Route 6
# 10153 : "Centre Mall / Stonebridge",
10228 : "Centre Mall / Stonebridge", # Route 83
10233 : "City Centre", # Route 4
# 10147 : "City Centre / Blairmore",
# 10102 : "City Centre / Centre Mall",
10177 : "City Centre / Centre Mall", # Route 19
# 10098 : "City Centre / Civic Op Centre",
# 10173 : "City Centre / Civic Op Centre", # Route 15
# 10146 : "City Centre / Confederation",
# 10082 : "City Centre / Exhibition",
10157 : "City Centre / Exhibition", # Route 1
# 10149 : "City Centre / Hampton Village",
# 10084 : "City Centre / Hudson Bay Park",
10159 : "City Centre / Hudson Bay Park", # Route 3
# 10150 : "City Centre / Kensington",
# 10148 : "City Centre / Montgomery",
# 10086 : "City Centre/ McCormack",
10161 : "City Centre/ McCormack", # Route 5
# 10100 : "College Park / University",
10175 : "College Park / University", # Route 18
10180 : "Confederation / City Centre", # Route 22
# 10121 : "Cross & Murray",
# 10113 : "Cross Murray & Bowman",
# 10151 : "Cumberland / Centre Mall",
10226 : "Cumberland / Centre Mall", # Route 81
# 10088 : "Dundonald / City Centre",
10163 : "Dundonald / City Centre", # Route 7
# 10125 : "Evergreen / City Centre",
10232 : "Evergreen / City Centre", # Route 43
10313 : "Field House / City Centre",
10314 : "Field House / City Centre",
10234 : "Forest Grove / University", # Route 26
# 10118 : "Holy Cross",
# 10126 : "Kenderdine / City Centre",
# 10101 : "Kenderdine DT Express",
# 10140 : "Lakeridge/ University",
10215 : "Lakeridge/ University", # Route 55
# 10127 : "Lakeview / University",
10202 : "Lakeview / University", # Route 50
# 10096 : "Lawson Heights / Broadway",
10171 : "Lawson Heights / Broadway", # Route 13
# 10108 : "Lawson Heights / City Centre",
10183 : "Lawson Heights / City Centre", # Route 30
# 10152 : "Main Street / Centre Mall",
10227 : "Main Street / Centre Mall", # Route 82
# 10083 : "Meadowgreen / City Centre",
# 10091 : "Meadowgreen / City Centre",
10158 : "Meadowgreen / City Centre", # Route 2
10166 : "Meadowgreen / City Centre", # Route 10
# 10097 : "North Industrial / City Centre",
10172 : "North Industrial / City Centre", # Route 14
# 10117 : "Oskayak & Nutana",
# 10095 : "River Heights / Airport",
10170 : "River Heights / City Centre", # Route 12
# 10090 : "Riversdale / City Centre",
10165 : "Riversdale / City Centre", # Route 9
# 10155 : "Rosewood / Centre Mall",
10230 : "Rosewood / Centre Mall", # Route 86
# 10106 : "Sasktel Centre",
10181 : "Sasktel Centre", # Route 25
# 10134 : "Sasktel Centre / North Ind",
10209 : "Sasktel Centre / North Ind",
10235 : "Silverspring / University", # Route 27
# 10122 : "Silverwood / City Centre",
10197 : "Silverwood / City Centre", # Route 35
# 10103 : "South Industrial / City Centre",
10178 : "South Industrial / City Centre", # Route 20
# 10123 : "St Joseph",
# 10099 : "Stonebridge / University",
10174 : "Stonebridge / University", # Route 17
# 10104 : "University",
10179 : "University", # Route 21
# 10105 : "University / Confederation",
10221 : "University / Confederation", # Route 60
10224 : "University / Hampton Village", # Route 63
10225 : "University / Kensington", # Route 65
10223 : "University / Montgomery", # Route 62
# 10092 : "University Direct 1",
# 10093 : "University Direct 2",
10222 : "University/ Blairmore", # Route 61
10236 : "Willowgrove / City Centre", # Route 44
# 10094 : "Willowgrove DT Express",
# 10085 : "Willowgrove Sq / Mayfair",
# 10107 : "Willowgrove Sq/Silverspring",
}
ROUTE_IDS_05_04 = {
# 10014 : "8th Street / City Centre",
10089 : "8th Street / City Centre",
# 10045 : "Aden Bowman",
# 10120 : "Aden Bowman",
# 10040 : "Bedford Feehan & Royal",
# 10115 : "Bedford Feehan & Royal",
# 10049 : "Bishop Murray",
# 10124 : "Bishop Murray",
# 10079 : "Briarwood / Centre Mall",
10154 : "Briarwood / Centre Mall",
# 10012 : "Broadway / Market Mall",
10087 : "Broadway / Market Mall",
# 10078 : "Centre Mall / Stonebridge",
10153 : "Centre Mall / Stonebridge",
# 10072 : "City Centre / Blairmore",
10147 : "City Centre / Blairmore",
# 10027 : "City Centre / Centre Mall",
10102 : "City Centre / Centre Mall",
# 10023 : "City Centre / Civic Op Centre",
# 10098 : "City Centre / Civic Op Centre",
# 10071 : "City Centre / Confederation",
10146 : "City Centre / Confederation",
# 10007 : "City Centre / Exhibition",
10082 : "City Centre / Exhibition",
# 10074 : "City Centre / Hampton Village",
10149 : "City Centre / Hampton Village",
# 10009 : "City Centre / Hudson Bay Park",
10084 : "City Centre / Hudson Bay Park",
# 10075 : "City Centre / Kensington",
10150 : "City Centre / Kensington",
# 10073 : "City Centre / Montgomery",
10148 : "City Centre / Montgomery",
# 10011 : "City Centre/ McCormack",
10086 : "City Centre/ McCormack",
# 10025 : "College Park / University",
10100 : "College Park / University",
# 10046 : "Cross & Murray",
# 10121 : "Cross & Murray",
# 10038 : "Cross Murray & Bowman",
# 10113 : "Cross Murray & Bowman",
# 10076 : "Cumberland / Centre Mall",
10151 : "Cumberland / Centre Mall",
# 10013 : "Dundonald / City Centre",
10088 : "Dundonald / City Centre",
# 10050 : "Evergreen / City Centre",
10125 : "Evergreen / City Centre",
# 10043 : "Holy Cross",
# 10118 : "Holy Cross",
# 10051 : "Kenderdine / City Centre",
10126 : "Kenderdine / City Centre",
# 10026 : "Kenderdine DT Express",
10101 : "Kenderdine DT Express",
# 10065 : "Lakeridge/ University",
10140 : "Lakeridge/ University",
# 10052 : "Lakeview / University",
10127 : "Lakeview / University",
# 10021 : "Lawson Heights / Broadway",
10096 : "Lawson Heights / Broadway",
# 10033 : "Lawson Heights / City Centre",
10108 : "Lawson Heights / City Centre",
# 10077 : "Main Street / Centre Mall",
10152 : "Main Street / Centre Mall",
# 10008 : "Meadowgreen / City Centre",
10016 : "Meadowgreen / City Centre",
# 10083 : "Meadowgreen / City Centre",
10091 : "Meadowgreen / City Centre",
# 10022 : "North Industrial / City Centre",
10097 : "North Industrial / City Centre",
# 10042 : "Oskayak & Nutana",
# 10117 : "Oskayak & Nutana",
# 10020 : "River Heights / Airport",
10095 : "River Heights / Airport",
# 10015 : "Riversdale / City Centre",
10090 : "Riversdale / City Centre",
# 10080 : "Rosewood / Centre Mall",
10155 : "Rosewood / Centre Mall",
# 10031 : "Sasktel Centre",
10106 : "Sasktel Centre",
# 10059 : "Sasktel Centre / North Ind",
10134 : "Sasktel Centre / North Ind",
# 10047 : "Silverwood / City Centre",
10122 : "Silverwood / City Centre",
# 10028 : "South Industrial / City Centre",
10103 : "South Industrial / City Centre",
# 10048 : "St Joseph",
# 10123 : "St Joseph",
# 10024 : "Stonebridge / University",
10099 : "Stonebridge / University",
# 10029 : "University",
10104 : "University",
# 10030 : "University / Confederation",
10105 : "University / Confederation",
# 10017 : "University Direct 1",
10092 : "University Direct 1",
# 10018 : "University Direct 2",
10093 : "University Direct 2",
# 10019 : "Willowgrove DT Express",
10094 : "Willowgrove DT Express",
# 10010 : "Willowgrove Sq / Mayfair",
10085 : "Willowgrove Sq / Mayfair",
# 10032 : "Willowgrove Sq/Silverspring",
10107 : "Willowgrove Sq/Silverspring",
}
# Have detected a few bad shape IDs in the data. This dict suppresses them
BAD_SHAPES = {
10125 : [73456],
10175 : [73729]
}
BAD_STOP_IDS_BRT = {
'remix-aecb02f2-5b8a-4b0e-9864-e19c07a29e7f' : 10000,
'City Centre' : 10001,
'Confederation' : 10002,
'Lawson' : 10003,
'Market Mall' : 10004,
'remix-cd4a214b-ea7f-4f85-a483-f67870acddad' : 10005,
'remix-c928cfb9-0347-4b43-a69f-dcc1368fda9a' : 10006,
'remix-353597bb-b43a-4b1e-879b-d51b3d5f758f' : 10007,
'remix-bafe47a6-8522-41d7-8d6f-a043b364b47b' : 10008,
'remix-2c8102b7-26fa-4a8a-8d23-34a564c36b94' : 10009,
'remix-4c26a811-e7a6-4fd9-9b21-400dbe9b4119' : 10010,
'remix-dbe1840e-545e-4139-9747-6e580019d412' : 10011,
'remix-4cda1ffa-0b2b-4d94-a584-3883acf820b8' : 10012,
'remix-00697fd8-9bc6-46ea-97de-3ddb36b4e411' : 10013,
'remix-b2a01211-2b25-42bb-821d-d68870ecfb7d' : 10014,
'remix-85b5c071-1019-4ae4-b71f-f8b268c17f30' : 10015,
'remix-b7602926-1585-4a95-b63e-322b4484c48d' : 10016,
'remix-5fa6b263-a968-4e7b-ac51-f8ae81e877ef' : 10017,
'remix-160c8934-2d09-4397-a182-dde5998f1bd3' : 10018,
'remix-b6af24e6-241a-400a-bc2e-c26a97d96ae0' : 10019,
'remix-23308a1d-8a87-44de-83e9-416f2dd2a110' : 10020,
'remix-49d01a56-50cc-4f4f-9bb8-bf8fbf528202' : 10021,
'remix-c28ae3dd-36bd-4324-942e-691f7ca86ce0' : 10022,
'remix-aba251a8-d08c-4122-b808-a4bffb2997e2' : 10023,
'remix-83bbe727-3627-41e9-a90f-95f7f558e1f8' : 10024,
'remix-53723e43-e126-4c04-b84a-786c0900c1d5' : 10025,
'remix-d52fa55b-7456-4a86-ab16-d7986314666b' : 10026,
'remix-9354d6d3-469c-488c-93fb-6a6517c3bcd0' : 10027,
'University-Place Riel' : 10028,
'remix-f7a53adc-4a5f-4738-9a22-7531c0197d72' : 10029,
'Wildwood-Centre Mall' : 10030,
}
OPEN_DATA_ROUTE_FILTER = {
DATASET.JUNE : ROUTE_IDS_05_04,
DATASET.JULY : ROUTE_IDS_06_21
}
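# A minimal lookup sketch (illustrative only): resolve the route-ID filter for a dataset name,
# falling back to an empty dict for datasets without a filter (e.g. the BRT layers).
def get_route_filter(dataset_name):
    return OPEN_DATA_ROUTE_FILTER.get(dataset_name, {})
# e.g. get_route_filter(DATASET.JUNE) returns ROUTE_IDS_05_04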
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Simulate sending serial-port data
"""
import struct
def dec2hl8(dec):
s = struct.pack('>h', dec)
print dec, '-------------=', repr(s)
n = len(s)
# print n, n/2
# print s[:n / 2], s[n / 2:]
# print hex(ord(s[:n / 2])), hex(ord(s[n / 2:]))
    return ord(s[:n / 2]), ord(s[n / 2:])  ## high byte first, low byte second
def dec2hl8_new(dec):
print dec, '-------------=', repr(dec)
print hex(dec)
print repr(hex(dec))
print chr(dec)
print ord(chr(dec))
# print n, n/2
# print s[:n / 2], s[n / 2:]
# print hex(ord(s[:n / 2])), hex(ord(s[n / 2:]))
return ord(chr(dec))
def char_checkxor(data, n):
print data[n:len(data) - 3]
# print (~(reduce(lambda x, y: x + y, data[n:len(data) - 3]))) & 255
# print hex((~(reduce(lambda x, y: x + y, data[n:len(data) - 3]))) & 255)
ss = hex((~(reduce(lambda x, y: x + y, data[n:len(data) - 3]))) & 255)
print ss, int(ss, 16)
return ss
# ord() is the counterpart of chr() (for 8-bit ASCII strings) or unichr() (for Unicode objects): it takes a single character (a string of length 1) and returns the corresponding ASCII or Unicode code point, raising a TypeError if the given Unicode character is outside the range supported by your Python build.
def send_command_new(left_fb, left_speed):
print 'new date'
def send_command(left_fb, left_speed, right_fb, right_speed):
data = [0xAA, 0x55, 0x06, 0x01, 0x04, 0, 0, 0, 0, 0, 0x0D, 0x0A]
# data[6], data[5] = dec2hl8(100)
# data[8], data[7] = dec2hl8(1)
data[5] = left_fb
data[6] = left_speed
data[7] = right_fb
data[8] = right_speed
data[9] = int(char_checkxor(data, 5), 16)
#mdata = map(lambda x: chr(x), data)
print ('Try to send data=', data)
#print 'mdata:', mdata
if __name__ == '__main__':
# 0x0F
# 0x0B
send_command(0x0F, 100, 0X0F, 100)
#send_command_new(1, 1)
# print ord('Z'), ord('\x0f'), ord('d')
print dec2hl8(100)
# print hex(-100)
#
print int('0x0F', 16)
print ord(chr(int('0x0F', 16)))
print dec2hl8_new(int('0x0F', 16))
|
def union_all(graphs, rename=()): ...
def disjoint_union_all(graphs): ...
def compose_all(graphs): ...
def intersection_all(graphs): ...
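# A minimal usage sketch of the corresponding networkx functions (assumes networkx is installed):
#   import networkx as nx
#   g1, g2 = nx.path_graph(3), nx.path_graph(3)
#   nx.disjoint_union_all([g1, g2])   # 6 nodes; the second graph is relabelled 3..5
#   nx.compose_all([g1, g2])          # graphs merged on shared node labels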
|
dollars = float(input('Dollars to convert: '))
yen = dollars * 111.47
yuan = dollars * 6.87
euro = dollars * 0.86
pound = dollars * 0.77
Canada = dollars * 1.31
print("Yen:",yen)
print("Yuan:",yuan)
print("Euro:",euro)
print("Pound:",pound)
print("Canadian dollar:",Canada)
|