text stringlengths 8 6.05M |
|---|
from PyQt5.QtCore import *
from dao.nutzer_dao import NutzerDao
from dao.kassen_dao import KassenDao
from dao.historie_dao import HistorieDao
import os
import numpy as np
from time import gmtime, strftime
import logging
class HistorieController(QObject):
    """Qt controller mediating between the history table model and the
    DAO layer (users, cash box, history).

    German domain terms: Kasse = cash box, Betrag = amount,
    Nutzer = user, Historie = history.
    """

    def __init__(self, nutzermodel, nk_model, historiemodel):
        QObject.__init__(self)
        # View models supplied by the caller.
        self._nutzerkassenmodel = nk_model
        self._nutzermodel = nutzermodel
        self._historiemodel = historiemodel
        # Data-access objects for persistence.
        self._nutzerdao = NutzerDao()
        self._kassendao = KassenDao()
        self._historiedao = HistorieDao()

    @pyqtSlot()
    def loadTableData(self):
        """Reload the history model from the database."""
        historie = self._historiedao.select_content()
        # Clear the model row by row before repopulating it.
        while self._historiemodel.rowCount() > 0:
            self._historiemodel.deleteContent(0)
        for i in range(len(historie)):
            self._historiemodel.addContent(historie[i]['Datum'], historie[i]['Kategorie'], historie[i]['Name'], "{:10.2f}".format(float(historie[i]['Betrag'])))

    @pyqtSlot(str, str, str, str)
    def deleteContent(self, datum, kategorie, name, betrag):
        """Delete one history entry and roll back its monetary effect.

        Depending on the category the amount is removed from the cash
        box again (Barzahlung), credited back to the user's account
        (Listenzahlung), or re-applied (Kassenabrechnung/Einzahlung).
        """
        # Normalise the amount string: strip the currency sign and use
        # '.' as the decimal separator.
        if '€' in betrag:
            betrag = betrag.replace('€', '')
        if ',' in betrag:
            betrag = betrag.replace(',', '.')
        if 'Barzahlung' in kategorie:
            # Cash payment: subtract the amount from the cash box.
            kasse = self._kassendao.select_geld()
            self._kassendao.edit_geld("{:10.2f}".format(float(kasse) - float(betrag)))
        elif 'Listenzahlung' in kategorie:
            # List payment: refund the amount to the matching user.
            konto = self._nutzermodel._konto
            names = self._nutzermodel._names
            for i in range(len(names)):
                # NOTE(review): substring match — a user whose name is a
                # prefix of another could match the wrong row; confirm.
                if names[i] in name:
                    self._nutzerdao.edit_user(self._nutzermodel._names[i], self._nutzermodel._bild[i], self._nutzermodel._mitglied[i], "{:10.2f}".format(float(konto[i]) + float(betrag)))
        elif 'Kassenabrechnung' in kategorie or 'Einzahlung' in kategorie:
            # NOTE(review): ``name in 'Kasse'`` tests whether ``name`` is
            # a substring of the literal 'Kasse' — presumably
            # ``'Kasse' in name`` was intended; confirm before changing.
            if name in 'Kasse':
                kasse = self._kassendao.select_geld()
                self._kassendao.edit_geld("{:10.2f}".format(float(kasse) + float(betrag)))
            else:
                konto = self._nutzermodel._konto
                names = self._nutzermodel._names
                for i in range(len(names)):
                    if names[i] in name:
                        self._nutzerdao.edit_user(self._nutzermodel._names[i], self._nutzermodel._bild[i], self._nutzermodel._mitglied[i], "{:10.2f}".format(float(konto[i]) - float(betrag)))
        self._historiedao.delete_content(datum)
from ..FeatureExtractor import FeatureExtractor
class pair_slope_trend_extractor(FeatureExtractor):
    """Fraction of consecutive point-pairs that rise minus those that
    fall, over the total number of pairs examined.

    Only the last MAX_PAIRS pairs are inspected, so the value reflects
    the recent trend. Dividing by the pair count (not just the count of
    rising/falling pairs) accounts for plateaus; raise PLATEAU_SLOPE to
    treat near-zero slopes as flat.

    Blame: john m. brewer
    """
    active = True
    extname = 'pair_slope_trend'  # extractor name
    MAX_PAIRS = 30  # maximum number of pairs to examine from end of epochs
    PLATEAU_SLOPE = 0.0  # slope considered equal to 0

    def extract(self):
        end = len(self.time_data) - 1
        start = max(0, end - self.MAX_PAIRS)
        n_rising = 0
        n_falling = 0
        for idx in range(start, end):
            flux_step = self.flux_data[idx + 1] - self.flux_data[idx]
            time_step = self.time_data[idx + 1] - self.time_data[idx]
            pair_slope = flux_step / time_step
            if pair_slope > self.PLATEAU_SLOPE:
                n_rising += 1
            if pair_slope < -self.PLATEAU_SLOPE:
                n_falling += 1
        # max(1.0, ...) guards against an empty/single-point series.
        return float(n_rising - n_falling) / max(1.0, end - start)
|
"""Tests for treadmill.rest.*"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os.path
import tempfile
import unittest
from unittest import mock
from treadmill import rest
# W0212: Access to a protected member of a client class
# pylint: disable=W0212
class UdsRestServerTest(unittest.TestCase):
    """Test for treadmill.rest.UdsRestServer."""

    def test_udsrestsrv(self):
        """_setup_endpoint must create the socket's parent directories."""
        # Deliberately point at a not-yet-existing directory tree.
        socket = os.path.join(
            tempfile.gettempdir(), 'no', 'such', 'dir', 'foo.sock'
        )
        rest_server = rest.UdsRestServer(socket)
        # not yet existing socket w/ containing dir should be created
        rest_server._setup_endpoint(mock.Mock())
        self.assertTrue(os.path.exists(socket))
        # shouldn't fail if the containing dir already exists
        rest_server._setup_endpoint(mock.Mock())


if __name__ == '__main__':
    unittest.main()
|
# Copyright 2022 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from typing import Iterable
import pytest
from internal_plugins.test_lockfile_fixtures.lockfile_fixture import (
JVMLockfileFixture,
JVMLockfileFixtureDefinition,
)
from pants.backend.openapi.codegen.java.rules import GenerateJavaFromOpenAPIRequest
from pants.backend.openapi.codegen.java.rules import rules as java_codegen_rules
from pants.backend.openapi.sample.resources import PETSTORE_SAMPLE_SPEC
from pants.backend.openapi.target_types import (
OpenApiDocumentDependenciesField,
OpenApiDocumentField,
OpenApiDocumentGeneratorTarget,
OpenApiDocumentTarget,
OpenApiSourceGeneratorTarget,
OpenApiSourceTarget,
)
from pants.core.util_rules import config_files, external_tool, source_files, system_binaries
from pants.engine.addresses import Address, Addresses
from pants.engine.target import (
DependenciesRequest,
GeneratedSources,
HydratedSources,
HydrateSourcesRequest,
)
from pants.jvm.target_types import JvmArtifactTarget
from pants.jvm.testutil import maybe_skip_jdk_test
from pants.testutil.rule_runner import PYTHON_BOOTSTRAP_ENV, QueryRule, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
    """RuleRunner preloaded with the OpenAPI->Java codegen rules, the
    OpenAPI/JVM target types, and the query rules these tests issue."""
    rule_runner = RuleRunner(
        target_types=[
            JvmArtifactTarget,
            OpenApiSourceTarget,
            OpenApiSourceGeneratorTarget,
            OpenApiDocumentTarget,
            OpenApiDocumentGeneratorTarget,
        ],
        rules=[
            *java_codegen_rules(),
            *config_files.rules(),
            *source_files.rules(),
            *external_tool.rules(),
            *system_binaries.rules(),
            QueryRule(HydratedSources, (HydrateSourcesRequest,)),
            QueryRule(GeneratedSources, (GenerateJavaFromOpenAPIRequest,)),
            QueryRule(Addresses, (DependenciesRequest,)),
        ],
    )
    rule_runner.set_options(args=[], env_inherit=PYTHON_BOOTSTRAP_ENV)
    return rule_runner
@pytest.fixture
def openapi_lockfile_def() -> JVMLockfileFixtureDefinition:
    """Definition of the JVM lockfile (openapi.test.lock) pinning the
    runtime dependencies of the generated Java client."""
    return JVMLockfileFixtureDefinition(
        "openapi.test.lock",
        [
            "org.apache.commons:commons-lang3:3.12.0",
            "io.swagger:swagger-annotations:1.6.3",
            "com.squareup.okhttp3:okhttp:4.9.2",
            "com.google.code.findbugs:jsr305:3.0.2",
            "io.gsonfire:gson-fire:1.8.5",
            "org.openapitools:jackson-databind-nullable:0.2.2",
            "com.squareup.okhttp3:logging-interceptor:4.9.2",
            "jakarta.annotation:jakarta.annotation-api:1.3.5",
            "com.google.code.gson:gson:2.8.8",
            "org.threeten:threetenbp:1.5.0",
        ],
    )
@pytest.fixture
def openapi_lockfile(
    openapi_lockfile_def: JVMLockfileFixtureDefinition, request
) -> JVMLockfileFixture:
    """Resolved lockfile fixture loaded from ``openapi_lockfile_def``."""
    return openapi_lockfile_def.load(request)
def _assert_generated_files(
    rule_runner: RuleRunner,
    address: Address,
    *,
    expected_files: Iterable[str],
    source_roots: Iterable[str] | None = None,
    extra_args: Iterable[str] = (),
) -> None:
    """Run Java codegen for ``address`` and assert every path in
    ``expected_files`` is among the generated outputs."""
    args = []
    if source_roots:
        args.append(f"--source-root-patterns={repr(source_roots)}")
    args.extend(extra_args)
    rule_runner.set_options(args, env_inherit=PYTHON_BOOTSTRAP_ENV)

    tgt = rule_runner.get_target(address)
    protocol_sources = rule_runner.request(
        HydratedSources, [HydrateSourcesRequest(tgt[OpenApiDocumentField])]
    )
    generated_sources = rule_runner.request(
        GeneratedSources, [GenerateJavaFromOpenAPIRequest(protocol_sources.snapshot, tgt)]
    )

    # We only assert expected files are a subset of all generated since the generator creates a lot of support classes
    assert set(expected_files).intersection(generated_sources.snapshot.files) == set(expected_files)
@maybe_skip_jdk_test
def test_skip_generate_java(rule_runner: RuleRunner) -> None:
    """``skip_java=True`` must yield no sources and no runtime deps."""
    rule_runner.write_files(
        {
            "BUILD": "openapi_document(name='petstore', source='petstore_spec.yaml', skip_java=True)",
            "petstore_spec.yaml": PETSTORE_SAMPLE_SPEC,
        }
    )

    def assert_gen(address: Address, expected: Iterable[str]) -> None:
        _assert_generated_files(rule_runner, address, expected_files=expected)

    tgt_address = Address("", target_name="petstore")
    assert_gen(tgt_address, [])

    tgt = rule_runner.get_target(tgt_address)
    runtime_dependencies = rule_runner.request(
        Addresses, [DependenciesRequest(tgt[OpenApiDocumentDependenciesField])]
    )
    assert not runtime_dependencies
@maybe_skip_jdk_test
def test_generate_java_sources(
    rule_runner: RuleRunner, openapi_lockfile: JVMLockfileFixture
) -> None:
    """Default codegen emits the petstore client classes under the
    default packages and injects runtime dependencies."""
    rule_runner.write_files(
        {
            "3rdparty/jvm/default.lock": openapi_lockfile.serialized_lockfile,
            "3rdparty/jvm/BUILD": openapi_lockfile.requirements_as_jvm_artifact_targets(),
            "src/openapi/BUILD": "openapi_document(name='petstore', source='petstore_spec.yaml')",
            "src/openapi/petstore_spec.yaml": PETSTORE_SAMPLE_SPEC,
        }
    )

    def assert_gen(address: Address, expected: Iterable[str]) -> None:
        _assert_generated_files(
            rule_runner, address, source_roots=["src/openapi"], expected_files=expected
        )

    tgt_address = Address("src/openapi", target_name="petstore")
    assert_gen(
        tgt_address,
        [
            "src/openapi/org/openapitools/client/api/PetsApi.java",
            "src/openapi/org/openapitools/client/model/Pet.java",
            "src/openapi/org/openapitools/client/model/Error.java",
        ],
    )

    tgt = rule_runner.get_target(tgt_address)
    runtime_dependencies = rule_runner.request(
        Addresses, [DependenciesRequest(tgt[OpenApiDocumentDependenciesField])]
    )
    assert runtime_dependencies
@maybe_skip_jdk_test
def test_generate_java_sources_using_custom_model_package(
    rule_runner: RuleRunner, openapi_lockfile: JVMLockfileFixture
) -> None:
    """``java_model_package`` redirects generated model classes."""
    rule_runner.write_files(
        {
            "3rdparty/jvm/default.lock": openapi_lockfile.serialized_lockfile,
            "3rdparty/jvm/BUILD": openapi_lockfile.requirements_as_jvm_artifact_targets(),
            "src/openapi/BUILD": "openapi_document(name='petstore', source='petstore_spec.yaml', java_model_package='org.mycompany')",
            "src/openapi/petstore_spec.yaml": PETSTORE_SAMPLE_SPEC,
        }
    )

    def assert_gen(address: Address, expected: Iterable[str]) -> None:
        _assert_generated_files(
            rule_runner, address, source_roots=["src/openapi"], expected_files=expected
        )

    assert_gen(
        Address("src/openapi", target_name="petstore"),
        [
            "src/openapi/org/mycompany/Pet.java",
            "src/openapi/org/mycompany/Error.java",
        ],
    )
@maybe_skip_jdk_test
def test_generate_java_sources_using_custom_api_package(
    rule_runner: RuleRunner, openapi_lockfile: JVMLockfileFixture
) -> None:
    """``java_api_package`` redirects generated API classes."""
    rule_runner.write_files(
        {
            "3rdparty/jvm/default.lock": openapi_lockfile.serialized_lockfile,
            "3rdparty/jvm/BUILD": openapi_lockfile.requirements_as_jvm_artifact_targets(),
            "src/openapi/BUILD": "openapi_document(name='petstore', source='petstore_spec.yaml', java_api_package='org.mycompany')",
            "src/openapi/petstore_spec.yaml": PETSTORE_SAMPLE_SPEC,
        }
    )

    def assert_gen(address: Address, expected: Iterable[str]) -> None:
        _assert_generated_files(
            rule_runner, address, source_roots=["src/openapi"], expected_files=expected
        )

    assert_gen(
        Address("src/openapi", target_name="petstore"),
        [
            "src/openapi/org/mycompany/PetsApi.java",
        ],
    )
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
get_nvl_position_list_query = """
SELECT hmup.id AS id,
hmup.user_id AS user_id,
hmup.traceable_object_id AS traceable_object_id,
hmup.hw_module_id AS hw_module_id,
ST_FlipCoordinates(hmup.position)::geometry AS geom,
hmup.show_on_map AS show_on_map,
hmup.active AS active,
hmup.meta_information::json AS meta_information,
hmup.record_time AS record_time,
polygon_detect(hmup.id) AS in_geofence,
parse_gps_timestamp(
(hmup.meta_information ->> 'date')::VARCHAR,
(hmup.meta_information ->> 'time')::VARCHAR ) AS event_time
FROM public.hw_module_user_position AS hmup
--LEFT OUTER JOIN public.user_hw_action_coolection AS uhac ON uhac.user_id = hmup.user_id
WHERE ($1::BIGINT = 0 OR hmup.user_id = $1::BIGINT)
AND ($2::BIGINT = 0 OR hmup.traceable_object_id = $2::BIGINT)
AND ($3::timestamptz is NULL OR (hmup.created_on >= $3::timestamptz ))
AND ($4::timestamptz is NULL OR (hmup.created_on <= $4::timestamptz ))
AND ST_IsEmpty(hmup.position) IS FALSE
"""
get_nvl_position_list_count_query = """
SELECT count(*) AS record_count
FROM public.hw_module_user_position AS hmup
WHERE ($1::BIGINT = 0 OR hmup.user_id = $1::BIGINT)
AND ($2::BIGINT = 0 OR hmup.traceable_object_id = $2::BIGINT)
AND ($3::timestamptz is NULL OR (hmup.created_on >= $3::timestamptz ))
AND ($4::timestamptz is NULL OR (hmup.created_on <= $4::timestamptz))
AND ST_IsEmpty(hmup.position) IS FALSE
"""
get_nvl_distance_query = """
SELECT
st_length(ST_Transform(ST_FlipCoordinates(
st_makeline(position)::geometry), 4326
)::geography
)
as distance
FROM (
SELECT hmup.position AS position,
hmup.record_time
FROM public.hw_module_user_position AS hmup
WHERE ($1::BIGINT = 0 OR hmup.user_id = $1::BIGINT)
AND ($2::BIGINT = 0 OR hmup.traceable_object_id = $2::BIGINT)
AND ($3::timestamptz is NULL OR (hmup.created_on >= $3::timestamptz))
AND ($4::timestamptz is NULL OR (hmup.created_on <= $4::timestamptz))
AND ST_IsEmpty(hmup.position) IS FALSE
GROUP BY hmup.record_time, hmup.position
) AS r;
"""
|
import os
from configparser import ConfigParser, NoSectionError, NoOptionError
from elasticsearch import Elasticsearch, ConnectionError, ElasticsearchException
class ES:
    """Thin wrapper around an Elasticsearch client configured from
    ``conf/app.conf`` (section ``es``, option ``host``)."""

    def __init__(self):
        # Project root: two directory levels above this file.
        self.path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        self.__get_config()
        try:
            self.es = Elasticsearch(hosts=self.host)
        except ConnectionError as e:
            # NOTE(review): re-raises unchanged, so the try/except adds
            # nothing; kept as-is.
            raise e

    def __get_config(self):
        """Parse the configuration file.

        Reads the comma-separated ``es.host`` entry; ``self.host``
        becomes ``None`` when the section or option is missing.
        """
        try:
            config = ConfigParser()
            config.read(self.path + '/conf/app.conf')
            self.host = config.get('es', 'host').split(',')
        except (NoSectionError, NoOptionError):
            self.host = None

    def add_document(self, index, body):
        """Index a document.

        :param index: index name
        :param body: document content
        :return: bool (True on success; exceptions are re-raised)
        """
        try:
            self.es.index(index=index, body=body)
            return True
        except ElasticsearchException as e:
            raise e
|
# Read invoice data from the user and echo a closing summary.
nome = input("Digite o nome do cliente:")
dia = input("Digite o dia de vencimento:")
mes = input("Digite o mês de vencimento:")
valor = input("Digite o valor da fatura:")
print("Olá,", nome)
print("A sua fatura com vencimento em", dia, "de", mes, "no valor de R$", valor, "está fechada.")
from django.contrib.auth.tokens import default_token_generator
from templated_mail.mail import BaseEmailMessage
from rest_framework.views import APIView
from authentication.conf import settings
from django.core.mail import send_mail
from api.models import CompanyStuff
from django.template import Context
from django.template.loader import render_to_string
from rest_framework.response import Response
class InvitationView(APIView):
    """Send a store-invitation e-mail carrying a token for the
    requesting user's company."""

    def post(self, request, *args, **kwargs):
        """E-mail an invitation to ``request.data['email']``.

        Returns the ``send_mail`` result (number of messages sent).
        """
        user = self.request.user
        to_email = self.request.data['email']
        companystuff = CompanyStuff.objects.filter(stuff=user).first()
        company = companystuff.company
        token = default_token_generator.make_token(user)
        # BUG FIX: the original built a ``django.template.Context`` and
        # discarded it (dead code), and rendered the identical template
        # twice; render once and reuse.
        context = {'token': token, 'store': company.name}
        # NOTE(review): both the plain and the HTML body use the same
        # HTML template — presumably a plain-text template was intended
        # for the plain body; confirm before changing.
        body = render_to_string(['email/invitation.html'], context)
        response = send_mail(
            'Invitation to dropify',
            body,
            'contact@dropify.net',
            [to_email],
            html_message=body,
        )
        return Response(response)
class ActivationEmail(BaseEmailMessage):
    """Templated invitation/activation e-mail for a company store."""

    template_name = 'email/invitation.html'

    def get_context_data(self):
        """Extend the template context with store name, token and URL."""
        context = super(ActivationEmail, self).get_context_data()
        user = context.get('user')
        company_id = user.company_id
        company_name = user.company
        context['store'] = company_name
        # NOTE(review): make_token() is called with the company id, but
        # Django's token generator expects a user instance — presumably
        # make_token(user) was intended; confirm.
        context['token'] = default_token_generator.make_token(company_id)
        context['url'] = settings.INVITATION_URL.format(**context)
        return context
|
import csv
import os
from typing import Any, Callable, List, Optional, Tuple
from PIL import Image
from .utils import download_and_extract_archive
from .vision import VisionDataset
class Kitti(VisionDataset):
    """`KITTI <http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark>`_ Dataset.

    It corresponds to the "left color images of object" dataset, for object detection.

    Args:
        root (string): Root directory where images are downloaded to.
            Expects the following folder structure if download=False:

            .. code::

                <root>
                    └── Kitti
                        └─ raw
                            ├── training
                            |   ├── image_2
                            |   └── label_2
                            └── testing
                                └── image_2
        train (bool, optional): Use ``train`` split if true, else ``test`` split.
            Defaults to ``train``.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g, ``transforms.PILToTensor``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        transforms (callable, optional): A function/transform that takes input sample
            and its target as entry and returns a transformed version.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    data_url = "https://s3.eu-central-1.amazonaws.com/avg-kitti/"
    resources = [
        "data_object_image_2.zip",
        "data_object_label_2.zip",
    ]
    image_dir_name = "image_2"
    labels_dir_name = "label_2"

    def __init__(
        self,
        root: str,
        train: bool = True,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        transforms: Optional[Callable] = None,
        download: bool = False,
    ):
        super().__init__(
            root,
            transform=transform,
            target_transform=target_transform,
            transforms=transforms,
        )
        self.images = []   # absolute paths of this split's images
        self.targets = []  # matching label-file paths (train split only)
        self.root = root
        self.train = train
        self._location = "training" if self.train else "testing"
        if download:
            self.download()
        if not self._check_exists():
            raise RuntimeError("Dataset not found. You may use download=True to download it.")

        image_dir = os.path.join(self._raw_folder, self._location, self.image_dir_name)
        if self.train:
            labels_dir = os.path.join(self._raw_folder, self._location, self.labels_dir_name)
        for img_file in os.listdir(image_dir):
            self.images.append(os.path.join(image_dir, img_file))
            # Label file shares the image's basename with a .txt suffix.
            if self.train:
                self.targets.append(os.path.join(labels_dir, f"{img_file.split('.')[0]}.txt"))

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """Get item at a given index.

        Args:
            index (int): Index
        Returns:
            tuple: (image, target), where
            target is a list of dictionaries with the following keys:

            - type: str
            - truncated: float
            - occluded: int
            - alpha: float
            - bbox: float[4]
            - dimensions: float[3]
            - locations: float[3]
            - rotation_y: float
        """
        image = Image.open(self.images[index])
        # Test split carries no annotations.
        target = self._parse_target(index) if self.train else None
        if self.transforms:
            image, target = self.transforms(image, target)
        return image, target

    def _parse_target(self, index: int) -> List:
        """Parse one space-delimited KITTI label file into a list of
        per-object dicts (15 columns per line)."""
        target = []
        with open(self.targets[index]) as inp:
            content = csv.reader(inp, delimiter=" ")
            for line in content:
                target.append(
                    {
                        "type": line[0],
                        "truncated": float(line[1]),
                        "occluded": int(line[2]),
                        "alpha": float(line[3]),
                        "bbox": [float(x) for x in line[4:8]],
                        "dimensions": [float(x) for x in line[8:11]],
                        "location": [float(x) for x in line[11:14]],
                        "rotation_y": float(line[14]),
                    }
                )
        return target

    def __len__(self) -> int:
        return len(self.images)

    @property
    def _raw_folder(self) -> str:
        # <root>/Kitti/raw
        return os.path.join(self.root, self.__class__.__name__, "raw")

    def _check_exists(self) -> bool:
        """Check if the data directory exists."""
        folders = [self.image_dir_name]
        if self.train:
            folders.append(self.labels_dir_name)
        return all(os.path.isdir(os.path.join(self._raw_folder, self._location, fname)) for fname in folders)

    def download(self) -> None:
        """Download the KITTI data if it doesn't exist already."""
        if self._check_exists():
            return
        os.makedirs(self._raw_folder, exist_ok=True)
        # download files
        for fname in self.resources:
            download_and_extract_archive(
                url=f"{self.data_url}{fname}",
                download_root=self._raw_folder,
                filename=fname,
            )
|
import prefpy
import io
import math
from .preference import Preference
from .profile import Profile
'''
if __name__ == "__main__":
#profile is not defined?
p = Profile()
# need to make filename first
#the designed file name is pretty confusing based on the read_election_file function
# Preflib Election Data Format
p.importPreflibFile("Basic Text Document.txt")
mP = MechanismPlurality()
scoreVect = mP.getScoringVector(p)
'''
class MechanismSTV(Mechanism):
    """
    Goal is to return the winner of STV Voting (plurality each round, where loser
    drops out every round until there is a winner).
    Inherits from the general scoring mechanism (can change to positional if that
    works better).

    TODO:
    - STV with no tiebreaker
    - STV with ties broken alphabetically
    - STV with all alternatives returned
    - Ensure voting is valid (no partial orders)

    A few questions for the future:
    - Should the final result rank whoever dropped first as the last place?
    - Curious about line 97 in Mechanism.py:
      if elecType != "soc" and elecType != "toc":
          return False
      Is this correct? It seems like there should be an 'or'
    """
    # NOTE(review): ``Mechanism`` is not imported anywhere in this module
    # — presumably ``from .mechanism import Mechanism``; confirm.

    # def __init__(self):
    #     add something here...
    # override getWinners eventually...
    # def getWinners(self, profile):
    # and possibly getRanking...
    # def getRanking(self, profile)

    def computeRoundLoser(self, profile, droppedOut):
        """Return the candidate(s) with the fewest first-choice votes
        among candidates still standing.

        profile    - iterable of preferences (each with
                     getReverseRankMap() and a vote multiplicity ``count``)
        droppedOut - candidates already eliminated in earlier rounds
        """
        rankMaps = []
        counts = []
        for preference in profile:
            # BUG FIX: original appended to the misspelled ``ranksMaps``
            # and read ``preferences.count`` — both NameErrors.
            rankMaps.append(preference.getReverseRankMap())
            counts.append(preference.count)
        if len(rankMaps) != len(counts):
            print("something is wrong")
        totals = dict()
        # BUG FIX: original indexed ``counts`` with the *rank* index, not
        # the preference index; pair each rank map with its own count.
        for rankMap, count in zip(rankMaps, counts):
            # assumes getReverseRankMap keys are 1..len(rankMap) — the
            # original range(1, len(rank)) skipped the last rank; confirm.
            for r in range(1, len(rankMap) + 1):
                if rankMap[r] not in droppedOut:
                    if rankMap[r] in totals:
                        totals[rankMap[r]] += count
                    else:
                        totals[rankMap[r]] = count
                    break
        minVotes = min(totals.values())
        # BUG FIX: dict.iteritems() is Python 2 only; use items().
        losers = [key for key, value in totals.items() if value == minVotes]
        return losers

    # def STVWinner(self, profile):
    """
    Computes the winner(s) for STV voting
    TODO: implement this so it continually calls computeRound loser until a winner
    can be found
    """
# Basic main that should get the winner
# (original author note: "Phil still can't load prefpy....")
def main():
    """Drive STV elimination rounds until one candidate remains.

    Ported from broken Python 2 (print statements, ``.len`` attribute
    access, missing ``while`` colon) to valid Python 3.
    """
    filename = "test.txt"
    # NOTE(review): read_election_file is not defined or imported in
    # this module — presumably a prefpy I/O helper; confirm.
    candmap, rankmaps, rankmapcounts, numvoters = read_election_file(filename)
    # BUG FIX: original passed the undefined name ``rankMaps``.
    prof = Profile(candmap, rankmaps)
    dropped = []
    # BUG FIX: ``dropped.len``/``candmap.len`` are not Python; the loop
    # also lacked its colon.
    while len(dropped) < len(candmap) - 1:
        # NOTE(review): computeRoundLoser is a MechanismSTV method —
        # an instance is needed here; confirm intended wiring.
        dropped = computeRoundLoser(prof, dropped)
    for name in dropped:
        if name not in candmap:
            print(name)  # BUG FIX: Python 2 print statement


main()
# Demonstrate the basic tuple operations.
T1 = (1, 2)
T2 = (3, 4)
print(T1)

T3 = T1 + T2
print(T3)       # Concatenation
print(T2 * 4)   # Repetition
print(T3[0])    # Indexing
print(T3[1:3])  # Slicing
|
from django.db import models
from products.models import Products
class Categories(models.Model):
    """Product category; may nest under a parent via ``sub_categories``."""

    title = models.CharField(max_length=50, blank=True)
    # Self-referential parent category; NULL for top-level categories.
    # NOTE(review): ForeignKey without on_delete — only valid on
    # Django < 2.0; required argument from 2.0 on.
    sub_categories = models.ForeignKey('self', blank=True, null=True)

    def __unicode__(self):
        # Python 2 style display name (pre-Django-2 codebase).
        return self.title
class ProductToCategories(models.Model):
    """Join table linking a product to a category."""

    # NOTE(review): ForeignKey without on_delete — Django < 2.0 only.
    categories = models.ForeignKey(Categories)
    product = models.ForeignKey(Products)
|
# This code simulates the airline luggage problem
# Running in O(n^2)
string = '1,2,3,4,5,6,7,8'


def airline_luggage(string):
    """Group comma-separated weights into containers of three and emit
    them in reverse container order (order within a container kept).

    BUG FIX: the original was Python 2 (print statements, integer
    ``/``) and could not run under Python 3; ported with ``//``.
    Returns the reordered comma-separated sequence (the original
    returned None), which is backward-compatible for callers that
    ignored the return value.
    """
    weights = string.split(',')
    length = len(weights)
    container_size = 3
    # Ceiling division: number of containers needed.
    if length % container_size == 0:
        num_containers = length // container_size
    else:
        num_containers = length // container_size + 1
    print(num_containers)
    containers = []
    j = 0
    for _ in range(num_containers):
        containers.append(weights[j:j + container_size])
        j += container_size
    print(containers)
    # Emit containers last-to-first.
    pieces = []
    for idx in range(num_containers - 1, -1, -1):
        pieces.extend(containers[idx])
    output_sequence = ','.join(pieces)
    print("Input sequence = ", string)
    print("Output sequence = ", output_sequence)
    return output_sequence


airline_luggage(string)
# extended from https://github.com/WorldFamousElectronics/PulseSensor_Amped_Arduino
import time
import threading
import board
import busio
import sys
# Open the I2C bus the ADC is attached to.
i2c = busio.I2C(board.SCL, board.SDA)
import adafruit_ads1x15.ads1015 as ADS
from adafruit_ads1x15.analog_in import AnalogIn

# ADS1015 ADC; the pulse sensor is wired to channel P0.
ads = ADS.ADS1015(i2c)
chan = AnalogIn(ads, ADS.P0)

# Poll forever, printing raw value and voltage; the 0.5 ms sleep caps
# the sample rate at roughly 2 kHz.
while True:
    sys.stdout.flush()
    print(chan.value, chan.voltage)
    #time.sleep(0.005)
    time.sleep(0.0005)
|
# Generated by Django 3.0.4 on 2020-04-25 08:32
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a ``visible`` flag (default True) to the vacancy model."""

    dependencies = [
        ('apps', '0021_auto_20200425_1121'),
    ]

    operations = [
        migrations.AddField(
            model_name='vacancy',
            name='visible',
            # verbose_name is Russian for "visible".
            field=models.BooleanField(default=True, verbose_name='Видимый'),
        ),
    ]
|
# Create a Book class, that has an author, a title and a release year
# Create a constructor for setting those values
# Book should be represented as string in this format:
# Douglas Adams : The Hitchhiker's Guide to the Galaxy (1979)
# Create a BookShelf class that has a list of books in it
# We should be able to add and remove books.
# We should be able to query the favourite author (who has written the most books in the shelf)
# We should be able to query the earliest published books.
# We should be able to query the latest published books.
# Bookself should have a method whitch give us information about the number of books,
# the earliest and the latest released books, and the favourite author
class Book():
    """A book with an author, a title and a release year.

    Displayed as ``Author: Title (Year)``.
    """

    def __init__(self, author, title, release_year):
        self.title = title
        self.author = author
        self.release_year = release_year
        # BUG FIX: the original concatenated release_year without str(),
        # raising TypeError for integer years. The preformatted ``book``
        # attribute is kept for backward compatibility and now simply
        # reuses __repr__.
        self.book = str(self)

    def __repr__(self):
        return str(self.author) + ": " + str(self.title) + " (" + str(self.release_year) + ")"
class BookShelf():
    """Holds books and answers queries about them.

    BUG FIX: every original method constructed ``Book()`` with no
    arguments and therefore always raised TypeError, and
    ``name_the_favourite_author`` referenced undefined names. The
    mutators now take the book to add/remove; calling them with no
    argument still raises TypeError, as the originals effectively did.
    Works with any object exposing ``author``, ``title`` and
    ``release_year`` attributes.
    """

    def __init__(self):
        self.list_of_books = []
        self.list_of_titles = []  # kept for backward compatibility

    def add_books(self, book):
        """Put ``book`` on the shelf."""
        self.list_of_books.append(book)
        self.list_of_titles.append(book.title)

    def remove_books(self, book):
        """Take ``book`` off the shelf (ValueError if absent)."""
        self.list_of_books.remove(book)
        if book.title in self.list_of_titles:
            self.list_of_titles.remove(book.title)

    def name_the_favourite_author(self):
        """Author with the most books on the shelf; None if empty."""
        if not self.list_of_books:
            return None
        authors = [b.author for b in self.list_of_books]
        return max(set(authors), key=authors.count)

    def earliest_publication(self):
        """Book with the smallest release year; None if empty."""
        if not self.list_of_books:
            return None
        return min(self.list_of_books, key=lambda b: int(b.release_year))

    def latest_publication(self):
        """Book with the largest release year; None if empty."""
        if not self.list_of_books:
            return None
        return max(self.list_of_books, key=lambda b: int(b.release_year))

    def all_information(self):
        """Print a summary of the shelf's contents."""
        self.total = len(self.list_of_books)
        print("You have " + str(self.total) + " books")
        if self.list_of_books:
            print("Earliest released: " + repr(self.earliest_publication()))
            print("Latest released: " + repr(self.latest_publication()))
            print("Favourite author: " + str(self.name_the_favourite_author()))
book = Book("Sylvia Plath", "The Bell Jar", "1963")
print(book)
my_shelf = BookShelf()
# NOTE(review): add_books() constructs Book() with no arguments and
# therefore raises TypeError — this script crashes on the next line
# as written.
my_shelf.add_books()
print(my_shelf.all_information())
#print(my_shelf.books())
# Should print out:
# You have no books here.
#my_shelf.put("Douglas Adams", "The Hitchhiker's Guide to the Galaxy", 1979)
#my_shelf.put("Douglas Adams", "Mostly Harmless", 1992)
#my_shelf.put("Frank Herbert", "Dune", 1965)
#my_shelf.put("Frank Herbert", "The Dragon in the Sea", 1957)
#my_shelf.remove("The Dragon in the Sea")
#print(my_shelf.books())
# Should print out:
# You have 3 books.
# Earliest released: Frank Herbert : Dune (1965)
# Latest released: Douglas Adams : Mostly Harmless (1992)
# Favourite author: Douglas Adams
|
# -*- coding:utf-8 -*-
"""
@author: cuibo
@file: sample.py
@time: 2018/4/20 15:00

Small tutorial script: expressions, data structures and control flow.
The bare Chinese string literals below are no-op section markers kept
from the original (they are expression statements, not comments).
"""
" 表达式"  # section: expressions
a= 10;
b= 20;
# ``and`` returns the second operand when the first is truthy -> 20.
print(a and b)
"数据结构"  # section: data structures
print("------数据结构")
list1 = ['abc','lkl',1991,2002];
list2 = [1,2,3,4,5,6,7,8];
print('list1[0]: ',list1[0])
# NOTE(review): the label says [1:5] but the slice taken is [1:3].
print('list1[1:5]: ',list1[1:3])
# Rebound to tuples (immutable) from here on.
list1 = ('abc','lkl',1991,2002);
list2 = (1,2,3,4,5,6,7,8);
'字典'  # section: dictionary
dict1 = {"name":"panada","age":7}
'----------控制流 '  # section: control flow
'--------- if elif else'
x=10
if x >10:
    print("X 大于 10")
elif x< 20 :
    print("x 小于 10")
else:
    print('zero')
'--------- while 循环'  # while loop: sum the integers 1..n
n= 100
# NOTE(review): ``sum`` shadows the builtin of the same name.
sum = 0
counter = 1
while counter <=n:
    sum = sum +counter
    counter +=1
print("sum:",sum)
print('1 到 %d 之和为 : %d' %(n,sum))
'----------for 循环'  # for loop over a list
language = ["c","c++","java","python","scala"]
for x in language:
    print(x)
print("-------------")
|
import rospy
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader
import time
import matplotlib.pyplot as plt
from numpy.linalg import inv
import math
import torch
from torch.utils.data import TensorDataset, DataLoader
import anfis
from membership import TrapezoidalMembFunc, make_trap_mfs, make_bell_mfs, BellMembFunc, Zero, make_zero
import os
dtype = torch.float

# Constant forward speed commanded to the robot.
linear_velocity = 1.5

# Robot pose, updated by the odometry callback.
x = 0.0
y = 0.0
q1 = 0.0  # quaternion w
q2 = 0.0  # quaternion x
q3 = 0.0  # quaternion y
q4 = 0.0  # quaternion z
currentAngle = 0.0  # yaw derived from the quaternion

control_law = 0.0  # last commanded angular velocity
stop = False       # set once the final path segment is reached
path_count = 0     # index of the current path segment
batch_size = 128
done = False
robot_path = []    # trace of visited (x, y) points
dis_error = []     # history of signed distance-to-line errors
def angdiff(th1, th2):
    """Signed angular difference th2 - th1, wrapped to [-pi, pi)."""
    return -(np.mod(th1 - th2 + np.pi, 2 * np.pi) - np.pi)
def wraptopi(x):
    """Wrap the angle ``x`` (radians) into [-pi, pi)."""
    two_pi = 2 * np.pi
    wrapped = x - np.floor(x / two_pi) * two_pi
    if wrapped >= np.pi:
        wrapped -= two_pi
    return wrapped
def test_course2():
    """Return the hard-coded waypoint list [[x, y], ...] of the second
    (serpentine, closed-loop) test course; starts and ends at [0, 0]."""
    path = [
        [ 0 , 0],
        [14.7800, 0],
        [15.6427, -0.3573],
        [16.0000, -1.2200],
        [16.0000, -4.7800],
        [15.6427, -5.6427],
        [14.7800, -6.0000],
        [1.2200 , -6.0000],
        [0.3573 , -6.3573],
        [0.0000 , -7.2200],
        [0.0000 , -10.7800],
        [0.3573 , -11.6427],
        [1.2200 , -12.0000],
        [14.7800, -12.0000],
        [15.6427, -12.3573],
        [16.0000, -13.2200],
        [16.0000, -16.7800],
        [15.6427, -17.6427],
        [14.7800, -18.0000],
        [-3.7800, -18.0000],
        [-4.6427, -17.6427],
        [-5.0000, -16.7800],
        [-5.0000, -1.2200],
        [-4.6427, -0.3573],
        [-3.7800, 0],
        [ 0 , 0]
    ]
    return path
def test_course():
    """Return the hard-coded waypoint list [[x, y], ...] of the primary
    (wavy, open-ended) test course; starts at [0, 0]."""
    path = [
        [0.0, 0.0],
        [1.0000 , 0],
        [1.7000, -0.1876],
        [2.2124, -0.7000],
        [2.4000, -1.4000],
        [2.5876, -2.1000],
        [3.1000, -2.6124],
        [3.8000, -2.8000],
        [4.5000, -2.6124],
        [5.0124, -2.1000],
        [5.2000, -1.4000],
        [5.3876, -0.7000],
        [5.9000, -0.1876],
        [6.6000, 0],
        [7.3000, -0.1876],
        [7.8124, -0.7000],
        [8.0000, -1.4000],
        [8.1876, -2.1000],
        [8.7000, -2.6124],
        [9.4000, -2.8000],
        [10.1000, -2.6124],
        [10.6124, -2.1000],
        [10.8000, -1.4000],
        [10.9876, -0.7000],
        [11.5000, -0.1876],
        [12.2000, 0],
        [12.9000, -0.1876],
        [13.4124, -0.7000],
        [13.6000, -1.4000],
        [13.7876, -2.1000],
        [14.3000, -2.6124],
        [15.0000, -2.8000],
        [16.0000, -2.8000] ]
    # path = [[0.0, 0.0],[2.0, 0.0], [2.0, 2.0], [4.0, 2.0]]
    return path
def fuzzy_error(curr, tar, future):
    """Error terms of the robot pose relative to the segment curr->tar.

    Reads the global pose (x, y, currentAngle); appends the signed
    cross-track distance to the global ``dis_error`` history.

    Returns ``[distanceLine, theta_far, theta_near]`` with both angles
    wrapped to [-pi, pi). ``future``'s heading (th3) is computed but
    unused here.
    """
    global dis_error
    # Projection of the robot position onto the line through curr-tar,
    # solved as a 2x2 linear system.
    A = np.array([ [curr[1]-tar[1], tar[0]-curr[0]], [tar[0]-curr[0], tar[1]-curr[1]] ] )
    b = np.array([ [tar[0]*curr[1]-curr[0]*tar[1]], [x*(tar[0]-curr[0]) + y*(tar[1]-curr[1])] ])
    proj = np.matmul(inv(A),b)
    # Sign of the cross product decides which side of the line we are on.
    d = ( x-curr[0] )*(tar[1]-curr[1]) - (y-curr[1])*(tar[0]-curr[0])
    if ( d >0):
        side = 1
    elif ( d < 0):
        side = -1
    else:
        side = 0
    distanceLine= np.linalg.norm(np.array([x,y])-proj.T,2)*side ##########################check this
    dis_error.append(distanceLine)
    # Look-ahead point 10% of the way from the projection to the target.
    farTarget = np.array( [0.9*proj[0] + 0.1*tar[0], 0.9*proj[1] + 0.1*tar[1]] )
    th1 = math.atan2(farTarget[1]-y, farTarget[0]-x)
    th2 = math.atan2(tar[1]-curr[1], tar[0]-curr[0])
    th3 = math.atan2(future[1]-tar[1], future[0]-tar[0])
    theta_far = th1 - currentAngle
    theta_near = th2 - currentAngle
    theta_far = wraptopi(theta_far)
    theta_near = wraptopi(theta_near)
    return [distanceLine,theta_far,theta_near]
def target_generator(path):
    """Return (current, target, future) waypoints along ``path``.

    Advances the global ``path_count`` once the robot's projection
    passes the end of the current segment, and sets the global ``stop``
    when the last segment is reached. Reads the global pose (x, y).
    """
    global path_count
    global stop
    global path_length
    path_length = len(path) - 1
    pos_x = x
    pos_y = y
    current_point = np.array( path[path_count] )
    target = np.array( path[path_count + 1] )
    # Project the robot position onto the current segment's line.
    A = np.array([ [(current_point[1]-target[1]),(target[0]-current_point[0])], [(target[0]-current_point[0]), (target[1]-current_point[1])] ])
    b = np.array([ [(target[0]*current_point[1] - current_point[0]*target[1])], [(pos_x*(target[0]-current_point[0]) + pos_y*(target[1] - current_point[1]))] ])
    proj = np.matmul(inv(A),b)
    current_point = np.array( [ [current_point[0]],[current_point[1]] ] )
    target = np.array( [ [target[0]],[target[1]] ] )
    temp1 = proj-current_point ####dot product
    temp2 = target - current_point
    # Normalised projection length along the segment; > 1 means the
    # segment's end has been passed.
    projLen = (temp1[0]*temp2[0] + temp1[1]*temp2[1]) / np.linalg.norm(target - current_point,2)**2
    if (projLen > 1):
        path_count += 1
    if (path_count == path_length-1):
        stop = True
    # Near the end of the path there is no distinct look-ahead point;
    # reuse the target as ``future``.
    if ( (path_count == (path_length-2)) or (path_count == (path_length -1)) ):
        curr = np.array(path[path_count])
        tar = np.array(path[path_count+1])
        future = np.array(path[path_count+1])
    else:
        curr = np.array(path[path_count])
        tar = np.array(path[path_count+1])
        future = np.array(path[path_count+2])
    return curr, tar, future
def reward(errors, linear_vel, angular_vel):
    """Shaped scalar reward for path tracking.

    Penalises cross-track distance, near/far heading errors and
    steering effort; rewards forward velocity when close to the path.

    errors      -- [distance, theta_far, theta_near] from fuzzy_error
    linear_vel  -- commanded forward speed
    angular_vel -- commanded steering rate
    """
    # Shaping gains (identical to the original tuning; TDD_* are unused).
    DE_penalty_gain = 25
    DE_penalty_shape = 1
    HE_penalty_gain = 25
    HE_penalty_shape = 3
    HE_iwrt_DE = 2
    TDD_reward_gain = 5
    TDD_iwrt_DE = 5
    vel_reward_gain = 1
    vel_iwrt_DE = 1
    steering_penalty_gain = 1
    steering_iwrt_DE = 4

    dist, far_err, near_err = errors[0], errors[1], errors[2]
    abs_dist = np.abs(dist) / 1.0

    # Distance penalty grows with |distance|.
    dist_term = -DE_penalty_gain * (math.pow(abs_dist, DE_penalty_shape) + abs_dist)
    # Heading penalties are attenuated when far from the path.
    attenuation = np.exp(abs_dist * HE_iwrt_DE)
    near_term = -15 * math.pow(near_err / np.pi, HE_penalty_shape) * HE_penalty_gain / attenuation
    far_term = -1.5 * math.pow(np.abs(far_err) / np.pi, HE_penalty_shape) * HE_penalty_gain / attenuation
    # Velocity is rewarded only near the path; steering effort penalised.
    vel_term = linear_vel * vel_reward_gain / np.exp(abs_dist * vel_iwrt_DE)
    steer_term = -np.abs(angular_vel) * steering_penalty_gain / np.exp(abs_dist * steering_iwrt_DE)

    return dist_term + near_term + far_term + vel_term + steer_term
def callback(msg):
    """Odometry callback: cache the latest pose, derive yaw, and record the path."""
    global x, y, q1, q2, q3, q4, currentAngle, robot_path, stop, done
    x = msg.pose.pose.position.x
    y = msg.pose.pose.position.y
    q2 = msg.pose.pose.orientation.x
    q3 = msg.pose.pose.orientation.y
    q4 = msg.pose.pose.orientation.z
    q1 = msg.pose.pose.orientation.w
    # Yaw (rotation about z) recovered from the quaternion (w, x, y, z).
    currentAngle = math.atan2(2 * (q1 * q4 + q2 * q3), 1 - 2 * (q3 ** 2 + q4 ** 2))
    if stop == False:
        # Still driving: keep extending the recorded trajectory.
        robot_path.append([x, y])
    print('x position: ', x)
    print('y position: ', y)
# Build the reference path. The course MUST start at (0, 0): the robot spawns
# there, so the first segment is anchored at the start pose.
test_path = test_course()
pathcount = 0                  # NOTE(review): target_generator() reads globals
pathlength = len(test_path)    # named path_count / path_length (with underscore);
                               # these names differ -- confirm they are set elsewhere.
test_path.append([1000, 1000])  # far-away sentinel so look-ahead indexing stays in range
agent = torch.load('anfis_initialized.model')  # pre-initialised ANFIS agent, trained below
##########################################################3
rospy.init_node('check_odometry')
# sub = rospy.Subscriber("/odom", Odometry, callback)
sub = rospy.Subscriber("/odometry/filtered", Odometry, callback)  # pose updates -> callback()
pub = rospy.Publisher("/cmd_vel", Twist, queue_size=10)
# rate = rospy.Rate(100)
######################################################3
while not rospy.is_shutdown():
    # Busy-wait until exactly one /cmd_vel subscriber (the robot) is connected.
    while not pub.get_num_connections() == 1:
        print(pub.get_num_connections())
    current_point, target_point, future_point = target_generator(test_path)
    if stop == True:
        # End of path reached (flag set elsewhere): reset the sim and leave the loop.
        print("STOP")
        os.system('rosservice call /gazebo/reset_world "{}"')
        os.system('rosservice call /set_pose "{}"')
        break
    new_state = fuzzy_error(current_point, target_point, future_point)
    # DDPG actor output, scaled by 8 then clamped to [-4, 4] rad/s.
    control_law = agent.get_action(np.array(new_state))
    control_law = control_law.item() * 8.
    if (control_law > 4.):
        control_law = 4.
    if (control_law < -4.):
        control_law = -4.
    twist_msg = Twist()
    twist_msg.linear.x = linear_velocity   # constant forward speed (defined earlier in the file)
    twist_msg.angular.z = control_law
    rewards = reward(new_state, linear_velocity, control_law)
    ####do this every 0.05 s
    # Store the transition and run one DDPG update once the buffer is large enough.
    state = agent.curr_states
    new_state = np.array(new_state)
    agent.curr_states = new_state
    agent.memory.push(state, control_law, rewards, new_state, done)  # NOTE(review): pushes the clamped, post-gain action -- confirm intended
    if len(agent.memory) > batch_size:
        agent.update(batch_size)
    pub.publish(twist_msg)
    rospy.sleep(0.001)
torch.save(agent, 'anfis_ddpg_trained.model')
# Plot the reference path (minus the sentinel point) against the recorded robot path.
test_path = np.array(test_path)
robot_path = np.array(robot_path)
plt.plot(test_path[:-1, 0], test_path[:-1, 1])
plt.plot(robot_path[:, 0], robot_path[:, 1])
plt.show()
###distance error mean
#print(np.mean(dis_error))
|
# Hello Variable World
## Instructions
country = "Mali"
name = "Jayahama"
age = 29
hourly_wage = 20.50
satisfied = False
daily_wage = hourly_wage * 8
print(name + " " + country + " " + str(age) + " " + str(hourly_wage))
# Bug fix: the f-string had no {braces}, so it printed the literal text
# "name country daily_wage satisfied" instead of the variable values.
print(f"{name} {country} {daily_wage} {satisfied}")
## **Hint**
|
from battle.battleeffect.EffectType import EffectType
from battle.battleeffect.BattleEffect import BattleEffect
from battle.targetselection.SingleAllyTargetSelection import SingleAllyTargetSelection
import random
class HealSpell(BattleEffect):
    """Healing battle effect targeting a single ally, scaled by the caster's magic."""

    def __init__(self, source_fighter):
        super().__init__(source_fighter, EffectType.healing)
        self.name = "Heal"
        self.selection_strategy = SingleAllyTargetSelection()

    def get_battle_text(self):
        """Flavour text shown when the spell is cast."""
        return "Lowers his head and clears his mind..."

    def calculate_power(self):
        """Healing amount: caster's magic x10 plus a 1-10 random bonus."""
        return random.randint(1, 10) + self.source_fighter.magic * 10
'''class MeteorSpell(BattleEffect):
def __init__(self, source_fighter):
super().__init__(source_fighter, EffectType.magical)
self.name = "Meteor"
self.selection_strategy = AllEnemyTargetSelection() # TODO: Multiple target selection
def get_battle_text(self):
return ("The ground cracks around " + self.source_fighter.name + " as she gathers arcane energy!\n"
+ self.source_fighter.name + " casts Meteor!!")
def calculate_power(self):
return self.source_fighter.magic * 10 + random.randint(1, 25)'''
|
#!/usr/bin/env python
#=======================================================================================
# formationEnergiesFit.py
# Takes formation energies from the file 'formationEnergies.dat' and fits them with
# polynomials in two steps:
# - fits formation energies = f(He/V) for each vacancy number with a piece with
# function (you can choose each order of polynomials and the position of the separation)
# - then fits the obtained parameters = f(V) with polynomials (where you can also
# choose the order)
# Defines the 2D resultFunc corresponding to the fits and write its results in
# 'outputFile.dat' which has exactly the same format as 'formationEnergies.dat'
#=======================================================================================
import numpy as np
import math
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pylab import *
from scipy import stats
from scipy import interpolate
import numpy.polynomial.polynomial as poly
## Decide the polynomial order for 'helium' fit and 'vacancy' fit and the separation
separation = .9
HeFitOrderLow = 2
VFitOrderLow = 3
HeFitOrderHigh = 3
VFitOrderHigh = 3
## Load helium number, vacancy number, and formation energy from 'formationEnergies.dat'
He, V, formationEnergy = loadtxt('formationEnergies.dat', usecols = (1,0,2) , unpack=True)
## Declare a list of color to have plots looking better
colors = ['r', 'b', 'c', 'k', 'g', 'm', 'y']
## Create plots
fig = plt.figure()
energies = plt.subplot(121)
paramsLow = plt.subplot(222)
paramsHigh = plt.subplot(224)
# results = plt.subplot(212, projection='3d')
## Declare lists where all fit parameters will be stored
paramListLow = []
paramListHigh = []
paramFinalLow = []
paramFinalHigh = []
## List to store which vacancy numbers are used
VList = []
## Loop on possible vacancy numbers
for i in range(1, 50):
## Filter on the vacancy number
HeFiltered = He[V == i]
formationEnergyFiltered = formationEnergy[V == i]
VFiltered = i
## Separating data at He/V = separation
HeLow= HeFiltered[HeFiltered/VFiltered <= separation]
formationEnergyLow = formationEnergyFiltered[HeFiltered/VFiltered <= separation]
HeHigh= HeFiltered[HeFiltered/VFiltered > separation]
formationEnergyHigh = formationEnergyFiltered[HeFiltered/VFiltered > separation]
## If data exists
if len(HeFiltered) > 0:
## Fit filtered formationEnergy = f(He/V) with a polynomial of order HeFitOrderLow for the low part
fitLow = poly.polyfit(HeLow/VFiltered, formationEnergyLow, HeFitOrderLow)
## Get the fit function
fitFuncLow = poly.Polynomial(fitLow)
## Fit filtered formationEnergy = f(He/V) with a polynomial of order HeFitOrderHigh for the high part
fitHigh = poly.polyfit(HeHigh/VFiltered, formationEnergyHigh, HeFitOrderHigh)
## Get the fit function
fitFuncHigh = poly.Polynomial(fitHigh)
## Plot both data and the fit
energies.plot(HeLow/VFiltered, fitFuncLow(HeLow/VFiltered), 'r--', color=(0.02*i,0,1), linewidth=3.)
energies.plot(HeHigh/VFiltered, fitFuncHigh(HeHigh/VFiltered), 'r--', color=(0.02*i,0,1), linewidth=3.)
energies.plot(HeFiltered/VFiltered, formationEnergyFiltered, color=(0.02*i,0,1), linewidth=2.)
## Store fit parameters
paramListLow.append(fitLow)
paramListHigh.append(fitHigh)
VList.append(VFiltered)
## Loop on the order of the polynomial
for k in range(0, len(fitLow)):
## Plot the fit parameter of order k = f(V)
paramsLow.scatter(VFiltered, fitLow[k], color=colors[6-k], s=100, alpha=0.7)
## Loop on the order of the polynomial
for k in range(0, len(fitHigh)):
## Plot the fit parameter of order k = f(V)
paramsHigh.scatter(VFiltered, fitHigh[k], color=colors[k], s=100, alpha=0.7)
## Loop on the order of the polynomial for the Low part
for i in range(0, len(paramListLow[0])):
## Fit the parameters of the same order for different vacancy numbers
## with a polynomial of order VFitOrderLow
fit= poly.polyfit(VList, [row[i] for row in paramListLow], VFitOrderLow)
## Get the new fit function
fitFunc = poly.Polynomial(fit)
## Plot the fit and store the parameters
paramsLow.plot(range(1,50), fitFunc(range(1,50)), color=colors[6-i])
paramFinalLow.append(fit)
## Loop on the order of the polynomial for the High part
for i in range(0, len(paramListHigh[0])):
## Fit the parameters of the same order for different vacancy numbers
## with a polynomial of order VFitOrderHigh
fit = poly.polyfit(VList, [row[i] for row in paramListHigh], VFitOrderHigh)
## Get the new fit function
fitFunc = poly.Polynomial(fit)
## Plot the fit and store the parameters
paramsHigh.plot(range(1,50), fitFunc(range(1,50)), color=colors[i])
paramFinalHigh.append(fit)
## Definition of the obtained 2D fitted function
def resultFunc(x, y):
    """Evaluate the fitted 2D formation-energy surface.

    x: He/V ratio; y: vacancy number V.
    Piecewise in x at `separation`, using the module-level coefficient
    tables paramFinalLow/paramFinalHigh built above:
    f = sum_i sum_k c[i][k] * x**i * y**k.
    """
    f = 0
    ## Piecewise function: low branch for He/V <= separation, high branch otherwise.
    if x <= separation :
        for i in range(0, len(paramListLow[0])):
            for k in range(0, len(paramFinalLow[0])):
                f += paramFinalLow[i][k] * math.pow(x,i) * math.pow(y,k)
    else :
        for i in range(0, len(paramListHigh[0])):
            for k in range(0, len(paramFinalHigh[0])):
                f += paramFinalHigh[i][k] * math.pow(x,i) * math.pow(y,k)
    return f
## Write the fitted formation energy for every (He, V) pair in the input data.
outputFile = open('outputFile.dat', 'w')
for i in range(0, len(He)):
    ## Evaluate the fitted surface at this point.
    value = resultFunc(He[i]/V[i], V[i])
    ## Same column order (V He E) as 'formationEnergies.dat'.
    outputFile.write("%d %d %s \n" %(V[i], He[i], value))
outputFile.close()
## Re-load the fitted values and overlay them on the energies plot.
He, V, formationEnergy = loadtxt('outputFile.dat', usecols = (1,0,2) , unpack=True)
## Loop on possible vacancy numbers.
for i in range(1, 50):
    ## Filter on the vacancy number.
    HeFiltered = He[V == i]
    formationEnergyFiltered = formationEnergy[V == i]
    VFiltered = i
    ## If data exists for this vacancy number:
    if len(HeFiltered) > 0:
        ## Thin line: the fitted surface, over the thicker raw-data lines.
        energies.plot(HeFiltered/VFiltered, formationEnergyFiltered, color=(0.02*i,0,1), linewidth=1.)
## Titles, labels and axis limits to keep the plots readable.
paramsLow.set_title("Fit parameters for He/V <= %.1f" % (separation), fontsize=20)
paramsLow.set_xlabel("Vacancy number",fontsize=16)
paramsLow.set_xlim([0, 50])
paramsHigh.set_title("Fit parameters for He/V > %.1f" % (separation), fontsize=20)
paramsHigh.set_xlabel("Vacancy number",fontsize=16)
paramsHigh.set_xlim([0, 50])
energies.set_title("Formation Energies", fontsize=20)
energies.set_xlabel("Helium/Vacancy number",fontsize=16)
energies.set_xlim([0, 6])
energies.set_ylim([0, 100])
## Show the plots.
plt.show()
#!/usr/bin/python
import numpy as np
import pylab as py
from scipy import integrate
from COMMON import nanosec,yr,week,grav,msun,light,mpc,hub0,h0,omm,omv,kpc,mchirpfun,fmaxlso
# Plot h(z) for a fixed physical chirp mass and frequency (this curve shows the
# high-redshift amplification), plus h(z) for a source whose *redshifted* chirp
# mass equals that physical chirp mass (no amplification), on the same axes.
# Input parameters:
maxreds = 1000      # maximum redshift considered for the plots
minreds = 1e-2      # minimum redshift
zbins = 1000        # number of z-bins used to construct the redshift grid
mch = 10**(9.3)     # chirp mass [solar masses]
freq = 1e-8         # observed GW frequency [Hz]
plotxmin = 1e-2     # axis limits for the figure
plotxmax = 1e2
plotymin = 1e-16
plotymax = 1e-13
outputplotdir = '../plots/'
hdet = 1e-15        # detector strain level drawn as the sensitivity line  # TODO confirm meaning
#-----------------------------------------------------------------
#Define some functions
def fmaxlso(m1, m2):
    """Emitted GW frequency [Hz] at the last stable orbit of a binary.

    m1, m2: component masses in solar masses. (NOTE: this local definition
    shadows the fmaxlso imported from COMMON at the top of the file; the
    original docstring mentioned a redshift argument that does not exist.)
    """
    return light**3./(6.*np.sqrt(6.)*np.pi*grav*(m1+m2)*msun)
def hlso(mch, comdist):
    """Strain amplitude at the last stable orbit for an equal-mass binary.

    mch: chirp mass [solar masses]; comdist: comoving distance [Mpc].
    """
    m1 = mch*2**(1./5.)  # equal-mass binary: m1 = m2 = mch * 2^(1/5)
    m2 = m1
    const = 4.*(grav*msun)**(5./3.)*np.pi**(2./3.)/(light**4.)
    return const*mch**(5./3.)*fmaxlso(m1,m2)**(2./3.)/(comdist*mpc)
def htime(mch, f, z, lumdist):
    """Averaged time-domain GW strain amplitude (dimensionless).

    mch: chirp mass [solar masses]; f: observed GW frequency [Hz];
    z: redshift (scalar or array); lumdist: luminosity distance D_L(z) [Mpc].
    The frequency is capped at the redshifted last-stable-orbit frequency.
    (Original docstring referred to a 'zpart' argument that does not exist.)
    """
    m1 = mch*2**(1./5.)  # equal-mass binary with the given chirp mass
    m2 = m1
    const = 4.*grav**(5./3.)*np.pi**(2./3.)/(light**4.)
    flim = np.minimum(fmaxlso(m1, m2)*(1.+z)**(-1.), f)
    return const*(mch*msun)**(5./3.)*flim**(2./3.)*(1.+z)**(5./3.)*1./(lumdist*mpc)
def htime_z(mch_z, f, lumdist):
    """Same as htime() but taking a *redshifted* chirp mass mch_z, so no
    explicit (1+z) factors appear -- the no-amplification comparison curve.

    mch_z: redshifted chirp mass [solar masses]; f: observed frequency [Hz];
    lumdist: luminosity distance [Mpc].
    """
    m1 = mch_z*2**(1./5.)
    m2 = m1
    const = 4.*grav**(5./3.)*np.pi**(2./3.)/(light**4.)
    flim = np.minimum(fmaxlso(m1, m2), f)
    return const*(mch_z*msun)**(5./3.)*flim**(2./3.)*1./(lumdist*mpc)
#-----------------------------------------------------------------
# Calculate luminosity and comoving distances on the redshift grid.
reds = np.logspace(np.log10(minreds), np.log10(maxreds), zbins)  # log-spaced redshifts
reds_m = 0.5*(reds[1:]+reds[:-1])  # bin-centre redshifts
lum_dist = np.zeros(len(reds_m))   # D_L(z), luminosity distance [Mpc]
com_dist = np.zeros(len(reds_m))   # r(z), comoving distance [Mpc]
dist_const = light/(hub0*h0)/mpc   # constant prefactor for the distances
# Fixes: use range() instead of the Python-2-only xrange() (identical when only
# iterating), and compute the comoving integral once per bin instead of twice
# (D_L = (1+z) * r for a flat cosmology, so both used the same integral).
for zi in range(len(reds_m)):
    comoving = integrate.quad(lambda z: (omm*(1.+z)**3.+omv)**(-0.5), 0, reds_m[zi])[0]*dist_const
    com_dist[zi] = comoving
    lum_dist[zi] = (1.+reds_m[zi])*comoving
# Curves that will be plotted.
htimevec = htime(mch, freq, reds_m, lum_dist)            # amplified strain h(z)
fmaxobsvec = fmaxlso(mch*2**(1./5.),mch*2**(1./5.))*1./(1.+reds_m)  # observed LSO frequency vs z
htimevec[freq>=fmaxobsvec] = 0   # zero the strain where the source no longer emits at freq
htimezvec = htime_z(mch, freq, lum_dist)  # same mch treated as a redshifted mass (no amplification)
hlsovec = hlso(mch, com_dist)
detec = np.ones(len(reds_m))*hdet         # flat detector-sensitivity line
# Apparent horizon: where the no-amplification curve meets the detector line.
zappind = abs(detec-htimezvec).argmin()
zapp_y = detec[zappind]
zapp_x = reds_m[zappind]
# Absolute horizons: the amplified curve can cross the detector line on both
# sides of its minimum, so search each side separately.
hminind = htimevec[htimevec>0.].argmin()
zabs1ind = abs(detec[htimevec>0.][0:hminind]-htimevec[htimevec>0.][0:hminind]).argmin()
zabs1_y = detec[htimevec>0.][0:hminind][zabs1ind]
zabs1_x = reds_m[htimevec>0.][0:hminind][zabs1ind]
zabs2ind = abs(detec[htimevec>0.][hminind::]-htimevec[htimevec>0.][hminind::]).argmin()
zabs2_y = detec[htimevec>0.][hminind::][zabs2ind]
zabs2_x = reds_m[htimevec>0.][hminind::][zabs2ind]
# Redshift where the strain curve meets the last-stable-orbit strain.
ziscoind = abs(htimevec-hlsovec).argmin()
zisco_x = reds_m[ziscoind]
zisco_y = hlsovec[ziscoind]
# Plot sizing/options tuned for the paper (two-column figure width).
fig_width = 3.4039
goldenmean = (np.sqrt(5.)-1.0)/2.0
fig_height = fig_width * goldenmean
sizepoints = 8
legendsizepoints = 4.5
py.rcParams.update({
    'backend': 'ps',
    'ps.usedistiller': 'xpdf',
    'text.usetex': True,
    'figure.figsize': [fig_width, fig_height],
    'axes.titlesize': sizepoints,
    'axes.labelsize': sizepoints,
    'text.fontsize': sizepoints,  # NOTE(review): key removed in modern matplotlib -- confirm the pinned version
    'xtick.labelsize': sizepoints,
    'ytick.labelsize': sizepoints,
    'legend.fontsize': legendsizepoints
})
left, right, top, bottom, cb_fraction = 0.14, 0.94, 0.96, 0.16, 0.145  # borders of the plot
# Create the plot.
fig = py.figure()
fig.subplots_adjust(left=left, right=right, top=top, bottom=bottom)
ax = fig.gca()
# Grey region above the LSO strain curve.
ax.fill_between(reds_m, np.ones(len(reds_m))*maxreds, hlsovec, color='grey', alpha=0.5, edgecolor='grey', linewidth=0.0)
#ax.plot(reds_m, hlsovec, '-.', color='gray')
ax.plot(reds_m, htimevec, color='black')          # amplified strain
ax.plot(reds_m, htimezvec, '--', color='black')   # no-amplification strain
ax.plot(reds_m, detec, ':', color='black', linewidth=2)  # detector line
ax.plot(zapp_x, zapp_y, '^', color='black')       # apparent horizon
ax.plot(zabs1_x, zabs1_y, 'o', color='black')     # absolute horizons
ax.plot(zabs2_x, zabs2_y, 'o', color='black')
ax.plot(zisco_x, zisco_y, 's', color='black')     # LSO crossing
ax.grid()
ax.set_ylabel('$\\log_{10}(h)$')
ax.set_xlabel('$\\log_{10}(\\mathrm{Redshift})$')
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim(plotxmin,plotxmax)
ax.set_xticks([ 1e-2, 1e-1, 1e0, 1e1, 1e2])
ax.set_xticklabels(["$-2$", "$ -1$", "$0$", "$1$", "$2$"])
ax.set_ylim(plotymin,plotymax)
ax.set_yticks([1e-16, 1e-15, 1e-14, 1e-13])
ax.set_yticklabels(["$-16$", "$-15$", "$-14$", "$-13$"])
#ax.text(10,1e-14,'$h^\\textrm{LSO}$',fontsize=9)
#ax.legend(loc='lower right',handlelength=3.5)
# Save the figure.
oplot = 'h_comparison.pdf'
fig.savefig(outputplotdir+oplot, transparent=True)
|
import random
import math

# Ask for an integer A and test whether it is divisible by a random
# integer b drawn from [2, 20).
print("Digite o valor A: ")
a = int(input())
b = math.floor(random.uniform(2, 20))
flag = a % b == 0
if flag:
    print(str(flag))
else:
    print(flag)
print(str(b))
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
@receiver(post_save, sender=User)
def announce_new_user(sender, instance, created, **kwargs):
    """Broadcast user create/edit events to the "gossip" channel group.

    The two branches of the original differed only in the event label, so
    they are merged into a single group_send call.
    """
    channel_layer = get_channel_layer()
    event = "New User" if created else "Edit User"
    # NOTE(review): "username" is populated from last_name, not User.username
    # -- looks suspicious but is preserved; confirm against the consumer.
    async_to_sync(channel_layer.group_send)(
        "gossip",
        {"type": "user.gossip", "event": event, "username": instance.last_name},
    )
|
# Generated by Django 3.1.7 on 2021-03-07 06:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: alter enrolled.tag to CharField(max_length=255, null=True)."""

    dependencies = [
        ('api', '0017_auto_20210307_0405'),
    ]

    operations = [
        migrations.AlterField(
            model_name='enrolled',
            name='tag',
            field=models.CharField(max_length=255, null=True),
        ),
    ]
|
import h5py
import pylab as pl

# Load the logged arrays. Fixes: open explicitly read-only (older h5py
# defaulted to 'a', which can modify or even create the file) and use a
# context manager so the file is closed even if a read fails.
with h5py.File('logs/data.h5', 'r') as data_file:
    x = data_file['x'][:]
    y = data_file['y'][:]
    X = data_file['X'][:]

# Scatter plot of (x, y) coloured by X. The subplot no longer reuses the
# name 'f', which previously shadowed the (already closed) file handle.
fig = pl.figure(figsize=(5, 5))
ax = fig.add_subplot(111)
ax.scatter(x, y, c=X, s=3)
pl.show()
|
from reportlab.pdfgen import canvas
from reportlab.lib.pagesizes import A4, portrait
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.cidfonts import UnicodeCIDFont
from reportlab.lib.units import cm
import openpyxl
import pathlib
import datetime
from PIL import Image
def load_informatiom():
    """Read the seminar workbook and return its fields as a dict.

    A row whose first cell is "導覽內容" starts the multi-line tour-content
    section: its value plus every later row's second column are collected
    into a list. Any other row with a non-empty first cell becomes a simple
    key -> value entry.
    """
    workbook = openpyxl.load_workbook("..\data\特銷說明會導覽.xlsx")
    sheet = workbook.active
    info = {}
    for row_idx in range(1, sheet.max_row + 1):
        key = sheet.cell(row_idx, 1).value
        if key == "導覽內容":
            lines = [sheet.cell(row_idx, 2).value]
            lines.extend(
                sheet.cell(extra_row, 2).value
                for extra_row in range(row_idx + 1, sheet.max_row + 1)
            )
            info.setdefault("導覽內容", lines)
        elif key is not None:
            info.setdefault(key, sheet.cell(row_idx, 2).value)
    return info
sale_dict = load_informatiom()  # seminar info: topic, time, place, tour content
path = pathlib.Path("..\data\sales\pdf")  # output directory for the generated PDFs
wb = openpyxl.load_workbook("..\data\客戶聯絡資料.xlsx")  # customer contact workbook
sh = wb["收件人資料"]  # "recipient data" sheet
# Generate one personalised PDF per recipient row.
for row in range(1, sh.max_row + 1):
    file_name = (sh.cell(row,2).value) + "先生/小姐特銷會說明.pdf"
    out_path = path / file_name
    cv = canvas.Canvas(str(out_path), pagesize=portrait(A4))
    cv.setTitle("特銷說明會導覽")
    # Register a CJK-capable CID font used for all text on the page.
    pdfmetrics.registerFont(UnicodeCIDFont("HeiseiKakuGo-W5"))
    cv.setFont("HeiseiKakuGo-W5", 12)
    cv.drawCentredString(6*cm, 27*cm, sh.cell(row,2).value + " " \
        + sh.cell(row,3).value + " 先生/小姐")
    cv.line(1.8*cm, 26.8*cm,10.8*cm,26.8*cm)  # underline beneath the customer name
    cv.setFont("HeiseiKakuGo-W5", 14)
    cv.drawCentredString(10*cm, 24*cm, sale_dict["主題"])
    cv.setFont("HeiseiKakuGo-W5", 12)
    cv.drawString(2*cm, 22*cm, "舉辦時間:" + sale_dict["舉辦時間"])
    cv.drawString(2*cm, 21*cm, "舉辦地點:" + sale_dict["舉辦地點"])
    # Multi-line tour-content block rendered as a text object.
    textobject = cv.beginText()
    textobject.setTextOrigin(2*cm, 19*cm,)
    textobject.setFont("HeiseiKakuGo-W5", 12)
    for line in sale_dict["導覽內容"]:
        textobject.textOut(line)
        textobject.moveCursor(0,14)  # POSITIVE Y moves down in text objects!
    cv.drawText(textobject)
    now = datetime.datetime.now()
    cv.drawString(14.4*cm, 14.8*cm, now.strftime("%Y/%m/%d"))  # issue date
    image =Image.open("..\data\logo.png")
    cv.drawInlineImage(image,13*cm,13*cm)  # company logo
    cv.showPage()
    cv.save()
# Generated by Django 3.1.3 on 2021-01-07 17:31
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: make reddituser.bio blank/nullable with max_length=250."""

    dependencies = [
        ('reddituser', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='reddituser',
            name='bio',
            field=models.TextField(blank=True, max_length=250, null=True),
        ),
    ]
|
# Print the even numbers in 0..9. A plain loop replaces the original list
# comprehension, which was used only for its side effects and discarded the
# list of None values it built.
for i in range(10):
    if i % 2 == 0:
        print(i)
|
from flask_wtf import FlaskForm
from wtforms import StringField, validators, SelectField, FormField, FieldList, IntegerField, PasswordField, BooleanField
from werkzeug.datastructures import MultiDict
# some web forms and what not
class LoginForm(FlaskForm):
    """Sign-in form: email, password, and a remember-me toggle."""
    email = StringField('Email', [validators.Email(message='Please Enter A Valid Email')])
    password = PasswordField('Password')
    remember_me = BooleanField('Remember Me')
class RegisterForm(FlaskForm):
    """New-account registration; is_cardio marks the user as a cardiologist."""
    email = StringField('Email', [validators.Email(message='Please Enter A Valid Email')])
    first_name = StringField('First Name', [validators.DataRequired(message='Please Enter Something')])
    last_name = StringField('Last Name', [validators.DataRequired(message='Please Enter Something')])
    password = PasswordField('Password')
    is_cardio = SelectField('Cardiologist?', choices=[('True', 'Yes'), ('False', 'No')])
class UserForm(FlaskForm):
    """Edit form for an existing user's profile and role flags."""
    first_name = StringField('First Name', [validators.DataRequired(message='Please Enter Something')])
    last_name = StringField('Last Name', [validators.DataRequired(message='Please Enter Something')])
    email = StringField('Email', [validators.Email(message='Please Enter A Valid Email')])
    is_cardio = SelectField('Cardiologist?', choices=[('True', 'Yes'), ('False', 'No')])
    is_admin = SelectField('Administrator? (can add/remove users)', choices=[('False', 'No'), ('True', 'Yes')])
class EmailForm(FlaskForm):
    """Single-field form asking for an email address."""
    email = StringField('Enter your email: ', [validators.DataRequired(message='Please Enter Something')])
class MessageForm(FlaskForm):
    """Free-text message addressed to the site admins."""
    message = StringField('Enter your message to the Admins', [validators.DataRequired(message='Please Enter Something')])
class DeleteForm(FlaskForm):
    """Identify a user to delete; only the first name is mandatory."""
    first_name = StringField('First Name', [validators.DataRequired(message='Please Enter Something')])
    last_name = StringField('Last Name')
    email = StringField('Email')
class SetPasswordForm(FlaskForm):
    """Set or reset a password; the field must not be empty."""
    password = PasswordField('Password', [validators.DataRequired(message='Please Enter Something')])
class ScheduleEntryForm(FlaskForm):
    """One schedule slot: the first name of the person working it."""
    first_name = StringField('First Name', [validators.DataRequired(message='Please Enter Something')])
class ScheduleForm(FlaskForm):
    """Three weeks of day-by-day staff name slots.

    Field naming: userfirstNames<Day><Week>, Day in {M,T,W,Th,F,S,Su},
    Week in {1,2,3}. Weekdays take 3-7 entries; weekend days (S/Su) have
    min_entries=1 and no maximum.
    """
    userfirstNamesM1 = FieldList(FormField(ScheduleEntryForm), min_entries=3, max_entries = 7)
    userfirstNamesT1 = FieldList(FormField(ScheduleEntryForm), min_entries=3, max_entries = 7)
    userfirstNamesW1 = FieldList(FormField(ScheduleEntryForm), min_entries=3, max_entries = 7)
    userfirstNamesTh1 = FieldList(FormField(ScheduleEntryForm), min_entries=3, max_entries = 7)
    userfirstNamesF1 = FieldList(FormField(ScheduleEntryForm), min_entries=3, max_entries = 7)
    userfirstNamesS1 = FieldList(FormField(ScheduleEntryForm), min_entries=1)
    userfirstNamesSu1 = FieldList(FormField(ScheduleEntryForm), min_entries=1)
    userfirstNamesM2 = FieldList(FormField(ScheduleEntryForm), min_entries=3, max_entries = 7)
    userfirstNamesT2 = FieldList(FormField(ScheduleEntryForm), min_entries=3, max_entries = 7)
    userfirstNamesW2 = FieldList(FormField(ScheduleEntryForm), min_entries=3, max_entries = 7)
    userfirstNamesTh2 = FieldList(FormField(ScheduleEntryForm), min_entries=3, max_entries = 7)
    userfirstNamesF2 = FieldList(FormField(ScheduleEntryForm), min_entries=3, max_entries = 7)
    userfirstNamesS2 = FieldList(FormField(ScheduleEntryForm), min_entries=1)
    userfirstNamesSu2 = FieldList(FormField(ScheduleEntryForm), min_entries=1)
    userfirstNamesM3 = FieldList(FormField(ScheduleEntryForm), min_entries=3, max_entries = 7)
    userfirstNamesT3 = FieldList(FormField(ScheduleEntryForm), min_entries=3, max_entries = 7)
    userfirstNamesW3 = FieldList(FormField(ScheduleEntryForm), min_entries=3, max_entries = 7)
    userfirstNamesTh3 = FieldList(FormField(ScheduleEntryForm), min_entries=3, max_entries = 7)
    userfirstNamesF3 = FieldList(FormField(ScheduleEntryForm), min_entries=3, max_entries = 7)
    userfirstNamesS3 = FieldList(FormField(ScheduleEntryForm), min_entries=1)
    userfirstNamesSu3 = FieldList(FormField(ScheduleEntryForm), min_entries=1)
class NumberUsersForm(FlaskForm):
    """Headcount per day over a three-week schedule.

    Field naming mirrors ScheduleForm: NumberUsers<Day><Week>. Weekdays
    default to 3 workers, weekend days to 1.
    """
    NumberUsersM1 = IntegerField('# Working on first Monday', [validators.DataRequired(message = 'Please Enter Something')], default=3)
    NumberUsersT1 = IntegerField('# Working on first Tuesday', [validators.DataRequired(message = 'Please Enter Something')], default=3)
    NumberUsersW1 = IntegerField('# Working on first Wednesday', [validators.DataRequired(message = 'Please Enter Something')], default=3)
    NumberUsersTh1 = IntegerField('# Working on first Thursday', [validators.DataRequired(message = 'Please Enter Something')], default=3)
    NumberUsersF1 = IntegerField('# Working on first Friday', [validators.DataRequired(message = 'Please Enter Something')], default=3)
    NumberUsersS1 = IntegerField('# Working on first Saturday', [validators.DataRequired(message = 'Please Enter Something')], default=1)
    NumberUsersSu1 = IntegerField('# Working on first Sunday', [validators.DataRequired(message = 'Please Enter Something')], default=1)
    NumberUsersM2 = IntegerField('# Working on second Monday', [validators.DataRequired(message = 'Please Enter Something')], default=3)
    NumberUsersT2 = IntegerField('# Working on second Tuesday', [validators.DataRequired(message = 'Please Enter Something')], default=3)
    NumberUsersW2 = IntegerField('# Working on second Wednesday', [validators.DataRequired(message = 'Please Enter Something')], default=3)
    NumberUsersTh2 = IntegerField('# Working on second Thursday', [validators.DataRequired(message = 'Please Enter Something')], default=3)
    NumberUsersF2 = IntegerField('# Working on second Friday', [validators.DataRequired(message = 'Please Enter Something')], default=3)
    NumberUsersS2 = IntegerField('# Working on second Saturday', [validators.DataRequired(message = 'Please Enter Something')], default=1)
    NumberUsersSu2 = IntegerField('# Working on second Sunday', [validators.DataRequired(message = 'Please Enter Something')], default=1)
    NumberUsersM3 = IntegerField('# Working on third Monday', [validators.DataRequired(message = 'Please Enter Something')], default=3)
    NumberUsersT3 = IntegerField('# Working on third Tuesday', [validators.DataRequired(message = 'Please Enter Something')], default=3)
    NumberUsersW3 = IntegerField('# Working on third Wednesday', [validators.DataRequired(message = 'Please Enter Something')], default=3)
    NumberUsersTh3 = IntegerField('# Working on third Thursday', [validators.DataRequired(message = 'Please Enter Something')], default=3)
    NumberUsersF3 = IntegerField('# Working on third Friday', [validators.DataRequired(message = 'Please Enter Something')], default=3)
    NumberUsersS3 = IntegerField('# Working on third Saturday', [validators.DataRequired(message = 'Please Enter Something')], default=1)
    NumberUsersSu3 = IntegerField('# Working on third Sunday', [validators.DataRequired(message = 'Please Enter Something')], default=1)
class RequestForm(FlaskForm):
    """Weekday preference for the post-call day."""
    post_call = SelectField('Select when you want post call:', choices=[('M', 'Monday'), ('T', 'Tuesday'), ('W', 'Wednesday'), ('Th', 'Thursday'), ('F', 'Friday')])
|
# -*- coding: utf-8 -*-
#:
#: Author: redkern
#: Date: 26/12/2016
#: Version: 0.1
#: License: MIT
#:
"""Data convertion module."""
def bytes_sanitizer(data):
    """Return *data* as bytes, UTF-8-encoding it when it is a str."""
    return data.encode("utf-8") if isinstance(data, str) else data
def str_to_bytes(data):
    """UTF-8-encode a string into bytes."""
    return bytes(data, "utf-8")
def bytes_to_str(data):
    """Decode UTF-8 bytes into a string."""
    return str(data, "utf-8")
def bytes_to_bits(data):
    """Expand a bytes object into a flat list of 0/1 ints, MSB first per byte."""
    bits = []
    for byte in data:
        # format(..., "08b") zero-pads each byte to exactly eight bits.
        bits.extend(int(char) for char in format(byte, "08b"))
    return bits
def bits_to_bytes(bitlist):
    """Pack a flat list of 0/1 ints (MSB first) into bytes.

    Any trailing group of fewer than eight bits is discarded, matching the
    original whole-byte grouping.
    """
    out = bytearray()
    usable = len(bitlist) - len(bitlist) % 8
    for start in range(0, usable, 8):
        value = 0
        for bit in bitlist[start:start + 8]:
            value = (value << 1) | bit
        out.append(value)
    return bytes(out)
def xor_bits(bitlist1, bitlist2):
    """XOR two bit lists element-wise; the result has the shorter length."""
    return [a ^ b for a, b in zip(bitlist1, bitlist2)]
if __name__ == "__main__":
pass
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import re
from pants.testutil.pants_integration_test import ensure_daemon, run_pants
from pants.util.contextutil import overwrite_file_content
from pants.util.dirutil import read_file
@ensure_daemon
def test_fmt_then_edit(use_pantsd: bool) -> None:
    """fmt a file, corrupt its formatting, and check that a re-run restores it."""
    f = "testprojects/src/python/hello/greet/greet.py"

    def run() -> None:
        # Run `fmt` with black enabled on the single target file.
        run_pants(
            [
                "--backend-packages=['pants.backend.python', 'pants.backend.python.lint.black']",
                "fmt",
                f,
            ],
            use_pantsd=use_pantsd,
        ).assert_success()

    # Run once to start up, and then capture the file content.
    run()
    good_content = read_file(f)

    # Edit the file. Bug fix: the original substitution replaced "def greet"
    # with itself (a no-op), so the content never changed and the inequality
    # assert below could not hold. Insert extra whitespace black will collapse.
    with overwrite_file_content(f, lambda c: re.sub(b"def greet", b"def  greet", c)):
        assert good_content != read_file(f)

        # Re-run and confirm that the file was fixed.
        run()
        assert good_content == read_file(f)
|
# 机试题
# 1.lis = [['哇',['how',{'good':['am',100,'99']},'太白金星'],'I']] (2分)
# # o列表lis中的'am'变成大写。(1分)
# # o列表中的100通过数字相加在转换成字符串的方式变成'10010'。(1分)
# lis = [['哇',['how',{'good':['am',100,'99']},'太白金星'],'I']]
# print(len(lis))
# print(len(lis[0]))
# print(len(lis[0][1]))
# print(len(lis[0][1][1]))
# print(lis[0][1][1]['good'])
# lis[0][1][1]['good'][0] = lis[0][1][1]['good'][0].upper()
# print(lis)
# lis[0][1][1]['good'][1] = str(lis[0][1][1]['good'][1] + 9910)
# print(lis)
# 2.dic = {'k1':'v1','k2':['alex','sb'],(1,2,3,):{'k3':['2',100,'wer']}} (3分)
# # o将'k3'对应的值的最后面添加一个元素'23'。(1分)
# # o将'k2'对应的值的第0个位置插入元素'a'。(1分)
# o将(1,2,3,)对应的值添加一个键值对'k4','v4'。(1分)
# dic = {
# 'k1':'v1',
# 'k2':['alex','sb'],
# (1,2,3,):{'k3':['2',100,'wer']}
# }
# print(dic)
# dic[(1,2,3)]['k3'].append('23')
# print(dic)
# dic['k2'].insert(0,'a')
# print(dic)
# dic[(1,2,3)]['k4'] = 'v4'
# print(dic)
# 3.实现一个整数加法计算器(多个数相加):(5分)
# 如:content = input("请输入内容:") 用户输入:5+9+6 +12+ 13,然后进行分割再进行计算。
# def add():
# content = input("请输入内容:")
# content_fix = content.strip().split('+',content.strip().count('+'))
# sum = 0
# for i in content_fix:
# sum += int(i)
# print(f'{content} 的加和为:{sum}')
# return sum
# add()
# 4.请写一个电影投票程序。现在上映的电影列表如下:(10分)
#lst = ['复仇者联盟4', '驯龙高手3', '金瓶梅', '老男孩', '大话西游']
# 由用户给每⼀个电影投票.最终将该⽤户投票信息公布出来。
# 要求:
# o用户可以持续投票,用户输入序号,进行投票。比如输入序号 1,给金瓶梅投票1。
# o每次投票成功,显示给哪部电影投票成功。
# o退出投票程序后,要显示最终每个电影的投票数。
# 建议最终投票的结果为这种形式:
# {'⾦瓶梅': 0, '复仇者联盟4': 0, '驯龙高手3': , '老男孩': 0,'大话西游':0}
#dic = dict.fromkeys(lst,0)
# print(dic)
# def vote():
# global dic
# while True:
# content = input('1.复仇者联盟4\n2.驯龙高手3\n3.金瓶梅\n4.老男孩\n5.大话西游\n请输入序号为电影投票,输入"q"退出投票:')
# if content=='':
# print('要输入序号才能投票')
# elif content.isdigit()==False and content != 'q':
# print("要输入序号才能投票")
# else:
# if content.lower()=='q':
# for mv_name,count in dic.items():
# print(f'电影 {mv_name} 的投票数为 {count} ')
# break
# elif int(content) >= 1 and int(content) <=5:
# num = int(content)
# dic[lst[num-1]] += 1
# print(f'给{dic[lst[num-1]]}投票成功!')
# else:
# print('序号超出范围!')
# vote()
# dic = dict.fromkeys(lst,0)
# print(dic)
# lst = ['复仇者联盟4', '驯龙高手3', '金瓶梅', '老男孩', '大话西游']
# dic = {}
# while 1:
# for index,moive_name in enumerate(lst):
# print(f'序号:{index+1} 电影名称:{moive_name}')
# num = input('请输入您要投票的电影序号:').strip()
# if num.isdecimal():
# num = int(num)
# if 0 < num <= len(lst):
# if lst[num-1] not in dic:
# dic[lst[num-1]] = 1
# print(dic)
# else:
# dic[lst[num-1]] += 1
# print(dic)
# else:
# print('超出范围,重新输入')
# elif num.upper() == 'Q': break
# else:
# print('输入有误,重新输入')
# print(dic)
# for movie_name,count in dic.items():
# print(f'电影{movie_name} 最终得票数为{count}')
# 5.有文件t1.txt里面的内容为:(10分)
# id,name,age,phone,job
# 1,alex,22,13651054608,IT 2,wusir,23,13304320533,Tearcher 3,taibai,18,1333235322,IT
# 利用文件操作,将其构造成如下数据类型。
# [{'id':'1','name':'alex','age':'22','phone':'13651054608','job':'IT'},......]
# Exercise 5: read t1.txt (header row then data rows, comma-separated) and
# build a list of dicts mapping the header fields to each row's values.
with open('t1.txt', encoding='utf-8', mode='r') as f1, open('t1_fix.txt', encoding='utf-8', mode='a') as f2:
    # Split each line into at most 5 fields (maxsplit=4, as before).
    rows = [line.strip().split(',', 4) for line in f1]
    header = rows[0]
    records = []
    for data_row in rows[1:]:
        # Fix: build each dict once; the original recomputed the same
        # dict(list(zip(...))) a second time just for the file write.
        record = dict(zip(header, data_row))
        records.append(record)
        f2.write(f'\n{record}')
    print(records)
# 6.按要求完成下列转化。(10分)
# list3 = [
# {"name": "alex", "hobby": "抽烟"},
# {"name": "alex", "hobby": "喝酒"},
# {"name": "alex", "hobby": "烫头"},
# {"name": "alex", "hobby": "Massage"},
# {"name": "wusir", "hobby": "喊麦"},
# {"name": "wusir", "hobby": "街舞"},
# {"name": "wusir", "hobby": "出差"},
# {"name": "太白", "hobby": "看书"},
# ]
# list4 = [
# {"name": "alex", "hobby_list": ["抽烟", "喝酒", "烫头", "Massage"]},
# {"name": "wusir", "hobby_list": ["喊麦", "街舞","出差"]},
# ]
# # 将list3 这种数据类型转化成list4类型,你写的代码必须支持可拓展.
# # 比如list3 数据在加一个这样的字典{"name": "wusir", "hobby": "溜达"},
# # list4 {"name": "wusir", "hobby_list": ["喊麦", "街舞", "溜达"]
# # 或者list3增加一个字典{"name": "太白", "hobby": "开车"},
# # list4{"name": "太白", "hobby_list": ["开车"],
# # 无论按照要求加多少数据,你的代码都可以转化.如果不支持拓展,则4分,支持拓展则10分.
# l1 = []
# for i in list3:
# for j in l1:
# if i['name']==j['name']:
# j['hobby_list'].append(i['hobby'])
# break
# else:
# l1.append({'name':i['name'],'hobby_list':[i['hobby'],]})
# print(l1)
# 两种方式:
# l1 = []
# 1.直接构建。
# for i in list3:
# for j in l1:
# if i['name'] == j['name']:
# j['hobby_list'].append(i['hobby'])
# break
# else:
# l1.append({'name': i['name'], 'hobby_list':[i['hobby'],]})
# print(l1)
# 2,构建特殊的数据结构。
# dic = {'alex':{"name": "alex", "hobby_list": ["抽烟", "喝酒", "烫头", "Massage"]},
# "wusir": {"name": "wusir", "hobby_list": ["喊麦", "街舞","出差"]}
# }
# print(list(dic.values()))
# dic = {}
# for i in list3:
# if i['name'] not in dic:
# dic[i['name']] = {'name': i['name'],'hobby_list':[i['hobby'],]}
# else:
# dic[i['name']]['hobby_list'].append(i['hobby'])
# print(list(dic.values()))
|
import datetime
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from coffeebot import config
from coffeebot.models import Base

# One-shot bootstrap script: create every table declared on the ORM Base in
# the configured database, then open a session against it.
database_uri = config.DATABASE_URI
print(datetime.datetime.now())
print("Creating database...")
engine = create_engine(database_uri)
# create_all is a no-op for tables that already exist.
Base.metadata.create_all(engine)
print("Successfully created database.")  # fixed typo: "Succesfully"
Base.metadata.bind = engine
db_session = sessionmaker(bind=engine)
session = db_session()
|
import math

# Read two integers and print their greatest common divisor.
# math.gcd replaces the original O(max(a, b)) trial-division scan and also
# handles a == b == 0 (returns 0) where the old code crashed on max([]).
a, b = map(int, input().split())
print(math.gcd(a, b))
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2019-01-26 21:10
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema for the ehealth app: MedRecord, Patient,
    # and a MedRecord -> Patient foreign key added afterwards.  Do not
    # hand-edit an applied migration; create a new one instead.
    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='MedRecord',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('chief_complaint', models.CharField(max_length=250)),
                ('gender', models.CharField(max_length=250)),
                ('medications', models.CharField(max_length=500)),
            ],
        ),
        migrations.CreateModel(
            name='Patient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('clinician', models.CharField(max_length=250)),
                ('pat_name', models.CharField(max_length=500)),
                ('bloodtype', models.CharField(max_length=100)),
                ('pat_img', models.FileField(upload_to='')),
                ('contact', models.CharField(max_length=100)),
                # Owner of the record; default=1 points at the first user.
                ('user', models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AddField(
            model_name='medrecord',
            name='patient',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ehealth.Patient'),
        ),
    ]
|
from os.path import dirname, join, abspath
from pyrep import PyRep
from pyrep.objects.shape import Shape
from pyrep.objects.vision_sensor import VisionSensor
import numpy as np
import time
import math
from occupancyGrid import OccupancyGrid
class Pose:
    """A planar pose (x, y, heading theta) supporting component-wise addition."""

    def __init__(self, x, y, theta):
        self.x = x
        self.y = y
        self.theta = theta

    def __str__(self):
        return '{}, {}, {}'.format(self.x, self.y, self.theta)

    def __add__(self, rhs):
        # Component-wise sum; headings add without wrap-around.
        summed = (self.x + rhs.x, self.y + rhs.y, self.theta + rhs.theta)
        return Pose(*summed)
def get2DPose(shape):
    # Derive a planar pose from the shape's 3-D position (x, y) and yaw.
    # NOTE(review): if get_position()/get_orientation() return numpy arrays
    # (as PyRep objects typically do), `+` here is element-wise addition of
    # the (x, y) pair with the broadcast yaw, NOT concatenation — confirm
    # which behaviour is intended.
    return (shape.get_position()[:2] + shape.get_orientation()[-1:])
def set2DPose(shape, pose):
    '''
    Sets the 2D (top-down) pose of the robot [x, y, yaw]
    '''
    # Keep the current height; only x, y and yaw come from the pose.
    current_height = shape.get_position()[2]
    shape.set_position([pose.x, pose.y, current_height])
    shape.set_orientation([0, 0, pose.theta])
def setupOccGrid(occ_grid, vision_sensor):
    # Capture vision depth and create occupancy grid.
    # Relies on the module-level PyRep instance `pr` (created later in this
    # file) to advance the simulation one step before reading the sensor.
    pr.step()
    depth = vision_sensor.capture_depth()
    occ_grid.fromDepth(depth)
def boustrophedon(block_size_x, block_size_y, area_size_x, area_size_y, occ_grid):
    """Generate a boustrophedon (lawn-mower) sweep path over a rectangular area.

    The block (robot footprint) sweeps along the area's longer dimension and
    shifts sideways by one block width between sweeps; when the block width
    does not divide the area evenly a final partial sweep covers the
    remainder.  Returns a list of Pose waypoints spaced 0.01 world units
    apart, starting at the grid cell given by occ_grid.mapToWorld.

    Bug fix: the leftover-strip loop previously passed a float to range(),
    raising TypeError whenever sweeps was fractional; it is now truncated to
    an int.  Debug prints in that branch were removed.
    """
    max_block_dim = max(block_size_x, block_size_y)
    min_block_dim = min(block_size_x, block_size_y)
    max_area_dim = max(area_size_x, area_size_y)
    min_area_dim = min(area_size_x, area_size_y)
    is_block_max_x = max_block_dim == block_size_x
    is_area_max_x = max_area_dim == area_size_x
    # Sweep along x only when the area's long side is x.
    move_in_x_dir = is_area_max_x

    path = []
    # TODO: Need to offset from centre
    start_position = occ_grid.mapToWorld(max_block_dim/2 - 0.5, min_block_dim/2 - 0.5)
    current_pose = Pose(start_position[0], start_position[1], 0)
    # If the block's long side and the area's long side point the same way,
    # rotate the block 90 degrees so it sweeps broadside-on.
    if is_block_max_x == is_area_max_x:
        current_pose = Pose(start_position[1], start_position[0], math.radians(90))
    # TODO:
    # Check if block can fit in area -> block needs to be spun
    # Check if block can perfectly sweep area -> else block needs to be spun
    sweeps = min_area_dim / max_block_dim
    moves_per_sweep = max_area_dim - min_block_dim
    path.append(current_pose)
    multiplier = 1
    # Boustrophedon method: alternate sweep direction, shift between sweeps.
    for i in range(int(sweeps)):
        multiplier = 1 if i % 2 == 0 else -1
        for j in range(int(moves_per_sweep)):
            if move_in_x_dir:
                current_pose += Pose(multiplier * 0.01, 0, 0)
            else:
                current_pose += Pose(0, multiplier * 0.01, 0)
            path.append(current_pose)
        # Skip the sideways shift after the final full sweep.
        if i == int(sweeps) - 1:
            continue
        for k in range(max_block_dim):
            if move_in_x_dir:
                current_pose += Pose(0, 0.01, 0)
            else:
                current_pose += Pose(0.01, 0, 0)
            path.append(current_pose)
    if int(sweeps) != sweeps:
        # Partial final sweep over the leftover strip narrower than the block.
        # range() requires an int (the original passed a float here).
        remaining = int(min_area_dim - (int(sweeps) * max_block_dim))
        for i in range(remaining):
            if move_in_x_dir:
                current_pose += Pose(0, 0.01, 0)
            else:
                current_pose += Pose(0.01, 0, 0)
            path.append(current_pose)
        for j in range(int(moves_per_sweep)):
            if move_in_x_dir:
                current_pose += Pose(-multiplier * 0.01, 0, 0)
            else:
                current_pose += Pose(0, -multiplier * 0.01, 0)
            path.append(current_pose)
    return path
# Get Scene
SCENE_FILE = join(dirname(abspath(__file__)), 'scenes/scene_cpp.ttt')
# Start Simulation
pr = PyRep()
pr.launch(SCENE_FILE, headless=False)
pr.start()
robot = Shape('robot')
vision_sensor = VisionSensor('vision_sensor')
# Setup occ_grid from one depth capture of the scene.
occ_grid = OccupancyGrid()
setupOccGrid(occ_grid, vision_sensor)
# Plan a sweep for a 20x2 block over a 100x100 area and replay it on the robot.
path = boustrophedon(20, 2, 100,100, occ_grid)
for p in path:
    set2DPose(robot, p)
    pr.step()
    time.sleep(0.01)  # slow playback so the motion is visible
# End Simulation
pr.stop()
pr.shutdown()
import warnings #GetAptUrls() Supresses warning
warnings.filterwarnings('ignore') #GetAptUrls() Supresses warning
from bs4 import BeautifulSoup #GetAptUrls() GetAptInfo(AptUrls)
import requests #GetAptUrls()
import re #GetAptInfo(AptUrls) MakeRentInt(df)
import pandas as pd #Everything
import time #MakeCurrentTimeString()
# from selenium import webdriver
# import time
# from selenium.webdriver.remote.webelement import WebElement
# from selenium.common.exceptions import StaleElementReferenceException
def Hood_Url_List_to_Apt_Urls(Hood_Url_List):
    """Collect every apartment-listing URL from each neighbourhood page.

    Each page is fetched and its placard title anchors are gathered, falling
    back to the plain 'placardTitle' selector for older page layouts; all
    hrefs are flattened into a single list.
    """
    Apt_Urls = []
    for page_url in Hood_Url_List:
        page_soup = BeautifulSoup(requests.get(page_url).content, "lxml")
        anchors = page_soup.select('a.placardTitle.js-placardTitle')
        if not anchors:
            anchors = page_soup.select('a.placardTitle')
        Apt_Urls.extend(anchor['href'] for anchor in anchors)
    assert len(Apt_Urls) != 0, "Apt_Urls is empty in Snapshot_Url_List_to_Archive_Apt_Urls"
    return Apt_Urls
def From_Webarchive_Get_Snapshot(HoodUrl):
    """Query the Internet Archive CDX API and return unique snapshot URLs.

    Each CDX response row has 7 space-separated fields:
    urlkey, timestamp, original, mimetype, statuscode, digest, length.
    Only rows with HTTP status 200 are kept; the replay URL is built from
    the timestamp and original URL.
    See https://hackernoon.com/guide-to-handling-internet-archives-cdx-server-api-response-c469df5b81f4
    """
    response = requests.get(HoodUrl)
    unique_urls = set()
    for row in response.text.split('\n'):
        fields = row.split(' ')
        # Malformed rows (wrong field count) and non-200 captures are skipped.
        if len(fields) == 7 and fields[4] == '200':
            unique_urls.add('http://web.archive.org/web/%s/%s/' % (fields[1], fields[2]))
    return list(unique_urls)
# def waitForLoad(driver):
# elem = driver.find_element_by_tag_name("html")
# count = 0
# while True:
# count += 1
# if count > 10:
# #print("Timing out after 10 seconds and returning")
# return
# time.sleep(.1)
# try:
# elem == driver.find_element_by_tag_name("html")
# except StaleElementReferenceException:
# return
def Get_Apt_Info(Apt_Urls, Archive):
    """Scrape unit rows (type/rent/sqft/availability) from each listing URL.

    Archive == 1: URLs are Wayback-Machine replay URLs (date and apartment
    name are at fixed path positions 4 and 8).
    Archive == 0: URLs are live pages; the date is the current timestamp.
    Returns a single DataFrame concatenated from one child frame per URL.
    """
    df =[]
    assert len(Apt_Urls) != 0, "Apt_Urls is empty in Get_Apt_Info(Apt_Urls)"
    print(len(Apt_Urls))
    for i in range(0, len(Apt_Urls)):
        # Best-effort fetch: any network failure just skips this URL.
        try:
            r = requests.get(Apt_Urls[i])
        except:
            continue
        # Write code that counts how many of my batch of Urls turns out to be bad for one reason or another, 0 rows, not 200 HTTTP ode
        if r.status_code != 200:
            continue
        # URL to AptName by rsplit()
        # if Archive == 2:
        #     # print("Archive works")
        #     Date = Apt_Urls[i].rsplit('/')[4]
        #     AptName = Apt_Urls[i].rsplit('/')[8]
        #     driver = webdriver.PhantomJS(executable_path='/usr/local/bin/phantomjs')
        #     driver.get(Apt_Urls[i])
        #     waitForLoad(driver)
        #     html = driver.page_source
        #     soup = BeautifulSoup(html, "lxml")
        #     tab = soup.select('div[data-tab-content-id="all"]')
        #     if len(tab) == 0:
        #         continue
        #     apts = tab[0].select('.rentalGridRow')
        if Archive == 1:
            # Archived page: date/name parsed from the replay URL path.
            Date = Apt_Urls[i].rsplit('/')[4]
            AptName = Apt_Urls[i].rsplit('/')[8]
            soup = BeautifulSoup((r).content, "lxml")
            tab = soup.select('div[data-tab-content-id="all"]')
            if len(tab) == 0:
                # Older page layout: rows live directly in the table.
                apts = soup.select('tr.rentalGridRow')
            else:
                apts = tab[0].select('.rentalGridRow')
        if Archive == 0:
            # Live page: stamp with the current time instead.
            Date = time.strftime("%Y%m%d-%H%M%S")
            AptName = Apt_Urls[i].rsplit('/')[3]
            soup=BeautifulSoup((r).content, "lxml")
            tab = soup.select('div[data-tab-content-id="all"]')
            if len(tab) == 0:
                apts = soup.select('tr.rentalGridRow')
            else:
                apts = tab[0].select('.rentalGridRow')
        # Date format is YYYYMMDD... in both branches.
        Year = Date[:4]
        Month = Date[4:6]
        a,b,c, d = [], [], [], []
        for apt in apts:
            # Rows missing any of the four cells are skipped entirely.
            try:
                type = apt.select('.shortText')[0].text.strip()  # NOTE(review): shadows builtin type()
                rent = apt.select('.rent')[0].text.strip()
                sqft = apt.select('.sqft')[0].text.strip()
                available = apt.select('.available')[0].text.strip()
                a.append(type)
                b.append(rent)
                c.append(sqft)
                d.append(available)
            except:
                # the Apt is missing this information, skip it
                continue
        df_child = pd.DataFrame({'type': a, 'rent': b, 'sqft': c, 'available': d, 'apt': AptName, 'Year': Year, 'Month': Month})
        df.append(df_child)
    assert len(df) != 0, "df is empty in Get_Apt_Info(Apt_Urls)"
    # Creates one data frame from many
    df = pd.concat(df).reset_index(drop=True)
    return df
def Make_Current_Time_String():
    """Return the current local time formatted as YYYYMMDD-HHMMSS."""
    return time.strftime("%Y%m%d-%H%M%S")
def DF_to_Csv(df, Hood_Name, timestr):
    """Write *df* to the per-neighbourhood unclean-data folder; return the path.

    Fix: the original wrapped to_csv in a bare except and created the folder
    only after a failure, which also silently swallowed unrelated errors
    (permission problems, bad frames).  The directory is now created up
    front with makedirs(exist_ok=True) and to_csv runs exactly once.
    """
    import os
    base_dir = '/Users/Reed/PycharmProjects/akara/apt_get/data/data_unclean/' + Hood_Name
    os.makedirs(base_dir, exist_ok=True)
    path = base_dir + '/' + Hood_Name + "_unclean" + timestr + '.csv'
    df.to_csv(path)
    return path
def main(hood_choices):
    """For each neighbourhood URL: scrape live listings, merge in any
    Wayback-Machine snapshot data, write one CSV per neighbourhood, and
    return the list of written file paths."""
    # New
    paths_unclean = []
    for item in hood_choices:
        Hood_Url = [item]
        Apt_Urls_New = Hood_Url_List_to_Apt_Urls(Hood_Url)
        df_live = Get_Apt_Info(Apt_Urls_New, 0)
        # Neighbourhood name is the first path segment of the URL.
        Hood_Name = Hood_Url[0].split('/')[3]
        # Export
        timestr = Make_Current_Time_String()
        # Old: query the CDX index for archived captures of the same page.
        Hood_Url_Webarchive = 'http://web.archive.org/cdx/search/cdx?url='+ Hood_Url[0]
        Snapshot_Url_List = From_Webarchive_Get_Snapshot(Hood_Url_Webarchive)
        if len(Snapshot_Url_List) == 0:
            # No archive data: export the live scrape alone.
            print('Snapshot_list is empty, skipping archive')
            df = df_live
            path = DF_to_Csv(df, Hood_Name, timestr)
            paths_unclean.append(path)
            continue
        else:
            Apt_Urls_Old = Hood_Url_List_to_Apt_Urls(Snapshot_Url_List)
            df_archive = Get_Apt_Info(Apt_Urls_Old, 1)
            # Combine live + archive frames into one.
            df = []
            df.append(df_live)
            df.append(df_archive)
            # Creates one data frame from many
            df = pd.concat(df).reset_index(drop=True)
            Hood_Name = Hood_Url[0].split('/')[3]
            # Export
            timestr = Make_Current_Time_String()
            # If Hood_Name subfolder does not exist, makedir
            path = DF_to_Csv(df, Hood_Name, timestr)
            paths_unclean.append(path)
    return paths_unclean
|
#Multiple of 3 or 5
def multiple(n):
    """Return the sum of all multiples of 3 or 5 strictly below n, or -1
    for invalid (non-positive) input.

    Bug fix: the original guard was `n==0 and n<0`, which can never be
    true, so invalid input was silently accepted instead of returning -1.
    """
    if n <= 0:
        return -1
    sum_mul = 0
    for i in range(1, n):
        if i % 3 == 0 or i % 5 == 0:
            sum_mul = sum_mul + i
    return sum_mul
# Driver: report the Project-Euler-style sum for n = 1000.
# NOTE(review): Python 2 print-statement syntax below; under Python 3 these
# lines are a SyntaxError and would need print(...) calls.
output=multiple(1000)
if(output==-1):
    print "Enter a valid input!!"
else:
    print "The sum of multiples of 3 or 5 =",output
|
# Generated by Django 3.2.3 on 2021-05-17 17:57
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated: renames likeunlikefilm.trueOrNot to the PEP 8 style
    # true_or_not.  Do not hand-edit an applied migration.

    dependencies = [
        ('interact', '0001_initial'),
    ]

    operations = [
        migrations.RenameField(
            model_name='likeunlikefilm',
            old_name='trueOrNot',
            new_name='true_or_not',
        ),
    ]
|
from matplotlib import pyplot as plt
import vectorEntrenamiento as vE
from MLP import matplotlib
class Grafica(object):
    """Interactive matplotlib scatter plot used to collect training points
    for the Adaline demo: left click = class 0 (blue), right click =
    class 1 (red); in probe mode the next click becomes the test point."""

    def __init__(self, figure):
        # While True, in-axes clicks add training points.
        self.seguirDibujando = True
        # While True, the next handled click is taken as the probe point.
        self.estaProbando = False
        self.verctorEntradas = []         # (unused here; kept for callers)
        self.vectoresEntrenamiento = []   # collected training vectors
        self.vectorPrueba = []            # the single probe vector
        self.figure = figure
        self.ax = self.figure.add_subplot(111)
        self.ax.set_title('Adaline')
        self.setAx()
        self.ax.plot()
        self.canvas = None

    def setCanvas(self, canvas):
        # Attach the canvas and register the click handler.
        self.canvas = canvas
        self.figure.canvas.mpl_connect('button_press_event',self.plot)

    def setAx(self):
        # Fixed [-5, 5] view with axes drawn through the origin.
        self.ax.set_xlim([-5,5])
        self.ax.set_ylim([-5,5])
        self.ax.grid()
        self.ax.axvline(0, color="black")
        self.ax.axhline(0, color="black")

    def plot(self, event):
        # Guard branch: fires for clicks outside the axes OR once drawing is
        # disabled; probe clicks (class label 2, drawn black) are handled
        # here, everything else is ignored.
        if event.inaxes!=self.ax.axes or not self.seguirDibujando:
            if self.estaProbando:
                self.vectorPrueba = vE.VectorEntrenamiento( ( event.xdata, event.ydata ), 2 )
                self.ax.plot(event.xdata, event.ydata, 'ok')
                self.figure.canvas.draw()
                self.estaProbando = False
            return
        if event.button == 1:
            # Left button: training vector with class 0, drawn blue.
            self.vectoresEntrenamiento.append( vE.VectorEntrenamiento( ( event.xdata, event.ydata ),0 ) )
            self.ax.plot(event.xdata, event.ydata, 'ob')
        elif event.button == 3:
            # Right button: training vector with class 1, drawn red.
            self.vectoresEntrenamiento.append( vE.VectorEntrenamiento( ( event.xdata, event.ydata ),1 ) )
            self.ax.plot(event.xdata, event.ydata, 'or')
        self.figure.canvas.draw()

    def plotPrueba(self, x, y, color):
        # Draw a classified probe point in the colour chosen by the caller.
        self.ax.plot( x, y, color)
        self.figure.canvas.draw()
|
# Minimal TF-IDF demo with scikit-learn: fit a vectorizer on a toy corpus,
# inspect the learned vocabulary and IDF weights, then encode the corpus.
from sklearn.feature_extraction.text import TfidfVectorizer
# list of text documents
#text = ["The The The The The quick brown fox jumped over the lazy dog.",
#        "The dog.",
#        "The fox"]
text=["the house had a tiny little mouse",
      "the cat saw the mouse",
      "the mouse ran away from the house",
      "the cat finally ate the mouse",
      "the end of the mouse story"
      ]
# create the transform
vectorizer = TfidfVectorizer()
#see tf idf images to know with expmle and equation
# tokenize and build vocab (word -> column index)
vectorizer.fit(text)
# summarize
print("vectorizer.vocabulary_:\n",vectorizer.vocabulary_)
print("vectorizer.idf_:\n",vectorizer.idf_)
# encode document: sparse matrix, one row per document
vector = vectorizer.transform(text)
# summarize encoded vector
print("vector.shape:\n",vector.shape)
print("vector.toarray():\n",vector.toarray())
import pygame
import time
import random

pygame.init()

# Palette (RGB)
white = (255, 255, 255)
black = (0, 0 , 0)
red = (255, 0, 0)
green = (0, 155, 0)

# Window and game geometry
display_width = 800
display_height = 600
gameDisplay = pygame.display.set_mode((display_width,display_height))
pygame.display.set_caption("snake game")
clock = pygame.time.Clock()
fps = 15          # frames per second
block_size = 20   # snake segment side length in pixels

# Fonts for the three sizes accepted by text_objects()
smallfont = pygame.font.SysFont("comicsansms", 25)
medfont = pygame.font.SysFont("comicsansms", 50)
largefont = pygame.font.SysFont("comicsansms", 80)
def snake(block_size, snakeList):
    """Draw every snake segment as a green square of side block_size."""
    for segment_x, segment_y in snakeList:
        pygame.draw.rect(gameDisplay, green, [segment_x, segment_y, block_size, block_size])
def text_objects(text, color, size):
    """Render *text* in the font matching *size* and return (surface, rect).

    size must be 'small', 'medium' or 'large'.  Fix: an unknown size now
    raises a clear ValueError instead of the original UnboundLocalError on
    textSurface.
    """
    fonts = {"small": smallfont, "medium": medfont, "large": largefont}
    if size not in fonts:
        raise ValueError("unknown text size: %r" % (size,))
    textSurface = fonts[size].render(text, True, color)
    return textSurface, textSurface.get_rect()
def message_to_screen (msg, color, y_displace = 0, size = "small"):
    """Blit *msg* centred horizontally, offset vertically by y_displace."""
    surface, rect = text_objects(msg, color, size)
    rect.center = (display_width/2), (display_height/2)+ y_displace
    gameDisplay.blit(surface, rect)
def gameLoop():
    """Main snake loop: arrow keys steer, walls and self-collision end the
    game, eating the apple grows the snake by one segment."""
    gameExit = False
    gameOver = False
    # Snake head position and per-frame velocity.
    lead_x = display_width/2
    lead_y = display_height/2
    lead_x_change = 0
    lead_y_change = 0
    snakeList = []
    snakeLength = 1
    randAppleX = round(random.randrange(0, display_width - block_size)) #/10.0)*10.0
    randAppleY = round(random.randrange(0, display_height - block_size)) #/10.0)*10.0
    while not gameExit:
        # Game-over screen: wait for Q (quit) or C (restart).
        while gameOver == True:
            gameDisplay.fill(white)
            message_to_screen("Game over", red, y_displace = -50, size= "large")
            message_to_screen("press C to play or Q to quit", black, y_displace = 50, size= "small")
            pygame.display.update()
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_q:
                        gameExit = True
                        gameOver = False
                    if event.key == pygame.K_c:
                        # NOTE(review): restarting by recursion grows the
                        # call stack with every replay.
                        gameLoop()
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                gameExit = True
                gameOver = False
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    lead_x_change = -block_size
                    lead_y_change = 0
                elif event.key == pygame.K_RIGHT:
                    lead_x_change = block_size
                    lead_y_change = 0
                elif event.key == pygame.K_UP:
                    lead_y_change = -block_size
                    lead_x_change = 0
                elif event.key == pygame.K_DOWN:
                    lead_y_change = block_size
                    lead_x_change = 0
        # Wall collision ends the round.
        if lead_x >= display_width or lead_y >= display_height or lead_x <= 0 or lead_y <= 0:
            gameOver = True
        #if event.type == pygame.KEYUP:
        #    if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:
        #        lead_x_change = 0
        #if event.type == pygame.KEYUP:
        #    if event.key == pygame.K_UP or event.key == pygame.K_DOWN:
        #        lead_y_change = 0
        lead_x += lead_x_change
        lead_y += lead_y_change
        gameDisplay.fill(white)
        AppleThickness = 30
        pygame.draw.rect(gameDisplay, red, [randAppleX, randAppleY, AppleThickness, AppleThickness])
        # Advance the snake: append the new head, drop the tail once the
        # list exceeds the current length.
        snakeHead = []
        snakeHead.append(lead_x)
        snakeHead.append(lead_y)
        snakeList.append(snakeHead)
        if len(snakeList) > snakeLength:
            del snakeList[0]
        # Self-collision: head matching any earlier segment ends the round.
        for eachSegment in snakeList[:-1]:
            if eachSegment == snakeHead:
                gameOver = True
        snake(block_size, snakeList)
        # NOTE(review): duplicate draw call — the snake is rendered twice
        # per frame (harmless but redundant).
        snake(block_size, snakeList)
        pygame.display.update()
        '''
        if lead_x >= randAppleX and lead_x <= randAppleX + AppleThickness:
            if lead_y >= randAppleY and lead_y <= randAppleY + AppleThickness:
                print ("yay")
                randAppleX = round(random.randrange(0, display_width - block_size)) #/10.0)*10.0
                randAppleY = round(random.randrange(0, display_height - block_size)) #/10.0)*10.0
                snakeLength += 1
        '''
        # Apple overlap test on both head edges; respawn the apple and grow.
        if lead_x > randAppleX and lead_x < randAppleX + AppleThickness or lead_x + block_size > randAppleX and lead_x + block_size < randAppleX + AppleThickness:
            if lead_y > randAppleY and lead_y < randAppleY + AppleThickness or lead_y + block_size > randAppleY and lead_y + block_size < randAppleY + AppleThickness:
                randAppleX = round(random.randrange(0, display_width - block_size)) #/10.0)*10.0
                randAppleY = round(random.randrange(0, display_height - block_size)) #/10.0)*10.0
                snakeLength += 1
        clock.tick(fps)
        #print(event)
    pygame.quit()
    quit()
# Start the game when the module is run.
gameLoop()
|
def _unscramble(text):
    """Undo the three-stage scramble: mirrored 3-char block exchange,
    adjacent pair swaps, then rotation of the two halves."""
    chars = list(text)
    half = len(chars) // 2
    # Exchange each leading triple with the reversed mirrored triple at the
    # end (both sides re-reversed during the swap).
    for i in range(0, half, 3):
        chars[i:i+3], chars[-(i+1):-(i+4):-1] = chars[-(i+1):-(i+4):-1][::-1], chars[i:i+3][::-1]
    # Swap every adjacent pair of characters.
    for i in range(0, len(chars), 2):
        chars[i], chars[i+1] = chars[i+1], chars[i]
    # Exchange the first and second halves.
    chars[:half], chars[half:] = chars[half:], chars[:half]
    return chars

cipher = ('tMlsioaplnKlflgiruKanliae'
          'beLlkslikkpnerikTasatamkD'
          'psdakeraBeIdaegptnuaKtmte'
          'orpuTaTtbtsesOHXxonibmkse'
          'kaaoaKtrssegnveinRedlkkkr'
          'oeekVtkekymmlooLnanoKtlst'
          'oepHrpeutdynfSneloietbol')
cipher = _unscramble(cipher)
print(''.join(cipher))
import math
import sympy
def cifras(v, c):
    """Round *v* to *c* significant figures."""
    exponent = int(math.floor(math.log10(abs(v))))
    return round(v, c - exponent - 1)
def aprox(v, a, n):
    # Taylor-series approximation of sqrt(v) expanded around point a, using
    # the first n derivative terms (symbolic differentiation via sympy),
    # rounded to 8 significant figures with cifras().
    x = sympy.Symbol('x')
    ex = x**(1/2)
    r = ex.evalf(subs={x: a})
    for i in range(1, n + 1):
        ex = sympy.diff(ex)
        r = r + ((ex.evalf(subs={x: a})/math.factorial(i))*(v - a)**i)
    return cifras(r, 8)

# Show how the approximation of sqrt(0.0088) evolves as the order n grows.
for i in range(2, 12):
    print("Aproximacion de sqrt(0.0088) con n="+str(i)+"->"+str(aprox(0.0088, 0.5, i)))
"""
Espresso
~~~~~~~~~~~~~~~~~~~
Ratchet Robotics's custom Slack bot
Written from scratch, too!
:copyright: (c) 2015 by Liam Marshall
:license: BSD, see LICENSE for more details.
""" |
import uuid
from tests.graph_case import GraphTestCase
from office365.directory.group import Group
from office365.directory.group_profile import GroupProfile
class TestGraphTeam(GraphTestCase):
    """Tests for teams"""
    # Tests run in name order (test1..test6) against a live Graph tenant and
    # share this group, created once in setUpClass and deleted by test6.
    target_group = None  # type: Group

    @classmethod
    def setUpClass(cls):
        super(TestGraphTeam, cls).setUpClass()
        # A Unified (Microsoft 365) group — the kind a team can be attached to.
        grp_name = "Group_" + uuid.uuid4().hex
        properties = GroupProfile(grp_name)
        properties.securityEnabled = False
        properties.mailEnabled = True
        properties.groupTypes = ["Unified"]
        cls.target_group = cls.client.groups.add(properties).execute_query()

    def test1_get_all_teams(self):
        teams = self.client.teams.get_all().execute_query()
        self.assertGreater(len(teams), 0)

    def test2_ensure_team(self):
        # Attach a team to the group only if the current user has not
        # already joined one with that id.
        team_id = self.__class__.target_group.id
        teams = self.client.me.joinedTeams.filter("id eq '{0}'".format(team_id)).get().execute_query()
        self.assertIsNotNone(teams.resource_path)
        if len(teams) == 0:
            new_team = self.__class__.target_group.add_team().execute_query_retry()
            self.assertIsNotNone(new_team.id)
        else:
            self.assertEqual(len(teams), 1)

    def test3_get_team(self):
        group_id = self.__class__.target_group.id
        existing_team = self.client.teams[group_id].get().execute_query()
        self.assertIsNotNone(existing_team.resource_url)
        self.assertIsNotNone(existing_team.messagingSettings)
        # Un-archive first so the later update/archive tests can run.
        if existing_team.properties["isArchived"]:
            existing_team.unarchive()
            self.client.load(existing_team)
            self.client.execute_query()
            self.assertFalse(existing_team.properties["isArchived"])

    def test4_update_team(self):
        team_id = self.__class__.target_group.properties['id']
        team_to_update = self.client.teams[team_id]
        team_to_update.funSettings.allowGiphy = False
        team_to_update.update().execute_query()

    def test5_archive_team(self):
        group_id = self.__class__.target_group.id
        self.client.teams[group_id].archive().execute_query()

    def test6_delete_group_with_team(self):
        # Permanently delete (True) the group created in setUpClass.
        grp_to_delete = self.__class__.target_group
        grp_to_delete.delete_object(True).execute_query()
|
# Read two integers from the user and print a formatted sum.
first = int(input('Digite um número: '))
second = int(input('Digite mais um número: '))
# Output line has the shape "a + b = total".
print('{} + {} = {}'.format(first, second, first + second))
|
# Print every perfect number (sum of proper divisors equals the number)
# in the inclusive range [lower, upper].
low = int(input('lower:'))
high = int(input('upper:'))
for candidate in range(low, high + 1):
    if candidate > 0:
        divisor_sum = sum(d for d in range(1, candidate) if candidate % d == 0)
        if divisor_sum == candidate:
            print(candidate, end=' ')
class Point:
    # Class-level constant; __setattr__ below blocks rebinding it on instances.
    WIDTH =5
    # __slots__ = ["__x", "__y", "W"]

    def __init__(self, x=0, y=0):
        self.__x = x
        self.__y = y

    # Implicitly static helper: called through the class, so the value to
    # validate arrives as `x` (no self).
    def __checkValue(x):
        if isinstance(x, int) or isinstance(x, float):
            return True
        return False

    def setCoords(self, x, y):
        # Only accept numeric coordinates; otherwise keep the old values and
        # print a message (Russian: "coordinates must be numbers").
        if Point.__checkValue(x) and Point.__checkValue(y):
            self.__x = x
            self.__y = y
        else:
            print("Координаты должны быть числами")

    def getCoords(self):
        # NOTE(review): because __getattribute__ intercepts "_Point__x",
        # the x slot comes back as the placeholder string, not the stored value.
        return self.__x, self.__y

    def __getattribute__(self, item):
        # Attribute-interception demo: reading the mangled private name
        # "_Point__x" yields a placeholder (Russian: "private variable").
        if item == "_Point__x":
            return "Частная переменная"
        else:
            return object.__getattribute__(self, item)

    def __setattr__(self, key, value):
        # Forbid per-instance WIDTH; store everything else directly in the
        # instance dict (avoids recursing back into __setattr__).
        if key == "WIDTH":
            raise AttributeError
        else:
            self.__dict__[key] = value

    def __getattr__(self, item):
        # Called only for missing attributes; logs and implicitly returns None.
        print("__getattr__: "+item)

    def __delattr__(self, item):
        # Intercepts deletion; the attribute is NOT actually removed.
        print("__delattr__: "+item)
# Demo driver for the attribute-interception behaviour above.
pt = Point(1, 2)
print(pt.getCoords())   # x comes back as the interception placeholder string
pt.setCoords( 10, 20)
print(pt.getCoords())
Point._Point__checkValue(5)  # calling the name-mangled helper from outside
pt.W        # missing attribute -> __getattr__ prints
del pt.W    # __delattr__ prints; nothing is actually deleted
pt.W =2     # stored via __setattr__
print(pt.W)
# Sequence of standalone online-judge exercises (6042-6058); each section
# reads from stdin and prints its result.
# 6042: round a float to two decimal places
a = input()
b = round(float(a), 2)
print(b)
# 6043: quotient with three decimal places
a, b = input().split()
c = float(a)/float(b)
print(format(c, '.3f'))
# 6044: sum, difference, product, floor division, modulo, true division
a, b = input().split()
print(int(a)+int(b))
print(int(a)-int(b))
print(int(a)*int(b))
print(int(a)//int(b))
print(int(a) % int(b))
print(format(int(a)/int(b), '.2f'))
# 6045: sum and average of three integers
a, b, c = input().split()
a = int(a)
b = int(b)
c = int(c)
sum = a+b+c  # NOTE(review): shadows the builtin sum()
avg = sum/3
print(sum, format(avg, '.2f'))
# 6046: double via left shift
n = input()
int_n = int(n)
print(int_n<<1)
# 6047: a * 2**b via left shift
a, b = input().split()
int_a = int(a)
int_b = int(b)
print(int_a<<int_b)
# 6048: a < b
a, b = input().split()
int_a = int(a)
int_b = int(b)
if int_a < int_b:
    print(True)
else:
    print(False)
# 6049: a == b
a, b = input().split()
int_a = int(a)
int_b = int(b)
if int_a == int_b:
    print(True)
else:
    print(False)
# 6050: b >= a
a, b = input().split()
int_a = int(a)
int_b = int(b)
if int_b >= int_a:
    print(True)
else:
    print(False)
# 6051: a != b
a, b = input().split()
int_a = int(a)
int_b = int(b)
if int_b != int_a:
    print(True)
else:
    print(False)
# 6052: truthiness of an integer
n = input()
n = int(n)
print(bool(n))
# 6053: logical NOT
n = input()
n = bool(int(n))
print(not n)
# 6054: logical AND
a, b = input().split()
a = bool(int(a))
b = bool(int(b))
print(a and b)
# 6055: logical OR
a, b = input().split()
a = bool(int(a))
b = bool(int(b))
print(a or b)
# 6056: exclusive OR
a, b = input().split()
a = bool(int(a))
b = bool(int(b))
print(a and (not b) or (not a) and b)
# 6057: equivalence (XNOR)
a, b = input().split()
a = bool(int(a))
b = bool(int(b))
print((a and b) or (not a and not b))
# 6058: NOR
a, b = input().split()
a = bool(int(a))
b = bool(int(b))
print(not a and not b)
from django.shortcuts import render, redirect
from . import forms
from django.contrib.auth.models import User
from django.contrib import auth
from .models import ArrobaModel
from django.shortcuts import get_object_or_404
from . import twitter_api
from . import twitter_database
import pandas as pd
import json
import unicodedata
from django.utils import encoding
from django.core.paginator import Paginator
import os
from dotenv import load_dotenv
load_dotenv()
# Create your views here.
def index(request):
    """Render the public landing page."""
    return render(request=request, template_name='index.html')
def cadastro(request):
    """Sign-up view: show the form on GET, create the user on POST."""
    if request.method == "GET":
        cadastro_form = forms.CadastroForms()
        contexto = {
            'cadastro_form': cadastro_form
        }
        return render(request, 'cadastro.html', contexto)
    if request.method == "POST":
        nome = request.POST['nome']
        email = request.POST['email']
        senha = request.POST['senha']
        senha2 = request.POST['senha2']
        # Password confirmation mismatch: bounce back to the form
        # (no error message is surfaced to the user).
        if senha != senha2:
            return redirect('cadastro')
        # Duplicate e-mail: likewise redirect silently.
        if User.objects.filter(email=email).exists():
            return redirect('cadastro')
        user = User.objects.create_user(
            username=nome,
            email=email,
            password=senha
        )
        user.save()
        return redirect('login')
def login(request):
    """Login view: users authenticate by e-mail + password; the e-mail is
    resolved to a username because Django authenticates by username."""
    if request.method == "GET":
        # Already-authenticated users go straight to the dashboard.
        if request.user.is_authenticated:
            return redirect('dashboard')
        else:
            login_form = forms.LoginForms()
            contexto = {
                'login_form': login_form
            }
            return render(request, 'login.html', contexto)
    if request.method == 'POST':
        # if not request.user.is_authenticated():
        #     return redirect("login")
        email = request.POST['email']
        senha = request.POST['senha']
        login_form = forms.LoginForms(request.POST)
        login_form.get_request(request)
        if login_form.is_valid():
            if User.objects.filter(email=email).exists():
                # Look up the username behind the submitted e-mail.
                nome = (User.objects.filter(email=email).
                        values_list('username', flat=True).get())
                user = auth.authenticate(request, username=nome, password=senha)
                if user is not None:
                    auth.login(request, user)
                    return redirect('dashboard')
                else:
                    # Bad credentials: re-render the form.
                    contexto = {
                        'login_form':login_form,
                    }
                    return render(request, 'login.html', contexto)
            # NOTE(review): a valid form with an unknown e-mail falls
            # through and returns None (no response) — confirm intended.
        else:
            contexto = {
                'login_form':login_form,
            }
            return render(request, 'login.html', contexto)
def logout(request):
    """End the current session and send the visitor to the landing page."""
    auth.logout(request)
    return redirect('index')
def dashboard(request):
    """Render the logged-in user's registered handles, 3 per page."""
    if not request.user.is_authenticated:
        return redirect('index')
    handles = ArrobaModel.objects.filter(user_id=request.user)
    page_obj = Paginator(handles, 3).get_page(request.GET.get('page'))
    contexto = {
        'id': request.user.id,
        'lista_arrobas': page_obj,
    }
    return render(request, 'dashboard.html', contexto)
# def cadastro_arroba(request):
# if request.method == "GET":
# arroba_form = forms.ArrobaForms()
# contexto = {
# 'arroba_form': arroba_form
# }
# return render(request, 'cadastro_arroba.html', contexto)
# if request.method == "POST":
# arroba = request.POST['arroba']
# user = get_object_or_404(User, pk=request.user.id)
# if twitter_api.validate_user(arroba):
# arroba_attributes = twitter_api.get_arroba_attributes(arroba)
# normal_image_url = arroba_attributes.profile_image_url
# larger_image_url = normal_image_url.split('_normal')[0] + normal_image_url.split('_normal')[1]
# twitter_arroba = ArrobaModel(arroba=arroba, profile_image_url=larger_image_url, user_id=user,
# description=arroba_attributes.description,
# name=arroba_attributes.name)
# twitter_arroba.save()
# return redirect('dashboard')
# else:
# return redirect('cadastro_arroba')
def cadastro_arroba(request):
    """Register a Twitter handle (@) for the logged-in user."""
    if not request.user.is_authenticated:
        return redirect('login')
    if request.method == "GET":
        arroba_form = forms.ArrobaForms()
        contexto = {
            'arroba_form': arroba_form
        }
        return render(request, 'cadastro_arroba.html', contexto)
    if request.method == "POST":
        arroba = request.POST['arroba']
        user = get_object_or_404(User, pk=request.user.id)
        arroba_form = forms.ArrobaForms(request.POST)
        arroba_form.get_request(request)
        print(arroba_form.request.user)
        if arroba_form.is_valid():
            # Skip handles this user has already registered.
            lista_arrobas = ArrobaModel.objects.filter(user_id=request.user)
            lista_arrobas = [arroba.arroba for arroba in lista_arrobas]
            if arroba in lista_arrobas:
                print("Arroba já faz parte do set")
                return redirect('dashboard')
            # Fetch profile metadata from the Twitter API and rewrite the
            # thumbnail URL ('_normal' suffix) into the full-size image URL.
            arroba_attributes = twitter_api.get_arroba_attributes(arroba)
            normal_image_url = arroba_attributes.profile_image_url
            larger_image_url = normal_image_url.split('_normal')[0] + normal_image_url.split('_normal')[1]
            twitter_arroba = ArrobaModel(arroba=arroba, profile_image_url=larger_image_url, user_id=user,
                                         description=arroba_attributes.description,
                                         name=arroba_attributes.name)
            twitter_arroba.save()
            return redirect('dashboard')
        else:
            contexto = {
                'arroba_form':arroba_form,
            }
            return render(request, 'cadastro_arroba.html', contexto)
def deleta_arroba(request, arroba_id):
    """Delete one of the logged-in user's registered handles.

    Security fix: the lookup is now scoped to request.user so a user cannot
    delete another user's record by guessing its id (IDOR); previously any
    authenticated user could delete any arroba_id.  Anonymous visitors now
    get an explicit redirect instead of an implicit None response.
    """
    if not request.user.is_authenticated:
        return redirect('login')
    arroba = get_object_or_404(ArrobaModel, pk=arroba_id, user_id=request.user)
    arroba.delete()
    return redirect('dashboard')
"""
def detalha_arroba(request, arroba_id):
if request.user.is_authenticated:
arroba = get_object_or_404(ArrobaModel, pk=arroba_id)
mydb = twitter_database.mysql_rds_database_authentication('twitter_data')
df_tweets = pd.read_sql(f"SELECT * FROM tweets where arroba = '{arroba.arroba}';", con=mydb).sort_values(by='date', ascending=False)
df_tweets['date'] = df_tweets['date'].astype(str)
string_tweets = df_tweets.head(10).to_json(orient='records')
json_tweets = json.loads(string_tweets)
contexto = {
'arroba':arroba,
'len': df_tweets.shape,
'json_tweets': json_tweets
}
return render(request, 'detalhes_arroba.html', context=contexto)
"""
def detalha_arroba(request, arroba_id):
    """Show the detail page for one arroba with its paginated tweets.

    Loads all tweets for the arroba from the MySQL tweets table, newest
    first, and paginates them 10 per page with a +/-5 page window for the
    pagination widget.
    """
    if not request.user.is_authenticated:
        # Original fell through and returned None here; redirect like the
        # other views in this module.
        return redirect('login')
    arroba = get_object_or_404(ArrobaModel, pk=arroba_id)
    mydb = twitter_database.mysql_rds_database_authentication(os.environ.get('MYSQL_TWITTER_DATABASE'))
    # Parameterized query: the arroba value came from the database/user, so
    # never interpolate it into SQL (injection risk in the old f-string).
    df_tweets = pd.read_sql("SELECT * FROM tweets where arroba = %s;", con=mydb,
                            params=(arroba.arroba,)).sort_values(by='date', ascending=False)
    df_tweets['date'] = df_tweets['date'].astype(str)
    string_tweets = df_tweets.to_json(orient='records')
    json_tweets = json.loads(string_tweets)
    paginator = Paginator(json_tweets, 10)
    page_number = request.GET.get('page')
    if not page_number:
        page_number = 1
    page_obj = paginator.get_page(page_number)
    # Window of page links shown around the current page.
    limite_inf_pag = int(page_number) - 5
    limite_sup_pag = int(page_number) + 5
    last_page = int(paginator.num_pages)
    contexto = {
        'arroba': arroba,
        'len': df_tweets.shape,
        'json_tweets': json_tweets,
        'paginated_json_tweets': page_obj,
        'limite_inf_pag': limite_inf_pag,
        'limite_sup_pag': limite_sup_pag,
        'last_page': last_page
    }
    return render(request, 'detalhes_arroba.html', context=contexto)
from django.shortcuts import get_object_or_404
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework.viewsets import ModelViewSet
from titles.models import Title
from .models import Review
from .permissions import IsOwnerAdminModeratorToEdit
from .serializers import CommentSerializer, ReviewSerializer
class ReviewsViewSet(ModelViewSet):
    """CRUD endpoints for reviews nested under a single title."""

    serializer_class = ReviewSerializer
    permission_classes = (
        IsAuthenticatedOrReadOnly, IsOwnerAdminModeratorToEdit
    )

    def _parent_title(self):
        """Resolve the Title addressed by the URL, or raise 404."""
        return get_object_or_404(Title, pk=self.kwargs.get('title_id'))

    def get_queryset(self):
        """Return every review belonging to the addressed title."""
        return self._parent_title().reviews.all()

    def perform_create(self, serializer):
        """Stamp the requesting user as author and attach the parent title."""
        serializer.save(author=self.request.user, title=self._parent_title())
class CommentsViewSet(ModelViewSet):
    """CRUD endpoints for comments nested under a review of a title."""

    serializer_class = CommentSerializer
    permission_classes = (
        IsAuthenticatedOrReadOnly, IsOwnerAdminModeratorToEdit
    )

    def _parent_review(self):
        """Resolve the Review addressed by the URL, scoped to its title, or 404."""
        return get_object_or_404(Review, pk=self.kwargs.get('review_id'),
                                 title__id=self.kwargs['title_id'])

    def get_queryset(self):
        """Return every comment belonging to the addressed review."""
        return self._parent_review().comments.all()

    def perform_create(self, serializer):
        """Stamp the requesting user as author and attach the parent review."""
        serializer.save(author=self.request.user, review=self._parent_review())
|
import tensorflow as tf
import tensorflow_probability as tfp
tfd = tfp.distributions
sum_all = tf.math.reduce_sum
from VariationalPosterior import VariationalPosterior
class BayesianLSTMCell_Untied(tf.keras.Model):
    """Bayesian LSTM cell with untied (per-gate) variational weights.

    Each gate (input i, output o, forget f, candidate g) owns its own
    recurrent matrix U*, input matrix W* and bias B*. Every tensor is
    parameterised by a (mu, rho) pair defining a VariationalPosterior;
    calling the cell samples concrete weights and accumulates the log prior
    and log variational posterior needed for the ELBO / KL term.
    """

    def __init__(self, num_units, training, init, prior, **kwargs):
        super(BayesianLSTMCell_Untied, self).__init__(num_units, **kwargs)
        self.init = init              # initializer used for both mu and rho weights
        self.units = num_units        # number of hidden units
        self.is_training = training   # controls weight sampling in the posterior
        self.state_size = self.units
        self.prior = prior            # prior distribution for the KL term

    def _variational_pair(self, name, shape):
        """Create trainable weights <name>_mu and <name>_rho and return them."""
        mu = self.add_weight(shape=shape, initializer=self.init,
                             name=name + '_mu', trainable=True)
        rho = self.add_weight(shape=shape, initializer=self.init,
                              name=name + '_rho', trainable=True)
        return mu, rho

    def initialise_cell(self, links):
        """Build all gate weights for an input of `links` features.

        Bug fix: the bias tensors (B*) were previously registered under the
        same TensorFlow variable names as the input weights ('Wi_mu',
        'Wi_rho', ...), producing duplicate variable names that corrupt
        name-based checkpointing and variable lookup. They are now named
        'Bi_mu', 'Bi_rho', etc.
        """
        self.num_links = links
        # Recurrent (hidden-to-hidden) weights, one pair per gate.
        self.Ui_mu, self.Ui_rho = self._variational_pair('Ui', (self.units, self.units))
        self.Uo_mu, self.Uo_rho = self._variational_pair('Uo', (self.units, self.units))
        self.Uf_mu, self.Uf_rho = self._variational_pair('Uf', (self.units, self.units))
        self.Ug_mu, self.Ug_rho = self._variational_pair('Ug', (self.units, self.units))
        # Input (feature-to-hidden) weights.
        self.Wi_mu, self.Wi_rho = self._variational_pair('Wi', (self.num_links, self.units))
        self.Wo_mu, self.Wo_rho = self._variational_pair('Wo', (self.num_links, self.units))
        self.Wf_mu, self.Wf_rho = self._variational_pair('Wf', (self.num_links, self.units))
        self.Wg_mu, self.Wg_rho = self._variational_pair('Wg', (self.num_links, self.units))
        # Biases (previously mis-named 'W*_mu'/'W*_rho').
        self.Bi_mu, self.Bi_rho = self._variational_pair('Bi', (1, self.units))
        self.Bo_mu, self.Bo_rho = self._variational_pair('Bo', (1, self.units))
        self.Bf_mu, self.Bf_rho = self._variational_pair('Bf', (1, self.units))
        self.Bg_mu, self.Bg_rho = self._variational_pair('Bg', (1, self.units))
        # One variational posterior per weight tensor.
        self.Ui_dist = VariationalPosterior(self.Ui_mu, self.Ui_rho)
        self.Uo_dist = VariationalPosterior(self.Uo_mu, self.Uo_rho)
        self.Uf_dist = VariationalPosterior(self.Uf_mu, self.Uf_rho)
        self.Ug_dist = VariationalPosterior(self.Ug_mu, self.Ug_rho)
        self.Wi_dist = VariationalPosterior(self.Wi_mu, self.Wi_rho)
        self.Wo_dist = VariationalPosterior(self.Wo_mu, self.Wo_rho)
        self.Wf_dist = VariationalPosterior(self.Wf_mu, self.Wf_rho)
        self.Wg_dist = VariationalPosterior(self.Wg_mu, self.Wg_rho)
        self.Bi_dist = VariationalPosterior(self.Bi_mu, self.Bi_rho)
        self.Bo_dist = VariationalPosterior(self.Bo_mu, self.Bo_rho)
        self.Bf_dist = VariationalPosterior(self.Bf_mu, self.Bf_rho)
        self.Bg_dist = VariationalPosterior(self.Bg_mu, self.Bg_rho)
        ## Make sure following is only printed once during training and not for testing!
        print(" Untied cell has been built (in:", links, ") (out:", self.units, ")")
        self.sampling = False
        self.built = True

    def call(self, inputs, states):
        """One LSTM step.

        Samples every weight tensor from its posterior, computes the usual
        LSTM gate equations, and stores the summed log prior and log
        variational posterior of the sampled weights on the instance for
        the training loop's ELBO computation.
        """
        Ui = self.Ui_dist.sample(self.is_training, self.sampling)
        Uo = self.Uo_dist.sample(self.is_training, self.sampling)
        Uf = self.Uf_dist.sample(self.is_training, self.sampling)
        Ug = self.Ug_dist.sample(self.is_training, self.sampling)
        Wi = self.Wi_dist.sample(self.is_training, self.sampling)
        Wo = self.Wo_dist.sample(self.is_training, self.sampling)
        Wf = self.Wf_dist.sample(self.is_training, self.sampling)
        Wg = self.Wg_dist.sample(self.is_training, self.sampling)
        Bi = self.Bi_dist.sample(self.is_training, self.sampling)
        Bo = self.Bo_dist.sample(self.is_training, self.sampling)
        Bf = self.Bf_dist.sample(self.is_training, self.sampling)
        Bg = self.Bg_dist.sample(self.is_training, self.sampling)
        # State is a single tensor with c and h stacked along axis 0.
        c_t, h_t = tf.split(value=states[0], num_or_size_splits=2, axis=0)
        inputs = tf.cast(inputs, tf.float32)
        i = tf.sigmoid(Bi + tf.linalg.matmul(h_t, Ui) + tf.linalg.matmul(inputs, Wi))
        o = tf.sigmoid(Bo + tf.linalg.matmul(h_t, Uo) + tf.linalg.matmul(inputs, Wo))
        f = tf.sigmoid(Bf + tf.linalg.matmul(h_t, Uf) + tf.linalg.matmul(inputs, Wf))
        g = tf.math.tanh(Bg + tf.linalg.matmul(h_t, Ug) + tf.linalg.matmul(inputs, Wg))
        # Log densities of the sampled weights for the KL/ELBO terms.
        self.log_prior = sum_all(self.prior.log_prob(Ui) + self.prior.log_prob(Uo) + self.prior.log_prob(Uf) + self.prior.log_prob(Ug))
        self.log_prior += sum_all(self.prior.log_prob(Wi) + self.prior.log_prob(Wo) + self.prior.log_prob(Wf) + self.prior.log_prob(Wg))
        self.log_prior += sum_all(self.prior.log_prob(Bi) + self.prior.log_prob(Bo) + self.prior.log_prob(Bf) + self.prior.log_prob(Bg))
        self.log_variational_posterior = sum_all(self.Ui_dist.log_prob(Ui) + self.Uo_dist.log_prob(Uo) + self.Uf_dist.log_prob(Uf) + self.Ug_dist.log_prob(Ug))
        self.log_variational_posterior += sum_all(self.Wi_dist.log_prob(Wi) + self.Wo_dist.log_prob(Wo) + self.Wf_dist.log_prob(Wf) + self.Wg_dist.log_prob(Wg))
        self.log_variational_posterior += sum_all(self.Bi_dist.log_prob(Bi) + self.Bo_dist.log_prob(Bo) + self.Bf_dist.log_prob(Bf) + self.Bg_dist.log_prob(Bg))
        c_new = f*c_t + i*g
        h_new = o*tf.math.tanh(c_new)
        new_state = tf.concat([c_new, h_new], axis=0)
        return h_new, new_state

    def get_initial_state(self, inputs = None, batch_size = None, dtype = None):
        """Zero initial state; c and h are stacked along axis 0, hence 2*batch_size."""
        return tf.zeros((2*batch_size, self.units), dtype = dtype)
|
# -*- coding: utf-8 -*-
import logging
import psycopg2
import sys
import smtplib
from email.mime.text import MIMEText
if __name__ == '__main__':
    # Renames a Moodle (au24) account from the old username to the DNI and
    # switches it to LDAP auth, then e-mails the admins.
    # Usage: script.py <usuariodb> <clavedb> <dni> <usuarioau24>
    logging.getLogger().setLevel(logging.INFO)
    if len(sys.argv) <= 4:
        # logging.warn() is a deprecated alias of warning()
        logging.warning('argumentos insuficientes')
        logging.warning('usuariodb clavedb dni usuarioau24')
        sys.exit(1)
    user = sys.argv[1]
    passw = sys.argv[2]
    dni = sys.argv[3]
    username = sys.argv[4]
    con = psycopg2.connect(host='163.10.17.80', dbname='au24', user=user, password=passw)
    try:
        cur = con.cursor()
        logging.info('chequeando si existe {}'.format(username))
        cur.execute('select username from mdl_user where username = %s', (username,))
        if cur.rowcount <= 0:
            # Nothing to do; SystemExit skips the update and the e-mail below.
            logging.info('no existe usuario {}, por lo que no tengo que actualizar nada'.format(username))
            sys.exit()
        logging.info('chequeando si existe {}'.format(dni))
        cur.execute('select username from mdl_user where username = %s', (dni,))
        # Bug fix: this was `>= 0`, which is always true, so the
        # rename-to-.viejo ran even when no row with that dni existed.
        if cur.rowcount > 0:
            logging.info('actualizando {0} a {0}.viejo'.format(dni))
            cur.execute("update mdl_user set auth = %s, username = %s where username = %s", ('fceldap', '{}.viejo'.format(dni), dni))
        logging.info('actualizando {} a {}'.format(username, dni))
        cur.execute("update mdl_user set auth = %s, username = %s where username = %s", ('fceldap', dni, username))
        con.commit()
    finally:
        con.close()
    # Notify the admins of the rename by e-mail.
    text = 'usuario actualizado del au24 : {} --> {}'.format(username, dni)
    msg = MIMEText(text)
    msg['Subject'] = text
    msg['From'] = 'pablo@econo.unlp.edu.ar'
    msg['To'] = 'pablo@econo.unlp.edu.ar, anibal.alvarez@econo.unlp.edu.ar, soporte@econo.unlp.edu.ar'
    s = smtplib.SMTP('163.10.17.115')
    try:
        s.send_message(msg)
    finally:
        s.quit()
|
import random
from past.builtins import range
import numpy as np
from GeneticAlgorithm.Fixed import Fixed
from GeneticAlgorithm.Population import Population
from GeneticAlgorithm.Tournament import Tournament
from GeneticAlgorithm.CycleCrossover import CycleCrossover
from GeneticAlgorithm.Candidate import Candidate
random.seed()
class Sudoku(object):
    """ Solves a given Sudoku puzzle using a genetic algorithm. """
    def __init__(self, Nd, sqrtVal, input, puzzle):
        """Store board dimensions and puzzle inputs.

        Nd      --- board side length (e.g. 9 for a classic Sudoku)
        sqrtVal --- side length of a sub-block (e.g. 3 for 9x9)
        input   --- raw input handed through to Population (semantics defined
                    by the Population class; not inspected here)
        puzzle  --- puzzle data handed through to Population
        """
        self.given = None  # the fixed (given) cells; set by load()
        self.Nd = Nd
        self.sqrtVal = sqrtVal
        self.input = input
        self.puzzle = puzzle
        return
    def load(self, p):
        """Wrap the given puzzle `p` in a Fixed instance (the immutable cells)."""
        #values = np.array(list(p.replace(".","0"))).reshape((Nd, Nd)).astype(int)
        self.given = Fixed(p, self.Nd, self.sqrtVal)
        return
    def solve(self):
        """Run the genetic algorithm.

        Returns a tuple (generation, best_candidate) on success,
        (-1, 1) when the givens are invalid or seeding fails, and
        (-2, 1) when no solution is found within Ng generations.
        Fitness 1 denotes a valid complete solution.
        """
        Nc = 100 # Number of candidates (i.e. population size).
        Ne = int(0.05 * Nc) # Number of elites.
        Ng = 10000 # Number of generations.
        Nm = 0 # Number of mutations.
        # Mutation parameters.
        phi = 0        # running mutation success counter / rate
        sigma = 1      # scale of the adaptive mutation-rate distribution
        mutation_rate = 0.06
        # Check given one first
        if self.given.no_duplicates(self.Nd) == False:
            return (-1, 1)
        # Create an initial population.
        self.population = Population(self.Nd, self.sqrtVal, self.input, self.puzzle)
        print("Below are the population created for each generation.")
        print("Note that some of them will be not correct Answer!!")
        if self.population.seed(Nc, self.given) == 1:
            pass
        else:
            # Seeding failed (seed() returned something other than 1).
            return (-1, 1)
        # For up to 10000 generations...
        stale = 0  # generations in a row with the two fittest candidates tied
        for generation in range(0, Ng):
            # Check for a solution.
            best_fitness = 0.0
            #best_fitness_population_values = self.population.candidates[0].values
            for c in range(0, Nc):
                fitness = self.population.candidates[c].fitness
                if (fitness == 1):
                    print("=========================================")
                    print("Solution found at generation %d!" % generation)
                    print("=========================================")
                    return (generation, self.population.candidates[c])
                # Find the best fitness and corresponding chromosome
                if (fitness > best_fitness):
                    best_fitness = fitness
                    #best_fitness_population_values = self.population.candidates[c].values
            print("Generation:", generation, " Best fitness:", best_fitness)
            #print(best_fitness_population_values)
            # Create the next population.
            next_population = []
            # Select elites (the fittest candidates) and preserve them for the next generation.
            self.population.sort()
            elites = []
            for e in range(0, Ne):
                elite = Candidate(self.Nd, self.sqrtVal)
                elite.values = np.copy(self.population.candidates[e].values)
                elites.append(elite)
            # Create the rest of the candidates.
            for count in range(Ne, Nc, 2):
                # Select parents from population via a tournament.
                t = Tournament()
                parent1 = t.compete(self.population.candidates)
                parent2 = t.compete(self.population.candidates)
                ## Cross-over.
                cc = CycleCrossover(self.Nd, self.sqrtVal)
                child1, child2 = cc.crossover(parent1, parent2, crossover_rate=1.0)
                # Mutate child1.
                child1.update_fitness()
                old_fitness = child1.fitness
                success = child1.mutate(mutation_rate, self.given)
                child1.update_fitness()
                if (success):
                    Nm += 1
                    if (child1.fitness > old_fitness): # Used to calculate the relative success rate of mutations.
                        phi = phi + 1
                # Mutate child2.
                child2.update_fitness()
                old_fitness = child2.fitness
                success = child2.mutate(mutation_rate, self.given)
                child2.update_fitness()
                if (success):
                    Nm += 1
                    if (child2.fitness > old_fitness): # Used to calculate the relative success rate of mutations.
                        phi = phi + 1
                # Add children to new population.
                next_population.append(child1)
                next_population.append(child2)
            # Append elites onto the end of the population. These will not have been affected by crossover or mutation.
            for e in range(0, Ne):
                next_population.append(elites[e])
            # Select next generation.
            self.population.candidates = next_population
            self.population.update_fitness()
            # Calculate new adaptive mutation rate (based on Rechenberg's 1/5 success rule).
            # This is to stop too much mutation as the fitness progresses towards unity.
            if (Nm == 0):
                phi = 0 # Avoid divide by zero.
            else:
                phi = phi / Nm
            if (phi > 0.2):
                sigma = sigma / 0.998
            elif (phi < 0.2):
                sigma = sigma * 0.998
            mutation_rate = abs(np.random.normal(loc=0.0, scale=sigma, size=None))
            # Check for stale population.
            self.population.sort()
            if (self.population.candidates[0].fitness != self.population.candidates[1].fitness):
                stale = 0
            else:
                stale += 1
            # Re-seed the population if 100 generations have passed
            # with the fittest two candidates always having the same fitness.
            if (stale >= 100):
                print("The population has gone stale. Re-seeding...")
                self.population.seed(Nc, self.given)
                stale = 0
                sigma = 1
                phi = 0
                mutation_rate = 0.06
        print("No solution found.")
        return (-2, 1)
import requests
from urllib.parse import urljoin
class seller:
    """HTTP client for the seller-side endpoints mounted under <url_prefix>/goods/.

    All methods return either a boolean success flag (status code == 200)
    or the tuple of fields extracted from the JSON response, with empty
    strings on failure.
    """

    def __init__(self, url_prefix):
        # Every endpoint of this client lives below the goods/ sub-path.
        self.url_prefix = urljoin(url_prefix, "goods/")

    def addGoods(self, goodsId, goodsName, goodsauth, goodsPrice, goodsNum, goodsType, goodsDsr, sellerName) -> bool:
        """Create a new goods entry; True on HTTP 200."""
        json = {"goodsId": goodsId, "goodsName": goodsName, "goodsauth": goodsauth, "goodsPrice": goodsPrice,
                "goodsNum": goodsNum, "goodsType": goodsType, "goodsDsr": goodsDsr, "sellerName": sellerName}
        #headers = {"token": token}
        url = urljoin(self.url_prefix, "addGoods")
        r = requests.post(url, json=json)
        return r.status_code == 200

    def getMemberInfo(self, username: str, token: str) -> (str, str, str):
        """Fetch (name, sex, tele) for a member; empty strings on failure.

        NOTE(review): the server apparently expects a JSON body on GET
        requests — confirm against the backend API before changing.
        """
        json = {"username": username}
        headers = {"token": token}
        url = urljoin(self.url_prefix, "getMemberInfo")
        r = requests.get(url, headers=headers, json=json)
        if r.status_code == 200:
            return r.json()["name"], r.json()["sex"], r.json()["tele"]
        else:
            return "", "", ""

    def editMemberInfo(self, username: str, token: str) -> bool:
        """Update member info; True on HTTP 200."""
        json = {"username": username}
        headers = {"token": token}
        url = urljoin(self.url_prefix, "editMemberInfo")
        r = requests.post(url, headers=headers, json=json)
        return r.status_code == 200

    # 1.editItem - edit a seller's goods item
    def editItem(self, username: str, token: str) -> bool:
        """Edit a goods item; True on HTTP 200."""
        json = {"username": username}
        # headers = {"token": token}
        url = urljoin(self.url_prefix, "editItem")
        r = requests.post(url, json=json)
        return r.status_code == 200

    def getSoldItem(self, username: str, token: str) -> (str, str, str, str, str):
        """Fetch a sold order (id, date, status, product name, price); empty strings on failure."""
        json = {"username": username}
        headers = {"token": token}
        url = urljoin(self.url_prefix, "getSoldItem")
        r = requests.get(url, headers=headers, json=json)
        if r.status_code == 200:
            return r.json()["orderId"], r.json()["orderDate"], r.json()["orderStatus"], r.json()["productName"], r.json()["productPrice"]
        else:
            return "", "", "", "", ""

    def getRefundItemOrder(self, username: str, token: str) -> (str, str, str):
        """Fetch a refund order (id, product name, price); empty strings on failure."""
        json = {"username": username}
        headers = {"token": token}
        url = urljoin(self.url_prefix, "getRefundItemOrder")
        r = requests.get(url, headers=headers, json=json)
        if r.status_code == 200:
            return r.json()["orderId"], r.json()["productName"], r.json()["productPrice"]
        else:
            return "", "", ""

    # 2.sellerRefundGoods - query return-shipping info for a seller refund
    def sellerRefundGoods(self, username: str, token: str, orderId: str) -> (str, str, str):
        """Fetch refund shipping info (order id, product name, address).

        Bug fix: the failure branch returned a single "" although the method
        is documented (and used by its siblings' convention) to return a
        3-tuple; callers unpacking the result would crash on failure.
        """
        json = {"username": username, "orderId": orderId}
        headers = {"token": token}
        url = urljoin(self.url_prefix, "sellerRefundGoods")
        r = requests.get(url, headers=headers, json=json)
        if r.status_code == 200:
            return r.json()["orderId"], r.json()["productName"], r.json()["address"]
        else:
            return "", "", ""
|
import scipy
from numpy import *
import scipy.integrate
from fractions import Fraction
# Line-integral comparison of two paths from (1,1,0) to (2,2,0).
# variable declarations (endpoints kept for reference; not used below)
a = array([1,1,0])
b = array([2,2,0])

def C1(x):
    """Integrand along the first leg of path 1 (dx component)."""
    return 1

path1_1, err1 = scipy.integrate.quad(C1, 1, 2)  # calculating part1 of path1

def C2(y):
    """Integrand along the second leg of path 1 (dy component)."""
    return 4*(y+1)

path1_2, err2 = scipy.integrate.quad(C2, 1, 2)  # calculating part2 of path1
Path1_2 = (Fraction(path1_2).limit_denominator(100))
path1 = path1_1+Path1_2
Path1 = Fraction(path1).limit_denominator(100)

def C3(x):
    """Integrand along path 2 (the direct path)."""
    return (3*x**2)+(2*x)

path2, err3 = scipy.integrate.quad(C3, 1, 2)  # calculating path2 (err3: was err2, clobbering C2's error)
Path2 = Fraction(path2).limit_denominator(100)

# Difference of the two paths = circulation of the out-and-back loop.
pathb = Path1 - Path2
Pathb = Fraction(pathb).limit_denominator(100)

# Bug fix: the original used Python 2 print statements, which are a
# SyntaxError on Python 3; converted to print() calls.
print('Path 1 part 1 =', int(path1_1))
print('Path 1 part 2 =', Path1_2)
print('Path 1 = ', Path1)
print('path 2 = ', Path2)
print('for loop that goes out(1) and back(2) then, = ', Pathb)
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#############################################################################################
# #
# plot_sim_position.py: create sim positional trend plots #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Feb 17, 2021 #
# #
#############################################################################################
import os
import sys
import re
import string
import time
import Chandra.Time
import random
import numpy
#
#--- reading directory list
#
# Location of the house-keeping file that maps directory variables to paths.
path = '/data/mta/Script/SIM_move/Scripts/house_keeping/dir_list'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]

# Each line has the form "<path> : <var_name>"; exec binds the variable at
# module level (e.g. mta_dir, bin_dir, web_dir, house_keeping).
# NOTE(review): exec on file contents executes whatever is in dir_list;
# acceptable only because the file is operator-controlled.
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    exec("%s = %s" %(var, line))

sys.path.append(mta_dir)
sys.path.append(bin_dir)
#
#--- import several functions
#
import mta_common_functions as mcf
import sim_move_supple as sms
#
#--- temp writing file name
#
import random
rtail = int(time.time() * random.random())
zspace = '/tmp/zspace' + str(rtail)
#
#--- some setting
#
# Detector names; index 4 means "all detectors combined".
detectors = ['ACIS-I','ACIS-S','HRC-I','HRC-S', 'All']
# Per-detector y ranges: tscpos, fapos, and mrmmxmv respectively.
drange = [[89000, 94103], [71420, 76820], [-51705, -49306], [-100800, -98400], [-110000, 101000]]
drange2 = [[-2000, 200], [-2000, 200], [-2000, 200], [-2000, 200], [-2000, 200]]
drange3 = [[-2, 15], [-2, 15], [-2, 15], [-2, 15], [-2, 15]]
# Plot-range names and the padding (seconds) added past "today" for each.
tail_list = ['week', 'month', 'year', 'full']
add_list = [0, 86400, 3*86400, 365*86400]
#--------------------------------------------------------------------------------
#-- plot_sim_position: create sim positional trend plots --
#--------------------------------------------------------------------------------
def plot_sim_position(t_array, tsc_array, fa_array, mpw_array, inst_array, xmin_range, today):
    """
    create sim positional trend plots
    input:  t_array     --- an array of time data
            tsc_array   --- an array of tsc data
            fa_array    --- an array of fa data
            mpw_array   --- an array of mrmmxmv data
            inst_array  --- an array of instrument indecies
            xmin_range  --- a list of starting time in week, month, year and full range plot
            today       --- today's time in seconds from 1998.1.1
    output: <web_dir>/Position/<msid>_<inst>_<type>.png
    """
    #
    #--- the original repeated the identical four calls once per detector;
    #--- a single loop preserves the exact call order:
    #--- 0=ACIS-I, 1=ACIS-S, 2=HRC-I, 3=HRC-S, 4=all data combined
    #
    for pos in range(5):
        prep_and_plot(pos, xmin_range, t_array, tsc_array, inst_array, drange, 'tscpos', today, 'SIM Position')
        prep_and_plot(pos, xmin_range, t_array, fa_array, inst_array, drange2, 'fapos', today, 'FA Position')
        prep_and_plot(pos, xmin_range, t_array, mpw_array, inst_array, drange3, 'mrmmxmv', today, '3MRMMXMV')
        prep_and_lot2(pos, tsc_array, mpw_array, t_array, inst_array, drange, drange3, xmin_range, "tsc_mxmv", 'TSCPOS', 'MRMMXMV')
#--------------------------------------------------------------------------------
#-- prep_and_plot: prepare data for specific plot and plot it ---
#--------------------------------------------------------------------------------
def prep_and_plot(pos, xmin_range, x_list, y_list, inst, drange, prefix, today, yname):
    """
    prepare data for specific plot and plot it
    input:  pos         --- indicator of instrument (0-3; 4 means all data combined)
            xmin_range  --- a list of starting time
            x_list      --- an array of time data
            y_list      --- an array of selected msid data
            inst        --- an array of instrument (in values between 0 and 4)
            drange      --- a list of data range
            prefix      --- indicator of which data set
            today       --- today's time in seconds from 1998.1.1
            yname       --- y axis label
    output: <web_dir>/Position/<msid>_<inst>_<range>.png
    note:   the loop below currently runs range(3, 4), so ONLY the 'full'
            range plot is produced; restore range(0, 4) for week/month/year.
    """
    #
    #--- select data for the instrument
    #
    if pos == 4:            #--- combine data set
        xr = x_list
        yr = y_list
    else:
        # Boolean mask selecting this instrument's rows.
        indx = inst == pos
        xr = x_list[indx]
        yr = y_list[indx]
    title = detectors[pos].replace('-','_')
    y_range = drange[pos]
    #
    #--- week, month, year, and full range plots
    #
    #for k in range(0, 4):
    for k in range(3, 4):
        tail = tail_list[k]
        outname = web_dir + 'Position/' + prefix + '_' + title.lower() + '_' + tail + '.png'
        x_range = [xmin_range[k], today + add_list[k]]
        #
        #--- converting to yday (week/month/year plots use day-of-year axis)
        #
        if k in [0, 1, 2]:
            indx = xr > xmin_range[k]
            x = xr[indx]
            y = yr[indx]
            # No data in range: publish the placeholder image instead.
            if len(x) < 1:
                cmd = 'cp ' + house_keeping + 'no_plot.png ' + outname
                os.system(cmd)
                continue
            byear, x = sms.convert_time_format(x, 0)
            [year1, start] = sms.chandratime_to_yday(x_range[0])
            [year2, stop] = sms.chandratime_to_yday(x_range[1])
            if year1 == year2:
                x_range = [start, stop]
            else:
                # Range straddles a year boundary: shift by the base year's
                # length so the axis stays monotonic.
                if mcf.is_leapyear(byear):
                    base = 366
                else:
                    base = 365
                if byear == year1:
                    x_range = [start, stop + base]
                else:
                    x_range = [start - base, stop]
            xname = 'Time (YDay in Year: ' + str(byear) + ')'
        #
        #--- converting to fractional year (the 'full' range plot)
        #
        else:
            byear, x = sms.convert_time_format(xr, 1)
            y = yr
            start = mcf.chandratime_to_fraq_year(x_range[0])
            stop = mcf.chandratime_to_fraq_year(x_range[1])
            x_range = [start, stop]
            xname = 'Time (in Year)'
        sms.plot_panel(x, y, x_range, y_range, xname, yname, title, outname)
#--------------------------------------------------------------------------------
#-- prep_and_lot2: prepare data for specific plot and plot it --
#--------------------------------------------------------------------------------
def prep_and_lot2(pos, x_list, y_list, t_list, inst, x_range, y_range, t_range, prefix, xname, yname):
    """
    prepare data for a scatter plot of one msid against another and plot it
    (docstring corrected: the original was copy-pasted from prep_and_plot)
    input:  pos     --- indicator of instrument (0-3; 4 means all data combined)
            x_list  --- an array of x-axis msid data (e.g. tscpos)
            y_list  --- an array of y-axis msid data (e.g. mrmmxmv)
            t_list  --- an array of time data, used only to cut the data range
            inst    --- an array of instrument indices (values between 0 and 4)
            x_range --- a list of x plotting ranges indexed by pos
            y_range --- a list of y plotting ranges indexed by pos
            t_range --- a list of starting times for week/month/year/full
            prefix  --- file name prefix of the output plot
            xname   --- x axis label
            yname   --- y axis label
    output: <web_dir>/Position/<prefix>_<inst>_<range>.png
    """
    #
    #--- select data for the instrument
    #
    if pos == 4:            #--- combine data set
        xr = x_list
        yr = y_list
        tr = t_list
    else:
        indx = inst == pos
        xr = x_list[indx]
        yr = y_list[indx]
        tr = t_list[indx]
    title = detectors[pos].replace('-','_')
    # One plot per time range (week, month, year, full).
    for k in range(0, 4):
        tail = tail_list[k]
        outname = web_dir + 'Position/' + prefix + '_' + title.lower() + '_' + tail + '.png'
        # Keep only data newer than the range's start time.
        indx = tr > t_range[k]
        x = xr[indx]
        y = yr[indx]
        sms.plot_panel(x, y, x_range[pos], y_range[pos], xname, yname, title, outname)
#--------------------------------------------------------------------------------
if __name__ == "__main__":
    # NOTE(review): plot_sim_position() takes 7 required positional arguments,
    # so this bare call raises TypeError. Presumably the real driver supplies
    # the data arrays from elsewhere — confirm before running this module
    # directly.
    plot_sim_position()
|
from marshmallow import Schema, fields, EXCLUDE, post_load
from summary.model import Commodity, CostSnapshot, StockSummary, Station, DockSummary
class BaseSchema(Schema):
    """Common base schema: silently drop unknown keys instead of raising."""
    class Meta:
        unknown = EXCLUDE
class CostSnapshotSchema(BaseSchema):
    """Validates one market price/stock observation and builds a CostSnapshot.

    The first block of fields is mandatory; the station/system metadata
    below it is optional (allow_none) because older records may lack it.
    """
    system_name = fields.String(required=True)
    station_name = fields.String(required=True)
    timestamp = fields.String(required=True)
    buy_price = fields.Integer(required=True)
    stock = fields.Integer(required=True)
    sell_price = fields.Integer(required=True)
    demand = fields.Integer(required=True)
    # Optional station/system metadata.
    market_id = fields.Integer(allow_none=True)
    star_pos = fields.List(fields.Float(), allow_none=True)
    station_type = fields.String(allow_none=True)
    system_address = fields.Integer(allow_none=True)
    dist_from_star_ls = fields.Float(allow_none=True)
    station_allegiance = fields.String(allow_none=True)
    @post_load
    def to_domain(self, data, **kwargs) -> CostSnapshot:
        """Convert the validated payload into the CostSnapshot domain object."""
        return CostSnapshot(**data)
class CommoditySchema(BaseSchema):
    """Validates a commodity with its best buy/sale snapshots and builds a Commodity."""
    name = fields.String(required=True)
    best_buys = fields.List(fields.Nested(CostSnapshotSchema), required=True)
    best_sales = fields.List(fields.Nested(CostSnapshotSchema), required=True)
    @post_load
    def to_domain(self, data, **kwargs) -> Commodity:
        """Convert the validated payload into the Commodity domain object."""
        return Commodity(**data)
class StockSummarySchema(BaseSchema):
    """Validates the stock summary (list of commodities) and builds a StockSummary."""
    # Bug fix: the keyword was `Required=True` (capital R), which marshmallow
    # silently swallows as an unknown kwarg, leaving the field optional.
    commodities = fields.List(fields.Nested(CommoditySchema), required=True)

    @post_load
    def to_domain(self, data, **kwargs) -> StockSummary:
        """Convert the validated payload into the StockSummary domain object."""
        return StockSummary(**data)
class StationSchema(BaseSchema):
    """Validates a station record and builds a Station domain object."""
    market_id = fields.Integer(required=True)
    star_pos = fields.List(fields.Float(), required=True)
    station_name = fields.String(required=True)
    station_type = fields.String(required=True)
    system_address = fields.Integer(required=True)
    system_name = fields.String(required=True)
    timestamp = fields.String(required=True)
    # Optional metadata not present in every journal message.
    dist_from_star_ls = fields.Float(allow_none=True)
    station_allegiance = fields.String(allow_none=True)
    @post_load
    def to_domain(self, data, **kwargs) -> Station:
        """Convert the validated payload into the Station domain object."""
        return Station(**data)
class DockSummarySchema(BaseSchema):
    """Validates the dock summary (mapping of key -> station) and builds a DockSummary."""
    # Bug fix: the keyword was `Required=True` (capital R), which marshmallow
    # silently swallows as an unknown kwarg, leaving the field optional.
    stations = fields.Dict(fields.String(), fields.Nested(StationSchema), required=True)

    @post_load
    def to_domain(self, data, **kwargs) -> DockSummary:
        """Convert the validated payload into the DockSummary domain object."""
        return DockSummary(**data)
|
''' This one line outlines the module content
Here we see the detailed description of the module:
Make clear how to comment and structure your code so that others can read and use it.
Have a look at the content of https://github.com/alnkpa/pycc/wiki/Coding-style
'''
# import in the beginning
import sys
class OurClass:
    ''' Example class demonstrating the project's commenting conventions.

    Shows where class-level constants, method comments and docstrings go.
    SOME_CONSTANT serves as the example of a class-level constant and is
    documented here, indented to show it belongs to this description.
    '''

    SOME_CONSTANT = "something that will not change is all capital"

    # The comment about a method goes directly above its definition.
    def ourMethod(self, a, b, c):
        ''' ourMethod(a, b, c) stores the sum of its arguments.

        Sets the instance attribute `variable` to a + b + c and
        returns None.
        '''
        total = a + b
        self.variable = total + c

    # Methods are separated by a blank line.
    def getVariable(self):
        '''getVariable() returns the stored value of variable as int.'''
        return int(self.variable)
# some things do not need an explanation
__all__ = ['OurClass']

if __name__ == '__main__':
    try:
        # testing OurClass() on errors during creation
        o = OurClass()
    # Bug fix: a bare `except:` also traps SystemExit and KeyboardInterrupt;
    # catch Exception so deliberate exits still propagate.
    except Exception:
        errortype, error, traceback = sys.exc_info()
        print("always show when there is an unexpected error {0}".format(error))
# Rules are made to break them.
# And in case you did well you will not get harmed.
|
import time
import requests
from keys import BOT_CHATID, BOT_TOKEN
def send_telegram_message(bot_message):
    """Send *bot_message* to the configured Telegram chat via the Bot API.

    Returns the decoded JSON response, or None when the message is empty
    (nothing is sent).
    """
    # Bug fix: the old guard was `bot_message != {}`; a string never equals
    # a dict, so empty strings slipped through. Truthiness covers both.
    if not bot_message:
        return None
    url = 'https://api.telegram.org/bot' + BOT_TOKEN + '/sendMessage'
    # Let requests URL-encode the query string; the old raw concatenation
    # broke messages containing '&', '#' or spaces.
    params = {
        'chat_id': BOT_CHATID,
        'parse_mode': 'Markdown',
        'text': bot_message,
    }
    response = requests.get(url, params=params)
    return response.json()
|
#
# Arquitetura e Redes de Comunicação de Sistemas Embarcados
#
# Projeto I – Transporte confiável de dados utilizando protocolo de bit alternante
#
# sender.py (script para envio dos dados)
# receiver.py (script para recebimento dos dados)
#
# Instrucoes para uso disponiveis no arquivo README.md
#
# MATHEUS ARCANGELO ESPERANÇA
# RA 150007034
#
import os
from socket import *
# receiver IP address input
server_name = input('IP receiver: ')
# reachability check: ping once and abort on a non-zero exit status
# (assumes a Unix `ping -c`; this tests reachability, not address syntax)
ping = os.system("ping -c 1 " + server_name)
if ping != 0:
    print("IP receiver invalido...")
    quit()
# port number input; must fall in the project's assigned range
server_port = int(input('PORTA: '))
if server_port < 10001 or server_port > 11000:
    print("Porta invalida...")
    quit()
# number of messages to send to the receiver (kept as str: it is embedded
# in every datagram and also int()-converted for the loop bound)
MSGS = input('MSGS: ')
if MSGS.isdigit() == 0:
    print("N mensagens invalido...")
    quit()
# UDP socket with a 1-second receive timeout for the stop-and-wait ACK
client_socket = socket(AF_INET, SOCK_DGRAM)
client_socket.settimeout(1)
SEQNO = 0        # alternating-bit sequence number (0/1)
sent_MSGS = 0    # messages acknowledged so far
while sent_MSGS < int(MSGS):
    print('')
    DATA = input('DATA: ')
    if DATA.isdigit() == 0:
        print("Dado invalido...")
        quit()
    ack_received = False
    # retransmit the same datagram until the matching ACK arrives
    while not ack_received:
        message = str(SEQNO) + ' ' + DATA + ' ' + MSGS
        print('SEND: ' + message)
        client_socket.sendto(message.encode() , (server_name, server_port))
        try:
            response, server_address = client_socket.recvfrom(2048)
        except timeout:
            # no ACK within 1s: loop around and retransmit
            print("RECV: timeout")
        else:
            print('RECV: ' + response.decode())
            # NOTE(review): assumes the ACK payload carries the sequence
            # digit at byte offset 3 (e.g. b'ACK0'); -48 converts the ASCII
            # digit to its int value. Confirm against receiver.py's format.
            ack_seq = response[3] - 48
            if ack_seq == SEQNO:
                # correct ACK: flip the alternating bit and count the message
                ack_received = True
                SEQNO = 1 - SEQNO
                sent_MSGS = 1 + sent_MSGS
print('')
print("Envio concluido...")
print('')
client_socket.close()
from keras.models import Model, Input
from keras.models import Sequential
from keras.layers import GRU
from keras.layers import Dense
from keras.layers import Concatenate
from keras import optimizers
from keras_layer_normalization import LayerNormalization
#general
def GRU_model(x_length, n_features, n_aux, n_classes, n_neurons, learning_rate, dropout_rate, recurrent_dropout, loss_type):
    """Build and compile a single-layer GRU classifier with an auxiliary input.

    A GRU layer reads an (x_length, n_features) sequence; its layer-normalized
    output is concatenated with an n_aux-wide auxiliary vector and passed to a
    softmax layer over n_classes. The model is compiled with Adam at the given
    learning rate and the supplied loss.

    Returns the compiled keras Model expecting inputs [sequence, aux_vector].
    """
    input1 = Input(shape=(x_length, n_features))
    x = GRU(n_neurons, activation='relu', dropout=dropout_rate, recurrent_dropout=recurrent_dropout, return_sequences=False)(input1)
    x = LayerNormalization()(x)
    aux_input = Input(shape=(n_aux,))
    # axis=1 joins along features: (n_neurons,) + (n_aux,) -> (n_neurons + n_aux,)
    main_input = Concatenate(axis=1)([x, aux_input])
    output = Dense(n_classes, activation='softmax')(main_input)
    model = Model(inputs=[input1, aux_input], outputs=output)
    adam_optim = optimizers.Adam(lr=learning_rate)
    model.compile(loss=loss_type, optimizer=adam_optim)
    return model
# Search keywords: Microsoft Office product names followed by an
# (alphabetical, with minor ordering slips) list of natural languages.
keywords = [
    "Word",
    "Excel",
    "PowerPoint",
    "Power Point",
    "Outlook",
    "Afrikaans",
    "Albanian",
    "Arabic",
    "Armenian",
    "Basque",
    "Bengali",
    "Bulgarian",
    "Catalan",
    "Cambodian",
    "Chinese",
    "Croatian",
    "Czech",
    "Danish",
    "Dutch",
    "English",
    "Estonian",
    "Fiji",
    "Finnish",
    "French",
    "Georgian",
    "German",
    "Greek",
    "Gujarati",
    "Hebrew",
    "Hindi",
    "Hungarian",
    "Icelandic",
    "Indonesian",
    "Irish",
    "Italian",
    "Japanese",
    "Javanese",
    "Korean",
    "Latin",
    "Latvian",
    "Lithuanian",
    "Macedonian",
    "Malay",
    "Malayalam",
    "Maltese",
    "Maori",
    "Marathi",
    "Mongolian",
    "Nepali",
    "Norwegian",
    "Persian",
    "Polish",
    "Portuguese",
    "Punjabi",
    "Quechua",
    "Romanian",
    "Russian",
    "Samoan",
    "Serbian",
    "Slovak",
    "Slovenian",
    "Spanish",
    "Swahili",
    "Swedish",
    "Tamil",
    "Tatar",
    "Telugu",
    "Thai",
    "Tibetan",
    "Tonga",
    "Turkish",
    "Ukrainian",
    "Urdu",
    "Uzbek",
    "Vietnamese",
    "Welsh",
]
|
import json

# Generates a small JSON fixture of labelled (x, y) series for chart testing.
path = "./testData.json"
# Scratch values from earlier experiments; only the final 'data' assignment
# below is actually serialized.
#xValues = range(1,46,1)
xValues = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
           16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,
           31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45]
yValues = [66, 56, 70, 72, 67, 68, 70, 71, 74, 69, 72, 70, 70, 70, 72,
           66, 70, 72, 72, 65, 70, 70, 72, 64, 68, 71, 70, 70, 78, 86,
           85, 70, 76, 70, 74, 72, 70, 70, 75, 70, 70, 68, 70, 69, 73]
yValues = [1, 2, 3, 4, 5]
y2Values = [2, 3, 4, 5, 6]
y3Values = [3, 4, 5, 6, 7]
yValues = []
xValues = range(1, len(yValues) + 1)
data = [xValues, yValues]
data = [xValues, yValues, xValues, y2Values, xValues, y3Values]
data = {"label1": [xValues, yValues], "label2": [xValues, y2Values], "label3": [xValues, y3Values]}
# BUG FIX: "exercising" was [[1][2]] — that indexes the one-element list [1]
# at position 2 (IndexError at runtime) instead of the intended pair [[1],[2]].
data = {"working": [[1], [12]], "eating": [[1], [2]], "sleeping": [[1], [8]], "exercising": [[1], [2]]}
# 'with' guarantees the file is closed even if json.dump raises
with open(path, "w") as dataFile:
    json.dump(data, dataFile, indent=4)
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-07-27 08:30
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial coupons schema: Coupon plus Claim, the user-to-coupon
    through-table that records when each coupon was claimed."""
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Claim',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_claimed', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
            ],
        ),
        migrations.CreateModel(
            name='Coupon',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('code', models.CharField(max_length=10, unique=True)),
                ('terms', models.TextField()),
                ('create_date', models.DateTimeField(default=django.utils.timezone.now, editable=False)),
                ('publish_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('validity', models.DateTimeField(default=django.utils.timezone.now)),
                ('claimants', models.ManyToManyField(related_name='Claimed', through='coupons.Claim', to=settings.AUTH_USER_MODEL)),
                ('owner', models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Claim's FKs are added after both models exist, breaking the cycle
        # between Claim and Coupon.
        migrations.AddField(
            model_name='claim',
            name='coupon',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='coupons.Coupon'),
        ),
        migrations.AddField(
            model_name='claim',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
from server.response.code import *
|
#!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
from utils.timer import Timer
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import scipy.io as sio
import argparse
from contextlib import contextmanager
import shutil
from subprocess import Popen, PIPE
import shlex
import tempfile
import re
import time
import fcntl
import os
import sys
from timeit import default_timer as timer
from osgeo import gdal
def parse_args():
    """Parse and return the command-line arguments for the demo."""
    parser = argparse.ArgumentParser(description='Faster R-CNN demo')
    parser.add_argument('--tiles', dest='tile_path', type=str, default=None,
                        help='image tile output path')
    parser.add_argument('dstring', type=str,
                        help="Detection string to process")
    return parser.parse_args()
def mergeTiles(src, dst):
    """Alpha-composite the RGBA tile at *src* over the tile at *dst*,
    writing the blended result back to *dst*."""
    top = mpimg.imread(src)
    bottom = mpimg.imread(dst)
    top_rgb, top_a = top[..., :3], top[..., 3]
    bot_rgb, bot_a = bottom[..., :3], bottom[..., 3]
    # Standard "over" operator on straight (non-premultiplied) alpha.
    out_a = top_a + bot_a * (1.0 - top_a)
    blended_rgb = (top_rgb * top_a[..., None]
                   + bot_rgb * bot_a[..., None] * (1.0 - top_a[..., None])) / out_a[..., None]
    merged = np.zeros_like(top)
    merged[..., :3] = blended_rgb
    merged[..., 3] = out_a
    mpimg.imsave(dst, merged)
def moveTiles(src, dst):
    """Recursively move .png map tiles from *src* into *dst*.

    A tile that already exists at the destination is alpha-blended with the
    incoming one via mergeTiles instead of being overwritten; everything
    created is chmod'd world-accessible so the web server can read it.
    """
    files = os.listdir(src)
    if not os.path.exists(dst):
        os.makedirs(dst)
    # chmod of dirs so the serving process can traverse the whole tree
    for p,d,f in os.walk(dst):
        os.chmod(p, 0o777)
    for f in files:
        sname = os.path.join(src, f)
        dname = os.path.join(dst, f)
        if os.path.isdir(sname):
            # recurse into zoom-level / column subdirectories
            moveTiles(sname, dname)
        else:
            fname, ext = os.path.splitext(dname)
            # Currently only moving the tiles since the
            # tilemapresource.xml is not being used by leaflet.
            # TODO: merge the tilemapresource.xml files by
            # reading the xml and updating the bounding box, and
            # x,y of the tiles.
            if os.path.exists(dname) == True and ext == '.png':
                # collision: blend into the existing tile, leave source in place
                mergeTiles(sname, dname)
                #i = 0;
                #dname2 = dname + str(i)
                #while os.path.exists(dname2) == True:
                #    i += 1
                #    dname2 = dname + str(i)
                #shutil.move(sname, dname2)
                #os.chmod(dname, 0o666)
                pass
            elif ext == '.png':
                shutil.move(sname, dname)
                os.chmod(dname, 0o666)
def parseRectStr(rectStr):
    """Parse a detection string 'class x y w h' into integer (x, y, w, h).

    The leading class label is ignored; float coordinates are rounded to the
    nearest integer. Also fixes the 'pared rect' typo in the log message.
    """
    items = rectStr.split(' ')
    # items[0] is the class label, which we will ignore
    x, y, w, h = (int(round(float(v))) for v in items[1:5])
    print("parsed rect {0},{1},{2},{3}".format(x, y, w, h))
    return x, y, w, h
import signal, errno
from contextlib import contextmanager
@contextmanager
def timeout(seconds):
    """Arm SIGALRM for *seconds* around the managed block.

    The handler is deliberately a no-op: the useful side effect is that
    signal delivery interrupts a blocking syscall inside the block with
    EINTR, which the caller is expected to detect. The previous handler
    and a cleared alarm are always restored on exit.
    """
    def timeout_handler(signum, frame):
        # no-op: we only want the EINTR side effect of signal delivery
        pass
    orig_handler = signal.signal(signal.SIGALRM, timeout_handler)
    try:
        signal.alarm(seconds)
        yield
    finally:
        signal.alarm(0)
        signal.signal(signal.SIGALRM, orig_handler)
#
# We create the tile in a temp directory and then move it to its final
# destination.
#
def writeTilesFromDetects(tileDir, detects, origFile):
    """Clip each detection out of *origFile*, build web-map tiles for the
    clips, and merge the resulting tiles into *tileDir*.

    Pipeline per detection: gdal_translate (clip + 8-bit scale) ->
    gdalwarp (reproject to EPSG:3857) -> gdalbuildvrt over all clips ->
    gdal2tiles. Tile generation and the destination lock are both bounded
    by the EINTR-based timeout() context manager. Note: this is Python 2
    code (print statements, 'except IOError, err').
    """
    if detects == None or len(detects) == 0:
        return
    # all intermediate artifacts live in a throwaway temp dir
    tempTileDir = tempfile.mkdtemp(dir='/home/trbatcha/tempDir')
    os.chmod(tempTileDir, 0o777)
    outputDir = os.path.join(tempTileDir, "output")
    if not os.path.exists(outputDir):
        os.makedirs(outputDir)
    os.chmod(outputDir, 0o777)
    vname = os.path.basename(origFile)
    vPath = os.path.join(tempTileDir, vname + ".vrt")
    listPath = os.path.join(tempTileDir, "fileList.txt")
    listFile = open(listPath, "w")
    basename = os.path.basename(origFile)
    nname, ext = os.path.splitext(origFile)
    # NOTE(review): .tif inputs are treated as 8-bit, everything else as
    # 16-bit needing a 64..1024 -> 0..255 rescale — confirm this matches the
    # actual sensor products being fed in.
    if ext.lower() == '.tif':
        bitSize = 8
    else:
        bitSize = 16
    for detect in detects:
        print(detect)
        rectStr = detect
        x, y, w, h = parseRectStr(rectStr)
        print("detect = {0},{1},{2},{3}".format(x, y,w , h))
        # clip file names encode the detection rectangle; *_w is the warped copy
        tName = basename + "_" + str(x) + "_" + str(y) + "_" + \
            str(w) + "_" + str(h) + ".tif"
        t2Name = basename + "_" + str(x) + "_" + str(y) + "_" + \
            str(w) + "_" + str(h) + "_w" + ".tif"
        tPath = os.path.join(tempTileDir, tName)
        t2Path = os.path.join(tempTileDir, t2Name)
        if os.path.exists(tPath) == True:
            os.remove(tPath)
        if os.path.exists(t2Path) == True:
            os.remove(t2Path)
        # Get the image clip with gdal_translate (-srcwin x y w h)
        if bitSize == 16:
            transStr = "/home/trbatcha/tools/bin/gdal_translate -of GTiff " +\
                "-ot Byte -scale 64 1024 0 255 -b 1 -srcwin " \
                + str(x) + " " + str(y) + " " + str(w) + " " + str(h) + " " \
                + origFile + " " + tPath
        else:
            transStr = "/home/trbatcha/tools/bin/gdal_translate -of GTiff " +\
                "-ot Byte -b 1 -srcwin " \
                + str(x) + " " + str(y) + " " + str(w) + " " + str(h) + " " \
                + origFile + " " + tPath
        args = shlex.split(transStr)
        print("running translate")
        p = Popen(args, stdout=PIPE, stderr=PIPE)
        pstdout, pstderr = p.communicate()
        print pstderr
        print("translate complete")
        #get rid of xml file gdal_translate creates
        xmlfile = tPath + ".aux.xml"
        if os.path.exists(xmlfile):
            os.remove(tPath + ".aux.xml")
        print (pstdout)
        # reproject the clip to web-mercator for tiling
        warpStr = "/home/trbatcha/tools/bin/gdalwarp -of GTiff -t_srs " + \
            "EPSG:3857 -overwrite " + tPath + " " + t2Path
        args = shlex.split(warpStr)
        print("running warp")
        p = Popen(args, stdout=PIPE, stderr=PIPE)
        pstdout, pstderr = p.communicate()
        print (pstderr)
        print (pstdout)
        print("warp complete")
        listFile.write(t2Path + '\n')
    listFile.close()
    # build a single VRT mosaic over all warped clips
    vrtStr = "/home/trbatcha/tools/bin/gdalbuildvrt -srcnodata 0 -addalpha " \
        + "-vrtnodata 0 -overwrite -input_file_list " + listPath + \
        " " + vPath
    args = shlex.split(vrtStr)
    print("running vrt")
    p = Popen(args, stdout=PIPE, stderr=PIPE)
    pstdout, pstderr = p.communicate()
    print (pstderr)
    print (pstdout)
    print("virt complete")
    # Generate tiles for all the image chips
    import gdal2tiles
    tileStr = "-v -p mercator --zoom '13-16' -w none " + vPath + " " + outputDir
    #debug tried gdal2tiles as seperate process, it did not fix my problem so commented out
    #my_env = os.environ.copy()
    #tileStr = "/home/trbatcha/tools/bin/python gdal2tiles.py -v -p mercator -z 13 -w none " + vPath + " " + outputDir
    args = shlex.split(tileStr)
    # run it in seperate shell for clean up
    #p = Popen(args, env=my_env, stdout=PIPE, stderr=PIPE)
    #stdout, stderr = p.communicate()
    #print (stderr)
    #print (stdout)
    print("gen tiles")
    tileGenFailed = False
    # bound tile generation to 10s; SIGALRM surfaces as IOError/EINTR below
    with timeout(10):
        try:
            # By default gdal turns exceptions off
            gdal.UseExceptions()
            targs = gdal.GeneralCmdLineProcessor(args)
            gtiles = gdal2tiles.GDAL2Tiles(targs)
            gtiles.process()
        except IOError, err:
            if err.errno != errno.EINTR:
                # genuine I/O failure, not the alarm: clean up and bail out
                print("gdal2tiles FAILED!!!")
                print(err)
                sys.stdout.flush()
                shutil.rmtree(tempTileDir, ignore_errors=True)
                return
            # EINTR here means the 10s alarm fired
            print("TileGeneration TIMED OUT!! for file " + origFile)
            tileGenFailed = True
    print("gen tiles complete")
    # before we move tiles lets check lockfile and wait if not avail
    if tileGenFailed == False:
        with timeout(3):
            lockFile = os.path.join(tileDir,"tileLock")
            lock = open(lockFile, 'w+')
            try:
                # exclusive lock serializes concurrent writers into tileDir
                fcntl.flock(lock, fcntl.LOCK_EX)
            except IOError, e:
                if e.errno != errno.EINTR:
                    raise e
                # EINTR: the 3s lock wait timed out
                print("Tile filelock timeout")
                lock.close()
                shutil.rmtree(tempTileDir, ignore_errors=True)
                return
            moveTiles(outputDir, tileDir)
            fcntl.flock(lock, fcntl.LOCK_UN)
            lock.close()
    # remove the non-tiles we created
    shutil.rmtree(tempTileDir, ignore_errors=True)
if __name__ == '__main__':
    os.umask(0)
    # force stdout unbuffered so progress shows immediately; buffering=0 in
    # text mode is Python 2 only (Python 3 would raise ValueError here)
    sys.stdout = os.fdopen(sys.stdout.fileno(), "w", 0)
    print("Running doTile.py")
    args = parse_args()
    tiledir = args.tile_path
    dstring = args.dstring
    print("Recieved dstring: " + dstring)
    tempDir = tempfile.mkdtemp(dir = "/dev/shm")
    os.chmod(tempDir, 0o777)
    # If the string is surrounded by matching quotes, remove them
    if dstring[0] == dstring[-1] and dstring.startswith(("'", '"')):
        dstring = dstring[1:-1]
    # expected format: '<origFile> <class> <x> <y> <width> <height>'
    origFile, type, x, y, width, height = dstring.split(" ")
    detectList = []
    detectList.append(type + " " + x + " " + y + " " + width + " " + height)
    print(detectList[0])
    temp = detectList[0].split(' ')
    print(temp[4])
    writeTilesFromDetects(tiledir, detectList, origFile)
    shutil.rmtree(tempDir)
    sys.exit(0)
|
"""
================================================
Dataset
================================================
"""
import numpy as np
import pandas as pd
class Dataset:
    """Wraps a pandas DataFrame with discovery and cleaning helpers.

    Subclasses are expected to supply ``self.config`` (column names, target,
    exclusion lists, ...); this base class itself only assumes
    ``self.dataset`` is a pandas DataFrame.
    """

    def __init__(self):
        self.dataset = None         # pandas DataFrame, assigned by the loader
        self.target = None          # target Series, assigned by set_target()
        self.column_stats = {}      # flat stats dict keyed '<column>_<stat>'
        self.corr_threshold = 0.80  # |correlation| at/above this flags a column

    def discovery(self):
        """Default discovery pass: per-column statistics only."""
        self.column_statistics()

    def set_columns(self):
        """Apply the configured column names to the dataset."""
        self.dataset.columns = self.config['columns']

    def set_target(self):
        """Cache the configured target column as ``self.target``."""
        self.target = self.dataset[self.config['target']]

    def shape(self):
        """Print the dataset's row and column counts."""
        print('\n--- Shape')
        print('\tRow count:\t', '{}'.format(self.dataset.shape[0]))
        print('\tColumn count:\t', '{}'.format(self.dataset.shape[1]))

    def column_statistics(self):
        """Fill ``self.column_stats`` and print a per-column summary.

        Bug fix: nunique and the numeric stats (min/mean/quantiles/max/std/
        skew/kurt) were previously computed on the boolean mask
        ``dataset[col] == 0`` instead of the column values themselves, so
        every "statistic" described the mask rather than the data.
        """
        print('\n--- Column Stats')
        for col in self.dataset:
            series = self.dataset[col]
            self.column_stats[col + '_dtype'] = series.dtype
            self.column_stats[col + '_zero_num'] = (series == 0).sum()
            self.column_stats[col + '_zero_pct'] = (((series == 0).sum() / self.dataset.shape[0]) * 100)
            self.column_stats[col + '_nunique'] = series.nunique()
            print('\n- {} ({})'.format(col, self.column_stats[col + '_dtype']))
            print('\tzero {} ({:.2f}%)'.format(self.column_stats[col + '_zero_num'],
                                               self.column_stats[col + '_zero_pct']))
            print('\tdistinct {}'.format(self.column_stats[col + '_nunique']))
            # Numerical features only
            if series.dtype != object:
                self.column_stats[col + '_min'] = series.min()
                self.column_stats[col + '_mean'] = series.mean()
                self.column_stats[col + '_quantile_25'] = series.quantile(.25)
                self.column_stats[col + '_quantile_50'] = series.quantile(.50)
                self.column_stats[col + '_quantile_75'] = series.quantile(.75)
                self.column_stats[col + '_max'] = series.max()
                self.column_stats[col + '_std'] = series.std()
                self.column_stats[col + '_skew'] = series.skew()
                self.column_stats[col + '_kurt'] = series.kurt()
                print('\tmin {}'.format(self.column_stats[col + '_min']))
                print('\tmean {:.3f}'.format(self.column_stats[col + '_mean']))
                print('\t25% {:.3f}'.format(self.column_stats[col + '_quantile_25']))
                print('\t50% {:.3f}'.format(self.column_stats[col + '_quantile_50']))
                print('\t75% {:.3f}'.format(self.column_stats[col + '_quantile_75']))
                print('\tmax {}'.format(self.column_stats[col + '_max']))
                print('\tstd {:.3f}'.format(self.column_stats[col + '_std']))
                print('\tskew {:.3f}'.format(self.column_stats[col + '_skew']))
                print('\tkurt {:.3f}'.format(self.column_stats[col + '_kurt']))

    def row_count_by_target(self, target):
        """Print value counts (with percentages) of column *target*."""
        print('\n--- Row count by {}'.format(target))
        series = self.dataset[target].value_counts()
        # .items() instead of .iteritems(), which was removed in pandas 2.x
        for idx, val in series.items():
            print('\t{}: {} ({:6.3f}%)'.format(idx, val, ((val / self.dataset.shape[0]) * 100)))

    def row_target_count_by_group(self, level, by):
        """Print row counts of *by* grouped by *level*, plus percentages."""
        print('\n--- Row count by {}'.format(level))
        df = self.dataset.groupby(level)[by].count()
        df = df.rename(columns={by[0]: 'Count'})
        df['Percent'] = (df['Count'] / self.dataset.shape[0]) * 100
        df_flat = df.reset_index()
        print(df_flat)

    def show_duplicates(self, level):
        """Print the number of fully-duplicated rows, aggregated by *level*."""
        print('\n--- Duplicates by {}'.format(level))
        df = self.dataset.groupby(self.dataset.columns.tolist()).size().reset_index(name='duplicates')
        # one occurrence of each row is not a duplicate
        df['duplicates'] = df['duplicates'] - 1
        df_flat = df.groupby(level)[['duplicates']].sum().reset_index()
        print(df_flat)

    def drop_duplicates(self):
        """Remove duplicate rows in place, keeping the first occurrence."""
        self.dataset.drop_duplicates(keep='first', inplace=True)

    def drop_cols(self, cols):
        """Drop *cols* from the dataset in place."""
        print('\n--- Dropping columns')
        print(cols)
        self.dataset.drop(columns=cols, inplace=True)

    # Consider data only less than 95% of max to exclude extreme outliers
    def drop_outliers(self):
        """Drop numeric rows that are both large (> 50) and extremely rare.

        A value qualifies when it exceeds 95% of the column maximum and such
        rows make up fewer than 0.01% of the dataset, so legitimate heavy
        tails are preserved.
        """
        print('\n--- Dropping Outliers')
        for col in self.dataset.columns:
            if self.dataset[col].dtype == np.float64 or self.dataset[col].dtype == np.int64:
                threshold = self.dataset[col].max() * 0.95
                outliers = self.dataset[(self.dataset[col] > 50) & (self.dataset[col] > threshold)]
                if (not outliers.empty) and (len(outliers) < (self.dataset.shape[0] * 0.0001)):
                    print('For column {} deleting {} rows over value {}'.format(col, len(outliers), threshold))
                    # concat + drop_duplicates(keep=False) removes the outlier rows
                    self.dataset = pd.concat([self.dataset, outliers]).drop_duplicates(keep=False)

    def drop_highly_correlated(self):
        """Drop one column of each pair correlated at/above corr_threshold,
        never touching the configured minority columns."""
        corr = self.dataset.corr().abs()
        # plain bool, not the alias np.bool removed in numpy >= 1.24
        upper_triangle = corr.where(np.triu(np.ones(corr.shape), k=1).astype(bool))
        # Find feature columns with correlation at or above the threshold
        cols_to_drop = [column for column in upper_triangle.columns if any(upper_triangle[column] >=
                                                                           self.corr_threshold)]
        cols_to_drop = list(set(cols_to_drop) - set(self.config['minority_cols_exclude_drop']))
        self.drop_cols(cols_to_drop)
class KDDCup1999(Dataset):
    """KDD Cup 1999 network-intrusion dataset: column/target configuration
    plus label cleaning and attack-category engineering."""
    def __init__(self):
        Dataset.__init__(self)
        # static configuration consumed by the Dataset base class and the
        # methods below (column order matches the raw kddcup file)
        self.config = {'columns': ['duration', 'protocol_type', 'service', 'flag', 'src_bytes', 'dst_bytes', 'land',
                                   'wrong_fragment', 'urgent', 'hot', 'num_failed_logins', 'logged_in',
                                   'num_compromised', 'root_shell', 'su_attempted', 'num_root', 'num_file_creations',
                                   'num_shells', 'num_access_files', 'num_outbound_cmds', 'is_host_login',
                                   'is_guest_login', 'count', 'srv_count', 'serror_rate', 'srv_serror_rate',
                                   'rerror_rate', 'srv_rerror_rate', 'same_srv_rate', 'diff_srv_rate',
                                   'srv_diff_host_rate', 'dst_host_count', 'dst_host_srv_count',
                                   'dst_host_same_srv_rate', 'dst_host_diff_srv_rate', 'dst_host_same_src_port_rate',
                                   'dst_host_srv_diff_host_rate', 'dst_host_serror_rate', 'dst_host_srv_serror_rate',
                                   'dst_host_rerror_rate', 'dst_host_srv_rerror_rate', 'label'],
                       'path': 'data',
                       'file': 'kddcup.data_10_percent',
                       'target': 'target',
                       'level_01': ['attack_category', 'label'],
                       'drop_cols_01': ['is_host_login', 'num_outbound_cmds', 'label', 'target'],
                       'drop_cols_02': ['attack_category'],
                       'minority_cols_exclude_drop': ['urgent', 'num_failed_logins', 'num_compromised', 'root_shell',
                                                      'su_attempted', 'num_root', 'num_file_creations', 'num_shells',
                                                      'num_access_files', 'is_guest_login'],
                       'onehotencode_cols': ['protocol_type', 'service', 'flag'],
                       'attack_category': ['normal', 'dos', 'u2r', 'r2l', 'probe'],
                       'pairplot_cols': ['duration', 'dst_host_diff_srv_rate', 'dst_host_srv_count', 'logged_in',
                                         'serror_rate', 'count'],
                       'pairplot_target': 'target'}
    def clean(self):
        """Strip the trailing '.' the raw file appends to every label."""
        self.dataset['label'] = self.dataset['label'].str.rstrip('.')
    def set_binary_label(self):
        """Add a 0/1 'target' column: 0 = normal, 1 = any known attack label."""
        conditions = [
            (self.dataset['label'] == 'normal'),
            (self.dataset['label'] == 'back') | (self.dataset['label'] == 'buffer_overflow') |
            (self.dataset['label'] == 'ftp_write') | (self.dataset['label'] == 'guess_passwd') |
            (self.dataset['label'] == 'imap') | (self.dataset['label'] == 'ipsweep') |
            (self.dataset['label'] == 'land') | (self.dataset['label'] == 'loadmodule') |
            (self.dataset['label'] == 'multihop') | (self.dataset['label'] == 'neptune') |
            (self.dataset['label'] == 'nmap') | (self.dataset['label'] == 'perl') |
            (self.dataset['label'] == 'phf') | (self.dataset['label'] == 'pod') |
            (self.dataset['label'] == 'portsweep') | (self.dataset['label'] == 'rootkit') |
            (self.dataset['label'] == 'satan') | (self.dataset['label'] == 'smurf') |
            (self.dataset['label'] == 'spy') | (self.dataset['label'] == 'teardrop') |
            (self.dataset['label'] == 'warezclient') | (self.dataset['label'] == 'warezmaster')
        ]
        choices = [0, 1]
        # NOTE(review): default=0 marks any label NOT in the lists above as
        # normal — confirm that is intended for unseen attack names.
        self.dataset['target'] = np.select(conditions, choices, default=0)
    def set_attack_category(self):
        """Add an 'attack_category' column; condition order matches
        config['attack_category'] (normal, dos, u2r, r2l, probe)."""
        conditions = [
            (self.dataset['label'] == 'normal'),
            (self.dataset['label'] == 'back') | (self.dataset['label'] == 'land') |
            (self.dataset['label'] == 'neptune') | (self.dataset['label'] == 'pod') |
            (self.dataset['label'] == 'smurf') | (self.dataset['label'] == 'teardrop'),
            (self.dataset['label'] == 'buffer_overflow') | (self.dataset['label'] == 'loadmodule') |
            (self.dataset['label'] == 'perl') | (self.dataset['label'] == 'rootkit'),
            (self.dataset['label'] == 'ftp_write') | (self.dataset['label'] == 'guess_passwd') |
            (self.dataset['label'] == 'imap') | (self.dataset['label'] == 'multihop') |
            (self.dataset['label'] == 'phf') | (self.dataset['label'] == 'spy') |
            (self.dataset['label'] == 'warezclient') | (self.dataset['label'] == 'warezmaster'),
            (self.dataset['label'] == 'ipsweep') | (self.dataset['label'] == 'nmap') |
            (self.dataset['label'] == 'portsweep') | (self.dataset['label'] == 'satan')
        ]
        self.dataset['attack_category'] = np.select(conditions, self.config['attack_category'], default='na')
    def transform(self):
        """Run the full label-engineering pass."""
        self.clean()
        self.set_binary_label()
        self.set_attack_category()
    def evaluate_sparse_features(self, engineer=False):
        """Report features that are >= 99% zero; optionally apply the
        handcrafted fixes decided after inspecting those reports."""
        print('\n--- Evaluating sparse features')
        for col in self.dataset.columns:
            key = col + '_zero_pct'
            if key in self.column_stats:
                if self.column_stats[key] >= 99:
                    print('\n{} {:.3f}%'.format(col, self.column_stats[key]))
                    self.row_target_count_by_group(['label', 'attack_category', col], ['label'])
        # Handcrafted engineering after column evaluation
        if engineer:
            # Col land - 19 of 20 rows land=1 for attack_category=land, set rare occurrence to 0 (data quality issue?)
            self.dataset.loc[self.dataset['label'] == 'normal', 'land'] = 0
            # Col urgent - rare and appears noisy, not signaling any particular attack type, remove
            # Col su_attempted - rare, occurs only once for intrusion and few times for normal, remove
            self.drop_cols(['urgent', 'su_attempted'])
    def discovery(self):
        """Extended discovery: column stats plus per-target breakdowns."""
        Dataset.column_statistics(self)
        self.row_count_by_target(self.config['target'])
        self.row_count_by_target('attack_category')
        self.row_target_count_by_group(self.config['level_01'], [self.config['target']])
|
from sqlalchemy import Column, String, Float, DateTime
from .base import Base
class BaseCollegeBase(Base):
    """SQLAlchemy model for the institution directory table.

    The raw source column name is noted in the trailing comment of each
    mapped attribute (e.g. UNITID, INSTNM).
    """
    __tablename__ = 'base_college_base'.lower()
    inst_id = Column('inst_id', Float, primary_key=True, index=True)  # UNITID
    inst_nm = Column('inst_nm', String(120))  # INSTNM
    inst_alias = Column('inst_alias', String(2000))  # IALIAS
    chief_admin_nm = Column('chief_admin_nm', String(50))  # CHFNM
    chief_admin_title = Column('chief_admin_title', String(50))  # CHFTITLE
    general_telephone_num = Column('general_telephone_num', String(15))  # GENTELE
    ein = Column('ein', String(9))  # EIN
    dun_bradstreet_num = Column('dun_bradstreet_num', String(2000))  # DUNS
    ope_id = Column('ope_id', String(8))  # OPEID
    ope_title4_flg = Column('ope_title4_flg', Float)  # OPEFLAG
    url_general = Column('url_general', String(150))  # WEBADDR
    url_admissions = Column('url_admissions', String(200))  # ADMINURL
    url_financial_aid = Column('url_financial_aid', String(200))  # FAIDURL
    url_application = Column('url_application', String(200))  # APPLURL
    url_net_price_calculator = Column('url_net_price_calculator', String(200))  # NPRICURL
    url_veterans = Column('url_veterans', String(200))  # VETURL
    url_athlete_grad_rate = Column('url_athlete_grad_rate', String(150))  # ATHURL
    url_disability = Column('url_disability', String(200))  # DISAURL
    inst_sector = Column('inst_sector', Float)  # SECTOR
    inst_level = Column('inst_level', Float)  # ICLEVEL
    inst_control = Column('inst_control', Float)  # CONTROL
    level_highest_offering = Column('level_highest_offering', Float)  # HLOFFER
    undergrad_offering = Column('undergrad_offering', Float)  # UGOFFER
    grad_offering = Column('grad_offering', Float)  # GROFFER
    degree_highest_offered = Column('degree_highest_offered', Float)  # HDEGOFR1
    degree_granting_status = Column('degree_granting_status', Float)  # DEGGRANT
    hbcu_flg = Column('hbcu_flg', Float)  # HBCU
    hospital_flg = Column('hospital_flg', Float)  # HOSPITAL
    medical_degree_flg = Column('medical_degree_flg', Float)  # MEDICAL
    tribal_inst_flg = Column('tribal_inst_flg', Float)  # TRIBAL
    open_to_general_public = Column('open_to_general_public', Float)  # OPENPUBL
    ipeds_participation = Column('ipeds_participation', String(1))  # ACT
    merged_inst_id = Column('merged_inst_id', Float)  # NEWID
    ipeds_removal_yr = Column('ipeds_removal_yr', Float)  # DEATHYR
    inst_closed_dt = Column('inst_closed_dt', String(10))  # CLOSEDAT
    inst_curr_active_flg = Column('inst_curr_active_flg', Float)  # CYACTIVE
    post_secondary_only = Column('post_secondary_only', Float)  # POSTSEC
    post_secondary_flg = Column('post_secondary_flg', Float)  # PSEFLAG
    agg_type_flg = Column('agg_type_flg', Float)  # PSET4FLG
    reporting_method = Column('reporting_method', Float)  # RPTMTH
    inst_category = Column('inst_category', Float)  # INSTCAT
    carnegie_basic_class_shorthand = Column('carnegie_basic_class_shorthand', Float)  # C15BASIC
    carnegie_undergrad_focus = Column('carnegie_undergrad_focus', Float)  # C15IPUG
    carnegie_grad_focus = Column('carnegie_grad_focus', Float)  # C15IPGRD
    carnegie_undergrad_profile = Column('carnegie_undergrad_profile', Float)  # C15UGPRF
    carnegie_enrollment_profile = Column('carnegie_enrollment_profile', Float)  # C15ENPRF
    carnegie_setting_classification = Column('carnegie_setting_classification', Float)  # C15SZSET
    # NOTE(review): unlike every other attribute, the DB column name here is
    # the raw source name 'CCBASIC' rather than the snake_case attribute name.
    # Renaming it would change the physical schema — confirm intent first.
    carnegie_basic_classification = Column('CCBASIC', Float)  # CCBASIC
    carnegie_classification = Column('carnegie_classification', Float)  # CARNEGIE
    land_grant_inst = Column('land_grant_inst', Float)  # LANDGRNT
    inst_size_category = Column('inst_size_category', Float)  # INSTSIZE
    multi_inst_flg = Column('multi_inst_flg', Float)  # F1SYSTYP
    multi_inst_nm = Column('multi_inst_nm', String(80))  # F1SYSNAM
    multi_inst_id = Column('multi_inst_id', String(6))  # F1SYSCOD
    inserted_ts = Column("inserted_ts", DateTime)  # INSERTED_TS
    updated_ts = Column("updated_ts", DateTime)  # UPDATED_TS
|
import requests
from PIL import Image
from io import StringIO
from tesserocr import PyTessBaseAPI
import urllib
# column = Image.open('chin-emo.png')
# gray = column.convert('L')
# blackwhite = gray.point(lambda x: 0 if x < 200 else 255, '1')
# blackwhite.save("chin-emo_bw.jpg")
def process_image(url):
    """Fetch the image at *url*, binarize it, and return the OCR'd text."""
    _get_image(url)
    grayscale = Image.open('01.jpg').convert('L')
    # threshold at 200: dark pixels -> black, light pixels -> white
    binarized = grayscale.point(lambda px: 255 if px >= 200 else 0, '1')
    binarized.save("sample.jpg")
    with PyTessBaseAPI() as ocr:
        ocr.SetImageFile('sample.jpg')
        return ocr.GetUTF8Text()
def _get_image(url):
    """Download *url* and save it as 01.jpg in the working directory.

    Fixes: the module-level ``import urllib`` does not guarantee the
    ``urllib.request`` submodule is loaded (importing it explicitly does),
    and both the response and the output file are now closed via ``with``
    even if the write fails.
    """
    import urllib.request  # local import: guarantees the submodule is bound
    with urllib.request.urlopen(url) as resp, open('01.jpg', 'wb') as f:
        f.write(resp.read())
#!/usr/bin/env python
# Jim Blaney
# Hood College
# 2 May 2014
# CS 319 - Algorithm Analysis
# Problem: You are given an array, A, of real numbers. Find the set, T,
# of contiguous numbers in A that provide the maximum sum. The
# set T must contain at least one number.
import os;
# find the absolute difference between adjacent values of a list;
# the returned list is one element shorter than the input
def pos_deltas(sequence):
    """Return [|s[i] - s[i+1]|] for consecutive elements of *sequence*."""
    # zip-based pairing replaces the index loop; xrange does not exist on
    # Python 3, while this form behaves identically on both versions.
    return [abs(a - b) for a, b in zip(sequence, sequence[1:])]
# returns a list of two-element lists -- the start and end
# indexes of contiguous "parity groups"
def split_parity_groups(deltas):
    """Split *deltas* into [start, end] index pairs of equal-parity runs."""
    splits = []
    last_parity = deltas[0] % 2
    start = 0
    for i, value in enumerate(deltas):
        current_parity = value % 2
        # '!=' replaces 'is not': identity comparison of ints only works by
        # accident via CPython's small-integer cache and is not guaranteed.
        if current_parity != last_parity:
            splits.append([start, i - 1])
            last_parity = current_parity
            start = i
    splits.append([start, len(deltas) - 1])
    return splits
# assembles lists that correlate with the second-degree
# delta indexes
def resolve_splits(seq, splits):
    """Map each [start, end] split back onto a slice of *seq*."""
    # each window is widened by 4 to reach from second-degree delta indexes
    # back into the original sequence
    return [seq[lo:hi + 4] for lo, hi in splits]
# removes leading and tailing values that are negative so that
# the first and last elements in the list are non-negative
def reduce_seqs(seqs):
    """Trim negative values off both ends of each sequence; drop empties."""
    trimmed = []
    for seq in seqs:
        lo, hi = 0, len(seq) - 1
        # shrink the window from the front, then the back, but never below
        # a single remaining element
        while hi - lo > 0 and seq[lo] < 0:
            lo += 1
        while hi - lo > 0 and seq[hi] < 0:
            hi -= 1
        window = seq[lo:hi + 1]
        if window:
            trimmed.append(window)
    return trimmed
# sums up a list of lists of numbers
def seq_sums(s):
    """Return the total of each inner sequence of *s*."""
    return [sum(inner) for inner in s]
def kadane_max_subarray_sum(A):
    """Kadane's algorithm: largest sum over all non-empty contiguous
    subarrays of *A*."""
    best = current = A[0]
    for value in A[1:]:
        extended = current + value
        # either extend the running subarray or restart at this element
        current = extended if extended > value else value
        best = current if current > best else best
    return best
def random_sequence(lower, upper, length):
    """Return *length* OS-entropy pseudo-random integers in [lower, upper).

    Fixes: ``bytes.encode('hex')`` does not exist on Python 3
    (binascii.hexlify behaves the same on both 2 and 3), the local variable
    no longer shadows the builtin ``range``, and xrange is replaced by
    range, which iterates identically here on both versions.
    """
    import binascii  # local import keeps the module's import block untouched
    span = upper - lower
    seq = []
    for _ in range(length):
        seq.append(int(binascii.hexlify(os.urandom(4)), 16) % span + lower)
    return seq
def run_test(lower, upper, length):
    """Run the parity-split heuristic on one random sequence and return True
    when its best sum matches Kadane's known-correct maximum-subarray sum."""
    seq = random_sequence(lower, upper, length)
    seqs = reduce_seqs(                  # 5. head/tail should be non-negative
        resolve_splits(                  # 4. construct parity sequences
            seq,
            split_parity_groups(         # 3. split by parity
                pos_deltas(              # 2. second-degree deltas
                    pos_deltas(seq)))))  # 1. first-degree deltas
    sums = seq_sums(seqs)                                    # 6. calculate sums
    max_index = max(enumerate(sums), key=lambda x: x[1])[0]  # 7. best candidate
    # 8. check against the known best solution. '==' replaces 'is': identity
    # comparison of ints is only reliable for CPython's cached small values,
    # so the old code could report a false mismatch for large sums.
    max_sum = kadane_max_subarray_sum(seq)
    return sums[max_index] == max_sum
if __name__ == '__main__':
    results = []
    for _ in range(100):
        results.append(run_test(-50, 50, 1000))
    # True counts as 1, so this is the number of trials where the heuristic
    # agreed with Kadane's algorithm, out of 100.
    hits = sum(results)
    # print(hits) is valid on Python 2 and 3 alike; 'print hits' is 2-only.
    print(hits)
|
# Create your views here.
'''
Uma view é um “tipo” de página Web em sua aplicação Django que em geral serve a uma função específica e tem um template específico.
Por exemplo,
=> em uma aplicação de blog, você deve ter as seguintes views:
-Página inicial do blog - exibe os artigos mais recentes.
-Página de “detalhes” - página de vínculo estático permanente para um único artigo.
-Página de arquivo por ano - exibe todos os meses com artigos para um determinado ano.
-Página de arquivo por mês - exibe todos os dias com artigos para um determinado mês.
-Página de arquivo por dia - exibe todos os artigos de um determinado dia.
-Ação de comentários - controla o envio de comentários para um artigo.
-Em nossa aplicação de enquetes, nós teremos as seguintes views:
Página de “índice” de enquetes - exibe as enquetes mais recente.
Question “detail” page – displays a question text, with no results but with a form to vote.
Página de “resultados” de perguntas - exibe os resultados de uma pergunta em particular.
Ação de voto - gerencia a votação para uma escolha particular em uma enquete em particular.'''
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic
from .models import Question, Choice
from django.utils import timezone
#from django.template import loader
#from django.http import Http404
# views genéricas: Menos código é melhor
"""
Nas partes anteriores deste tutorial, os templates tem sido fornecidos com um contexto que contém as variáveis question e latest_question_list. Para a DetailView a variavel question é fornecida automaticamente – já que estamos usando um modelo Django (Question), Django é capaz de determinar um nome apropriado para a variável de contexto. Contudo, para ListView, a variável de contexto gerada automaticamente é question_list. Para sobrescrever nós fornecemos o atributo context_object_name, especificando que queremos usar latest_question_list no lugar. Como uma abordagem alternativa, você poderia mudar seus templates para casar o novo padrão das variáveis de contexto – mas é muito fácil dizer para o Django usar a variável que você quer.
"""
class IndexView(generic.ListView):
    """List the five most recently published questions."""

    template_name = 'polls/home.html'
    # Expose the queryset to the template under this name instead of the
    # auto-generated ``question_list``.
    context_object_name = 'latest_question_list'

    def get_queryset(self):
        """Return the last five questions published up to now."""
        published = Question.objects.filter(pub_date__lte=timezone.now())
        return published.order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
    # Renders 'polls/detail.html'; DetailView derives the `question`
    # context variable name automatically from the model.
    model = Question
    template_name = 'polls/detail.html'

    def get_queryset(self):
        """
        Excludes any questions that aren't published yet.
        """
        return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
    # Shows the vote tallies for a single question.
    # NOTE(review): unlike DetailView, this does not filter out questions
    # with a future pub_date — confirm whether that is intended.
    model = Question
    template_name = 'polls/results.html'
def vote(request, question_id):
    """Record one vote on *question_id* and redirect to its results page.

    If no valid choice was submitted, redisplay the detail form with an
    error message instead.
    """
    question = get_object_or_404(Question, pk=question_id)
    try:
        choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Missing or unknown choice: redisplay the voting form.
        context = {
            'question': question,
            'error_message': "You didn't select a choice.",
        }
        return render(request, 'polls/detail.html', context)
    choice.votes += 1
    choice.save()
    # Always redirect after successfully handling POST data so that the
    # Back button / refresh cannot submit the vote twice.
    return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
'''
def index(request):
latest_question_list = Question.objects.order_by('-pub_date')[:5]
template = loader.get_template('polls/index.html')
context = {
'latest_question_list': latest_question_list,
}
return HttpResponse(template.render(context, request))
=> isto é equivalente a função index acima
def index(request):
latest_question_list = Question.objects.order_by('-pub_date')[:5]
context = {'latest_question_list': latest_question_list}
return render(request, 'polls/index.html', context
'''
'''
def detail(request, question_id): # cada função é uma pagina web
return HttpResponse("You're looking at question %s." % question_id)'''
'''
def detail(request, question_id):
try:
question = Question.objects.get(pk=question_id)
except Question.DoesNotExist:
raise Http404("Question does not exist")
return render(request, 'polls/detail.html', {'question': question})'''
#=> método pythônico de levantar o erro 404
'''
def detail(request, question_id):
question = get_object_or_404(Question, pk=question_id)
return render(request, 'polls/detail.html', {'question': question}) '''
'''
def results(request, question_id):
question = get_object_or_404(Question, pk=question_id)
return render(request, 'polls/results.html', {'question': question}) '''
'''
def index(request):
return HttpResponse("Hello, world. You're at the polls index.")
#Esta é a view mais simples possível no Django. Para chamar a view, nós temos que mapear a URL
# - e para isto nós precisamos de uma URLconf.
#Para criar uma URLconf no diretório polls, crie um arquivo chamado urls.py'''
|
# Guímel Madrigal Uecker
# B54060
#
# BPSK modulation of a 10k-bit stream over an AWGN channel: build the
# carrier, modulate the bits, add noise at several SNRs, decode by
# correlation and plot BER vs SNR.
import numpy as np
from scipy import stats
from scipy import signal
from scipy import integrate
import matplotlib.pyplot as plt

# --- Read the transmitted bits as a NumPy array --------------------------
# BUG FIX: `bits` used to be a plain list, so `np.zeros(bits.shape)` below
# raised AttributeError; converting once here fixes that and lets the
# error count (`bits - bitsRx`) run vectorized.
with open("bits10k.csv") as bit_file:
    bits = np.array([int(line) for line in bit_file])

# Parte1 ------------------------------------------------------------------
f = 5000                # carrier frequency (Hz)
N = len(bits)           # number of bits
T = 1 / f               # duration of one symbol period (s)
p = 50                  # sample points per period
tp = np.linspace(0, T, p)            # sample instants over one period
sinus = np.sin(2 * np.pi * f * tp)   # carrier waveform
plt.plot(tp, sinus)
plt.xlabel('Tiempo (ms)')
plt.title('Onda portadora')
plt.savefig('Onda Portadora')

fs = p / T                           # sampling frequency
t = np.linspace(0, N * T, N * p)     # time axis for the whole Tx signal
senal = np.zeros(t.shape)            # modulated-signal buffer

# BPSK modulation: bit 1 -> +carrier, bit 0 -> -carrier.
for k, b in enumerate(bits):
    senal[k * p:(k + 1) * p] = sinus if b else -sinus

# Preview of the first pb modulated bits.
pb = 10
tp = np.linspace(0, pb * T, pb * p)
plt.figure()
plt.plot(tp, senal[0:pb * p])
plt.title("Muestra de los primeros " + str(pb) + " periodos de la señal modulada")
plt.ticklabel_format(axis="x", style="sci", scilimits=(0, 0))
plt.savefig('Primeros bits Modulados')
plt.show()

# Parte2 ------------------------------------------------------------------
Pinst = senal**2                             # instantaneous power
Ps = integrate.trapz(Pinst, t) / (N * T)     # average power (W)
print("La potencia promedio de la señal modulada generada tiene un valor numérico de:", Ps)

# Parte3 ------------------------------------------------------------------
# Power spectral density (Welch) before the noisy channel.
fw, PSD = signal.welch(senal, fs)
plt.figure()
plt.semilogy(fw, PSD)
plt.savefig("Densidad espectral de potencia")
plt.show()

BER = []                        # one BER value per tested SNR
snrVals = list(range(-2, 4))    # SNRs to test (dB), 1 dB apart
Es = np.sum(sinus**2)           # pseudo-energy of the carrier period
                                # (loop invariant, hoisted out of the loop;
                                # unused by the sign-based decision below)

for SNR in snrVals:
    nameRxPlot = "Ruido" + str(SNR)
    Pn = Ps / (10**(SNR / 10))          # noise power for this SNR
    sigma = np.sqrt(Pn)                 # noise standard deviation
    ruido = np.random.normal(0, sigma, senal.shape)
    Rx = senal + ruido                  # received signal

    # Preview of the first received bits.
    plt.figure()
    plt.plot(tp, Rx[0:pb * p])
    plt.ticklabel_format(axis="x", style="sci", scilimits=(0, 0))
    plt.title("Forma de Rx con SNR = " + str(SNR) + " dB")
    plt.savefig("Forma de Rx con SNR = " + str(SNR) + " dB" + nameRxPlot)
    plt.show()

    # Decode each symbol interval by correlating with the carrier;
    # a positive correlation means bit 1.
    bitsRx = np.zeros(N)
    for k in range(N):
        Ep = np.sum(Rx[k * p:(k + 1) * p] * sinus)
        bitsRx[k] = 1 if (Ep > 0) else 0

    # Parte4: power spectral density after the noisy channel.
    fw, PSD = signal.welch(Rx, fs)
    plt.figure()
    plt.semilogy(fw, PSD)
    plt.title("Densidad espectral de potencia con SNR de " + str(SNR) + " dB")
    plt.savefig("Densidad espectral de potencia con SNR de " + str(SNR) + " dB")
    plt.show()

    # Parte5: count bit errors in the received stream.
    err = np.sum(np.abs(bits - bitsRx))
    BER.append(err / N)
    for n in range(0, 11):
        print(bitsRx[n])

# Parte6 ------------------------------------------------------------------
# BER vs SNR curve over every tested SNR.
plt.figure()
plt.plot(snrVals, BER)
plt.xlabel("SNR (dB)")
plt.ylabel("BER")
plt.title("Bit Error Rate en función del SNR")
plt.savefig("Bit Error Rate en función del SNR")
# BUG FIX: `plt.show` was referenced without calling it, so the final
# figure was never displayed.
plt.show()
|
# -*- coding: utf-8 -*-
from collections import deque
from typing import List
class Solution:
    """Baseball-game scorer (LeetCode 682).

    Each op is an integer string (record that score), "+" (record the sum
    of the previous two scores), "D" (record double the previous score) or
    "C" (cancel the previous score).  Returns the sum of all valid scores.
    """

    def calPoints(self, ops: List[str]) -> int:
        scores = deque()
        for token in ops:
            if token == "C":
                scores.pop()
            elif token == "D":
                scores.append(2 * scores[-1])
            elif token == "+":
                scores.append(scores[-1] + scores[-2])
            else:
                scores.append(int(token))
        return sum(scores)
if __name__ == "__main__":
    # Smoke test with the two LeetCode 682 examples; runs only when the
    # file is executed directly, not when imported.
    solution = Solution()
    assert 30 == solution.calPoints(["5", "2", "C", "D", "+"])
    assert 27 == solution.calPoints(["5", "-2", "4", "C", "D", "9", "+", "+"])
|
from django.views.generic import ListView
from django.views.generic.edit import FormView, UpdateView
from django.shortcuts import HttpResponseRedirect, get_object_or_404
from django.core.urlresolvers import reverse
from guardian.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import login_required
from authtools.forms import UserCreationForm
from guardian.models import Group
from accounts.models import User
from accounts.forms import ProfileForm
class AddUser(LoginRequiredMixin, FormView):
    """Render and process the user-creation form (login required)."""
    form_class = UserCreationForm
    model = User
    template_name = 'accounts/add_user.html'
    success_url = '/'

    def form_valid(self, form):
        # Persist the new user, then redirect to success_url.
        form.save()
        return super(AddUser, self).form_valid(form)
class Members(LoginRequiredMixin, ListView):
    """Alphabetical list of all users (login required)."""
    model = User
    template_name = 'accounts/user_list.html'

    def get_queryset(self):
        # Identical queryset to User.objects.all().order_by('name');
        # the redundant .all() is dropped.
        return User.objects.order_by('name')
class ProfileUpdate(LoginRequiredMixin, UpdateView):
    """Let the logged-in user edit their own profile."""
    template_name = 'accounts/update_profile.html'
    form_class = ProfileForm
    model = User
    success_url = '/'

    def get_object(self):
        # Always edit the requesting user's own record, never someone else's.
        return get_object_or_404(User, pk=self.request.user.pk)
@login_required
def promote(request, pk):
    """Add the user with primary key *pk* to the 'manager' group.

    Redirects to the members list.  Uses get_object_or_404 so an unknown
    pk yields a 404 instead of an unhandled User.DoesNotExist (HTTP 500).
    """
    user = get_object_or_404(User, pk=pk)
    g = Group.objects.get(name='manager')
    g.user_set.add(user)
    return HttpResponseRedirect(reverse('accounts:members'))
@login_required
def demote(request, pk):
    """Remove the user with primary key *pk* from the 'manager' group.

    Redirects to the members list.  Uses get_object_or_404 so an unknown
    pk yields a 404 instead of an unhandled User.DoesNotExist (HTTP 500).
    """
    user = get_object_or_404(User, pk=pk)
    g = Group.objects.get(name='manager')
    g.user_set.remove(user)
    return HttpResponseRedirect(reverse('accounts:members'))
|
# Generated by Django 3.2.4 on 2021-07-15 05:17
from django.db import migrations
class Migration(migrations.Migration):
    """Rename Statement.statement_categories to statement_category (singular)."""

    dependencies = [
        ('database', '0012_statement_unique_statement'),
    ]

    operations = [
        migrations.RenameField(
            model_name='statement',
            old_name='statement_categories',
            new_name='statement_category',
        ),
    ]
|
# For each room read a line "XXX... <people>", where every 'X' is a chair.
# Report rooms that are short of chairs; if none are, report how many
# chairs remain free overall.
rooms = int(input())
spare = 0
enough_everywhere = True
for room_no in range(1, rooms + 1):
    tokens = input().split()
    guests = int(tokens[-1])
    seats = tokens[0].count("X")
    if seats >= guests:
        spare += seats - guests
    else:
        enough_everywhere = False
        print(f"{guests - seats} more chairs needed in room {room_no}")
if enough_everywhere:
    print(f"Game On, {spare} free chairs left")
|
from django.contrib import admin
from .models import *
from django.forms import Textarea
class CommentAdmin (admin.ModelAdmin):
    """Admin configuration for Comment: compact text widgets, list columns,
    search fields and a date filter."""
    # Shrink every TextField widget so long comments stay readable in forms.
    formfield_overrides = {
        models.TextField: {'widget': Textarea(
            attrs={
                'rows': 3,
                'cols': 50,
                'style': 'height: 3.5em;'})}}
    list_display = ["id", "client_commit", "rating", "product_commit", "r_commit", "dates_commit"]
    search_fields = ["id", "client_commit", "product_commit", "r_commit", "dates_commit"]
    list_filter = ["dates_commit"]

    class Meta:
        # NOTE(review): ModelAdmin does not read an inner Meta class, so
        # this appears to have no effect — confirm before relying on it.
        model = Comment


admin.site.register(Comment, CommentAdmin)
|
import os
import random
import cv2
import ffmpeg
from sqlalchemy import ForeignKey
from family_foto.config import BaseConfig
from family_foto.models import db
from family_foto.models.file import File
from family_foto.utils.image import resize
class Video(File):
    """
    Class of the video entity.
    """
    id = db.Column(db.Integer, ForeignKey('file.id'), primary_key=True)

    __mapper_args__ = {
        'polymorphic_identity': 'video'
    }

    @property
    def meta(self):
        """
        Returns the meta data of the video.
        """
        return ffmpeg.probe(self.path)

    @property
    def path(self):
        """
        Returns path to video file.
        """
        return f'{BaseConfig.UPLOADED_VIDEOS_DEST}/{self.filename}'

    def thumbnail(self, width: int, height: int):
        """
        Returns a still frame with play logo on top.
        :param width: thumbnail width in pixel
        :param height: thumbnail height in pixel (aspect ratio will be kept)
        :raises IOError: if a frame cannot be read or the still cannot be written
        """
        video = cv2.VideoCapture(self.path)
        try:
            # BUG FIX: CAP_PROP_FRAME_COUNT is a float, which randint
            # rejects, and valid frame indices run 0 .. frame_count - 1
            # (the original could overshoot by one frame).
            frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
            video.set(cv2.CAP_PROP_POS_FRAMES, random.randint(0, max(frame_count - 1, 0)))
            ok, frame = video.read()
            if not ok:
                # Previously a failed read passed None to imwrite.
                raise IOError(f'could not read a frame from {self.path}')
            path = f'{BaseConfig.RESIZED_DEST}/{width}_{height}_{self.filename}.jpg'
            os.makedirs(BaseConfig.RESIZED_DEST, exist_ok=True)
            if not cv2.imwrite(path, frame):
                raise IOError(f'could not write {path}')
            path = resize(path, self.filename, width, height)
        finally:
            # Release the capture even when reading/writing fails.
            video.release()
            cv2.destroyAllWindows()
        return path.lstrip('.')
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import fractions

Fraction = fractions.Fraction

# Exact rational arithmetic: 1/3 and 4/6 (auto-reduced to 2/3).
a = Fraction(1, 3)
b = Fraction(4, 6)
print(a)
print(b)
print(a + b)
print(a - b)

# Fractions can be parsed exactly from decimal strings.
c = Fraction('0.25')
print(c)

# float -> Fraction via as_integer_ratio(); the result reflects the binary
# float value, so some precision loss from the float remains.
d = 2.55
e = Fraction(*d.as_integer_ratio())
print('2.55.as_integer_ratio()后:\n\t', e)
print('fractions.Fraction(\'2.55\'):\n\t', fractions.Fraction('2.55'))

# Cap the denominator (here at 20) to get the closest simple fraction.
print('限制分母最大值为20:\n\t', e.limit_denominator(20))
|
from django.contrib.auth.models import User
from rest_framework import serializers
from .models import OtherUser, Category, Item, ItemImageAndVideos, Offers, Searches, Message, Notifications, ShipmentDetails, ContactUs
class UserSerializer(serializers.ModelSerializer):
    """Serializer for Django's built-in User.

    NOTE(review): 'password' is included in the serialized fields, so it is
    exposed in API responses and accepted as plain input — confirm it should
    be write_only and hashed on create.
    """
    class Meta:
        model = User
        fields = ('id', 'first_name', 'last_name', 'email', 'password')


class OtherUserSerializer(serializers.ModelSerializer):
    """Serializes every field of OtherUser."""
    class Meta:
        model = OtherUser
        fields = ('__all__')


class CategorySerializer(serializers.ModelSerializer):
    """Serializes every field of Category."""
    class Meta:
        model = Category
        fields = ('__all__')


class ItemSerializer(serializers.ModelSerializer):
    """Serializes every field of Item."""
    class Meta:
        model = Item
        fields = ('__all__')


class ItemImageAndVideosSerializer(serializers.ModelSerializer):
    """Serializes every field of ItemImageAndVideos."""
    class Meta:
        model = ItemImageAndVideos
        fields = ('__all__')


class OffersSerializer(serializers.ModelSerializer):
    """Serializes every field of Offers."""
    class Meta:
        model = Offers
        fields = ('__all__')


class SearchesSerializer(serializers.ModelSerializer):
    """Serializes every field of Searches."""
    class Meta:
        model = Searches
        fields = ('__all__')


class MessageSerializer(serializers.ModelSerializer):
    """Serializes every field of Message."""
    class Meta:
        model = Message
        fields = ('__all__')


class NotificationsSerializer(serializers.ModelSerializer):
    """Serializes every field of Notifications."""
    class Meta:
        model = Notifications
        fields = ('__all__')


class ShipmentDetailsSerializer(serializers.ModelSerializer):
    """Serializes every field of ShipmentDetails."""
    class Meta:
        model = ShipmentDetails
        fields = ('__all__')


class contactUsSerializer(serializers.ModelSerializer):
    """Serializes every field of ContactUs.

    NOTE(review): name breaks PascalCase; kept as-is because renaming would
    break existing imports elsewhere.
    """
    class Meta:
        model = ContactUs
        fields = ('__all__')
|
# Generated by Django 3.0.3 on 2021-04-01 21:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Resource.category an optional choice field with the fixed
    category list (FOOD .. OTHER)."""

    dependencies = [
        ('api', '0010_merge_20210401_1845'),
    ]

    operations = [
        migrations.AlterField(
            model_name='resource',
            name='category',
            field=models.CharField(blank=True, choices=[('FOOD', 'Food'), ('HOUSING', 'Housing'), ('COMM_GIVE', 'Community Giveaways'), ('MENTAL_HEALTH', 'Mental Health'), ('INFO', 'Info Sessions/Webinars'), ('EVENTS', 'Events'), ('WIFI', 'Free Wifi'), ('OTHER', 'Other')], max_length=30, null=True),
        ),
    ]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 6 11:34:33 2020
@author: ramakrishnadevarakonda
"""
import csv
from pathlib import Path
# Tally votes from election_data.csv and write/print the results.
# FIX: the original built vote counts with side-effect list comprehensions
# and re-scanned every row once per candidate (O(candidates * rows));
# a single-pass Counter keeps the same output, including candidate order
# (first appearance) and the same winner tie-breaking as max(d, key=d.get).
from collections import Counter

input_file = Path('Resources', 'election_data.csv')
output_file = Path('Analysis', 'Election_Result.txt')

with open(output_file, 'w') as file:
    with open(input_file, 'r') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        next(csv_reader)  # skip the header row
        # candidate -> number of votes, in order of first appearance
        vote_counts = Counter(row[2] for row in csv_reader)
    total_votes = sum(vote_counts.values())

    print('Election Results')
    print('----------------')
    print('Total Votes: ' + str(total_votes))
    print('--------------------')
    file.write('Election Results')
    file.write('\n')
    file.write('-------------------')
    file.write('\n')
    file.write('Total Votes: {}'.format(total_votes))
    file.write('\n')
    file.write('-------------------')
    file.write('\n')

    # Per-candidate percentage (3 decimals) and absolute vote count.
    for candidate, votes in vote_counts.items():
        line = f"{candidate}: {(votes / total_votes * 100):.3f}% ({votes})"
        print(line)
        file.write(line)
        file.write('\n')

    winner = max(vote_counts, key=vote_counts.get)
    print('-----------------------')
    file.write('-------------------')
    file.write('\n')
    print('Winner: ' + winner)
    print('-----------------')
    file.write('Winner: ' + winner)
    file.write('\n')
    file.write('-------------------')
|
#!/usr/bin/python3
jegy = input("Adj meg egy számot 1 és 5 között! ")
jegy = int(jegy)
if jegy == 5 and jegy != 4 and jegy != 3 and jegy != 2 and jegy != 1:
print(int(jegy), " jeles", sep= "")
if jegy != 5 and jegy == 4 and jegy != 3 and jegy != 2 and jegy != 1:
print(int(jegy), " jó", sep= "")
if jegy != 5 and jegy != 4 and jegy == 3 and jegy != 2 and jegy != 1:
print(int(jegy), " közepes", sep= "")
if jegy != 5 and jegy != 4 and jegy != 3 and jegy == 2 and jegy != 1:
print(int(jegy), " elégséges", sep= "")
if jegy != 5 and jegy != 4 and jegy != 3 and jegy != 2 and jegy == 1:
print(int(jegy), " elégtelen", sep= "")
|
# Python program to check whether a user-supplied year is a leap year.
# Solution:
year = int(input("Enter the year to check leap year: "))  # e.g. 2000
def year_check(year):
    """Print and return whether *year* is a Gregorian leap year.

    A year is a leap year when it is divisible by 4, except century years,
    which must also be divisible by 400.  The message is printed exactly as
    before and additionally returned (backward-compatible) so callers and
    tests can inspect it.
    """
    # Single boolean expression equivalent to the original nested
    # % 4 / % 100 / % 400 checks.
    if (year % 4 == 0 and year % 100 != 0) or year % 400 == 0:
        message = "{0} is a leap year".format(year)
    else:
        message = "{0} is not a leap year".format(year)
    print(message)
    return message
# Run the check on the year read above.
year_check(year)
#Output:
'''Enter the year to check leap year: 2000
2000 is a leap year
Process finished with exit code 0
'''
'''Enter the year to check leap year: 2017
2017 is not a leap year
Process finished with exit code 0'''
class Animal(object):
    """Base animal with a name, an age and a (silent) sound."""

    # NOTE(review): MATURE_AGE is never read; is_mature() hard-codes 18 —
    # confirm which threshold is intended.
    MATURE_AGE = 5
    # Class-level defaults, overwritten per instance in __init__.
    name = None
    age = 0

    def __init__(self, name, age=6):
        self.name = name
        self.age = age

    def __str__(self):
        return f"name: {self.name}, age: {self.age}"

    def is_mature(self):
        # Same threshold as before, as a single boolean expression.
        return self.age >= 18

    def get_sound(self):
        # Base animals make no sound; subclasses override this.
        return None
class Dog(Animal):
    """Animal that barks; its str() is prefixed with the species."""

    def __str__(self):
        # BUG FIX: the method was misspelled `__srr__`, so Python never
        # invoked it and Dog instances printed like plain Animals.
        animal_data = super().__str__()
        return f"species: DOG, {animal_data}"

    def get_sound(self):
        return "hau"
class Cat(Animal):
    """Animal that meows."""
    def get_sound(self):
        # Sound made by cats.
        return "meow"
|
import socket
import threading
def re_msg(conn, addr):
    """Per-client thread: chat loop over *conn* until the peer sends 'fin'.

    Receives UTF-8 messages, prints them, and replies with a line typed on
    stdin (prefixed with the operator's name).
    """
    while True:
        print('message form ', addr)
        msg = conn.recv(1024)
        msg = msg.decode('utf8')
        print(msg)
        if msg == 'fin':
            conn.send(b'disconnect ')
            # BUG FIX: the original called server.close(), closing the
            # *listening* socket and killing the accept loop for every
            # client; only this client's connection should be closed.
            conn.close()
            break
        remsg = input('type your msg')
        remg = '张泽皓:{}'.format(remsg)
        conn.send(remg.encode('utf8'))
# Listen on all interfaces, port 8812, and spawn one handler thread per
# accepted client connection.
ip = '0.0.0.0'
port = 8812
address = (ip, port)
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(address)
server.listen(5)  # backlog of up to 5 pending connections
while True:
    conn, addr = server.accept()
    threading.Thread(target=re_msg, args=(conn, addr)).start()
import preprocess
import numpy as np
import forward_pass
# Sample matrices kept from the original experiment (not used by the
# computation below, which only touches `wst`).
x1 = np.array([[2, 2, 3, 4, 5],
               [1, 3, 3, 4, 5],
               [1, 2, 4, 4, 5],
               [1, 2, 3, 5, 5]])
x2 = np.array([[1, 2, 3, 4, 5],
               [1, 2, 3, 4, 5],
               [1, 2, 3, 4, 5],
               [1, 2, 3, 4, 5]])

# Layer widths; entry looks like cumulative (weights + biases) offsets per
# layer boundary — TODO confirm against preprocess/forward_pass.
wst = np.array([1, 20, 20, 20, 13])
entry = np.cumsum(wst[:-1] * wst[1:] + wst[:-1])
print(entry)
import warnings
warnings.filterwarnings('ignore')
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import scipy.stats as stats
color = sns.color_palette()
#get_ipython().run_line_magic('matplotlib', 'inline')
# sets the backend of matplotlib as inline backend.
# Create dataframes and load all the data files.
# NOTE(review): absolute Windows paths make this machine-specific —
# consider relative paths or a configuration value.
orders_df = pd.read_csv(r'C:\Users\Admin\Contacts\Desktop\sangita\Project and Data Sets\Data\orders.csv').drop(['Unnamed: 0'], axis=1)
prior_df = pd.read_csv(r'C:\Users\Admin\Contacts\Desktop\sangita\Project and Data Sets\Data\order_products_prior.csv')
train_df = pd.read_csv(r'C:\Users\Admin\Contacts\Desktop\sangita\Project and Data Sets\Data\order_products_train.csv').drop(['Unnamed: 0'], axis=1)
products_df = pd.read_csv(r'C:\Users\Admin\Contacts\Desktop\sangita\Project and Data Sets\Data\products.csv')
aisles_df = pd.read_csv(r'C:\Users\Admin\Contacts\Desktop\sangita\Project and Data Sets\Data\aisles.csv', index_col=0)
departments_df = pd.read_csv(r'C:\Users\Admin\Contacts\Desktop\sangita\Project and Data Sets\Data\departments.csv')
test_df = pd.read_csv(r'C:\Users\Admin\Contacts\Desktop\sangita\Project and Data Sets\Data\order_products_test.csv').drop(['Unnamed: 0'], axis=1)
#count all the value of each eval_set
orders_df['eval_set'].value_counts()
unique_users = orders_df.groupby("eval_set")["user_id"].nunique()
unique_users
# nunique return count
# 131209 unique users
# Last order of each user is divided into train and test.
# Training contains 101209 orders and testing contains 30000 orders.
# BUG FIX: `orders_df[''].boxplot()` selected a non-existent empty-named
# column (KeyError) and a Series has no .boxplot(); the column name was
# apparently never filled in. Disabled until it is specified, e.g.:
# orders_df['days_since_prior_order'].plot.box()
# In[9]:
orders_per_user = orders_df.groupby("user_id")["order_number"].max().reset_index()
orders_per_user
orders_per_user = orders_per_user['order_number'].value_counts()
orders_per_user
plt.figure(figsize=(12, 6))
sns.barplot(orders_per_user.index, orders_per_user.values, alpha=0.8, color='blue')
plt.ylabel('Number of Customers', fontsize=12)
plt.xlabel('Number of Orders ', fontsize=12)
plt.title('Prior Orders Per Customer', fontsize=15)
plt.xticks(rotation='vertical')
plt.show()
# Around 15300 users made order for 4 times
# Decrease in the number of customers as the number of order increases
# With maximum orders capped to 100.
# In[84]:
products_per_order = prior_df.groupby("order_id")["add_to_cart_order"].max().reset_index()
products_per_order
products_per_order = products_per_order['add_to_cart_order'].value_counts()
plt.figure(figsize=(8, 6))
sns.barplot(products_per_order.index, products_per_order.values, color='orange')
plt.ylabel('Number of Occurrences', fontsize=12)
plt.xlabel('Products', fontsize=12)
plt.title('Number of Products Per Order', fontsize=15)
plt.xlim(xmax=50)
plt.xticks(rotation='vertical')
plt.show()
# Max Products per customer order is 5
# MEdian - 8
# In[54]:
def make_day(x):
    """Map an `order_dow` code (0-6) to a weekday name.

    Codes 0-5 map Saturday..Thursday; anything else falls back to 'Friday'.
    """
    days = {
        0: 'Saturday',
        1: 'Sunday',
        2: 'Monday',
        3: 'Tuesday',
        4: 'Wednesday',
        5: 'Thursday',
    }
    return days.get(x, 'Friday')
# In[55]:
orders_df['order_dow'] = orders_df['order_dow'].map(make_day)
dow = orders_df.groupby('order_dow')[['order_id']].count().reset_index().sort_values('order_id', ascending=False)
plt.figure(figsize=(7, 7))
sns.barplot(x='order_dow', y='order_id', data=dow, color='green')
plt.ylabel('Total Orders', fontsize=12)
plt.xlabel('Day of week', fontsize=12)
plt.title("Total Orders by Day of Week", fontsize=15)
plt.show()
# Considering
# 0 - Saturday
# 1 - Sunday
# Max orders are made on Saturday and Sunday
# In[56]:
# No of Orders by Hour
plt.figure(figsize=(8, 6))
sns.countplot(x="order_hour_of_day", data=orders_df, color='blue')
plt.ylabel('Number of Orders', fontsize=12)
plt.xlabel('Hour of Day', fontsize=12)
plt.title("Number of Orders by Hour", fontsize=15)
plt.show()
# Max orders are made during late morning (10 - 11)&
# In the afternoon from (1 - 4)
# In[85]:
# Total Orders by Days Since Prior Order
sample = orders_df[orders_df['days_since_prior_order'].notnull()]
sample['days_since_prior_order'] = sample['days_since_prior_order'].map(lambda x: int(x))
plt.figure(figsize=(7, 7))
sns.countplot(x="days_since_prior_order", data=sample, color='purple')
plt.ylabel('Total Orders', fontsize=12)
plt.xlabel('Days Since Prior order', fontsize=12)
plt.xticks(rotation='vertical')
plt.title("Total Orders by Days Since Prior Order", fontsize=15)
plt.show()
# In[86]:
duplicates = prior_df.groupby(['order_id', 'product_id'])[['product_id']].count()
duplicates.columns = ['count']
duplicates = duplicates.reset_index()
print('Number of instances of an item having a quanity greater than one in an order: ' +
str(len(duplicates[duplicates['count'] > 1])))
# In[87]:
# Most Ordered Products
opp = pd.merge(prior_df, products_df, on='product_id', how='inner')
opp = pd.merge(opp, departments_df, on='department_id', how='inner')
opp = pd.merge(opp, aisles_df, on='aisle_id', how='inner')
# In[88]:
dept_freq = opp['department'].value_counts().head(5)
plt.figure(figsize=(8, 6))
ax = sns.barplot(dept_freq.index, dept_freq.values, color='red')
plt.ticklabel_format(style='plain', axis='y', scilimits=(0, 0))
plt.ylabel('Products Sold', fontsize=12)
plt.xlabel('Departments', fontsize=12)
plt.title('Products Sold By Highest Volume Departments', fontsize=15)
plt.show()
# In[89]:
aisle_freq = opp['aisle'].value_counts().head(5)
plt.figure(figsize=(8, 6))
sns.barplot(aisle_freq.index, aisle_freq.values, alpha=0.8)
plt.ylabel('Number of Orders', fontsize=12)
plt.xlabel('Aisle', fontsize=12)
plt.title('Products Sold By Highest Volume Aisles', fontsize=15)
plt.xticks(rotation='vertical')
plt.show()
# In[90]:
opp_orders = pd.merge(opp, orders_df, on='order_id', how='inner')
prod_orders = opp_orders.groupby('product_id')[['order_id']].count().reset_index()
prod_orders.columns = ['product_id', 'prod_orders']
prod_orders = pd.merge(prod_orders, products_df, on='product_id', how='inner')
prod_orders = pd.merge(prod_orders, departments_df, on='department_id', how='inner')
prod_orders.head(1)
# In[91]:
plt.figure(figsize=(8, 8))
dept_list = ['dairy eggs', 'snacks', 'beverages', 'frozen', 'pantry', 'bakery', 'produce']
mask = prod_orders['department'].isin(dept_list)
ax = sns.stripplot(x="department", y="prod_orders", data=prod_orders[mask], jitter=True)
ax.set(xlabel='Department', ylabel='Product Orders', title='Product Orders By Department')
plt.ylim(ymin=0)
plt.ylim(ymax=50000)
plt.show()
# In[92]:
most_ordered = prod_orders[['product_name', 'prod_orders']].sort_values('prod_orders', ascending=False).head(15)
ax = sns.barplot(y='product_name', x='prod_orders', data=most_ordered, color='crimson')
ax.set(xlabel='Total Orders', ylabel='Products', title='Most Ordered Products')
plt.show()
# In[93]:
print('Unique Products: ' + str(products_df['product_id'].nunique()))
print('Median Product Orders: ' + str(prod_orders['prod_orders'].median()))
print('Mean Product Orders: ' + str(prod_orders['prod_orders'].mean()))
# In[94]:
# Overwrite the last row with a synthetic "None" product (id 49699), used
# later to mark orders containing no reordered items.
# BUG FIX: the original chained-indexing assignment
# (products_df['col'][49688] = ...) raises SettingWithCopyWarning and
# silently stops updating the frame under pandas copy-on-write; .loc
# performs the same label-based assignment safely.
products_df.loc[49688, 'product_id'] = 49699
products_df.loc[49688, 'product_name'] = 'None'
products_df.loc[49688, 'aisle_id'] = 100
products_df.loc[49688, 'department_id'] = 21
# In[96]:
prior = pd.merge(prior_df, orders_df, on='order_id', how='inner')
no_reorders = prior[prior['order_number'] != 1].groupby('order_id')[['reordered']].sum().reset_index()
no_reorders = no_reorders[no_reorders['reordered'] == 0]['order_id'].unique()
prior_sub = prior[prior['order_id'].isin(no_reorders)].drop('add_to_cart_order', axis=1)
prior_sub['product_id'] = 49699
prior = pd.concat([prior.drop('add_to_cart_order', axis=1), prior_sub.drop_duplicates()], ignore_index=True)
# In[97]:
prod_by_user = prior.groupby(['user_id', 'product_id'])[['order_id']].count().reset_index()
prod_by_user.columns = ['user_id', 'product_id', 'orders']
single_orders = prod_by_user[prod_by_user['orders'] == 1].groupby('product_id')[['orders']].count().reset_index()
single_orders.columns = ['product_id', 'single']
multiple_orders = prod_by_user[prod_by_user['orders'] > 1].groupby('product_id')[['orders']].count().reset_index()
multiple_orders.columns = ['product_id', 'multiple']
prod_reorder = pd.merge(single_orders, multiple_orders, on='product_id', how='left')
prod_reorder = prod_reorder.fillna(value=0)
prod_reorder['reorder_rate'] = prod_reorder['multiple'] / (prod_reorder['single'] + prod_reorder['multiple'])
prod_reorder[prod_reorder['product_id'] == 49699]
# In[99]:
prods = prod_reorder['product_id'].unique()
print('Products single ordered at least once: ' + str(prod_reorder['product_id'].nunique()))
products_subset = products_df[-products_df['product_id'].isin(prods)]
products_subset['reorder_rate'] = 1
products_subset = products_subset[['product_id', 'reorder_rate']]
print('Products only ever reordered: ' + str(products_subset['product_id'].nunique()))
# In[100]:
prod_reorder = prod_reorder[['product_id', 'reorder_rate']].sort_values('reorder_rate', ascending=False)
prod_reorder = pd.concat([prod_reorder, products_subset], ignore_index=True)
# In[101]:
product_totals = pd.merge(prod_orders, prod_reorder, on='product_id', how='inner')
mask = product_totals['prod_orders'] >= 1000
head = product_totals[mask].sort_values('reorder_rate', ascending=False).head(7)
tail = product_totals[mask].sort_values('reorder_rate').head(7)
# In[102]:
ax = sns.barplot(y='product_name', x='reorder_rate', color='green', data=head)
ax.set(xlabel='reorder rate', ylabel='products', title='Most Reordered Products')
plt.show()
# In[103]:
ax = sns.barplot(y='product_name', x='reorder_rate', hue='department', data=tail)
ax.set(xlabel='reorder rate', ylabel='products', title='Least Reordered Products')
plt.show()
# In[104]:
print('Median reorder rate: ' + str(prod_reorder['reorder_rate'].median()))
print('Mean reorder rate: ' + str(prod_reorder['reorder_rate'].mean()))
# In[106]:
grouped_df = train_df.groupby("order_id")["reordered"].sum().reset_index()
print('Percent of orders with no reordered products in training orders: ' +
str(float(grouped_df[grouped_df['reordered'] == 0].shape[0]) / grouped_df.shape[0]))
# In[107]:
train_orders = orders_df[orders_df['eval_set'] == 'train']
train_days = train_orders.groupby('days_since_prior_order')[['order_id']].count().reset_index()
train_days.columns = ['days_since_prior_order', 'train_orders']
nulls = pd.merge(orders_df, grouped_df[grouped_df['reordered'] == 0], on='order_id', how='inner')
none_df = nulls.groupby('days_since_prior_order')[['order_id']].count().reset_index()
none_df.columns = ['days_since_prior_order', 'none_orders']
none_df = pd.merge(none_df, train_days, on='days_since_prior_order', how='left')
none_df['proportion_of_nones'] = none_df['none_orders'] / none_df['train_orders']
none_df['days_since_prior_order'] = none_df['days_since_prior_order'].map(lambda x: int(x))
# Smooth the noisy 9-21 and 22-29 day ranges by replacing them with their
# medians.
# BUG FIX: the original indexed .loc by the *day values*
# (none_df[mask].days_since_prior_order) instead of the boolean mask, so
# it updated rows whose positional index happened to equal the day number
# rather than the masked rows themselves.
mask = (none_df['days_since_prior_order'] >= 9) & (none_df['days_since_prior_order'] <= 21)
none_df.loc[mask, 'proportion_of_nones'] = none_df.loc[mask, 'proportion_of_nones'].median()
mask = (none_df['days_since_prior_order'] >= 22) & (none_df['days_since_prior_order'] <= 29)
none_df.loc[mask, 'proportion_of_nones'] = none_df.loc[mask, 'proportion_of_nones'].median()
fig, ax = plt.subplots(figsize=(14, 8))
sns.pointplot(y='proportion_of_nones', x='days_since_prior_order', color='blue', data=none_df, ax=ax)
ax.set(xlabel='days since prior order', ylabel='proportion of none orders', title=
'Proportion of Orders With No Reordered Products')
plt.show()
# In[108]:
train_hour = train_orders.groupby('order_hour_of_day')[['order_id']].count().reset_index()
train_hour.columns = ['order_hour_of_day', 'train_orders']
none_hour = nulls.groupby('order_hour_of_day')[['order_id']].count().reset_index()
none_hour.columns = ['order_hour_of_day', 'none_orders']
none_hour = pd.merge(none_hour, train_hour, on='order_hour_of_day', how='left')
none_hour['proportion_of_nones'] = none_hour['none_orders'] / none_hour['train_orders']
fig, ax = plt.subplots(figsize=(14, 8))
sns.pointplot(y='proportion_of_nones', x='order_hour_of_day', color='blue', data=none_hour, ax=ax)
ax.set(xlabel='hour of the day', ylabel='proportion of none orders', title=
'Proportion of Orders With No Reordered Products')
plt.show()
# In[109]:
train_dow = train_orders.groupby('order_dow')[['order_id']].count().reset_index()
train_dow.columns = ['order_dow', 'train_orders']
none_dow = nulls.groupby('order_dow')[['order_id']].count().reset_index()
none_dow.columns = ['order_dow', 'none_orders']
none_dow = pd.merge(none_dow, train_dow, on='order_dow', how='left')
none_dow['proportion_of_nones'] = none_dow['none_orders'] / none_dow['train_orders']
none_dow = none_dow.sort_values('proportion_of_nones', ascending=False)
fig, ax = plt.subplots(figsize=(14, 8))
sns.pointplot(y='proportion_of_nones', x='order_dow', color='blue', data=none_dow, ax=ax)
ax.set(xlabel='day of the week', ylabel='proportion of none orders', title=
'Proportion of Orders With No Reordered Products')
plt.show()
# In[110]:
def plot_none(train_orders_df, first_median_range=None, x_max=None, second_median_range=None,
              days_filter=None, title_add_on=None, lowest_median_num=None):
    """
    Plot the proportion of "None" orders (no reordered products) by order number.

    Noisy order-number ranges are flattened to their median, and order numbers
    4-100 that are absent after filtering are back-filled with the tail median
    so the point plot has no gaps.

    Parameters:
        train_orders_df: orders frame with order_number / days_since_prior_order.
        first_median_range: [lo, hi] order-number range replaced by its median.
        x_max: right limit of the x axis.
        second_median_range: optional second [lo, hi] range to flatten.
        days_filter: None (no filter), int (exact days_since_prior_order match),
            or [lo, hi] range on days_since_prior_order.
        title_add_on: optional suffix appended to the plot title.
        lowest_median_num: int threshold or [lo, hi] range defining the tail
            whose median back-fills missing and trailing order numbers.

    NOTE(review): reads the module-level ``nulls`` frame; assumes callers
    always supply first_median_range and lowest_median_num (they do here).
    """
    if days_filter is None:
        train_total = train_orders_df.groupby('order_number')[['order_id']].count().reset_index()
        none_total = nulls.groupby('order_number')[['order_id']].count().reset_index()
    elif isinstance(days_filter, int):
        mask = train_orders_df['days_since_prior_order'] == days_filter
        train_total = train_orders_df[mask].groupby('order_number')[['order_id']].count().reset_index()
        mask = nulls['days_since_prior_order'] == days_filter
        none_total = nulls[mask].groupby('order_number')[['order_id']].count().reset_index()
    else:
        # BUG FIX: filter the train_orders_df *parameter*, not the module-level
        # train_orders, so the function honours whatever frame it is given.
        mask = (train_orders_df['days_since_prior_order'] >= days_filter[0]) & (
            train_orders_df['days_since_prior_order'] <= days_filter[1])
        train_total = train_orders_df[mask].groupby('order_number')[['order_id']].count().reset_index()
        mask = (nulls['days_since_prior_order'] >= days_filter[0]) & (nulls['days_since_prior_order'] <= days_filter[1])
        none_total = nulls[mask].groupby('order_number')[['order_id']].count().reset_index()
    train_total.columns = ['order_number', 'train_orders']
    none_total.columns = ['order_number', 'none_orders']
    none_total = pd.merge(none_total, train_total, on='order_number', how='left')
    none_total['proportion_of_nones'] = none_total['none_orders'] / none_total['train_orders']
    order_numbers = none_total['order_number'].unique()
    absent_order_numbers = pd.DataFrame(columns=['order_number', 'proportion_of_nones'])
    # Median of the tail region, used to back-fill order numbers with no data.
    if isinstance(lowest_median_num, int):
        lowest_median = none_total[none_total['order_number'] >= lowest_median_num]['proportion_of_nones'].median()
    else:
        mask = (none_total['order_number'] >= lowest_median_num[0]) & (
            none_total['order_number'] <= lowest_median_num[1])
        lowest_median = none_total[mask]['proportion_of_nones'].median()
    for i in range(4, 101):
        if i not in order_numbers:
            absent_order_numbers.loc[i, 'order_number'] = i
            absent_order_numbers.loc[i, 'none_orders'] = np.nan
            absent_order_numbers.loc[i, 'train_orders'] = np.nan
            absent_order_numbers.loc[i, 'proportion_of_nones'] = lowest_median
    none_total = pd.concat([none_total, absent_order_numbers], ignore_index=True)
    # BUG FIX: DataFrame.set_value was deprecated in pandas 0.21 and removed in
    # 1.0; boolean-mask assignment via .loc is the supported equivalent.
    mask = (none_total['order_number'] >= first_median_range[0]) & (none_total['order_number'] <= first_median_range[1])
    none_total.loc[mask, 'proportion_of_nones'] = none_total.loc[mask, 'proportion_of_nones'].median()
    if second_median_range is not None:
        mask = (none_total['order_number'] >= second_median_range[0]) & (
            none_total['order_number'] <= second_median_range[1])
        none_total.loc[mask, 'proportion_of_nones'] = none_total.loc[mask, 'proportion_of_nones'].median()
    # Flatten everything past the tail threshold to the tail median.  (The
    # original chained .sort_values here and discarded the result — a no-op.)
    if isinstance(lowest_median_num, int):
        none_total.loc[none_total['order_number'] >= lowest_median_num, 'proportion_of_nones'] = lowest_median
    else:
        none_total.loc[none_total['order_number'] >= lowest_median_num[0], 'proportion_of_nones'] = lowest_median
    fig, ax = plt.subplots(figsize=(14, 8))
    sns.pointplot(y='proportion_of_nones', x='order_number', color='blue', data=none_total, ax=ax)
    if isinstance(title_add_on, str):
        ax.set(xlabel='total orders', ylabel='proportion of none orders',
               title='Proportion of Orders With No Reordered Products' + title_add_on)
    else:
        ax.set(xlabel='total orders', ylabel='proportion of none orders',
               title='Proportion of Orders With No Reordered Products')
    # BUG FIX: the xmax keyword was removed from matplotlib; use right=.
    plt.xlim(right=x_max)
    plt.show()
# In[111]:
# All orders: smooth order numbers 12-26 and 27-39, flatten >= 40, x-axis capped at 40.
plot_none(train_orders, first_median_range=[12, 26], x_max=40, lowest_median_num=40, second_median_range=[27, 39])
# In[112]:
# Orders placed 0-2 days after the previous one.
plot_none(train_orders, first_median_range=[7, 10], x_max=30, second_median_range=[11, 24], days_filter=[0, 2],
          lowest_median_num=25, title_add_on=', 0-2 Days Since Prior Order')
# In[113]:
# Orders placed 3-29 days after the previous one (positional arg is first_median_range).
plot_none(train_orders, [16, 20], x_max=30, second_median_range=[21, 29], days_filter=[3, 29], lowest_median_num=30,
          title_add_on=', 3-29 Days Since Prior Order')
# In[115]:
# Orders placed exactly 30 days after the previous one; tail median taken over 22-41.
plot_none(train_orders, first_median_range=[13, 21], x_max=30,
          days_filter=30, lowest_median_num=[22, 41], title_add_on=', 30 Days Since Prior Order')
# In[116]:
# Per (user, product): number of prior orders that contained the product.
prod_by_user = prior.groupby(['user_id', 'product_id'])[['order_id']].count().reset_index()
prod_by_user.columns = ['user_id', 'product_id', 'num_ordered']
prod_by_user.head(1)
# In[117]:
# Each user's total prior orders; order_rate = share of the user's orders
# that contained the product.
max_orders = prior.groupby('user_id')[['order_number']].max().reset_index()
max_orders.columns = ['user_id', 'total_orders']
prod_by_user = pd.merge(prod_by_user, max_orders, on='user_id', how='left')
prod_by_user['order_rate'] = prod_by_user['num_ordered'] / prod_by_user['total_orders']
prod_by_user.head(1)
# In[118]:
# Orders elapsed since the product was last bought (0 = in the latest prior order).
last_order = prior.groupby(['user_id', 'product_id'])[['order_number']].max().reset_index()
last_order.columns = ['user_id', 'product_id', 'last_order']
prod_by_user = pd.merge(prod_by_user, last_order, on=['user_id', 'product_id'], how='left')
prod_by_user['orders_since'] = prod_by_user['total_orders'] - prod_by_user['last_order']
prod_by_user.sample(1)
# In[120]:
# Attach the product's global reorder rate; the inner join drops products
# missing from prod_reorder.
prod_reorder_subset = prod_reorder[['product_id', 'reorder_rate']]
prod_by_user = pd.merge(prod_by_user, prod_reorder_subset, on='product_id', how='inner')
prod_by_user[prod_by_user['user_id'] == 4]
# In[121]:
def recently_ordered(prior_df, num_from_last, features_df, new_col):
    """
    Add a 0/1 column flagging products bought in a user's Nth-from-last order.

    Args:
        prior_df: prior orders with user_id, order_number, product_id columns.
        num_from_last: 0 for the most recent order, 1 for the one before, etc.
        features_df: per (user_id, product_id) feature frame to extend.
        new_col: name of the indicator column to add.

    Returns:
        features_df with the extra indicator column (1 = product appeared in
        that order, 0 = it did not).
    """
    # Each user's target order number: max order minus the offset.
    target_orders = (prior_df.groupby('user_id')[['order_number']].max() - num_from_last).reset_index()
    # Products bought in exactly that order, one row per (user, product).
    basket = pd.merge(target_orders, prior_df[['user_id', 'order_number', 'product_id']],
                      on=['user_id', 'order_number'], how='inner')[['user_id', 'product_id']]
    # Left-merge with indicator=True marks matching rows as 'both'.
    flagged = pd.merge(features_df, basket, on=['user_id', 'product_id'],
                       how='left', indicator=True)
    flagged['_merge'] = flagged['_merge'].map(lambda status: 1 if status == 'both' else 0)
    return flagged.rename(columns={'_merge': new_col})
# In[ ]:
# Binary flags: was the product in the user's last 1..5 prior orders?
prod_by_user = recently_ordered(prior, 0, prod_by_user, 'in_last')
prod_by_user = recently_ordered(prior, 1, prod_by_user, 'in_2nd_last')
prod_by_user = recently_ordered(prior, 2, prod_by_user, 'in_3rd_last')
prod_by_user = recently_ordered(prior, 3, prod_by_user, 'in_4th_last')
prod_by_user = recently_ordered(prior, 4, prod_by_user, 'in_5th_last')
prod_by_user.sample(n=1)
# In[ ]:
# Join train/test order metadata onto the features.  order_number - 1 equals
# the user's count of *prior* orders, which is what total_orders holds.
mask = (orders_df['eval_set'] == 'train') | (orders_df['eval_set'] == 'test')
orders_subset = orders_df[mask].drop(['order_id', 'eval_set', 'order_hour_of_day'], axis=1)
orders_subset['order_number'] = orders_subset['order_number'].map(lambda x: x - 1)
orders_subset = orders_subset.rename(columns={'order_number': 'total_orders'})
model = pd.merge(prod_by_user, orders_subset, on=['user_id', 'total_orders'], how='left')
model.sample(n=5)
|
import getopt
import sys
import good_usage
import get_package_info
import check_package

# Command-line front end: -s/--search looks a package up, -c/--check verifies
# it, -h/--help prints usage, -d/--debug turns on debug output.
if not sys.argv[1:]:
    # BUG FIX: the original tested `len(sys.argv[1:]) is 0` — identity
    # comparison against an int literal, which is implementation-dependent.
    good_usage.good_usage()
else:
    try:
        opts, args = getopt.getopt(sys.argv[1:], "s:c:hd", ["search=", "check=", "help", "debug"])
    except getopt.GetoptError:
        # Unknown option or missing argument: exit with the conventional code.
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            good_usage.good_usage()
            sys.exit()
        elif opt in ("-d", "--debug"):
            # BUG FIX: `global` at module level is a no-op; a plain assignment
            # creates the module-level flag.
            _debug = 1
        elif opt in ("-s", "--search"):
            get_package_info.search_package(package=arg)
            sys.exit()
        elif opt in ("-c", "--check"):
            check_package.find_check_package(package=arg)
            sys.exit()
# -*- coding: utf-8 -*-
# pylint: disable=exec-used
"""
Organize Django settings into multiple files and directories.
Easily override and modify settings. Use wildcards and optional
settings files.
"""
import glob
import inspect
import os
import sys
import types
# Public API of this module: the include() engine and the optional() marker.
__all__ = ['optional', 'include']
def optional(filename):
    """
    Mark *filename* as an optional settings file.

    Exists as a function for compatibility: it replaced an old ``optional``
    class of the same (pylint-unfriendly) name.

    Args:
        filename: path or glob pattern that is allowed to be absent.

    Returns: a new :class:`_Optional` wrapping the same string.
    """
    return _Optional(filename)


class _Optional(str):
    """A ``str`` subclass marking a settings path as optional.

    Paths of this type do not raise an :class:`IOError` when no matching
    file exists.
    """
    pass
def include(*args, **kwargs):
    """
    Used for including Django project settings from multiple files.
    Usage::
        from split_settings.tools import optional, include
        include(
            'components/base.py',
            'components/database.py',
            optional('local_settings.py'),
            scope=globals()  # optional scope
        )
    Parameters:
        *args: File paths (``glob`` - compatible wildcards can be used)
        **kwargs: The context for the settings,
            may contain ``scope=globals()`` or be empty
    Raises:
        IOError: if a required settings file is not found
    """
    # we are getting globals() from previous frame
    # globals - it is caller's globals()
    scope = kwargs.pop('scope', inspect.stack()[1][0].f_globals)
    # Track every file already executed so a component is never run twice.
    scope.setdefault('__included_files__', [])
    included_files = scope.get('__included_files__')
    # File currently doing the including; falls back to the scope's __file__
    # with a trailing 'c' stripped so a .pyc path maps back to its .py source.
    including_file = scope.get(
        '__included_file__',
        scope['__file__'].rstrip('c'),
    )
    # Relative patterns are resolved against the including file's directory.
    conf_path = os.path.dirname(including_file)
    for conf_file in args:
        # Remember the current includer so nested include() calls restore it.
        saved_included_file = scope.get('__included_file__')
        pattern = os.path.join(conf_path, conf_file)
        # find files per pattern, raise an error if not found (unless file is
        # optional)
        files_to_include = glob.glob(pattern)
        if not files_to_include and not isinstance(conf_file, _Optional):
            raise IOError('No such file: {}'.format(pattern))
        for included_file in files_to_include:
            included_file = os.path.abspath(included_file)
            if included_file in included_files:
                # Already executed once — skip duplicates.
                continue
            included_files.append(included_file)
            scope['__included_file__'] = included_file
            # Execute the settings component directly in the caller's scope.
            with open(included_file, 'rb') as to_compile:
                exec(compile(to_compile.read(), included_file, 'exec'), scope)
            # add dummy modules to sys.modules to make runserver autoreload
            # work with settings components
            module_name = '_split_settings.{}'.format(
                conf_file[:conf_file.rfind('.')].replace('/', '.'),
            )
            module = types.ModuleType(str(module_name))
            module.__file__ = included_file
            sys.modules[module_name] = module
        if saved_included_file:
            scope['__included_file__'] = saved_included_file
        elif '__included_file__' in scope:
            del scope['__included_file__']
|
from ._title import Title
from plotly.graph_objs.scattergeo.marker.colorbar import title
from ._tickformatstop import Tickformatstop
from ._tickfont import Tickfont
|
# coding: utf-8
# ##AIP Friday September 12
# To do list:
# - events : get information from keyboard and mouse
# - pygame and time
# - open, write and save a data file
# - (if we have time) images
# ###Events
# Here is a little script to help you grasp the way events are coded.
#
# Run the script either from the ipython notebook (mac) or the purely python script (windows or problem)
#
# Any remarks?
# In[14]:
import os
# Position the SDL window before pygame is imported — SDL reads this
# environment variable at initialisation time.
wpos_x = 100
wpos_y = 100
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (wpos_x, wpos_y)
import pygame
from pygame.locals import *
import random

bg_color = (0, 0, 0)
stim_color = (255, 255, 255)
w_width = 640
w_height = 480
# prepare position of vertices for the polygon stimulus, here a triangle
# BUG FIX: integer division (//) — under Python 3 plain / yields floats.
stim_vertices = [(w_width // 2, w_height // 2 - 100),
                 (w_width // 2 - 50, w_height // 2 + 50),
                 (w_width // 2 + 50, w_height // 2 + 50)]
try:
    pygame.init()
    w = pygame.display.set_mode((w_width, w_height))
    w.fill(bg_color)
    pygame.display.flip()
    # Blank screen for 1-2 s, then show the triangle and start timing.
    pygame.time.wait(1000 + random.randint(0, 1000))
    pygame.draw.polygon(w, stim_color, stim_vertices)
    pygame.display.flip()
    t0 = pygame.time.get_ticks()
    # Poll events for 10 s: 'b' repaints the triangle blue, 'r' red; the
    # elapsed ms since stimulus onset is printed for each key press.
    while pygame.time.get_ticks() - t0 < 10000:
        pygame.time.wait(500)
        e = pygame.event.poll()
        if e.type != NOEVENT:
            # BUG FIX: Python 2 `print e` statements converted to print() calls.
            print(e)
        if e.type == KEYDOWN:
            if e.key == K_b:
                t = pygame.time.get_ticks() - t0
                print(t)
                pygame.draw.polygon(w, [0, 0, 255], stim_vertices)
                pygame.display.flip()
            if e.key == K_r:
                t = pygame.time.get_ticks() - t0
                print(t)
                pygame.draw.polygon(w, [255, 0, 0], stim_vertices)
                pygame.display.flip()
    w.fill(bg_color)
    pygame.display.flip()
    pygame.time.wait(1000)
finally:
    # Always shut SDL down, even if the experiment crashes mid-trial.
    pygame.quit()
# In[20]:
pygame.key.name(27)
|
import re
from twisted.python import log
# This file contains rules for processing input files
#
# regexp -> function to manipulate input
# Registry mapping a URL regexp to the rule function that rewrites its pages.
nameRules = {}
# all rules are functions. input is the parse tree ?!?
# output is the modified parse tree
def slashdot(soup):
    """Strip advertisement sections from a slashdot.org page tree.

    Args:
        soup: BeautifulSoup parse tree of the page.
    Returns:
        The same tree with nodes whose class contains 'advertisement' removed.
    """
    log.msg('rules.slashdot(): processing slashdot.org')
    junk = soup.body.findAll(True, {'class': re.compile(r'\badvertisement\b')})
    # Idiom fix: side effects belong in a plain loop, not a throwaway
    # list comprehension.
    for junk_section in junk:
        junk_section.extract()
    return soup
nameRules[r'.*slashdot\.org\b'] = slashdot
|
from django.db import models
from django.contrib.auth.models import User
from core.utils import generate_slug
class BetInvite(models.Model):
    """An invitation from one user to another to take part in a bet."""
    INITIAL_STATE = 'pending'
    # Invite lifecycle: pending until the invitee accepts or rejects.
    INVITE_STATES = (
        ('pending', 'Pending'),
        ('accepted', 'Accepted'),
        ('rejected', 'Rejected'),
    )
    creator = models.ForeignKey(User, related_name="bet_invite_creator")
    invited = models.ForeignKey(User, related_name="bet_invite_invited")
    state = models.CharField(max_length=30, choices=INVITE_STATES, default=INITIAL_STATE)
    def __unicode__(self):
        # Python 2 display form, e.g. u'alice => bob "pending"'.
        return u'%s => %s "%s"' % (self.creator, self.invited, self.state)
class Bet(models.Model):
    """A wager between two users, tracked from creation through completion."""
    INITIAL_STATE = 'active'
    # Allowed lifecycle states of a bet.
    BET_STATES = (
        ('new', 'New'),
        ('active', 'Active'),
        ('completed', 'Completed'),
    )
    slug = models.SlugField(default=generate_slug)
    title = models.CharField(max_length=50)
    description = models.CharField(max_length=500)
    wager = models.IntegerField(default=10)
    created = models.DateTimeField(auto_now_add=True)
    creator = models.ForeignKey(User)
    friend = models.ForeignKey(User, related_name='bet_friend')
    state = models.CharField(max_length=30, choices=BET_STATES, default=INITIAL_STATE)
    winner = models.ForeignKey(User, blank=True, null=True, related_name='bet_winner')
    loser = models.ForeignKey(User, blank=True, null=True, related_name='bet_loser')

    def __unicode__(self):
        return u'Title[%s] Bet[%s->%s] Wager[%d] State[%s]' % (self.title, self.creator, self.friend, self.wager, self.state)

    def to_dict(self):
        """Serialize the bet to a plain dict (e.g. for a JSON response)."""
        payload = {
            'id': self.id,
            'slug': self.slug,
            'title': self.title,
            'wager': self.wager,
            'description': self.description,
            'state': self.state,
        }
        # winner/loser are only present once the bet has been decided.
        if self.winner:
            payload['winner'] = self.winner.username
        if self.loser:
            payload['loser'] = self.loser.username
        payload['created'] = self.created.strftime('%Y-%m-%dT%H:%M:%S-000')
        payload['creator'] = self.creator.username
        payload['friend'] = self.friend.username
        return payload
|
from license import p
from time import sleep

# Queue chapter URLs from newest (1341) down to oldest (1244), one per second.
for i in range(1341, 1243, -1):
    content = f"https://boxnovel.com/novel/the-legendary-mechanic-boxnovel/chapter-{i}"
    print(f'add {content}')
    p.add(content)
    sleep(1)
|
# Read N numbers and bubble-sort them into descending order.
nums = []
passes = 0
count = int(input("How many numbers would you like to check for? "))
for _ in range(count):
    nums.append(int(input("Enter the number? ")))
# One full pass per element guarantees the list is fully sorted.
while passes < count:
    for idx in range(len(nums) - 1):
        if nums[idx] < nums[idx + 1]:
            # Tuple swap: move the larger value toward the front.
            nums[idx], nums[idx + 1] = nums[idx + 1], nums[idx]
    passes += 1
print(nums)
|
#!/usr/bin/env python
# coding=utf-8
from point import *
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.