repo_name | path | text
|---|---|---|
madpin/renthub
|
location/app/mappings/InterestPoints.py
|
import os
from urllib import parse
from herepy import PlacesApi
import schemas
def get_website(place):
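    """Return the first 'www' contact value of a HERE place item (lower-cased), or None."""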
if('contacts' not in place):
return None
for contact in place['contacts']:
if('www' in contact):
for www in contact['www']:
if('value' in www):
return www['value'].lower()
return None
def get_chain(place):
for chain in place.get('chains', []):
if('name' in chain):
return chain['name']
return None
def get_interest_points(latitude, longitude, query: str):
places_api = PlacesApi(api_key=os.environ['HERE_API_KEY'])
# fetches a list of places based on a query string and country code
response = places_api.search_in_country(
coordinates=[latitude, longitude], query=query, country_code="IRL"
)
places = response.as_dict()
    if 'items' not in places:
        raise KeyError('There is no `items` key in the HERE response')
ret_ = []
for place in places['items']:
place_instance = schemas.InterestPoint(
lat=place['position']['lat'],
long=place['position']['lng'],
name=place['title'].title(),
address=place['address']['label'],
distance=place['distance'],
chain_name=get_chain(place)
)
website = get_website(place)
        if website:
            place_instance.website = website
            netloc = parse.urlsplit(website).netloc
            # str.lstrip('www.') strips any of the characters 'w' and '.',
            # so remove the 'www.' prefix explicitly instead.
            if netloc.startswith('www.'):
                netloc = netloc[len('www.'):]
            place_instance.website_domain = netloc
ret_.append(place_instance)
return ret_
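# Minimal usage sketch, assuming HERE_API_KEY is set in the environment;
# the coordinates below are illustrative Dublin values, not project data.
if __name__ == '__main__':
    for point in get_interest_points(53.3498, -6.2603, 'Grocery'):
        print(point.name, point.distance)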
|
madpin/renthub
|
main/app/models.py
|
from typing import Optional, List
import sqlalchemy
from sqlmodel import SQLModel, Field, Relationship
from datetime import date, datetime
# #############################################################################
# Links
class ListingFacilityLink(SQLModel, table=True):
listing_id: int = Field(
foreign_key="listings.id", primary_key=True
)
facility_id: int = Field(
foreign_key="facilities.id", primary_key=True
)
# #############################################################################
class SongBase(SQLModel):
id: Optional[int]
name: str
artist: str
year: Optional[int] = None
class Song(SongBase, table=True):
id: int = Field(primary_key=True)
    # Pass callables so timestamps are computed per row, not once at import time.
    created_at: datetime = Field(default_factory=datetime.now)
    updated_at: datetime = Field(default_factory=datetime.now,
                                 sa_column_kwargs={'onupdate': datetime.now})
class SongRead(SongBase):
id: int
created_at: datetime
updated_at: datetime
class SongUpdate(SQLModel):
name: Optional[str] = None
artist: Optional[str] = None
year: Optional[int] = None
class SongCreate(SongBase):
pass
class Increment(SQLModel, table=True):
id: int = Field(primary_key=True)
# #############################################################################
class ListingBase(SQLModel):
id: int = Field(primary_key=True)
is_active: bool
title: Optional[str] = None
description: Optional[str] = None
url: str
source: str
source_id: str
source_code: Optional[str] = None
address: str
short_postal_code: Optional[str] = None
property_type: Optional[str] = None
postal_code: Optional[str] = None
ber_code: Optional[str] = None
views: Optional[int] = None
bedrooms: Optional[int] = None
bathrooms: Optional[int] = None
price: Optional[int] = None
rating_auto: Optional[int] = None
rating_user: Optional[int] = None
telegram_sent_at: Optional[datetime] = None
images_count: Optional[int] = 0
latitude: Optional[float] = None
longitude: Optional[float] = None
notes: Optional[str] = None
publish_date: Optional[datetime] = None
last_updated: Optional[datetime] = None
class Listing(ListingBase, table=True):
__tablename__ = 'listings'
id: int = Field(primary_key=True)
    created_at: datetime = Field(default_factory=datetime.now)
    updated_at: datetime = Field(default_factory=datetime.now,
                                 sa_column_kwargs={'onupdate': datetime.now})
images: List["Image"] = Relationship(back_populates="listing",
# sa_relationship_kwargs={'lazy': 'joined'}
)
facilities: List["Facility"] = Relationship(link_model=ListingFacilityLink)
places_nearby: List["PlaceNearby"] = Relationship(
back_populates="listing",)
routes: List["Route"] = Relationship(back_populates="listing",)
class ListingRead(ListingBase):
    id: int
created_at: datetime
updated_at: datetime
class ListingCreate(ListingBase):
pass
class ListingUpdate(ListingBase):
    id: Optional[int]
is_active: Optional[bool]
url: Optional[str]
source: Optional[str]
source_id: Optional[str]
address: Optional[str]
# #############################################################################
class FacilityBase(SQLModel):
id: Optional[int]
name: str
category: Optional[str] = None
notes: Optional[str] = None
class Facility(FacilityBase, table=True):
__tablename__ = 'facilities'
id: int = Field(primary_key=True)
    created_at: datetime = Field(default_factory=datetime.now)
    updated_at: datetime = Field(default_factory=datetime.now,
                                 sa_column_kwargs={'onupdate': datetime.now})
class FacilityRead(FacilityBase):
id: int
created_at: datetime
updated_at: datetime
class FacilityCreate(FacilityBase):
pass
# #############################################################################
class ImageBase(SQLModel):
id: Optional[int]
url: str
url_600: Optional[str]
size_x: Optional[float]
size_y: Optional[float]
listing_id: Optional[int] = Field(default=None, foreign_key="listings.id")
class Image(ImageBase, table=True):
__tablename__ = 'images'
id: int = Field(primary_key=True)
    created_at: datetime = Field(default_factory=datetime.now)
    updated_at: datetime = Field(default_factory=datetime.now,
                                 sa_column_kwargs={'onupdate': datetime.now})
listing: Optional[Listing] = Relationship(back_populates="images",
# sa_relationship_kwargs={'lazy': 'selectin'}
)
class ImageRead(ImageBase):
id: int
created_at: datetime
updated_at: datetime
class ImageCreate(ImageBase):
pass
# #############################################################################
class PlaceNearbyBase(SQLModel):
id: Optional[int]
latitude: Optional[float] = None
longitude: Optional[float] = None
query: Optional[str] = None
name: str
address: str
distance: int
website: Optional[str] = None
website_domain: Optional[str] = None
chain_name: Optional[str] = None
listing_id: Optional[int] = Field(default=None, foreign_key="listings.id")
class PlaceNearby(PlaceNearbyBase, table=True):
__tablename__ = 'places_nearby'
id: Optional[int] = Field(primary_key=True)
    created_at: datetime = Field(default_factory=datetime.now)
    updated_at: datetime = Field(default_factory=datetime.now,
                                 sa_column_kwargs={'onupdate': datetime.now})
listing: Optional[Listing] = Relationship(back_populates="places_nearby",)
class PlaceNearbyRead(PlaceNearbyBase):
id: int
created_at: datetime
updated_at: datetime
class PlaceNearbyCreate(PlaceNearbyBase):
pass
# #############################################################################
class InterestPointBase(SQLModel):
id: Optional[int]
name: str
is_active: bool
latitude: Optional[float] = None
longitude: Optional[float] = None
class InterestPoint(InterestPointBase, table=True):
__tablename__ = 'interest_points'
id: Optional[int] = Field(primary_key=True)
    created_at: datetime = Field(default_factory=datetime.now)
    updated_at: datetime = Field(default_factory=datetime.now,
                                 sa_column_kwargs={'onupdate': datetime.now})
class InterestPointRead(InterestPointBase):
id: int
created_at: datetime
updated_at: datetime
class InterestPointCreate(InterestPointBase):
pass
# #############################################################################
class RouteBase(SQLModel):
id: Optional[int]
waking_distance: Optional[int] = 0
total_distance: Optional[int] = 0
total_time: Optional[int] = 0
public_transport_count: Optional[int] = 0
listing_id: Optional[int] = Field(default=None, foreign_key="listings.id")
interest_point_id: Optional[int] = Field(
default=None, foreign_key="interest_points.id")
class Route(RouteBase, table=True):
__tablename__ = 'routes'
id: int = Field(primary_key=True)
    created_at: datetime = Field(default_factory=datetime.now)
    updated_at: datetime = Field(
        default_factory=datetime.now,
        sa_column_kwargs={'onupdate': datetime.now})
listing: Optional[Listing] = Relationship(back_populates="routes",)
interest_point: Optional[InterestPoint] = Relationship()
class RouteRead(RouteBase):
id: int
created_at: datetime
updated_at: datetime
class RouteCreate(RouteBase):
id: Optional[int] = None
# #############################################################################
# #############################################################################
class ImageReadWithListings(ImageRead):
listing: Optional[Listing] = None
class ListingReadWithRelations(ListingRead):
images: List["ImageRead"] = []
facilities: List["Facility"] = []
places_nearby: List["PlaceNearby"] = []
routes: List["Route"] = []
class ListingCreateWithRelations(ListingCreate):
images: List["ImageCreate"] = []
facilities: List["Facility"] = []
|
madpin/renthub
|
old/main_old/app/models.py
|
from sqlalchemy import (
Table, Column, ForeignKey,
Boolean, Integer, String, Float, DateTime
)
from sqlalchemy.orm import relationship
from database import Base
from datetime import datetime
listings_facilities = Table(
'listings_facilities',
Base.metadata,
Column('listing_id', ForeignKey('listings.id'), primary_key=True),
Column('facility_id', ForeignKey('facilities.id'), primary_key=True)
)
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True, index=True, autoincrement=True)
email = Column(String, unique=True, index=True)
hashed_password = Column(String)
is_active = Column(Boolean, default=True)
def __repr__(self):
return "<User(id='%s', emails='%s', is_active='%s')>" % (
self.id, self.email, self.is_active)
# items = relationship("Item", back_populates="owner")
class Listing(Base):
__tablename__ = "listings"
id = Column(Integer, primary_key=True, index=True, autoincrement=True)
created_at = Column(DateTime)
updated_at = Column(DateTime, default=datetime.now, onupdate=datetime.now)
is_active = Column(Boolean, default=True)
title = Column(String, index=True)
url = Column(String)
source = Column(String)
address = Column(String)
short_postal_code = Column(String)
postal_code = Column(String)
ber_code = Column(String(10))
price = Column(Integer)
rating_auto = Column(Integer)
rating_user = Column(Integer)
telegram_sent_at = Column(DateTime)
images_count = Column(Integer)
latitude = Column(Float)
longitude = Column(Float)
notes = Column(String)
images = relationship("Image", back_populates="listing")
distances = relationship("ListingDistance", back_populates="listing")
facilities = relationship(
"Facility", secondary=listings_facilities, back_populates="listings")
class Facility(Base):
__tablename__ = "facilities"
id = Column(Integer, primary_key=True, index=True, autoincrement=True)
created_at = Column(DateTime)
updated_at = Column(DateTime, default=datetime.now, onupdate=datetime.now)
name = Column(String)
category = Column(String)
notes = Column(String)
    listings = relationship(
        "Listing", secondary=listings_facilities, back_populates="facilities")
class Image(Base):
__tablename__ = "images"
id = Column(Integer, primary_key=True, index=True, autoincrement=True)
created_at = Column(DateTime)
updated_at = Column(DateTime, default=datetime.now, onupdate=datetime.now)
url = Column(String)
size_x = Column(Integer)
size_y = Column(Integer)
notes = Column(String)
listing_id = Column(Integer, ForeignKey("listings.id"))
image_tags = relationship("ImageTags", back_populates="image")
listing = relationship("Listing", back_populates="images")
class ImageTags(Base):
__tablename__ = "image_tags"
id = Column(Integer, primary_key=True, index=True, autoincrement=True)
created_at = Column(DateTime)
updated_at = Column(DateTime, default=datetime.now, onupdate=datetime.now)
notes = Column(String)
image_id = Column(Integer, ForeignKey("images.id"))
image = relationship("Image", back_populates="image_tags")
class Point(Base):
__tablename__ = "points"
id = Column(Integer, primary_key=True, index=True, autoincrement=True)
created_at = Column(DateTime)
updated_at = Column(DateTime, default=datetime.now, onupdate=datetime.now)
is_active = Column(Boolean, default=True)
name = Column(String)
latitude = Column(Float)
longitude = Column(Float)
notes = Column(String)
distances = relationship("ListingDistance", back_populates="point")
class ListingDistance(Base):
__tablename__ = "listing_distances"
id = Column(Integer, primary_key=True, index=True, autoincrement=True)
created_at = Column(DateTime)
updated_at = Column(DateTime, default=datetime.now, onupdate=datetime.now)
distance_km = Column(Float)
listing_id = Column(Integer, ForeignKey("listings.id"))
listing = relationship("Listing", back_populates="distances")
point_id = Column(Integer, ForeignKey("points.id"))
point = relationship("Point", back_populates="distances")
|
madpin/renthub
|
old/main_old/alembic/versions/cda240ae1ea5_first_tables.py
|
"""first tables
Revision ID: cda240ae1ea5
Revises: <KEY>
Create Date: 2021-10-31 23:47:10.313256
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'cda240ae1ea5'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
madpin/renthub
|
main/app/main.py
|
from pathlib import Path
from typing import List
import asyncio
import requests
from fastapi import Depends, FastAPI, Request, HTTPException, APIRouter
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from sqlalchemy.orm import Session
from sqlalchemy.orm import subqueryload
# from sqlalchemy.ext.asyncio import AsyncSession
# from sqlalchemy.exc import IntegrityError
# from sqlalchemy.orm import joinedload, selectinload
from sqlalchemy import select
from models import (
Song, SongRead, SongCreate, SongUpdate,
Listing, ListingCreate, ListingRead, ListingReadWithRelations, ListingCreateWithRelations,ListingUpdate,
Image, ImageCreate, ImageRead,
Facility
)
# from database import async_session, get_session
from database import get_session
from backgroud import BackgroundRunner
from loop import give_it_a_try
from custom_logger import CustomizeLogger
# models.Base.metadata.create_all(bind=engine)
# Hopefully not needed with Alembic
config_path=Path(__file__).with_name("custom_logger.json")
def create_app() -> FastAPI:
app = FastAPI(title='CustomLogger', debug=False)
logger = CustomizeLogger.make_logger(config_path)
app.logger = logger
return app
app = create_app()
# app = FastAPI()
router = APIRouter(prefix='/api')
app.mount("/static", StaticFiles(directory="static"), name="static")
templates = Jinja2Templates(directory="templates")
@app.get("/listing/{id}", response_class=HTMLResponse)
async def read_item(request: Request, id: int, session: Session = Depends(get_session)):
result = session.query(Listing).get(id)
return templates.TemplateResponse("project-detail.html",
{
"request": request,
"listing": result,
})
# @app.get("/test2/", response_class=HTMLResponse)
# async def read_item(request: Request):
# return templates.TemplateResponse("project-detail.html",
# {
# "request": request,
# "listing": Listing
# })
runner = BackgroundRunner()
@app.on_event('startup')
async def app_startup():
asyncio.create_task(runner.run_main())
@app.get("/runner/is_running", response_model=bool)
def runner_is_running():
return runner.is_running
@app.put("/runner/is_running")
def runner_is_running_put(is_running: bool):
runner.is_running = is_running
return 'ok'
@app.get("/runner/value")
def runner_value():
return runner.value
@app.get("/test", )
def test():
try:
response = requests.get('http://daft:8000/search_result')
response.raise_for_status()
# Additional code will only run if the request is successful
except requests.exceptions.HTTPError as error:
print(error)
return response.json()
@app.get("/test_full", )
def test():
return give_it_a_try()
@app.get("/songs", response_model=List[SongRead])
def get_songs(session: Session = Depends(get_session)):
result = session.execute(select(Song))
songs = result.scalars().all()
return songs
@app.post("/songs", response_model=SongRead)
def create_song(song: SongCreate, session: Session = Depends(get_session)):
db_item = Song(**song.dict())
session.add(db_item)
session.commit()
session.refresh(db_item)
return db_item
@app.patch("/songs/{song_id}", response_model=SongRead)
def update_song(song_id: int, song: SongUpdate, session: Session = Depends(get_session)):
db_song = session.get(Song, song_id)
if not db_song:
raise HTTPException(status_code=404, detail="Song not found")
song_data = song.dict(exclude_unset=True)
for key, value in song_data.items():
setattr(db_song, key, value)
session.add(db_song)
session.commit()
session.refresh(db_song)
return db_song
@app.get("/listings", response_model=List[ListingReadWithRelations])
def get_listings(session: Session = Depends(get_session)):
    result = session.query(Listing).options(subqueryload('*'))
    listings = result.all()
    return listings
@app.post("/listings", response_model=ListingRead)
def listings_post(listing: ListingCreate, session: Session = Depends(get_session)):
db_item = Listing(**listing.dict())
session.add(db_item)
session.commit()
session.refresh(db_item)
return db_item
@app.post("/listings/withRel", response_model=ListingReadWithRelations)
def listings_post_with_relations(listing: ListingCreateWithRelations, session: Session = Depends(get_session)):
    db_item = Listing.from_orm(listing)
    facilities_db = []
    for facility in listing.facilities:
        facility_rec = session.query(Facility).where(
            Facility.name == facility.name).first()
        if facility_rec is None:
            facility_rec = Facility(
                name=facility.name
            )
        facilities_db.append(facility_rec)
    # Attach the resolved facilities to the new listing before saving.
    db_item.facilities = facilities_db
    session.add(db_item)
session.commit()
session.refresh(db_item)
return db_item
@app.patch("/listing/{id}", response_model=ListingRead)
def update_listing(id: int, listing: ListingUpdate, session: Session = Depends(get_session)):
db_listing = session.get(Listing, id)
if not db_listing:
raise HTTPException(status_code=404, detail="Listing not found")
listing_data = listing.dict(exclude_unset=True)
for key, value in listing_data.items():
setattr(db_listing, key, value)
session.add(db_listing)
session.commit()
session.refresh(db_listing)
return db_listing
@app.post("/images", response_model=ImageRead)
def images_post(image: ImageCreate, session: Session = Depends(get_session)):
    db_item = Image(**image.dict())
session.add(db_item)
session.commit()
session.refresh(db_item)
return db_item
# @app.get("/listings", response_model=List[ListingReadWithRelations])
# async def get_listings(session: AsyncSession = Depends(get_session)):
# # result = await session.execute(select(ListingWithRelationship, Image).join(Image))
# result = await session.exec(select(Listing))
# listings = result.scalars().all()
# return listings
# @app.get("/images", response_model=List[ImageWithRelationship])
# async def get_images(session: AsyncSession = Depends(get_session)):
# result = await session.execute(select(ImageWithRelationship))
# listings = result.scalars().all()
# return listings
# ################################################################################
# ################################################################################
# ################################################################################
# ################################################################################
# ################################################################################
# @app.post("/users/", response_model=schemas.User)
# async def create_user(user: schemas.UserCreate, db: AsyncSession = Depends(get_session)):
# db_user = await crud.get_user_by_email(db, email=user.email)
# if db_user:
# raise HTTPException(status_code=400, detail="Email already registered")
# try:
# result = crud.create_user(db=db, user=user)
# await db.commit()
# except IntegrityError as ex:
# await db.rollback()
# raise ValueError("The city is already stored")
# return result
# @app.get("/users/", response_model=List[schemas.User])
# async def read_users(skip: int = 0, limit: int = 100, db: AsyncSession = Depends(get_session)):
# users = await crud.get_users(db, skip=skip, limit=limit)
# return users
# @app.get("/users/{user_id}", response_model=schemas.User)
# def read_user(user_id: int, db: AsyncSession = Depends(get_session)):
# db_user = crud.get_user(db, user_id=user_id)
# if db_user is None:
# raise HTTPException(status_code=404, detail="User not found")
# return db_user
# @app.post("/users/{user_id}/items/", response_model=schemas.Item)
# def create_item_for_user(
# user_id: int, item: schemas.ItemCreate, db: AsyncSession = Depends(get_session)
# ):
# return crud.create_user_item(db=db, item=item, user_id=user_id)
# @app.get("/listings/", response_model=List[schemas.Item])
# def read_items(skip: int = 0, limit: int = 100, db: AsyncSession = Depends(get_session)):
# items = crud.get_listings(db, skip=skip, limit=limit)
# return items
|
madpin/renthub
|
location/app/mappings/route.py
|
import os
from fastapi.logger import logger
from herepy import (
    RoutingApi,
    RouteMode,
)
import schemas
def get_route_raw(routes_dict):
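    """Yield a dict for every route leg that contains maneuvers, together with
    the parent route's summary (distance, travel time, public-transport line count)."""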
logger.debug(routes_dict)
response = routes_dict.get('response', {})
logger.debug('response')
logger.debug(response)
routes = response.get('route', [])
logger.debug('routes')
logger.debug(routes)
for route in routes:
lines = route.get('publicTransportLine', [])
public_transport_count = len(lines)
summary = route.get('summary', {})
legs = route.get('leg', [])
logger.debug('legs')
for leg in legs:
if('maneuver' in leg):
logger.debug('maneuver')
yield {
'maneuvers': leg['maneuver'],
'public_transport_count': public_transport_count,
'total_distance': summary.get('distance', ''),
'total_time': summary.get('travelTime', ''),
}
def parse_route_raw(route_details):
maneuvers = route_details['maneuvers']
route = schemas.RouteSummary(
waking_distance=0,
public_transport_count=route_details['public_transport_count'],
total_distance=route_details['total_distance'],
total_time=route_details['total_time'],
)
    for maneuver in maneuvers:
        if maneuver.get('_type', '') == 'PrivateTransportManeuverType':
            route.waking_distance += int(maneuver.get('length', '0'))
return route
def get_routes(lat1, long1, lat2, long2):
routing_api = RoutingApi(api_key=os.environ['HERE_API_KEY'])
response = routing_api.public_transport(
waypoint_a=[lat1, long1],
waypoint_b=[lat2, long2],
combine_change=True,
modes=[RouteMode.balanced, RouteMode.publicTransportTimeTable],
)
response_dict = response.as_dict()
ret_ = []
for route in get_route_raw(response_dict):
ret_.append(parse_route_raw(route))
return ret_
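# Usage sketch, assuming HERE_API_KEY is set; the coordinates are
# illustrative Dublin points rather than values from this project.
if __name__ == '__main__':
    for summary in get_routes(53.3498, -6.2603, 53.3434, -6.2760):
        print(summary.total_time, summary.waking_distance)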
|
madpin/renthub
|
old/rentcrud/alembic/versions/01b6c8ce3965_initial3.py
|
"""initial3
Revision ID: 01b6c8ce3965
Revises: 9d9a746dbfd7
Create Date: 2021-11-01 04:29:57.210756
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '01b6c8ce3965'
down_revision = '9d9a746dbfd7'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('images', sa.Column('size_x', sa.Float(), nullable=False))
op.add_column('images', sa.Column('size_y', sa.Float(), nullable=False))
op.create_index(op.f('ix_images_size_x'), 'images', ['size_x'], unique=False)
op.create_index(op.f('ix_images_size_y'), 'images', ['size_y'], unique=False)
op.add_column('listings', sa.Column('url', sqlmodel.sql.sqltypes.AutoString(), nullable=False))
op.create_index(op.f('ix_listings_url'), 'listings', ['url'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_listings_url'), table_name='listings')
op.drop_column('listings', 'url')
op.drop_index(op.f('ix_images_size_y'), table_name='images')
op.drop_index(op.f('ix_images_size_x'), table_name='images')
op.drop_column('images', 'size_y')
op.drop_column('images', 'size_x')
# ### end Alembic commands ###
|
madpin/renthub
|
old/rentcrud/app/database.py
|
from sqlalchemy.ext.asyncio import create_async_engine
# from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
# from sqlalchemy.ext.asyncio import AsyncSession
from sqlmodel.ext.asyncio.session import AsyncSession
# SQLALCHEMY_DATABASE_URL = "sqlite:///./sql_app.db"
SQLALCHEMY_DATABASE_URL = "postgresql+asyncpg://postgres:postgres@db/rentcrud"
engine = create_async_engine(
SQLALCHEMY_DATABASE_URL, future=True, echo=True,
connect_args={
})
# SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
# async_session = sessionmaker(engine, expire_on_commit=False, class_=AsyncSession)
# Base = declarative_base()
# Dependency
# async def get_session() -> AsyncSession:
# async with async_session() as session:
# yield session
async def get_session() -> AsyncSession:
async_session = sessionmaker(
engine, class_=AsyncSession, expire_on_commit=False
)
async with async_session() as session:
yield session
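# Dependency-injection sketch: FastAPI resolves this generator via Depends.
# The endpoint below is illustrative, not part of this module.
# from fastapi import Depends, FastAPI
# app = FastAPI()
#
# @app.get("/ping")
# async def ping(session: AsyncSession = Depends(get_session)):
#     return {"ok": True}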
|
madpin/renthub
|
old/rentcrud/alembic/versions/9d9a746dbfd7_initial2.py
|
"""initial2
Revision ID: 9d9a746dbfd7
Revises: a57c89b47e7b
Create Date: 2021-11-01 04:28:38.426261
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '9d9a746dbfd7'
down_revision = 'a57c89b47e7b'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('images',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_images_id'), 'images', ['id'], unique=False)
op.create_index(op.f('ix_images_url'), 'images', ['url'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_images_url'), table_name='images')
op.drop_index(op.f('ix_images_id'), table_name='images')
op.drop_table('images')
# ### end Alembic commands ###
|
madpin/renthub
|
notification/app/schemas.py
|
import os
from typing import List, Optional
import datetime
from pydantic import BaseModel
class TelegramImageUrl(BaseModel):
url: str
caption: str = ''
class TelegramMessage(BaseModel):
message: str
chat_id: str = os.getenv('TG_ADMIN_CHAT_ID')
timeout: int = 10
disable_web_page_preview: bool = True
images: Optional[List[TelegramImageUrl]]
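# Construction sketch (illustrative values only): chat_id falls back to the
# TG_ADMIN_CHAT_ID environment variable when not supplied explicitly.
# msg = TelegramMessage(
#     message='New listing found',
#     images=[TelegramImageUrl(url='https://example.com/1.jpg', caption='Front')],
# )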
|
madpin/renthub
|
main/app/database.py
|
# from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker, Session
# SQLALCHEMY_DATABASE_URL = "sqlite:///./sql_app.db"
SQLALCHEMY_DATABASE_URL = "postgresql://postgres:postgres@db/renthub"
engine = create_engine(
SQLALCHEMY_DATABASE_URL,
echo=True,
connect_args={
# "check_same_thread": False
}
)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
# async_session = sessionmaker(engine, expire_on_commit=False, class_=AsyncSession)
Base = declarative_base()
# Dependency
def get_session():
    # Create the session before the try block so `finally` never sees an
    # unbound name if construction fails.
    session = SessionLocal()
    try:
        yield session
    finally:
        session.close()
|
madpin/renthub
|
main/app/alembic/versions/8c61ffcb32e6_initial_3.py
|
"""Initial 3
Revision ID: 8c61ffcb32e6
Revises: 4bee0e2ca37f
Create Date: 2021-11-14 00:31:29.584714
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '8c61ffcb32e6'
down_revision = '4bee0e2ca37f'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('listings', 'source_code',
existing_type=sa.VARCHAR(),
nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('listings', 'source_code',
existing_type=sa.VARCHAR(),
nullable=False)
# ### end Alembic commands ###
|
madpin/renthub
|
main/app/loop.py
|
import dateutil.parser
import json
import requests
from sqlmodel import Session, select
from fastapi.logger import logger
from database import engine
from models import Listing, Facility, Image, InterestPoint, Route, PlaceNearby
def get_daft_search_result():
try:
response = requests.get('http://daft:8000/search_result/')
response.raise_for_status()
# Additional code will only run if the request is successful
except requests.exceptions.HTTPError as error:
print(error)
return response.json()
def get_daft_details(url):
try:
print(url)
params = {
'url': url,
'method': 'json_details',
}
response = requests.get(
'http://daft:8000/listing_details/', params=params)
response.raise_for_status()
return response.json()
# Additional code will only run if the request is successful
except requests.exceptions.HTTPError as error:
logger.error(error)
return response.json()
def get_routes_json(from_lat, from_long, to_lat, to_long):
try:
data = {
"from_point": {"lat": from_lat, "long": from_long},
"to_point": {"lat": to_lat, "long": to_long}
}
response = requests.post(
'http://location:8000/route/', data=json.dumps(data))
response.raise_for_status()
return response.json()
# Additional code will only run if the request is successful
except requests.exceptions.HTTPError as error:
logger.error(error)
return {}
def get_routes(listing: Listing):
ret_ = []
with Session(engine) as session:
interest_points_sttm = select(InterestPoint).\
where(InterestPoint.is_active == True)
interest_points = session.exec(interest_points_sttm).all()
for interest_point in interest_points:
routes = get_routes_json(
listing.latitude, listing.longitude,
interest_point.latitude, interest_point.longitude)
print('routes')
print(routes)
for route in routes:
ret_.append(Route(
interest_point_id=interest_point.id,
waking_distance=route['waking_distance'],
total_distance=route['total_distance'],
total_time=route['total_time'],
public_transport_count=route['public_transport_count'],
))
print(ret_)
return ret_
def get_places_nearby_json(from_lat, from_long, query):
    try:
        # The original code dropped `query`; it is assumed here to belong in
        # the request payload alongside the coordinates.
        data = {"lat": from_lat, "long": from_long, "query": query}
        response = requests.post(
            'http://location:8000/interest_places_nearby/', data=json.dumps(data))
        response.raise_for_status()
    except requests.exceptions.HTTPError as error:
        logger.error(error)
    return response.json()
def get_places_nearby(listing: Listing):
ret_ = []
query = 'Grocery'
places = get_places_nearby_json(
from_lat=listing.latitude, from_long=listing.longitude,
query=query)
for place in places:
ret_.append(PlaceNearby(
name=place['name'],
latitude=place['lat'],
longitude=place['long'],
address=place['address'],
distance=place['distance'],
website=place['website'],
website_domain=place['website_domain'],
chain_name=place['chain_name'],
query=query,
))
return ret_
def save_new_listing(search_result, listing_d):
with Session(engine) as session:
listing = Listing()
# Search Result
listing.source = 'daft'
listing.is_active = True
listing.url = search_result['url']
listing.address = search_result['title']
listing.price = search_result['monthly_price']
listing.latitude = search_result['latitude']
listing.longitude = search_result['longitude']
listing.publish_date = dateutil.parser.isoparse(
search_result['publish_date'])
# Details:
listing.source_id = listing_d['id']
listing.source_code = listing_d['daftShortcode']
listing.title = listing_d['title']
listing.bedrooms = listing_d['numBedrooms']
listing.bathrooms = listing_d['numBathrooms']
listing.description = listing_d['description']
        # lastUpdateDate arrives as a string; parse it leniently, since the
        # daft payload does not guarantee a format.
        listing.last_updated = dateutil.parser.parse(listing_d['lastUpdateDate'])
listing.images_count = listing_d['totalImages']
listing.views = listing_d['listingViews']
facilities_arr = []
for facility in listing_d['facilities']:
facility_sttm = select(Facility).\
where(Facility.name == facility.title()).\
where(Facility.category == 'facilities')
facility_obj = session.exec(facility_sttm).first()
if(not facility_obj):
facility_obj = Facility(
name=facility.title(),
category='facilities'
)
facilities_arr.append(facility_obj)
for facility in listing_d['propertyOverview']:
facility_sttm = select(Facility).\
where(Facility.name == facility.title()).\
where(Facility.category == 'overview')
facility_obj = session.exec(facility_sttm).first()
if(not facility_obj):
facility_obj = Facility(
name=facility.title(),
category='overview'
)
facilities_arr.append(facility_obj)
listing.facilities = facilities_arr
listing.images = [Image(url=x['url'], url_600=x['url_600']) for x in listing_d['images']]
listing.routes = get_routes(listing)
listing.places_nearby = get_places_nearby(listing)
# Saving it
session.add(listing)
session.commit()
def give_it_a_try(how_many=25):
ret_ = {}
daft_search_results = get_daft_search_result()
daft_result_list = daft_search_results['result_list']
c = 0
details = []
with Session(engine) as session:
for daft_result in daft_result_list:
statement = select(Listing).\
where(Listing.source == 'daft').\
where(Listing.url == daft_result['url']).\
where(Listing.price == daft_result['monthly_price'])
results = session.exec(statement).first()
if results:
                # TODO: check whether the Telegram notification was already sent
                continue
else:
print(daft_result['url'])
details = get_daft_details(daft_result['url'])
save_new_listing(daft_result, details)
c += 1
if c < how_many:
continue
break
return details
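# Manual-run sketch: assumes the daft and location services are reachable at
# the docker-compose hostnames used above.
if __name__ == '__main__':
    give_it_a_try(how_many=1)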
|
madpin/renthub
|
old/rentcrud/alembic/versions/a57c89b47e7b_initial.py
|
"""initial
Revision ID: a57c89b47e7b
Revises:
Create Date: 2021-11-01 04:27:56.134285
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = 'a57c89b47e7b'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('increment',
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_increment_id'), 'increment', ['id'], unique=False)
op.create_table('listings',
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_listings_id'), 'listings', ['id'], unique=False)
op.create_table('song',
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('artist', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('year', sa.Integer(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_song_artist'), 'song', ['artist'], unique=False)
op.create_index(op.f('ix_song_id'), 'song', ['id'], unique=False)
op.create_index(op.f('ix_song_name'), 'song', ['name'], unique=False)
op.create_index(op.f('ix_song_year'), 'song', ['year'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_song_year'), table_name='song')
op.drop_index(op.f('ix_song_name'), table_name='song')
op.drop_index(op.f('ix_song_id'), table_name='song')
op.drop_index(op.f('ix_song_artist'), table_name='song')
op.drop_table('song')
op.drop_index(op.f('ix_listings_id'), table_name='listings')
op.drop_table('listings')
op.drop_index(op.f('ix_increment_id'), table_name='increment')
op.drop_table('increment')
# ### end Alembic commands ###
|
madpin/renthub
|
main/app/alembic/versions/de84556d9e2a_initial_8.py
|
"""Initial 8
Revision ID: de84556d9e2a
Revises: a8810e4a9ace
Create Date: 2021-11-14 02:57:59.511295
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = 'de84556d9e2a'
down_revision = 'a8810e4a9ace'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('places_nearby', sa.Column('query', sqlmodel.sql.sqltypes.AutoString(), nullable=True))
op.create_index(op.f('ix_places_nearby_query'), 'places_nearby', ['query'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_places_nearby_query'), table_name='places_nearby')
op.alter_column('places_nearby', 'id',
existing_type=sa.INTEGER(),
nullable=False,
autoincrement=True)
op.drop_column('places_nearby', 'query')
op.alter_column('interest_points', 'id',
existing_type=sa.INTEGER(),
nullable=False,
autoincrement=True)
# ### end Alembic commands ###
|
madpin/renthub
|
old/main_old/alembic/versions/52a4c736df24_images.py
|
"""Images
Revision ID: 52a4c736df24
Revises: <KEY>
Create Date: 2021-11-08 21:17:43.704391
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '52a4c736df24'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
madpin/renthub
|
main/app/api_calls.py
|
import requests
try:
response = requests.get('http://daft:8000/search_result')
response.raise_for_status()
# Additional code will only run if the request is successful
except requests.exceptions.HTTPError as error:
print(error)
|
madpin/renthub
|
location/app/points.py
|
from schemas import Point
indeed = Point(lat=53.34545621516955, long=-6.231801040391591)
bank_house = Point(lat=53.34347027177946, long=-6.276045630904159)
|
madpin/renthub
|
old/main_old/alembic/versions/1038f05634e6_images_2.py
|
"""images 2
Revision ID: 1038f05634e6
Revises: 52a4c736df24
Create Date: 2021-11-08 21:19:25.145728
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '1038f05634e6'
down_revision = '52a4c736df24'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('images',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('url', sa.String(), nullable=True),
sa.Column('size_x', sa.Integer(), nullable=True),
sa.Column('size_y', sa.Integer(), nullable=True),
sa.Column('listing_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['listing_id'], ['listings.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_images_id'), 'images', ['id'], unique=False)
op.drop_index('ix_pictures_id', table_name='pictures')
op.drop_table('pictures')
op.add_column('listings', sa.Column('images_count', sa.Integer(), nullable=True))
op.drop_column('listings', 'pictures')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('listings', sa.Column('pictures', sa.INTEGER(), autoincrement=False, nullable=True))
op.drop_column('listings', 'images_count')
op.create_table('pictures',
sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column('created_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),
sa.Column('updated_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=True),
sa.Column('url', sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column('size_x', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('size_y', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('listing_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['listing_id'], ['listings.id'], name='pictures_listing_id_fkey'),
sa.PrimaryKeyConstraint('id', name='pictures_pkey')
)
op.create_index('ix_pictures_id', 'pictures', ['id'], unique=False)
op.drop_index(op.f('ix_images_id'), table_name='images')
op.drop_table('images')
# ### end Alembic commands ###
|
madpin/renthub
|
old/main_old/alembic/versions/94c2f18b742c_distances_2.py
|
"""Distances 2
Revision ID: 94c2f18b742c
Revises: 1a289167affe
Create Date: 2021-10-31 23:55:43.580559
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '94c2f18b742c'
down_revision = '1a289167affe'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('listings',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('is_active', sa.Boolean(), nullable=True),
sa.Column('title', sa.String(), nullable=True),
sa.Column('url', sa.String(), nullable=True),
sa.Column('source', sa.String(), nullable=True),
sa.Column('address', sa.String(), nullable=True),
sa.Column('short_postal_code', sa.String(), nullable=True),
sa.Column('postal_code', sa.String(), nullable=True),
sa.Column('pictures', sa.Integer(), nullable=True),
sa.Column('latitude', sa.Float(), nullable=True),
sa.Column('longitude', sa.Float(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_listings_id'), 'listings', ['id'], unique=False)
op.create_index(op.f('ix_listings_title'), 'listings', ['title'], unique=False)
op.create_table('points',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('is_active', sa.Boolean(), nullable=True),
sa.Column('name', sa.String(), nullable=True),
sa.Column('latitude', sa.Float(), nullable=True),
sa.Column('longitude', sa.Float(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_points_id'), 'points', ['id'], unique=False)
op.create_table('pictures',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('url', sa.String(), nullable=True),
sa.Column('size_x', sa.Integer(), nullable=True),
sa.Column('size_y', sa.Integer(), nullable=True),
sa.Column('listing_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['listing_id'], ['listings.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_pictures_id'), 'pictures', ['id'], unique=False)
op.drop_index('ix_items_description', table_name='items')
op.drop_index('ix_items_id', table_name='items')
op.drop_index('ix_items_title', table_name='items')
op.drop_table('items')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('items',
sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column('title', sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column('description', sa.VARCHAR(), autoincrement=False, nullable=True),
sa.Column('owner_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(['owner_id'], ['users.id'], name='items_owner_id_fkey'),
sa.PrimaryKeyConstraint('id', name='items_pkey')
)
op.create_index('ix_items_title', 'items', ['title'], unique=False)
op.create_index('ix_items_id', 'items', ['id'], unique=False)
op.create_index('ix_items_description', 'items', ['description'], unique=False)
op.drop_index(op.f('ix_pictures_id'), table_name='pictures')
op.drop_table('pictures')
op.drop_index(op.f('ix_points_id'), table_name='points')
op.drop_table('points')
op.drop_index(op.f('ix_listings_title'), table_name='listings')
op.drop_index(op.f('ix_listings_id'), table_name='listings')
op.drop_table('listings')
# ### end Alembic commands ###
|
madpin/renthub
|
main/app/alembic/versions/0101e666f4e9_initial_5.py
|
"""Initial 5
Revision ID: 0101e666f4e9
Revises: 6c98e82ae2b5
Create Date: 2021-11-14 01:40:19.792380
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '0101e666f4e9'
down_revision = '6c98e82ae2b5'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('interest_points',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('latitude', sa.Float(), nullable=True),
sa.Column('longitude', sa.Float(), nullable=True),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('address', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('distance', sa.Integer(), nullable=False),
sa.Column('website', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('website_domain', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('chain_name', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('listing_id', sa.Integer(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['listing_id'], ['listings.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_interest_points_address'), 'interest_points', ['address'], unique=False)
op.create_index(op.f('ix_interest_points_chain_name'), 'interest_points', ['chain_name'], unique=False)
op.create_index(op.f('ix_interest_points_created_at'), 'interest_points', ['created_at'], unique=False)
op.create_index(op.f('ix_interest_points_distance'), 'interest_points', ['distance'], unique=False)
op.create_index(op.f('ix_interest_points_id'), 'interest_points', ['id'], unique=False)
op.create_index(op.f('ix_interest_points_latitude'), 'interest_points', ['latitude'], unique=False)
op.create_index(op.f('ix_interest_points_listing_id'), 'interest_points', ['listing_id'], unique=False)
op.create_index(op.f('ix_interest_points_longitude'), 'interest_points', ['longitude'], unique=False)
op.create_index(op.f('ix_interest_points_name'), 'interest_points', ['name'], unique=False)
op.create_index(op.f('ix_interest_points_updated_at'), 'interest_points', ['updated_at'], unique=False)
op.create_index(op.f('ix_interest_points_website'), 'interest_points', ['website'], unique=False)
op.create_index(op.f('ix_interest_points_website_domain'), 'interest_points', ['website_domain'], unique=False)
op.create_table('places_nearby',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('latitude', sa.Float(), nullable=True),
sa.Column('longitude', sa.Float(), nullable=True),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('address', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('distance', sa.Integer(), nullable=False),
sa.Column('website', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('website_domain', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('chain_name', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('listing_id', sa.Integer(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['listing_id'], ['listings.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_places_nearby_address'), 'places_nearby', ['address'], unique=False)
op.create_index(op.f('ix_places_nearby_chain_name'), 'places_nearby', ['chain_name'], unique=False)
op.create_index(op.f('ix_places_nearby_created_at'), 'places_nearby', ['created_at'], unique=False)
op.create_index(op.f('ix_places_nearby_distance'), 'places_nearby', ['distance'], unique=False)
op.create_index(op.f('ix_places_nearby_id'), 'places_nearby', ['id'], unique=False)
op.create_index(op.f('ix_places_nearby_latitude'), 'places_nearby', ['latitude'], unique=False)
op.create_index(op.f('ix_places_nearby_listing_id'), 'places_nearby', ['listing_id'], unique=False)
op.create_index(op.f('ix_places_nearby_longitude'), 'places_nearby', ['longitude'], unique=False)
op.create_index(op.f('ix_places_nearby_name'), 'places_nearby', ['name'], unique=False)
op.create_index(op.f('ix_places_nearby_updated_at'), 'places_nearby', ['updated_at'], unique=False)
op.create_index(op.f('ix_places_nearby_website'), 'places_nearby', ['website'], unique=False)
op.create_index(op.f('ix_places_nearby_website_domain'), 'places_nearby', ['website_domain'], unique=False)
op.create_table('routes',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('latitude', sa.Float(), nullable=True),
sa.Column('longitude', sa.Float(), nullable=True),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('address', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('distance', sa.Integer(), nullable=False),
sa.Column('website', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('website_domain', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('chain_name', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('listing_id', sa.Integer(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['listing_id'], ['listings.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_routes_address'), 'routes', ['address'], unique=False)
op.create_index(op.f('ix_routes_chain_name'), 'routes', ['chain_name'], unique=False)
op.create_index(op.f('ix_routes_created_at'), 'routes', ['created_at'], unique=False)
op.create_index(op.f('ix_routes_distance'), 'routes', ['distance'], unique=False)
op.create_index(op.f('ix_routes_id'), 'routes', ['id'], unique=False)
op.create_index(op.f('ix_routes_latitude'), 'routes', ['latitude'], unique=False)
op.create_index(op.f('ix_routes_listing_id'), 'routes', ['listing_id'], unique=False)
op.create_index(op.f('ix_routes_longitude'), 'routes', ['longitude'], unique=False)
op.create_index(op.f('ix_routes_name'), 'routes', ['name'], unique=False)
op.create_index(op.f('ix_routes_updated_at'), 'routes', ['updated_at'], unique=False)
op.create_index(op.f('ix_routes_website'), 'routes', ['website'], unique=False)
op.create_index(op.f('ix_routes_website_domain'), 'routes', ['website_domain'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_routes_website_domain'), table_name='routes')
op.drop_index(op.f('ix_routes_website'), table_name='routes')
op.drop_index(op.f('ix_routes_updated_at'), table_name='routes')
op.drop_index(op.f('ix_routes_name'), table_name='routes')
op.drop_index(op.f('ix_routes_longitude'), table_name='routes')
op.drop_index(op.f('ix_routes_listing_id'), table_name='routes')
op.drop_index(op.f('ix_routes_latitude'), table_name='routes')
op.drop_index(op.f('ix_routes_id'), table_name='routes')
op.drop_index(op.f('ix_routes_distance'), table_name='routes')
op.drop_index(op.f('ix_routes_created_at'), table_name='routes')
op.drop_index(op.f('ix_routes_chain_name'), table_name='routes')
op.drop_index(op.f('ix_routes_address'), table_name='routes')
op.drop_table('routes')
op.drop_index(op.f('ix_places_nearby_website_domain'), table_name='places_nearby')
op.drop_index(op.f('ix_places_nearby_website'), table_name='places_nearby')
op.drop_index(op.f('ix_places_nearby_updated_at'), table_name='places_nearby')
op.drop_index(op.f('ix_places_nearby_name'), table_name='places_nearby')
op.drop_index(op.f('ix_places_nearby_longitude'), table_name='places_nearby')
op.drop_index(op.f('ix_places_nearby_listing_id'), table_name='places_nearby')
op.drop_index(op.f('ix_places_nearby_latitude'), table_name='places_nearby')
op.drop_index(op.f('ix_places_nearby_id'), table_name='places_nearby')
op.drop_index(op.f('ix_places_nearby_distance'), table_name='places_nearby')
op.drop_index(op.f('ix_places_nearby_created_at'), table_name='places_nearby')
op.drop_index(op.f('ix_places_nearby_chain_name'), table_name='places_nearby')
op.drop_index(op.f('ix_places_nearby_address'), table_name='places_nearby')
op.drop_table('places_nearby')
op.drop_index(op.f('ix_interest_points_website_domain'), table_name='interest_points')
op.drop_index(op.f('ix_interest_points_website'), table_name='interest_points')
op.drop_index(op.f('ix_interest_points_updated_at'), table_name='interest_points')
op.drop_index(op.f('ix_interest_points_name'), table_name='interest_points')
op.drop_index(op.f('ix_interest_points_longitude'), table_name='interest_points')
op.drop_index(op.f('ix_interest_points_listing_id'), table_name='interest_points')
op.drop_index(op.f('ix_interest_points_latitude'), table_name='interest_points')
op.drop_index(op.f('ix_interest_points_id'), table_name='interest_points')
op.drop_index(op.f('ix_interest_points_distance'), table_name='interest_points')
op.drop_index(op.f('ix_interest_points_created_at'), table_name='interest_points')
op.drop_index(op.f('ix_interest_points_chain_name'), table_name='interest_points')
op.drop_index(op.f('ix_interest_points_address'), table_name='interest_points')
op.drop_table('interest_points')
# ### end Alembic commands ###
|
madpin/renthub
|
old/rentcrud/alembic/versions/819871403219_initial4.py
|
"""initial4
Revision ID: 819871403219
Revises: 01b6c8ce3965
Create Date: 2021-11-01 04:34:08.506090
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '819871403219'
down_revision = '01b6c8ce3965'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
madpin/renthub
|
main/app/alembic/versions/a8810e4a9ace_initial_7.py
|
"""Initial 7
Revision ID: a8810e4a9ace
Revises: <PASSWORD>
Create Date: 2021-11-14 02:33:09.091191
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'a8810e4a9ace'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('routes', sa.Column('waking_distance', sa.Integer(), nullable=True))
op.add_column('routes', sa.Column('total_distance', sa.Integer(), nullable=True))
op.add_column('routes', sa.Column('total_time', sa.Integer(), nullable=True))
op.add_column('routes', sa.Column('public_transport_count', sa.Integer(), nullable=True))
op.add_column('routes', sa.Column('interest_point_id', sa.Integer(), nullable=True))
op.drop_index('ix_routes_address', table_name='routes')
op.drop_index('ix_routes_chain_name', table_name='routes')
op.drop_index('ix_routes_distance', table_name='routes')
op.drop_index('ix_routes_latitude', table_name='routes')
op.drop_index('ix_routes_longitude', table_name='routes')
op.drop_index('ix_routes_name', table_name='routes')
op.drop_index('ix_routes_website', table_name='routes')
op.drop_index('ix_routes_website_domain', table_name='routes')
op.create_index(op.f('ix_routes_interest_point_id'), 'routes', ['interest_point_id'], unique=False)
op.create_index(op.f('ix_routes_public_transport_count'), 'routes', ['public_transport_count'], unique=False)
op.create_index(op.f('ix_routes_total_distance'), 'routes', ['total_distance'], unique=False)
op.create_index(op.f('ix_routes_total_time'), 'routes', ['total_time'], unique=False)
op.create_index(op.f('ix_routes_waking_distance'), 'routes', ['waking_distance'], unique=False)
op.create_foreign_key(None, 'routes', 'interest_points', ['interest_point_id'], ['id'])
op.drop_column('routes', 'address')
op.drop_column('routes', 'name')
op.drop_column('routes', 'latitude')
op.drop_column('routes', 'longitude')
op.drop_column('routes', 'distance')
op.drop_column('routes', 'chain_name')
op.drop_column('routes', 'website')
op.drop_column('routes', 'website_domain')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('routes', sa.Column('website_domain', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('routes', sa.Column('website', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('routes', sa.Column('chain_name', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('routes', sa.Column('distance', sa.INTEGER(), autoincrement=False, nullable=False))
op.add_column('routes', sa.Column('longitude', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('routes', sa.Column('latitude', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
op.add_column('routes', sa.Column('name', sa.VARCHAR(), autoincrement=False, nullable=False))
op.add_column('routes', sa.Column('address', sa.VARCHAR(), autoincrement=False, nullable=False))
op.drop_constraint(None, 'routes', type_='foreignkey')
op.drop_index(op.f('ix_routes_waking_distance'), table_name='routes')
op.drop_index(op.f('ix_routes_total_time'), table_name='routes')
op.drop_index(op.f('ix_routes_total_distance'), table_name='routes')
op.drop_index(op.f('ix_routes_public_transport_count'), table_name='routes')
op.drop_index(op.f('ix_routes_interest_point_id'), table_name='routes')
op.create_index('ix_routes_website_domain', 'routes', ['website_domain'], unique=False)
op.create_index('ix_routes_website', 'routes', ['website'], unique=False)
op.create_index('ix_routes_name', 'routes', ['name'], unique=False)
op.create_index('ix_routes_longitude', 'routes', ['longitude'], unique=False)
op.create_index('ix_routes_latitude', 'routes', ['latitude'], unique=False)
op.create_index('ix_routes_distance', 'routes', ['distance'], unique=False)
op.create_index('ix_routes_chain_name', 'routes', ['chain_name'], unique=False)
op.create_index('ix_routes_address', 'routes', ['address'], unique=False)
op.drop_column('routes', 'interest_point_id')
op.drop_column('routes', 'public_transport_count')
op.drop_column('routes', 'total_time')
op.drop_column('routes', 'total_distance')
op.drop_column('routes', 'waking_distance')
# ### end Alembic commands ###
|
madpin/renthub
|
old/main_old/app/crud.py
|
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
import models, schemas
async def get_user(db: AsyncSession, user_id: int):
    # AsyncSession has no .query(); use select() with execute() instead
    result = await db.execute(
        select(models.User).filter(models.User.id == user_id)
    )
    return result.scalars().first()
async def get_user_by_email(db: AsyncSession, email: str):
    result = await db.execute(
        select(models.User)
        .filter(models.User.email == email)
        .order_by(models.User.id)
    )
    # scalars() unwraps the Row so a User instance (or None) is returned
    return result.scalars().first()
async def get_users(db: AsyncSession, skip: int = 0, limit: int = 100):
result = await db.execute(
select(models.User).order_by(models.User.id).offset(skip).limit(limit)
)
return result.scalars().all()
def create_user(db: AsyncSession, user: schemas.UserCreate):
fake_hashed_password = <PASSWORD>
db_user = models.User(email=user.email, hashed_password=<PASSWORD>)
db.add(db_user)
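    # NOTE: the session is intentionally not committed here; the calling
    # endpoint commits (and rolls back on IntegrityError), as in main.py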
return db_user
# def get_listings(db: Session, skip: int = 0, limit: int = 100):
# return db.query(models.Listing).offset(skip).limit(limit).all()
# def create_user_item(db: Session, item: schemas.ItemCreate, user_id: int):
# db_item = models.Item(**item.dict(), owner_id=user_id)
# db.add(db_item)
# db.commit()
# db.refresh(db_item)
# return db_item
|
madpin/renthub
|
old/main_old/alembic/versions/1a289167affe_distances.py
|
"""Distances
Revision ID: 1a289167affe
Revises: <KEY>
Create Date: 2021-10-31 23:52:46.651398
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1a289167affe'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
madpin/renthub
|
main/app/alembic/versions/d63ccd5484d7_initial.py
|
"""Initial
Revision ID: d63ccd5484d7
Revises:
Create Date: 2021-11-14 00:28:55.123695
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = 'd63ccd5484d7'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('facilities',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('category', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('notes', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_facilities_category'), 'facilities', ['category'], unique=False)
op.create_index(op.f('ix_facilities_created_at'), 'facilities', ['created_at'], unique=False)
op.create_index(op.f('ix_facilities_id'), 'facilities', ['id'], unique=False)
op.create_index(op.f('ix_facilities_name'), 'facilities', ['name'], unique=False)
op.create_index(op.f('ix_facilities_notes'), 'facilities', ['notes'], unique=False)
op.create_index(op.f('ix_facilities_updated_at'), 'facilities', ['updated_at'], unique=False)
op.create_table('increment',
sa.Column('id', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_increment_id'), 'increment', ['id'], unique=False)
op.create_table('listings',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('is_active', sa.Boolean(), nullable=False),
sa.Column('title', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('description', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_id', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('source_code', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('address', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('short_postal_code', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('property_type', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('postal_code', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('ber_code', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('bedrooms', sa.Integer(), nullable=True),
sa.Column('bathrooms', sa.Integer(), nullable=True),
sa.Column('price', sa.Integer(), nullable=True),
sa.Column('rating_auto', sa.Integer(), nullable=True),
sa.Column('rating_user', sa.Integer(), nullable=True),
sa.Column('telegram_sent_at', sa.DateTime(), nullable=True),
sa.Column('images_count', sa.Integer(), nullable=True),
sa.Column('latitude', sa.Float(), nullable=True),
sa.Column('longitude', sa.Float(), nullable=True),
sa.Column('notes', sqlmodel.sql.sqltypes.AutoString(), nullable=True),
sa.Column('publish_date', sa.DateTime(), nullable=True),
sa.Column('last_updated', sa.DateTime(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_listings_address'), 'listings', ['address'], unique=False)
op.create_index(op.f('ix_listings_bathrooms'), 'listings', ['bathrooms'], unique=False)
op.create_index(op.f('ix_listings_bedrooms'), 'listings', ['bedrooms'], unique=False)
op.create_index(op.f('ix_listings_ber_code'), 'listings', ['ber_code'], unique=False)
op.create_index(op.f('ix_listings_created_at'), 'listings', ['created_at'], unique=False)
op.create_index(op.f('ix_listings_description'), 'listings', ['description'], unique=False)
op.create_index(op.f('ix_listings_id'), 'listings', ['id'], unique=False)
op.create_index(op.f('ix_listings_images_count'), 'listings', ['images_count'], unique=False)
op.create_index(op.f('ix_listings_is_active'), 'listings', ['is_active'], unique=False)
op.create_index(op.f('ix_listings_last_updated'), 'listings', ['last_updated'], unique=False)
op.create_index(op.f('ix_listings_latitude'), 'listings', ['latitude'], unique=False)
op.create_index(op.f('ix_listings_longitude'), 'listings', ['longitude'], unique=False)
op.create_index(op.f('ix_listings_notes'), 'listings', ['notes'], unique=False)
op.create_index(op.f('ix_listings_postal_code'), 'listings', ['postal_code'], unique=False)
op.create_index(op.f('ix_listings_price'), 'listings', ['price'], unique=False)
op.create_index(op.f('ix_listings_property_type'), 'listings', ['property_type'], unique=False)
op.create_index(op.f('ix_listings_publish_date'), 'listings', ['publish_date'], unique=False)
op.create_index(op.f('ix_listings_rating_auto'), 'listings', ['rating_auto'], unique=False)
op.create_index(op.f('ix_listings_rating_user'), 'listings', ['rating_user'], unique=False)
op.create_index(op.f('ix_listings_short_postal_code'), 'listings', ['short_postal_code'], unique=False)
op.create_index(op.f('ix_listings_source'), 'listings', ['source'], unique=False)
op.create_index(op.f('ix_listings_source_code'), 'listings', ['source_code'], unique=False)
op.create_index(op.f('ix_listings_source_id'), 'listings', ['source_id'], unique=False)
op.create_index(op.f('ix_listings_telegram_sent_at'), 'listings', ['telegram_sent_at'], unique=False)
op.create_index(op.f('ix_listings_title'), 'listings', ['title'], unique=False)
op.create_index(op.f('ix_listings_updated_at'), 'listings', ['updated_at'], unique=False)
op.create_index(op.f('ix_listings_url'), 'listings', ['url'], unique=False)
op.create_table('song',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('artist', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('year', sa.Integer(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_song_artist'), 'song', ['artist'], unique=False)
op.create_index(op.f('ix_song_created_at'), 'song', ['created_at'], unique=False)
op.create_index(op.f('ix_song_id'), 'song', ['id'], unique=False)
op.create_index(op.f('ix_song_name'), 'song', ['name'], unique=False)
op.create_index(op.f('ix_song_updated_at'), 'song', ['updated_at'], unique=False)
op.create_index(op.f('ix_song_year'), 'song', ['year'], unique=False)
op.create_table('images',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('url', sqlmodel.sql.sqltypes.AutoString(), nullable=False),
sa.Column('size_x', sa.Float(), nullable=True),
sa.Column('size_y', sa.Float(), nullable=True),
sa.Column('listing_id', sa.Integer(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.ForeignKeyConstraint(['listing_id'], ['listings.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_images_created_at'), 'images', ['created_at'], unique=False)
op.create_index(op.f('ix_images_id'), 'images', ['id'], unique=False)
op.create_index(op.f('ix_images_listing_id'), 'images', ['listing_id'], unique=False)
op.create_index(op.f('ix_images_size_x'), 'images', ['size_x'], unique=False)
op.create_index(op.f('ix_images_size_y'), 'images', ['size_y'], unique=False)
op.create_index(op.f('ix_images_updated_at'), 'images', ['updated_at'], unique=False)
op.create_index(op.f('ix_images_url'), 'images', ['url'], unique=False)
op.create_table('listingfacilitylink',
sa.Column('listing_id', sa.Integer(), nullable=False),
sa.Column('facility_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['facility_id'], ['facilities.id'], ),
sa.ForeignKeyConstraint(['listing_id'], ['listings.id'], ),
sa.PrimaryKeyConstraint('listing_id', 'facility_id')
)
op.create_index(op.f('ix_listingfacilitylink_facility_id'), 'listingfacilitylink', ['facility_id'], unique=False)
op.create_index(op.f('ix_listingfacilitylink_listing_id'), 'listingfacilitylink', ['listing_id'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_listingfacilitylink_listing_id'), table_name='listingfacilitylink')
op.drop_index(op.f('ix_listingfacilitylink_facility_id'), table_name='listingfacilitylink')
op.drop_table('listingfacilitylink')
op.drop_index(op.f('ix_images_url'), table_name='images')
op.drop_index(op.f('ix_images_updated_at'), table_name='images')
op.drop_index(op.f('ix_images_size_y'), table_name='images')
op.drop_index(op.f('ix_images_size_x'), table_name='images')
op.drop_index(op.f('ix_images_listing_id'), table_name='images')
op.drop_index(op.f('ix_images_id'), table_name='images')
op.drop_index(op.f('ix_images_created_at'), table_name='images')
op.drop_table('images')
op.drop_index(op.f('ix_song_year'), table_name='song')
op.drop_index(op.f('ix_song_updated_at'), table_name='song')
op.drop_index(op.f('ix_song_name'), table_name='song')
op.drop_index(op.f('ix_song_id'), table_name='song')
op.drop_index(op.f('ix_song_created_at'), table_name='song')
op.drop_index(op.f('ix_song_artist'), table_name='song')
op.drop_table('song')
op.drop_index(op.f('ix_listings_url'), table_name='listings')
op.drop_index(op.f('ix_listings_updated_at'), table_name='listings')
op.drop_index(op.f('ix_listings_title'), table_name='listings')
op.drop_index(op.f('ix_listings_telegram_sent_at'), table_name='listings')
op.drop_index(op.f('ix_listings_source_id'), table_name='listings')
op.drop_index(op.f('ix_listings_source_code'), table_name='listings')
op.drop_index(op.f('ix_listings_source'), table_name='listings')
op.drop_index(op.f('ix_listings_short_postal_code'), table_name='listings')
op.drop_index(op.f('ix_listings_rating_user'), table_name='listings')
op.drop_index(op.f('ix_listings_rating_auto'), table_name='listings')
op.drop_index(op.f('ix_listings_publish_date'), table_name='listings')
op.drop_index(op.f('ix_listings_property_type'), table_name='listings')
op.drop_index(op.f('ix_listings_price'), table_name='listings')
op.drop_index(op.f('ix_listings_postal_code'), table_name='listings')
op.drop_index(op.f('ix_listings_notes'), table_name='listings')
op.drop_index(op.f('ix_listings_longitude'), table_name='listings')
op.drop_index(op.f('ix_listings_latitude'), table_name='listings')
op.drop_index(op.f('ix_listings_last_updated'), table_name='listings')
op.drop_index(op.f('ix_listings_is_active'), table_name='listings')
op.drop_index(op.f('ix_listings_images_count'), table_name='listings')
op.drop_index(op.f('ix_listings_id'), table_name='listings')
op.drop_index(op.f('ix_listings_description'), table_name='listings')
op.drop_index(op.f('ix_listings_created_at'), table_name='listings')
op.drop_index(op.f('ix_listings_ber_code'), table_name='listings')
op.drop_index(op.f('ix_listings_bedrooms'), table_name='listings')
op.drop_index(op.f('ix_listings_bathrooms'), table_name='listings')
op.drop_index(op.f('ix_listings_address'), table_name='listings')
op.drop_table('listings')
op.drop_index(op.f('ix_increment_id'), table_name='increment')
op.drop_table('increment')
op.drop_index(op.f('ix_facilities_updated_at'), table_name='facilities')
op.drop_index(op.f('ix_facilities_notes'), table_name='facilities')
op.drop_index(op.f('ix_facilities_name'), table_name='facilities')
op.drop_index(op.f('ix_facilities_id'), table_name='facilities')
op.drop_index(op.f('ix_facilities_created_at'), table_name='facilities')
op.drop_index(op.f('ix_facilities_category'), table_name='facilities')
op.drop_table('facilities')
# ### end Alembic commands ###
|
madpin/renthub
|
old/rentcrud/app/main.py
|
from typing import List
from fastapi import Depends, FastAPI, HTTPException
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import joinedload, selectinload
from sqlalchemy import select
from models import Song, Listing, Image, SongRead, ListingRead
# from database import async_session, get_session
from database import get_session
# from models import ImageWithRelationship, ListingWithRelationship,ListingReadWithImages
from models import ListingReadWithImages
# models.Base.metadata.create_all(bind=engine)
# Hopefully not needed with Alembic
app = FastAPI()
@app.get("/songs", response_model=List[SongRead])
async def get_songs(session: AsyncSession = Depends(get_session)):
result = await session.execute(select(Song))
songs = result.scalars().all()
return songs
@app.get("/listings", response_model=List[ListingReadWithImages])
async def get_songs(session: AsyncSession = Depends(get_session)):
result = await session.execute(select(Listing))
# result = await session.execute(select(Listing).options(selectinload(Listing.images))) # Works!!!!
# result = await session.execute(select(Listing, Image).where(Listing.id == Image.listing_id))
# result = await session.execute(select(Listing).options(joinedload(Listing.images)) # Failed
songs = result.scalars().all()
return songs
# @app.get("/listings", response_model=List[ListingReadWithImages])
# async def get_listings(session: AsyncSession = Depends(get_session)):
# # result = await session.execute(select(ListingWithRelationship, Image).join(Image))
# result = await session.exec(select(Listing))
# listings = result.scalars().all()
# return listings
@app.get("/listings2", response_model=List[ListingRead])
async def get_listings2(session: AsyncSession = Depends(get_session)):
    result = await session.execute(select(Listing))  # AsyncSession has .execute(), not .exec()
listings = result.scalars().all()
return listings
# @app.get("/images", response_model=List[ImageWithRelationship])
# async def get_images(session: AsyncSession = Depends(get_session)):
# result = await session.execute(select(ImageWithRelationship))
# listings = result.scalars().all()
# return listings
# ################################################################################
# ################################################################################
# ################################################################################
# ################################################################################
# ################################################################################
# @app.post("/users/", response_model=schemas.User)
# async def create_user(user: schemas.UserCreate, db: AsyncSession = Depends(get_session)):
# db_user = await crud.get_user_by_email(db, email=user.email)
# if db_user:
# raise HTTPException(status_code=400, detail="Email already registered")
# try:
# result = crud.create_user(db=db, user=user)
# await db.commit()
# except IntegrityError as ex:
# await db.rollback()
# raise ValueError("The city is already stored")
# return result
# @app.get("/users/", response_model=List[schemas.User])
# async def read_users(skip: int = 0, limit: int = 100, db: AsyncSession = Depends(get_session)):
# users = await crud.get_users(db, skip=skip, limit=limit)
# return users
# @app.get("/users/{user_id}", response_model=schemas.User)
# def read_user(user_id: int, db: AsyncSession = Depends(get_session)):
# db_user = crud.get_user(db, user_id=user_id)
# if db_user is None:
# raise HTTPException(status_code=404, detail="User not found")
# return db_user
# @app.post("/users/{user_id}/items/", response_model=schemas.Item)
# def create_item_for_user(
# user_id: int, item: schemas.ItemCreate, db: AsyncSession = Depends(get_session)
# ):
# return crud.create_user_item(db=db, item=item, user_id=user_id)
# @app.get("/listings/", response_model=List[schemas.Item])
# def read_items(skip: int = 0, limit: int = 100, db: AsyncSession = Depends(get_session)):
# items = crud.get_listings(db, skip=skip, limit=limit)
# return items
|
madpin/renthub
|
location/app/main.py
|
import os
from pathlib import Path
import uvicorn
from typing import List, Optional
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import HTMLResponse
from herepy import (
PlacesApi,
RoutingApi,
RouteMode,
GeocoderApi,
)
from custom_logger import CustomizeLogger
# from location.app.schemas import Point
from points import indeed, bank_house
import schemas
from mappings.InterestPoints import get_interenst_points
from mappings.route import get_routes
config_path=Path(__file__).with_name("custom_logger.json")
def create_app() -> FastAPI:
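    # App factory: attaches the customized logger to the app instance so
    # endpoints can log via app.logger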
app = FastAPI(title='CustomLogger', debug=False)
logger = CustomizeLogger.make_logger(config_path)
app.logger = logger
return app
app = create_app()
# app = FastAPI()
@app.post("/interest_places_nearby/", response_model=List[schemas.InterestPoint])
async def interest_places_nearby(location: schemas.Point = indeed, query: str = 'Grocery'):
ret_ = get_interenst_points(location.lat, location.long, query)
return ret_
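# Example request (hypothetical values; Dublin city centre coordinates):
#   curl -X POST 'http://localhost:8000/interest_places_nearby/?query=Grocery' \
#        -H 'Content-Type: application/json' -d '{"lat": 53.3498, "long": -6.2603}'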
@app.post("/route/", response_model=List[schemas.RouteSummary])
async def raw_route(from_point: schemas.Point = bank_house, to_point: schemas.Point = indeed):
print('###### 1')
ret_ = get_routes(from_point.lat,
from_point.long, to_point.lat, to_point.long)
print('###### 2')
print(ret_)
return ret_
@app.post("/herepy/places")
async def herepy_places(from_point: schemas.Point = bank_house, query: str = 'Grocery'):
places_api = PlacesApi(api_key=os.environ['HERE_API_KEY'])
# fetches a list of places based on a query string and country code
response = places_api.search_in_country(
coordinates=[from_point.lat,
from_point.long], query=query, country_code="IRL"
)
return response.as_dict()
@app.post("/herepy/route")
async def herepy_route(from_point: schemas.Point = bank_house, to_point: schemas.Point = indeed):
routing_api = RoutingApi(api_key=os.environ['HERE_API_KEY'])
response = routing_api.public_transport(
waypoint_a=[from_point.lat, from_point.long],
waypoint_b=[to_point.lat, to_point.long],
combine_change=True,
modes=[RouteMode.balanced, RouteMode.publicTransportTimeTable],
)
return response.as_dict()
@app.post("/herepy/address")
async def herepy_address(address: str = '17 Leinster Square, Rathmines, Dublin'):
geocoder_api = GeocoderApi(api_key=os.environ['HERE_API_KEY'])
response = geocoder_api.free_form(address)
return response.as_dict()
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000)
|
madpin/renthub
|
provider_daft/app/mappings/listing_details.py
|
import json
import re
import hashlib
import requests
from dateutil import parser
from bs4 import BeautifulSoup
import schemas
def int_from_str(text_, default_for_none=None):
    numbers = re.findall(r"\d+", text_)
    if(len(numbers) == 0):
        return default_for_none
    elif(len(numbers) == 1):
        return int(numbers[0])  # convert the matched string to an int
    else:
        raise ValueError('String with multiple numbers')
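# e.g. int_from_str('3 Bed') == 3; int_from_str('Studio', default_for_none=0) == 0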
def parse_date(text_):
if(text_ is None):
return None
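    # listing dates are day-first (European style), e.g. '01/11/2021' -> 1 Nov 2021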
return parser.parse(text_, dayfirst=True)
async def get_listing_details(url):
    response = requests.get(url)
    if(response.status_code == 200):
        html = response.text  # reuse the response body instead of fetching the page twice
    else:
        raise ReferenceError("The request didn't run correctly")
soup = BeautifulSoup(html, 'html.parser')
data = json.loads(soup.find(id="__NEXT_DATA__").text)
pageProps = data['props']['pageProps']
listing = pageProps['listing']
# print(listing)
result = schemas.DaftListing(
id=listing.get('id', ''),
title=listing.get('title', ''),
seoTitle=listing.get('seoTitle', ''),
sections=listing.get('sections', ''),
featuredLevel=listing.get('featuredLevel', ''),
lastUpdateDate=parse_date(listing.get('lastUpdateDate', None)),
numBedrooms=int_from_str(listing.get('numBedrooms', '0')),
numBathrooms=int_from_str(listing.get('numBathrooms', '0')),
propertyType=listing.get('propertyType', ''),
daftShortcode=listing.get('daftShortcode', ''),
ber=str(listing.get('ber', '')),
seoFriendlyPath=listing.get('seoFriendlyPath', ''),
category=listing.get('category', ''),
state=listing.get('state', ''),
premierPartner=listing.get('premierPartner', ''),
description=listing.get('description', ''),
facilities=[x['name'] for x in listing.get('facilities', [])],
propertyOverview=[
f"{x['label']}: {x['text']}" for x in listing.get('propertyOverview', [])],
listingViews=int_from_str(str(pageProps.get('listingViews', '0'))),
)
if('media' in listing):
result.totalImages = listing['media'].get('totalImages', '')
result.hasVideo = listing['media'].get('hasVideo', '')
result.hasVirtualTour = listing['media'].get('hasVirtualTour', '')
result.hasBrochure = listing['media'].get('hasBrochure', '')
result.images = []
for image_block in listing['media'].get('images', []):
url_600 = None
            for key, val in image_block.items():
                # size keys embed pixel dimensions; take the first variant
                # whose leading number is <= 600 and whose value is a URL
                digit_groups = re.findall(r"\d+", key)
                if((len(digit_groups) > 0) and (int(digit_groups[0]) <= 600) and val.startswith('http')):
                    url_600 = val
                    break
result.images.append(
schemas.Image(
url=next(filter(lambda y: y.startswith('http'), image_block.values())),
url_600=url_600
)
)
# Price
    if('nonFormatted' in listing and 'price' in listing['nonFormatted']):
        result.price = listing['nonFormatted']['price']
    elif('dfpTargetingValues' in pageProps and 'price' in pageProps['dfpTargetingValues']):
        result.price = pageProps['dfpTargetingValues']['price']
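    # Content fingerprint: hashing image count + description + price lets
    # callers detect when a listing changed between scrapes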
result.hash_version = hashlib.md5(
f"{result.totalImages}{result.description}{result.price}".encode('utf-8')).hexdigest()
with open(f"/data/jsons/{listing.get('id', '')}.json", 'w') as f:
json.dump(data, f, indent=2)
return result
|
madpin/renthub
|
old/main_old/alembic/versions/76c5c92496ea_improved_listings.py
|
"""Improved Listings
Revision ID: 76c5c92496ea
Revises: <PASSWORD>
Create Date: 2021-11-08 21:34:17.068563
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '76c5c92496ea'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('image_tags',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('image_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['image_id'], ['images.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_image_tags_id'), 'image_tags', ['id'], unique=False)
op.drop_column('images', 'updated_at')
op.drop_column('listing_distances', 'updated_at')
op.add_column('listings', sa.Column('ber_code', sa.String(length=10), nullable=True))
op.add_column('listings', sa.Column('price', sa.Integer(), nullable=True))
op.add_column('listings', sa.Column('rating_auto', sa.Integer(), nullable=True))
op.add_column('listings', sa.Column('rating_user', sa.Integer(), nullable=True))
op.add_column('listings', sa.Column('telegram_sent_at', sa.DateTime(), nullable=True))
op.drop_column('listings', 'updated_at')
op.drop_column('points', 'updated_at')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('points', sa.Column('updated_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))
op.add_column('listings', sa.Column('updated_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))
op.drop_column('listings', 'telegram_sent_at')
op.drop_column('listings', 'rating_user')
op.drop_column('listings', 'rating_auto')
op.drop_column('listings', 'price')
op.drop_column('listings', 'ber_code')
op.add_column('listing_distances', sa.Column('updated_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))
op.add_column('images', sa.Column('updated_at', postgresql.TIMESTAMP(), autoincrement=False, nullable=True))
op.drop_index(op.f('ix_image_tags_id'), table_name='image_tags')
op.drop_table('image_tags')
# ### end Alembic commands ###
|
madpin/renthub
|
main/app/alembic/versions/2be8e05a6a4f_initial_6.py
|
"""Initial 6
Revision ID: 2be8e05a6a4f
Revises: <PASSWORD>
Create Date: 2021-11-14 01:41:31.599029
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '2be8e05a6a4f'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('interest_points', sa.Column('is_active', sa.Boolean(), nullable=False))
op.drop_index('ix_interest_points_address', table_name='interest_points')
op.drop_index('ix_interest_points_chain_name', table_name='interest_points')
op.drop_index('ix_interest_points_distance', table_name='interest_points')
op.drop_index('ix_interest_points_listing_id', table_name='interest_points')
op.drop_index('ix_interest_points_website', table_name='interest_points')
op.drop_index('ix_interest_points_website_domain', table_name='interest_points')
op.create_index(op.f('ix_interest_points_is_active'), 'interest_points', ['is_active'], unique=False)
op.drop_constraint('interest_points_listing_id_fkey', 'interest_points', type_='foreignkey')
op.drop_column('interest_points', 'chain_name')
op.drop_column('interest_points', 'listing_id')
op.drop_column('interest_points', 'website')
op.drop_column('interest_points', 'address')
op.drop_column('interest_points', 'distance')
op.drop_column('interest_points', 'website_domain')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('interest_points', sa.Column('website_domain', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('interest_points', sa.Column('distance', sa.INTEGER(), autoincrement=False, nullable=False))
op.add_column('interest_points', sa.Column('address', sa.VARCHAR(), autoincrement=False, nullable=False))
op.add_column('interest_points', sa.Column('website', sa.VARCHAR(), autoincrement=False, nullable=True))
op.add_column('interest_points', sa.Column('listing_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.add_column('interest_points', sa.Column('chain_name', sa.VARCHAR(), autoincrement=False, nullable=True))
op.create_foreign_key('interest_points_listing_id_fkey', 'interest_points', 'listings', ['listing_id'], ['id'])
op.drop_index(op.f('ix_interest_points_is_active'), table_name='interest_points')
op.create_index('ix_interest_points_website_domain', 'interest_points', ['website_domain'], unique=False)
op.create_index('ix_interest_points_website', 'interest_points', ['website'], unique=False)
op.create_index('ix_interest_points_listing_id', 'interest_points', ['listing_id'], unique=False)
op.create_index('ix_interest_points_distance', 'interest_points', ['distance'], unique=False)
op.create_index('ix_interest_points_chain_name', 'interest_points', ['chain_name'], unique=False)
op.create_index('ix_interest_points_address', 'interest_points', ['address'], unique=False)
op.drop_column('interest_points', 'is_active')
# ### end Alembic commands ###
|
madpin/renthub
|
old/main_old/app/main.py
|
from typing import List
from fastapi import Depends, FastAPI, HTTPException
from sqlalchemy.ext.asyncio import AsyncSession  # the crud helpers are async and expect an AsyncSession
from sqlalchemy.exc import IntegrityError
# import models as models
import crud as crud
import schemas as schemas
from database import get_db
# models.Base.metadata.create_all(bind=engine)
# Hopefully not needed with Alembic
app = FastAPI()
@app.post("/users/", response_model=schemas.User)
async def create_user(user: schemas.UserCreate, db: AsyncSession = Depends(get_db)):
db_user = await crud.get_user_by_email(db, email=user.email)
if db_user:
raise HTTPException(status_code=400, detail="Email already registered")
try:
result = crud.create_user(db=db, user=user)
await db.commit()
except IntegrityError as ex:
await db.rollback()
raise ValueError("The city is already stored")
return result
@app.get("/users/", response_model=List[schemas.User])
async def read_users(skip: int = 0, limit: int = 100, db: AsyncSession = Depends(get_db)):
users = await crud.get_users(db, skip=skip, limit=limit)
return users
# @app.get("/users/{user_id}", response_model=schemas.User)
# def read_user(user_id: int, db: Session = Depends(get_db)):
# db_user = crud.get_user(db, user_id=user_id)
# if db_user is None:
# raise HTTPException(status_code=404, detail="User not found")
# return db_user
# @app.post("/users/{user_id}/items/", response_model=schemas.Item)
# def create_item_for_user(
# user_id: int, item: schemas.ItemCreate, db: Session = Depends(get_db)
# ):
# return crud.create_user_item(db=db, item=item, user_id=user_id)
# @app.get("/listings/", response_model=List[schemas.Item])
# def read_items(skip: int = 0, limit: int = 100, db: Session = Depends(get_db)):
# items = crud.get_listings(db, skip=skip, limit=limit)
# return items
|
madpin/renthub
|
main/app/alembic/versions/6c98e82ae2b5_initial_4.py
|
"""Initial 4
Revision ID: 6c98e82ae2b5
Revises: <KEY>
Create Date: 2021-11-14 00:39:11.321233
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '6c98e82ae2b5'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('listings', sa.Column('views', sa.Integer(), nullable=True))
op.create_index(op.f('ix_listings_views'), 'listings', ['views'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_listings_views'), table_name='listings')
op.drop_column('listings', 'views')
# ### end Alembic commands ###
|
madpin/renthub
|
main/app/alembic/versions/4bee0e2ca37f_initial_2.py
|
"""Initial 2
Revision ID: 4bee0e2ca37f
Revises: <PASSWORD>
Create Date: 2021-11-14 00:29:04.234694
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '4bee0e2ca37f'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
pass
# ### end Alembic commands ###
|
madpin/renthub
|
old/main_old/alembic/versions/5eeb5b496362_improved_listings_2.py
|
"""Improved Listings 2
Revision ID: <KEY>
Revises: 76c5c<PASSWORD>
Create Date: 2021-11-08 21:39:20.267455
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5eeb5b496362'
down_revision = '76c5c92496ea'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('facilities',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('name', sa.String(), nullable=True),
sa.Column('category', sa.String(), nullable=True),
sa.Column('notes', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_facilities_id'), 'facilities', ['id'], unique=False)
op.add_column('image_tags', sa.Column('updated_at', sa.DateTime(), nullable=True))
op.add_column('image_tags', sa.Column('notes', sa.String(), nullable=True))
op.add_column('images', sa.Column('updated_at', sa.DateTime(), nullable=True))
op.add_column('images', sa.Column('notes', sa.String(), nullable=True))
op.add_column('listing_distances', sa.Column('updated_at', sa.DateTime(), nullable=True))
op.add_column('listings', sa.Column('updated_at', sa.DateTime(), nullable=True))
op.add_column('listings', sa.Column('notes', sa.String(), nullable=True))
op.add_column('points', sa.Column('updated_at', sa.DateTime(), nullable=True))
op.add_column('points', sa.Column('notes', sa.String(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('points', 'notes')
op.drop_column('points', 'updated_at')
op.drop_column('listings', 'notes')
op.drop_column('listings', 'updated_at')
op.drop_column('listing_distances', 'updated_at')
op.drop_column('images', 'notes')
op.drop_column('images', 'updated_at')
op.drop_column('image_tags', 'notes')
op.drop_column('image_tags', 'updated_at')
op.drop_index(op.f('ix_facilities_id'), table_name='facilities')
op.drop_table('facilities')
# ### end Alembic commands ###
|
madpin/renthub
|
old/main_old/alembic/versions/a531e7c78fbe_create_inital_tables.py
|
"""create inital tables
Revision ID: a531e7c78fbe
Revises:
Create Date: 2021-10-31 23:32:37.346693
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'a531e7c78fbe'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(), nullable=True),
sa.Column('hashed_password', sa.String(), nullable=True),
sa.Column('is_active', sa.Boolean(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_index(op.f('ix_users_id'), 'users', ['id'], unique=False)
op.create_table('items',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(), nullable=True),
sa.Column('description', sa.String(), nullable=True),
sa.Column('owner_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['owner_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_items_description'), 'items', ['description'], unique=False)
op.create_index(op.f('ix_items_id'), 'items', ['id'], unique=False)
op.create_index(op.f('ix_items_title'), 'items', ['title'], unique=False)
op.drop_table('image')
op.drop_table('property')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('property',
sa.Column('property_id', sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column('source_url', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('bedrooms', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('bathrooms', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('total_images', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('postal_code', sa.VARCHAR(length=20), autoincrement=False, nullable=True),
sa.Column('address', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('latitude', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True),
sa.Column('longitude', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True),
sa.Column('message_sent', sa.BOOLEAN(), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('property_id', name='property_pk')
)
op.create_table('image',
sa.Column('image_id', sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column('url', sa.VARCHAR(length=255), autoincrement=False, nullable=True),
sa.Column('property_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('size_x', sa.INTEGER(), autoincrement=False, nullable=True),
sa.Column('size_y', sa.INTEGER(), autoincrement=False, nullable=True),
sa.PrimaryKeyConstraint('image_id', name='image_pk')
)
op.drop_index(op.f('ix_items_title'), table_name='items')
op.drop_index(op.f('ix_items_id'), table_name='items')
op.drop_index(op.f('ix_items_description'), table_name='items')
op.drop_table('items')
op.drop_index(op.f('ix_users_id'), table_name='users')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_table('users')
# ### end Alembic commands ###
|
madpin/renthub
|
old/rentcrud/app/models.py
|
from typing import Optional, List
from sqlmodel import SQLModel, Field, Relationship
class SongBase(SQLModel):
name: str
artist: str
year: Optional[int] = None
class Song(SongBase, table=True):
id: int = Field(primary_key=True)
class SongRead(SongBase):
id: int
class SongCreate(SongBase):
pass
class Increment(SQLModel, table=True):
id: int = Field(primary_key=True)
# #############################################################################
class ListingBase(SQLModel):
url: str
class Listing(ListingBase, table=True):
__tablename__ = 'listings'
id: int = Field(primary_key=True)
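    # 'selectin' eager loading matters with async sessions: implicit lazy
    # loads of .images would otherwise fail under SQLAlchemy's asyncio API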
images: List["Image"] = Relationship(back_populates="listing",
sa_relationship_kwargs={'lazy': 'selectin'})
class ListingRead(ListingBase):
    id: int
# #############################################################################
class ImageBase(SQLModel):
url: str
size_x: float
size_y: float
listing_id: Optional[int] = Field(default=None, foreign_key="listings.id")
class Image(ImageBase, table=True):
__tablename__ = 'images'
id: int = Field(primary_key=True)
listing: Optional[Listing] = Relationship(back_populates="images",
sa_relationship_kwargs={'lazy': 'selectin'})
class ImageRead(ImageBase):
id: int
class ImageReadWithListings(ImageRead):
listing: Optional[Listing] = None
class ListingReadWithImages(ListingRead):
images: List["ImageRead"] = []
|
madpin/renthub
|
old/main_old/alembic/versions/ae656d76b2f1_distances_4.py
|
"""distances 4
Revision ID: <KEY>
Revises: 94c2<PASSWORD>42c
Create Date: 2021-11-01 00:00:44.703478
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ae656d76b2f1'
down_revision = '94c2f18b742c'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('listing_distances',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('distance_km', sa.Float(), nullable=True),
sa.Column('listing_id', sa.Integer(), nullable=True),
sa.Column('point_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['listing_id'], ['listings.id'], ),
sa.ForeignKeyConstraint(['point_id'], ['points.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_listing_distances_id'), 'listing_distances', ['id'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_listing_distances_id'), table_name='listing_distances')
op.drop_table('listing_distances')
# ### end Alembic commands ###
|
madpin/renthub
|
old/rentcrud/alembic/versions/7343c5f48f99_initial4.py
|
"""initial4
Revision ID: 7343c5f48f99
Revises: 01b6c8ce3965
Create Date: 2021-11-01 04:32:05.270719
"""
from alembic import op
import sqlalchemy as sa
import sqlmodel
# revision identifiers, used by Alembic.
revision = '7343c5f48f99'
down_revision = '01b6c8ce3965'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('images', sa.Column('listing_id', sa.Integer(), nullable=True))
op.create_index(op.f('ix_images_listing_id'), 'images', ['listing_id'], unique=False)
op.create_foreign_key(None, 'images', 'listings', ['listing_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'images', type_='foreignkey')
op.drop_index(op.f('ix_images_listing_id'), table_name='images')
op.drop_column('images', 'listing_id')
# ### end Alembic commands ###
|
madpin/renthub
|
notification/app/main.py
|
import os
from pathlib import Path
import uvicorn
from typing import List, Optional
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import HTMLResponse
from custom_logger import CustomizeLogger
import telegram
import schemas
config_path=Path(__file__).with_name("custom_logger.json")
def create_app() -> FastAPI:
app = FastAPI(title='CustomLogger', debug=False)
logger = CustomizeLogger.make_logger(config_path)
app.logger = logger
return app
app = create_app()
# app = FastAPI()
@app.post("/send_telegram/")
async def send_telegram(message: schemas.TelegramMessage):
bot = telegram.Bot(token=os.getenv('TG_BOT_TOKEN'))
# bot_all = telegram.Bot(token=os.getenv('TG_BOT_TOKEN_ALL'))
ret_ = []
main_msg = bot.send_message(
chat_id=message.chat_id,
text=message.message,
parse_mode=telegram.ParseMode.HTML,
timeout=message.timeout,
disable_web_page_preview=message.disable_web_page_preview,
)
ret_.append(main_msg.to_dict())
if(message.images):
media_array = []
for image in message.images:
media_array.append(telegram.InputMediaPhoto(
image.url,
caption=image.caption
))
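        # Telegram's sendMediaGroup accepts at most 10 items per call,
        # so send the album in chunks of 10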
media_array_split = [media_array[i:i + 10]
for i in range(0, len(media_array), 10)]
for small_array in media_array_split:
media_msg = bot.send_media_group(
reply_to_message_id=main_msg['message_id'],
chat_id=message.chat_id,
media=small_array,
timeout=message.timeout,
)
ret_.append([x.to_dict() for x in media_msg])
return ret_
# @app.post("/uploadfiles/")
# async def create_upload_files(files: Optional[List[UploadFile]] = File(None)):
# return {"filenames": [file.filename for file in files]}
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000)
|
madpin/renthub
|
provider_daft/app/main.py
|
# import logging
from pathlib import Path
from enum import Enum
import uvicorn
from fastapi import FastAPI
from custom_logger import CustomizeLogger
import schemas
from mappings.daft_listings import get_daft_search_result
from mappings.listing_details import get_listing_details
# logger = logging.getLogger(__name__)
config_path=Path(__file__).with_name("custom_logger.json")
def create_app() -> FastAPI:
app = FastAPI(title='CustomLogger', debug=False)
logger = CustomizeLogger.make_logger(config_path)
app.logger = logger
return app
app = create_app()
# app = FastAPI()
@app.get("/search_result/", response_model=schemas.SearchResultList)
async def search_result():
result = await get_daft_search_result()
return result
class DaftMethodListing(str, Enum):
json_details = "json_details"
selenium = "selenium"
@app.get("/listing_details/", response_model=schemas.DaftListing)
async def daft_listing(url, method: DaftMethodListing):
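    # NOTE: `method` is accepted but not yet used; get_listing_details
    # always parses the page's embedded __NEXT_DATA__ JSON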
result = await get_listing_details(url)
return result
if __name__ == "__main__":
uvicorn.run(app, host="0.0.0.0", port=8000)
|
funky23exe/habitat-sim
|
examples/demo_runner.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import multiprocessing
import os
import random
import time
from enum import Enum
import numpy as np
from PIL import Image
from settings import default_sim_settings, make_cfg
import habitat_sim
import habitat_sim.agent
import habitat_sim.bindings as hsim
from habitat_sim.physics import MotionType
from habitat_sim.utils.common import (
d3_40_colors_rgb,
download_and_unzip,
quat_from_angle_axis,
)
_barrier = None
class DemoRunnerType(Enum):
BENCHMARK = 1
EXAMPLE = 2
class DemoRunner:
def __init__(self, sim_settings, simulator_demo_type):
if simulator_demo_type == DemoRunnerType.EXAMPLE:
self.set_sim_settings(sim_settings)
def set_sim_settings(self, sim_settings):
self._sim_settings = sim_settings.copy()
def save_color_observation(self, obs, total_frames):
color_obs = obs["color_sensor"]
color_img = Image.fromarray(color_obs, mode="RGBA")
color_img.save("test.rgba.%05d.png" % total_frames)
def save_semantic_observation(self, obs, total_frames):
semantic_obs = obs["semantic_sensor"]
semantic_img = Image.new("P", (semantic_obs.shape[1], semantic_obs.shape[0]))
semantic_img.putpalette(d3_40_colors_rgb.flatten())
semantic_img.putdata((semantic_obs.flatten() % 40).astype(np.uint8))
semantic_img.save("test.sem.%05d.png" % total_frames)
def save_depth_observation(self, obs, total_frames):
if self._sim_settings["depth_sensor"]:
depth_obs = obs["depth_sensor"]
depth_img = Image.fromarray(
(depth_obs / 10 * 255).astype(np.uint8), mode="L"
)
depth_img.save("test.depth.%05d.png" % total_frames)
def output_semantic_mask_stats(self, obs, total_frames):
semantic_obs = obs["semantic_sensor"]
counts = np.bincount(semantic_obs.flatten())
total_count = np.sum(counts)
print(f"Pixel statistics for frame {total_frames}")
for object_i, count in enumerate(counts):
sem_obj = self._sim.semantic_scene.objects[object_i]
cat = sem_obj.category.name()
pixel_ratio = count / total_count
if pixel_ratio > 0.01:
print(f"obj_id:{sem_obj.id},category:{cat},pixel_ratio:{pixel_ratio}")
def init_agent_state(self, agent_id):
# initialize the agent at a random start state
agent = self._sim.initialize_agent(agent_id)
start_state = agent.get_state()
# force starting position on first floor (try 100 samples)
num_start_tries = 0
while start_state.position[1] > 0.5 and num_start_tries < 100:
start_state.position = self._sim.pathfinder.get_random_navigable_point()
num_start_tries += 1
agent.set_state(start_state)
if not self._sim_settings["silent"]:
print(
"start_state.position\t",
start_state.position,
"start_state.rotation\t",
start_state.rotation,
)
return start_state
def compute_shortest_path(self, start_pos, end_pos):
self._shortest_path.requested_start = start_pos
self._shortest_path.requested_end = end_pos
self._sim.pathfinder.find_path(self._shortest_path)
print("shortest_path.geodesic_distance", self._shortest_path.geodesic_distance)
def init_physics_test_scene(self, num_objects):
object_position = np.array(
[-0.569043, 2.04804, 13.6156]
) # above the castle table
# turn agent toward the object
print("turning agent toward the physics!")
agent_state = self._sim.get_agent(0).get_state()
agent_to_obj = object_position - agent_state.position
agent_local_forward = np.array([0, 0, -1.0])
flat_to_obj = np.array([agent_to_obj[0], 0.0, agent_to_obj[2]])
flat_dist_to_obj = np.linalg.norm(flat_to_obj)
flat_to_obj /= flat_dist_to_obj
# move the agent closer to the objects if too far (this will be projected back to floor in set)
if flat_dist_to_obj > 3.0:
agent_state.position = object_position - flat_to_obj * 3.0
# unit y normal plane for rotation
det = (
flat_to_obj[0] * agent_local_forward[2]
- agent_local_forward[0] * flat_to_obj[2]
)
turn_angle = math.atan2(det, np.dot(agent_local_forward, flat_to_obj))
agent_state.rotation = quat_from_angle_axis(turn_angle, np.array([0, 1.0, 0]))
# need to move the sensors too
for sensor in agent_state.sensor_states:
agent_state.sensor_states[sensor].rotation = agent_state.rotation
agent_state.sensor_states[
sensor
].position = agent_state.position + np.array([0, 1.5, 0])
self._sim.get_agent(0).set_state(agent_state)
# hard coded dimensions of maximum bounding box for all 3 default objects:
max_union_bb_dim = np.array([0.125, 0.19, 0.26])
# add some objects in a grid
object_lib_size = self._sim.get_physics_object_library_size()
object_init_grid_dim = (3, 1, 3)
object_init_grid = {}
assert (
object_lib_size > 0
), "!!!No objects loaded in library, aborting object instancing example!!!"
# clear the objects if we are re-running this initializer
for old_obj_id in self._sim.get_existing_object_ids():
self._sim.remove_object(old_obj_id)
for obj_id in range(num_objects):
# rand_obj_index = random.randint(0, object_lib_size - 1)
# rand_obj_index = 0 # overwrite for specific object only
rand_obj_index = self._sim_settings.get("test_object_index")
if rand_obj_index < 0: # get random object on -1
rand_obj_index = random.randint(0, object_lib_size - 1)
object_init_cell = (
random.randint(-object_init_grid_dim[0], object_init_grid_dim[0]),
random.randint(-object_init_grid_dim[1], object_init_grid_dim[1]),
random.randint(-object_init_grid_dim[2], object_init_grid_dim[2]),
)
while object_init_cell in object_init_grid:
object_init_cell = (
random.randint(-object_init_grid_dim[0], object_init_grid_dim[0]),
random.randint(-object_init_grid_dim[1], object_init_grid_dim[1]),
random.randint(-object_init_grid_dim[2], object_init_grid_dim[2]),
)
object_id = self._sim.add_object(rand_obj_index)
object_init_grid[object_init_cell] = object_id
object_offset = np.array(
[
max_union_bb_dim[0] * object_init_cell[0],
max_union_bb_dim[1] * object_init_cell[1],
max_union_bb_dim[2] * object_init_cell[2],
]
)
self._sim.set_translation(object_position + object_offset, object_id)
print(
"added object: "
+ str(object_id)
+ " of type "
+ str(rand_obj_index)
+ " at: "
+ str(object_position + object_offset)
+ " | "
+ str(object_init_cell)
)
def do_time_steps(self):
total_sim_step_time = 0.0
total_frames = 0
start_time = time.time()
action_names = list(
self._cfg.agents[self._sim_settings["default_agent"]].action_space.keys()
)
# load an object and position the agent for physics testing
if self._sim_settings["enable_physics"]:
self.init_physics_test_scene(
num_objects=self._sim_settings.get("num_objects")
)
print("active object ids: " + str(self._sim.get_existing_object_ids()))
time_per_step = []
while total_frames < self._sim_settings["max_frames"]:
if total_frames == 1:
start_time = time.time()
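                # restarting the timer after the first frame excludes
                # one-time warm-up cost from the FPS measurement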
action = random.choice(action_names)
if not self._sim_settings["silent"]:
print("action", action)
start_step_time = time.time()
# apply kinematic or dynamic control to all objects based on their MotionType
if self._sim_settings["enable_physics"]:
obj_ids = self._sim.get_existing_object_ids()
for obj_id in obj_ids:
rand_nudge = np.random.uniform(-0.05, 0.05, 3)
if self._sim.get_object_motion_type(obj_id) == MotionType.KINEMATIC:
                    # TODO: just bind the translate function instead of emulating it here.
cur_pos = self._sim.get_translation(obj_id)
self._sim.set_translation(cur_pos + rand_nudge, obj_id)
elif self._sim.get_object_motion_type(obj_id) == MotionType.DYNAMIC:
self._sim.apply_force(rand_nudge, np.zeros(3), obj_id)
# get "interaction" time
total_sim_step_time += time.time() - start_step_time
observations = self._sim.step(action)
time_per_step.append(time.time() - start_step_time)
# get simulation step time without sensor observations
total_sim_step_time += self._sim._previous_step_time
if self._sim_settings["save_png"]:
if self._sim_settings["color_sensor"]:
self.save_color_observation(observations, total_frames)
if self._sim_settings["depth_sensor"]:
self.save_depth_observation(observations, total_frames)
if self._sim_settings["semantic_sensor"]:
self.save_semantic_observation(observations, total_frames)
state = self._sim.last_state()
if not self._sim_settings["silent"]:
print("position\t", state.position, "\t", "rotation\t", state.rotation)
if self._sim_settings["compute_shortest_path"]:
self.compute_shortest_path(
state.position, self._sim_settings["goal_position"]
)
if self._sim_settings["compute_action_shortest_path"]:
self._action_shortest_path.requested_start.position = state.position
self._action_shortest_path.requested_start.rotation = state.rotation
self._action_pathfinder.find_path(self._action_shortest_path)
print(
"len(action_shortest_path.actions)",
len(self._action_shortest_path.actions),
)
if (
self._sim_settings["semantic_sensor"]
and self._sim_settings["print_semantic_mask_stats"]
):
self.output_semantic_mask_stats(observations, total_frames)
total_frames += 1
end_time = time.time()
perf = {}
perf["total_time"] = end_time - start_time
perf["frame_time"] = perf["total_time"] / total_frames
perf["fps"] = 1.0 / perf["frame_time"]
perf["time_per_step"] = time_per_step
perf["avg_sim_step_time"] = total_sim_step_time / total_frames
return perf
def print_semantic_scene(self):
if self._sim_settings["print_semantic_scene"]:
scene = self._sim.semantic_scene
print(f"House center:{scene.aabb.center} dims:{scene.aabb.sizes}")
for level in scene.levels:
print(
f"Level id:{level.id}, center:{level.aabb.center},"
f" dims:{level.aabb.sizes}"
)
for region in level.regions:
print(
f"Region id:{region.id}, category:{region.category.name()},"
f" center:{region.aabb.center}, dims:{region.aabb.sizes}"
)
for obj in region.objects:
print(
f"Object id:{obj.id}, category:{obj.category.name()},"
f" center:{obj.aabb.center}, dims:{obj.aabb.sizes}"
)
input("Press Enter to continue...")
def init_common(self):
self._cfg = make_cfg(self._sim_settings)
scene_file = self._sim_settings["scene"]
if (
not os.path.exists(scene_file)
and scene_file == default_sim_settings["test_scene"]
):
print(
"Test scenes not downloaded locally, downloading and extracting now..."
)
download_and_unzip(default_sim_settings["test_scene_data_url"], ".")
print("Downloaded and extracted test scenes data.")
self._sim = habitat_sim.Simulator(self._cfg)
random.seed(self._sim_settings["seed"])
self._sim.seed(self._sim_settings["seed"])
# initialize the agent at a random start state
start_state = self.init_agent_state(self._sim_settings["default_agent"])
return start_state
def _bench_target(self, _idx=0):
self.init_common()
best_perf = None
for _ in range(3):
if _barrier is not None:
_barrier.wait()
if _idx == 0:
_barrier.reset()
perf = self.do_time_steps()
# The variance introduced between runs is due to the worker threads
# being interrupted a different number of times by the kernel, not
# due to difference in the speed of the code itself. The most
# accurate representation of the performance would be a run where
# the kernel never interrupted the workers, but this isn't
# feasible, so we just take the run with the least number of
# interrupts (the fastest) instead.
if best_perf is None or perf["frame_time"] < best_perf["frame_time"]:
best_perf = perf
self._sim.close()
del self._sim
return best_perf
@staticmethod
def _pool_init(b):
global _barrier
_barrier = b
def benchmark(self, settings):
self.set_sim_settings(settings)
nprocs = settings["num_processes"]
barrier = multiprocessing.Barrier(nprocs)
with multiprocessing.Pool(
nprocs, initializer=self._pool_init, initargs=(barrier,)
) as pool:
perfs = pool.map(self._bench_target, range(nprocs))
res = {k: [] for k in perfs[0].keys()}
for p in perfs:
for k, v in p.items():
res[k] += [v]
return dict(
frame_time=sum(res["frame_time"]),
fps=sum(res["fps"]),
total_time=sum(res["total_time"]) / nprocs,
avg_sim_step_time=sum(res["avg_sim_step_time"]) / nprocs,
)
def example(self):
start_state = self.init_common()
# initialize and compute shortest path to goal
if self._sim_settings["compute_shortest_path"]:
self._shortest_path = hsim.ShortestPath()
self.compute_shortest_path(
start_state.position, self._sim_settings["goal_position"]
)
# set the goal headings, and compute action shortest path
if self._sim_settings["compute_action_shortest_path"]:
agent_id = self._sim_settings["default_agent"]
goal_headings = self._sim_settings["goal_headings"]
self._action_pathfinder = self._sim.make_action_pathfinder(agent_id)
self._action_shortest_path = hsim.MultiGoalActionSpaceShortestPath()
self._action_shortest_path.requested_start.position = start_state.position
self._action_shortest_path.requested_start.rotation = start_state.rotation
# explicitly reset the start position
self._shortest_path.requested_start = start_state.position
# initialize the requested ends when computing the action shortest path
next_goal_idx = 0
while next_goal_idx < len(goal_headings):
sampled_pos = self._sim.pathfinder.get_random_navigable_point()
self._shortest_path.requested_end = sampled_pos
if (
self._sim.pathfinder.find_path(self._shortest_path)
and self._shortest_path.geodesic_distance < 5.0
and self._shortest_path.geodesic_distance > 2.5
):
self._action_shortest_path.requested_ends.append(
hsim.ActionSpacePathLocation(
sampled_pos, goal_headings[next_goal_idx]
)
)
next_goal_idx += 1
self._shortest_path.requested_end = self._sim_settings["goal_position"]
self._sim.pathfinder.find_path(self._shortest_path)
self._action_pathfinder.find_path(self._action_shortest_path)
print(
"len(action_shortest_path.actions)",
len(self._action_shortest_path.actions),
)
# print semantic scene
self.print_semantic_scene()
perf = self.do_time_steps()
self._sim.close()
del self._sim
return perf
|
vivamoto/challenges
|
longest_word/longest_word.py
|
"""
Longest Word
Have the function LongestWord(sen) take the sen parameter being passed and
return the largest word in the string. If there are two or more words that are
the same length, return the first word from the string with that length.
Ignore punctuation and assume sen will not be empty.
Examples
Input: "fun&!! time"
Output: time
Input: "I love dogs"
Output: love
"""
import re
def LongestWord(sen):
res = ''
    # Split on any non-alphanumeric character (the original character class
    # wrongly treated '|' and '^' as literal word characters)
    for i in re.split(r'[^A-Za-z0-9]', sen):
if len(i) > len(res):
res = i
return res
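# A compact alternative sketch: re.findall keeps the alphanumeric runs,
# and max(..., key=len) returns the first word of maximal length, which
# matches the tie-breaking rule above.
def LongestWordCompact(sen):
    return max(re.findall(r'[A-Za-z0-9]+', sen), key=len)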
if __name__ == '__main__':
input = "fun&!! time"
print(LongestWord(input))
#Output: time
input = "I love dogs"
print(LongestWord(input))
#Output: love
input = "0123456789 123456"
print(LongestWord(input))
#Output: 0123456789
|
vivamoto/challenges
|
codeland_username_validation/codeland_username_validation.py
|
"""
Codeland Username Validation
Have the function CodelandUsernameValidation(str) take the str parameter being
passed and determine if the string is a valid username according to the following
rules:
1. The username is between 4 and 25 characters.
2. It must start with a letter.
3. It can only contain letters, numbers, and the underscore character.
4. It cannot end with an underscore character.
If the username is valid then your program should return the string 'true',
otherwise return the string 'false'.
Examples
Input: "aa_"
Output: false
Input: "u__hello_world123"
Output: true
"""
import re
def CodelandUsernameValidation(strParam):
    # 1. The username is between 4 and 25 characters.
    if len(strParam) < 4 or len(strParam) > 25:
        return 'false'
    # 2. It must start with a letter.
    if re.match('[A-Za-z]', strParam[0]) is None:
        return 'false'
    # 3. It can only contain letters, numbers, and the underscore character.
    #    (check the whole string, not just the last character)
    if re.match('[A-Za-z0-9_]*$', strParam) is None:
        return 'false'
    # 4. It cannot end with an underscore character.
    if strParam[-1] == '_':
        return 'false'
    # 5. Return 'true' if all tests pass
    return 'true'
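# Equivalent single-regex sketch: the pattern encodes all four rules at
# once (leading letter, 2-23 middle characters, non-underscore last
# character, total length 4 to 25).
def CodelandUsernameValidationRegex(strParam):
    if re.match(r'[A-Za-z][A-Za-z0-9_]{2,23}[A-Za-z0-9]$', strParam):
        return 'true'
    return 'false'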
if __name__ == '__main__':
input = "aa_"
print(CodelandUsernameValidation(input))
# Output: false
input = "u__hello_world123"
print(CodelandUsernameValidation(input))
# Output: true
|
vivamoto/challenges
|
questions_marks/question_marks.py
|
"""
Questions Marks
Have the function QuestionsMarks(str) take the str string parameter, which will
contain single digit numbers, letters, and question marks, and check if there
are exactly 3 question marks between every pair of two numbers that add up to 10.
If so, then your program should return the string true, otherwise it should
return the string false. If there aren't any two numbers that add up to 10 in
the string, then your program should return false as well.
For example: if str is "arrb6???4xxbl5???eee5" then your program should return
'true' because there are exactly 3 question marks between 6 and 4, and 3
question marks between 5 and 5 at the end of the string.
Examples
Input: "aa6?9"
Output: false
Input: "acc?7??sss?3rr1??????5"
Output: true
"""
import re
def QuestionsMarks(strParam):
    prev = None      # last digit seen so far
    count = 0        # question marks counted since the last digit
    flag = 'false'   # becomes 'true' once a valid pair is found
    for ch in strParam:
        if ch in '0123456789':
            # Every pair of digits summing to 10 must be separated by
            # exactly 3 question marks, otherwise the answer is 'false'
            if prev is not None and prev + int(ch) == 10:
                if count != 3:
                    return 'false'
                flag = 'true'
            prev = int(ch)
            count = 0
        elif ch == '?':
            count += 1
    return flag
if __name__ == '__main__':
input = "aa6?9"
print(QuestionsMarks(input))
# Output: false
input = "acc?7??sss?3rr1??????5"
print(QuestionsMarks(input))
# Output: true
|
vivamoto/challenges
|
min_window_substring/min_window_substring.py
|
""":cvar
Title: React Button Toggle
Difficulty: Easy
Solutions: 4327
Maximum Score: 10
Description: For this challenge you will be editing a small React application.
React Button Toggle
We provided some simple React template code. Your goal is to modify the component so that you can properly toggle the button to switch between an ON state and an OFF state. When the button is on and it is clicked, it turns off and the text within it changes from ON to OFF and vice versa. Make use of component state for this challenge.
You are free to add classes and styles, but make sure you leave the element ID's as they are. Submit your code once it is complete and our system will validate your output.
"""
|
vivamoto/challenges
|
first_factorial/first_factorial.py
|
"""
First Factorial
Have the function FirstFactorial(num) take the num parameter being passed and
return the factorial of it. For example: if num = 4, then your program should
return (4 * 3 * 2 * 1) = 24. For the test cases, the range will be between 1 and
18 and the input will always be an integer.
Examples
Input: 4
Output: 24
Input: 8
Output: 40320
"""
# Iterative solution
def FirstFactorial(num):
n = 1
for i in range(1, num + 1):
n *= i
return n
# Recursive solution
def factorial(num):
if num == 1:
return 1
return num * factorial(num - 1)
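# For reference, the standard library provides the same computation
# (a sketch; equivalent for the stated input range of 1 to 18):
import math
def FirstFactorialStdlib(num):
    return math.factorial(num)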
if __name__ == '__main__':
input = 4
print(FirstFactorial(input))
print(factorial(input))
# Output: 24
input = 8
print(FirstFactorial(input))
print(factorial(input))
# Output: 40320
|
vivamoto/challenges
|
find_intersection/find_intersection.py
|
<gh_stars>0
"""
Find Intersection
Have the function FindIntersection(strArr) read the array of strings stored in
strArr which will contain 2 elements: the first element will represent a list of
comma-separated numbers sorted in ascending order, the second element will
represent a second list of comma-separated numbers (also sorted).
Your goal is to return a comma-separated string containing the numbers that
occur in elements of strArr in sorted order. If there is no intersection, return
the string 'false'.
Examples
Input: ["1, 3, 4, 7, 13", "1, 2, 4, 13, 15"]
Output: 1,4,13
Input: ["1, 3, 9, 10, 17, 18", "1, 4, 9, 10"]
Output: 1,9,10
"""
def FindIntersection(strArr):
# code goes here
a1 = strArr[0].split(', ')
a2 = strArr[1].split(', ')
result = []
for element in a1:
if element in a2:
result.append(element)
if len(result) == 0:
return 'false'
else:
return ','.join(result)
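# A set-based sketch: a set of the second list gives O(1) membership tests,
# and the comprehension preserves the sorted order of the first list.
def FindIntersectionSet(strArr):
    second = set(strArr[1].split(', '))
    common = [x for x in strArr[0].split(', ') if x in second]
    return ','.join(common) if common else 'false'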
if __name__ == '__main__':
input = ["1, 3, 4, 7, 13", "1, 2, 4, 13, 15"]
print(FindIntersection(input))
    # Output: 1,4,13
input = ["1, 3, 9, 10, 17, 18", "1, 4, 9, 10"]
print(FindIntersection(input))
    # Output: 1,9,10
|
vivamoto/challenges
|
first_reverse/first_reverse.py
|
"""
First Reverse
Have the function FirstReverse(str) take the str parameter being passed and
return the string in reversed order. For example: if the input string is
"<NAME>" then your program should return the string "sredoC dna
dlroW olleH".
Examples
Input: "<NAME>"
Output: etybredoc
Input: "I Love Code"
Output: edoC evoL I
"""
def FirstReverse(strParam):
res = ''
for i in range(len(strParam)-1, -1, -1):
res += strParam[i]
return res
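# Idiomatic one-line sketch: slicing with a negative step reverses the string.
def FirstReverseSlice(strParam):
    return strParam[::-1]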
if __name__ == '__main__':
input = "<NAME>"
print(FirstReverse(input))
# Output: sredoC dna dlroW olleH
input = "I Love Code"
print(FirstReverse(input))
#Output: edoC evoL I
|
vivamoto/challenges
|
permutation/permutation.py
|
<gh_stars>0
"""
Permutation Step
Difficulty: Medium
Maximum Score: 10
Description: For this challenge you will determine the next greatest number
using the same numbers of a given argument.
Have the function PermutationStep(num) take the num parameter being passed and
return the next number greater than num using the same digits.
For example: if num is 123 return 132, if it's 12453 return 12534. If a number
has no greater permutations, return -1 (ie. 999).
Examples
Input: 11121
Output: 11211
Input: 41352
Output: 41523
"""
import itertools as it
def PermutationStep(num):
    # Classic "next permutation" algorithm: returns -1 when num is already
    # the largest arrangement of its digits (e.g. 999).
    digits = list(str(num))
    # 1. Find the rightmost digit that is smaller than its successor
    i = len(digits) - 2
    while i >= 0 and digits[i] >= digits[i + 1]:
        i -= 1
    # Digits in descending order: no greater permutation exists
    if i < 0:
        return -1
    # 2. Swap it with the smallest larger digit to its right
    j = len(digits) - 1
    while digits[j] <= digits[i]:
        j -= 1
    digits[i], digits[j] = digits[j], digits[i]
    # 3. Reverse the suffix so it becomes the smallest arrangement
    digits[i + 1:] = reversed(digits[i + 1:])
    return int(''.join(digits))
# Use of itertools library
def PermutationItertools(num):
    # 1. Enumerate every permutation of the digits (n! of them, so this
    #    is only practical for short numbers)
    next_num = []
    for perm in it.permutations(str(num)):
        # 2. Convert the tuple of digit characters back to a number
        n = int(''.join(perm))
        # 3. Save numbers larger than num
        if n > num:
            next_num.append(n)
    # 4. Return the smallest such number, or -1 if there is none
    if len(next_num) == 0:
        return -1
    return min(next_num)
# Demo
if __name__ == '__main__':
    # My solution
    print(PermutationStep(11121))
    print(PermutationStep(41352))
    print(PermutationStep(12345))
    print(PermutationStep(53241))
    print(PermutationStep(999))
    print()
# With itertools library
print(PermutationItertools(11121))
print(PermutationItertools(41352))
print(PermutationItertools(12345))
print(PermutationItertools(53241))
print(PermutationItertools(999))
|
vivamoto/challenges
|
tree_constructor/tree_constructor.py
|
"""
Tree Constructor
Difficulty: Medium
Maximum Score: 10
Description: For this challenge you will determine if an array of integer pairs
can form a binary tree properly.
Have the function TreeConstructor(strArr) take the array of strings stored in
strArr, which will contain pairs of integers in the following format: (i1,i2),
where i1 represents a child node in a tree and the second integer i2 signifies
that it is the parent of i1.
For example: if strArr is ["(1,2)", "(2,4)", "(7,2)"], then this forms the
following tree:
4
/
2
/ \
1 7
which you can see forms a proper binary tree. Your program should, in this case,
return the string true because a valid binary tree can be formed. If a proper
binary tree cannot be formed with the integer pairs, then return the string false.
All of the integers within the tree will be unique, which means there can only
be one node in the tree with the given integer value.
Examples
Input: ["(1,2)", "(2,4)", "(5,7)", "(7,2)", "(9,5)"]
Output: true
Input: ["(1,2)", "(3,2)", "(2,12)", "(5,2)"]
Output: false
More examples in the end of this file.
"""
import re
def TreeConstructor(strArr):
    # 0. Split each "(child,parent)" pair into child and parent lists
    son = []
    father = []
    for p in strArr:
        _, n1, n2, _ = re.split(r'\(|,|\)', p)
        son.append(n1)
        father.append(n2)
    # 1. A parent can have at most 2 children
    for i in father:
        if father.count(i) > 2:
            return 'false'
    # 2. A child can have only 1 parent
    for i in son:
        if son.count(i) > 1:
            return 'false'
    # 3. A root is a parent that is not a child of any other node
    root = {i for i in father if i not in son}
    # 4. A proper binary tree has exactly one root
    if len(root) != 1:
        return 'false'
    # If all tests pass, the pairs form a single binary tree
    return 'true'
# Demo
if __name__ == '__main__':
input = ["(1,2)", "(2,4)", "(5,7)", "(7,2)", "(9,5)"]
print(TreeConstructor(input))
input = ["(1,2)", "(3,2)", "(2,12)", "(5,2)"]
print(TreeConstructor(input))
input = ["(2,7)", "(5,6)", "(11,6)", "(6,7)", "(4,9)", "(9,5)", "(5,2)", "(7,0)"]
print(TreeConstructor(input))
input = ["(1,2)", "(2,3)", "(4,3)", "(3,5)", "(6,7)", "(8,7)", "(7,5)"]
print(TreeConstructor(input))
input = ["(1,2)", "(2,3)", "(4,3)", "(0,5)", "(6,7)", "(8,7)", "(7,5)"]
print(TreeConstructor(input))
|
raman325/sensor.radarr_upcoming_media
|
custom_components/radarr_upcoming_media/sensor.py
|
"""
Home Assistant component to feed the Upcoming Media Lovelace card with
Radarr upcoming releases.
https://github.com/raman325/sensor.radarr_upcoming_media
https://github.com/custom-cards/upcoming-media-card
"""
from datetime import date, datetime, timedelta
import logging
from aiopyarr.exceptions import ArrException
from aiopyarr.models.host_configuration import PyArrHostConfiguration
from aiopyarr.radarr_client import RadarrCalendar, RadarrClient
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import CONF_API_KEY, CONF_HOST, CONF_PORT, CONF_SSL
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import voluptuous as vol
__version__ = "0.3.6"
_LOGGER = logging.getLogger(__name__)
CONF_DAYS = "days"
CONF_URLBASE = "urlbase"
CONF_THEATERS = "theaters"
CONF_MAX = "max"
FIRST_CARD = {
"title_default": "$title",
"line1_default": "$release",
"line2_default": "$genres",
"line3_default": "$rating - $runtime",
"line4_default": "$studio",
"icon": "mdi:arrow-down-bold",
}
RELEASE_TEXT_MAP = {
"digitalRelease": "Available digitally on $day, $date",
"physicalRelease": "Available physically on $day, $date",
"inCinemas": "In theaters on $day, $date",
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_DAYS, default=60): vol.Coerce(int),
vol.Optional(CONF_HOST, default="localhost"): cv.string,
vol.Optional(CONF_PORT, default=7878): cv.port,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_URLBASE): cv.string,
vol.Optional(CONF_THEATERS, default=True): cv.boolean,
vol.Optional(CONF_MAX, default=5): vol.Coerce(int),
}
)
def setup_platform(hass, config, add_devices, discovery_info=None):
add_devices([RadarrUpcomingMediaSensor(hass, config)], True)
class RadarrUpcomingMediaSensor(SensorEntity):
def __init__(self, hass, conf):
url_base = conf.get(CONF_URLBASE)
if url_base:
url_base = "{}/".format(url_base.strip("/"))
self._host_config = PyArrHostConfiguration(
api_token=conf[CONF_API_KEY],
hostname=conf[CONF_HOST],
port=conf[CONF_PORT],
ssl=conf[CONF_SSL],
base_api_path=url_base,
)
self.client = RadarrClient(
self._host_config, session=async_get_clientsession(hass)
)
self.days = conf.get(CONF_DAYS)
self.theaters = conf.get(CONF_THEATERS)
self.max_items = conf.get(CONF_MAX)
self._attr_available = True
self._attr_name = "Radarr Upcoming Media"
def _get_air_date_key(self, movie: RadarrCalendar):
"""Return air date key."""
out_of_bounds_date = date.today() + timedelta(days=(self.days + 1))
keys = ["digitalRelease", "physicalRelease"]
if self.theaters:
keys.append("inCinemas")
for key in keys:
try:
release_date = datetime.date(getattr(movie, key))
except AttributeError:
release_date = out_of_bounds_date
if release_date >= date.today() and release_date < out_of_bounds_date:
return key
return None
def _get_rating(self, movie: RadarrCalendar):
"""Return rating."""
for key in ("tmdb", "imdb", "rottenTomatoes", "metacritic"):
try:
return "\N{BLACK STAR} " + str(getattr(movie.ratings, key).value)
except AttributeError:
continue
return ""
async def async_update(self):
start = datetime.combine(date.today(), datetime.min.time())
end = start + timedelta(days=self.days)
try:
movies = await self.client.async_get_calendar(
start_date=start, end_date=end
)
except ArrException as err:
if self._attr_available:
_LOGGER.warning(err)
self._attr_available = False
return
else:
self._attr_available = True
movies = [movie for movie in movies if self._get_air_date_key(movie)][
: self.max_items
]
self._attr_native_value = len(movies)
self._attr_extra_state_attributes = {"data": [FIRST_CARD]}
for movie in movies:
air_date_key = self._get_air_date_key(movie)
movie_data = {
"airdate": datetime.date(getattr(movie, air_date_key)).isoformat(),
"release": RELEASE_TEXT_MAP[air_date_key],
"rating": self._get_rating(movie),
"flag": movie.hasFile,
"title": movie.attributes.get("title", ""),
"runtime": movie.attributes.get("runtime", ""),
"studio": movie.attributes.get("studio", ""),
"genres": ", ".join(movie.attributes.get("genres", [])),
}
movie_data["poster"] = next(
(image.url for image in movie.images if image.coverType == "poster"),
"",
)
movie_data["fanart"] = next(
(image.url for image in movie.images if image.coverType == "fanart"),
"",
)
self._attr_extra_state_attributes["data"].append(movie_data)
|
M3nin0/geo-knowledge-hub-rt
|
geo-metadata-previewer/geo_metadata_previewer/config.py
|
<filename>geo-metadata-previewer/geo_metadata_previewer/config.py
# -*- coding: utf-8 -*-
#
# Copyright (C) 2022 GEO Secretariat.
#
# GEO-Metadata-Previewer is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Geospatial metadata previewer support for the GEO Knowledge Hub"""
# TODO: This is an example file. Remove it if your package does not use any
# extra configuration variables.
GEO_METADATA_PREVIEWER_DEFAULT_VALUE = 'foobar'
"""Default value for the application."""
GEO_METADATA_PREVIEWER_BASE_TEMPLATE = 'geo_metadata_previewer/base.html'
"""Default base template for the demo page."""
|
M3nin0/geo-knowledge-hub-rt
|
geo-metadata-previewer/geo_metadata_previewer/__init__.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2022 GEO Secretariat.
#
# GEO-Metadata-Previewer is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Geospatial metadata previewer support for the GEO Knowledge Hub"""
from .ext import GEOMetadataPreviewer
from .version import __version__
__all__ = ('__version__', 'GEOMetadataPreviewer')
|
juancruzgassoloncan/Udacity-Robo-nanodegree
|
src/rover/project/code/decision_back.py
|
<reponame>juancruzgassoloncan/Udacity-Robo-nanodegree<gh_stars>1-10
import numpy as np
import time
import copy
from helper import *
# This is where you can build a decision tree for determining throttle, brake and steer
# def rad2deg(rad):
# return (rad * 180.0) / np.pi
#
#
# def deg2rad(deg):
# return (deg * np.pi) / 180.0
#
#
# def distance_to_rock(Rover):
# if len(Rover.rock_dists):
# return np.min(Rover.rock_dists)
# else:
# return 1e9
#
#
# def is_obstacle_ahead(Rover, range=20, bearing=0):
# idx_in_front = np.where((np.abs(Rover.obs_angles - bearing) < deg2rad(15))
# & (Rover.obs_dists < range))[0]
# return len(idx_in_front)
def get_polar_points(Rover):
if Rover.nav_dists is not None:
dist = Rover.nav_dists
else:
dist = 0
if Rover.nav_angles is not None:
ang = Rover.nav_angles
else:
ang = 0
return np.array((dist, ang)).T
def get_frontal_distance(polar_points, arc=10):
central_view = [d for d, a in polar_points if rad2deg(a) < abs(arc)]
return np.array(central_view)
def get_wall_distance(polar_points, arc=15):
wall_d = [d for d, a in polar_points if rad2deg(a) < -arc]
wall_i = [d for d, a in polar_points if rad2deg(a) > arc]
return np.array(wall_i) if len(wall_i) else np.array(0),\
np.array(wall_d) if len(wall_d) else np.array(0)
def get_near_periferics(polar_points, alpha):
near_angles = [a for d, a in polar_points if d < alpha]
return np.array(near_angles)
def detec_rock(Rover):
global r_d_0
global r_a_0
if len(Rover.rock_angles):
rock_p = np.array((Rover.rock_dists, Rover.rock_angles)).T
r_d_0 = copy.copy(rock_p[:, 0].mean())
r_a_0 = copy.copy(rad2deg(rock_p[:, 1].mean()))
return rock_p
else:
return None
def side_areas(Rover):
i = 0
d = 0
if len(Rover.nav_angles) != 0:
for a in Rover.nav_angles:
if a > 0:
i += 1
else:
d += 1
return (1.0 * i / len(Rover.nav_angles), 1.0 * d / len(Rover.nav_angles))
else:
return (0, 0)
def counter_state(Rover, sta_change=True):
Rover.state_counter += 1
if Rover.state_counter >= 100:
        Rover.state_counter = 0
if sta_change:
Rover.mode = Rover.states[np.random.randint(0, len(Rover.states))]
return True
else:
return False
# commands based on the output of the perception_step() function
turn = True
# r_a_0 = Rover.rock_dists
# r_d_0 = Rover.Roc
def decision_step(Rover):
global turn
# Implement conditionals to decide what to do given perception data
# Here you're all set up with some basic functionality but you'll need to
# improve on this decision tree to do a good job of navigating autonomously!
# detec_steer(Rover)
# flag = Rover.flag_direction
# Example:
# Check if we have vision data to make decisions with
print Rover.mode
if Rover.nav_angles is not None:
# Check for Rover.mode status
nav_data = get_polar_points(Rover)
# mean_dir = np.mean(Rover.nav_angles)
# desv = np.sqrt(Rover.nav_angles.var())
mean_dir = np.mean(get_near_periferics(nav_data, 15))
desv = np.sqrt(get_near_periferics(nav_data, 15).var())
mean_tita = rad2deg(mean_dir)
tita = rad2deg(mean_dir + desv)
tita2 = rad2deg(mean_dir - desv)
# rock = detec_rock(Rover)
ai, ad = side_areas(Rover)
fd = get_frontal_distance(nav_data, 25).mean()
wd = get_wall_distance(nav_data, 20)
wdi = wd[0].mean()
wdd = wd[1].mean()
r_a_0 = Rover.rock_angles
r_d_0 = Rover.rock_dists
if Rover.mode == 'forward':
# Rover.max_vel =2
if abs(Rover.throttle) >= 2:
Rover.throttle = 0
print Rover.mode
# Check the extent of navigable terrain
if len(Rover.nav_angles) >= Rover.stop_forward:
print 'Area Libre ', len(Rover.nav_angles)
# If mode is forward, navigable terrain looks good
# and velocity is below max, then throttle
if Rover.vel < Rover.max_vel:
# Set throttle value to throttle setting
Rover.throttle = Rover.throttle_set
print 'Acc: ', Rover.throttle, 'Vel: ', Rover.vel
if (Rover.throttle > 0) & (Rover.vel == 0):
# Atorado
print 'Atorado'
Rover.mode = 'Atorado'
# elif Rover.vel >= Rover.max_vel:
# Rover.throttle = 0
# Rover.brak = Rover.brake_set
else: # Else coast
Rover.throttle = 0
Rover.brake = 0
print 'tita: ', tita, ' media: ', mean_tita
print 'front d: ', fd, 'wdi', wdi, 'wdd', wdd
print 'Area Libre ', len(Rover.nav_angles), 'ai:', ai, 'ad:', ad
                    print 'Rock', r_d_0, r_a_0
if abs(mean_tita) < 2: #
print ' derecho'
Rover.steer = 5
else:
if abs(tita) <= 15: # tita
print '<NAME> <= 10'
if abs(tita) >= 4: # me choco la pared
print '+'
if ai >= 0.4:
Rover.steer = np.int(tita) # izq
else:
Rover.steer = np.int(tita)
print 'steer: ', Rover.steer
else:
Rover.steer = -np.int(3)
print 'steer: ', Rover.steer
else: # tits > 10
if tita > 15:
print 'tita > 10'
# print 'fronta d: ', get_frontal_distance(nav_data, 30).mean()
if fd > 10:
if ai > 0.4:
Rover.steer = np.clip(
np.int(tita / 3), -15, 15)
else:
Rover.steer = - \
np.clip(np.int(tita / 3), -15, 15)
print 'steer: ', Rover.steer
else:
Rover.mode = 'stop'
elif tita < -15:
print 'tita < -10'
# print 'fronta d: ', get_frontal_distance(nav_data, tita).mean()
if fd > 10:
if ai > 0.3:
Rover.steer = - \
np.clip(np.int(tita / 3), -15, 15)
else:
Rover.steer = 0
# Rover.steer = np.clip((tita/3), -15, 15)
print 'steer: ', Rover.steer
else:
Rover.mode = 'stop'
# If there's a lack of navigable terrain pixels then go to 'stop' mode
elif (len(Rover.nav_angles) < Rover.stop_forward) or (fd <= 15):
# Set mode to "stop" and hit the brakes!
Rover.throttle = 0
# Set brake to stored brake value
Rover.brake = Rover.brake_set
Rover.steer = 0
Rover.mode = 'stop'
print Rover.mode, 'nav_angles < stop_forward'
#
# if Rover.rock_dist > 0:
if distance_to_rock(Rover) > 0:
Rover.brake = Rover.brake_set
Rover.steer = 0
Rover.throttle = 0
Rover.mode = 'stop'
# raw_input()
# If we're already in "stop" mode then make different decisions
elif Rover.mode == 'stop':
print 'Area Libre ', len(Rover.nav_angles), 'ai:', ai, 'ad:', ad
print 'front d: ', fd, 'wdi', wdi, 'wdd', wdd
# If we're in stop mode but still moving keep braking
if abs(Rover.vel) > 0.2:
Rover.throttle = 0
Rover.brake = Rover.brake_set
Rover.steer = 0
# If we're not moving (vel < 0.2) then do something else
elif abs(Rover.vel) <= 0.2:
print 'steady'
# Now we're stopped and we have vision data to see if there's a path forward
                if distance_to_rock(Rover) > 0:
Rover.brake = 0
Rover.mode = 'Rock'
elif (len(Rover.nav_angles) < Rover.go_forward / 3) or \
(fd < 8):
print 'pa tra'
Rover.brake = 0
Rover.throttle = -0.1
# Release the brake to allow turning
elif (len(Rover.nav_angles) < Rover.go_forward / 2)\
or (ai < 0.25) or (wdd < 8) or (wdi < 8):
print 'rotate'
# elif len(Rover.nav_angles) < Rover.go_forward:
print 'fd: ', fd
Rover.throttle = 0
Rover.brake = 0
Rover.steer = -5 # der
# If we're stopped but see sufficient navigable terrain in front then go!
elif (len(Rover.nav_angles) >= Rover.go_forward) &\
(fd > 10) & (wdd > 10) & (wdi > 10):
# Set throttle back to stored value
Rover.throttle = Rover.throttle_set
# Release the brake
Rover.brake = 0
# Set steer to mean angle
Rover.steer = np.clip(mean_tita, -15, 15)
Rover.mode = 'forward'
elif Rover.mode == 'Rock':
# Rover.throttle = 0.02
# time.sleep(1)
print 'Vio una piedra'
# Rover.max_vel = 0.5
print 'Rock: ', r_d_0, r_a_0
Rover.throttle = 0
print 'shearching'
# rock = detec_rock(Rover)
# if rock is not None:
# rock_dist = rock[:, 0].mean()
# rock_ang = rad2deg(rock[:, 1].mean())
print 'rock d: ', r_d_0, 'rock a: ', r_a_0
Rover.brake = 0
if Rover.near_sample == 1:
Rover.rock_dists = []
Rover.rock_angles = []
Rover.mode = 'stop'
elif r_d_0 <= 10: # cerca
print 'cerca'
if len(r_a_0) < 10:
Rover.steer = 0
else:
Rover.steer = np.clip(np.int(r_a_0), -2, 2)
if (Rover.vel <= 0.2) & (r_d_0 == 0):
Rover.throttle = 0.1
else:
Rover.throttle = 0
elif (len(r_a_0) >= 35) & (r_d_0 > 10):
print ' lejos y abierta'
Rover.steer = np.clip(np.array(r_a_0, dtype='int'), -2, 2)
elif (len(r_a_0) < 45) & (r_d_0 > 10):
print ''
if Rover.vel >= 0.3:
Rover.throttle = 0
else:
Rover.throttle = 0.1
# else:
# print 'Rock es none'
# # Rover.brake = 0
# Rover.steer = np.clip(np.array(r_a_0, dtype='int'), -2, 2)
# # Rover.throttle = 0.01
# Rover.mode = ''
# if last:
# else:
# Rover.steer = np.clip(-5,-10,10)
elif Rover.mode == 'Atorado':
Rover.throttle = 0
# Rover.state_counter +=1
# if Rover.state_counter > 10:
# turn = not turn
# Rover.state_counter = 0
Rover.steer = -15
print len(Rover.nav_angles)
if(len(Rover.nav_angles) >= Rover.go_forward / 2):
# Set throttle back to stored value
Rover.throttle = Rover.throttle_set
# Release the brake
Rover.brake = 0
# Set steer to mean angle
Rover.steer = np.clip(np.int(mean_tita), -15, 15)
Rover.mode = 'forward'
# Rover.steer = 15
# Rover.mode = 'foward'
# Just to make the rover do something
# even if no modifications have been made to the code
else:
print 'la nada'
counter_state(Rover)
Rover.steer = 0
Rover.brake = 0
Rover.throttle = -Rover.throttle_set
        if detec_rock(Rover) is not None:
Rover.mode = 'Rock'
if Rover.near_sample and Rover.vel == 0 and not Rover.picking_up:
Rover.send_pickup = True
# time.sleep(0.5)
# If in a state where want to pickup a rock send pickup command
return Rover
|
juancruzgassoloncan/Udacity-Robo-nanodegree
|
src/rover/project/code/decision.py
|
import numpy as np
from rover_sates import *
from state_machine import *
# This is where you can build a decision tree for determining throttle, brake and steer
# commands based on the output of the perception_step() function
def decision_step(Rover, machine):
if Rover.nav_angles is not None:
machine.run()
else:
Rover.throttle = Rover.throttle_set
Rover.steer = 0
Rover.brake = 0
return Rover
|
juancruzgassoloncan/Udacity-Robo-nanodegree
|
src/rover/project/code/perception.py
|
<gh_stars>1-10
import numpy as np
import cv2
# Identify pixels above the threshold
# Threshold of RGB > 160 does a nice job of identifying ground pixels only
def color_thresh_hsv(img, low_thresh=(20, 85, 85), high_thresh=(35, 255, 255), inv=False, roi=None):
img_hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
mask = cv2.inRange(img_hsv, low_thresh, high_thresh, dst=None)
if roi is not None:
mask = roi * mask
if inv is True:
mask = -(mask - 1)
# mask = cv2.blur(mask, (7, 7))
res = cv2.bitwise_and(img, img, dst=None, mask=mask)
return res, mask
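# Usage sketch (hypothetical frame path; the default thresholds target the
# yellow rock samples, and the function expects an RGB image):
#   img = cv2.imread('frame.jpg')[:, :, ::-1]  # convert BGR -> RGB
#   rock_pixels, rock_mask = color_thresh_hsv(img)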
def segmentation(img, mask):
res = cv2.bitwise_and(img, img, dst=None, mask=mask)
return res
def world_segmentation(img,
l_r_thresh=(25, 65, 65),
h_r_thresh=(35, 255, 255),
l_n_thresh=(0, 0, 160),
h_n_thresh=(255, 120, 255),
roi=None,
l_o_thresh=(0, 40, 0),
h_o_thresh=(255, 255, 120)):
res_r, mask_r = color_thresh_hsv(img, l_r_thresh, h_r_thresh)
res_n, mask_n = color_thresh_hsv(img, l_n_thresh, h_n_thresh, roi=roi)
res_o, mask_o = color_thresh_hsv(img, l_o_thresh, h_o_thresh)
return mask_n, mask_o, mask_r
# Define a function to convert from image coords to rover coords
def rover_coords(binary_img):
# Identify nonzero pixels
ypos, xpos = binary_img.nonzero()
# Calculate pixel positions with reference to the rover position being at the
# center bottom of the image.
x_pixel = -(ypos - binary_img.shape[0]).astype(np.float)
y_pixel = -(xpos - binary_img.shape[1] / 2).astype(np.float)
return x_pixel, y_pixel
# Define a function to convert to radial coords in rover space
def to_polar_coords(x_pixel, y_pixel):
# Convert (x_pixel, y_pixel) to (distance, angle)
# in polar coordinates in rover space
# Calculate distance to each pixel
dist = np.sqrt(x_pixel**2 + y_pixel**2)
# Calculate angle away from vertical for each pixel
angles = np.arctan2(y_pixel, x_pixel)
return dist, angles
# Define a function to map rover space pixels to world space
def rotate_pix(xpix, ypix, yaw):
# Convert yaw to radians
yaw_rad = yaw * np.pi / 180
xpix_rotated = (xpix * np.cos(yaw_rad)) - (ypix * np.sin(yaw_rad))
ypix_rotated = (xpix * np.sin(yaw_rad)) + (ypix * np.cos(yaw_rad))
# Return the result
return xpix_rotated, ypix_rotated
def translate_pix(xpix_rot, ypix_rot, xpos, ypos, scale):
# Apply a scaling and a translation
xpix_translated = (xpix_rot / scale) + xpos
ypix_translated = (ypix_rot / scale) + ypos
# Return the result
return xpix_translated, ypix_translated
# Define a function to apply rotation and translation (and clipping)
# Once you define the two functions above this function should work
def pix_to_world(xpix, ypix, xpos, ypos, yaw, world_size, scale):
# Apply rotation
xpix_rot, ypix_rot = rotate_pix(xpix, ypix, yaw)
# Apply translation
xpix_tran, ypix_tran = translate_pix(xpix_rot, ypix_rot, xpos, ypos, scale)
# Perform rotation, translation and clipping all at once
x_pix_world = np.clip(np.int_(xpix_tran), 0, world_size - 1)
y_pix_world = np.clip(np.int_(ypix_tran), 0, world_size - 1)
# Return the result
return x_pix_world, y_pix_world
# Define a function to perform a perspective transform
def perspect_transform(img, src, dst):
M = cv2.getPerspectiveTransform(src, dst)
# keep same size as input image
warped = cv2.warpPerspective(img, M, (img.shape[1], img.shape[0]))
return warped
def range_view(xpix, ypix, max_r=50, min_r=0):
dist = np.sqrt(xpix**2 + ypix**2)
rxpix = xpix[(min_r <= dist) & (dist <= max_r)]
rypix = ypix[(min_r <= dist) & (dist <= max_r)]
return rxpix, rypix
def rad2deg(rad):
return (rad * 180.0) / np.pi
def deg2rad(deg):
return (deg * np.pi) / 180.0
# Apply the above functions in succession and update the Rover state accordingly
def perception_step(Rover):
# Perform perception steps to update Rover()
# TODO:
# NOTE: camera image is coming to you in Rover.img
# 1) Define source and destination points for perspective transform
image = Rover.img
dst_size = 5
bottom_offset = 6
roi = np.zeros((image.shape[0], image.shape[1]), dtype=np.uint8)
roi[70:, :] = 1
src = np.float32([[14, 140], [301, 140], [200, 96], [118, 96]])
dst = np.float32([[image.shape[1] / 2 - dst_size,
image.shape[0] - bottom_offset],
[image.shape[1] / 2 + dst_size,
image.shape[0] - bottom_offset],
[image.shape[1] / 2 + dst_size,
image.shape[0] - 2 * dst_size - bottom_offset],
[image.shape[1] / 2 - dst_size,
image.shape[0] - 2 * dst_size - bottom_offset],
])
scale = 10
world_size = Rover.ground_truth.shape[0]
# 2) Apply perspective transform
# p_img = perspect_transform(image, source, destination)
# 3) Apply color threshold to identify navigable terrain/obstacles/rock samples
mask_t, mask_o, mask_r = world_segmentation(image, roi=roi)
w_mask_t = perspect_transform(mask_t, src, dst)
w_mask_r = perspect_transform(mask_r, src, dst)
w_mask_o = perspect_transform(mask_o, src, dst)
# 4) Update Rover.vision_image (this will be displayed on left side of screen)
# Example: Rover.vision_image[:,:,0] = obstacle color-thresholded binary image
# Rover.vision_image[:,:,1] = rock_sample color-thresholded binary image
# Rover.vision_image[:,:,2] = navigable terrain color-thresholded binary image
Rover.vision_image[:, :, 0] = mask_o
Rover.vision_image[:, :, 1] = mask_r
Rover.vision_image[:, :, 2] = mask_t
Rover.vision_image = perspect_transform(Rover.vision_image, src, dst)
# 5) Convert map image pixel values to rover-centric coords
t_xpix, t_ypix = rover_coords(w_mask_t)
o_xpix, o_ypix = rover_coords(w_mask_o)
r_xpix, r_ypix = rover_coords(w_mask_r)
vrang = 50
rt_xpix, rt_ypix = range_view(t_xpix, t_ypix, max_r=vrang)
ro_xpix, ro_ypix = range_view(o_xpix, o_ypix, max_r=vrang)
rr_xpix, rr_ypix = range_view(r_xpix, r_ypix, max_r=vrang)
# 6) Convert rover-centric pixel values to world coordinates
wrd_t_xpix, wrd_t_ypix = pix_to_world(rt_xpix,
rt_ypix,
Rover.pos[0],
Rover.pos[1],
Rover.yaw,
world_size,
scale)
wrd_o_xpix, wrd_o_ypix = pix_to_world(ro_xpix,
ro_ypix,
Rover.pos[0],
Rover.pos[1],
Rover.yaw,
world_size,
scale)
wrd_r_xpix, wrd_r_ypix = pix_to_world(r_xpix, r_ypix,
Rover.pos[0], Rover.pos[1],
Rover.yaw, world_size, scale)
# 7) Update Rover worldmap (to be displayed on right side of screen)
# Example: Rover.worldmap[obstacle_y_world, obstacle_x_world, 0] += 1
# Rover.worldmap[rock_y_world, rock_x_world, 1] += 1
# Rover.worldmap[navigable_y_world, navigable_x_world, 2] += 1
# print Rover.pitch, Rover.roll
if (359.5 < Rover.pitch or Rover.pitch < 0.5) and \
(359.5 < Rover.roll or Rover.roll < 0.5):
# print('update map')
Rover.worldmap[wrd_o_ypix, wrd_o_xpix, 0] += 10
Rover.worldmap[wrd_r_ypix, wrd_r_xpix, 1] += 10
Rover.worldmap[wrd_t_ypix, wrd_t_xpix, 2] += 10
# 8) Convert rover-centric pixel positions to polar coordinates
# Update Rover pixel distances and angles
# Rover.nav_dists = rover_centric_pixel_distances
# Rover.nav_angles = rover_centric_angles
rx_t, ry_t = range_view(t_xpix, t_ypix, max_r=40, min_r=0)
Rover.nav_dists, Rover.nav_angles = to_polar_coords(rx_t, ry_t)
rx_o, ry_o = range_view(o_xpix, o_ypix, max_r=50, min_r=0)
Rover.obs_dists, Rover.obs_angles = to_polar_coords(rx_o, ry_o)
rx_r, ry_r = range_view(r_xpix, r_ypix, max_r=35, min_r=0)
if w_mask_r.any():
Rover.rock_dists, Rover.rock_angles = to_polar_coords(rx_r, ry_r)
# Rover.rock_dists, Rover.rock_angles = to_polar_coords(r_xpix, r_ypix)
else:
Rover.rock_dists = []
Rover.rock_angles = []
return Rover
|
juancruzgassoloncan/Udacity-Robo-nanodegree
|
src/rover/project/code/rover_sates.py
|
# -*- coding: utf-8 -*-
from helper import *
import time
class Stop(object):
def __init__(self, rover, brake=5):
self.rover = rover
self.brake = brake
self.rover.steer = 0
self.start_time = self.rover.total_time
def __str__(self):
return 'Stop'
def delay(self, sec):
if self.start_time == 0:
self.start_time = self.rover.total_time
delta = self.rover.total_time - self.start_time
if delta <= sec:
return False
else:
self.start_time = 0
return True
def run(self):
self.rover.brake = self.brake
self.rover.throttle = 0.0
self.rover.steer = 0
def next(self):
self.rover.throttle = 0.0
self.rover.steer = 0
if abs(self.rover.vel) < 0.02:
if self.delay(0.5):
self.rover.brake = 0.0
if self.rover.go_home:
return ReturnHome(self.rover)
else:
return SearchClearPath(self.rover)
else:
return self
else:
return self
class Go(object):
def __init__(self, rover, throttle=0.1):
self.rover = rover
self.throttle = throttle
self.bearing = 0
self.nav_data = None
self.front_area = 0
self.start_time = 0
def __str__(self):
return 'Go'
def delay(self, sec):
if self.start_time == 0:
self.start_time = self.rover.total_time
delta = self.rover.total_time - self.start_time
if delta <= sec:
# print 'stabilizing...', delta
return False
else:
self.start_time = 0
return True
def run(self):
self.rover.brake = 0
self.rover.throttle = self.throttle
def update_sterring(self):
self.nav_data = get_polar_points(self.rover)
mean_dir = rad2deg(np.mean(get_near_periferics(self.nav_data, 100)))
desv = rad2deg(np.sqrt(get_near_periferics(self.nav_data, 100).var()))
AI, AD = side_areas(self.rover)
if AI > 0.48:
self.bearing = np.int_(mean_dir)
self.rover.steer = np.clip(self.bearing, -15, 15)
elif AI > 0.25:
self.bearing = np.int_(mean_dir + 0.8 * desv)
self.rover.steer = np.clip(self.bearing, -2, 15)
elif AI < 0.20:
self.bearing = np.int_(mean_dir + 0.5 * desv)
self.rover.steer = np.clip(self.bearing, -12, 12)
else:
self.bearing = 0
self.rover.steer = self.bearing
def check_area_stop(self):
if len(self.rover.nav_angles) > self.rover.stop_forward:
return True
else:
return False
def check_vel_max(self):
if self.rover.vel >= self.rover.max_vel:
self.rover.throttle = 0.0
else:
self.rover.throttle = self.rover.throttle_set
def stuck(self):
if self.rover.vel < 0.02:
if self.delay(0.3):
if self.rover.throttle > 0:
return True
else:
return False
return False
else:
return False
def check_rock_sight(self):
if distance_to_rock(self.rover) > 0:
return True
else:
return False
def next(self):
print('area: ', len(self.rover.nav_angles))
print('fron area:', self.front_area)
if self.check_rock_sight():
self.rover.rock_detected = True
return Stop(self.rover, brake=2)
if self.check_area_stop(): # and is_obstacle_ahead(self.rover) ==
self.check_vel_max()
self.update_sterring()
self.front_area = is_obstacle_ahead(
self.rover, range=20, bearing=self.bearing)
if self.front_area > 100:
print('fron area:', self.front_area)
return Stop(self.rover)
if self.stuck():
return Stuck(self.rover)
else:
return self
else:
return Stop(self.rover)
class SearchClearPath(object):
def __init__(self, rover, turn='right'):
self.rover = rover
self.turn_direction = turn
self.iteration = 0
def __str__(self):
return 'SearchClearPath'
def run(self):
self.rover.brake = 0.0
    def update_turn(self):
        if self.turn_direction == 'right':
            self.rover.steer = -15
        elif self.turn_direction == 'left':
            self.rover.steer = 15
        self.iteration += 1
    def toggle_turn(self):
        if self.turn_direction == 'right':
            self.turn_direction = 'left'
        elif self.turn_direction == 'left':
            self.turn_direction = 'right'
def next(self):
self.update_turn()
AI, AD = side_areas(self.rover)
print('iter: ', self.iteration)
print('area: ', len(self.rover.nav_angles))
print('AI:', AI)
if self.rover.rock_detected:
return Rock(self.rover)
else:
if len(self.rover.nav_angles) >= self.rover.go_forward:
if is_obstacle_ahead(self.rover, 25, 0, arc=15) > 40:
return self
else:
if AI < 0.48:
return self
else:
self.iteration = 0
self.rover.steer = 0
return Go(self.rover)
# elif self.iteration < 100:
# return self
elif self.iteration == 500:
# raw_input()
# self.iteration += 1
            self.toggle_turn()
return self
elif self.iteration >= 1000:
# raw_input()
# self.iteration += 1
# if self.iteration >= 200:
# raw_input()
self.iteration = 0
return Go(self.rover)
# else:
# return self
else:
# self.iteration +=1
return self
class Stuck(object):
def __init__(self, rover):
self.rover = rover
self.times = 0
def __str__(self):
return 'Stuck'
def check_vel_max(self):
if self.rover.vel < -self.rover.max_vel:
return False
else:
return True
def run(self):
self.rover.steer = 0
self.rover.throttle = 0
def next(self):
if self.rover.picking_up:
return Stop(self.rover)
self.times += 1
if self.times >= 1:
if self.times >= 35:
self.rover.throttle = 0.0
self.times = 0
return Stop(self.rover)
else:
if self.check_vel_max():
self.rover.throttle = -0.1
else:
self.rover.throttle = 0
return self
return self
class Rock(Go):
def __init__(self, rover):
self.rover = rover
self.distance = 0
self.angle = 0
self.iteration = 0
self.bearing = 0
self.start_time = 0
def __str__(self):
return 'Rock'
def update_rock_data(self):
self.distance = distance_to_rock(self.rover)
if self.distance != 0:
self.angle = rad2deg(np.mean(self.rover.rock_angles))
def check(self):
# If in a state where want to pickup a rock send pickup command
if self.rover.near_sample:
self.rover.throttle = 0
self.rover.brake = self.rover.brake_set
if self.rover.vel == 0 and not self.rover.picking_up:
self.rover.send_pickup = True
while self.rover.picking_up:
print('picking up')
self.rover.rock_detected = False
self.rover.max_vel = 1
return True
else:
self.rover.brake = self.rover.brake_set
return False
else:
return False
def putting_rock_infront(self):
if abs(self.angle) > 25:
self.bearing = self.angle
self.rover.steer = np.clip(self.bearing, -15, 15, out=None)
else:
self.rover.steer = 0
def go(self):
if self.rover.vel > self.rover.max_vel:
self.rover.throttle = 0
else:
self.rover.throttle = 0.1
def run(self):
# self.delay(100)
self.rover.max_vel = 0.5
self.update_rock_data()
# self.putting_rock_infront()
# self.first_angle = self.angle
def next(self):
self.update_rock_data()
if self.distance == 0:
self.iteration += 1
self.rover.steer = np.clip(self.bearing, -15, 15)
self.go()
if self.iteration >= 5:
self.rover.max_vel = 1
self.rover.rock_detected = False
self.iteration = 0
return Go(self.rover, throttle=0.1)
else:
return self
else:
self.iteration = 0
print('distance:', self.distance)
print('angle: ', self.angle)
self.go()
self.putting_rock_infront()
if self.check():
if self.rover.samples_to_find == 0:
                    # NOTE: the original called an undefined `s(...)`; Stop is
                    # assumed to be the intended state here.
                    return Stop(self.rover)
else:
if np.sign(self.bearing):
turn = 'right'
else:
turn = 'left'
return SearchClearPath(self.rover, turn)
else:
if self.stuck():
self.putting_rock_infront()
return Stuck(self.rover)
else:
return self
class ReturnHome(Go):
def __init__(self, rover):
self.rover = rover
self.home = self.rover.pos
self.front_area = 0
def __str__(self):
return 'ReturnHome'
def bearing_to_home_position(self):
x = self.rover.pos[0] - self.home[0]
y = self.rover.pos[1] - self.home[1]
bearing = rad2deg(np.arctan2(y, x))
return bearing
def update_sterring(self):
min_nav_angle = rad2deg(np.min(self.rover.nav_angles)) + 45
max_nav_angle = rad2deg(np.max(self.rover.nav_angles)) - 45
        min_obs_angle = rad2deg(np.min(self.rover.obs_angles)) + 45
        max_obs_angle = rad2deg(np.max(self.rover.obs_angles)) - 45
min_angle = max(min_nav_angle, min_obs_angle)
max_angle = min(max_nav_angle, max_obs_angle)
self.rover.steer = np.clip(self.bearing_to_home_position(),
min_angle, max_angle)
self.front_area = is_obstacle_ahead(
self.rover, range=30, bearing=self.bearing_to_home_position())
def run(self):
pass
def next(self):
if self.rover.samples_to_find == 0:
self.rover.go_home = True
print('area: ', len(self.rover.nav_angles))
print('front area:', self.front_area)
if self.check_area_stop(): # and is_obstacle_ahead(self.rover) ==
self.update_sterring()
self.check_vel_max()
if self.front_area > 150:
return Stop(self.rover)
if self.stuck():
return Stuck(self.rover)
else:
return self
else:
return Stop(self.rover)
else:
return Go(self.rover, throttle=0.1)
|
juancruzgassoloncan/Udacity-Robo-nanodegree
|
src/rover/project/code/helper.py
|
import numpy as np
def rad2deg(rad):
return (rad * 180.0) / np.pi
def deg2rad(deg):
return (deg * np.pi) / 180.0
def distance_to_rock(Rover):
if Rover.rock_dists is not None:
if len(Rover.rock_dists):
return np.min(Rover.rock_dists)
else:
return 0
else:
return 0
def is_obstacle_ahead(Rover, range=25, bearing=0, arc=15):
idx_in_front = np.where((np.abs(Rover.obs_angles - bearing) < deg2rad(arc))
& (Rover.obs_dists < range))[0]
return len(idx_in_front)
def get_polar_points(Rover):
dist = Rover.nav_dists
ang = Rover.nav_angles
return np.array((dist, ang)).T
def get_frontal_distance(polar_points, arc=10):
central_view = [d for d, a in polar_points if rad2deg(abs(a)) < abs(arc)]
return np.array(central_view)
def get_near_periferics(polar_points, alpha):
near_angles = [a for d, a in polar_points if d < alpha]
return np.array(near_angles)
def side_areas(Rover):
i = 0
d = 0
if len(Rover.nav_angles) != 0:
for a in Rover.nav_angles:
if a > 0:
i += 1
else:
d += 1
return (1.0 * i / len(Rover.nav_angles), 1.0 * d / len(Rover.nav_angles))
else:
return (0, 0)
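# Vectorized sketch of the same left/right area split (assumes nav_angles
# is a numpy array, as produced by to_polar_coords):
def side_areas_vectorized(Rover):
    n = len(Rover.nav_angles)
    if n == 0:
        return (0, 0)
    left = np.count_nonzero(Rover.nav_angles > 0)
    return (1.0 * left / n, 1.0 * (n - left) / n)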
|
juancruzgassoloncan/Udacity-Robo-nanodegree
|
src/rover/ex_2/warp_perspect.py
|
<reponame>juancruzgassoloncan/Udacity-Robo-nanodegree<gh_stars>1-10
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
image_name = '../data/IMG/robocam_2017_10_03_15_35_32_475.jpg'
image = mpimg.imread(image_name)
def perspect_transform(img, src, dst):
# Get transform matrix using cv2.getPerspectivTransform()
M = cv2.getPerspectiveTransform(src, dst)
# Warp image using cv2.warpPerspective()
# keep same size as input image
warped = cv2.warpPerspective(img, M, (img.shape[1], img.shape[0]))
# Return the result
return warped
# Define a box in source (original) and
# destination (desired) coordinates.
# The source points trace a grid cell on the ground in the camera image;
# the destination maps it to a small square near the bottom center of the
# output, so the warp produces a top-down view.
# Try experimenting with different values!
source = np.float32([[35, 135],
[120, 97],
[202, 97],
[300, 135]])
dst_size = 5
bottom_offset = 6
destination = np.float32([[image.shape[1]/2 - dst_size, image.shape[0] - bottom_offset],
[image.shape[1]/2 - dst_size, image.shape[0] - bottom_offset - 2*dst_size],
[image.shape[1]/2 + dst_size, image.shape[0] - bottom_offset - 2*dst_size],
[image.shape[1]/2 + dst_size, image.shape[0] - bottom_offset],
])
warped = perspect_transform(image, source, destination)
# Draw Source and destination points on images (in blue) before plotting
cv2.polylines(image, np.int32([source]), True, (0, 0, 255), 2)
cv2.polylines(warped, np.int32([destination]), True, (0, 0, 255), 2)
# Display the original image and binary
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 6), sharey=True)
f.tight_layout()
ax1.imshow(image)
ax1.set_title('Original Image', fontsize=40)
ax2.imshow(warped, cmap='gray')
ax2.set_title('Result', fontsize=40)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
plt.show()
|
juancruzgassoloncan/Udacity-Robo-nanodegree
|
src/rover/ex_4/extra_functions.py
|
import numpy as np
import cv2
import matplotlib.image as mpimg
def perspect_transform(img, src, dst):
# Get transform matrix using cv2.getPerspectivTransform()
M = cv2.getPerspectiveTransform(src, dst)
# Warp image using cv2.warpPerspective()
# keep same size as input image
warped = cv2.warpPerspective(img, M, (img.shape[1], img.shape[0]))
# Return the result
return warped
def color_thresh(img, rgb_thresh=(160, 160, 160)):
# Create an array of zeros same xy size as img, but single channel
color_select = np.zeros_like(img[:, :, 0])
    # Require that each pixel be above all three threshold values in RGB
# above_thresh will now contain a boolean array with "True"
# where threshold was met
above_thresh = (img[:, :, 0] > rgb_thresh[0]) \
& (img[:, :, 1] > rgb_thresh[1]) \
& (img[:, :, 2] > rgb_thresh[2])
# Index the array of zeros with the boolean array and set to 1
color_select[above_thresh] = 1
# Return the binary image
return color_select
image_name = '../data/IMG/robocam_2017_10_03_15_35_32_475.jpg'
image = mpimg.imread(image_name)
# Define calibration box in source (actual) and destination (desired) coordinates
# These source and destination points are defined to warp the image
# to a grid where each 10x10 pixel square represents 1 square meter
dst_size = 5
# Set a bottom offset to account for the fact that the bottom of the image
# is not the position of the rover but a bit in front of it
bottom_offset = 6
source = np.float32([[35, 135], [120, 97], [202, 97], [300, 135]])
destination = np.float32([[image.shape[1] / 2 - dst_size, image.shape[0] - bottom_offset],
[image.shape[1] / 2 - dst_size, image.shape[0] -
bottom_offset - 2 * dst_size],
[image.shape[1] / 2 + dst_size, image.shape[0] -
bottom_offset - 2 * dst_size],
[image.shape[1] / 2 + dst_size, image.shape[0] - bottom_offset]])
|
juancruzgassoloncan/Udacity-Robo-nanodegree
|
src/rover/project/code/state_machine.py
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
from time import sleep
class state_machine(object):
def __init__(self, machine, *args):
self.machine = machine
self.state_stack = list(args)
self.current_state = self.state_stack[0]
self.current_state.run()
# self.next_state = ''
def run(self):
if len(self.state_stack) == 0:
print('end')
raw_input()
return
print('current:')
print(self.current_state)
self.next_state = self.current_state.next()
# sleep(5)
# raw_input()
print('Next:')
print(self.next_state)
if str(self.next_state) == str(self.current_state):
# print('same state')
# sleep(5)
pass
elif self.next_state is None:
# print('Non state')
# raw_input()
self.state_stack.pop(0)
else:
# print('new state')
self.state_stack.insert(0, self.next_state)
self.current_state = self.state_stack[0]
self.current_state.run()
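# Usage sketch (hypothetical rover instance; states live in rover_sates.py):
#   machine = state_machine(None, Stop(rover))
#   while mission_is_running:
#       machine.run()  # evaluates one state transition per call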
|
moerwald/download-images-via-google-search
|
main.py
|
<filename>main.py
import argparse
import json
import itertools
import logging
import re
import os
import uuid
import sys
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
def configure_logging():
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setFormatter(
logging.Formatter('[%(asctime)s %(levelname)s %(module)s]: %(message)s'))
logger.addHandler(handler)
return logger
logger = configure_logging()
REQUEST_HEADER = {
'User-Agent': "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.134 Safari/537.36"}
def get_soup(url, header):
response = urlopen(Request(url, headers=header))
return BeautifulSoup(response, 'html.parser')
def get_query_url(query):
return "https://www.google.co.in/search?q=%s&source=lnms&tbm=isch" % query
def extract_images_from_soup(soup):
image_elements = soup.find_all("div", {"class": "rg_meta"})
metadata_dicts = (json.loads(e.text) for e in image_elements)
link_type_records = ((d["ou"], d["ity"]) for d in metadata_dicts)
return link_type_records
def extract_images(query, num_images):
url = get_query_url(query)
logger.info("Souping")
soup = get_soup(url, REQUEST_HEADER)
logger.info("Extracting image urls")
link_type_records = extract_images_from_soup(soup)
return itertools.islice(link_type_records, num_images)
def get_raw_image(url):
req = Request(url, headers=REQUEST_HEADER)
resp = urlopen(req)
return resp.read()
def save_image(raw_image, image_type, save_directory):
extension = image_type if image_type else 'jpg'
file_name = uuid.uuid4().hex
save_path = os.path.join(save_directory, file_name)
with open(save_path, 'wb') as image_file:
image_file.write(raw_image)
def download_images_to_dir(images, save_directory, num_images):
for i, (url, image_type) in enumerate(images):
try:
logger.info("Making request (%d/%d): %s", i, num_images, url)
raw_image = get_raw_image(url)
save_image(raw_image, image_type, save_directory)
except Exception as e:
logger.exception(e)
def run(query, save_directory, num_images=100):
query = '+'.join(query.split())
logger.info("Extracting image links")
images = extract_images(query, num_images)
logger.info("Downloading images")
download_images_to_dir(images, save_directory, num_images)
logger.info("Finished")
def main():
parser = argparse.ArgumentParser(description='Scrape Google images')
parser.add_argument('-s', '--search', default='politiker', type=str, help='search term')
parser.add_argument('-n', '--num_images', default=100, type=int, help='num images to save')
parser.add_argument('-d', '--directory', default='C:/temp/teachableMachine/politiker', type=str, help='save directory')
args = parser.parse_args()
run(args.search, args.directory, args.num_images)
if __name__ == '__main__':
main()
|
bharadwaj-raju/island
|
Island3DRender.py
|
import sys
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def normalize_land_water(data, threshold=0.1):
res = [[0 for i in range(len(data))] for j in range(len(data))]
for idv, vline in enumerate(data):
for idh, hcell in enumerate(vline):
if hcell >= threshold:
res[idv][idh] = 1
return res
def normalize_0_1(data):
res = [[0 for i in range(len(data))] for j in range(len(data))]
for idv, vline in enumerate(data):
maxval = max(vline)
minval = min(vline)
for idh, hcell in enumerate(vline):
try:
res[idv][idh] = (hcell - minval)/(maxval - minval)
except ZeroDivisionError:
res[idv][idh] = (hcell - minval)
return res
def read_data_from_hmap_file(fname):
data = []
with open(fname) as f:
for line in f:
data.append([float(x) for x in line.split()])
return data
def main():
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
data = read_data_from_hmap_file(sys.argv[1])
x = list(range(len(data)))
y = list(range(len(data)))
X, Y = np.meshgrid(x, y)
Z = np.array(data)
ax.plot_surface(X, Y, Z, cmap='terrain')
ax.set_zlim([0, 1.5])
plt.show()
if __name__ == '__main__':
main()
|
bharadwaj-raju/island
|
IslandRender.py
|
<filename>IslandRender.py
import png
import io
from PIL import Image
import argparse
import json
import os
import sys
def normalize_land_water(data, threshold=0.1):
res = [[0 for i in range(len(data))] for j in range(len(data))]
for idv, vline in enumerate(data):
for idh, hcell in enumerate(vline):
if hcell >= threshold:
res[idv][idh] = 1
return res
def normalize_0_1(data):
res = [[0 for i in range(len(data))] for j in range(len(data))]
for idv, vline in enumerate(data):
maxval = max(vline)
minval = min(vline)
for idh, hcell in enumerate(vline):
try:
res[idv][idh] = (hcell - minval)/(maxval - minval)
except ZeroDivisionError:
res[idv][idh] = (hcell - minval)
return res
def normalize_0_255(norm_0_1_data):
res = [[0 for i in range(len(norm_0_1_data))] for j in range(len(norm_0_1_data))]
for idv, vline in enumerate(norm_0_1_data):
for idh, hcell in enumerate(vline):
res[idv][idh] = hcell * 255
return res
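# A numpy sketch of the same row-wise 0-1 normalization (assumes a
# rectangular matrix; constant rows normalize to 0, matching the
# ZeroDivisionError branch in normalize_0_1 above):
def normalize_0_1_np(data):
    import numpy as np
    arr = np.asarray(data, dtype=float)
    mins = arr.min(axis=1, keepdims=True)
    spans = arr.max(axis=1, keepdims=True) - mins
    spans[spans == 0] = 1.0
    return (arr - mins) / spans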
def generate_simple_pixel_matrix(data, color_config):
WATER_COLOR, LAND_COLOR = color_config['water'], color_config['land']
pixels = [[0 for i in range(len(data))] for i in range(len(data))]
WATER_VALUE = 0
for idv, vline in enumerate(data):
for idh, hcell in enumerate(vline):
pixels[idv][idh] = WATER_COLOR if (hcell == WATER_VALUE) else LAND_COLOR
return pixels
def generate_color_heights_pixel_matrix(data, color_config, threshold=0.1):
pixels = [[0 for i in range(len(data))] for i in range(len(data))]
HEIGHT_COLORS = color_config['color-heights']
for idv, vline in enumerate(data):
for idh, hcell in enumerate(vline):
if hcell <= threshold:
pixels[idv][idh] = HEIGHT_COLORS['0.0']
else:
try:
pixels[idv][idh] = HEIGHT_COLORS[str(round(hcell, 1))]
except KeyError:
pixels[idv][idh] = HEIGHT_COLORS['1.0']
return pixels
def generate_biome_pixel_matrix(elevation_data, moisture_data, color_config, threshold=0.1):
def biome(elevation, moisture):
if elevation <= threshold:
return 'water'
if elevation <= (threshold + 0.01):
return 'beach'
if elevation >= 0.7:
if moisture <= 0.3:
return 'cold-desert'
return 'snow'
if moisture <= 0.15:
return 'desert'
if moisture >= 0.9:
return 'marshland'
#if elevation < 0.4:
if moisture >= 0.7:
return 'rainforest'
if elevation > 0.4:
if moisture >= 0.3:
return 'forest'
return 'grassland'
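    # For example (with the default threshold 0.1): biome(0.05, 0.5) -> 'water',
    # biome(0.8, 0.2) -> 'cold-desert', biome(0.5, 0.5) -> 'forest'.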
pixels = [[0 for i in range(len(elevation_data))] for i in range(len(elevation_data))]
COLORS = color_config['biome']
for idv in range(len(elevation_data)):
for idh in range(len(elevation_data[idv])):
elevation = elevation_data[idv][idh]
moisture = moisture_data[idv][idh]
pixels[idv][idh] = COLORS[biome(elevation, moisture)]
return pixels
def make_png_file(pixels, fname, grayscale=False):
if grayscale:
pngimg = png.from_array(pixels, 'L')
else:
pngimg = png.from_array(pixels, 'RGB')
with open(fname, 'wb') as f:
pngimg.save(f)
def make_svg_file(pixels, fname, grayscale=False):
if grayscale:
pngimg = png.from_array(pixels, 'L')
else:
pngimg = png.from_array(pixels, 'RGB')
pngfile = io.BytesIO()
pngimg.save(pngfile)
image = Image.open(pngfile).convert('RGB')
imagedata = image.load()
svgdata = ''
svgdata += ('<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n')
svgdata += ('<svg id="svg2" xmlns="http://www.w3.org/2000/svg" version="1.1" width="%(x)i" height="%(y)i" viewBox="0 0 %(x)i %(y)i">\n' % {'x':image.size[0], 'y':image.size[1]})
for y in range(image.size[1]):
for x in range(image.size[0]):
rgb = imagedata[x, y]
rgb = '#%02x%02x%02x' % rgb
svgdata += ('<rect width="1" height="1" x="%i" y="%i" fill="%s" />\n' % (x, y, rgb))
svgdata += ('</svg>\n')
with open(fname, 'w') as f:
f.write(svgdata)
def read_data_from_hmap_file(fname):
data = []
with open(fname) as f:
for line in f:
data.append([float(x) for x in line.split()])
return data
def main():
arg_parser = argparse.ArgumentParser(description='IslandRender — Render islands (.hmap) as images')
arg_parser.add_argument(
'heightmap_file', metavar='heightmap-file', type=str,
help='The height map (.hmap) file')
arg_parser.add_argument(
'output_file', metavar='image-file', type=str,
help='The output image file')
arg_parser.add_argument(
'--output-format', type=str,
help='The output image format (png or svg). Default: png',
choices=['png', 'svg'], default='png',
required=False, metavar='png|svg')
arg_parser.add_argument(
'--mode', type=str,
        help='The type of image generated: varying colors for heights, simple land/water, a raw grayscale heightmap, or biomes',
        choices=['color-heights', 'simple', 'heightmap', 'biome'], default='simple',
        required=False, metavar='color-heights|simple|heightmap|biome')
arg_parser.add_argument(
'--water-level', type=float, default=0.2,
help='The water or flooding level (all values below this will be rendered as water)',
required=False)
arg_parser.add_argument(
'--biome-file', type=str,
help='Optional .biome file (of same size as .hmap) to use for biome rendering mode',
required=False)
args = arg_parser.parse_args()
print(args)
data = read_data_from_hmap_file(args.heightmap_file)
color_config = ''
with open('IslandRenderColors.json') as f:
for line in f:
if not (line.strip().startswith('/*') or line.strip().startswith('//')):
color_config += line
color_config = json.loads(color_config)
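    # A hedged sketch of the assumed IslandRenderColors.json shape (the keys
    # 'water', 'land', 'color-heights' and 'biome' are the ones read below;
    # RGB triples are assumed since pixels are written in 'RGB' mode):
    #   {
    #       "water": [30, 110, 160],
    #       "land": [90, 150, 90],
    #       "color-heights": {"0.0": [30, 110, 160], "0.1": [...], "1.0": [255, 255, 255]},
    #       "biome": {"water": [...], "beach": [...], "forest": [...]}
    #   }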
if args.mode == 'color-heights':
pixels = generate_color_heights_pixel_matrix(data, color_config, threshold=args.water_level)
elif args.mode == 'simple':
pixels = generate_simple_pixel_matrix(normalize_land_water(data, threshold=args.water_level), color_config)
elif args.mode == 'biome':
if not args.biome_file:
print('If mode is biome, then biome file must be specified with --biome-file.')
sys.exit(1)
moisture_data = read_data_from_hmap_file(args.biome_file)
pixels = generate_biome_pixel_matrix(data, moisture_data, color_config, threshold=args.water_level)
else:
pixels = normalize_0_255(normalize_0_1(data))
if args.mode == 'heightmap':
grayscale = True
else:
grayscale = False
if args.output_format == 'png':
make_png_file(pixels, args.output_file, grayscale=grayscale)
else:
make_svg_file(pixels, args.output_file, grayscale=grayscale)
if __name__ == '__main__':
main()
|
fantopop/post-production-scripts
|
Session info to table/modules/html.py
|
<reponame>fantopop/post-production-scripts
#!/usr/bin/python
tab = ' '
class Tag():
def __init__(self, name, HTML):
self.name = name
self.HTML = HTML
def __enter__(self):
self.HTML.content += tab * self.HTML.indent + '<' + self.name + '>\n'
self.HTML.indent += 1
def __exit__(self, exc_type, exc_value, traceback):
self.HTML.indent -= 1
self.HTML.content += tab * self.HTML.indent + '</' + self.name + '>\n'
class HTML():
def __init__(self):
self.indent = 0
self.content = '<!DOCTYPE html>\n'
def __str__(self):
return self.content
def add(self, text):
for line in text.split('\n'):
self.content += tab * self.indent + line + '\n'
def html(self):
return Tag('html', self)
def body(self):
return Tag('body', self)
def head(self):
return Tag('head', self)
def title(self):
return Tag('title', self)
def h1(self):
return Tag('h1', self)
def h2(self):
return Tag('h2', self)
def style(self):
return Tag('style', self)
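# A minimal usage sketch (assuming the Tag/HTML classes above; the rendered
# markup accumulates in h.content, with one indent level per nested tag):
#   h = HTML()
#   with h.html():
#       with h.body():
#           with h.h1():
#               h.add('Hello world')
#   print(h)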
def main():
pass
if __name__ == '__main__':
main()
|
fantopop/post-production-scripts
|
Correct edl/make_dict.py
|
#!/usr/bin/env python3
"""
This script creates a dictionary and writes it
to the .json file for use by correct_edl.py script.
"""
import json
import sys
from os import path
# Dictionary with strings to be replaced.
d = {}
d[','] = '_'
d['/'] = '_'
d[' A'] = 'A'
d[' B'] = 'B'
d[' C'] = 'C'
# Scene names from shooting day 160803.
d['A139C002_160803_L0F7.MXF'] = '113-11-1'
d['A136C001_160803_L0F7.MXF'] = '113-06-1A'
d['A139C001_160803_L0F7.MXF'] = '113-10-01'
d['A134C003_160803_L0F7.MXF'] = '113-02-03A'
d['A137C002_160803_L0F7.MXF'] = '113-07-02'
d['A140C001_160803_L0F7.MXF'] = '113-12-01'
d['A134C004_160803_L0F7.MXF'] = '113-04-01A'
d['A137C001_160803_L0F7.MXF'] = '113-07-01'
d['B014C013_160801_R59N.MXF'] = '113-01-04'
# Scene names from scenes 61-62.
d['A214C010_160814_L0F7.MXF'] = '61-02-02'
d['A214C007_160814_L0F7.MXF'] = '61-01-03'
d['A215C004_160814_L0F7.MXF'] = '62-01-04'
d['A216C002_160814_L0F7.MXF'] = '62-03-01'
d['A216C001_160814_L0F7.MXF'] = '62-02-01'
d['A215C002_160814_L0F7.MXF'] = '62-01-02'
d['A215C001_160814_L0F7.MXF'] = '62-01-01'
d['A069C002_160725_R0F7.MXF'] = '60_63-02-02'
d['A218C001_160814_L0F7.MXF'] = '62-04-03'
d['A219C001_160814_L0F7.MXF'] = '62-05-02'
d['A218C002_160814_L0F7.MXF'] = '62-04-04'
d['A218C003_160814_L0F7.MXF'] = '62-05-01'
d['A219C004_160814_L0F7.MXF'] = '62-08-01'
d['A217C003_160814_L0F7.MXF'] = '62-04-02'
def main():
filename = path.join(path.dirname(sys.argv[0]), 'replacements_dict.json')
    with open(filename, 'w') as f:
        json.dump(obj=d, fp=f, indent=4)
if __name__ == '__main__':
main()
|
fantopop/post-production-scripts
|
Correct edl/Correct EDL.app/Contents/Resources/Scripts/correct_edl.py
|
<reponame>fantopop/post-production-scripts
#!/usr/bin/env python3
"""
This script replaces strings in *FROM CLIP NAME field of the EDL,
according to the dictionary from the .json file.
Corrected EDL is written to the new file, with '.corrected' added to the filename.
"""
# Constants
from_clip_name = '*FROM CLIP NAME: '
source_file = '*SOURCE FILE: '
import sys, json, argparse
from os import path
def print_separator():
print('\n========================================\n')
def print_dict(d):
print('Replacements dictionary: \n')
for key in d:
print("'%s' >>> '%s'" %(key, d[key]))
print()
def outname(filename):
"""
Constructs output filename from input file,
adding '.corrected' between filename and extension.
Example:
input.txt >>> input.corrected.txt
"""
split = (path.basename(filename)).split('.')
l = len(split)
if l > 1:
output = '.'.join(split[0:l-1] + ['corrected', split[l-1]])
else:
output = filename + '.corrected'
return path.join(path.dirname(filename), output)
def correct(inputfile, outputfile, dictionary, print_statistics=False):
"""
Reads input file line by line and writes them into output file,
applying corrections:
1. Lines, that start with "*SOURCE FILE" are omitted.
2. '*FROM CLIP NAME ' lines are written with replacements according to dictionary.
3. Other lines are written unchanged.
Returns lines with replacements count.
"""
# Corrected events count.
count = 0
for line in inputfile:
# Remove lines that start with "*SOURCE FILE".
if not line.startswith(source_file):
# Working with '*FROM CLIP NAME ' lines only.
if line.startswith(from_clip_name):
old_line = line
for key in dictionary:
# Replace.
line = from_clip_name + line[len(from_clip_name):].replace(key, dictionary[key])
# Write down corrected line.
if not line == old_line:
count += 1
# Print out replacements.
if print_statistics:
print(old_line[len(from_clip_name):-1] + ' >>> ' + line[len(from_clip_name):-1])
outputfile.write(line)
else:
# Write main event line unchanged.
outputfile.write(line)
return count
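# Illustrative example (using an entry from the replacements dictionary built
# by make_dict.py, e.g. {'A139C002_160803_L0F7.MXF': '113-11-1'}):
#   '*FROM CLIP NAME: A139C002_160803_L0F7.MXF' -> '*FROM CLIP NAME: 113-11-1'
# while every '*SOURCE FILE: ...' line is dropped from the output.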
def main():
# Parsing command line arguments.
parser = argparse.ArgumentParser(
        description='Replaces strings in *FROM CLIP NAME field of the EDL, according to the dictionary from the .json file')
parser.add_argument(
'edl', type=argparse.FileType(mode='r'),
help='source EDL file')
parser.add_argument(
'-d', '--dict', type=argparse.FileType(mode='r'),
default='replacements_dict.json',
help='replacements dictionary in JSON format, \'replacements_dict.json\' is used, if not specified')
parser.add_argument(
'-s', '--stat', action='store_true',
help='print detailed replacements statistics')
args = parser.parse_args()
# Constructing name for the corrected file.
output = outname(args.edl.name)
# Read dictionary from file.
replacements_dict = json.load(args.dict)
args.dict.close()
# Printing settings.
# print_separator()
print('Input EDL: %s' % path.basename(args.edl.name))
print('Output filename: %s' % path.basename(output))
print('Replacements dictionary: %s' % args.dict.name)
print('Print statistics: %s' % args.stat)
# print_separator()
with open(output, 'w') as output_edl:
count = correct(args.edl, output_edl, replacements_dict, args.stat)
# Cleanup and result.
args.edl.close()
print('\n%d events corrected.\n' % count)
if __name__ == '__main__':
main()
|
fantopop/post-production-scripts
|
Session info to table/macOS app/Tracks to Table/tracks_to_table.py
|
<filename>Session info to table/macOS app/Tracks to Table/tracks_to_table.py
#!/usr/bin/python
"""
tracks_to_table.py
Author: <NAME>
https://github.com/fantopop/post-production-scripts
Special thanks to <NAME> for the HTML.py module for generating HTML tables.
http://www.decalage.info/python/html
This script converts a .txt file, exported from Pro Tools
using the "Export Session Info as Text" command, into a .csv file.
This CSV file can be easily opened with the Numbers app.
There are two formats available:
- with a TRACK_NAME column as one table.
- grouped by TRACK NAME with the [--tracks] option.
"""
import sys, csv, argparse
from os import path
# Separator.
sep = '\t'
header = ['#', 'EVENT', 'START', 'END', 'DURATION']
footer = [''] * 5
# TABLE_STYLE_THINBORDER = "border: 1px solid #000000; border-collapse: collapse;"
TABLE_STYLE_THINBORDER = ""
table_style = 'table {border-collapse: collapse;} th, td {border: 1px solid #ccc;padding: 8px;}'
#--- CONSTANTS -----------------------------------------------------------------
# Table style to get thin black lines in Mozilla/Firefox instead of 3D borders
TABLE_STYLE_THINBORDER = "border: 1px solid #000000; border-collapse: collapse;"
#TABLE_STYLE_THINBORDER = "border: 1px solid #000000;"
#=== CLASSES ===================================================================
class TableCell (object):
"""
    a TableCell object is used to create a cell in an HTML table. (TD or TH)
Attributes:
- text: text in the cell (may contain HTML tags). May be any object which
can be converted to a string using str().
- header: bool, false for a normal data cell (TD), true for a header cell (TH)
- bgcolor: str, background color
- width: str, width
    - align: str, horizontal alignment (left, center, right, justify or char)
- char: str, alignment character, decimal point if not specified
- charoff: str, see HTML specs
- valign: str, vertical alignment (top|middle|bottom|baseline)
- style: str, CSS style
- attribs: dict, additional attributes for the TD/TH tag
Reference: http://www.w3.org/TR/html4/struct/tables.html#h-11.2.6
"""
def __init__(self, text="", bgcolor=None, header=False, width=None,
align=None, char=None, charoff=None, valign=None, style=None,
attribs=None):
"""TableCell constructor"""
self.text = text
self.bgcolor = bgcolor
self.header = header
self.width = width
self.align = align
self.char = char
self.charoff = charoff
self.valign = valign
self.style = style
self.attribs = attribs
if attribs==None:
self.attribs = {}
def __str__(self):
"""return the HTML code for the table cell as a string"""
attribs_str = ""
if self.bgcolor: self.attribs['bgcolor'] = self.bgcolor
if self.width: self.attribs['width'] = self.width
if self.align: self.attribs['align'] = self.align
if self.char: self.attribs['char'] = self.char
if self.charoff: self.attribs['charoff'] = self.charoff
if self.valign: self.attribs['valign'] = self.valign
if self.style: self.attribs['style'] = self.style
for attr in self.attribs:
attribs_str += ' %s="%s"' % (attr, self.attribs[attr])
if self.text:
text = str(self.text)
else:
# An empty cell should at least contain a non-breaking space
text = ' '
if self.header:
return ' <TH%s>%s</TH>\n' % (attribs_str, text)
else:
return ' <TD%s>%s</TD>\n' % (attribs_str, text)
#-------------------------------------------------------------------------------
class TableRow (object):
"""
    a TableRow object is used to create a row in an HTML table. (TR tag)
Attributes:
- cells: list, tuple or any iterable, containing one string or TableCell
object for each cell
- header: bool, true for a header row (TH), false for a normal data row (TD)
- bgcolor: str, background color
- col_align, col_valign, col_char, col_charoff, col_styles: see Table class
- attribs: dict, additional attributes for the TR tag
Reference: http://www.w3.org/TR/html4/struct/tables.html#h-11.2.5
"""
def __init__(self, cells=None, bgcolor=None, header=False, attribs=None,
col_align=None, col_valign=None, col_char=None,
col_charoff=None, col_styles=None):
"""TableCell constructor"""
self.bgcolor = bgcolor
self.cells = cells
self.header = header
self.col_align = col_align
self.col_valign = col_valign
self.col_char = col_char
self.col_charoff = col_charoff
self.col_styles = col_styles
self.attribs = attribs
if attribs==None:
self.attribs = {}
def __str__(self):
"""return the HTML code for the table row as a string"""
attribs_str = ""
if self.bgcolor: self.attribs['bgcolor'] = self.bgcolor
for attr in self.attribs:
attribs_str += ' %s="%s"' % (attr, self.attribs[attr])
if self.header:
result = '<THEAD>'
else:
result = ''
result += ' <TR%s>\n' % attribs_str
for cell in self.cells:
col = self.cells.index(cell) # cell column index
if not isinstance(cell, TableCell):
cell = TableCell(cell, header=self.header)
# apply column alignment if specified:
if self.col_align and cell.align==None:
cell.align = self.col_align[col]
if self.col_char and cell.char==None:
cell.char = self.col_char[col]
if self.col_charoff and cell.charoff==None:
cell.charoff = self.col_charoff[col]
if self.col_valign and cell.valign==None:
cell.valign = self.col_valign[col]
# apply column style if specified:
if self.col_styles and cell.style==None:
cell.style = self.col_styles[col]
result += str(cell)
result += ' </TR>\n'
if self.header:
result += '</THEAD>'
return result
#-------------------------------------------------------------------------------
class Table (object):
"""
    a Table object is used to create an HTML table. (TABLE tag)
Attributes:
- rows: list, tuple or any iterable, containing one iterable or TableRow
object for each row
- header_row: list, tuple or any iterable, containing the header row (optional)
- border: str or int, border width
- style: str, table style in CSS syntax (thin black borders by default)
- width: str, width of the table on the page
- attribs: dict, additional attributes for the TABLE tag
- col_width: list or tuple defining width for each column
- col_align: list or tuple defining horizontal alignment for each column
- col_char: list or tuple defining alignment character for each column
- col_charoff: list or tuple defining charoff attribute for each column
- col_valign: list or tuple defining vertical alignment for each column
- col_styles: list or tuple of HTML styles for each column
Reference: http://www.w3.org/TR/html4/struct/tables.html#h-11.2.1
"""
def __init__(self, rows=None, border='1', style=None, width=None,
cellspacing=None, cellpadding=4, attribs=None, header_row=None,
col_width=None, col_align=None, col_valign=None,
col_char=None, col_charoff=None, col_styles=None):
"""TableCell constructor"""
self.border = border
self.style = style
# style for thin borders by default
if style == None: self.style = TABLE_STYLE_THINBORDER
self.width = width
self.cellspacing = cellspacing
self.cellpadding = cellpadding
self.header_row = header_row
self.rows = rows
if not rows: self.rows = []
self.attribs = attribs
if not attribs: self.attribs = {}
self.col_width = col_width
self.col_align = col_align
self.col_char = col_char
self.col_charoff = col_charoff
self.col_valign = col_valign
self.col_styles = col_styles
def __str__(self):
"""return the HTML code for the table as a string"""
attribs_str = ""
if self.border: self.attribs['border'] = self.border
if self.style: self.attribs['style'] = self.style
if self.width: self.attribs['width'] = self.width
if self.cellspacing: self.attribs['cellspacing'] = self.cellspacing
if self.cellpadding: self.attribs['cellpadding'] = self.cellpadding
for attr in self.attribs:
attribs_str += ' %s="%s"' % (attr, self.attribs[attr])
result = '<TABLE%s>\n' % attribs_str
# insert column tags and attributes if specified:
if self.col_width:
for width in self.col_width:
result += ' <COL width="%s">\n' % width
# First insert a header row if specified:
if self.header_row:
if not isinstance(self.header_row, TableRow):
result += str(TableRow(self.header_row, header=True))
else:
result += str(self.header_row)
# Then all data rows:
for row in self.rows:
if not isinstance(row, TableRow):
row = TableRow(row)
# apply column alignments and styles to each row if specified:
# (Mozilla bug workaround)
if self.col_align and not row.col_align:
row.col_align = self.col_align
if self.col_char and not row.col_char:
row.col_char = self.col_char
if self.col_charoff and not row.col_charoff:
row.col_charoff = self.col_charoff
if self.col_valign and not row.col_valign:
row.col_valign = self.col_valign
if self.col_styles and not row.col_styles:
row.col_styles = self.col_styles
result += str(row)
result += '</TABLE>'
return result
def table(*args, **kwargs):
'return HTML code for a table as a string. See Table class for parameters.'
return str(Table(*args, **kwargs))
#-------------------------------------------------------------------------------
tab = ' '
class Tag():
'''
A class to provide correct opening and closing tags,
    with indentation support via an HTML class instance.
Implies usage of the "with" statement:
with Tag('tag', HTML-instance):
<code>
'''
def __init__(self, name, HTML):
self.name = name
self.HTML = HTML
def __enter__(self):
self.HTML.content += tab * self.HTML.indent + '<' + self.name + '>\n'
self.HTML.indent += 1
def __exit__(self, exc_type, exc_value, traceback):
self.HTML.indent -= 1
self.HTML.content += tab * self.HTML.indent + '</' + self.name + '>\n'
class HTML():
'''
HTML() class instance accumulates generated HTML code, handles indentation
and provides several html-tags as methods, returning Tag() class instances.
Common usage pattern:
h = HTML()
with h.html():
with h.head():
            with h.title():
h.add('Hello world page')
with h.body():
with h.h1():
h.add('Hello World!')
with h.p():
h.add('This is the HTML code')
print(str(h))
'''
def __init__(self):
self.indent = 0
self.content = '<!DOCTYPE html>\n'
def __str__(self):
return self.content
def add(self, text):
for line in text.split('\n'):
self.content += tab * self.indent + line + '\n'
def html(self):
return Tag('html', self)
def body(self):
return Tag('body', self)
def head(self):
return Tag('head', self)
def title(self):
return Tag('title', self)
def h1(self):
return Tag('h1', self)
def h2(self):
return Tag('h2', self)
def style(self):
return Tag('style', self)
def p(self):
return Tag('p', self)
#-------------------------------------------------------------------------------
class Track():
'''
Stores track name and list of track events:
[NUMBER, CLIP_NAME, START TC, END TC, DURATION TC]
'''
def __init__(self, name):
self.name = name
self.events = []
class Session():
'''
Session() instance reads .txt file, exported from Pro Tools and
stores every tracks EDL as list of Track() instances.
Supports export to .csv and .html formats.
'''
def __init__(self, filename):
# Open session info file for reading
csv_reader = csv.reader(filename, dialect='excel-tab')
# Create array for Track() objects
self.tracks = []
for raw_row in csv_reader:
# Check, whether the row is not empty.
if raw_row:
# Remove all whitespaces from start and end of the cells.
row = [cell.strip() for cell in raw_row]
# Get track name.
if row[0].startswith('TRACK NAME:'):
track = Track(name=row[1])
self.tracks.append(track)
continue
# Skip original header lines.
if row[0].startswith('CHANNEL'):
continue
if len(row) > 6:
track.events.append(row[1:6])
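    # A hedged sketch of the expected tab-separated export (the column names
    # are assumptions inferred from the 'TRACK NAME:' and 'CHANNEL' checks
    # above; events keep columns 1..5, i.e. EVENT through DURATION):
    #   TRACK NAME:\tDialog 1
    #   CHANNEL\tEVENT\tCLIP NAME\tSTART TIME\tEND TIME\tDURATION\tSTATE
    #   1\t1\tclip_01\t01:00:00:00\t01:00:05:00\t00:00:05:00\tUnmuted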
def to_csv(self, filename):
with open(filename, 'w') as outputfile:
csv_writer = csv.writer(outputfile, dialect='excel-tab')
for track in self.tracks:
csv_writer.writerow([''] + [track.name] + ['']*3)
csv_writer.writerow(header)
for line in track.events:
csv_writer.writerow(line)
csv_writer.writerow(footer)
def to_html(self, filename):
h = HTML()
with h.html():
with h.head():
h.add('<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">')
with h.title():
# Add document title
h.add(filename.split('.')[-2].split('/')[-1])
with h.style():
h.add('@media print {')
h.indent += 1
# Add page break after each track's table when printing
h.add('TABLE { page-break-after: always}')
# Configure correct display of table over multiple printing pages
h.add('TR { page-break-inside:avoid; page-break-after:auto }')
h.add('TD { page-break-inside:avoid; page-break-after:auto }')
h.add('THEAD { display:table-header-group }')
h.add('TFOOT { display:table-footer-group }')
# Set default landscape orientation when printing
h.add('@page {size: landscape;}}')
h.indent -= 1
h.add(table_style)
with h.body():
for track in self.tracks:
# Add track name as header
with h.h2():
h.add(track.name)
# Add track's EDL table
h.add(table(track.events,
header_row=header,
width='100%',
border=None,
cellpadding=None,
col_width=['2.5%', '', '5%', '5%', '5%'],
col_align=['center', 'left', 'center', 'center', 'center'],
style=TABLE_STYLE_THINBORDER
))
with open(filename, 'w') as outputfile:
outputfile.write(str(h))
def export(self, filename, to):
outputfile = outname(filename, to)
if to == 'csv':
self.to_csv(outputfile)
else:
self.to_html(outputfile)
print('Source: ' + filename)
print('Result: ' + outputfile)
def outname(filename, ext='csv'):
"""
Constructs output filename from input file,
replacing extension with '.csv'.
Example:
input.txt >>> input.csv
"""
split = (path.basename(filename)).split('.')
l = len(split)
if l > 1:
output = '.'.join(split[0:l-1] + [ext])
else:
output = filename + '.' + ext
return path.join(path.dirname(filename), output)
def main():
parser = argparse.ArgumentParser(
description="Converts '.txt' file from Pro Tools 'Export Session Info as Text' command to '.csv' or '.html' file")
parser.add_argument(
'txt', metavar='textfile', type=argparse.FileType(mode='rU'),
help='session info text file from Pro Tools')
parser.add_argument(
'--to', choices=['csv', 'html'], required=True,
help='export format: "csv" or "html"')
args = parser.parse_args()
# Read session info to Session() object
session = Session(args.txt)
args.txt.close()
    # Export to the file of the chosen format.
session.export(filename=args.txt.name, to=args.to)
if __name__ == '__main__':
main()
|
fantopop/post-production-scripts
|
Rename mixer files/rename_mixer_files.py
|
<filename>Rename mixer files/rename_mixer_files.py
#!/usr/bin/env python3.6
"""
This script renames files from format:
05-5-160928_1009.wav
to:
1009_05-5-160928.wav
where:
1009: recording time,
05-5: channel number,
160928: date.
This was done to group files by takes (recording time).
"""
import re, os, sys, argparse
expression = r'([a-zA-Z0-9\-]+)\_(\d\d\d\d)(\.wav)'
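# How the expression above splits an input name (using the example from the
# module docstring):
#   '05-5-160928_1009.wav' -> group(1)='05-5-160928' (channel and date),
#                             group(2)='1009' (time), group(3)='.wav'
# so new_name below becomes '1009_05-5-160928.wav'.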
def rename_files(path, rename=False):
# Set working path.
try:
os.chdir(path)
except OSError as err:
print(err)
sys.exit(1)
# Get list of filenames.
file_list = os.listdir(path)
# Processed files counter.
count = 0
# Rename files.
for name in file_list:
match = re.search(expression, name)
if match:
new_name = match.group(2) + '_' + match.group(1) + match.group(3)
print(name + ' => ' + new_name)
if rename:
os.rename(name, new_name)
count += 1
return count
def main():
# Parsing command line arguments.
parser = argparse.ArgumentParser(
description="""rename files from '05-5-160928_1009.wav' to '1009_05-5-160928.wav, '
where 1009 - recording time, 05-5 - channel number, 160928 - date""",
epilog="""Warning: Always backup files before renaming!""")
parser.add_argument(
'path', help='path to the folder with files to be renamed')
parser.add_argument(
'-r', '--rename', action='store_true',
help='run script without this option to preview results')
args = parser.parse_args()
path = os.path.abspath(os.path.normpath(args.path))
count = rename_files(path, args.rename)
# Printing result.
if args.rename:
print('\nComplete! %d files renamed.' % count)
else:
print('\n%d matches found. Run script with [-r] option to rename files.' % count)
if __name__ == '__main__':
main()
|
fantopop/post-production-scripts
|
Filter edl tracks/filter_tracks.py
|
<filename>Filter edl tracks/filter_tracks.py<gh_stars>10-100
#!/usr/bin/env python3
'''
Filter events in EDL with matching track name.
It can be used to filter video or audio events.
Example usage:
python filter_tracks path/to/sample.edl -t V -f 24
Dependencies: timecode, edl
'''
import argparse
from os import path
from edl import Parser
def outname(filename, suffix='filtered'):
'''
Constructs output filename from input file,
adding suffix between filename and extension.
Example:
input.txt >>> input.filtered.txt
'''
split = (path.basename(filename)).split('.')
l = len(split)
if l > 1:
output = '.'.join(split[0:l-1] + [suffix, split[l-1]])
else:
output = filename + suffix
return path.join(path.dirname(filename), output)
def filter_tracks(edl, pattern):
'''
Removes events from edl matching with pattern in track name.
Returns number of filtered events.
'''
events = edl.events
# number of events in edl before filtering
before = len(events)
filtered = list(filter(lambda x: x.track.find(pattern) < 0, events))
edl.events = filtered
# number of events in edl after filtering
after = len(filtered)
# return difference
return before - after
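# For example, with pattern 'A' an event on track 'A2' is removed while an
# event on track 'V1' is kept (str.find returns -1 when the pattern is absent,
# so only events whose track name lacks the pattern survive the filter).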
def main():
'''
Filters tracks in EDL according to passed argument (A by default)
and writes to a new EDL file.
'''
# Parsing command line arguments.
parser = argparse.ArgumentParser(
description='''
Filter tracks in EDL.
''')
parser.add_argument('source', type=argparse.FileType(mode='r'), help='source EDL file')
parser.add_argument('-t', '--tracks', type=str, help='tracks to filter', default='A')
parser.add_argument('-f', '--fps', type=str, help='EDL frame rate', default='24')
args = parser.parse_args()
# tracks name pattern to filter
pattern = args.tracks
# create edl parser with corresponding frame rate
print(f'Working in {args.fps}fps')
parser = Parser(args.fps)
# read EDL file
edl = parser.parse(args.source)
# filter events, with matching track name
num_filtered = filter_tracks(edl, pattern)
with open(outname(args.source.name), 'w') as output:
output.write(edl.to_string())
print(f'Filtered {num_filtered} events matching \'{pattern}\' in track name')
if __name__ == '__main__':
main()
|
fantopop/post-production-scripts
|
Session info to table/modules/table.py
|
<reponame>fantopop/post-production-scripts<filename>Session info to table/modules/table.py<gh_stars>10-100
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
"""
HTML.py - v0.04 2009-07-28 <NAME>
This module provides a few classes to easily generate HTML code such as tables
and lists.
Project website: http://www.decalage.info/python/html
License: CeCILL (open-source GPL compatible), see source code for details.
http://www.cecill.info
"""
__version__ = '0.04'
__date__ = '2009-07-28'
__author__ = '<NAME>'
#--- LICENSE ------------------------------------------------------------------
# Copyright <NAME> - see http://www.decalage.info/contact for contact info
#
# This module provides a few classes to easily generate HTML tables and lists.
#
# This software is governed by the CeCILL license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/or redistribute the software under the terms of the CeCILL
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
#
# A copy of the CeCILL license is also provided in these attached files:
# Licence_CeCILL_V2-en.html and Licence_CeCILL_V2-fr.html
#
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
#
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
#--- CHANGES ------------------------------------------------------------------
# 2008-10-06 v0.01 PL: - First version
# 2008-10-13 v0.02 PL: - added cellspacing and cellpadding to table
# - added functions to ease one-step creation of tables
# and lists
# 2009-07-21 v0.03 PL: - added column attributes and styles (first attempt)
# (thanks to an idea submitted by <NAME>)
# 2009-07-28 v0.04 PL: - improved column styles, workaround for Mozilla
#-------------------------------------------------------------------------------
#TODO:
# - method to return a generator (yield each row) instead of a single string
# - unicode support (input and output)
# - escape text in cells (optional)
# - constants for standard colors
# - use lxml to generate well-formed HTML ?
# - add classes/functions to generate a HTML page, paragraphs, headings, etc...
#--- THANKS --------------------------------------------------------------------
# - <NAME>, for the idea of column styles.
#--- REFERENCES ----------------------------------------------------------------
# HTML 4.01 specs: http://www.w3.org/TR/html4/struct/tables.html
# Colors: http://www.w3.org/TR/html4/types.html#type-color
# Column alignment and style, one of the oldest and trickiest bugs in Mozilla:
# https://bugzilla.mozilla.org/show_bug.cgi?id=915
#--- CONSTANTS -----------------------------------------------------------------
# Table style to get thin black lines in Mozilla/Firefox instead of 3D borders
TABLE_STYLE_THINBORDER = "border: 1px solid #000000; border-collapse: collapse;"
#TABLE_STYLE_THINBORDER = "border: 1px solid #000000;"
#=== CLASSES ===================================================================
class TableCell (object):
"""
    a TableCell object is used to create a cell in an HTML table. (TD or TH)
Attributes:
- text: text in the cell (may contain HTML tags). May be any object which
can be converted to a string using str().
- header: bool, false for a normal data cell (TD), true for a header cell (TH)
- bgcolor: str, background color
- width: str, width
    - align: str, horizontal alignment (left, center, right, justify or char)
- char: str, alignment character, decimal point if not specified
- charoff: str, see HTML specs
- valign: str, vertical alignment (top|middle|bottom|baseline)
- style: str, CSS style
- attribs: dict, additional attributes for the TD/TH tag
Reference: http://www.w3.org/TR/html4/struct/tables.html#h-11.2.6
"""
def __init__(self, text="", bgcolor=None, header=False, width=None,
align=None, char=None, charoff=None, valign=None, style=None,
attribs=None):
"""TableCell constructor"""
self.text = text
self.bgcolor = bgcolor
self.header = header
self.width = width
self.align = align
self.char = char
self.charoff = charoff
self.valign = valign
self.style = style
self.attribs = attribs
if attribs==None:
self.attribs = {}
def __str__(self):
"""return the HTML code for the table cell as a string"""
attribs_str = ""
if self.bgcolor: self.attribs['bgcolor'] = self.bgcolor
if self.width: self.attribs['width'] = self.width
if self.align: self.attribs['align'] = self.align
if self.char: self.attribs['char'] = self.char
if self.charoff: self.attribs['charoff'] = self.charoff
if self.valign: self.attribs['valign'] = self.valign
if self.style: self.attribs['style'] = self.style
for attr in self.attribs:
attribs_str += ' %s="%s"' % (attr, self.attribs[attr])
if self.text:
text = str(self.text)
else:
# An empty cell should at least contain a non-breaking space
text = ' '
if self.header:
return ' <TH%s>%s</TH>\n' % (attribs_str, text)
else:
return ' <TD%s>%s</TD>\n' % (attribs_str, text)
#-------------------------------------------------------------------------------
class TableRow (object):
"""
    a TableRow object is used to create a row in an HTML table. (TR tag)
Attributes:
- cells: list, tuple or any iterable, containing one string or TableCell
object for each cell
- header: bool, true for a header row (TH), false for a normal data row (TD)
- bgcolor: str, background color
- col_align, col_valign, col_char, col_charoff, col_styles: see Table class
- attribs: dict, additional attributes for the TR tag
Reference: http://www.w3.org/TR/html4/struct/tables.html#h-11.2.5
"""
def __init__(self, cells=None, bgcolor=None, header=False, attribs=None,
col_align=None, col_valign=None, col_char=None,
col_charoff=None, col_styles=None):
"""TableCell constructor"""
self.bgcolor = bgcolor
self.cells = cells
self.header = header
self.col_align = col_align
self.col_valign = col_valign
self.col_char = col_char
self.col_charoff = col_charoff
self.col_styles = col_styles
self.attribs = attribs
if attribs==None:
self.attribs = {}
def __str__(self):
"""return the HTML code for the table row as a string"""
attribs_str = ""
if self.bgcolor: self.attribs['bgcolor'] = self.bgcolor
for attr in self.attribs:
attribs_str += ' %s="%s"' % (attr, self.attribs[attr])
if self.header:
result = '<THEAD>'
else:
result = ''
result += ' <TR%s>\n' % attribs_str
for cell in self.cells:
col = self.cells.index(cell) # cell column index
if not isinstance(cell, TableCell):
cell = TableCell(cell, header=self.header)
# apply column alignment if specified:
if self.col_align and cell.align==None:
cell.align = self.col_align[col]
if self.col_char and cell.char==None:
cell.char = self.col_char[col]
if self.col_charoff and cell.charoff==None:
cell.charoff = self.col_charoff[col]
if self.col_valign and cell.valign==None:
cell.valign = self.col_valign[col]
# apply column style if specified:
if self.col_styles and cell.style==None:
cell.style = self.col_styles[col]
result += str(cell)
result += ' </TR>\n'
if self.header:
result += '</THEAD>'
return result
#-------------------------------------------------------------------------------
class Table (object):
"""
    a Table object is used to create an HTML table. (TABLE tag)
Attributes:
- rows: list, tuple or any iterable, containing one iterable or TableRow
object for each row
- header_row: list, tuple or any iterable, containing the header row (optional)
- border: str or int, border width
- style: str, table style in CSS syntax (thin black borders by default)
- width: str, width of the table on the page
- attribs: dict, additional attributes for the TABLE tag
- col_width: list or tuple defining width for each column
- col_align: list or tuple defining horizontal alignment for each column
- col_char: list or tuple defining alignment character for each column
- col_charoff: list or tuple defining charoff attribute for each column
- col_valign: list or tuple defining vertical alignment for each column
- col_styles: list or tuple of HTML styles for each column
Reference: http://www.w3.org/TR/html4/struct/tables.html#h-11.2.1
"""
def __init__(self, rows=None, border='1', style=None, width=None,
cellspacing=None, cellpadding=4, attribs=None, header_row=None,
col_width=None, col_align=None, col_valign=None,
col_char=None, col_charoff=None, col_styles=None):
"""TableCell constructor"""
self.border = border
self.style = style
# style for thin borders by default
if style == None: self.style = TABLE_STYLE_THINBORDER
self.width = width
self.cellspacing = cellspacing
self.cellpadding = cellpadding
self.header_row = header_row
self.rows = rows
if not rows: self.rows = []
self.attribs = attribs
if not attribs: self.attribs = {}
self.col_width = col_width
self.col_align = col_align
self.col_char = col_char
self.col_charoff = col_charoff
self.col_valign = col_valign
self.col_styles = col_styles
def __str__(self):
"""return the HTML code for the table as a string"""
attribs_str = ""
if self.border: self.attribs['border'] = self.border
if self.style: self.attribs['style'] = self.style
if self.width: self.attribs['width'] = self.width
if self.cellspacing: self.attribs['cellspacing'] = self.cellspacing
if self.cellpadding: self.attribs['cellpadding'] = self.cellpadding
for attr in self.attribs:
attribs_str += ' %s="%s"' % (attr, self.attribs[attr])
result = '<TABLE%s>\n' % attribs_str
# insert column tags and attributes if specified:
if self.col_width:
for width in self.col_width:
result += ' <COL width="%s">\n' % width
# The following code would also generate column attributes for style
        # and alignment according to HTML4 specs,
# BUT it is not supported completely (only width) on Mozilla Firefox:
# see https://bugzilla.mozilla.org/show_bug.cgi?id=915
## n_cols = max(len(self.col_styles), len(self.col_width),
## len(self.col_align), len(self.col_valign))
## for i in range(n_cols):
## col = ''
## try:
## if self.col_styles[i]:
## col += ' style="%s"' % self.col_styles[i]
## except: pass
## try:
## if self.col_width[i]:
## col += ' width="%s"' % self.col_width[i]
## except: pass
## try:
## if self.col_align[i]:
## col += ' align="%s"' % self.col_align[i]
## except: pass
## try:
## if self.col_valign[i]:
## col += ' valign="%s"' % self.col_valign[i]
## except: pass
## result += '<COL%s>\n' % col
# First insert a header row if specified:
if self.header_row:
if not isinstance(self.header_row, TableRow):
result += str(TableRow(self.header_row, header=True))
else:
result += str(self.header_row)
# Then all data rows:
for row in self.rows:
if not isinstance(row, TableRow):
row = TableRow(row)
# apply column alignments and styles to each row if specified:
# (Mozilla bug workaround)
if self.col_align and not row.col_align:
row.col_align = self.col_align
if self.col_char and not row.col_char:
row.col_char = self.col_char
if self.col_charoff and not row.col_charoff:
row.col_charoff = self.col_charoff
if self.col_valign and not row.col_valign:
row.col_valign = self.col_valign
if self.col_styles and not row.col_styles:
row.col_styles = self.col_styles
result += str(row)
result += '</TABLE>'
return result
#-------------------------------------------------------------------------------
class List (object):
"""
a List object is used to create an ordered or unordered list in HTML.
(UL/OL tag)
Attributes:
- lines: list, tuple or any iterable, containing one string for each line
- ordered: bool, choice between an ordered (OL) or unordered list (UL)
- attribs: dict, additional attributes for the OL/UL tag
Reference: http://www.w3.org/TR/html4/struct/lists.html
"""
def __init__(self, lines=None, ordered=False, start=None, attribs=None):
"""List constructor"""
if lines:
self.lines = lines
else:
self.lines = []
self.ordered = ordered
self.start = start
if attribs:
self.attribs = attribs
else:
self.attribs = {}
def __str__(self):
"""return the HTML code for the list as a string"""
attribs_str = ""
if self.start: self.attribs['start'] = self.start
for attr in self.attribs:
attribs_str += ' %s="%s"' % (attr, self.attribs[attr])
if self.ordered: tag = 'OL'
else: tag = 'UL'
result = '<%s%s>\n' % (tag, attribs_str)
for line in self.lines:
result += ' <LI>%s\n' % str(line)
result += '</%s>\n' % tag
return result
##class Link (object):
## """
## a Link object is used to create link in HTML. (<a> tag)
##
## Attributes:
## - text: str, text of the link
## - url: str, URL of the link
## - attribs: dict, additional attributes for the A tag
##
## Reference: http://www.w3.org/TR/html4
## """
##
## def __init__(self, text, url=None, attribs=None):
## """Link constructor"""
## self.text = text
## self.url = url
## if attribs:
## self.attribs = attribs
## else:
## self.attribs = {}
##
## def __str__(self):
## """return the HTML code for the link as a string"""
## attribs_str = ""
## if self.url: self.attribs['href'] = self.url
## for attr in self.attribs:
## attribs_str += ' %s="%s"' % (attr, self.attribs[attr])
## return '<a%s>%s</a>' % (attribs_str, text)
#=== FUNCTIONS ================================================================
# much simpler definition of a link as a function:
def Link(text, url):
return '<a href="%s">%s</a>' % (url, text)
def link(text, url):
return '<a href="%s">%s</a>' % (url, text)
def table(*args, **kwargs):
'return HTML code for a table as a string. See Table class for parameters.'
return str(Table(*args, **kwargs))
def list(*args, **kwargs):
'return HTML code for a list as a string. See List class for parameters.'
return str(List(*args, **kwargs))
#=== MAIN =====================================================================
# Show sample usage when this file is launched as a script.
if __name__ == '__main__':
# open an HTML file to show output in a browser
f = open('test.html', 'w')
t = Table()
t.rows.append(TableRow(['A', 'B', 'C'], header=True))
t.rows.append(TableRow(['D', 'E', 'F']))
t.rows.append(('i', 'j', 'k'))
f.write(str(t) + '<p>\n')
print(str(t))
print('-'*79)
t2 = Table([
('1', '2'),
['3', '4']
], width='100%', header_row=('col1', 'col2'),
col_width=('', '75%'))
f.write(str(t2) + '<p>\n')
print(t2)
print('-'*79)
t2.rows.append(['5', '6'])
t2.rows[1][1] = TableCell('new', bgcolor='red')
t2.rows.append(TableRow(['7', '8'], attribs={'align': 'center'}))
f.write(str(t2) + '<p>\n')
print(t2)
print('-'*79)
# sample table with column attributes and styles:
table_data = [
['Smith', 'John', 30, 4.5],
['Carpenter', 'Jack', 47, 7],
['Johnson', 'Paul', 62, 10.55],
]
    htmlcode = table(table_data,
header_row = ['<NAME>', '<NAME>', 'Age', 'Score'],
col_width=['', '20%', '10%', '10%'],
col_align=['left', 'center', 'right', 'char'],
col_styles=['font-size: large', '', 'font-size: small', 'background-color:yellow'])
f.write(htmlcode + '<p>\n')
print(htmlcode)
print('-'*79)
def gen_table_squares(n):
"""
Generator to create table rows for integers from 1 to n
"""
## # First, header row:
## yield TableRow(('x', 'square(x)'), header=True, bgcolor='blue')
## # Then all rows:
for x in range(1, n+1):
yield (x, x*x)
t = Table(rows=gen_table_squares(10), header_row=('x', 'square(x)'))
f.write(str(t) + '<p>\n')
print('-'*79)
l = List(['aaa', 'bbb', 'ccc'])
f.write(str(l) + '<p>\n')
l.ordered = True
f.write(str(l) + '<p>\n')
l.start=10
f.write(str(l) + '<p>\n')
f.close()
|
lmawalker/bpnet
|
bpnet/BPNet.py
|
import os
from kipoi_utils.data_utils import get_dataset_item, numpy_collate_concat
from kipoi_utils.utils import unique_list
import keras.backend as K
import matplotlib.ticker as ticker
from bpnet.functions import softmax
from genomelake.extractors import FastaExtractor
from keras.models import load_model
from collections import OrderedDict
from bpnet.plot.tracks import plot_tracks, filter_tracks
from bpnet.extractors import extract_seq
from bpnet.data import nested_numpy_minibatch
from bpnet.seqmodel import SeqModel
from tqdm import tqdm
from bpnet.utils import flatten_list
from concise.utils.plot import seqlogo
from bpnet.functions import mean
from concise.preprocessing import encodeDNA
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from genomelake.extractors import BigwigExtractor
import pyBigWig
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
# TODO - remove the fasta file
# TODO is it possible to get rid of this class entirely?
class BPNetSeqModel:
"""BPNet based on SeqModel
"""
def __init__(self, seqmodel, fasta_file=None):
self.seqmodel = seqmodel
self.tasks = self.seqmodel.tasks
self.fasta_file = fasta_file
assert isinstance(self.seqmodel, SeqModel)
# TODO - add some sanity checks (profile head available etc)
@classmethod
def from_mdir(cls, model_dir):
from bpnet.seqmodel import SeqModel
# figure out also the fasta_file if present (from dataspec)
from bpnet.dataspecs import DataSpec
ds_path = os.path.join(model_dir, "dataspec.yml")
if os.path.exists(ds_path):
ds = DataSpec.load(ds_path)
fasta_file = ds.fasta_file
else:
fasta_file = None
return cls(SeqModel.from_mdir(model_dir), fasta_file=fasta_file)
def input_seqlen(self):
return self.seqmodel.seqlen
def predict(self, seq, batch_size=512):
"""Make model prediction
Args:
seq: numpy array of one-hot-encoded array of sequences
batch_size: batch size
Returns:
dictionary key=task and value=prediction for the task
"""
preds = self.seqmodel.predict(seq, batch_size=batch_size)
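        # Scale each task's profile by the exponentiated count prediction so the
        # returned values are on an absolute count scale rather than normalized.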
return {task: preds[f'{task}/profile'] * np.exp(preds[f'{task}/counts'][:, np.newaxis])
for task in self.seqmodel.tasks}
def contrib_score_all(self, seq, method='deeplift', aggregate_strand=True, batch_size=512,
pred_summaries=['profile/wn', 'counts/pre-act']):
"""Compute all contribution scores
Args:
seq: one-hot encoded DNA sequences
method: 'grad', 'deeplift' or 'ism'
            aggregate_strand: if True, the average contribution scores across strands will be returned
batch_size: batch size when computing the contribution scores
Returns:
dictionary with keys: {task}/{pred_summary}/{strand_i} or {task}/{pred_summary}
and values with the same shape as `seq` corresponding to contribution scores
"""
assert aggregate_strand
contrib_scores = self.seqmodel.contrib_score_all(seq, method=method)
return {f"{task}/" + self._get_old_contrib_score_name(pred_summary): contrib_scores[f"{task}/{pred_summary}"]
for task in self.seqmodel.tasks
for pred_summary in pred_summaries}
def _get_old_contrib_score_name(self, s):
# TODO - get rid of the old nomenclature
s2s = {"profile/wn": 'profile', 'counts/pre-act': 'count'}
return s2s[s]
def sim_pred(self, central_motif, side_motif=None, side_distances=[], repeat=128, contribution=[]):
"""Embed two motifs in random sequences and obtain their average predictions.
Args:
contribution: list of contribution scores
"""
from bpnet.simulate import generate_seq, average_profiles, flatten
batch_size = repeat
seqlen = self.seqmodel.seqlen
tasks = self.seqmodel.tasks
# simulate sequence
seqs = encodeDNA([generate_seq(central_motif, side_motif=side_motif,
side_distances=side_distances, seqlen=seqlen)
for i in range(repeat)])
# get predictions
scaled_preds = self.predict(seqs, batch_size=batch_size)
if contribution:
# get the contribution scores (compute only the profile and counts contribution)
contrib_scores_all = self.seqmodel.contrib_score_all(seqs, intp_pattern=['*/profile/wn', '*/counts/pre-act'])
contrib_scores = {t: {self._get_old_contrib_score_name(contrib_score_name): seqs * contrib_scores_all[f'{t}/{contrib_score_name}']
for contrib_score_name in contribution}
for t in tasks}
# merge and aggregate the profiles
out = {"contrib": contrib_scores, "profile": scaled_preds}
else:
out = {"profile": scaled_preds}
return average_profiles(flatten(out, "/"))
def get_seq(self, regions, variants=None, use_strand=False, fasta_file=None):
"""Get the one-hot-encoded sequence used to make model predictions and
optionally augment it with the variants
"""
if fasta_file is None:
fasta_file = self.fasta_file
if variants is not None:
if use_strand:
raise NotImplementedError("use_strand=True not implemented for variants")
# Augment the regions using a variant
if not isinstance(variants, list):
variants = [variants] * len(regions)
else:
assert len(variants) == len(regions)
seq = np.stack([extract_seq(interval, variant, fasta_file, one_hot=True)
for variant, interval in zip(variants, regions)])
else:
variants = [None] * len(regions)
seq = FastaExtractor(fasta_file, use_strand=use_strand)(regions)
return seq
def predict_all(self, seq, contrib_method='grad', batch_size=512, pred_summaries=['profile/wn', 'counts/pre-act']):
"""Make model prediction based
"""
preds = self.predict(seq, batch_size=batch_size)
if contrib_method is not None:
contrib_scores = self.contrib_score_all(seq, method=contrib_method, aggregate_strand=True,
batch_size=batch_size, pred_summaries=pred_summaries)
else:
contrib_scores = dict()
out = [dict(
seq=get_dataset_item(seq, i),
# interval=regions[i],
pred=get_dataset_item(preds, i),
# TODO - shall we call it hyp_contrib score or contrib_score?
contrib_score=get_dataset_item(contrib_scores, i),
) for i in range(len(seq))]
return out
def predict_regions(self, regions,
variants=None,
contrib_method='grad',
pred_summaries=['profile/wn', 'counts/pre-act'],
use_strand=False,
fasta_file=None,
batch_size=512):
"""
Args:
regions: list of pybedtools.Interval
variant: a single instance or a list bpnet.extractors.Variant
pred_summary: 'mean' or 'max', summary function name for the profile gradients
compute_grads: if False, skip computing gradients
"""
seq = self.get_seq(regions, variants, use_strand=use_strand, fasta_file=fasta_file)
preds = self.predict_all(seq, contrib_method, batch_size, pred_summaries=pred_summaries)
# append regions
for i in range(len(seq)):
preds[i]['interval'] = regions[i]
if variants is not None:
preds[i]['variant'] = variants[i]
return preds
def plot_regions(self, regions, ds=None, variants=None,
seqlets=[],
pred_summary='profile/wn',
contrib_method='grad',
batch_size=128,
# ylim=None,
xlim=None,
# seq_height=1,
rotate_y=0,
add_title=True,
fig_height_per_track=2,
same_ylim=False,
fig_width=20):
"""Plot predictions
Args:
regions: list of pybedtools.Interval
variant: a single instance or a list of bpnet.extractors.Variant
ds: DataSpec. If provided, the ground truth will be added to the plot
pred_summary: 'mean' or 'max', summary function name for the profile gradients
"""
out = self.predict_regions(regions,
variants=variants,
contrib_method=contrib_method,
# pred_summary=pred_summary,
batch_size=batch_size)
figs = []
if xlim is None:
xmin = 0
else:
xmin = xlim[0]
shifted_seqlets = [s.shift(-xmin) for s in seqlets]
for i in range(len(out)):
pred = out[i]
interval = out[i]['interval']
if ds is not None:
obs = {task: ds.task_specs[task].load_counts([interval])[0] for task in self.tasks}
else:
obs = None
title = "{i.chrom}:{i.start}-{i.end}, {i.name} {v}".format(i=interval, v=pred.get('variant', ''))
# handle the DNase case
if isinstance(pred['seq'], dict):
seq = pred['seq']['seq']
else:
seq = pred['seq']
if obs is None:
# TODO - simplify?
viz_dict = OrderedDict(flatten_list([[
(f"{task} Pred", pred['pred'][task]),
(f"{task} Contrib profile", pred['contrib_score'][f"{task}/{pred_summary}"] * seq),
# (f"{task} Contrib counts", sum(pred['grads'][task_idx]['counts'].values()) / 2 * seq),
] for task_idx, task in enumerate(self.tasks)]))
else:
viz_dict = OrderedDict(flatten_list([[
(f"{task} Pred", pred['pred'][task]),
(f"{task} Obs", obs[task]),
(f"{task} Contrib profile", pred['contrib_score'][f"{task}/{pred_summary}"] * seq),
# (f"{task} Contrib counts", sum(pred['grads'][task_idx]['counts'].values()) / 2 * seq),
] for task_idx, task in enumerate(self.tasks)]))
if add_title:
title = "{i.chrom}:{i.start}-{i.end}, {i.name} {v}".format(i=interval, v=pred.get('variant', '')),
else:
title = None
if same_ylim:
fmax = {feature: max([np.abs(viz_dict[f"{task} {feature}"]).max() for task in self.tasks])
for feature in ['Pred', 'Contrib profile', 'Obs']}
ylim = []
for k in viz_dict:
f = k.split(" ", 1)[1]
if "Contrib" in f:
ylim.append((-fmax[f], fmax[f]))
else:
ylim.append((0, fmax[f]))
else:
ylim = None
fig = plot_tracks(filter_tracks(viz_dict, xlim),
seqlets=shifted_seqlets,
title=title,
fig_height_per_track=fig_height_per_track,
rotate_y=rotate_y,
fig_width=fig_width,
ylim=ylim,
legend=True)
figs.append(fig)
return figs
def export_bw(self,
regions,
output_prefix,
fasta_file=None,
contrib_method='grad',
pred_summaries=['profile/wn', 'counts/pre-act'],
batch_size=512,
                  scale_contribution=False,
                  flip_negative_strand=False,
chromosomes=None):
"""Export predictions and model contributions to big-wig files
Args:
regions: list of genomic regions
output_prefix: output file prefix
batch_size:
            scale_contribution: if True, multiply the contribution scores by the predicted count value
            chromosomes: a list of chromosome names making up the genome
"""
from pysam import FastaFile
# pred_summary: which operation to use for the profile gradients
logger.info("Get model predictions and contribution scores")
out = self.predict_regions(regions,
contrib_method=contrib_method,
pred_summaries=pred_summaries,
fasta_file=fasta_file,
batch_size=batch_size)
        # Determine how many strands to write in export-bw
n_tracks = out[0]['pred'][self.tasks[0]].shape[1]
        assert n_tracks <= 2, "More than 2 tracks predicted; bigWig export supports at most 2 strands per task"
if n_tracks == 1:
output_feats = ['preds', 'contrib.profile', 'contrib.counts']
elif n_tracks == 2:
output_feats = ['preds.pos', 'preds.neg', 'contrib.profile', 'contrib.counts']
logger.info("Setup bigWigs for writing")
# Get the genome lengths
if fasta_file is None:
fasta_file = self.fasta_file
fa = FastaFile(fasta_file)
if chromosomes is None:
genome = OrderedDict([(c, l) for c, l in zip(fa.references, fa.lengths)])
else:
genome = OrderedDict([(c, l) for c, l in zip(fa.references, fa.lengths) if c in chromosomes])
fa.close()
# make sure the regions are in the right order
first_chr = list(np.unique(np.array([interval.chrom for interval in regions])))
last_chr = [c for c, l in genome.items() if c not in first_chr]
genome = [(c, genome[c]) for c in first_chr + last_chr]
# open bigWigs for writing
bws = {}
for task in self.tasks:
bws[task] = {}
for feat in output_feats:
delim = "." if not output_prefix.endswith("/") else ""
bw_preds_pos = pyBigWig.open(f"{output_prefix}{delim}{task}.{feat}.bw", "w")
bw_preds_pos.addHeader(genome)
bws[task][feat] = bw_preds_pos
def add_entry(bw, arr, interval, start_idx=0):
"""Macro for adding an entry to the bigwig file
Args:
bw: pyBigWig file handle
arr: 1-dimensional numpy array
interval: genomic interval pybedtools.Interval
start_idx: how many starting values in the array to skip
"""
assert arr.ndim == 1
assert start_idx < len(arr)
if interval.stop - interval.start != len(arr):
logger.warning(f"interval.stop - interval.start ({interval.stop - interval.start})!= len(arr) ({len(arr)})")
logger.warning(f"Skipping the entry: {interval}")
return
bw.addEntries(interval.chrom, interval.start + start_idx,
values=arr[start_idx:],
span=1, step=1)
def to_1d_contrib(hyp_contrib, seq):
# mask the hyp_contrib + add them up
return (hyp_contrib * seq).sum(axis=-1)
# interval logic to handle overlapping intervals
# assumption: all intervals are sorted w.r.t the start coordinate
# strategy: don't write values at the same position twice (skip those)
#
# graphical representation:
# ... ] - prev_stop
# [ ] - new interval 1
# [ ] - added chunk from interval 1
# [ ] - new interval 2 - skip
# [ ] - new interval 3, fully add
logger.info("Writing to bigWigs")
prev_stop = None # Keep track of what the previous interval already covered
prev_chrom = None
for i in tqdm(range(len(out))):
interval = out[i]['interval']
if prev_chrom != interval.chrom:
# Encountered a new chromosome
prev_stop = 0 # Restart the end-counter
prev_chrom = interval.chrom
if prev_stop >= interval.stop:
# Nothing new to add to that range
continue
start_idx = max(prev_stop - interval.start, 0)
for tid, task in enumerate(self.tasks):
# Write predictions
preds = out[i]['pred'][task]
if n_tracks == 1:
add_entry(bws[task]['preds'], preds[:, 0],
interval, start_idx)
elif n_tracks == 2:
add_entry(bws[task]['preds.pos'], preds[:, 0],
interval, start_idx)
if flip_negative_strand:
add_entry(bws[task]['preds.neg'], preds[:, 1]*-1,
interval, start_idx)
else:
add_entry(bws[task]['preds.neg'], preds[:, 1],
interval, start_idx)
# Get the contribution scores
seq = out[i]['seq']
hyp_contrib = out[i]['contrib_score']
if scale_contribution:
    # scale both contribution tracks by the total predicted counts in the region
    si_profile = preds.sum()
    si_counts = preds.sum()
else:
    si_profile = 1
    si_counts = 1
# Skip regions where more than one nucleotide is encoded at a genomic position
if not np.all(seq.astype(bool).sum(axis=-1).max() == 1):
    continue
if 'profile/wn' in pred_summaries:
add_entry(bws[task]['contrib.profile'],
to_1d_contrib(hyp_contrib[f'{task}/profile'], seq) * si_profile,
interval, start_idx)
if 'counts/pre-act' in pred_summaries:
add_entry(bws[task]['contrib.counts'],
to_1d_contrib(hyp_contrib[f'{task}/count'], seq) * si_counts,
interval, start_idx)
prev_stop = max(interval.stop, prev_stop)
logger.info("Done writing. Closing bigWigs")
# Close all the big-wig files
for task in self.tasks:
for feat in output_feats:
bws[task][feat].close()
logger.info(f"Done! Output files stored as: {output_prefix}{delim}*")
|
lmawalker/bpnet
|
setup.py
|
<filename>setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
requirements = [
"argh",
"attr",
"related",
"cloudpickle>=1.0.0",
"concise==0.6.7",
"deepexplain",
# ml
"gin-config",
"keras==2.2.4",
"scikit-learn",
# "tensorflow",
# numerics
"h5py",
"numpy",
"pandas",
"scipy",
"statsmodels",
# Plotting
"matplotlib>=3.0.2",
"plotnine",
"seaborn",
# genomics
"pybigwig",
"pybedtools", # remove?
"modisco==0.5.3.0",
# "pyranges",
"joblib",
"cloudpickle>=1.0.0", # - remove?
"kipoi>=0.6.8",
"kipoi-utils>=0.3.0",
"kipoiseq>=0.2.2",
"papermill",
"jupyter_client>=6.1.2",
"ipykernel",
"nbconvert>=5.5.0",
"vdom>=0.6",
# utils
"ipython",
"tqdm",
# "pprint",
# Remove
"genomelake",
"pysam", # replace with pyfaidx
]
optional = [
"comet_ml",
"wandb",
"fastparquet",
"python-snappy",
"ipywidgets", # for motif simulation
]
test_requirements = [
"pytest>=3.3.1",
"pytest-cov>=2.6.1",
# "pytest-xdist",
"gdown", # download files from google drive
"virtualenv",
]
dependency_links = [
"deepexplain @ git+https://github.com/kundajelab/DeepExplain.git@#egg=deepexplain-0.1"
]
setup(
name="bpnet",
version='0.0.23',
description=("BPNet: toolkit to learn motif synthax from high-resolution functional genomics data"
" using convolutional neural networks"),
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/kundajelab/bpnet",
packages=find_packages(),
install_requires=requirements,
extras_require={
"dev": test_requirements,
"extras": optional,
},
license="MIT license",
entry_points={'console_scripts': ['bpnet = bpnet.__main__:main']},
zip_safe=False,
keywords=["deep learning",
"computational biology",
"bioinformatics",
"genomics"],
test_suite="tests",
package_data={'bpnet': ['logging.conf']},
include_package_data=True,
tests_require=test_requirements
)
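# Install sketch (assuming this setup.py sits at the repository root):
#   pip install -e .            # core requirements
#   pip install -e ".[dev]"     # plus the test requirements
#   pip install -e ".[extras]"  # plus the optional extras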
|
iskander-akhmetov/WIM-Summarization
|
GreedSum.py
|
<filename>GreedSum.py
import argparse
from sklearn.feature_extraction.text import TfidfVectorizer
import numpy as np
def greed_sum(text, num_sent, min_df=1, max_df=1.0):
#fit a TFIDF vectorizer
vectorizer = TfidfVectorizer(min_df=min_df, max_df=max_df)
vectorizer.fit(text)
#get the matrix
X = vectorizer.transform(text).toarray()
#get the sentence indices
idx = []
while X.sum() != 0:
ind = np.argmax(X.sum(axis=1))
idx.append(ind)
#update the matrix deleting the columns corresponding to the words found in previous step
cols = X[ind]
col_idx = [i for i in range(len(cols)) if cols[i] > 0]
X = np.delete(X, col_idx, 1)
idx = idx[:num_sent]
idx.sort()
summary = [text[i] for i in idx]
return summary
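# Usage sketch (illustrative): `text` is a list of sentences, so the function
# can also be called directly without the CLI below.
# sentences = ["The cat sat on the mat.", "Dogs bark at cats.", "The mat is red."]
# print(greed_sum(sentences, num_sent=2))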
def main():
#parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--input_fname', help="Summarization input file name")
parser.add_argument('--output_fname', help="Summarization output file name")
parser.add_argument('--min_df', help="Minimum document frequency word threshold")
parser.add_argument('--max_df', help="Maximum document frequency word threshold")
parser.add_argument('--num_sent', help="Number of sentences for summary")
args = parser.parse_args()
if not args.input_fname:
    print("No input file name was provided, quitting")
    quit()
else:
INPUT_FILENAME = args.input_fname
print("Input file name was provided:", INPUT_FILENAME)
OUTPUT_FILENAME = 'summary_output.txt' #the default file name
if not args.output_fname:
print("No output file name was provided, using the default", OUTPUT_FILENAME)
else:
OUTPUT_FILENAME = args.output_fname
print("Output file name was provided:", OUTPUT_FILENAME)
MIN_DF = 1 #the default min_df
if not args.min_df:
print("No minimum document frequency parameter was provided, using the default", MIN_DF)
else:
MIN_DF = float(args.min_df)
print("Minimum document frequency parameter was provided:", MIN_DF)
MAX_DF = 1.0 #the default max_df (a proportion of documents, matching the function default)
if not args.max_df:
print("No maximum document frequency parameter was provided, using the default", MAX_DF)
else:
MAX_DF = float(args.max_df)
print("Maximum document frequency parameter was provided:", MAX_DF)
NUM_SENT = 1 #the default number of summary sentences
if not args.num_sent:
print("No number of sentences was provided, using the default", NUM_SENT)
else:
NUM_SENT = int(args.num_sent)
print("Number of sentences was provided:", NUM_SENT)
# 1. Get the text from the file provided
f = open(INPUT_FILENAME, 'r')
text = ' '.join(f.readlines()).replace('\n', ' ').replace('  ', ' ').split('. ')
f.close()
# 2. Summarize
summary = greed_sum(text, NUM_SENT, min_df=MIN_DF, max_df=MAX_DF)
# 3. Save summary to a file
f = open(OUTPUT_FILENAME, 'w')
f.writelines(summary)
f.close()
if __name__ == "__main__":
main()
|
singh4java/python-core
|
conversationBot.py
|
<gh_stars>0
from chatterbot import ChatBot
from chatterbot.trainers import ListTrainer
import os
from termcolor import colored
colors = {'G': 'grey', 'R': 'red', 'GR': 'green', 'Y': 'yellow', 'B': 'blue', 'M': 'magenta', 'C': 'cyan', 'W': 'white'}
print('------Color List-------')
print(colors)
x = input('Select bot response color from color list ')
color = colors.get(x)
def botreply(reply, color):
    text = f'ChatBot :{reply}'
    colored_text = colored(text, color=color)
    print(colored_text)
bot = ChatBot('God')
trainer = ListTrainer(bot)
pathurl = "C:/software/Python-PyCharm/chatterbot-corpus-master/chatterbot_corpus/data/english/"
for filename in os.listdir(pathurl):
    with open(pathurl + filename, 'r') as f:
        data = f.readlines()
    trainer.train(data)
while True:
msg = input('You : ')
if msg.strip() == 'Bye':
    botreply('Bye', color)
    break
reply = bot.get_response(msg)
botreply(reply, color)
break
|
KeithCes/cipherCreator
|
main.py
|
<filename>main.py
"""Copyright (c) 2017 * <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
def userInput():
print("Please enter which cipher you would like to use: (CAESAR, ATBASH, \
MORSE)")
cipherChoice = str(input()).upper()
if cipherChoice == "CAESAR":
print("Please type the phrase you want converted: ")
phrase = input()
print("Please enter how many numbers you want your phrase shifted (0-26): ")
shift = int(input())
if shift > 26 or shift < 0:
print("Please input a valid shift")
else:
caesar(phrase, shift)
elif cipherChoice == "ATBASH":
print("Please type the phrase you want converted: ")
phrase = input()
atbash(phrase)
elif cipherChoice == "MORSE":
print("Please type the phrase you want converted: ")
phrase = input()
morse(phrase)
def morse(phrase):
    # Table-driven rewrite of the original letter-by-letter elif chain
    morse_map = {
        'a': '.-', 'b': '-...', 'c': '-.-.', 'd': '-..', 'e': '.',
        'f': '..-.', 'g': '--.', 'h': '....', 'i': '..', 'j': '.---',
        'k': '-.-', 'l': '.-..', 'm': '--', 'n': '-.', 'o': '---',
        'p': '.--.', 'q': '--.-', 'r': '.-.', 's': '...', 't': '-',
        'u': '..-', 'v': '...-', 'w': '.--', 'x': '-..-', 'y': '-.--',
        'z': '--..',
    }
    phrase = phrase.lower()  # the original called phrase.lower() without assigning it
    morsePhrase = [morse_map[ch] for ch in phrase if ch in morse_map]
    print(" ".join(morsePhrase))
def atbash(phrase):
    phrase = phrase.lower()  # the original called phrase.lower() without assigning it
    # Atbash maps 'a'<->'z', 'b'<->'y', ...; ord('a') + ord('z') == 219
    atbashPhrase = [chr(219 - ord(ch)) for ch in phrase if 'a' <= ch <= 'z']
    print("".join(atbashPhrase))
def caesar(phrase, shift):
    phrase = phrase.lower()  # the original called phrase.lower() without assigning it
    caesarPhrase = []
    for ch in phrase:
        if 'a' <= ch <= 'z':
            # letter position 1-26, shifted, wrapped back into 1-26
            pos = ord(ch) - ord('a') + 1 + shift
            if pos > 26:
                pos -= 26
            caesarPhrase.append(chr(ord('a') + pos - 1))
        elif ch == ' ':
            caesarPhrase.append(ch)
    print("".join(caesarPhrase))
userInput()
|
mariusavram91/python_collection
|
fizzbuzz.py
|
<filename>fizzbuzz.py
#!/usr/bin/python3
import unittest
'''
Write a program that: for multiples of three returns “Fizz” instead of the
number and for the multiples of five returns “Buzz”.
For numbers which are multiples of both three and five returns “FizzBuzz”.
For the rest it returns the number.
'''
def fizzbuzz(number):
if number % 3 == 0 and number % 5 == 0:
return 'FizzBuzz'
elif number % 3 == 0:
return 'Fizz'
elif number % 5 == 0:
return 'Buzz'
else:
return str(number)
class FizzBuzzTest(unittest.TestCase):
def setUp(self):
pass
def test_fizzbuzz_for_3(self):
self.assertEqual(fizzbuzz(3), 'Fizz')
def test_fizzbuzz_for_5(self):
self.assertEqual(fizzbuzz(5), 'Buzz')
def test_fizzbuzz_for_multiple_of_3(self):
self.assertEqual(fizzbuzz(9), 'Fizz')
def test_fizzbuzz_for_multiple_of_5(self):
self.assertEqual(fizzbuzz(10), 'Buzz')
def test_fizzbuzz_for_multiple_of_3_and_5(self):
self.assertEqual(fizzbuzz(15), 'FizzBuzz')
def test_fizzbuzz_for_other_number(self):
self.assertEqual(fizzbuzz(7), '7')
if __name__ == '__main__':
unittest.main()
|
mariusavram91/python_collection
|
1_multiples_of_3_and_5.py
|
#!/usr/bin/python3
import unittest
'''
Problem 1: https://projecteuler.net/problem=1
If we list all the natural numbers below 10 that are multiples of 3 or 5,
we get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
Correct answer: 233168
'''
def sum_3_or_5_multiples(lastNumber):
    total = 0  # avoid shadowing the builtin sum
    if isinstance(lastNumber, int) and lastNumber > 0:
        for n in range(0, lastNumber):
            if (n % 3 == 0) or (n % 5 == 0):
                total += n
    return total
print("Sum: " + str(sum_3_or_5_multiples(1000)))
class Sum3Or5MultiplesTest(unittest.TestCase):
def setUp(self):
pass
def test_sum_of_3_and_5_multiples_below_10(self):
self.assertEqual(sum_3_or_5_multiples(10), 23)
def test_passed_number_is_negative(self):
self.assertEqual(sum_3_or_5_multiples(-10), 0)
def test_passed_number_is_string(self):
self.assertFalse(sum_3_or_5_multiples('10'))
def test_passed_number_is_float(self):
self.assertFalse(sum_3_or_5_multiples(10.5))
def test_passed_number_is_zero(self):
self.assertFalse(sum_3_or_5_multiples(0))
if __name__ == '__main__':
unittest.main()
|
mariusavram91/python_collection
|
binary_tree_search.py
|
#!/usr/bin/python3
import unittest
class Node:
def __init__(self, number):
self.number = number
self.left = None
self.right = None
def count_nodes_in_longest_unique_path(tree):
return len(find_longest_unique_path(tree, [], 0, []))
def find_all_paths(tree):
    """Return every root-to-leaf path as a list of node values."""
    if tree is None:
        return []
    if tree.left is None and tree.right is None:
        return [[tree.number]]
    subpaths = find_all_paths(tree.left) + find_all_paths(tree.right)
    # the original appended the result of list.extend(), which is always None
    return [[tree.number] + path for path in subpaths]
def find_longest_unique_path(node, paths, pathLength=0, path=None):
if node is None:
return []
if path is None:
path = []
if node.number not in path:
if len(path) > pathLength:
path[pathLength] = node.number
del path[pathLength+1:]
else:
path.append(node.number)
pathLength += 1
if node.left is None and node.right is None:
paths.append(path)
return
find_longest_unique_path(node.left, paths, pathLength, path)
find_longest_unique_path(node.right, paths, pathLength, path)
return path
class BinaryTreeTest(unittest.TestCase):
def setUp(self):
pass
def test_node_1(self):
tree = Node(4)
tree.left = Node(5)
tree.left.left = Node(4)
tree.left.left.left = Node(5)
tree.right = Node(6)
tree.right.left = Node(1)
tree.right.right = Node(6)
paths = []
self.assertEqual(find_longest_unique_path(tree, paths), [4, 6, 1])
self.assertEqual(count_nodes_in_longest_unique_path(tree), 3)
def test_node_2(self):
tree = Node(4)
tree.left = Node(4)
tree.right = Node(6)
self.assertEqual(find_longest_unique_path(tree, []), [4, 6])
self.assertEqual(count_nodes_in_longest_unique_path(tree), 2)
def test_node_3(self):
tree = Node(4)
tree.left = Node(4)
tree.left.left = Node(9)
tree.right = Node(6)
self.assertEqual(find_longest_unique_path(tree, []), [4, 6])
self.assertEqual(count_nodes_in_longest_unique_path(tree), 2)
def test_node_4(self):
tree = Node(2)
tree.left = Node(5)
tree.left.left = Node(1)
tree.left.left.left = Node(8)
tree.right = Node(7)
print(find_all_paths(tree))
self.assertEqual(find_longest_unique_path(tree, []), [2, 5, 1, 8])
self.assertEqual(count_nodes_in_longest_unique_path(tree), 4)
def test_node_5(self):
tree = Node(2)
tree.left = Node(6)
tree.right = Node(1)
tree.right.left = Node(3)
tree.right.right = Node(6)
self.assertEqual(find_longest_unique_path(tree, []), [2, 1, 6])
self.assertEqual(count_nodes_in_longest_unique_path(tree), 3)
def test_node_6(self):
tree = Node(2)
tree.left = Node(6)
tree.right = Node(1)
tree.right.left = Node(3)
tree.right.left.left = Node(8)
tree.right.right = Node(6)
print(find_all_paths(tree))
self.assertEqual(find_longest_unique_path(tree, []), [2, 1, 3, 8])
self.assertEqual(count_nodes_in_longest_unique_path(tree), 4)
def test_node_7(self):
tree = Node(2)
tree.left = Node(6)
tree.right = Node(1)
tree.right.left = Node(3)
tree.right.right = Node(6)
tree.right.right.left = Node(7)
self.assertEqual(find_longest_unique_path(tree, []), [2, 1, 6, 7])
self.assertEqual(count_nodes_in_longest_unique_path(tree), 4)
def test_node_8(self):
tree = Node(4)
tree.left = Node(6)
tree.right = Node(4)
self.assertEqual(find_longest_unique_path(tree, []), [4, 6])
self.assertEqual(count_nodes_in_longest_unique_path(tree), 2)
if __name__ == '__main__':
unittest.main()
|
mariusavram91/python_collection
|
sequence_in_array.py
|
<gh_stars>0
#!/usr/bin/python3
import unittest
'''
Given an array of ints, return True if the sequence 1, 3, 4 appears in the
array somewhere.
'''
def array_has_134(array):
for i in range(len(array)-2):
if array[i] == 1 and array[i+1] == 3 and array[i+2] == 4:
return True
return False
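# A generalized variant (illustrative, not required by the exercise): check for
# any target run, not just [1, 3, 4].
def array_has_run(array, run):
    n = len(run)
    return any(array[i:i + n] == run for i in range(len(array) - n + 1))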
class ArraySequenceTest(unittest.TestCase):
def setUp(self):
pass
def test_134_sequence_is_in_array(self):
self.assertTrue(array_has_134([6, 2, 1, 3, 4, 7]))
def test_134_sequence_is_not_in_array(self):
self.assertFalse(array_has_134([1, 2, 3, 4, 9, 10]))
if __name__ == '__main__':
unittest.main()
|
mariusavram91/python_collection
|
sum_numbers_in_array.py
|
#!/usr/bin/python3
import time
import unittest
'''
Write a program which returns a pair of numbers from a given array that sum up
to a given number. Assume the list is sorted and contains integers.
'''
def solution(numbers, target):
    # two-pointer scan over a sorted list; avoids shadowing the builtins list/sum
    low = 0
    high = len(numbers) - 1
    while low < high:
        current_sum = numbers[low] + numbers[high]
        if current_sum == target:
            return [numbers[low], numbers[high]]
        elif current_sum < target:
            low += 1
        else:
            high -= 1
    return -1
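# Why the two-pointer scan is sound: with a sorted list, a sum that is too
# small can only be fixed by advancing `low`, and one that is too large only
# by retreating `high`, so each element is visited at most once (O(n)).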
class SolutionTest(unittest.TestCase):
def setUp(self):
pass
def test_solution_when_it_is_not_possible(self):
self.assertEqual(solution([2, 4, 7, 9], 8), -1)
def test_solution_when_it_is_possible(self):
self.assertEqual(solution([1, 3, 6, 9], 7), [1, 6])
if __name__ == "__main__":
start_time = time.time()
result = solution([1, 3, 6, 9], 7)
end_time = time.time()
print("Execution time:")
print(end_time - start_time)
print()
unittest.main()
|
mariusavram91/python_collection
|
process_mad_lib.py
|
#!/usr/bin/python3
# Let's put it all together. Write code for the function process_madlib, which
# takes in a string "madlib" and returns the string "processed", where each
# instance of "NOUN" is replaced with a random noun and each instance of "VERB"
# is replaced with a random verb. You're free to change what the random
# functions return as verbs or nouns for your own fun, but for submissions keep
# the code the way it is!
from random import randint
def random_verb():
random_num = randint(0, 1)
if random_num == 0:
return "run"
else:
return "kayak"
def random_noun():
random_num = randint(0, 1)
if random_num == 0:
return "sofa"
else:
return "llama"
def word_transformer(word):
if word == "NOUN":
return random_noun()
elif word == "VERB":
return random_verb()
else:
return word[0]
def process_madlib(mad_lib):
processed = ""
sentence_len = len(mad_lib)
char_position = 0
substring_len = 4
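# Slide a 4-character window across the sentence: "NOUN" and "VERB" are both
# 4 characters long, so a match replaces the whole window and skips ahead by 4;
# otherwise word_transformer returns just the first character and the window
# advances by 1.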
while char_position < sentence_len:
processed_word = word_transformer(
mad_lib[char_position:char_position + substring_len])
processed += processed_word
if len(processed_word) > 1:
char_position += substring_len
else:
char_position += 1
return processed
test_string_1 = "This is a good NOUN to use when you VERB your food"
test_string_2 = "I'm going to VERB to the store and pick up a NOUN or two."
print(process_madlib(test_string_1))
print(process_madlib(test_string_2))
|
mariusavram91/python_collection
|
2_even_fibonacci_numbers.py
|
#!/usr/bin/python3
import unittest
'''
Problem 2: https://projecteuler.net/problem=2
Each new term in the Fibonacci sequence is generated by adding the previous
two terms. By starting with 1 and 2, the first 10 terms will be:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
By considering the terms in the Fibonacci sequence whose values do not exceed
four million, find the sum of the even-valued terms.
Correct answer: 4613732
'''
def sum_evens_fibonacci(limit):
sum = 0
a, b = 1, 2
if(isinstance(limit, int) and limit > 0):
while a < limit:
if a % 2 == 0:
sum += a
a, b = b, a + b
return sum
print("Sum: " + str(sum_evens_fibonacci(4*(10**6))))
class SumEvenFibonacciNumbersTest(unittest.TestCase):
def setUp(self):
pass
def test_sum_of_even_fibonacci_below_10(self):
self.assertEqual(sum_evens_fibonacci(10), 10)
def test_passed_number_is_negative(self):
self.assertEqual(sum_evens_fibonacci(-10), 0)
def test_passed_number_is_string(self):
self.assertFalse(sum_evens_fibonacci('10'))
def test_passed_number_is_float(self):
self.assertFalse(sum_evens_fibonacci(10.5))
def test_passed_number_is_zero(self):
self.assertFalse(sum_evens_fibonacci(0))
if __name__ == '__main__':
unittest.main()
|
alekm/hambot
|
hambot.py
|
import time
import json
import discord
import logging
from discord.ext import commands, tasks
import random
intents = discord.Intents(
guilds=True,
members=True,
messages=True,
reactions=True
)
bot = discord.Bot(
description="Hambot",
intents=intents,
)
#logging.basicConfig(level=logging.INFO)
@bot.event
async def on_ready():
    # the original defined a free function taking `self` that was never registered
    print(f' Username: {bot.user}')
    print(f' Servers: {len(bot.guilds)}')
    print('-----\nReady...')
    print('WELCOME TO HAMBOT\n-----\n')
config = {}
with open('config.json', 'r') as f:
print('loading config...')
config = json.load(f)
config['embedcolor'] = int(config['embedcolor'], 16)
print(' config loaded.')
bot.owner_id = config['ownerId']
bot.start_time = time.time()
bot.config = config
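# Expected config.json layout, inferred from the keys read here and in
# modules/lookup.py (values are placeholders):
# {
#   "token": "YOUR_BOT_TOKEN",
#   "ownerId": 123456789012345678,
#   "embedcolor": "31a896",
#   "hamqth": {"username": "...", "password": "..."}
# }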
#Load modules
cogs = [
'modules.lookup',
'modules.dxcc',
'modules.utils.embed',
'modules.setstatus',
'modules.misc',
]
print('loading extensions...')
for cog in cogs:
bot.load_extension(cog)
print(' done.')
print('starting bot...')
try:
bot.run(config['token'])
except discord.LoginFailure as ex:
raise SystemExit("Error: Failed to authenticate: {}".format(ex))
except discord.ConnectionClosed as ex:
raise SystemExit("Error: Discord gateway connection closed: [Code {}] {}".format(ex.code, ex.reason))
except ConnectionResetError as ex:
raise SystemExit("ConnectionResetError: {}".format(ex))
|
alekm/hambot
|
modules/dxcc.py
|
"""
DXCC Prefix Lookup extension for qrm
---
Copyright (C) 2019-2020 classabbyamp, 0x5c (as lookup.py)
Copyright (C) 2021 classabbyamp, 0x5c
SPDX-License-Identifier: LiLiQ-Rplus-1.1
Modified by N4OG 2022 - converted to slash command
"""
import threading
from pathlib import Path
from ctyparser import BigCty
from discord.ext import commands, tasks
from discord.commands import ( # Importing the decorator that makes slash commands.
slash_command,
)
#import common as cmn
from onlinelookup import olresult, hamqth, callook, olerror
import discord
from datetime import datetime
cty_path = Path("cty.json")
class DXCCCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.embed = bot.get_cog('EmbedCog')
try:
self.cty = BigCty(cty_path)
except OSError:
self.cty = BigCty()
@slash_command(name="dx", description="Get DXCC information about a callsign prefix")
async def _dxcc_lookup(self, ctx, query: str):
await ctx.trigger_typing()
query = query.upper()
full_query = query
embed = discord.Embed(title = "DXCC Info for ", colour=0x31a896, timestamp=datetime.now())
embed.description = f"*Last Updated: {self.cty.formatted_version}*"
while query:
if query in self.cty.keys():
data = self.cty[query]
embed.add_field(name="Entity", value=data["entity"])
embed.add_field(name="CQ Zone", value=data["cq"])
embed.add_field(name="ITU Zone", value=data["itu"])
embed.add_field(name="Continent", value=data["continent"])
embed.add_field(name="Time Zone",
value=f"+{data['tz']}" if data["tz"] > 0 else str(data["tz"]))
embed.title += query
break
else:
query = query[:-1]
else:
embed.title += full_query + " not found"
await ctx.respond(embed=embed)
@tasks.loop(hours=24)
async def _update_cty(self):
update = threading.Thread(target=run_update, args=(self.cty, cty_path))
update.start()
def run_update(cty_obj, dump_loc):
update = cty_obj.update()
if update:
cty_obj.dump(dump_loc)
def setup(bot: commands.Bot):
dxcccog = DXCCCog(bot)
bot.add_cog(dxcccog)
dxcccog._update_cty.start()
|
alekm/hambot
|
modules/misc.py
|
<reponame>alekm/hambot
import time
import discord
from discord.ext import commands
from datetime import datetime, timedelta
from discord.commands import ( # Importing the decorator that makes slash commands.
slash_command,
)
'''
TODO:
- Oof counter
- Make this server specific
'''
class MiscCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.embed_service = bot.get_cog('EmbedCog')
@slash_command(name="utc", description="Replies with Universal Coordinated Time")
async def utc(self, ctx):
full_time = str(datetime.utcnow())
full_time_split = full_time.strip().split()
date = full_time_split[0]
time_str = full_time_split[1][0:8]  # avoid shadowing the imported time module
await ctx.respond(embed=self.embed_service
.generate(
title='Universal Coordinated Time',
description=f'**Date:** {date}\n**Time:** {time_str}'
), ephemeral=True
)
@slash_command(name="uptime", description="hambot uptime")
async def uptime(self, ctx):
await ctx.respond(f'I have been alive for {self.calc_uptime()}')
@slash_command(name="help", description="hambot help")
async def help(self, ctx):
await ctx.respond(embed=self.embed_service
.generate(
title="Help",
description=help_message
), ephemeral=True
)
@slash_command(name="about", description="hambot about")
async def about(self, ctx):
await ctx.respond(embed=self.embed_service
.generate(
title="Help",
description=hb_about + self.calc_uptime(),
footer='hambot 1.1.0 by N4OG\n'
'\tbased on HamTheMan by thisguyistotallyben'
), ephemeral=True
)
@slash_command(name="study", description="License Study Information")
async def study(self, ctx):
embed=discord.Embed(title="Study using the Ham.Study app or Website",description=study_text, colour=0x31a896, timestamp=datetime.now())
embed.set_image(url='https://blog.hamstudy.org/wp-content/uploads/2013/10/hamstudy_blue.png')
await ctx.respond(embed=embed)
def calc_uptime(self):
up = str(timedelta(seconds=(time.time() - self.bot.start_time)))
# parse it pretty-like
upsplit = up.split(',', 1)
if len(upsplit) == 1:
days = '0'
else:
days = upsplit[0].split()[0]
upsplit[0] = upsplit[1]
upsplit = upsplit[0].split(':')
if len(upsplit) != 3:
return ''
hours = upsplit[0]
minutes = upsplit[1]
if minutes[0] == '0':
minutes = minutes[1]
seconds = upsplit[2].split('.', 1)[0]
if seconds[0] == '0':
seconds = seconds[1]
# horribly complicated, but appeases my awful need for proper plurality
rets = ''
rets += f"{days} day{'' if days == '1' else 's'}, "
rets += f"{hours} hour{'' if hours == '1' else 's'}, "
rets += f"{minutes} minute{'' if minutes == '1' else 's'}, "
rets += f"{seconds} second{'' if seconds == '1' else 's'}"
return rets
def setup(bot):
bot.add_cog(MiscCog(bot))
'''
STRINGS AND STUFF
'''
# help dialog
help_message = ('**Core commands**\n'
'\t`/cond`: Solar conditions (Source: hamqsl.com)\n'
'\t`/muf`: Maximum Usable Frequency information (Source: prop.kc2g.com)\n'
'\t`/fof2`: Frequency of F2 Layer (NVIS) information (Source: prop.kc2g.com)\n '
'\t`/drap`: D Region Absorption Prediction map\n'
'\t`/utc`: Time in UTC\n'
'\t`/call [callsign]`: Callsign information (Sources: HamQTH'
', callook.info)\n'
'\t`/dx [prefix]`: DXCC information about a call prefix\n'
'\n\t`/about`: About the bot\n'
'\t`/uptime`: Bot uptime\n')
hb_about = ('**Author**\n'
'\tAlek, N4OG\n'
'\n**Tools**\n'
'\tPython 3.10\n'
'\tPy-Cord 2.0.0-beta.7\n'
'\tlibrsvg2\n'
'\n**Data Sources**\n'
'\tSolar conditions from hamqsl.com\n'
'\tOnline callsign lookups from HamQTH and callook.info\n'
'\tMaximum Usable Frequency (MUF) data from prop.kc2g.com\n'
'\n**Uptime**\n\t')
study_text = ("A good way to study is with the ham.study application. You can install the "
"application on a phone or tablet, or you can use it on-line. So, if you don't want to "
"pay the $4 for the application, you can just access it through a browser from any "
"device, even if you're not connected to the Internet. If you access hamstudy with a "
"browser, it's always free, but you do need to Register with your email address for it "
"to keep track of your progress.\n"
'In either case, you should create an account by "Registering" on hamstudy.org. Do '
'not use Google or Facebook - register with an email address. This creates a free '
"account that keeps track of your progress.\n"
"Once you've Registered for your account, do this:\n"
"Login to ham.study using your username and password.\n"
"Choose the Technician (2018 - 2022) exam by clicking on Technician (2018 - 2022):\n"
"Click on Study Mode:\n"
"Use the drop-down option in the top bar to change from All Questions to just T1:\n"
"Click on T1.\n"
"Now go through each question in T1, until you've Seen 100% of the questions, and "
"your Aptitude is 85% or more.\n"
"Only then go to the next Sub element (T2).\n"
"Continue doing this with each sub element.\n"
"Do not skip sub elements.\n"
"Do not take practice exams until you've Seen 100% of each sub element and your "
"Aptitude is 85% or more in each sub element.\n"
"The bar graph on the right will display your Seen and Aptitude.\n"
"If you have any questions about how to use hamstudy, or questions about the "
"questions and answers, just reply to this email. There's an explanation of the answer "
"when you're in Study Mode. Just click on I don't know. The reason for I don't know "
"instead of guessing is the app is designed to give you questions more frequently if "
"you select I don't know instead of getting it wrong.\n\n"
"Once you are done studying for Technician you can do the same for General and "
"Extra when ready. You would just substitute the appropriate element you are "
"studying. All credit for this method goes to Norm K6YXH\n")
|
alekm/hambot
|
modules/setstatus.py
|
from discord.ext import commands, tasks
import discord
import random
statuses = [ '7.200',
'14.313',
'3.927',
'3.860'
]
class StatusCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.Cog.listener()
async def on_ready(self):
self.status_change.start()
@tasks.loop(minutes=10)
async def status_change(self):
botStatus = random.choice(statuses)
await self.bot.change_presence(activity=discord.Activity(type=discord.ActivityType.listening, name=botStatus))
def setup(bot):
bot.add_cog(StatusCog(bot))
|
alekm/hambot
|
modules/utils/embed.py
|
import discord
from discord.ext import commands
class EmbedCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
def generate(self, **kwargs):
    title = kwargs.get('title', '')
    description = kwargs.get('description', '')
    footer = kwargs.get('footer', '')
return discord.Embed(
title=title,
description=description,
colour=self.bot.config['embedcolor']
).set_footer(text=footer)
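# Usage from another cog (sketch, mirroring modules/misc.py):
#   embed = self.embed_service.generate(title='Help', description='...', footer='...')
#   await ctx.respond(embed=embed)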
def setup(bot):
bot.add_cog(EmbedCog(bot))
|
alekm/hambot
|
onlinelookup/callook.py
|
"""
Callook Callsign Lookup
Author: <NAME>, AB3NJ
Uses Callook's API to retrieve information on callsigns
"""
import json
import os.path
from urllib import request, error
from . import olerror, olresult
# importorator
__all__ = ['CallookLookup']
def prettify(name):
    # Lower-case all but the first character of each word
    names = name.split()
    newname = ''
    for i in names:
        if len(i) > 1:  # the original tested len(name) instead of the word length
            newname += i[0] + i[1:].lower()
        else:
            newname += i
        newname += ' '
    return newname.strip()
# callook lookup class
class CallookLookup:
def lookup(self, call):
"""
Uses callook.info to look up information on a US callsign
:param call: the callsign to look up
:returns: LookupResult class filled with information from Callook
:raises LookupResultError: if the lookup returns no information
"""
# setup
lr = olresult.LookupResult()
# make request
req = f'https://callook.info/{call}/json'
with request.urlopen(req) as url:
data = json.loads(url.read().decode())
# check if callsign or not
if data['status'] == 'INVALID':
raise olerror.LookupResultError('Callook')
# ## GET THE GOODS ## #
lr.source = 'Callook'
# basic info
lr.callsign = data['current']['callsign']
lr.prevcall = data['previous']['callsign']
lr.name = prettify(data['name'])
lr.opclass = prettify(data['current']['operClass'])
# location
lr.country = 'United States'
lr.grid = data['location']['gridsquare']
addrs = data['address']['line2'].split(',')
addrs2 = addrs[1].split()
lr.city = prettify(addrs[0])
lr.state = addrs2[0]
lr.zip = addrs2[1]
# club stuff
if data['type'] == 'CLUB':
lr.club = True
lr.trusteename = data['trustee']['name']
lr.trusteecall = data['trustee']['callsign']
# uls stuff
lr.frn = data['otherInfo']['frn']
lr.uls = data['otherInfo']['ulsUrl']
# raw data
lr.raw = data
return lr
|
alekm/hambot
|
modules/lookup.py
|
import importlib
import os
import time
from datetime import datetime
from pathlib import Path
import discord
import requests
from ctyparser import BigCty
from discord.commands import \
slash_command # Importing the decorator that makes slash commands.
from discord.ext import commands, tasks
from onlinelookup import callook, hamqth, olerror, olresult
cty_path = Path("cty.json")
class LookupCog(commands.Cog):
def __init__(self, bot):
# reload any changes to the lookup classes
importlib.reload(olresult)
importlib.reload(olerror)
importlib.reload(callook)
importlib.reload(hamqth)
self.bot = bot
self.embed = bot.get_cog('EmbedCog')
self.callook = callook.CallookLookup()
self.hamqth = hamqth.HamQTHLookup(
bot.config['hamqth']['username'],
bot.config['hamqth']['password'])
@slash_command(name="cond", description="Replies with Current Solar Conditions")
async def cond(self, ctx):
await ctx.trigger_typing()
# remove possibly conflicting old file
if os.path.isfile("conditions.jpg"):
    os.remove("conditions.jpg")
# download the latest conditions image
r = requests.get('https://www.hamqsl.com/solar101pic.php')
with open('conditions.jpg', 'wb') as f:
    f.write(r.content)
embed=discord.Embed(title=":sunny: Current Solar Conditions :sunny:",description='Images from https://hamqsl.com', colour=0x31a896, timestamp=datetime.now())
embed.set_image(url='attachment://conditions.jpg')
with open('conditions.jpg', 'rb') as f:
#await ctx.send(file=discord.File(f, 'conditions.gif'))
await ctx.respond(embed=embed, file=discord.File(f, 'conditions.jpg'))
@slash_command(name="drap", description="D Region Absorption Predictions Map" )
async def drap(self, ctx):
await ctx.trigger_typing()
# remove possibly conflicting old file
if os.path.isfile("d-rap.png"):
    os.remove("d-rap.png")
# download the latest map
r = requests.get('https://services.swpc.noaa.gov/images/animations/d-rap/global_f05/d-rap/latest.png')
with open('d-rap.png', 'wb') as f:
    f.write(r.content)
embed=discord.Embed(title=":globe_with_meridians: D Region Absorption Predictions Map :globe_with_meridians:",description='Images from https://www.swpc.noaa.gov/', colour=0x31a896, timestamp=datetime.now())
embed.set_image(url='attachment://d-rap.png')
with open('d-rap.png', 'rb') as f:
await ctx.respond(embed=embed, file=discord.File(f, 'd-rap.png'))
@slash_command(name="fof2", description="Frequency of F2 Layer Map" )
async def fof2(self, ctx):
await ctx.trigger_typing()
fileName="fof2.jpg"
svgName="fof2.svg"
url="https://prop.kc2g.com/renders/current/fof2-normal-now.svg"
embed=discord.Embed(title="Frequency of F2 Layer Map", colour=0x31a896, timestamp=datetime.now())
embed.set_image(url=f'attachment://{fileName}')
#if the muf image already exists and is less than 15 minutes old, send it
if os.path.isfile(fileName) and int(time.time()-os.path.getmtime(fileName))/60<15:
with open(fileName, 'rb') as f:
await ctx.respond(embed=embed, file=discord.File(f, fileName))
#if the muf image does not exist or the image is older than 15 minutes, cleanup files and grab a new one
elif not os.path.isfile(fileName) or int(time.time()-os.path.getmtime(fileName))/60>=15:
if os.path.isfile(fileName):
os.remove(fileName)
if os.path.isfile(svgName):
os.remove(svgName)
#download the latest fof2 map
r = requests.get(url)
with open(svgName, 'wb') as f:
    f.write(r.content)
#convert svg to jpg
convert_svg = os.system(f"rsvg-convert {svgName} > {fileName}")
#cleanup svg because we don't need it hanging around once we have a jpg
if os.path.isfile(svgName):
os.remove(svgName)
with open(fileName, 'rb') as f:
await ctx.respond(embed=embed, file=discord.File(f, fileName))
@slash_command(name="muf", description="Maximum Usable Frequency Map")
async def muf(self, ctx):
await ctx.trigger_typing()
fileName="muf.jpg"
svgName="muf.svg"
url="https://prop.kc2g.com/renders/current/mufd-normal-now.svg"
embed=discord.Embed(title="Maximum Usable Frequency Map", colour=0x31a896, timestamp=datetime.now())
embed.set_image(url=f'attachment://{fileName}')
#if the muf image already exists and is less than 15 minutes old, send it
if os.path.isfile(fileName) and int(time.time()-os.path.getmtime(fileName))/60<15:
with open(fileName, 'rb') as f:
await ctx.respond(embed=embed, file=discord.File(f, fileName))
#if the muf image does not exist or the image is older than 15 minutes, cleanup files and grab a new one
elif not os.path.isfile(fileName) or int(time.time()-os.path.getmtime(fileName))/60>=15:
if os.path.isfile(fileName):
os.remove(fileName)
if os.path.isfile(svgName):
os.remove(svgName)
#download the latest muf map
r = requests.get(url)
with open(svgName, 'wb') as f:
    f.write(r.content)
#convert svg to jpg
convert_svg = os.system(f"rsvg-convert {svgName} > {fileName}")
#cleanup svg because we don't need it hanging around once we have a jpg
if os.path.isfile(svgName):
os.remove(svgName)
with open(fileName, 'rb') as f:
await ctx.respond(embed=embed, file=discord.File(f, fileName))
@slash_command(name="call", description="Display information about a callsign")
async def call(self, ctx, callsign: str):
await ctx.trigger_typing()
result = self.lookup(callsign)
result_embed_desc = ''
if result is None:
await ctx.respond('oof no callsign found', ephemeral=True)
return
elif result.source == 'Callook':
result_embed_desc += self.format_for_callook(result)
elif result.source == 'HamQTH':
result_embed_desc += self.format_for_hamqth(result)
embed=discord.Embed(title=result.callsign,description=result_embed_desc, colour=0x31a896, timestamp=datetime.now())
embed.set_footer(text=f'Source: {result.source}')
await ctx.respond(embed=embed, ephemeral=True)
def lookup(self, callsign):
'''
Try US callsigns first
If that fails, try for all calls
'''
try:
result = self.callook.lookup(callsign)
except olerror.LookupResultError:
try:
result = self.hamqth.lookup(callsign)
except:
return None
return result
''' lookup formatting '''
def format_for_callook(self, r, hqr=None):
rets = ''
# extra info if neccessary
if hqr is not None:
itu = hqr.itu
cq = hqr.cq
# about field
about = ''
about += f'\t**Name:** {r.name}\n'
if not r.club:
about += f'\t**Class:** {r.opclass}\n'
if r.prevcall != '':
about += f'\t**Previous Callsign:** {r.prevcall}\n'
# location field
loc = ''
loc += f'\t**Country:** {r.country}\n'
loc += f'\t**Grid Square:** {r.grid}\n'
loc += f'\t**State:** {r.state}\n'
loc += f'\t**City:** {r.city}\n'
# club field
club = ''
if r.club:
club = '**Club Info**\n'
club += f'\t**Trustee:** {r.trusteename} ({r.trusteecall})\n\n'
# links
links = ''
links += f'\t**QRZ:** https://qrz.com/db/{r.callsign}\n'
links += f'\t**ULS:** {r.uls}\n'
# build magical string
rets = ('**About**\n'
f'{about}'
'\n**Location**\n'
f'{loc}'
'\n'
f'{club}'
'**Links**\n'
f'{links}')
return rets
def format_for_hamqth(self, r):
rets = ''
# about field
if r.name != '':
rets = r.name
elif 'nick' in r.raw:
rets = r.raw['nick']
else:
rets = 'no name given'
rets = f'**About**\n\t**Name:** {rets}\n\n'
# location
rets += f'**Location**\n\t**Country:** {r.country}\n'
rets += f'\t**Grid Square:** {r.grid}\n'
rets += f'\t**City:** {r.city}\n\n'
# links
rets += f'**Links**\n\t**QRZ:** https://qrz.com/db/{r.callsign}\n'
return rets
def setup(bot):
bot.add_cog(LookupCog(bot))
|
feruzfauzi/variable-type-tools
|
fonts/ttf2woff.py
|
#!python3
import sys, re, os, json
from fontTools.ttLib import TTFont
from collections import OrderedDict
fontsdir = os.path.dirname(sys.argv[0])
infont = outfont = None
fontExtension = re.compile(r'\.[ot]tf', re.I)
allAxes = {}
def getName(ttf, nameID):
return [n for n in ttf['name'].names if n.nameID==nameID][0].toUnicode()
os.chdir(fontsdir)
for infont in os.listdir(os.getcwd()):
if not fontExtension.search(infont):
continue
print(infont)
outwoff = fontExtension.sub('.woff', infont)
outwoff2 = fontExtension.sub('.woff2', infont)
try:
ttf = TTFont(infont)
except Exception:
print("Error opening {}".format(infont))
continue
ttf.flavor='woff'
ttf.save(outwoff)
ttf.flavor='woff2'
ttf.save(outwoff2)
axes = OrderedDict()
if 'STAT' in ttf and hasattr(ttf['STAT'], 'table'):
axes['order'] = [a.AxisTag for a in sorted(ttf['STAT'].table.DesignAxisRecord.Axis, key=lambda a:a.AxisOrdering)]
if 'fvar' in ttf:
for axis in ttf['fvar'].axes:
axes[axis.axisTag] = {
'name': getName(ttf, axis.nameID if hasattr(axis, 'nameID') else axis.axisNameID),
'min': axis.minValue,
'max': axis.maxValue,
'default': axis.defaultValue
}
axes['instances'] = []
if hasattr(ttf['fvar'], 'instances'):
for instance in ttf['fvar'].instances:
axes['instances'].append({
'axes': instance.coordinates,
'name': getName(ttf, instance.nameID if hasattr(instance, 'nameID') else instance.subfamilyNameID),
})
allAxes[fontExtension.sub('', infont)] = axes
with open('axes.json', 'w', encoding='utf-8') as axesFile:
json.dump(allAxes, axesFile, indent=2, ensure_ascii=False)
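# Example axes.json entry (illustrative of the structure built above):
# "SomeFont-VF": {
#   "order": ["wght", "wdth"],
#   "wght": {"name": "Weight", "min": 100.0, "max": 900.0, "default": 400.0},
#   "instances": [{"axes": {"wght": 400.0}, "name": "Regular"}]
# }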
|
movermeyer/mls
|
mls.py
|
# coding=utf-8
"""
Unicode-like storage for multiple language mutations.
"""
from locale import getlocale, getdefaultlocale, locale_alias
from six import text_type, u, PY2
__all__ = ["mls", "MultiLingualString"]
def _get_system_locale():
"""
Returns current (or default) system locale.
"""
return getlocale()[0] or getdefaultlocale()[0]
def _extract_language(locale_string):
"""
Extracts language from locale string.
:param locale_string: Something like language_COUNTRY.encoding
:return: language
"""
return locale_string.split("_")[0].lower()
def _convert_to_unicode(value):
"""
Returns provided `value` to unicode or raises ValueError if it can't
be converted.
"""
if PY2 and isinstance(value, str):
value = value.decode("utf-8")
elif not isinstance(value, text_type):
raise ValueError(value)
return text_type(value)
LANGUAGES = sorted({
    _extract_language(locale)
    for locale in locale_alias.values()
    if locale != "C"
})
class MultiLingualString(text_type):
"""
MultiLingualString (or its alias `mls`) is a simple unicode-like storage
for multiple language translations of one string. You can use it as a normal
string, but it can easily be translated to another language (using the
provided translations, or falling back to the current locale's translation
if no mutation for the requested language is provided).
Usage examples:
>>> import locale; locale.setlocale(locale.LC_ALL, "en_US.utf-8")
'en_US.utf-8'
>>> s = mls("Hello, world!")
>>> print(s)
Hello, world!
>>> print(repr(s))
en'Hello, world!'
>>> print(s.language)
en
>>> t = mls(en="Hello, world!", de="Hallo Welt!")
>>> v = t.translate_to("de")
>>> print(v)
Hallo Welt!
>>> w = v.translate_to("en", "Hi, world!")
>>> print(w)
Hi, world!
"""
# pylint: disable=R0904
def __new__(cls, mapping=None, language=None, **kwargs):
# pylint: disable=W0212
if isinstance(mapping, MultiLingualString):
language = language or mapping.language
instance = text_type.__new__(cls, text_type(mapping >> language))
instance._mapping = mapping._mapping
instance.language = language
return instance
language = _extract_language(language or _get_system_locale())
if language not in LANGUAGES:
raise ValueError("Unknown language: {}".format(language))
if mapping is None:
mapping = u("")
if not isinstance(mapping, dict):
mapping = {language: _convert_to_unicode(mapping)}
mapping.update(kwargs)
for key in mapping:
if key not in LANGUAGES:
raise ValueError("Unknown mutation mapping: {}".format(key))
value = mapping.get(language, mapping.get(
_extract_language(_get_system_locale()), u("")))
instance = text_type.__new__(cls, value)
instance._mapping = mapping
instance.language = language
return instance
def translate_to(self, language, value=None):
"""
Create copy of current MultiLingualString, translated to another
`language` mutation. Example:
>>> s = mls({"cs": "Ahoj", "en": "Hello"}, language="en")
>>> t = s.translate_to("cs")
>>> print(t)
Ahoj
>>> v = s.translate_to("en", "Hi")
>>> print(v)
Hi
:param language: language to translate the current `mls` to
:param value: new translation for provided `language`
:return: `mls` translated to `language`
"""
mapping = self._mapping.copy()
if value:
mapping[_extract_language(language)] = _convert_to_unicode(value)
return MultiLingualString(mapping, language)
def __repr__(self):
return u("%s%s") % (
self.language,
super(MultiLingualString, self).__repr__()[PY2:]
)
def __ilshift__(self, translation):
"""
Syntax sugar which replaces current language translation to provided.
Example:
>>> s = mls("Hello", language="en")
>>> s <<= "Hi"
>>> print(s)
Hi
:param translation: new translation
:return: copy of `mls` which will rewrite current object
"""
mapping = self._mapping.copy()
mapping[self.language] = _convert_to_unicode(translation)
return MultiLingualString(mapping, self.language)
def __rshift__(self, language):
"""
Syntax sugar which returns another `language` mutation for current
`mls`. Example:
>>> s = mls(en="Hi", cs="Ahoj", language="en")
>>> t = s >> "cs"
>>> print(t)
Ahoj
:param language: requested language
:return: copy of `mls` translated to provided `language`
"""
return MultiLingualString(self._mapping, language)
# pylint: disable=C0103
mls = MultiLingualString
|
movermeyer/mls
|
setup.py
|
#!/usr/bin/env python
from os.path import dirname, abspath, join
from setuptools import setup
here = abspath(dirname(__file__))
readme = open(join(here, "README.rst"))
setup(
name="mls",
version="1.2.2",
py_modules=["mls"],
url="https://github.com/rembish/mls",
license="BSD",
author="<NAME>",
author_email="<EMAIL>",
description="MultiLingualString",
long_description="".join(readme.readlines()),
test_suite="tests",
install_requires=["six"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Internationalization",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Utilities"
]
)
|
movermeyer/mls
|
tests.py
|
#!/usr/bin/env python
# coding=utf-8
from locale import LC_ALL, setlocale
from six import u, text_type, PY3
from unittest import TestCase as BaseTestCase, main
from mls import mls
class TestCase(BaseTestCase):
def setUp(self):
setlocale(LC_ALL, "en_US.UTF-8")
def test_translate_to(self):
s = mls("Hello, world")
t = s.translate_to("ru", u("Здравствуй, мир"))
p = t.translate_to("en")
v = s.translate_to("cs")
self.assertEqual(s, p)
self.assertEqual(s, v)
def test_dict_init(self):
a = mls({"ru": u("Привет"), "cs": "Ahoj", "en": "Hi"})
b = a.translate_to("ru_RU")
c = b.translate_to("cs", "Nazdar")
self.assertEqual(repr(a), "en'Hi'")
self.assertEqual(
repr(b),
"ru'\u041f\u0440\u0438\u0432\u0435\u0442'" if PY3 else
"ru'\\xd0\\x9f\\xd1\\x80\\xd0\\xb8\\xd0\\xb2\\xd0\\xb5\\xd1\\x82'"
)
self.assertEqual(repr(c), "cs'Nazdar'")
def test_empty(self):
x = mls()
self.assertEqual(str(x), u(""))
self.assertEqual(x.language, "en")
def test_lshift(self):
x = mls()
x <<= "Hello"
self.assertEqual(str(x), u("Hello"))
self.assertTrue("ll" in x)
def test_rshift(self):
p = mls("Hello, world")
t = p.translate_to("ru", u("Здравствуй, мир"))
y = t >> "ru"
self.assertEqual(text_type(y), u("Здравствуй, мир"))
z = mls(y)
self.assertEqual(text_type(z), text_type(y))
self.assertEqual(z.language, "ru")
self.assertEqual(z >> "en", "Hello, world")
def test_from_mls(self):
p = mls("Hello, world")
t = p.translate_to("ru", u("Здравствуй, мир"))
w = mls(t, language="en")
self.assertEqual(w, "Hello, world")
def test_invalids(self):
self.assertRaises(ValueError, mls, "Error", language="xxx")
self.assertRaises(ValueError, mls, {"xxx": u("Проблема")})
self.assertRaises(ValueError, mls, type("Nothing", (), {}))
if __name__ == "__main__":
main()
|
robputt796/netizenship
|
venv/lib/python3.7/site-packages/netizenship.py
|
<reponame>robputt796/netizenship<gh_stars>1-10
#!/usr/bin/python3
"""Tool to automatically check the membership of a given username
in popular websites.
Inspired by: https://github.com/thelinuxchoice/userrecon/blob/master/userrecon.sh
MIT License
Copyright (c) 2020 <NAME>
"""
import requests
from termcolor import colored
from time import sleep
from bs4 import BeautifulSoup
import concurrent.futures
from pyfiglet import figlet_format
global websites
websites = {
'Facebook': 'https://www.facebook.com/',
'Twitter': 'https://twitter.com/',
'Instagram': 'https://www.instagram.com/',
'Youtube': 'https://www.youtube.com/user/',
'Reddit': 'https://www.reddit.com/user/',
'PInterest': 'https://www.pinterest.com/',
'Flickr': 'https://www.flickr.com/people/',
'Vimeo': 'https://vimeo.com/',
'Soundcloud': 'https://soundcloud.com/',
'Disqus': 'https://disqus.com/',
'Medium': 'https://medium.com/',
'AboutMe': 'https://about.me/',
'Imgur': 'https://imgur.com/user/',
'Flipboard': 'https://flipboard.com/',
'Slideshare': 'https://slideshare.net/',
'Spotify': 'https://open.spotify.com/user/',
'Scribd': 'https://www.scribd.com/',
'Patreon': 'https://www.patreon.com/',
'BitBucket': 'https://bitbucket.org/',
'GitLab': 'https://gitlab.com/',
'Github': 'https://www.github.com/',
'GoodReads': 'https://www.goodreads.com/',
'Instructable': 'https://www.instructables.com/member/',
'CodeAcademy': 'https://www.codecademy.com/',
'Gravatar': 'https://en.gravatar.com/',
'Pastebin': 'https://pastebin.com/u/',
'FourSquare': 'https://foursquare.com/',
'TripAdvisor': 'https://tripadvisor.com/members/',
'Wikipedia': 'https://www.wikipedia.org/wiki/User:',
'HackerNews': 'https://news.ycombinator.com/user?id=',
'CodeMentor': 'https://www.codementor.io/',
'Trip': 'https://www.trip.skyscanner.com/user/',
'Blogger': '.blogspot.com',
'Wordpress': '.wordpress.com',
'Tumbler': '.tumblr.com',
'Deviantart': '.deviantart.com',
'LiveJournel': '.livejournal.com',
'Slack': '.slack.com',
}
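# Entries beginning with '.' are subdomain-style sites: the username is
# prepended (https://<uname>.example.com) instead of appended to a URL.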
def main():
def banner(text, ch='=', length=78):
spaced_text = ' %s ' % text
banner = spaced_text.center(length, ch)
print(banner)
def get_website_membership(site):
global uname
global websites
url = websites[site]
if not url.startswith('http'):
    link = 'https://' + uname + url
else:
    link = url + uname
state = "FAIL"
msg = 'unknown'
try:
    response = requests.get(link)
    # the Wikipedia page fetched in main() lists status codes in elements
    # whose ids are the numeric codes, so look the message up there;
    # the id must be a string, and we fall back to requests' own reason
    tag = soup.find(id=str(response.status_code))
    msg = tag.find_parent('dt').text if tag else response.reason
    response.raise_for_status()
except Exception:
    print(site.rjust(width), ':', colored(state.ljust(width // 2), 'red'), '(Status:', msg, ')')
else:
    state = 'SUCCESS'
    print(site.rjust(width), ':', colored(state.ljust(width // 2), 'green'), '(Status:', msg, ')')
ascii_banner = figlet_format('Netizenship')
print(ascii_banner)
banner_text = "MIT License, Copyright (c) 2020 <NAME>"
banner(banner_text)
status_code_html = 'https://en.wikipedia.org/wiki/List_of_HTTP_status_codes'
global uname
uname = input("Enter the username: ")
width = 15
page = requests.get(status_code_html)
soup = BeautifulSoup(page.content, 'html.parser')
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
    try:
        # executor.map is lazy; consume the iterator so worker exceptions
        # and the timeout actually surface here
        list(executor.map(get_website_membership, list(websites.keys()), timeout=5))
    except Exception:
        print('Exception occurred, skipping')
if __name__ == '__main__':
main()
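# Hedged usage note (not part of the original script): run the file directly
# and enter a username at the prompt, e.g.
#
#     $ python3 netizenship.py
#     Enter the username: octocat      # 'octocat' is an illustrative value
#
# Each site in the table is then checked concurrently and reported as
# SUCCESS or FAIL along with the HTTP status message.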
|
ignatowski/u-test
|
server/u_test/html_parser_bs4.py
|
import sys
from bs4 import BeautifulSoup
from .models import Customer, Account, Statement
from typing import List
class HtmlParserBs4:
"""A class used to parse html to objects."""
def __init__(self) -> None:
pass
def parse_customers_html_to_objects(self, customers_html: str, username: str) -> List[Customer]:
"""Iterate over the customers' html, and create and return Customer objects."""
customers = list()
soup = BeautifulSoup(customers_html, 'html.parser')
custs = soup.find_all('ul', attrs={'class':'collection with-header'})
for cust in custs:
attributes = cust.find_all('li')
name = ''
address = ''
emails = ''
phones = ''
for i, attribute in enumerate(attributes):
    if i == 0:
        name = attribute.text
    elif i == 1:
        phones = attribute.text
    elif i == 2:
        emails = attribute.text
    elif i == 3:
        address = attribute.text
customers.append(Customer(name, username, address, emails, phones))
return customers
def parse_accounts_html_to_objects(self, accounts_html: str) -> List[Account]:
"""Iterate over the accounts' html, and create and return Account objects."""
accounts = list()
soup = BeautifulSoup(accounts_html, 'html.parser')
acts = soup.find_all('li', attrs={'class':'collection-item avatar'})
for act in acts:
name = act.find('span', attrs={'class':'title'}).text
number_and_balance = act.find('p')
number = number_and_balance.next_element.strip()
balance = number_and_balance.find('span').text
account_id = act.find('a')['href']
account_id = account_id[account_id.index('/')+1:]
accounts.append(Account(name, number, balance, account_id))
return accounts
def parse_statements_html_to_objects(self, statements_html: str) -> List[Statement]:
"""Iterate over the statements' html, and create and return Statement objects."""
statements = list()
soup = BeautifulSoup(statements_html, 'html.parser')
thead = soup.find('thead')
headers = thead.find_all('th')
headerPositions = {i: header.text.lower() for i, header in enumerate(headers)}
tbody = soup.find('tbody')
stmts = tbody.find_all('tr')
for stmt in stmts:
attributes = stmt.find_all('td')
date = ''
amount = ''
balance = ''
concept = ''
for i, attribute in enumerate(attributes):
    # if the attribute position is named in the header,
    # use the header for reference
    if i in headerPositions:
        if headerPositions[i] == 'statement':
            concept = attribute.text
        elif headerPositions[i] == 'date':
            date = attribute.text
        elif headerPositions[i] == 'amount':
            amount = attribute.text
        elif headerPositions[i] == 'balance':
            balance = attribute.text
    # otherwise fall back to a fixed position
    else:
        if i == 0:
            concept = attribute.text
        elif i == 1:
            date = attribute.text
        elif i == 2:
            amount = attribute.text
        elif i == 3:
            balance = attribute.text
statements.append(Statement(date, amount, balance, concept))
return statements
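# Hedged usage sketch (not in the original file): feeding this parser a
# minimal accounts fragment shaped like the markup it expects. Because of
# the relative import above, run it as a module, e.g.
# python -m u_test.html_parser_bs4.
if __name__ == '__main__':
    sample = (
        '<li class="collection-item avatar">'
        '<span class="title">Checking</span>'
        '<p>ES12 3456 <span>1000.00</span></p>'
        '<a href="statements/42">view</a>'
        '</li>'
    )
    for acct in HtmlParserBs4().parse_accounts_html_to_objects(sample):
        print(acct)  # relies on Account.__str__ from .models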
|
ignatowski/u-test
|
server/u_test/api.py
|
from http import client, cookies
from urllib import request, parse
class Api:
"""A class used to make http requests."""
base_url = 'test.unnax.com'
base_login = '/login'
customer_url = 'http://test.unnax.com/customer'
account_url = 'http://test.unnax.com/account'
statement_url = 'http://test.unnax.com/statements/'
def __init__(self, username: str, password: str) -> None:
self.username = username
self.password = password
self.sessionCookies = None
def login(self) -> None:
"""Login the user and store their cookies."""
conn = client.HTTPConnection(self.base_url)
body = parse.urlencode({'username': self.username, 'password': self.password})
headers = {
'Content-Type': "application/x-www-form-urlencoded",
'cache-control': "no-cache"
}
conn.request("POST", self.base_login, body, headers)
res = conn.getresponse()
if (res.status != 302):
raise Exception('Unable to log in')
rawCookies = res.getheader('Set-Cookie')
self.sessionCookies = cookies.SimpleCookie()
self.sessionCookies.load(rawCookies)
conn.close()
def get_customers_html(self) -> str:
"""Get and return the html from the customer page."""
req = request.Request(url=self.customer_url)
req.add_header('Cookie', 'session=' + self.sessionCookies['session'].value)
res = request.urlopen(req)
if (res.getcode() != 200):
raise Exception('Unable to get customers')
return res.read().decode('utf-8')
def get_accounts_html(self) -> str:
"""Get and return the html from the account page."""
req = request.Request(url=self.account_url)
req.add_header('Cookie', 'session=' + self.sessionCookies['session'].value)
res = request.urlopen(req)
if (res.getcode() != 200):
raise Exception('Unable to get accounts')
return res.read().decode('utf-8')
def get_statements_html(self, account_id: str) -> str:
"""Get and return the html from the statements page."""
req = request.Request(url=self.statement_url+account_id)
req.add_header('Cookie', 'session=' + self.sessionCookies['session'].value)
res = request.urlopen(req)
if (res.getcode() != 200):
raise Exception('Unable to get statements')
return res.read().decode('utf-8')
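# Hedged usage sketch (not in the original file); the credentials are
# placeholders and test.unnax.com is assumed reachable:
#
#     api = Api('demo_user', 'demo_password')
#     api.login()                          # stores the session cookie
#     print(api.get_customers_html()[:200])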
|
ignatowski/u-test
|
server/u_test/api_r.py
|
<filename>server/u_test/api_r.py
import requests
class ApiR:
"""A class used to make http requests."""
login_url = 'http://test.unnax.com/login'
customer_url = 'http://test.unnax.com/customer'
account_url = 'http://test.unnax.com/account'
statement_url = 'http://test.unnax.com/statements/'
def __init__(self, username: str, password: str) -> None:
self.username = username
self.password = password
self.ses = requests.Session()
def login(self) -> None:
"""Login the user and create a session."""
data = {'username': self.username, 'password': self.password}
res = self.ses.post(self.login_url, data=data, allow_redirects=False)
if (res.status_code != 302):
raise Exception('Unable to login')
def get_customers_html(self) -> str:
"""Get and return the html from the customer page."""
res = self.ses.get(self.customer_url)
if (res.status_code != 200):
raise Exception('Unable to get customers')
return res.text
def get_accounts_html(self) -> str:
"""Get and return the html from the account page."""
res = self.ses.get(self.account_url)
if (res.status_code != 200):
raise Exception('Unable to get accounts')
return res.text
def get_statements_html(self, account_id: str) -> str:
"""Get and return the html from the statements page."""
res = self.ses.get(self.statement_url+account_id)
if (res.status_code != 200):
raise Exception('Unable to get statements')
return res.text
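# Design note (added): unlike the http.client version in api.py, a
# requests.Session carries the login cookie across calls automatically,
# which is why no Set-Cookie handling appears above. A hedged usage sketch
# with placeholder credentials:
#
#     api = ApiR('demo_user', 'demo_password')
#     api.login()
#     html = api.get_accounts_html()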
|
ignatowski/u-test
|
server/u_test/printer.py
|
import sys, os, datetime, re
from .models import Account, Customer, Statement
from typing import List
class Printer:
"A class to print accounts, customers, and statements."
tab_size = 4
def __init__(self, accounts: List[Account], customers: List[Customer]) -> None:
"""Iterate over and print accounts, customers, and statements."""
self.print_header()
self.print_account_header(len(accounts))
for account in accounts:
self.print_account(account)
self.print_customer_header(len(customers))
for customer in customers:
self.print_customer(customer)
self.print_statement_header(len(account.statements))
# sort statements by date
statements = sorted(account.statements, key=lambda s: s.date, reverse=True)
# remove currency symbols and convert to numbers
for statement in statements:
statement.amount = float(re.sub("[^0-9.]", "", statement.amount))
statement.balance = float(re.sub("[^0-9.]", "", statement.balance))
# add negative sign if account balance decreased
for i in range(0, len(statements)):
if (i+1 < len(statements) and statements[i].balance < statements[i+1].balance):
statements[i].amount = -1 * statements[i].amount
for statement in statements:
self.print_statement(statement)
self.print_empty_row()
def tab(self, number_of_tabs: int) -> str:
"""Return tabs as spaces."""
return ' ' * (number_of_tabs * self.tab_size)
def print_empty_row(self) -> None:
"""Prints an empty row."""
sys.stdout.buffer.write((os.linesep).encode('utf-8'))
def print_header(self) -> None:
"""Prints the global header."""
self.print_empty_row()
sys.stdout.buffer.write(('# Resultado Ex:'+os.linesep).encode('utf-8'))
sys.stdout.flush()
def print_account_header(self, size: int) -> None:
"""Prints the account header with number of accounts."""
sys.stdout.buffer.write(('Accounts ( ' + str(size) + ' )' + os.linesep).encode('utf-8'))
sys.stdout.flush()
def print_account(self, account: Account) -> None:
"""Prints an individual account object."""
sys.stdout.buffer.write((self.tab(1) + 'Account Data:' + os.linesep).encode('utf-8'))
sys.stdout.buffer.write(str(account).expandtabs(self.tab_size).encode('utf-8'))
self.print_empty_row()
sys.stdout.flush()
def print_customer_header(self, size: int) -> None:
"""Prints the customer header with number of customers"""
sys.stdout.buffer.write((self.tab(1) + 'Total customers: ' + str(size) + os.linesep).encode('utf-8'))
sys.stdout.flush()
def print_customer(self, customer: Customer) -> None:
"""Prints an individual customer object."""
sys.stdout.buffer.write((self.tab(2) + 'Customer Data:' + os.linesep).encode('utf-8'))
sys.stdout.buffer.write(str(customer).expandtabs(self.tab_size).encode('utf-8'))
self.print_empty_row()
sys.stdout.flush()
def print_statement_header(self, size: int) -> None:
"""Prints the statement header with number of statements."""
sys.stdout.buffer.write((self.tab(1) + 'Statements ( ' + str(size) + ' )' + os.linesep).encode('utf-8'))
sys.stdout.buffer.write((self.tab(2) + 'Date | Amount | Balance | Concept' + os.linesep).encode('utf-8'))
sys.stdout.flush()
def print_statement(self, statement: Statement) -> None:
"""Prints an individual statement object."""
sys.stdout.buffer.write(str(statement).expandtabs(self.tab_size).encode('utf-8'))
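# Hedged end-to-end sketch (not in the original file). Printer.__init__ does
# the printing itself, so wiring the pieces together looks roughly like this;
# the credentials are placeholders, and the account_id / statements attribute
# names are assumptions based on the usage above:
#
#     api = ApiR('demo_user', 'demo_password')
#     api.login()
#     parser = HtmlParserBs4()
#     customers = parser.parse_customers_html_to_objects(api.get_customers_html(), 'demo_user')
#     accounts = parser.parse_accounts_html_to_objects(api.get_accounts_html())
#     for account in accounts:
#         account.statements = parser.parse_statements_html_to_objects(
#             api.get_statements_html(account.account_id))
#     Printer(accounts, customers)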
|