id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
5,507 | import json
from uuid import uuid4
import requests
from aws_lambda_powertools.utilities.idempotency import (
DynamoDBPersistenceLayer,
IdempotencyConfig,
idempotent,
)
class PaymentError(Exception):
def create_subscription_payment(user: str, product_id: str) -> dict:
def handler(event, context) -> dict:
body = json.loads(event["body"])
try:
payment: dict = create_subscription_payment(user=body["user"], product_id=body["product_id"])
return {"payment_id": payment.get("id"), "message": "success", "statusCode": 200}
except requests.HTTPError as e:
raise PaymentError("Unable to create payment subscription") from e | null |
5,508 | import base64
import binascii
import json
from dataclasses import asdict, dataclass, field, is_dataclass
from uuid import uuid4
import powertools_base64_jmespath_schema as schemas
from jmespath.exceptions import JMESPathTypeError
from aws_lambda_powertools.utilities.typing import LambdaContext
from aws_lambda_powertools.utilities.validation import SchemaValidationError, validate
class Order:
class DataclassCustomEncoder(json.JSONEncoder):
def default(self, obj):
def return_error_message(message: str) -> dict:
def lambda_handler(event, context: LambdaContext) -> dict:
# Try to validate the schema
try:
validate(event=event, schema=schemas.INPUT, envelope="powertools_json(powertools_base64(payload))")
# alternatively, extract_data_from_envelope works here too
payload_decoded = base64.b64decode(event["payload"]).decode()
order_payload: dict = json.loads(payload_decoded)
return {
"order": json.dumps(Order(**order_payload), cls=DataclassCustomEncoder),
"message": "order created",
"success": True,
}
except JMESPathTypeError:
return return_error_message(
"The powertools_json(powertools_base64()) envelope function must match a valid path.",
)
except binascii.Error:
return return_error_message("Payload must be a valid base64 encoded string")
except json.JSONDecodeError:
return return_error_message("Payload must be valid JSON (base64 encoded).")
except SchemaValidationError as exception:
# SchemaValidationError indicates where a data mismatch is
return return_error_message(str(exception)) | null |
5,509 | import base64
import binascii
import zlib
from jmespath.exceptions import JMESPathTypeError
from jmespath.functions import signature
from aws_lambda_powertools.utilities.jmespath_utils import (
PowertoolsFunctions,
extract_data_from_envelope,
)
custom_jmespath_options = {"custom_functions": CustomFunctions()}
def return_error_message(message: str) -> dict:
return {"logs": None, "message": message, "success": False}
def extract_data_from_envelope(data: Union[Dict, str], envelope: str, jmespath_options: Optional[Dict] = None) -> Any:
"""Searches and extracts data using JMESPath
Envelope being the JMESPath expression to extract the data you're after
Built-in JMESPath functions include: powertools_json, powertools_base64, powertools_base64_gzip
Examples
--------
**Deserialize JSON string and extracts data from body key**
from aws_lambda_powertools.utilities.jmespath_utils import extract_data_from_envelope
from aws_lambda_powertools.utilities.typing import LambdaContext
def handler(event: dict, context: LambdaContext):
# event = {"body": "{\"customerId\":\"dd4649e6-2484-4993-acb8-0f9123103394\"}"} # noqa: ERA001
payload = extract_data_from_envelope(data=event, envelope="powertools_json(body)")
customer = payload.get("customerId") # now deserialized
...
Parameters
----------
data : Dict
Data set to be filtered
envelope : str
JMESPath expression to filter data against
jmespath_options : Dict
Alternative JMESPath options to be included when filtering expr
Returns
-------
Any
Data found using JMESPath expression given in envelope
"""
if not jmespath_options:
jmespath_options = {"custom_functions": PowertoolsFunctions()}
try:
logger.debug(f"Envelope detected: {envelope}. JMESPath options: {jmespath_options}")
return jmespath.search(envelope, data, options=jmespath.Options(**jmespath_options))
except (LexerError, TypeError, UnicodeError) as e:
message = f"Failed to unwrap event from envelope using expression. Error: {e} Exp: {envelope}, Data: {data}" # noqa: B306, E501
raise InvalidEnvelopeExpressionError(message)
def lambda_handler(event, context) -> dict:
try:
logs = []
logs.append(
extract_data_from_envelope(
data=event,
# NOTE: Use the prefix `_func_` before the name of the function
envelope="Records[*].decode_zlib_compression(log)",
jmespath_options=custom_jmespath_options,
),
)
return {"logs": logs, "message": "Extracted messages", "success": True}
except JMESPathTypeError:
return return_error_message("The envelope function must match a valid path.")
except zlib.error:
return return_error_message("Log must be a valid zlib compressed message")
except binascii.Error:
return return_error_message("Log must be a valid base64 encoded string") | null |
5,510 | from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics
from aws_lambda_powertools.utilities.typing import LambdaContext
metrics = DatadogMetrics()
def book_flight(flight_id: str, **kwargs):
# logic to book flight
...
metrics.add_metric(name="SuccessfulBooking", value=1)
def lambda_handler(event: dict, context: LambdaContext):
try:
book_flight(flight_id=event.get("flight_id", ""))
finally:
metrics.flush_metrics() | null |
5,511 | from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics
from aws_lambda_powertools.utilities.typing import LambdaContext
metrics = DatadogMetrics()
def lambda_handler(event: dict, context: LambdaContext):
metrics.add_metric(name="SuccessfulBooking", value=1, tag1="powertools", tag2="python") | null |
5,512 | from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics
from aws_lambda_powertools.utilities.typing import LambdaContext
def lambda_handler(event: dict, context: LambdaContext):
return | null |
5,513 | from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics
from aws_lambda_powertools.utilities.typing import LambdaContext
def lambda_handler(event: dict, context: LambdaContext):
# no metrics being created will now raise SchemaValidationError
return | null |
5,514 | from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics
from aws_lambda_powertools.utilities.typing import LambdaContext
metrics = DatadogMetrics(flush_to_log=True)
def lambda_handler(event: dict, context: LambdaContext):
metrics.add_metric(name="SuccessfulBooking", value=1) | null |
5,515 | import time
from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics
from aws_lambda_powertools.utilities.typing import LambdaContext
metrics = DatadogMetrics()
def lambda_handler(event: dict, context: LambdaContext):
metrics.add_metric(name="SuccessfulBooking", value=1, timestamp=int(time.time())) | null |
5,516 | from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics
from aws_lambda_powertools.utilities.typing import LambdaContext
metrics = DatadogMetrics()
def lambda_handler(event: dict, context: LambdaContext):
metrics.add_metric(name="SuccessfulBooking", value=1) | null |
5,517 | from aws_lambda_powertools.metrics.provider.datadog import DatadogMetrics
from aws_lambda_powertools.utilities.typing import LambdaContext
metrics = DatadogMetrics()
metrics.set_default_tags(tag1="powertools", tag2="python")
def lambda_handler(event: dict, context: LambdaContext):
metrics.add_metric(name="SuccessfulBooking", value=1) | null |
5,518 | import add_datadog_metrics
def test_log_metrics(capsys):
add_datadog_metrics.lambda_handler({}, {})
log = capsys.readouterr().out.strip() # remove any extra line
assert "SuccessfulBooking" in log # basic string assertion in JSON str | null |
5,519 | from dataclasses import dataclass, field
from typing import Callable
from uuid import uuid4
from aws_lambda_powertools.middleware_factory import lambda_handler_decorator
from aws_lambda_powertools.utilities.jmespath_utils import (
envelopes,
extract_data_from_envelope,
)
from aws_lambda_powertools.utilities.typing import LambdaContext
def extract_data_from_envelope(data: Union[Dict, str], envelope: str, jmespath_options: Optional[Dict] = None) -> Any:
def middleware_before(
handler: Callable[[dict, LambdaContext], dict],
event: dict,
context: LambdaContext,
) -> dict:
# extract payload from a EventBridge event
detail: dict = extract_data_from_envelope(data=event, envelope=envelopes.EVENTBRIDGE)
# check if status_id exists in payload, otherwise add default state before processing payment
if "status_id" not in detail:
event["detail"]["status_id"] = "pending"
response = handler(event, context)
return response | null |
5,520 | from dataclasses import dataclass, field
from typing import Callable
from uuid import uuid4
from aws_lambda_powertools.middleware_factory import lambda_handler_decorator
from aws_lambda_powertools.utilities.jmespath_utils import (
envelopes,
extract_data_from_envelope,
)
from aws_lambda_powertools.utilities.typing import LambdaContext
class Payment:
class PaymentError(Exception):
def extract_data_from_envelope(data: Union[Dict, str], envelope: str, jmespath_options: Optional[Dict] = None) -> Any:
def lambda_handler(event: dict, context: LambdaContext) -> dict:
try:
payment_payload: dict = extract_data_from_envelope(data=event, envelope=envelopes.EVENTBRIDGE)
return {
"order": Payment(**payment_payload).__dict__,
"message": "payment created",
"success": True,
}
except Exception as e:
raise PaymentError("Unable to create payment") from e | null |
5,521 | import base64
from dataclasses import dataclass, field
from typing import Any, Callable, List
from uuid import uuid4
from aws_lambda_powertools.middleware_factory import lambda_handler_decorator
from aws_lambda_powertools.utilities.jmespath_utils import (
envelopes,
extract_data_from_envelope,
)
from aws_lambda_powertools.utilities.typing import LambdaContext
def obfuscate_data(value: str) -> bytes:
# base64 is not effective for obfuscation, this is an example
return base64.b64encode(value.encode("ascii"))
def extract_data_from_envelope(data: Union[Dict, str], envelope: str, jmespath_options: Optional[Dict] = None) -> Any:
"""Searches and extracts data using JMESPath
Envelope being the JMESPath expression to extract the data you're after
Built-in JMESPath functions include: powertools_json, powertools_base64, powertools_base64_gzip
Examples
--------
**Deserialize JSON string and extracts data from body key**
from aws_lambda_powertools.utilities.jmespath_utils import extract_data_from_envelope
from aws_lambda_powertools.utilities.typing import LambdaContext
def handler(event: dict, context: LambdaContext):
# event = {"body": "{\"customerId\":\"dd4649e6-2484-4993-acb8-0f9123103394\"}"} # noqa: ERA001
payload = extract_data_from_envelope(data=event, envelope="powertools_json(body)")
customer = payload.get("customerId") # now deserialized
...
Parameters
----------
data : Dict
Data set to be filtered
envelope : str
JMESPath expression to filter data against
jmespath_options : Dict
Alternative JMESPath options to be included when filtering expr
Returns
-------
Any
Data found using JMESPath expression given in envelope
"""
if not jmespath_options:
jmespath_options = {"custom_functions": PowertoolsFunctions()}
try:
logger.debug(f"Envelope detected: {envelope}. JMESPath options: {jmespath_options}")
return jmespath.search(envelope, data, options=jmespath.Options(**jmespath_options))
except (LexerError, TypeError, UnicodeError) as e:
message = f"Failed to unwrap event from envelope using expression. Error: {e} Exp: {envelope}, Data: {data}" # noqa: B306, E501
raise InvalidEnvelopeExpressionError(message)
def obfuscate_sensitive_data(
handler: Callable[[dict, LambdaContext], dict],
event: dict,
context: LambdaContext,
fields: List,
) -> dict:
# extracting payload from a EventBridge event
detail: dict = extract_data_from_envelope(data=event, envelope=envelopes.EVENTBRIDGE)
guest_data: Any = detail.get("guest")
# Obfuscate fields (email, vat, passport) before calling Lambda handler
for guest_field in fields:
if guest_data.get(guest_field):
event["detail"]["guest"][guest_field] = obfuscate_data(str(guest_data.get(guest_field)))
response = handler(event, context)
return response | null |
5,522 | import base64
from dataclasses import dataclass, field
from typing import Any, Callable, List
from uuid import uuid4
from aws_lambda_powertools.middleware_factory import lambda_handler_decorator
from aws_lambda_powertools.utilities.jmespath_utils import (
envelopes,
extract_data_from_envelope,
)
from aws_lambda_powertools.utilities.typing import LambdaContext
class Booking:
days: int
date_from: str
date_to: str
hotel_id: int
country: str
city: str
guest: dict
booking_id: str = field(default_factory=lambda: f"{uuid4()}")
class BookingError(Exception):
...
def extract_data_from_envelope(data: Union[Dict, str], envelope: str, jmespath_options: Optional[Dict] = None) -> Any:
"""Searches and extracts data using JMESPath
Envelope being the JMESPath expression to extract the data you're after
Built-in JMESPath functions include: powertools_json, powertools_base64, powertools_base64_gzip
Examples
--------
**Deserialize JSON string and extracts data from body key**
from aws_lambda_powertools.utilities.jmespath_utils import extract_data_from_envelope
from aws_lambda_powertools.utilities.typing import LambdaContext
def handler(event: dict, context: LambdaContext):
# event = {"body": "{\"customerId\":\"dd4649e6-2484-4993-acb8-0f9123103394\"}"} # noqa: ERA001
payload = extract_data_from_envelope(data=event, envelope="powertools_json(body)")
customer = payload.get("customerId") # now deserialized
...
Parameters
----------
data : Dict
Data set to be filtered
envelope : str
JMESPath expression to filter data against
jmespath_options : Dict
Alternative JMESPath options to be included when filtering expr
Returns
-------
Any
Data found using JMESPath expression given in envelope
"""
if not jmespath_options:
jmespath_options = {"custom_functions": PowertoolsFunctions()}
try:
logger.debug(f"Envelope detected: {envelope}. JMESPath options: {jmespath_options}")
return jmespath.search(envelope, data, options=jmespath.Options(**jmespath_options))
except (LexerError, TypeError, UnicodeError) as e:
message = f"Failed to unwrap event from envelope using expression. Error: {e} Exp: {envelope}, Data: {data}" # noqa: B306, E501
raise InvalidEnvelopeExpressionError(message)
def lambda_handler(event: dict, context: LambdaContext) -> dict:
try:
booking_payload: dict = extract_data_from_envelope(data=event, envelope=envelopes.EVENTBRIDGE)
return {
"book": Booking(**booking_payload).__dict__,
"message": "booking created",
"success": True,
}
except Exception as e:
raise BookingError("Unable to create booking") from e | null |
5,523 | import time
from typing import Callable
import requests
from requests import Response
from aws_lambda_powertools.event_handler import APIGatewayRestResolver
from aws_lambda_powertools.middleware_factory import lambda_handler_decorator
from aws_lambda_powertools.utilities.typing import LambdaContext
def middleware_after(
handler: Callable[[dict, LambdaContext], dict],
event: dict,
context: LambdaContext,
) -> dict:
start_time = time.time()
response = handler(event, context)
execution_time = time.time() - start_time
# adding custom headers in response object after lambda executing
response["headers"]["execution_time"] = execution_time
response["headers"]["aws_request_id"] = context.aws_request_id
return response | null |
5,524 | import time
from typing import Callable
import requests
from requests import Response
from aws_lambda_powertools.event_handler import APIGatewayRestResolver
from aws_lambda_powertools.middleware_factory import lambda_handler_decorator
from aws_lambda_powertools.utilities.typing import LambdaContext
app = APIGatewayRestResolver()
def create_todo() -> dict:
todo_data: dict = app.current_event.json_body # deserialize json str to dict
todo: Response = requests.post("https://jsonplaceholder.typicode.com/todos", data=todo_data)
todo.raise_for_status()
return {"todo": todo.json()} | null |
5,525 | import time
from typing import Callable
import requests
from requests import Response
from aws_lambda_powertools.event_handler import APIGatewayRestResolver
from aws_lambda_powertools.middleware_factory import lambda_handler_decorator
from aws_lambda_powertools.utilities.typing import LambdaContext
app = APIGatewayRestResolver()
def lambda_handler(event: dict, context: LambdaContext) -> dict:
return app.resolve(event, context) | null |
5,526 | import json
from typing import Callable
import boto3
import combining_powertools_utilities_schema as schemas
import requests
from aws_lambda_powertools import Logger, Tracer
from aws_lambda_powertools.event_handler import APIGatewayRestResolver
from aws_lambda_powertools.event_handler.exceptions import InternalServerError
from aws_lambda_powertools.middleware_factory import lambda_handler_decorator
from aws_lambda_powertools.shared.types import JSONType
from aws_lambda_powertools.utilities.feature_flags import AppConfigStore, FeatureFlags
from aws_lambda_powertools.utilities.jmespath_utils import extract_data_from_envelope
from aws_lambda_powertools.utilities.typing import LambdaContext
from aws_lambda_powertools.utilities.validation import SchemaValidationError, validate
tracer = Tracer()
logger = Logger()
def save_api_execution_history(path: str, headers: dict, request_context: dict) -> None:
try:
# using the feature flags utility to check if the new feature "save api call to history" is enabled by default
# see: https://docs.powertools.aws.dev/lambda/python/latest/utilities/feature_flags/#static-flags
save_history: JSONType = feature_flags.evaluate(name="save_history", default=False)
if save_history:
# saving history in dynamodb table
tracer.put_metadata(key="execution detail", value=request_context)
table_historic.put_item(
Item={
"customer_id": headers.get("X-Customer-Id"),
"request_id": request_context.get("requestId"),
"path": path,
"request_time": request_context.get("requestTime"),
"source_ip": request_context.get("identity", {}).get("sourceIp"),
"http_method": request_context.get("httpMethod"),
},
)
return None
except Exception:
# you can add more logic here to handle exceptions or even save this to a DLQ
# but not to make this example too long, we just return None since the Lambda has been successfully executed
return None
def extract_data_from_envelope(data: Union[Dict, str], envelope: str, jmespath_options: Optional[Dict] = None) -> Any:
"""Searches and extracts data using JMESPath
Envelope being the JMESPath expression to extract the data you're after
Built-in JMESPath functions include: powertools_json, powertools_base64, powertools_base64_gzip
Examples
--------
**Deserialize JSON string and extracts data from body key**
from aws_lambda_powertools.utilities.jmespath_utils import extract_data_from_envelope
from aws_lambda_powertools.utilities.typing import LambdaContext
def handler(event: dict, context: LambdaContext):
# event = {"body": "{\"customerId\":\"dd4649e6-2484-4993-acb8-0f9123103394\"}"} # noqa: ERA001
payload = extract_data_from_envelope(data=event, envelope="powertools_json(body)")
customer = payload.get("customerId") # now deserialized
...
Parameters
----------
data : Dict
Data set to be filtered
envelope : str
JMESPath expression to filter data against
jmespath_options : Dict
Alternative JMESPath options to be included when filtering expr
Returns
-------
Any
Data found using JMESPath expression given in envelope
"""
if not jmespath_options:
jmespath_options = {"custom_functions": PowertoolsFunctions()}
try:
logger.debug(f"Envelope detected: {envelope}. JMESPath options: {jmespath_options}")
return jmespath.search(envelope, data, options=jmespath.Options(**jmespath_options))
except (LexerError, TypeError, UnicodeError) as e:
message = f"Failed to unwrap event from envelope using expression. Error: {e} Exp: {envelope}, Data: {data}" # noqa: B306, E501
raise InvalidEnvelopeExpressionError(message)
def middleware_custom(
handler: Callable[[dict, LambdaContext], dict],
event: dict,
context: LambdaContext,
) -> dict:
# validating the INPUT with the given schema
# X-Customer-Id header must be informed in all requests
try:
validate(event=event, schema=schemas.INPUT)
except SchemaValidationError as e:
return {
"statusCode": 400,
"body": json.dumps(str(e)),
}
# extracting headers and requestContext from event
headers = extract_data_from_envelope(data=event, envelope="headers")
request_context = extract_data_from_envelope(data=event, envelope="requestContext")
logger.debug(f"X-Customer-Id => {headers.get('X-Customer-Id')}")
tracer.put_annotation(key="CustomerId", value=headers.get("X-Customer-Id"))
response = handler(event, context)
# automatically adding security headers to all responses
# see: https://securityheaders.com/
logger.info("Injecting security headers")
response["headers"]["Referrer-Policy"] = "no-referrer"
response["headers"]["Strict-Transport-Security"] = "max-age=15552000; includeSubDomains; preload"
response["headers"]["X-DNS-Prefetch-Control"] = "off"
response["headers"]["X-Content-Type-Options"] = "nosniff"
response["headers"]["X-Permitted-Cross-Domain-Policies"] = "none"
response["headers"]["X-Download-Options"] = "noopen"
logger.info("Saving api call in history table")
save_api_execution_history(str(event.get("path")), headers, request_context)
# return lambda execution
return response | null |
5,527 | import json
from typing import Callable
import boto3
import combining_powertools_utilities_schema as schemas
import requests
from aws_lambda_powertools import Logger, Tracer
from aws_lambda_powertools.event_handler import APIGatewayRestResolver
from aws_lambda_powertools.event_handler.exceptions import InternalServerError
from aws_lambda_powertools.middleware_factory import lambda_handler_decorator
from aws_lambda_powertools.shared.types import JSONType
from aws_lambda_powertools.utilities.feature_flags import AppConfigStore, FeatureFlags
from aws_lambda_powertools.utilities.jmespath_utils import extract_data_from_envelope
from aws_lambda_powertools.utilities.typing import LambdaContext
from aws_lambda_powertools.utilities.validation import SchemaValidationError, validate
class InternalServerError(ServiceError):
"""API Gateway and ALB Not Found Internal Server Error (500)"""
def __init__(self, message: str):
super().__init__(HTTPStatus.INTERNAL_SERVER_ERROR, message)
def get_comments():
try:
comments: requests.Response = requests.get("https://jsonplaceholder.typicode.com/comments")
comments.raise_for_status()
return {"comments": comments.json()[:10]}
except Exception as exc:
raise InternalServerError(str(exc)) | null |
5,528 | import json
from typing import Callable
import boto3
import combining_powertools_utilities_schema as schemas
import requests
from aws_lambda_powertools import Logger, Tracer
from aws_lambda_powertools.event_handler import APIGatewayRestResolver
from aws_lambda_powertools.event_handler.exceptions import InternalServerError
from aws_lambda_powertools.middleware_factory import lambda_handler_decorator
from aws_lambda_powertools.shared.types import JSONType
from aws_lambda_powertools.utilities.feature_flags import AppConfigStore, FeatureFlags
from aws_lambda_powertools.utilities.jmespath_utils import extract_data_from_envelope
from aws_lambda_powertools.utilities.typing import LambdaContext
from aws_lambda_powertools.utilities.validation import SchemaValidationError, validate
class InternalServerError(ServiceError):
"""API Gateway and ALB Not Found Internal Server Error (500)"""
def __init__(self, message: str):
super().__init__(HTTPStatus.INTERNAL_SERVER_ERROR, message)
def get_comments_by_id(comment_id: str):
try:
comments: requests.Response = requests.get(f"https://jsonplaceholder.typicode.com/comments/{comment_id}")
comments.raise_for_status()
return {"comments": comments.json()}
except Exception as exc:
raise InternalServerError(str(exc)) | null |
5,529 | import json
from typing import Callable
import boto3
import combining_powertools_utilities_schema as schemas
import requests
from aws_lambda_powertools import Logger, Tracer
from aws_lambda_powertools.event_handler import APIGatewayRestResolver
from aws_lambda_powertools.event_handler.exceptions import InternalServerError
from aws_lambda_powertools.middleware_factory import lambda_handler_decorator
from aws_lambda_powertools.shared.types import JSONType
from aws_lambda_powertools.utilities.feature_flags import AppConfigStore, FeatureFlags
from aws_lambda_powertools.utilities.jmespath_utils import extract_data_from_envelope
from aws_lambda_powertools.utilities.typing import LambdaContext
from aws_lambda_powertools.utilities.validation import SchemaValidationError, validate
app = APIGatewayRestResolver()
def lambda_handler(event: dict, context: LambdaContext) -> dict:
return app.resolve(event, context) | null |
5,530 | import time
from typing import Callable
import requests
from requests import Response
from aws_lambda_powertools.event_handler import APIGatewayRestResolver
from aws_lambda_powertools.middleware_factory import lambda_handler_decorator
from aws_lambda_powertools.utilities.typing import LambdaContext
def middleware_with_tracing(
handler: Callable[[dict, LambdaContext], dict],
event: dict,
context: LambdaContext,
) -> dict:
start_time = time.time()
response = handler(event, context)
execution_time = time.time() - start_time
# adding custom headers in response object after lambda executing
response["headers"]["execution_time"] = execution_time
response["headers"]["aws_request_id"] = context.aws_request_id
return response | null |
5,531 | import time
from typing import Callable
import requests
from requests import Response
from aws_lambda_powertools.event_handler import APIGatewayRestResolver
from aws_lambda_powertools.middleware_factory import lambda_handler_decorator
from aws_lambda_powertools.utilities.typing import LambdaContext
def create_product() -> dict:
product: Response = requests.get("https://dummyjson.com/products/1")
product.raise_for_status()
return {"product": product.json()} | null |
5,533 | import time
from typing import Callable
import requests
from requests import Response
from aws_lambda_powertools import Tracer
from aws_lambda_powertools.event_handler import APIGatewayRestResolver
from aws_lambda_powertools.middleware_factory import lambda_handler_decorator
from aws_lambda_powertools.utilities.typing import LambdaContext
tracer = Tracer()
def middleware_with_advanced_tracing(
handler: Callable[[dict, LambdaContext], dict],
event: dict,
context: LambdaContext,
) -> dict:
tracer.put_metadata(key="resource", value=event.get("resource"))
start_time = time.time()
response = handler(event, context)
execution_time = time.time() - start_time
tracer.put_annotation(key="TotalExecutionTime", value=str(execution_time))
# adding custom headers in response object after lambda executing
response["headers"]["execution_time"] = execution_time
response["headers"]["aws_request_id"] = context.aws_request_id
return response | null |
5,534 | import time
from typing import Callable
import requests
from requests import Response
from aws_lambda_powertools import Tracer
from aws_lambda_powertools.event_handler import APIGatewayRestResolver
from aws_lambda_powertools.middleware_factory import lambda_handler_decorator
from aws_lambda_powertools.utilities.typing import LambdaContext
def create_product() -> dict:
product: Response = requests.get("https://dummyjson.com/products/1")
product.raise_for_status()
return {"product": product.json()} | null |
5,535 | import time
from typing import Callable
import requests
from requests import Response
from aws_lambda_powertools import Tracer
from aws_lambda_powertools.event_handler import APIGatewayRestResolver
from aws_lambda_powertools.middleware_factory import lambda_handler_decorator
from aws_lambda_powertools.utilities.typing import LambdaContext
app = APIGatewayRestResolver()
def lambda_handler(event: dict, context: LambdaContext) -> dict:
return app.resolve(event, context) | null |
5,536 | import requests
from requests import Response
from aws_lambda_powertools import Logger, Tracer
from aws_lambda_powertools.event_handler import LambdaFunctionUrlResolver
from aws_lambda_powertools.logging import correlation_paths
from aws_lambda_powertools.utilities.typing import LambdaContext
def get_todos():
todos: Response = requests.get("https://jsonplaceholder.typicode.com/todos")
todos.raise_for_status()
# for brevity, we'll limit to the first 10 only
return {"todos": todos.json()[:10]} | null |
5,537 | import requests
from requests import Response
from aws_lambda_powertools import Logger, Tracer
from aws_lambda_powertools.event_handler import LambdaFunctionUrlResolver
from aws_lambda_powertools.logging import correlation_paths
from aws_lambda_powertools.utilities.typing import LambdaContext
app = LambdaFunctionUrlResolver()
def lambda_handler(event: dict, context: LambdaContext) -> dict:
return app.resolve(event, context) | null |
5,538 | import datetime
import json
import os
import platform
from importlib.metadata import version
import boto3
from pydantic import HttpUrl
from aws_lambda_powertools import Logger, Metrics, Tracer
from aws_lambda_powertools.utilities.parser import BaseModel, envelopes, event_parser
from aws_lambda_powertools.utilities.typing import LambdaContext
from aws_lambda_powertools.utilities.validation import validator
class OrderItem(BaseModel):
order_id: int
quantity: int
description: str
url: HttpUrl
def envelope_handler(event: OrderItem, context: LambdaContext):
assert event.order_id != 1 | null |
5,539 | import datetime
import json
import os
import platform
from importlib.metadata import version
import boto3
from pydantic import HttpUrl
from aws_lambda_powertools import Logger, Metrics, Tracer
from aws_lambda_powertools.utilities.parser import BaseModel, envelopes, event_parser
from aws_lambda_powertools.utilities.typing import LambdaContext
from aws_lambda_powertools.utilities.validation import validator
def validator_handler(event, context: LambdaContext):
pass | null |
5,540 | import datetime
import json
import os
import platform
from importlib.metadata import version
import boto3
from pydantic import HttpUrl
from aws_lambda_powertools import Logger, Metrics, Tracer
from aws_lambda_powertools.utilities.parser import BaseModel, envelopes, event_parser
from aws_lambda_powertools.utilities.typing import LambdaContext
from aws_lambda_powertools.utilities.validation import validator
def on_create(event):
props = event["ResourceProperties"]
logger.info("create new resource with properties %s" % props)
handler(event)
def on_event(event, context):
request_type = event["RequestType"]
# we handle only create events, because we recreate the canary on each run
if request_type == "Create":
return on_create(event)
return "Nothing to be processed" | null |
5,541 |
def handler(event, context):
return {
"message": "success"
} | null |
5,542 | from aws_lambda_powertools import (Logger, Metrics, Tracer)
def handler(event, context):
return {
"message": "success"
} | null |
5,543 |
def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20):
if (isinstance(img, np.ndarray)): # 判断是否OpenCV图片类型
img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
# 创建一个可以在给定图像上绘图的对象
draw = ImageDraw.Draw(img)
# 字体的格式
fontStyle = ImageFont.truetype(
'C:/Windows/Fonts/STHUPO.TTF', textSize, encoding="utf-8")
#"D:/python/辅助/锐字真言体.ttf"
# 绘制文本
draw.text((left, top), text, textColor, font=fontStyle)
# 转换回OpenCV格式
return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR) | null |
5,544 |
def start_listen():
with Listener(on_press=on_press, on_release=on_release) as listener:
listener.join() | null |
5,545 | from torch.autograd import Variable
import torch
import numpy as np
def 打印抽样数据(数_词表,数据, 输出_分):
    """Print a sampled token sequence next to its target sequence.

    数_词表: dict mapping str(token id) -> token string.
    数据: indexable batch; 数据[0] is assumed to be a 2-D array whose first
        column holds token ids -- TODO confirm shape against callers.
    输出_分: torch tensor of target token ids (moved to CPU here).
    Returns None; output goes to stdout only.
    """
    临 = 数据[0]
    欲打印=[数_词表[str(临[i,0])] for i in range(0,临.shape[0])]
    # Reuse 临 for the target ids (original code reassigns the same name).
    临 = 输出_分.cpu().numpy()
    欲打印2 = [数_词表[str(临[i])] for i in range(0,临.shape[0])]
    print("抽样输出",欲打印)
    print("目标输出", 欲打印2)
    # for i in range(16):
    #     print(数_词表[str(临[i, 0])])
5,546 | from torch.autograd import Variable
import torch
import numpy as np
def nopeak_mask(size, device):
    """Build a (1, size, size) causal ("no peeking ahead") attention mask.

    Entry [0, i, j] is True when position j may be attended from position i
    (j <= i) and False for strictly-future positions.

    size: sequence length.
    device: torch.device (or CUDA index) the mask is moved to.
    Returns a bool torch.Tensor on `device`.
    """
    # triu with k=1 marks the "future" cells; comparing to 0 inverts it so
    # the diagonal and lower triangle come out True.
    np_mask = np.triu(np.ones((1, size, size)),
                      k=1).astype('uint8')
    np_mask = Variable(torch.from_numpy(np_mask) == 0)
    # .to(device) generalizes the original .cuda(device): identical on CUDA
    # devices, and no longer crashes on CPU-only hosts.
    return np_mask.to(device)
5,547 | from torch.autograd import Variable
import torch
import numpy as np
def 打印测试数据(数_词表,数据, 输人_分,标签):
    """Decode 数据[0] into a token string and compare it against 标签.

    数_词表: dict mapping str(token id) -> token string.
    数据: indexable batch; 数据[0] is a 1-D numpy array of token ids.
    输人_分: input-id tensor; kept in the signature for caller compatibility
        but unused (the original decoded it into a local that was never read).
    标签: expected decoded string.
    Returns True on an exact match; otherwise prints the decoded string and
    returns False.
    """
    ids = 数据[0]
    # ''.join replaces the original quadratic += concatenation; the original
    # also had unreachable statements after its returns, now removed.
    打印 = "".join(数_词表[str(ids[i])] for i in range(ids.size))
    if 标签 == 打印:
        return True
    print(打印)
    return False
5,548 | from torch.autograd import Variable
import torch
import numpy as np
def 打印测试数据_A(数_词表,数据, 输人_分):
    """Decode and print the first row of 数据 as a token string.

    Empty batches are skipped entirely. The decoded input-id row is also
    built (then discarded), exactly mirroring the original control flow and
    its dictionary lookups.
    """
    if 数据.shape[0] == 0:
        return
    行 = 数据[0]
    打印 = "".join(数_词表[str(行[i])] for i in range(行.size))
    输入行 = 输人_分.cpu().numpy()[0]
    _ = str([数_词表[str(输入行[i])] for i in range(输人_分.size(1))])
    #print("输入:", 欲打印2)
    print("输出:",打印)
5,549 | import torch
import torchvision
import json
from PIL import Image
from resnet_utils import myResnet
import numpy as np
import torch.nn as nn
from Sublayers import Norm, 全连接层
import math
import torch.nn.functional as F
from 模型_策略梯度 import Transformer
from Batch import create_masks
def gelu(x):
    """Gaussian Error Linear Unit, tanh approximation (as used in GPT/BERT)."""
    inner = math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))
    return 0.5 * x * (1 + torch.tanh(inner))
5,550 | import torch
import torchvision
import json
from PIL import Image
from resnet_utils import myResnet
import numpy as np
import torch.nn as nn
from Sublayers import Norm, 全连接层
import math
import torch.nn.functional as F
from 模型_策略梯度 import Transformer
from Batch import create_masks
def random_dic(dicts):
    """Return a new dict with the same key/value pairs in shuffled order.

    dicts: source mapping; it is not modified.
    Returns a fresh dict whose insertion order is a uniform random
    permutation of the original keys.
    """
    # Local import: the original called a bare `shuffle` that was never
    # imported in this snippet; random.shuffle is the intended function.
    import random
    keys = list(dicts.keys())
    random.shuffle(keys)
    return {key: dicts[key] for key in keys}
5,551 | import os
import numpy as np
import torch as T
import torch.nn as nn
import torch.optim as optim
from torch.distributions.categorical import Categorical
import torch.nn as nn
from Layers import DecoderLayer
from Embed import Embedder, PositionalEncoder
from Sublayers import Norm, 全连接层
import copy
import os.path
import torchvision
from config import TransformerConfig
import torch.nn.functional as F
from Batch import create_masks
from 杂项 import 打印抽样数据
import pickle
import gc
:
def __init__(self, 动作数, 输入维度, 优势估计参数G=0.9999, 学习率=0.0003, 泛化优势估计参数L=0.985,
策略裁剪幅度=0.2, 并行条目数=64, 轮数=10,熵系数=0.01):
self.优势估计参数G = 优势估计参数G
self.策略裁剪幅度 = 策略裁剪幅度
self.轮数 = 轮数
self.熵系数=熵系数
self.泛化优势估计参数L = 泛化优势估计参数L
device = torch.device("cuda:0" if (torch.cuda.is_available()) else "cpu")
模型名称 = '模型_策略梯度_丙TA'
config = TransformerConfig()
model = get_model(config, 130, 模型名称)
# model_dict = model.state_dict()
#
# pretrained_dict = torch.load('weights/model_weights_2021-05-7D11')
#
# pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
#
# model_dict.update(pretrained_dict)
#
# model.load_state_dict(model_dict)
model = model.cuda(device)
self.动作 = model
#torch.save(self.动作.state_dict(), 'weights/模型_动作ppo阶段停bZ1')
self.优化函数 = torch.optim.Adam(self.动作.parameters(), lr=2e-5, betas=(0.9, 0.95), eps=1e-9)
self.数据集 = PPO_数据集(并行条目数)
self.文件名集=[]
    def 记录数据(self, 状态, 动作, 动作概率, 评价, 回报, 完结,计数):
        """Forward one transition (state, action, log-prob, value, reward, done flag, step count) to the replay buffer self.数据集."""
        self.数据集.记录数据(状态, 动作, 动作概率, 评价, 回报, 完结,计数)
    def 存硬盘(self, 文件名):
        """Persist the replay buffer to disk under 文件名 and remember the filename in self.文件名集."""
        self.数据集.存硬盘(文件名)
        self.文件名集.append(文件名)
    def 读硬盘(self, 文件名):
        """Load a previously saved replay buffer from disk into self.数据集."""
        self.数据集.读硬盘(文件名)
    def 保存模型(self,轮号):
        """Save the policy network weights twice: a fixed "latest" file and an epoch-numbered snapshot (轮号)."""
        print('... 保存模型 ...')
        torch.save(self.动作.state_dict(), 'weights/模型_策略梯度_丙N')
        torch.save(self.动作.state_dict(), 'weights/模型_策略梯度_丙N{}'.format(轮号))
        # Critic checkpointing is currently disabled:
        #torch.save(self.评论.state_dict(), 'weights/模型_评论')
        #torch.save(self.评论.state_dict(), 'weights/模型_评论2')
    def 载入模型(self):
        """Reload policy weights via the model's own 载入权重() helper."""
        print('... 载入模型 ...')
        self.动作.载入权重()
        #self.评价.载入权重()
    def 选择动作(self, 状态,device,传入动作,手动=False):
        """Sample (or force) a single action for the newest timestep.

        状态: dict with '操作序列' (np int array of action ids), '图片张量'
            (np image-feature array) and a prebuilt 'trg_mask'.
        device: CUDA device the input tensors are moved to.
        传入动作: action used verbatim when 手动 is True (teacher forcing).
        手动: when True, skip sampling and use 传入动作.
        Returns (action id, log-prob float, value tensor) for the final
        sequence position.
        """
        # 分布,q_ = self.动作(状态)
        # r_, 价值 = self.评论(状态)
        self.动作.requires_grad_(False)  # inference only: no grads needed
        操作序列=torch.from_numpy(状态['操作序列'].astype(np.int64)).cuda(device)
        图片张量=torch.from_numpy(状态['图片张量']).cuda(device)
        trg_mask=状态['trg_mask']
        分布, 价值 = self.动作(图片张量,操作序列,trg_mask)
        # Keep only the last timestep of the value/logit sequences.
        价值 = 价值[:, - 1, :]
        分布 = F.softmax(分布, dim=-1)
        分布 = 分布[:, - 1, :]
        分布 = Categorical(分布)
        if 手动:
            动作 = 传入动作
        else:
            动作 = 分布.sample()
        动作概率 = T.squeeze(分布.log_prob(动作)).item()
        动作 = T.squeeze(动作).item()
        return 动作, 动作概率, 价值
def 选择动作批量(self, 状态,device,目标输出_分_torch,手动=False):
# 分布,q_ = self.动作(状态)
# r_, 价值 = self.评论(状态)
self.动作.requires_grad_(False)
操作序列=torch.from_numpy(状态['操作序列'].astype(np.int64)).cuda(device)
图片张量=torch.from_numpy(状态['图片张量']).cuda(device)
trg_mask=状态['trg_mask']
分布, 价值 = self.动作(图片张量,操作序列,trg_mask)
分布 = F.softmax(分布, dim=-1)
分布 = Categorical(分布)
if 手动:
动作 = 目标输出_分_torch
else:
动作 = 分布.sample()
动作概率 = T.squeeze(分布.log_prob(动作))
动作 = T.squeeze(动作)
return 动作, 动作概率, 价值
def 学习(self,device):
for i in range(1):
# for k, v in self.动作.named_parameters():
#
# if k == '评价.weight' or k=='评价.bias':
# v.requires_grad = True
for _ in range(self.轮数):
动作集, 旧_动作概率集, 评价集, 回报集, 完结集,图片集合,动作数组, 条目集 = self.数据集.提取数据()
print('回报集',回报集[0:10])
价值 = 评价集
优势函数值 = np.zeros(len(回报集), dtype=np.float32)
for t in range(len(回报集) - 1):
折扣率 = 1
优势值 = 0
折扣率 = self.优势估计参数G * self.泛化优势估计参数L
计数=0
for k in range(t, len(回报集) - 1):
优势值 += pow(折扣率, abs(0-计数)) * (回报集[k] + self.优势估计参数G * 价值[k + 1] * (1 - int(完结集[k])) - 价值[k])
计数=计数+1
if (1 - int(完结集[k]))==0 or 计数>100:
break
优势函数值[t] = 优势值
# https://blog.csdn.net/zhkmxx930xperia/article/details/88257891
# GAE的形式为多个价值估计的加权平均数
优势函数值 = T.tensor(优势函数值).to(device)
价值 = T.tensor(价值).to(device)
for 条 in 条目集:
条末=条[-1:]
旧_动作概率s = T.tensor(旧_动作概率集[条末]).to(device)
动作s = T.tensor(动作集[条末]).to(device)
self.动作.requires_grad_(True)
操作序列 = torch.from_numpy(动作数组[条].astype(np.int64)).cuda(device)
图片张量 = torch.from_numpy(图片集合[:, 条, :]).cuda(device).float()
src_mask, trg_mask = create_masks(操作序列.unsqueeze(0), 操作序列.unsqueeze(0), device)
分布, 评价结果 = self.动作(图片张量,操作序列,trg_mask)
分布=分布[:,-1:,:]
评价结果 = 评价结果[:, -1:, :]
分布 = F.softmax(分布, dim=-1)
# 分布 = 分布[:, - 1, :]
# 评价结果 = 评价结果[:, - 1, :]
评价结果 = T.squeeze(评价结果)
分布 = Categorical(分布)
熵损失 = torch.mean(分布.entropy())
新_动作概率s = 分布.log_prob(动作s)
# 概率比 = 新_动作概率s.exp() / 旧_动作概率s.exp()
# # prob_ratio = (new_probs - old_probs).exp()
# 加权概率 = 优势函数值[条末] * 概率比
# 加权_裁剪_概率 = T.clamp(概率比, 1 - self.策略裁剪幅度,
# 1 + self.策略裁剪幅度) * 优势函数值[条末]
# 动作损失 = -T.min(加权概率, 加权_裁剪_概率).mean()
总回报 = 优势函数值[条末] + 价值[条末]
动作损失 = -总回报 * 新_动作概率s
动作损失 = 动作损失.mean()
评价损失 = (总回报 - 评价结果) ** 2
评价损失 = 评价损失 .mean()
总损失 = 动作损失 + 0.5 * 评价损失-self.熵系数*熵损失
#print(总损失)
self.优化函数.zero_grad()
# self.优化函数_评论.zero_grad()
总损失.backward()
self.优化函数.step()
# self.优化函数_评论.step()
print('总损失',总损失)
self.数据集.清除数据()
self.文件名集=[]
def 监督强化学习(self,device,状态,回报,动作,动作可能性,评价):
#print(device,状态,回报,动作,动作可能性,评价)
# for k, v in self.动作.named_parameters():
#
# if k == '评价.weight' or k=='评价.bias':
# v.requires_grad = True
回报集=回报
价值=评价.cpu().numpy()[0,:,0]
优势函数值 = np.zeros(回报集.shape[0], dtype=np.float32)
for t in range(len(回报集) - 1):
折扣率 = 1
优势值 = 0
折扣率 = self.优势估计参数G * self.泛化优势估计参数L
计数 = 0
for k in range(t, len(回报集) - 1):
优势值 += pow(折扣率, abs(0 - 计数)) * (回报集[k])
计数 = 计数 + 1
if 计数 > 200:
break
优势函数值[t] = 优势值
价值 = T.tensor(价值).to(device)
for i in range(3):
优势函数值 = T.tensor(优势函数值).to(device)
旧_动作概率s = T.tensor(动作可能性).to(device)
动作s = T.tensor(动作).to(device)
self.动作.requires_grad_(True)
操作序列 = torch.from_numpy(状态['操作序列'].astype(np.int64)).cuda(device)
图片张量 = torch.from_numpy(状态['图片张量']).cuda(device).float()
trg_mask = 状态['trg_mask']
分布, 评价结果 = self.动作(图片张量, 操作序列, trg_mask)
分布 = F.softmax(分布, dim=-1)
# 分布 = 分布[:, - 1, :]
# 评价结果 = 评价结果[:, - 1, :]
评价结果 = T.squeeze(评价结果)
分布 = Categorical(分布)
#熵损失 = torch.mean(分布.entropy())
新_动作概率s = 分布.log_prob(动作s)
# 旧_动作概率s=旧_动作概率s.exp()
# 概率比 = 新_动作概率s / 旧_动作概率s
# # prob_ratio = (new_probs - old_probs).exp()
# 加权概率 = 优势函数值 * 概率比
# 加权_裁剪_概率 = T.clamp(概率比, 1 - self.策略裁剪幅度,
# 1 + self.策略裁剪幅度) * 优势函数值
# 动作损失 = -T.min(加权概率, 加权_裁剪_概率).mean()
#概率比2 = 新_动作概率s.mean() / 旧_动作概率s.mean()
总回报 = 优势函数值#+ 价值
动作损失 = -总回报 * 新_动作概率s
动作损失 = 动作损失.mean()
#评价损失 = (总回报 - 评价结果) ** 2
#评价损失 = 评价损失.mean()
print(总回报[10:20],新_动作概率s[:,10:20].exp())
总损失 = 动作损失# + 0.5 * 评价损失 - self.熵系数 * 熵损失
# print(总损失)
self.优化函数.zero_grad()
# self.优化函数_评论.zero_grad()
总损失.backward()
self.优化函数.step()
# self.优化函数_评论.step()
def 监督强化学习A(self,device,状态,回报,动作,动作可能性,评价,完结集):
#print(device,状态,回报,动作,动作可能性,评价)
# for k, v in self.动作.named_parameters():
#
# if k == '评价.weight' or k=='评价.bias':
# v.requires_grad = True
回报集=回报
价值=评价.cpu().numpy()[0,:,0]
优势函数值 = np.zeros(回报集.shape[0], dtype=np.float32)
for t in range(len(回报集) - 1):
折扣率 = 1
优势值 = 0
折扣率 = self.优势估计参数G * self.泛化优势估计参数L
计数 = 0
for k in range(t, len(回报集) - 1):
优势值 += pow(折扣率, abs(0 - 计数)) * (回报集[k]*(1-完结集[0,k]*0))
计数 = 计数 + 1
if 计数 > 200 or 完结集[0,k]==2111111:
break
优势函数值[t] = 优势值
价值 = T.tensor(价值).to(device)
for i in range(3):
优势函数值 = T.tensor(优势函数值).to(device)
旧_动作概率s = T.tensor(动作可能性).to(device)
动作s = T.tensor(动作).to(device)
self.动作.requires_grad_(True)
操作序列 = torch.from_numpy(状态['操作序列'].astype(np.int64)).cuda(device)
图片张量 = torch.from_numpy(状态['图片张量']).cuda(device).float()
trg_mask = 状态['trg_mask']
分布, 评价结果 = self.动作(图片张量, 操作序列, trg_mask)
分布 = F.softmax(分布, dim=-1)
# 分布 = 分布[:, - 1, :]
# 评价结果 = 评价结果[:, - 1, :]
评价结果 = T.squeeze(评价结果)
分布 = Categorical(分布)
#熵损失 = torch.mean(分布.entropy())
新_动作概率s = 分布.log_prob(动作s)
# 旧_动作概率s=旧_动作概率s.exp()
# 概率比 = 新_动作概率s / 旧_动作概率s
# # prob_ratio = (new_probs - old_probs).exp()
# 加权概率 = 优势函数值 * 概率比
# 加权_裁剪_概率 = T.clamp(概率比, 1 - self.策略裁剪幅度,
# 1 + self.策略裁剪幅度) * 优势函数值
# 动作损失 = -T.min(加权概率, 加权_裁剪_概率).mean()
#概率比2 = 新_动作概率s.mean() / 旧_动作概率s.mean()
总回报 = 优势函数值#+ 价值
动作损失 = -总回报 * 新_动作概率s
动作损失 = 动作损失.mean()
#评价损失 = (总回报 - 评价结果) ** 2
#评价损失 = 评价损失.mean()
print(总回报[10:20],新_动作概率s[:,10:20].exp())
总损失 = 动作损失# + 0.5 * 评价损失 - self.熵系数 * 熵损失
# print(总损失)
self.优化函数.zero_grad()
# self.优化函数_评论.zero_grad()
总损失.backward()
self.优化函数.step()
# self.优化函数_评论.step()
    def 监督学习(self, 状态,目标输出,打印,数_词表,操作_分_torch,device):
        """One supervised (cross-entropy) training step on the policy head.

        状态: model input; NOTE(review): this method calls self.动作(状态, device),
            a different call signature than the other methods use -- confirm
            which model variant it targets.
        目标输出: target token-id tensor (flattened for the loss; -1 ignored).
        打印: when truthy, print the loss and a decoded sample for debugging.
        数_词表 / 操作_分_torch: vocab dict and reference ids for the printout.
        device: unused here beyond the forward call.
        """
        分布, 价值 = self.动作(状态,device)
        lin = 分布.view(-1, 分布.size(-1))
        # Greedy top-1 sample, only used for the optional debug printout.
        _, 抽样 = torch.topk(分布, k=1, dim=-1)
        抽样np = 抽样.cpu().numpy()
        self.优化函数.zero_grad()
        loss = F.cross_entropy(lin, 目标输出.contiguous().view(-1), ignore_index=-1)
        if 打印:
            print(loss)
            打印抽样数据(数_词表, 抽样np[0:1, :, :], 操作_分_torch[0, :])
        loss.backward()
        self.优化函数.step()
    def 选择动作_old(self, 状态):
        """Legacy sampler: multinomial draw from the last-step softmax.

        Returns a bare action id (no log-prob/value, unlike 选择动作).
        """
        # 分布,q_ = self.动作(状态)
        # r_, 价值 = self.评论(状态)
        输出_实际_A, 价值 = self.动作(状态)
        输出_实际_A = F.softmax(输出_实际_A, dim=-1)
        输出_实际_A = 输出_实际_A[:, - 1, :]
        抽样 = torch.multinomial(输出_实际_A, num_samples=1)
        抽样np = 抽样.cpu().numpy()
        return 抽样np[0,-1]
def save_obj(obj, name):
    """Pickle *obj* into the file '<name>.pkl' using the highest protocol."""
    with open(name + '.pkl', 'wb') as fh:
        pickle.dump(obj, fh, pickle.HIGHEST_PROTOCOL)
5,552 | import os
import numpy as np
import torch as T
import torch.nn as nn
import torch.optim as optim
from torch.distributions.categorical import Categorical
import torch.nn as nn
from Layers import DecoderLayer
from Embed import Embedder, PositionalEncoder
from Sublayers import Norm, 全连接层
import copy
import os.path
import torchvision
from config import TransformerConfig
import torch.nn.functional as F
from Batch import create_masks
from 杂项 import 打印抽样数据
import pickle
import gc
:
def __init__(self, 动作数, 输入维度, 优势估计参数G=0.9999, 学习率=0.0003, 泛化优势估计参数L=0.985,
策略裁剪幅度=0.2, 并行条目数=64, 轮数=10,熵系数=0.01):
self.优势估计参数G = 优势估计参数G
self.策略裁剪幅度 = 策略裁剪幅度
self.轮数 = 轮数
self.熵系数=熵系数
self.泛化优势估计参数L = 泛化优势估计参数L
device = torch.device("cuda:0" if (torch.cuda.is_available()) else "cpu")
模型名称 = '模型_策略梯度_丙TA'
config = TransformerConfig()
model = get_model(config, 130, 模型名称)
# model_dict = model.state_dict()
#
# pretrained_dict = torch.load('weights/model_weights_2021-05-7D11')
#
# pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
#
# model_dict.update(pretrained_dict)
#
# model.load_state_dict(model_dict)
model = model.cuda(device)
self.动作 = model
#torch.save(self.动作.state_dict(), 'weights/模型_动作ppo阶段停bZ1')
self.优化函数 = torch.optim.Adam(self.动作.parameters(), lr=2e-5, betas=(0.9, 0.95), eps=1e-9)
self.数据集 = PPO_数据集(并行条目数)
self.文件名集=[]
def 记录数据(self, 状态, 动作, 动作概率, 评价, 回报, 完结,计数):
self.数据集.记录数据(状态, 动作, 动作概率, 评价, 回报, 完结,计数)
def 存硬盘(self, 文件名):
self.数据集.存硬盘(文件名)
self.文件名集.append(文件名)
def 读硬盘(self, 文件名):
self.数据集.读硬盘(文件名)
def 保存模型(self,轮号):
print('... 保存模型 ...')
torch.save(self.动作.state_dict(), 'weights/模型_策略梯度_丙N')
torch.save(self.动作.state_dict(), 'weights/模型_策略梯度_丙N{}'.format(轮号))
#torch.save(self.评论.state_dict(), 'weights/模型_评论')
#torch.save(self.评论.state_dict(), 'weights/模型_评论2')
def 载入模型(self):
print('... 载入模型 ...')
self.动作.载入权重()
#self.评价.载入权重()
def 选择动作(self, 状态,device,传入动作,手动=False):
# 分布,q_ = self.动作(状态)
# r_, 价值 = self.评论(状态)
self.动作.requires_grad_(False)
操作序列=torch.from_numpy(状态['操作序列'].astype(np.int64)).cuda(device)
图片张量=torch.from_numpy(状态['图片张量']).cuda(device)
trg_mask=状态['trg_mask']
分布, 价值 = self.动作(图片张量,操作序列,trg_mask)
价值 = 价值[:, - 1, :]
分布 = F.softmax(分布, dim=-1)
分布 = 分布[:, - 1, :]
分布 = Categorical(分布)
if 手动:
动作 = 传入动作
else:
动作 = 分布.sample()
动作概率 = T.squeeze(分布.log_prob(动作)).item()
动作 = T.squeeze(动作).item()
return 动作, 动作概率, 价值
def 选择动作批量(self, 状态,device,目标输出_分_torch,手动=False):
# 分布,q_ = self.动作(状态)
# r_, 价值 = self.评论(状态)
self.动作.requires_grad_(False)
操作序列=torch.from_numpy(状态['操作序列'].astype(np.int64)).cuda(device)
图片张量=torch.from_numpy(状态['图片张量']).cuda(device)
trg_mask=状态['trg_mask']
分布, 价值 = self.动作(图片张量,操作序列,trg_mask)
分布 = F.softmax(分布, dim=-1)
分布 = Categorical(分布)
if 手动:
动作 = 目标输出_分_torch
else:
动作 = 分布.sample()
动作概率 = T.squeeze(分布.log_prob(动作))
动作 = T.squeeze(动作)
return 动作, 动作概率, 价值
def 学习(self,device):
for i in range(1):
# for k, v in self.动作.named_parameters():
#
# if k == '评价.weight' or k=='评价.bias':
# v.requires_grad = True
for _ in range(self.轮数):
动作集, 旧_动作概率集, 评价集, 回报集, 完结集,图片集合,动作数组, 条目集 = self.数据集.提取数据()
print('回报集',回报集[0:10])
价值 = 评价集
优势函数值 = np.zeros(len(回报集), dtype=np.float32)
for t in range(len(回报集) - 1):
折扣率 = 1
优势值 = 0
折扣率 = self.优势估计参数G * self.泛化优势估计参数L
计数=0
for k in range(t, len(回报集) - 1):
优势值 += pow(折扣率, abs(0-计数)) * (回报集[k] + self.优势估计参数G * 价值[k + 1] * (1 - int(完结集[k])) - 价值[k])
计数=计数+1
if (1 - int(完结集[k]))==0 or 计数>100:
break
优势函数值[t] = 优势值
# https://blog.csdn.net/zhkmxx930xperia/article/details/88257891
# GAE的形式为多个价值估计的加权平均数
优势函数值 = T.tensor(优势函数值).to(device)
价值 = T.tensor(价值).to(device)
for 条 in 条目集:
条末=条[-1:]
旧_动作概率s = T.tensor(旧_动作概率集[条末]).to(device)
动作s = T.tensor(动作集[条末]).to(device)
self.动作.requires_grad_(True)
操作序列 = torch.from_numpy(动作数组[条].astype(np.int64)).cuda(device)
图片张量 = torch.from_numpy(图片集合[:, 条, :]).cuda(device).float()
src_mask, trg_mask = create_masks(操作序列.unsqueeze(0), 操作序列.unsqueeze(0), device)
分布, 评价结果 = self.动作(图片张量,操作序列,trg_mask)
分布=分布[:,-1:,:]
评价结果 = 评价结果[:, -1:, :]
分布 = F.softmax(分布, dim=-1)
# 分布 = 分布[:, - 1, :]
# 评价结果 = 评价结果[:, - 1, :]
评价结果 = T.squeeze(评价结果)
分布 = Categorical(分布)
熵损失 = torch.mean(分布.entropy())
新_动作概率s = 分布.log_prob(动作s)
# 概率比 = 新_动作概率s.exp() / 旧_动作概率s.exp()
# # prob_ratio = (new_probs - old_probs).exp()
# 加权概率 = 优势函数值[条末] * 概率比
# 加权_裁剪_概率 = T.clamp(概率比, 1 - self.策略裁剪幅度,
# 1 + self.策略裁剪幅度) * 优势函数值[条末]
# 动作损失 = -T.min(加权概率, 加权_裁剪_概率).mean()
总回报 = 优势函数值[条末] + 价值[条末]
动作损失 = -总回报 * 新_动作概率s
动作损失 = 动作损失.mean()
评价损失 = (总回报 - 评价结果) ** 2
评价损失 = 评价损失 .mean()
总损失 = 动作损失 + 0.5 * 评价损失-self.熵系数*熵损失
#print(总损失)
self.优化函数.zero_grad()
# self.优化函数_评论.zero_grad()
总损失.backward()
self.优化函数.step()
# self.优化函数_评论.step()
print('总损失',总损失)
self.数据集.清除数据()
self.文件名集=[]
def 监督强化学习(self,device,状态,回报,动作,动作可能性,评价):
#print(device,状态,回报,动作,动作可能性,评价)
# for k, v in self.动作.named_parameters():
#
# if k == '评价.weight' or k=='评价.bias':
# v.requires_grad = True
回报集=回报
价值=评价.cpu().numpy()[0,:,0]
优势函数值 = np.zeros(回报集.shape[0], dtype=np.float32)
for t in range(len(回报集) - 1):
折扣率 = 1
优势值 = 0
折扣率 = self.优势估计参数G * self.泛化优势估计参数L
计数 = 0
for k in range(t, len(回报集) - 1):
优势值 += pow(折扣率, abs(0 - 计数)) * (回报集[k])
计数 = 计数 + 1
if 计数 > 200:
break
优势函数值[t] = 优势值
价值 = T.tensor(价值).to(device)
for i in range(3):
优势函数值 = T.tensor(优势函数值).to(device)
旧_动作概率s = T.tensor(动作可能性).to(device)
动作s = T.tensor(动作).to(device)
self.动作.requires_grad_(True)
操作序列 = torch.from_numpy(状态['操作序列'].astype(np.int64)).cuda(device)
图片张量 = torch.from_numpy(状态['图片张量']).cuda(device).float()
trg_mask = 状态['trg_mask']
分布, 评价结果 = self.动作(图片张量, 操作序列, trg_mask)
分布 = F.softmax(分布, dim=-1)
# 分布 = 分布[:, - 1, :]
# 评价结果 = 评价结果[:, - 1, :]
评价结果 = T.squeeze(评价结果)
分布 = Categorical(分布)
#熵损失 = torch.mean(分布.entropy())
新_动作概率s = 分布.log_prob(动作s)
# 旧_动作概率s=旧_动作概率s.exp()
# 概率比 = 新_动作概率s / 旧_动作概率s
# # prob_ratio = (new_probs - old_probs).exp()
# 加权概率 = 优势函数值 * 概率比
# 加权_裁剪_概率 = T.clamp(概率比, 1 - self.策略裁剪幅度,
# 1 + self.策略裁剪幅度) * 优势函数值
# 动作损失 = -T.min(加权概率, 加权_裁剪_概率).mean()
#概率比2 = 新_动作概率s.mean() / 旧_动作概率s.mean()
总回报 = 优势函数值#+ 价值
动作损失 = -总回报 * 新_动作概率s
动作损失 = 动作损失.mean()
#评价损失 = (总回报 - 评价结果) ** 2
#评价损失 = 评价损失.mean()
print(总回报[10:20],新_动作概率s[:,10:20].exp())
总损失 = 动作损失# + 0.5 * 评价损失 - self.熵系数 * 熵损失
# print(总损失)
self.优化函数.zero_grad()
# self.优化函数_评论.zero_grad()
总损失.backward()
self.优化函数.step()
# self.优化函数_评论.step()
def 监督强化学习A(self,device,状态,回报,动作,动作可能性,评价,完结集):
#print(device,状态,回报,动作,动作可能性,评价)
# for k, v in self.动作.named_parameters():
#
# if k == '评价.weight' or k=='评价.bias':
# v.requires_grad = True
回报集=回报
价值=评价.cpu().numpy()[0,:,0]
优势函数值 = np.zeros(回报集.shape[0], dtype=np.float32)
for t in range(len(回报集) - 1):
折扣率 = 1
优势值 = 0
折扣率 = self.优势估计参数G * self.泛化优势估计参数L
计数 = 0
for k in range(t, len(回报集) - 1):
优势值 += pow(折扣率, abs(0 - 计数)) * (回报集[k]*(1-完结集[0,k]*0))
计数 = 计数 + 1
if 计数 > 200 or 完结集[0,k]==2111111:
break
优势函数值[t] = 优势值
价值 = T.tensor(价值).to(device)
for i in range(3):
优势函数值 = T.tensor(优势函数值).to(device)
旧_动作概率s = T.tensor(动作可能性).to(device)
动作s = T.tensor(动作).to(device)
self.动作.requires_grad_(True)
操作序列 = torch.from_numpy(状态['操作序列'].astype(np.int64)).cuda(device)
图片张量 = torch.from_numpy(状态['图片张量']).cuda(device).float()
trg_mask = 状态['trg_mask']
分布, 评价结果 = self.动作(图片张量, 操作序列, trg_mask)
分布 = F.softmax(分布, dim=-1)
# 分布 = 分布[:, - 1, :]
# 评价结果 = 评价结果[:, - 1, :]
评价结果 = T.squeeze(评价结果)
分布 = Categorical(分布)
#熵损失 = torch.mean(分布.entropy())
新_动作概率s = 分布.log_prob(动作s)
# 旧_动作概率s=旧_动作概率s.exp()
# 概率比 = 新_动作概率s / 旧_动作概率s
# # prob_ratio = (new_probs - old_probs).exp()
# 加权概率 = 优势函数值 * 概率比
# 加权_裁剪_概率 = T.clamp(概率比, 1 - self.策略裁剪幅度,
# 1 + self.策略裁剪幅度) * 优势函数值
# 动作损失 = -T.min(加权概率, 加权_裁剪_概率).mean()
#概率比2 = 新_动作概率s.mean() / 旧_动作概率s.mean()
总回报 = 优势函数值#+ 价值
动作损失 = -总回报 * 新_动作概率s
动作损失 = 动作损失.mean()
#评价损失 = (总回报 - 评价结果) ** 2
#评价损失 = 评价损失.mean()
print(总回报[10:20],新_动作概率s[:,10:20].exp())
总损失 = 动作损失# + 0.5 * 评价损失 - self.熵系数 * 熵损失
# print(总损失)
self.优化函数.zero_grad()
# self.优化函数_评论.zero_grad()
总损失.backward()
self.优化函数.step()
# self.优化函数_评论.step()
def 监督学习(self, 状态,目标输出,打印,数_词表,操作_分_torch,device):
分布, 价值 = self.动作(状态,device)
lin = 分布.view(-1, 分布.size(-1))
_, 抽样 = torch.topk(分布, k=1, dim=-1)
抽样np = 抽样.cpu().numpy()
self.优化函数.zero_grad()
loss = F.cross_entropy(lin, 目标输出.contiguous().view(-1), ignore_index=-1)
if 打印:
print(loss)
打印抽样数据(数_词表, 抽样np[0:1, :, :], 操作_分_torch[0, :])
loss.backward()
self.优化函数.step()
def 选择动作_old(self, 状态):
# 分布,q_ = self.动作(状态)
# r_, 价值 = self.评论(状态)
输出_实际_A, 价值 = self.动作(状态)
输出_实际_A = F.softmax(输出_实际_A, dim=-1)
输出_实际_A = 输出_实际_A[:, - 1, :]
抽样 = torch.multinomial(输出_实际_A, num_samples=1)
抽样np = 抽样.cpu().numpy()
return 抽样np[0,-1]
def load_obj(name):
    """Unpickle and return the object stored in file *name* (full path, extension included)."""
    with open(name, 'rb') as fh:
        return pickle.load(fh)
5,553 | import os
import numpy as np
import torch as T
import torch.nn as nn
import torch.optim as optim
from torch.distributions.categorical import Categorical
import torch.nn as nn
from Layers import DecoderLayer
from Embed import Embedder, PositionalEncoder
from Sublayers import Norm, 全连接层
import copy
import os.path
import torchvision
from config import TransformerConfig
import torch.nn.functional as F
from Batch import create_masks
from 杂项 import 打印抽样数据
import pickle
import gc
:
def __init__(self, 动作数, 输入维度, 优势估计参数G=0.9999, 学习率=0.0003, 泛化优势估计参数L=0.985,
策略裁剪幅度=0.2, 并行条目数=64, 轮数=10,熵系数=0.01):
self.优势估计参数G = 优势估计参数G
self.策略裁剪幅度 = 策略裁剪幅度
self.轮数 = 轮数
self.熵系数=熵系数
self.泛化优势估计参数L = 泛化优势估计参数L
device = torch.device("cuda:0" if (torch.cuda.is_available()) else "cpu")
模型名称 = '模型_策略梯度_丙TA'
config = TransformerConfig()
model = get_model(config, 130, 模型名称)
# model_dict = model.state_dict()
#
# pretrained_dict = torch.load('weights/model_weights_2021-05-7D11')
#
# pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
#
# model_dict.update(pretrained_dict)
#
# model.load_state_dict(model_dict)
model = model.cuda(device)
self.动作 = model
#torch.save(self.动作.state_dict(), 'weights/模型_动作ppo阶段停bZ1')
self.优化函数 = torch.optim.Adam(self.动作.parameters(), lr=2e-5, betas=(0.9, 0.95), eps=1e-9)
self.数据集 = PPO_数据集(并行条目数)
self.文件名集=[]
def 记录数据(self, 状态, 动作, 动作概率, 评价, 回报, 完结,计数):
self.数据集.记录数据(状态, 动作, 动作概率, 评价, 回报, 完结,计数)
def 存硬盘(self, 文件名):
self.数据集.存硬盘(文件名)
self.文件名集.append(文件名)
def 读硬盘(self, 文件名):
self.数据集.读硬盘(文件名)
def 保存模型(self,轮号):
print('... 保存模型 ...')
torch.save(self.动作.state_dict(), 'weights/模型_策略梯度_丙N')
torch.save(self.动作.state_dict(), 'weights/模型_策略梯度_丙N{}'.format(轮号))
#torch.save(self.评论.state_dict(), 'weights/模型_评论')
#torch.save(self.评论.state_dict(), 'weights/模型_评论2')
def 载入模型(self):
print('... 载入模型 ...')
self.动作.载入权重()
#self.评价.载入权重()
def 选择动作(self, 状态,device,传入动作,手动=False):
# 分布,q_ = self.动作(状态)
# r_, 价值 = self.评论(状态)
self.动作.requires_grad_(False)
操作序列=torch.from_numpy(状态['操作序列'].astype(np.int64)).cuda(device)
图片张量=torch.from_numpy(状态['图片张量']).cuda(device)
trg_mask=状态['trg_mask']
分布, 价值 = self.动作(图片张量,操作序列,trg_mask)
价值 = 价值[:, - 1, :]
分布 = F.softmax(分布, dim=-1)
分布 = 分布[:, - 1, :]
分布 = Categorical(分布)
if 手动:
动作 = 传入动作
else:
动作 = 分布.sample()
动作概率 = T.squeeze(分布.log_prob(动作)).item()
动作 = T.squeeze(动作).item()
return 动作, 动作概率, 价值
def 选择动作批量(self, 状态,device,目标输出_分_torch,手动=False):
# 分布,q_ = self.动作(状态)
# r_, 价值 = self.评论(状态)
self.动作.requires_grad_(False)
操作序列=torch.from_numpy(状态['操作序列'].astype(np.int64)).cuda(device)
图片张量=torch.from_numpy(状态['图片张量']).cuda(device)
trg_mask=状态['trg_mask']
分布, 价值 = self.动作(图片张量,操作序列,trg_mask)
分布 = F.softmax(分布, dim=-1)
分布 = Categorical(分布)
if 手动:
动作 = 目标输出_分_torch
else:
动作 = 分布.sample()
动作概率 = T.squeeze(分布.log_prob(动作))
动作 = T.squeeze(动作)
return 动作, 动作概率, 价值
def 学习(self,device):
for i in range(1):
# for k, v in self.动作.named_parameters():
#
# if k == '评价.weight' or k=='评价.bias':
# v.requires_grad = True
for _ in range(self.轮数):
动作集, 旧_动作概率集, 评价集, 回报集, 完结集,图片集合,动作数组, 条目集 = self.数据集.提取数据()
print('回报集',回报集[0:10])
价值 = 评价集
优势函数值 = np.zeros(len(回报集), dtype=np.float32)
for t in range(len(回报集) - 1):
折扣率 = 1
优势值 = 0
折扣率 = self.优势估计参数G * self.泛化优势估计参数L
计数=0
for k in range(t, len(回报集) - 1):
优势值 += pow(折扣率, abs(0-计数)) * (回报集[k] + self.优势估计参数G * 价值[k + 1] * (1 - int(完结集[k])) - 价值[k])
计数=计数+1
if (1 - int(完结集[k]))==0 or 计数>100:
break
优势函数值[t] = 优势值
# https://blog.csdn.net/zhkmxx930xperia/article/details/88257891
# GAE的形式为多个价值估计的加权平均数
优势函数值 = T.tensor(优势函数值).to(device)
价值 = T.tensor(价值).to(device)
for 条 in 条目集:
条末=条[-1:]
旧_动作概率s = T.tensor(旧_动作概率集[条末]).to(device)
动作s = T.tensor(动作集[条末]).to(device)
self.动作.requires_grad_(True)
操作序列 = torch.from_numpy(动作数组[条].astype(np.int64)).cuda(device)
图片张量 = torch.from_numpy(图片集合[:, 条, :]).cuda(device).float()
src_mask, trg_mask = create_masks(操作序列.unsqueeze(0), 操作序列.unsqueeze(0), device)
分布, 评价结果 = self.动作(图片张量,操作序列,trg_mask)
分布=分布[:,-1:,:]
评价结果 = 评价结果[:, -1:, :]
分布 = F.softmax(分布, dim=-1)
# 分布 = 分布[:, - 1, :]
# 评价结果 = 评价结果[:, - 1, :]
评价结果 = T.squeeze(评价结果)
分布 = Categorical(分布)
熵损失 = torch.mean(分布.entropy())
新_动作概率s = 分布.log_prob(动作s)
# 概率比 = 新_动作概率s.exp() / 旧_动作概率s.exp()
# # prob_ratio = (new_probs - old_probs).exp()
# 加权概率 = 优势函数值[条末] * 概率比
# 加权_裁剪_概率 = T.clamp(概率比, 1 - self.策略裁剪幅度,
# 1 + self.策略裁剪幅度) * 优势函数值[条末]
# 动作损失 = -T.min(加权概率, 加权_裁剪_概率).mean()
总回报 = 优势函数值[条末] + 价值[条末]
动作损失 = -总回报 * 新_动作概率s
动作损失 = 动作损失.mean()
评价损失 = (总回报 - 评价结果) ** 2
评价损失 = 评价损失 .mean()
总损失 = 动作损失 + 0.5 * 评价损失-self.熵系数*熵损失
#print(总损失)
self.优化函数.zero_grad()
# self.优化函数_评论.zero_grad()
总损失.backward()
self.优化函数.step()
# self.优化函数_评论.step()
print('总损失',总损失)
self.数据集.清除数据()
self.文件名集=[]
def 监督强化学习(self,device,状态,回报,动作,动作可能性,评价):
#print(device,状态,回报,动作,动作可能性,评价)
# for k, v in self.动作.named_parameters():
#
# if k == '评价.weight' or k=='评价.bias':
# v.requires_grad = True
回报集=回报
价值=评价.cpu().numpy()[0,:,0]
优势函数值 = np.zeros(回报集.shape[0], dtype=np.float32)
for t in range(len(回报集) - 1):
折扣率 = 1
优势值 = 0
折扣率 = self.优势估计参数G * self.泛化优势估计参数L
计数 = 0
for k in range(t, len(回报集) - 1):
优势值 += pow(折扣率, abs(0 - 计数)) * (回报集[k])
计数 = 计数 + 1
if 计数 > 200:
break
优势函数值[t] = 优势值
价值 = T.tensor(价值).to(device)
for i in range(3):
优势函数值 = T.tensor(优势函数值).to(device)
旧_动作概率s = T.tensor(动作可能性).to(device)
动作s = T.tensor(动作).to(device)
self.动作.requires_grad_(True)
操作序列 = torch.from_numpy(状态['操作序列'].astype(np.int64)).cuda(device)
图片张量 = torch.from_numpy(状态['图片张量']).cuda(device).float()
trg_mask = 状态['trg_mask']
分布, 评价结果 = self.动作(图片张量, 操作序列, trg_mask)
分布 = F.softmax(分布, dim=-1)
# 分布 = 分布[:, - 1, :]
# 评价结果 = 评价结果[:, - 1, :]
评价结果 = T.squeeze(评价结果)
分布 = Categorical(分布)
#熵损失 = torch.mean(分布.entropy())
新_动作概率s = 分布.log_prob(动作s)
# 旧_动作概率s=旧_动作概率s.exp()
# 概率比 = 新_动作概率s / 旧_动作概率s
# # prob_ratio = (new_probs - old_probs).exp()
# 加权概率 = 优势函数值 * 概率比
# 加权_裁剪_概率 = T.clamp(概率比, 1 - self.策略裁剪幅度,
# 1 + self.策略裁剪幅度) * 优势函数值
# 动作损失 = -T.min(加权概率, 加权_裁剪_概率).mean()
#概率比2 = 新_动作概率s.mean() / 旧_动作概率s.mean()
总回报 = 优势函数值#+ 价值
动作损失 = -总回报 * 新_动作概率s
动作损失 = 动作损失.mean()
#评价损失 = (总回报 - 评价结果) ** 2
#评价损失 = 评价损失.mean()
print(总回报[10:20],新_动作概率s[:,10:20].exp())
总损失 = 动作损失# + 0.5 * 评价损失 - self.熵系数 * 熵损失
# print(总损失)
self.优化函数.zero_grad()
# self.优化函数_评论.zero_grad()
总损失.backward()
self.优化函数.step()
# self.优化函数_评论.step()
def 监督强化学习A(self,device,状态,回报,动作,动作可能性,评价,完结集):
#print(device,状态,回报,动作,动作可能性,评价)
# for k, v in self.动作.named_parameters():
#
# if k == '评价.weight' or k=='评价.bias':
# v.requires_grad = True
回报集=回报
价值=评价.cpu().numpy()[0,:,0]
优势函数值 = np.zeros(回报集.shape[0], dtype=np.float32)
for t in range(len(回报集) - 1):
折扣率 = 1
优势值 = 0
折扣率 = self.优势估计参数G * self.泛化优势估计参数L
计数 = 0
for k in range(t, len(回报集) - 1):
优势值 += pow(折扣率, abs(0 - 计数)) * (回报集[k]*(1-完结集[0,k]*0))
计数 = 计数 + 1
if 计数 > 200 or 完结集[0,k]==2111111:
break
优势函数值[t] = 优势值
价值 = T.tensor(价值).to(device)
for i in range(3):
优势函数值 = T.tensor(优势函数值).to(device)
旧_动作概率s = T.tensor(动作可能性).to(device)
动作s = T.tensor(动作).to(device)
self.动作.requires_grad_(True)
操作序列 = torch.from_numpy(状态['操作序列'].astype(np.int64)).cuda(device)
图片张量 = torch.from_numpy(状态['图片张量']).cuda(device).float()
trg_mask = 状态['trg_mask']
分布, 评价结果 = self.动作(图片张量, 操作序列, trg_mask)
分布 = F.softmax(分布, dim=-1)
# 分布 = 分布[:, - 1, :]
# 评价结果 = 评价结果[:, - 1, :]
评价结果 = T.squeeze(评价结果)
分布 = Categorical(分布)
#熵损失 = torch.mean(分布.entropy())
新_动作概率s = 分布.log_prob(动作s)
# 旧_动作概率s=旧_动作概率s.exp()
# 概率比 = 新_动作概率s / 旧_动作概率s
# # prob_ratio = (new_probs - old_probs).exp()
# 加权概率 = 优势函数值 * 概率比
# 加权_裁剪_概率 = T.clamp(概率比, 1 - self.策略裁剪幅度,
# 1 + self.策略裁剪幅度) * 优势函数值
# 动作损失 = -T.min(加权概率, 加权_裁剪_概率).mean()
#概率比2 = 新_动作概率s.mean() / 旧_动作概率s.mean()
总回报 = 优势函数值#+ 价值
动作损失 = -总回报 * 新_动作概率s
动作损失 = 动作损失.mean()
#评价损失 = (总回报 - 评价结果) ** 2
#评价损失 = 评价损失.mean()
print(总回报[10:20],新_动作概率s[:,10:20].exp())
总损失 = 动作损失# + 0.5 * 评价损失 - self.熵系数 * 熵损失
# print(总损失)
self.优化函数.zero_grad()
# self.优化函数_评论.zero_grad()
总损失.backward()
self.优化函数.step()
# self.优化函数_评论.step()
def 监督学习(self, 状态,目标输出,打印,数_词表,操作_分_torch,device):
分布, 价值 = self.动作(状态,device)
lin = 分布.view(-1, 分布.size(-1))
_, 抽样 = torch.topk(分布, k=1, dim=-1)
抽样np = 抽样.cpu().numpy()
self.优化函数.zero_grad()
loss = F.cross_entropy(lin, 目标输出.contiguous().view(-1), ignore_index=-1)
if 打印:
print(loss)
打印抽样数据(数_词表, 抽样np[0:1, :, :], 操作_分_torch[0, :])
loss.backward()
self.优化函数.step()
def 选择动作_old(self, 状态):
# 分布,q_ = self.动作(状态)
# r_, 价值 = self.评论(状态)
输出_实际_A, 价值 = self.动作(状态)
输出_实际_A = F.softmax(输出_实际_A, dim=-1)
输出_实际_A = 输出_实际_A[:, - 1, :]
抽样 = torch.multinomial(输出_实际_A, num_samples=1)
抽样np = 抽样.cpu().numpy()
return 抽样np[0,-1]
def get_clones(module, N):
    """Return an nn.ModuleList holding N independent deep copies of *module*."""
    return nn.ModuleList(copy.deepcopy(module) for _ in range(N))
5,554 | import os
import numpy as np
import torch as T
import torch.nn as nn
import torch.optim as optim
from torch.distributions.categorical import Categorical
import torch.nn as nn
from Layers import DecoderLayer
from Embed import Embedder, PositionalEncoder
from Sublayers import Norm, 全连接层
import copy
import os.path
import torchvision
from config import TransformerConfig
import torch.nn.functional as F
from Batch import create_masks
from 杂项 import 打印抽样数据
import pickle
import gc
class Transformer(nn.Module):
    """Decoder-only transformer conditioned on flattened image features.

    Produces both token logits and a scalar value per position
    (actor-critic style heads sharing one decoder).
    """
    def __init__(self, trg_vocab, d_model, N, heads, dropout,图向量尺寸=6*6*2048):
        """trg_vocab: target vocabulary size; d_model: model width; N: layer
        count; heads: attention heads; dropout: dropout rate; 图向量尺寸:
        flattened image-feature length (default 6*6*2048, presumably a
        ResNet conv map -- TODO confirm against the feature extractor)."""
        super().__init__()
        # Project the flat image feature vector down to model width.
        self.图转= 全连接层(图向量尺寸,d_model)
        self.decoder = Decoder(trg_vocab, d_model, N, heads, dropout)
        # Token-logit head and scalar value head over decoder outputs.
        self.outX = 全连接层(d_model, trg_vocab)
        self.评价 = 全连接层(d_model, 1)
    def forward(self, 图向量 ,操作, trg_mask):
        """Return (token logits, value estimates) for each target position."""
        图向量=self.图转(图向量)
        d_output = self.decoder(图向量,操作 , trg_mask)
        output = self.outX(d_output)
        评价 = self.评价(d_output)
        return output,评价
:
def __init__(self, 动作数, 输入维度, 优势估计参数G=0.9999, 学习率=0.0003, 泛化优势估计参数L=0.985,
策略裁剪幅度=0.2, 并行条目数=64, 轮数=10,熵系数=0.01):
self.优势估计参数G = 优势估计参数G
self.策略裁剪幅度 = 策略裁剪幅度
self.轮数 = 轮数
self.熵系数=熵系数
self.泛化优势估计参数L = 泛化优势估计参数L
device = torch.device("cuda:0" if (torch.cuda.is_available()) else "cpu")
模型名称 = '模型_策略梯度_丙TA'
config = TransformerConfig()
model = get_model(config, 130, 模型名称)
# model_dict = model.state_dict()
#
# pretrained_dict = torch.load('weights/model_weights_2021-05-7D11')
#
# pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
#
# model_dict.update(pretrained_dict)
#
# model.load_state_dict(model_dict)
model = model.cuda(device)
self.动作 = model
#torch.save(self.动作.state_dict(), 'weights/模型_动作ppo阶段停bZ1')
self.优化函数 = torch.optim.Adam(self.动作.parameters(), lr=2e-5, betas=(0.9, 0.95), eps=1e-9)
self.数据集 = PPO_数据集(并行条目数)
self.文件名集=[]
def 记录数据(self, 状态, 动作, 动作概率, 评价, 回报, 完结,计数):
self.数据集.记录数据(状态, 动作, 动作概率, 评价, 回报, 完结,计数)
def 存硬盘(self, 文件名):
self.数据集.存硬盘(文件名)
self.文件名集.append(文件名)
def 读硬盘(self, 文件名):
self.数据集.读硬盘(文件名)
def 保存模型(self,轮号):
print('... 保存模型 ...')
torch.save(self.动作.state_dict(), 'weights/模型_策略梯度_丙N')
torch.save(self.动作.state_dict(), 'weights/模型_策略梯度_丙N{}'.format(轮号))
#torch.save(self.评论.state_dict(), 'weights/模型_评论')
#torch.save(self.评论.state_dict(), 'weights/模型_评论2')
def 载入模型(self):
print('... 载入模型 ...')
self.动作.载入权重()
#self.评价.载入权重()
def 选择动作(self, 状态,device,传入动作,手动=False):
# 分布,q_ = self.动作(状态)
# r_, 价值 = self.评论(状态)
self.动作.requires_grad_(False)
操作序列=torch.from_numpy(状态['操作序列'].astype(np.int64)).cuda(device)
图片张量=torch.from_numpy(状态['图片张量']).cuda(device)
trg_mask=状态['trg_mask']
分布, 价值 = self.动作(图片张量,操作序列,trg_mask)
价值 = 价值[:, - 1, :]
分布 = F.softmax(分布, dim=-1)
分布 = 分布[:, - 1, :]
分布 = Categorical(分布)
if 手动:
动作 = 传入动作
else:
动作 = 分布.sample()
动作概率 = T.squeeze(分布.log_prob(动作)).item()
动作 = T.squeeze(动作).item()
return 动作, 动作概率, 价值
def 选择动作批量(self, 状态,device,目标输出_分_torch,手动=False):
# 分布,q_ = self.动作(状态)
# r_, 价值 = self.评论(状态)
self.动作.requires_grad_(False)
操作序列=torch.from_numpy(状态['操作序列'].astype(np.int64)).cuda(device)
图片张量=torch.from_numpy(状态['图片张量']).cuda(device)
trg_mask=状态['trg_mask']
分布, 价值 = self.动作(图片张量,操作序列,trg_mask)
分布 = F.softmax(分布, dim=-1)
分布 = Categorical(分布)
if 手动:
动作 = 目标输出_分_torch
else:
动作 = 分布.sample()
动作概率 = T.squeeze(分布.log_prob(动作))
动作 = T.squeeze(动作)
return 动作, 动作概率, 价值
    def 学习(self,device):
        """Run self.轮数 update epochs over the buffered rollouts.

        For each epoch: compute a GAE-style advantage with a hand-rolled
        discounted sum (truncated at episode end or 100 steps), then for each
        trajectory run one actor-critic update.  The PPO clipped-ratio loss
        is commented out; the active objective is return * log-prob plus a
        0.5-weighted value loss minus an entropy bonus.  Buffers are cleared
        afterwards.
        """
        for i in range(1):
            # for k, v in self.动作.named_parameters():
            #
            #     if k == '评价.weight' or k=='评价.bias':
            #         v.requires_grad = True
            for _ in range(self.轮数):
                动作集, 旧_动作概率集, 评价集, 回报集, 完结集,图片集合,动作数组, 条目集 = self.数据集.提取数据()
                print('回报集',回报集[0:10])
                价值 = 评价集
                优势函数值 = np.zeros(len(回报集), dtype=np.float32)
                # discounted TD-residual sum per start index t
                for t in range(len(回报集) - 1):
                    折扣率 = 1
                    优势值 = 0
                    折扣率 = self.优势估计参数G * self.泛化优势估计参数L
                    计数=0
                    for k in range(t, len(回报集) - 1):
                        优势值 += pow(折扣率, abs(0-计数)) * (回报集[k] + self.优势估计参数G * 价值[k + 1] * (1 - int(完结集[k])) - 价值[k])
                        计数=计数+1
                        # stop at episode boundary or after a 100-step horizon
                        if (1 - int(完结集[k]))==0 or 计数>100:
                            break
                    优势函数值[t] = 优势值
                # https://blog.csdn.net/zhkmxx930xperia/article/details/88257891
                # GAE is a weighted average of several value estimates
                优势函数值 = T.tensor(优势函数值).to(device)
                价值 = T.tensor(价值).to(device)
                for 条 in 条目集:
                    # only the final index of this trajectory slice is optimized
                    条末=条[-1:]
                    旧_动作概率s = T.tensor(旧_动作概率集[条末]).to(device)
                    动作s = T.tensor(动作集[条末]).to(device)
                    self.动作.requires_grad_(True)
                    操作序列 = torch.from_numpy(动作数组[条].astype(np.int64)).cuda(device)
                    图片张量 = torch.from_numpy(图片集合[:, 条, :]).cuda(device).float()
                    src_mask, trg_mask = create_masks(操作序列.unsqueeze(0), 操作序列.unsqueeze(0), device)
                    分布, 评价结果 = self.动作(图片张量,操作序列,trg_mask)
                    分布=分布[:,-1:,:]
                    评价结果 = 评价结果[:, -1:, :]
                    分布 = F.softmax(分布, dim=-1)
                    # 分布 = 分布[:, - 1, :]
                    # 评价结果 = 评价结果[:, - 1, :]
                    评价结果 = T.squeeze(评价结果)
                    分布 = Categorical(分布)
                    熵损失 = torch.mean(分布.entropy())
                    新_动作概率s = 分布.log_prob(动作s)
                    # 概率比 = 新_动作概率s.exp() / 旧_动作概率s.exp()
                    # # prob_ratio = (new_probs - old_probs).exp()
                    # 加权概率 = 优势函数值[条末] * 概率比
                    # 加权_裁剪_概率 = T.clamp(概率比, 1 - self.策略裁剪幅度,
                    #                     1 + self.策略裁剪幅度) * 优势函数值[条末]
                    # 动作损失 = -T.min(加权概率, 加权_裁剪_概率).mean()
                    总回报 = 优势函数值[条末] + 价值[条末]
                    动作损失 = -总回报 * 新_动作概率s
                    动作损失 = 动作损失.mean()
                    评价损失 = (总回报 - 评价结果) ** 2
                    评价损失 = 评价损失 .mean()
                    总损失 = 动作损失 + 0.5 * 评价损失-self.熵系数*熵损失
                    #print(总损失)
                    self.优化函数.zero_grad()
                    # self.优化函数_评论.zero_grad()
                    总损失.backward()
                    self.优化函数.step()
                    # self.优化函数_评论.step()
                print('总损失',总损失)
        self.数据集.清除数据()
        self.文件名集=[]
    def 监督强化学习(self,device,状态,回报,动作,动作可能性,评价):
        """Supervised/RL hybrid update on one pre-collected sequence.

        The "advantage" here is simply a discounted forward sum of rewards
        (truncated at 200 steps); three gradient steps of -return*log_prob
        are taken.  The critic and entropy terms are disabled (commented out).
        NOTE(review): on the 2nd/3rd loop iteration 优势函数值 is already a
        tensor, so T.tensor(...) re-wraps a tensor — works but is wasteful;
        confirm intended.
        """
        #print(device,状态,回报,动作,动作可能性,评价)
        # for k, v in self.动作.named_parameters():
        #
        #     if k == '评价.weight' or k=='评价.bias':
        #         v.requires_grad = True
        回报集=回报
        价值=评价.cpu().numpy()[0,:,0]
        优势函数值 = np.zeros(回报集.shape[0], dtype=np.float32)
        for t in range(len(回报集) - 1):
            折扣率 = 1
            优势值 = 0
            折扣率 = self.优势估计参数G * self.泛化优势估计参数L
            计数 = 0
            for k in range(t, len(回报集) - 1):
                优势值 += pow(折扣率, abs(0 - 计数)) * (回报集[k])
                计数 = 计数 + 1
                if 计数 > 200:
                    break
            优势函数值[t] = 优势值
        价值 = T.tensor(价值).to(device)
        for i in range(3):
            优势函数值 = T.tensor(优势函数值).to(device)
            旧_动作概率s = T.tensor(动作可能性).to(device)
            动作s = T.tensor(动作).to(device)
            self.动作.requires_grad_(True)
            操作序列 = torch.from_numpy(状态['操作序列'].astype(np.int64)).cuda(device)
            图片张量 = torch.from_numpy(状态['图片张量']).cuda(device).float()
            trg_mask = 状态['trg_mask']
            分布, 评价结果 = self.动作(图片张量, 操作序列, trg_mask)
            分布 = F.softmax(分布, dim=-1)
            # 分布 = 分布[:, - 1, :]
            # 评价结果 = 评价结果[:, - 1, :]
            评价结果 = T.squeeze(评价结果)
            分布 = Categorical(分布)
            #熵损失 = torch.mean(分布.entropy())
            新_动作概率s = 分布.log_prob(动作s)
            # 旧_动作概率s=旧_动作概率s.exp()
            # 概率比 = 新_动作概率s / 旧_动作概率s
            # # prob_ratio = (new_probs - old_probs).exp()
            # 加权概率 = 优势函数值 * 概率比
            # 加权_裁剪_概率 = T.clamp(概率比, 1 - self.策略裁剪幅度,
            #                     1 + self.策略裁剪幅度) * 优势函数值
            # 动作损失 = -T.min(加权概率, 加权_裁剪_概率).mean()
            #概率比2 = 新_动作概率s.mean() / 旧_动作概率s.mean()
            总回报 = 优势函数值#+ 价值
            动作损失 = -总回报 * 新_动作概率s
            动作损失 = 动作损失.mean()
            #评价损失 = (总回报 - 评价结果) ** 2
            #评价损失 = 评价损失.mean()
            print(总回报[10:20],新_动作概率s[:,10:20].exp())
            总损失 = 动作损失# + 0.5 * 评价损失 - self.熵系数 * 熵损失
            # print(总损失)
            self.优化函数.zero_grad()
            # self.优化函数_评论.zero_grad()
            总损失.backward()
            self.优化函数.step()
            # self.优化函数_评论.step()
    def 监督强化学习A(self,device,状态,回报,动作,动作可能性,评价,完结集):
        """Variant of 监督强化学习 that also receives done flags 完结集.

        NOTE(review): (1-完结集[0,k]*0) is always 1 and 完结集[0,k]==2111111
        looks like a disabled sentinel — the done flags are effectively
        ignored in this version; confirm intended.
        """
        #print(device,状态,回报,动作,动作可能性,评价)
        # for k, v in self.动作.named_parameters():
        #
        #     if k == '评价.weight' or k=='评价.bias':
        #         v.requires_grad = True
        回报集=回报
        价值=评价.cpu().numpy()[0,:,0]
        优势函数值 = np.zeros(回报集.shape[0], dtype=np.float32)
        for t in range(len(回报集) - 1):
            折扣率 = 1
            优势值 = 0
            折扣率 = self.优势估计参数G * self.泛化优势估计参数L
            计数 = 0
            for k in range(t, len(回报集) - 1):
                优势值 += pow(折扣率, abs(0 - 计数)) * (回报集[k]*(1-完结集[0,k]*0))
                计数 = 计数 + 1
                if 计数 > 200 or 完结集[0,k]==2111111:
                    break
            优势函数值[t] = 优势值
        价值 = T.tensor(价值).to(device)
        for i in range(3):
            优势函数值 = T.tensor(优势函数值).to(device)
            旧_动作概率s = T.tensor(动作可能性).to(device)
            动作s = T.tensor(动作).to(device)
            self.动作.requires_grad_(True)
            操作序列 = torch.from_numpy(状态['操作序列'].astype(np.int64)).cuda(device)
            图片张量 = torch.from_numpy(状态['图片张量']).cuda(device).float()
            trg_mask = 状态['trg_mask']
            分布, 评价结果 = self.动作(图片张量, 操作序列, trg_mask)
            分布 = F.softmax(分布, dim=-1)
            # 分布 = 分布[:, - 1, :]
            # 评价结果 = 评价结果[:, - 1, :]
            评价结果 = T.squeeze(评价结果)
            分布 = Categorical(分布)
            #熵损失 = torch.mean(分布.entropy())
            新_动作概率s = 分布.log_prob(动作s)
            # 旧_动作概率s=旧_动作概率s.exp()
            # 概率比 = 新_动作概率s / 旧_动作概率s
            # # prob_ratio = (new_probs - old_probs).exp()
            # 加权概率 = 优势函数值 * 概率比
            # 加权_裁剪_概率 = T.clamp(概率比, 1 - self.策略裁剪幅度,
            #                     1 + self.策略裁剪幅度) * 优势函数值
            # 动作损失 = -T.min(加权概率, 加权_裁剪_概率).mean()
            #概率比2 = 新_动作概率s.mean() / 旧_动作概率s.mean()
            总回报 = 优势函数值#+ 价值
            动作损失 = -总回报 * 新_动作概率s
            动作损失 = 动作损失.mean()
            #评价损失 = (总回报 - 评价结果) ** 2
            #评价损失 = 评价损失.mean()
            print(总回报[10:20],新_动作概率s[:,10:20].exp())
            总损失 = 动作损失# + 0.5 * 评价损失 - self.熵系数 * 熵损失
            # print(总损失)
            self.优化函数.zero_grad()
            # self.优化函数_评论.zero_grad()
            总损失.backward()
            self.优化函数.step()
            # self.优化函数_评论.step()
    def 监督学习(self, 状态,目标输出,打印,数_词表,操作_分_torch,device):
        """One supervised training step: cross-entropy between the policy
        logits and 目标输出 (ignore_index=-1 masks padding).  When 打印 is
        true, also prints the loss and a decoded sample for inspection."""
        分布, 价值 = self.动作(状态,device)
        lin = 分布.view(-1, 分布.size(-1))
        _, 抽样 = torch.topk(分布, k=1, dim=-1)
        抽样np = 抽样.cpu().numpy()
        self.优化函数.zero_grad()
        loss = F.cross_entropy(lin, 目标输出.contiguous().view(-1), ignore_index=-1)
        if 打印:
            print(loss)
            打印抽样数据(数_词表, 抽样np[0:1, :, :], 操作_分_torch[0, :])
        loss.backward()
        self.优化函数.step()
    def 选择动作_old(self, 状态):
        """Legacy action sampling: multinomial draw from the softmaxed
        last-step logits; returns the sampled index as a numpy scalar."""
        # 分布,q_ = self.动作(状态)
        # r_, 价值 = self.评论(状态)
        输出_实际_A, 价值 = self.动作(状态)
        输出_实际_A = F.softmax(输出_实际_A, dim=-1)
        输出_实际_A = 输出_实际_A[:, - 1, :]
        抽样 = torch.multinomial(输出_实际_A, num_samples=1)
        抽样np = 抽样.cpu().numpy()
        return 抽样np[0,-1]
def get_model(opt, trg_vocab, model_weights='model_weights'):
    """Build a Transformer from the config in opt.

    Loads pretrained weights from opt.load_weights/model_weights when that
    file exists; otherwise reports the model's parameter count in millions.
    (The Xavier initialization that used to run here is disabled, matching
    the original behavior.)
    """
    assert opt.d_model % opt.heads == 0
    assert opt.dropout < 1
    model = Transformer(trg_vocab, opt.d_model, opt.n_layers, opt.heads, opt.dropout)
    if opt.load_weights is not None and os.path.isfile(opt.load_weights + '/' + model_weights):
        print("loading pretrained weights...")
        model.load_state_dict(torch.load(f'{opt.load_weights}/' + model_weights))
    else:
        # idiomatic parameter count: replaces the hand-rolled shape-product loop
        量 = sum(p.numel() for p in model.parameters())
        print('使用参数:{}百万'.format(量 / 1000000))
    return model
5,555 | import os
import numpy as np
import torch as T
import torch.nn as nn
import torch.optim as optim
from torch.distributions.categorical import Categorical
from Layers import DecoderLayer
from Embed import Embedder, PositionalEncoder
from Sublayers import Norm, 全连接层
import copy
import os.path
import torchvision
from config import TransformerConfig
import torch.nn.functional as F
from Batch import create_masks
from 杂项 import 打印抽样数据
import pickle
import gc
class 智能体:
    def __init__(self, 动作数, 输入维度, 优势估计参数G=0.9999, 学习率=0.0003, 泛化优势估计参数L=0.985,
                 策略裁剪幅度=0.2, 并行条目数=64, 轮数=10,熵系数=0.01):
        """Set PPO hyper-parameters, build/load the policy Transformer on
        CUDA, and create the optimizer and rollout buffer.

        NOTE(review): 动作数, 输入维度 and 学习率 are accepted but never used
        (the Adam lr is hard-coded to 2e-5) — confirm intended.
        """
        self.优势估计参数G = 优势估计参数G
        self.策略裁剪幅度 = 策略裁剪幅度
        self.轮数 = 轮数
        self.熵系数=熵系数
        self.泛化优势估计参数L = 泛化优势估计参数L
        device = torch.device("cuda:0" if (torch.cuda.is_available()) else "cpu")
        模型名称 = '模型_策略梯度_丙TA'
        config = TransformerConfig()
        # 130 is the target vocabulary size passed to get_model
        model = get_model(config, 130, 模型名称)
        # model_dict = model.state_dict()
        #
        # pretrained_dict = torch.load('weights/model_weights_2021-05-7D11')
        #
        # pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
        #
        # model_dict.update(pretrained_dict)
        #
        # model.load_state_dict(model_dict)
        model = model.cuda(device)
        self.动作 = model
        #torch.save(self.动作.state_dict(), 'weights/模型_动作ppo阶段停bZ1')
        self.优化函数 = torch.optim.Adam(self.动作.parameters(), lr=2e-5, betas=(0.9, 0.95), eps=1e-9)
        self.数据集 = PPO_数据集(并行条目数)
        self.文件名集=[]
    def 记录数据(self, 状态, 动作, 动作概率, 评价, 回报, 完结,计数):
        """Append one rollout transition (state, action, log-prob, value,
        reward, done flag, step count) to the PPO replay buffer."""
        self.数据集.记录数据(状态, 动作, 动作概率, 评价, 回报, 完结,计数)
    def 存硬盘(self, 文件名):
        """Persist the buffered rollout to disk under 文件名 and remember the
        file name so 学习 can later clear the whole batch."""
        self.数据集.存硬盘(文件名)
        self.文件名集.append(文件名)
    def 读硬盘(self, 文件名):
        """Load rollout data previously written with 存硬盘 back into the buffer."""
        self.数据集.读硬盘(文件名)
    def 保存模型(self,轮号):
        """Save the policy network weights twice: to a fixed path (latest)
        and to an epoch-numbered path for checkpoint history."""
        print('... 保存模型 ...')
        torch.save(self.动作.state_dict(), 'weights/模型_策略梯度_丙N')
        torch.save(self.动作.state_dict(), 'weights/模型_策略梯度_丙N{}'.format(轮号))
        #torch.save(self.评论.state_dict(), 'weights/模型_评论')
        #torch.save(self.评论.state_dict(), 'weights/模型_评论2')
    def 载入模型(self):
        """Reload policy weights via the model's own 载入权重 helper."""
        print('... 载入模型 ...')
        self.动作.载入权重()
        #self.评价.载入权重()
    def 选择动作(self, 状态,device,传入动作,手动=False):
        """Pick a single action for one state at rollout time (no gradients).

        When 手动 is True the caller-supplied 传入动作 is used instead of
        sampling, but its log-prob under the current policy is still returned.
        Returns (action:int, log_prob:float, value tensor for the last step).
        """
        # 分布,q_ = self.动作(状态)
        # r_, 价值 = self.评论(状态)
        self.动作.requires_grad_(False)
        操作序列=torch.from_numpy(状态['操作序列'].astype(np.int64)).cuda(device)
        图片张量=torch.from_numpy(状态['图片张量']).cuda(device)
        trg_mask=状态['trg_mask']
        分布, 价值 = self.动作(图片张量,操作序列,trg_mask)
        # keep only the prediction for the most recent timestep
        价值 = 价值[:, - 1, :]
        分布 = F.softmax(分布, dim=-1)
        分布 = 分布[:, - 1, :]
        分布 = Categorical(分布)
        if 手动:
            动作 = 传入动作
        else:
            动作 = 分布.sample()
        动作概率 = T.squeeze(分布.log_prob(动作)).item()
        动作 = T.squeeze(动作).item()
        return 动作, 动作概率, 价值
    def 选择动作批量(self, 状态,device,目标输出_分_torch,手动=False):
        """Batch variant of 选择动作: keeps every timestep (no last-step
        slicing) and returns tensors instead of Python scalars.

        Returns (actions, log_probs, values) as tensors.
        """
        # 分布,q_ = self.动作(状态)
        # r_, 价值 = self.评论(状态)
        self.动作.requires_grad_(False)
        操作序列=torch.from_numpy(状态['操作序列'].astype(np.int64)).cuda(device)
        图片张量=torch.from_numpy(状态['图片张量']).cuda(device)
        trg_mask=状态['trg_mask']
        分布, 价值 = self.动作(图片张量,操作序列,trg_mask)
        分布 = F.softmax(分布, dim=-1)
        分布 = Categorical(分布)
        if 手动:
            动作 = 目标输出_分_torch
        else:
            动作 = 分布.sample()
        动作概率 = T.squeeze(分布.log_prob(动作))
        动作 = T.squeeze(动作)
        return 动作, 动作概率, 价值
    def 学习(self,device):
        """Run self.轮数 update epochs over the buffered rollouts.

        For each epoch: compute a GAE-style advantage with a hand-rolled
        discounted sum (truncated at episode end or 100 steps), then for each
        trajectory run one actor-critic update.  The PPO clipped-ratio loss
        is commented out; the active objective is return * log-prob plus a
        0.5-weighted value loss minus an entropy bonus.  Buffers are cleared
        afterwards.
        """
        for i in range(1):
            # for k, v in self.动作.named_parameters():
            #
            #     if k == '评价.weight' or k=='评价.bias':
            #         v.requires_grad = True
            for _ in range(self.轮数):
                动作集, 旧_动作概率集, 评价集, 回报集, 完结集,图片集合,动作数组, 条目集 = self.数据集.提取数据()
                print('回报集',回报集[0:10])
                价值 = 评价集
                优势函数值 = np.zeros(len(回报集), dtype=np.float32)
                # discounted TD-residual sum per start index t
                for t in range(len(回报集) - 1):
                    折扣率 = 1
                    优势值 = 0
                    折扣率 = self.优势估计参数G * self.泛化优势估计参数L
                    计数=0
                    for k in range(t, len(回报集) - 1):
                        优势值 += pow(折扣率, abs(0-计数)) * (回报集[k] + self.优势估计参数G * 价值[k + 1] * (1 - int(完结集[k])) - 价值[k])
                        计数=计数+1
                        # stop at episode boundary or after a 100-step horizon
                        if (1 - int(完结集[k]))==0 or 计数>100:
                            break
                    优势函数值[t] = 优势值
                # https://blog.csdn.net/zhkmxx930xperia/article/details/88257891
                # GAE is a weighted average of several value estimates
                优势函数值 = T.tensor(优势函数值).to(device)
                价值 = T.tensor(价值).to(device)
                for 条 in 条目集:
                    # only the final index of this trajectory slice is optimized
                    条末=条[-1:]
                    旧_动作概率s = T.tensor(旧_动作概率集[条末]).to(device)
                    动作s = T.tensor(动作集[条末]).to(device)
                    self.动作.requires_grad_(True)
                    操作序列 = torch.from_numpy(动作数组[条].astype(np.int64)).cuda(device)
                    图片张量 = torch.from_numpy(图片集合[:, 条, :]).cuda(device).float()
                    src_mask, trg_mask = create_masks(操作序列.unsqueeze(0), 操作序列.unsqueeze(0), device)
                    分布, 评价结果 = self.动作(图片张量,操作序列,trg_mask)
                    分布=分布[:,-1:,:]
                    评价结果 = 评价结果[:, -1:, :]
                    分布 = F.softmax(分布, dim=-1)
                    # 分布 = 分布[:, - 1, :]
                    # 评价结果 = 评价结果[:, - 1, :]
                    评价结果 = T.squeeze(评价结果)
                    分布 = Categorical(分布)
                    熵损失 = torch.mean(分布.entropy())
                    新_动作概率s = 分布.log_prob(动作s)
                    # 概率比 = 新_动作概率s.exp() / 旧_动作概率s.exp()
                    # # prob_ratio = (new_probs - old_probs).exp()
                    # 加权概率 = 优势函数值[条末] * 概率比
                    # 加权_裁剪_概率 = T.clamp(概率比, 1 - self.策略裁剪幅度,
                    #                     1 + self.策略裁剪幅度) * 优势函数值[条末]
                    # 动作损失 = -T.min(加权概率, 加权_裁剪_概率).mean()
                    总回报 = 优势函数值[条末] + 价值[条末]
                    动作损失 = -总回报 * 新_动作概率s
                    动作损失 = 动作损失.mean()
                    评价损失 = (总回报 - 评价结果) ** 2
                    评价损失 = 评价损失 .mean()
                    总损失 = 动作损失 + 0.5 * 评价损失-self.熵系数*熵损失
                    #print(总损失)
                    self.优化函数.zero_grad()
                    # self.优化函数_评论.zero_grad()
                    总损失.backward()
                    self.优化函数.step()
                    # self.优化函数_评论.step()
                print('总损失',总损失)
        self.数据集.清除数据()
        self.文件名集=[]
    def 监督强化学习(self,device,状态,回报,动作,动作可能性,评价):
        """Supervised/RL hybrid update on one pre-collected sequence.

        The "advantage" here is simply a discounted forward sum of rewards
        (truncated at 200 steps); three gradient steps of -return*log_prob
        are taken.  The critic and entropy terms are disabled (commented out).
        NOTE(review): on the 2nd/3rd loop iteration 优势函数值 is already a
        tensor, so T.tensor(...) re-wraps a tensor — works but is wasteful;
        confirm intended.
        """
        #print(device,状态,回报,动作,动作可能性,评价)
        # for k, v in self.动作.named_parameters():
        #
        #     if k == '评价.weight' or k=='评价.bias':
        #         v.requires_grad = True
        回报集=回报
        价值=评价.cpu().numpy()[0,:,0]
        优势函数值 = np.zeros(回报集.shape[0], dtype=np.float32)
        for t in range(len(回报集) - 1):
            折扣率 = 1
            优势值 = 0
            折扣率 = self.优势估计参数G * self.泛化优势估计参数L
            计数 = 0
            for k in range(t, len(回报集) - 1):
                优势值 += pow(折扣率, abs(0 - 计数)) * (回报集[k])
                计数 = 计数 + 1
                if 计数 > 200:
                    break
            优势函数值[t] = 优势值
        价值 = T.tensor(价值).to(device)
        for i in range(3):
            优势函数值 = T.tensor(优势函数值).to(device)
            旧_动作概率s = T.tensor(动作可能性).to(device)
            动作s = T.tensor(动作).to(device)
            self.动作.requires_grad_(True)
            操作序列 = torch.from_numpy(状态['操作序列'].astype(np.int64)).cuda(device)
            图片张量 = torch.from_numpy(状态['图片张量']).cuda(device).float()
            trg_mask = 状态['trg_mask']
            分布, 评价结果 = self.动作(图片张量, 操作序列, trg_mask)
            分布 = F.softmax(分布, dim=-1)
            # 分布 = 分布[:, - 1, :]
            # 评价结果 = 评价结果[:, - 1, :]
            评价结果 = T.squeeze(评价结果)
            分布 = Categorical(分布)
            #熵损失 = torch.mean(分布.entropy())
            新_动作概率s = 分布.log_prob(动作s)
            # 旧_动作概率s=旧_动作概率s.exp()
            # 概率比 = 新_动作概率s / 旧_动作概率s
            # # prob_ratio = (new_probs - old_probs).exp()
            # 加权概率 = 优势函数值 * 概率比
            # 加权_裁剪_概率 = T.clamp(概率比, 1 - self.策略裁剪幅度,
            #                     1 + self.策略裁剪幅度) * 优势函数值
            # 动作损失 = -T.min(加权概率, 加权_裁剪_概率).mean()
            #概率比2 = 新_动作概率s.mean() / 旧_动作概率s.mean()
            总回报 = 优势函数值#+ 价值
            动作损失 = -总回报 * 新_动作概率s
            动作损失 = 动作损失.mean()
            #评价损失 = (总回报 - 评价结果) ** 2
            #评价损失 = 评价损失.mean()
            print(总回报[10:20],新_动作概率s[:,10:20].exp())
            总损失 = 动作损失# + 0.5 * 评价损失 - self.熵系数 * 熵损失
            # print(总损失)
            self.优化函数.zero_grad()
            # self.优化函数_评论.zero_grad()
            总损失.backward()
            self.优化函数.step()
            # self.优化函数_评论.step()
    def 监督强化学习A(self,device,状态,回报,动作,动作可能性,评价,完结集):
        """Variant of 监督强化学习 that also receives done flags 完结集.

        NOTE(review): (1-完结集[0,k]*0) is always 1 and 完结集[0,k]==2111111
        looks like a disabled sentinel — the done flags are effectively
        ignored in this version; confirm intended.
        """
        #print(device,状态,回报,动作,动作可能性,评价)
        # for k, v in self.动作.named_parameters():
        #
        #     if k == '评价.weight' or k=='评价.bias':
        #         v.requires_grad = True
        回报集=回报
        价值=评价.cpu().numpy()[0,:,0]
        优势函数值 = np.zeros(回报集.shape[0], dtype=np.float32)
        for t in range(len(回报集) - 1):
            折扣率 = 1
            优势值 = 0
            折扣率 = self.优势估计参数G * self.泛化优势估计参数L
            计数 = 0
            for k in range(t, len(回报集) - 1):
                优势值 += pow(折扣率, abs(0 - 计数)) * (回报集[k]*(1-完结集[0,k]*0))
                计数 = 计数 + 1
                if 计数 > 200 or 完结集[0,k]==2111111:
                    break
            优势函数值[t] = 优势值
        价值 = T.tensor(价值).to(device)
        for i in range(3):
            优势函数值 = T.tensor(优势函数值).to(device)
            旧_动作概率s = T.tensor(动作可能性).to(device)
            动作s = T.tensor(动作).to(device)
            self.动作.requires_grad_(True)
            操作序列 = torch.from_numpy(状态['操作序列'].astype(np.int64)).cuda(device)
            图片张量 = torch.from_numpy(状态['图片张量']).cuda(device).float()
            trg_mask = 状态['trg_mask']
            分布, 评价结果 = self.动作(图片张量, 操作序列, trg_mask)
            分布 = F.softmax(分布, dim=-1)
            # 分布 = 分布[:, - 1, :]
            # 评价结果 = 评价结果[:, - 1, :]
            评价结果 = T.squeeze(评价结果)
            分布 = Categorical(分布)
            #熵损失 = torch.mean(分布.entropy())
            新_动作概率s = 分布.log_prob(动作s)
            # 旧_动作概率s=旧_动作概率s.exp()
            # 概率比 = 新_动作概率s / 旧_动作概率s
            # # prob_ratio = (new_probs - old_probs).exp()
            # 加权概率 = 优势函数值 * 概率比
            # 加权_裁剪_概率 = T.clamp(概率比, 1 - self.策略裁剪幅度,
            #                     1 + self.策略裁剪幅度) * 优势函数值
            # 动作损失 = -T.min(加权概率, 加权_裁剪_概率).mean()
            #概率比2 = 新_动作概率s.mean() / 旧_动作概率s.mean()
            总回报 = 优势函数值#+ 价值
            动作损失 = -总回报 * 新_动作概率s
            动作损失 = 动作损失.mean()
            #评价损失 = (总回报 - 评价结果) ** 2
            #评价损失 = 评价损失.mean()
            print(总回报[10:20],新_动作概率s[:,10:20].exp())
            总损失 = 动作损失# + 0.5 * 评价损失 - self.熵系数 * 熵损失
            # print(总损失)
            self.优化函数.zero_grad()
            # self.优化函数_评论.zero_grad()
            总损失.backward()
            self.优化函数.step()
            # self.优化函数_评论.step()
    def 监督学习(self, 状态,目标输出,打印,数_词表,操作_分_torch,device):
        """One supervised training step: cross-entropy between the policy
        logits and 目标输出 (ignore_index=-1 masks padding).  When 打印 is
        true, also prints the loss and a decoded sample for inspection."""
        分布, 价值 = self.动作(状态,device)
        lin = 分布.view(-1, 分布.size(-1))
        _, 抽样 = torch.topk(分布, k=1, dim=-1)
        抽样np = 抽样.cpu().numpy()
        self.优化函数.zero_grad()
        loss = F.cross_entropy(lin, 目标输出.contiguous().view(-1), ignore_index=-1)
        if 打印:
            print(loss)
            打印抽样数据(数_词表, 抽样np[0:1, :, :], 操作_分_torch[0, :])
        loss.backward()
        self.优化函数.step()
    def 选择动作_old(self, 状态):
        """Legacy action sampling: multinomial draw from the softmaxed
        last-step logits; returns the sampled index as a numpy scalar."""
        # 分布,q_ = self.动作(状态)
        # r_, 价值 = self.评论(状态)
        输出_实际_A, 价值 = self.动作(状态)
        输出_实际_A = F.softmax(输出_实际_A, dim=-1)
        输出_实际_A = 输出_实际_A[:, - 1, :]
        抽样 = torch.multinomial(输出_实际_A, num_samples=1)
        抽样np = 抽样.cpu().numpy()
        return 抽样np[0,-1]
def create_masks(src, trg, device):
    """Build the source padding mask and (if trg is given) the combined
    padding + no-peek target mask; the padding id is -1.

    Returns (src_mask, trg_mask); trg_mask is None when trg is None.
    """
    src_mask = (src != -1).unsqueeze(-2)
    if trg is not None:
        trg_mask = (trg != -1).unsqueeze(-2)
        # NOTE(review): .cuda() is not in-place and the result is discarded;
        # this likely intended trg_mask = trg_mask.cuda(device) — confirm.
        trg_mask.cuda(device)
        size = trg.size(1) # get seq_len for matrix
        np_mask = nopeak_mask(size, device)
        trg_mask = trg_mask & np_mask
    else:
        trg_mask = None
    return src_mask, trg_mask
def 处理状态参数(状态组,device):
    """Pad every state dict in 状态组 to the longest 图片张量 length, build
    per-state attention masks (1 = real step, -1 = padding), stack all
    states along the batch axis, and attach the resulting trg_mask.

    Returns a single combined state dict ready for a batched forward pass.
    """
    最长=0
    状态组合={}
    # 操作序列 = np.ones((1,))
    # first pass: find the longest image-sequence length in the batch
    for 状态A in 状态组:
        if 状态A['图片张量'].shape[1]>最长:
            最长=状态A['图片张量'].shape[1]
    for 状态 in 状态组:
        状态A = 状态.copy()
        if 状态A['图片张量'].shape[1] == 最长:
            # already the longest: only needs an all-ones mask
            单元=状态A
            操作序列 = np.ones((最长,))
            遮罩序列 = torch.from_numpy(操作序列.astype(np.int64)).cuda(device).unsqueeze(0)
            单元['遮罩序列']=遮罩序列
        else:
            # shorter state: zero-pad every sequence field up to 最长
            有效长度=状态A['图片张量'].shape[1]
            差值=最长-有效长度
            形状=状态A['图片张量'].shape
            图片张量_拼接 = torch.zeros(形状[0],差值,形状[2],形状[3]).cuda(device).float()
            图片张量_拼接 = 图片张量_拼接.cpu().numpy()
            状态A['图片张量']=np.append(状态A['图片张量'],图片张量_拼接, axis=1)
            #状态A['图片张量'] = torch.cat((状态A['图片张量'], 图片张量_拼接), 1)
            形状 = 状态A['角度集张量_序列'].shape
            角度集张量_拼接=torch.zeros(形状[0],差值,形状[2]).cuda(device).float()
            状态A['角度集张量_序列'] = torch.cat((状态A['角度集张量_序列'], 角度集张量_拼接), 1)
            形状 = 状态A['位置张量_序列'].shape
            位置张量_拼接=torch.zeros(形状[0],差值,形状[2]).cuda(device).float()
            状态A['位置张量_序列'] = torch.cat((状态A['位置张量_序列'], 位置张量_拼接), 1)
            形状 = 状态A['速度张量_序列'].shape
            速度张量_拼接=torch.zeros(形状[0],差值,形状[2]).cuda(device).float()
            状态A['速度张量_序列'] = torch.cat((状态A['速度张量_序列'], 速度张量_拼接), 1)
            # mask: ones over the valid prefix, -1 (padding id) over the rest
            操作序列 = np.ones((有效长度,))
            遮罩序列 = torch.from_numpy(操作序列.astype(np.int64)).cuda(device).unsqueeze(0)
            状态A['遮罩序列']=遮罩序列
            操作序列 = np.ones((差值,))*-1
            遮罩序列 = torch.from_numpy(操作序列.astype(np.int64)).cuda(device).unsqueeze(0)
            状态A['遮罩序列'] = torch.cat((状态A['遮罩序列'], 遮罩序列), 1)
            单元=状态A
        # accumulate along the batch dimension
        if 状态组合=={}:
            状态组合=单元
        else:
            状态组合['遮罩序列'] = torch.cat((状态组合['遮罩序列'], 单元['遮罩序列']), 0)
            状态组合['速度张量_序列'] = torch.cat((状态组合['速度张量_序列'], 单元['速度张量_序列'],), 0)
            状态组合['位置张量_序列'] = torch.cat((状态组合['位置张量_序列'], 单元['位置张量_序列']), 0)
            状态组合['角度集张量_序列'] = torch.cat((状态组合['角度集张量_序列'], 单元['角度集张量_序列']), 0)
            #状态组合['图片张量'] = torch.cat((状态组合['图片张量'], 单元['图片张量']), 0)
            状态组合['图片张量'] =np.append(状态组合['图片张量'], 单元['图片张量'], axis=0)
    src_mask, trg_mask = create_masks(状态组合['遮罩序列'], 状态组合['遮罩序列'], device)
    状态组合['trg_mask']=trg_mask
    return 状态组合
5,556 | import socket
import json
import sys
import time, threading
import cv2
import torch
import numpy as np
from 辅助功能 import 状态信息综合
import torchvision
from resnet_utils import myResnet
from 模型_策略梯度 import 智能体
from Batch import create_masks
import subprocess
from PyQt5.QtWidgets import QApplication
from PIL import Image, ImageQt
import os
import win32gui, win32ui, win32con
from 取训练数据 import 读出引索
from 运行辅助 import MyMNTDevice,取图
from pynput.keyboard import Key, Listener
from pynput import keyboard
import random
from 模型_策略梯度 import Transformer
def on_release(key):
    """pynput release callback: clear the pressed flag for keys 1-8, reset
    状况 on PageDown, and stop the listener when Esc is released.

    Fixes: the def line was truncated in the original, and the flags for
    keys 2-8 were missing from the global declaration, so releasing those
    keys only set dead function-local variables and the shared state never
    changed.
    """
    global 一键按下, 二键按下, 三键按下, 四键按下, 五键按下, 六键按下, 七键按下, 八键按下, 状况
    key_name = get_key_name(key)
    if key_name == '1':
        一键按下 = False
    if key_name == '2':
        二键按下 = False
    if key_name == '3':
        三键按下 = False
    if key_name == '4':
        四键按下 = False
    if key_name == '5':
        五键按下 = False
    if key_name == '6':
        六键按下 = False
    if key_name == '7':
        七键按下 = False
    if key_name == '8':
        八键按下 = False
    if key_name == 'Key.page_down':
        状况 = '无状况'
    print("已经释放:", key_name)
    if key == Key.esc:
        # returning False stops the pynput Listener
        return False
def on_press(key):
def start_listen():
    """Block on a pynput keyboard Listener, dispatching to on_press/on_release."""
    with Listener(on_press=on_press, on_release=on_release) as listener:
        listener.join()
5,557 | import json
import numpy as np
def 读取训练数据(路径):
    """Load JSONL training records and split each into 16 equal slices.

    Every line of the file at 路径 is a JSON object with nested keys
    内容______ -> 输入______ / 输出______.  Each input/output pair is cut
    into 16 consecutive chunks of len//16 items (a remainder, if any, is
    dropped).  Returns (输入表单, 输出表单): two parallel lists of chunks.
    """
    输入表单 = []
    输出表单 = []
    with open(路径, encoding='utf-8') as f:
        for 行 in f:
            内容 = json.loads(行)['内容______']
            序列_输入 = 内容['输入______']
            序列_输出 = 内容['输出______']
            # provisional split into 16 equal pieces
            段长 = len(序列_输入) // 16
            for 段 in range(16):
                输入表单.append(序列_输入[段 * 段长:(段 + 1) * 段长])
                输出表单.append(序列_输出[段 * 段长:(段 + 1) * 段长])
    return 输入表单, 输出表单
5,558 | import json
import numpy as np
def 写出词标号引索(总词表, 词_数表路径, 数_词表路径):
    """Assign consecutive integer ids to each distinct character in 总词表
    (in first-seen order) and dump both directions of the mapping as JSON.

    词_数表路径 receives char -> id; 数_词表路径 receives id -> char.
    Progress is printed every 10000 entries of 总词表.
    """
    print("正在写出词的标号引索数据可能需要较长时间")
    字符_到_标号 = {}
    标号_到_字符 = {}
    处理数 = 0
    for 词表 in 总词表:
        处理数 += 1
        for 字符 in 词表:
            if 字符 not in 字符_到_标号:
                新标号 = len(字符_到_标号)
                字符_到_标号[字符] = 新标号
                标号_到_字符[新标号] = 字符
        if 处理数 % 10000 == 0:
            # periodic progress: id count, last assigned char, fraction done
            print(len(字符_到_标号), 标号_到_字符[len(字符_到_标号) - 1], 处理数 / len(总词表))
    with open(词_数表路径, 'w', encoding='utf-8') as f:
        json.dump(字符_到_标号, f, ensure_ascii=False)
    with open(数_词表路径, 'w', encoding='utf-8') as f:
        json.dump(标号_到_字符, f, ensure_ascii=False)
5,559 | import json
import numpy as np
def 读出引索(词_数表路径, 数_词表路径):
    """Load the char->id and id->char JSON maps written by 写出词标号引索.

    Returns (词_数表, 数_词表).  Note that JSON serialization forces the
    integer keys of the id->char map to be strings.
    """
    表组 = []
    for 路径 in (词_数表路径, 数_词表路径):
        with open(路径, encoding='utf-8') as f:
            表组.append(json.load(f))
    return 表组[0], 表组[1]
5,560 | import json
import numpy as np
def 生成训练用numpy数组(输入表单, 词_数表, numpy数组路径):
    """Tokenize each sequence in 输入表单 and save (input, shifted-target)
    numpy arrays to numpy数组路径 (.npz with keys 输入np / 输出np).

    Runs of ASCII letters are buffered in 临 and looked up as one word;
    any token missing from 词_数表 maps to the OOV id 14999.  Only token
    sequences of the exact expected length 667 are kept (inputs are the
    first 666 ids, targets the last 666); others are printed and skipped.
    NOTE(review): 临 is declared outside the outer loop but flushed per
    sequence, so it should normally be empty between sequences — confirm.
    """
    表_1 = []
    表_2 = []
    i = 0
    临 = ''
    for 表单 in 输入表单:
        表_3 = []
        for 字符 in 表单:
            if (u'\u0041' <= 字符 <= u'\u005a') or (u'\u0061' <= 字符 <= u'\u007a'):
                # ASCII letter: keep buffering the current word
                if 临 == '':
                    临 = 字符
                else:
                    临 = 临 + 字符
            else:
                if 临 == '':
                    if 字符.lower() in 词_数表:
                        表_3.append(词_数表[字符.lower()])
                    else:
                        表_3.append(14999)
                else:
                    # non-letter ends the buffered word: emit word, then char
                    if 临.lower() in 词_数表:
                        表_3.append(词_数表[临.lower()])
                    else:
                        表_3.append(14999)
                    临 = ''
                    if 字符.lower() in 词_数表:
                        表_3.append(词_数表[字符.lower()])
                    else:
                        表_3.append(14999)
        # flush a word left buffered at end of the sequence
        if 临 != '':
            if 临.lower() in 词_数表:
                表_3.append(词_数表[临.lower()])
            else:
                表_3.append(14999)
            临 = ''
        if len(表_3) != 667:
            # 表_1.append(np.array(表_3[0:-1]))
            # 表_2.append(np.array(表_3[1:]))
            print(表_3)
        else:
            表_1.append(np.array(表_3[0:-1]))
            表_2.append(np.array(表_3[1:]))
        if i % 1000 == 0:
            print("数据转化为numpy数组完成度百分比{}".format(i / len(输入表单) * 100))
        i = i + 1
    print("数据转化为numpy数组完成。")
    输入np = np.array(表_1)
    输出np = np.array(表_2)
    np.savez(numpy数组路径, 输出np=输出np, 输入np=输入np)
5,561 | import json
import numpy as np
def 生成测试用numpy数组(输入表单, 词_数表):
    """Map each character of 输入表单 to its id in 词_数表, case-insensitively.

    Characters whose lower-cased form is missing from 词_数表 map to the
    out-of-vocabulary id 14999.  Returns a 1-D numpy integer array.

    Bug fix: the original tested 字符.lower() in 词_数表 but then looked up
    词_数表[字符], raising KeyError for upper-case characters whose
    lower-case form is in the table; the lookup now matches the test.
    """
    表_1 = []
    for 字符 in 输入表单:
        if 字符.lower() in 词_数表:
            表_1.append(词_数表[字符.lower()])
        else:
            表_1.append(14999)
    输入np = np.array(表_1)
    return (输入np)
5,562 | import json
import numpy as np
def 生成训练用numpy数组_A(输入表单, 词_数表, numpy数组路径):
    """Variant of 生成训练用numpy数组 that additionally drops space tokens.

    Same word-buffering tokenization via 临, OOV id 14999, fixed expected
    length 667 and .npz output.  NOTE(review): the trailing flush tests
    `字符 != ' '` where the other branches test the buffered word (临);
    this looks like a copy-paste slip — confirm whether 临 was meant.
    """
    表_1 = []
    表_2 = []
    i=0
    临=''
    for 表单 in 输入表单:
        表_3=[]
        for 字符 in 表单:
            if (u'\u0041' <= 字符 <= u'\u005a') or (u'\u0061' <= 字符 <= u'\u007a'):
                # ASCII letter: keep buffering the current word
                if 临 == '':
                    临 = 字符
                else:
                    临 = 临 + 字符
            else:
                if 临 == '':
                    if 字符.lower() in 词_数表:
                        # spaces are skipped instead of tokenized
                        if 字符 != ' ':
                            表_3.append(词_数表[字符.lower()])
                    else:
                        表_3.append(14999)
                else:
                    if 临.lower() in 词_数表:
                        if 临 != ' ':
                            表_3.append(词_数表[临.lower() ])
                    else:
                        表_3.append(14999)
                    临=''
                    if 字符.lower() in 词_数表:
                        if 字符 != ' ':
                            表_3.append(词_数表[字符.lower() ])
                    else:
                        表_3.append(14999)
        # flush a word left buffered at end of the sequence
        if 临!='':
            if 临.lower() in 词_数表:
                if 字符 != ' ':
                    表_3.append(词_数表[临.lower() ])
            else:
                表_3.append(14999)
            临 = ''
        if len(表_3)!=667:
            #表_1.append(np.array(表_3[0:-1]))
            #表_2.append(np.array(表_3[1:]))
            print(表_3)
        else:
            表_1.append(np.array(表_3[0:-1]))
            表_2.append(np.array(表_3[1:]))
        if i % 1000 == 0:
            print("数据转化为numpy数组完成度百分比{}".format(i/len(输入表单)*100))
        i = i + 1
    print("数据转化为numpy数组完成。")
    输入np = np.array(表_1)
    输出np = np.array(表_2)
    np.savez(numpy数组路径, 输出np=输出np, 输入np=输入np)
5,563 | import json
import numpy as np
def 读取训练数据_A(路径):
    """Read a JSONL file at 路径 and collect the 'input' field of every
    record, in file order."""
    with open(路径, encoding='utf-8') as f:
        return [json.loads(行)['input'] for 行 in f]
5,564 | import json
import numpy as np
def 生成测试用numpy数组_A(输入表单, 词_数表):
    """Space-skipping, word-buffering tokenizer for test input; returns a
    1-D numpy array of ids (OOV id 14999).

    NOTE(review): the outer `if 字符.lower() in 词_数表` guard silently
    skips characters missing from the table, which makes the inner
    `else: append(14999)` branches for 字符 unreachable, and a word still
    buffered in 临 at end of input is never flushed — confirm intended.
    """
    表_3 = []
    临 = ''
    for 字符 in 输入表单:
        if 字符.lower() in 词_数表:
            if (u'\u0041' <= 字符 <= u'\u005a') or (u'\u0061' <= 字符 <= u'\u007a'):
                # ASCII letter: keep buffering the current word
                if 临 == '':
                    临 = 字符
                else:
                    临 = 临 + 字符
            else:
                if 临 == '':
                    if 字符.lower() in 词_数表:
                        if 字符.lower() != ' ':
                            表_3.append(词_数表[字符.lower()])
                    else:
                        表_3.append(14999)
                else:
                    # non-letter ends the buffered word: emit word, then char
                    if 临.lower() in 词_数表:
                        if 临.lower() != ' ':
                            表_3.append(词_数表[临.lower()])
                    else:
                        表_3.append(14999)
                    临 = ''
                    if 字符.lower() in 词_数表:
                        if 字符.lower() != ' ':
                            表_3.append(词_数表[字符.lower()])
                    else:
                        表_3.append(14999)
    输入np = np.array(表_3)
    return (输入np)
5,565 | import numpy as np
def 状态信息综合(图片张量,操作序列,trg_mask):
    """Pack the three model inputs into a single state dict.

    图片张量 gains a leading batch axis of size 1; 操作序列 and trg_mask
    are stored untouched under their original keys.
    """
    return {
        '图片张量': 图片张量[np.newaxis, :],
        '操作序列': 操作序列,
        'trg_mask': trg_mask,
    }
5,566 | import win32gui, win32ui, win32con
from PIL import Image
from pyminitouch import MNTDevice
def 取图(窗口名称):
    """Capture the window titled 窗口名称 via the Win32 GDI BitBlt path and
    return a PIL image cropped to box (8, 31, 968, 511).

    Works on a background window as long as it is not minimized.  All GDI
    objects are released before returning to avoid handle leaks.
    """
    # Get the handle of the (possibly background) window; it must not be
    # minimized. (The window class name can be found with Spy++.)
    hWnd = win32gui.FindWindow(0,窗口名称) # class name can be obtained with Visual Studio's Spy++ tool
    # Window rectangle, including the non-client area (title bar, borders).
    left, top, right, bot = win32gui.GetWindowRect(hWnd)
    width = right - left
    height = bot - top
    # Device context covering the whole window, frame included.
    hWndDC = win32gui.GetWindowDC(hWnd)
    # Wrap the raw DC handle in a device-context object.
    mfcDC = win32ui.CreateDCFromHandle(hWndDC)
    # Memory device context to blit into.
    saveDC = mfcDC.CreateCompatibleDC()
    # Bitmap object that will hold the screenshot.
    saveBitMap = win32ui.CreateBitmap()
    # Allocate pixel storage for the bitmap.
    saveBitMap.CreateCompatibleBitmap(mfcDC, width, height)
    # Select the bitmap into the memory DC.
    saveDC.SelectObject(saveBitMap)
    # Copy the window contents into the memory DC.
    saveDC.BitBlt((0, 0), (width, height), mfcDC, (0, 0), win32con.SRCCOPY)
    bmpinfo = saveBitMap.GetInfo()
    bmpstr = saveBitMap.GetBitmapBits(True)
    ### build a PIL image from the raw BGRX pixel buffer
    im_PIL = Image.frombuffer('RGB',(bmpinfo['bmWidth'],bmpinfo['bmHeight']),bmpstr,'raw','BGRX')
    #im_PIL= Image.frombuffer('RGB', (bmpinfo['bmWidth'], bmpinfo['bmHeight']), bmpstr)
    #im_PIL =Image.frombytes('RGB',(bmpinfo['bmWidth'],bmpinfo['bmHeight']),bmpstr)
    # Crop away the frame/title bar, keeping only the game viewport.
    box = (8,31,968,511)
    im2 = im_PIL.crop(box)
    #im2.save('./dd2d.jpg')
    # Release all GDI resources before returning.
    win32gui.DeleteObject(saveBitMap.GetHandle())
    saveDC.DeleteDC()
    mfcDC.DeleteDC()
    win32gui.ReleaseDC(hWnd, hWndDC)
    return im2
5,567 | import torch
import numpy as np
from torch.autograd import Variable
global max_src_in_batch, max_tgt_in_batch
The provided code snippet includes necessary dependencies for implementing the `batch_size_fn` function. Write a Python function `def batch_size_fn(new, count, sofar)` to solve the following problem:
Keep augmenting batch and calculate total number of tokens + padding.
Here is the function:
def batch_size_fn(new, count, sofar):
    """Keep augmenting batch and calculate total number of tokens + padding.

    Tracks the running maximum source/target lengths in module globals and
    returns the padded token count of the batch so far.
    """
    global max_src_in_batch, max_tgt_in_batch
    if count == 1:
        # first example of a fresh batch: reset the running maxima
        max_src_in_batch = 0
        max_tgt_in_batch = 0
    max_src_in_batch = max(max_src_in_batch, len(new.src))
    # +2 presumably accounts for start/end tokens on the target side — confirm
    max_tgt_in_batch = max(max_tgt_in_batch, len(new.trg) + 2)
    return max(count * max_src_in_batch, count * max_tgt_in_batch)
5,568 | import os
import torchvision
from Batch import create_masks
from 辅助功能 import 状态信息综合
from 取训练数据 import *
from 杂项 import *
from resnet_utils import myResnet
from 运行辅助 import *
from pynput.keyboard import Controller, Key, Listener
from pynput import keyboard
import time, threading
from 模型_策略梯度 import 智能体
with Listener(on_press=on_press, on_release=on_release) as listener:
    listener.join()
def start_listen():
    """Block on a pynput keyboard Listener, dispatching to on_press/on_release."""
    with Listener(on_press=on_press, on_release=on_release) as listener:
        listener.join()
5,569 | import os
import torchvision
from Batch import create_masks
from 辅助功能 import 状态信息综合
from 取训练数据 import *
from 杂项 import *
from resnet_utils import myResnet
from 运行辅助 import *
from pynput.keyboard import Controller, Key, Listener
from pynput import keyboard
import time, threading
from 模型_策略梯度 import 智能体
W键按下=False
S键按下=False
A键按下=False
D键按下=False
Q键按下=False
if Q键按下 == True:
return ('移动停')
elif W键按下 == True and S键按下 == False and A键按下 == False and D键按下 == False:
return ('上移')
elif W键按下 == False and S键按下 == True and A键按下 == False and D键按下 == False:
return ('下移')
elif W键按下 == False and S键按下 == False and A键按下 == True and D键按下 == False:
return ('左移')
elif W键按下 == False and S键按下 == False and A键按下 == False and D键按下 == True:
return ('右移')
elif W键按下 == True and S键按下 == False and A键按下 == True and D键按下 == False:
return ('左上移')
elif W键按下 == True and S键按下 == False and A键按下 == False and D键按下 == True:
return ('右上移')
elif W键按下 == False and S键按下 == True and A键按下 == True and D键按下 == False:
return ('左下移')
elif W键按下 == False and S键按下 == True and A键按下 == False and D键按下 == True:
return ('右下移')
else:
    return ('')
def 处理方向():
    """Translate the global Q/WASD key-press flags into a movement command.

    Q pressed always yields '移动停'; otherwise the exact combination of
    the four direction flags selects one of eight moves, and any other
    combination (none pressed, or opposing keys together) yields ''.
    """
    if Q键按下 == True:
        return ('移动停')
    方向表 = {
        (True, False, False, False): '上移',
        (False, True, False, False): '下移',
        (False, False, True, False): '左移',
        (False, False, False, True): '右移',
        (True, False, True, False): '左上移',
        (True, False, False, True): '右上移',
        (False, True, True, False): '左下移',
        (False, True, False, True): '右下移',
    }
    return 方向表.get((W键按下, S键按下, A键按下, D键按下), '')
5,570 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
def gelu(x):
    """Tanh approximation of the GELU activation (Hendrycks & Gimpel, 2016)."""
    内部 = math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))
    return 0.5 * x * (1 + torch.tanh(内部))
5,571 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
def attention(q, k, v, d_k, mask=None, dropout=None):
    """Scaled dot-product attention.

    Scores are q·kᵀ / sqrt(d_k); positions where mask == 0 are suppressed
    with -1e9 before the softmax, and the optional dropout module is
    applied to the attention weights.  Returns the weighted sum of v.
    """
    权重 = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        权重 = 权重.masked_fill(mask.unsqueeze(1) == 0, -1e9)
    权重 = F.softmax(权重, dim=-1)
    if dropout is not None:
        权重 = dropout(权重)
    return torch.matmul(权重, v)
5,572 | import asyncio
import operator
import re
import uuid
from html import unescape
from random import choice, shuffle
from . import LOGS
from telethon.errors.rpcerrorlist import (
BotMethodInvalidError,
ChatSendStickersForbiddenError,
)
from telethon.events import Raw
from telethon.tl.types import InputMediaPoll, Poll, PollAnswer, UpdateMessagePollVote
from pyUltroid._misc._decorators import ultroid_cmd
from pyUltroid.fns.helper import inline_mention
from pyUltroid.fns.tools import async_searcher
from . import *
async def akina(e):
    """Start an Akinator session keyed by (chat id, message id).

    Posts the game card through the assistant's inline query; when inline
    mode is unavailable (BotMethodInvalidError) falls back to sending the
    photo with an inline start button.  Any other failure is reported to
    the chat.  NOTE(review): games.update replaces this chat's whole game
    dict, ending any other running game in the chat — confirm intended.
    """
    if not akinator:
        return
    sta = akinator.Akinator()
    games.update({e.chat_id: {e.id: sta}})
    try:
        m = await e.client.inline_query(asst.me.username, f"aki_{e.chat_id}_{e.id}")
        await m[0].click(e.chat_id)
    except BotMethodInvalidError:
        await asst.send_file(
            e.chat_id,
            aki_photo,
            buttons=Button.inline(get_string("aki_2"), data=f"aki_{e.chat_id}_{e.id}"),
        )
    except Exception as er:
        return await e.eor(f"**ERROR :** `{er}`")
    if e.out:
        await e.delete()
async def _akokk(e):
    """Thin alias handler that forwards the event to akina."""
    await akina(e)
5,573 | import asyncio
import operator
import re
import uuid
from html import unescape
from random import choice, shuffle
from . import LOGS
from telethon.errors.rpcerrorlist import (
BotMethodInvalidError,
ChatSendStickersForbiddenError,
)
from telethon.events import Raw
from telethon.tl.types import InputMediaPoll, Poll, PollAnswer, UpdateMessagePollVote
from pyUltroid._misc._decorators import ultroid_cmd
from pyUltroid.fns.helper import inline_mention
from pyUltroid.fns.tools import async_searcher
from . import *
games = {}
async def doai(e):
    """Inline-button handler that starts the Akinator questioning.

    Decodes "<chat>_<msg>" from the callback data, starts the stored game
    in child mode, and shows the first question with answer buttons.
    Answers if the game was not found (e.g. after a restart).
    """
    adt = e.pattern_match.group(1).strip().decode("utf-8")
    dt = adt.split("_")
    ch = int(dt[0])
    mid = int(dt[1])
    await e.edit(get_string("com_1"))
    try:
        qu = games[ch][mid].start_game(child_mode=True)
        # child mode should be promoted
    except KeyError:
        return await e.answer(get_string("aki_1"), alert=True)
    bts = [Button.inline(o, f"aka_{adt}_{o}") for o in ["Yes", "No", "Idk"]]
    cts = [Button.inline(o, f"aka_{adt}_{o}") for o in ["Probably", "Probably Not"]]
    bts = [bts, cts]
    # ignored Back Button since it makes the Pagination looks Bad
    await e.edit(f"Q. {qu}", buttons=bts)
5,574 | import asyncio
import operator
import re
import uuid
from html import unescape
from random import choice, shuffle
from . import LOGS
from telethon.errors.rpcerrorlist import (
BotMethodInvalidError,
ChatSendStickersForbiddenError,
)
from telethon.events import Raw
from telethon.tl.types import InputMediaPoll, Poll, PollAnswer, UpdateMessagePollVote
from pyUltroid._misc._decorators import ultroid_cmd
from pyUltroid.fns.helper import inline_mention
from pyUltroid.fns.tools import async_searcher
from . import *
games = {}
async def okah(e):
    """Handle one Akinator answer button press.

    Feeds the chosen answer to the stored game; once progression reaches
    80 the game's first guess (name, description, picture) is shown,
    otherwise the next question is posted with fresh answer buttons.
    """
    mk = e.pattern_match.group(1).decode("utf-8").split("_")
    ch = int(mk[0])
    mid = int(mk[1])
    ans = mk[2]
    try:
        gm = games[ch][mid]
    except KeyError:
        await e.answer(get_string("aki_3"))
        return
    text = gm.answer(ans)
    if gm.progression >= 80:
        # confident enough: reveal the guess and end the exchange
        gm.win()
        gs = gm.first_guess
        text = "It's " + gs["name"] + "\n " + gs["description"]
        return await e.edit(text, file=gs["absolute_picture_path"])
    bts = [Button.inline(o, f"aka_{ch}_{mid}_{o}") for o in ["Yes", "No", "Idk"]]
    cts = [
        Button.inline(o, f"aka_{ch}_{mid}_{o}") for o in ["Probably", "Probably Not"]
    ]
    bts = [bts, cts]
    await e.edit(text, buttons=bts)
5,575 | import asyncio
import operator
import re
import uuid
from html import unescape
from random import choice, shuffle
from . import LOGS
from telethon.errors.rpcerrorlist import (
BotMethodInvalidError,
ChatSendStickersForbiddenError,
)
from telethon.events import Raw
from telethon.tl.types import InputMediaPoll, Poll, PollAnswer, UpdateMessagePollVote
from pyUltroid._misc._decorators import ultroid_cmd
from pyUltroid.fns.helper import inline_mention
from pyUltroid.fns.tools import async_searcher
from . import *
aki_photo = "https://graph.org/file/3cc8825c029fd0cab9edc.jpg"
async def eiagx(e):
    """Inline-query handler: answer with the Akinator photo article whose
    button carries the original query text as callback data."""
    bts = Button.inline(get_string("aki_2"), data=e.text)
    ci = types.InputWebDocument(aki_photo, 0, "image/jpeg", [])
    ans = [
        await e.builder.article(
            "Akinator",
            type="photo",
            content=ci,
            text="Akinator",
            thumb=ci,
            buttons=bts,
            include_media=True,
        )
    ]
    await e.answer(ans)
5,576 | import asyncio
import operator
import re
import uuid
from html import unescape
from random import choice, shuffle
from . import LOGS
from telethon.errors.rpcerrorlist import (
BotMethodInvalidError,
ChatSendStickersForbiddenError,
)
from telethon.events import Raw
from telethon.tl.types import InputMediaPoll, Poll, PollAnswer, UpdateMessagePollVote
from pyUltroid._misc._decorators import ultroid_cmd
from pyUltroid.fns.helper import inline_mention
from pyUltroid.fns.tools import async_searcher
from . import *
# Banner images for the games menu; magic() picks one at random.
GIMAGES = [
    "https://graph.org/file/1c51015bae5205a65fd69.jpg",
    "https://imgwhale.xyz/3xyr322l64j9590",
]
async def magic(event):
    """Reply with the games menu: a random banner plus the quiz/cancel buttons."""
    menu = [
        [Button.inline("Trivia Quiz", "trzia")],
        [Button.inline("Cancel ❌", "delit")],
    ]
    await event.reply(get_string("games_1"), file=choice(GIMAGES), buttons=menu)
5,577 | import asyncio
import operator
import re
import uuid
from html import unescape
from random import choice, shuffle
from . import LOGS
from telethon.errors.rpcerrorlist import (
BotMethodInvalidError,
ChatSendStickersForbiddenError,
)
from telethon.events import Raw
from telethon.tl.types import InputMediaPoll, Poll, PollAnswer, UpdateMessagePollVote
from pyUltroid._misc._decorators import ultroid_cmd
from pyUltroid.fns.helper import inline_mention
from pyUltroid.fns.tools import async_searcher
from . import *
async def delete_it(e):
    """Callback for the "delit" button: remove the menu message."""
    await e.delete()
5,578 | import asyncio
import operator
import re
import uuid
from html import unescape
from random import choice, shuffle
from . import LOGS
from telethon.errors.rpcerrorlist import (
BotMethodInvalidError,
ChatSendStickersForbiddenError,
)
from telethon.events import Raw
from telethon.tl.types import InputMediaPoll, Poll, PollAnswer, UpdateMessagePollVote
from pyUltroid._misc._decorators import ultroid_cmd
from pyUltroid.fns.helper import inline_mention
from pyUltroid.fns.tools import async_searcher
from . import *
async def ct_spam(e):
    """Countdown-button callback: alert the clicker with the remaining seconds."""
    seconds = e.data_match.group(1).decode("utf-8")
    await e.answer(f"Wait {seconds} seconds..", alert=True)
5,579 | import asyncio
import operator
import re
import uuid
from html import unescape
from random import choice, shuffle
from . import LOGS
from telethon.errors.rpcerrorlist import (
BotMethodInvalidError,
ChatSendStickersForbiddenError,
)
from telethon.events import Raw
from telethon.tl.types import InputMediaPoll, Poll, PollAnswer, UpdateMessagePollVote
from pyUltroid._misc._decorators import ultroid_cmd
from pyUltroid.fns.helper import inline_mention
from pyUltroid.fns.tools import async_searcher
from . import *
# Cache for the category keyboard built by choose_cata() (key: "category").
TR_BTS = {}
# Difficulty labels accepted by the Open Trivia DB API.
DIFI_KEYS = ["Easy", "Medium", "Hard"]
# chat_id -> {user_id: score} for quizzes currently running in that chat.
TRIVIA_CHATS = {}
# poll_id -> {"chat": chat_id, "answer": correct-option bytes id}.
POLLS = {}
# Congratulation sticker file ids; a random one is sent when a quiz ends with scores.
CONGO_STICKER = [
    "CAADAgADSgIAAladvQrJasZoYBh68AI",
    "CAADAgADXhIAAuyZKUl879mlR_dkOwI",
    "CAADAgADpQAD9wLID-xfZCDwOI5LAg",
    "CAADAgADjAADECECEFZM-SrKO9GgAg",
    "CAADAgADSwIAAj-VzArAzNCDiGWAHAI",
    "CAADAgADhQADwZxgDIuMHR9IU10iAg",
    "CAADAgADiwMAAsSraAuoe2BwYu1sdQI",
]
from . import *
def inline_mention(user, custom=None, html=False):
async def choose_cata(event):
    """Trivia setup callback chain: category -> difficulty -> count -> timing -> run.

    The callback payload after "trzia" starts with a stage marker:
    "" (show categories), "d" (difficulty), "c" (question count),
    "t" (seconds per question), "s" (start the quiz in this chat).
    """
    match = event.data_match.group(1).decode("utf-8")
    if not match:
        # Stage 1: list quiz categories (fetched once, then cached in TR_BTS).
        if TR_BTS.get("category"):
            buttons = TR_BTS["category"]
        else:
            req = (
                await async_searcher(
                    "https://opentdb.com/api_category.php", re_json=True
                )
            )["trivia_categories"]
            btt = []
            for i in req:
                name = i["name"]
                # Drop the "Entertainment:"/"Science:" style prefix.
                if ":" in name:
                    name = name.split(":")[1]
                btt.append(Button.inline(name, f"trziad_{i['id']}"))
            # Two buttons per row; an odd leftover gets its own row.
            buttons = list(zip(btt[::2], btt[1::2]))
            if len(btt) % 2 == 1:
                buttons.append((btt[-1],))
            buttons.append([Button.inline("Cancel ❌", "delit")])
            TR_BTS.update({"category": buttons})
        text = get_string("games_2")
    elif match[0] == "d":
        # Stage 2: pick a difficulty for the chosen category id.
        cat = match[1:]
        buttons = [[Button.inline(i, f"trziac{cat}_{i}") for i in DIFI_KEYS]]
        buttons.append(get_back_button("trzia"))
        text = get_string("games_3")
    elif match[0] == "c":
        # Stage 3: pick how many questions (10/30/50).
        m = match[1:]
        buttons = [[Button.inline(str(i), f"trziat{m}_{i}") for i in range(10, 70, 20)]]
        text = get_string("games_4")
    elif match[0] == "t":
        # Stage 4: pick the seconds allowed per question.
        m_ = match[1:]
        buttons = [
            [Button.inline(str(i), f"trzias{m_}_{i}") for i in [10, 30, 60, 120]]
        ]
        text = get_string("games_5")
    elif match[0] == "s":
        # Final stage: run the quiz in this chat.
        chat = event.chat_id
        cat, le, nu, in_ = match[2:].split("_")
        msg = await event.edit(get_string("games_6").format(le, nu))
        # Five-second visual countdown before the first question.
        for i in reversed(range(5)):
            msg = await msg.edit(buttons=Button.inline(f"{i} ⏰", f"ctdown{i}"))
            await asyncio.sleep(1)
        await msg.edit(
            msg.text + "\n\n• Send /cancel to stop the Quiz...", buttons=None
        )
        qsss = await async_searcher(
            f"https://opentdb.com/api.php?amount={nu}&category={cat}&difficulty={le.lower()}",
            re_json=True,
        )
        qs = qsss["results"]
        if not qs:
            await event.respond("Sorry, No Question Found for the given Criteria..")
            await event.delete()
            return
        TRIVIA_CHATS.update({chat: {}})
        for copper, q in enumerate(qs):
            # /cancel puts a "cancel" flag into this chat's score dict.
            if TRIVIA_CHATS[chat].get("cancel") is not None:
                break
            # Unique id attached to the correct PollAnswer; pollish() compares it.
            ansi = str(uuid.uuid1()).split("-")[0].encode()
            opts = [PollAnswer(unescape(q["correct_answer"]), ansi)]
            [
                opts.append(
                    PollAnswer(unescape(a), str(uuid.uuid1()).split("-")[0].encode())
                )
                for a in q["incorrect_answers"]
            ]
            shuffle(opts)
            poll = InputMediaPoll(
                Poll(
                    0,
                    f"[{copper+1}]. " + unescape(q["question"]),
                    answers=opts,
                    public_voters=True,
                    quiz=True,
                    close_period=int(in_),
                ),
                correct_answers=[ansi],
                solution="Join @TeamUltroid",
                solution_entities=[],
            )
            m_ = await event.client.send_message(chat, file=poll)
            # Registered so incoming votes can be scored by the poll handler.
            POLLS.update({m_.poll.poll.id: {"chat": m_.chat_id, "answer": ansi}})
            await asyncio.sleep(int(in_))
        if not TRIVIA_CHATS[chat]:
            await event.respond(
                "No-One Got Any Score in the Quiz!\nBetter Luck Next Time!"
            )
        else:
            try:
                await event.respond(file=choice(CONGO_STICKER))
            except ChatSendStickersForbiddenError:
                pass
            LBD = "🎯 **Scoreboard of the Quiz.**\n\n"
            TRC = TRIVIA_CHATS[chat]
            if "cancel" in TRC.keys():
                del TRC["cancel"]
            # Highest score first.
            for userid, user_score in dict(
                sorted(TRC.items(), key=operator.itemgetter(1), reverse=True)
            ).items():
                user = inline_mention(await event.client.get_entity(userid))
                LBD += f"••• {user} - {user_score}\n"
            await event.respond(LBD)
        del TRIVIA_CHATS[chat]
        # Drop this chat's polls from the vote-tracking map.
        list_ = list(POLLS.copy().keys())
        for key in list_:
            if POLLS[key]["chat"] == chat:
                del POLLS[key]
        return
    await event.edit(text, buttons=buttons)
5,580 | import asyncio
import operator
import re
import uuid
from html import unescape
from random import choice, shuffle
from . import LOGS
from telethon.errors.rpcerrorlist import (
BotMethodInvalidError,
ChatSendStickersForbiddenError,
)
from telethon.events import Raw
from telethon.tl.types import InputMediaPoll, Poll, PollAnswer, UpdateMessagePollVote
from pyUltroid._misc._decorators import ultroid_cmd
from pyUltroid.fns.helper import inline_mention
from pyUltroid.fns.tools import async_searcher
from . import *
# chat_id -> {user_id: score} for quizzes currently running in that chat.
TRIVIA_CHATS = {}
# poll_id -> {"chat": chat_id, "answer": correct-option bytes id}.
POLLS = {}


async def pollish(eve):
    """Poll-vote handler: award a point for a correct quiz answer.

    Receives every UpdateMessagePollVote; ignores votes on polls that are
    not tracked, chats with no active quiz, and wrong answers.
    """
    poll = POLLS.get(eve.poll_id)
    # Fix: unknown poll ids used to raise AttributeError (None["chat"]).
    if poll is None:
        return
    if poll["chat"] not in TRIVIA_CHATS.keys():
        return
    if poll["answer"] != eve.options[0]:
        return
    chat = poll["chat"]
    user = eve.user_id
    # First correct answer starts the user's score at 1.
    if not TRIVIA_CHATS.get(chat, {}).get(user):
        TRIVIA_CHATS[chat][user] = 1
    else:
        TRIVIA_CHATS[chat][user] += 1
5,581 | import asyncio
import operator
import re
import uuid
from html import unescape
from random import choice, shuffle
from . import LOGS
from telethon.errors.rpcerrorlist import (
BotMethodInvalidError,
ChatSendStickersForbiddenError,
)
from telethon.events import Raw
from telethon.tl.types import InputMediaPoll, Poll, PollAnswer, UpdateMessagePollVote
from pyUltroid._misc._decorators import ultroid_cmd
from pyUltroid.fns.helper import inline_mention
from pyUltroid.fns.tools import async_searcher
from . import *
# chat_id -> {user_id: score} for quizzes currently running in that chat.
TRIVIA_CHATS = {}


async def cancelish(event):
    """`/cancel` during a quiz: flag this chat's session so the quiz loop stops."""
    session = TRIVIA_CHATS.get(event.chat_id)
    # Fix: no quiz running here used to raise AttributeError (None.update).
    if session is None:
        return
    session.update({"cancel": True})
    await event.respond("Cancelled!")
5,582 | import re
from . import (
Button,
ULTConfig,
callback,
get_back_button,
get_languages,
get_string,
udB,
)
async def setlang(event):
    """Show the language picker: two languages per row, plus a back button."""
    languages = get_languages()
    lang_buttons = [
        Button.inline(
            f"{languages[code]['natively']} [{code.lower()}]",
            data=f"set_{code}",
        )
        for code in languages
    ]
    rows = list(zip(lang_buttons[::2], lang_buttons[1::2]))
    # An odd language count leaves the last one on its own row.
    if len(lang_buttons) % 2 == 1:
        rows.append((lang_buttons[-1],))
    rows.append([Button.inline("« Back", data="mainmenu")])
    await event.edit(get_string("ast_4"), buttons=rows)
5,583 | import re
from . import (
Button,
ULTConfig,
callback,
get_back_button,
get_languages,
get_string,
udB,
)
async def settt(event):
    """Apply the language chosen from the picker, persist it, and confirm."""
    lang = event.data_match.group(1).decode("UTF-8")
    languages = get_languages()
    ULTConfig.lang = lang
    # English is the default, so it is stored as "no key" rather than a value.
    if lang == "en":
        udB.del_key("language")
    else:
        udB.set_key("language", lang)
    await event.edit(
        f"Your language has been set to {languages[lang]['natively']} [{lang}].",
        buttons=get_back_button("lang"),
    )
5,584 | import os
from telethon.errors.rpcerrorlist import UserNotParticipantError
from telethon.tl.custom import Button
from telethon.tl.functions.channels import GetFullChannelRequest
from telethon.tl.functions.messages import GetFullChatRequest
from telethon.tl.types import Channel, Chat
from telethon.utils import get_display_name
from pyUltroid.dB.base import KeyManager
from pyUltroid.dB.botchat_db import *
from pyUltroid.fns.helper import inline_mention
from . import *
# Chats a user must join before the PM bot forwards their messages (falsy = disabled).
FSUB = udB.get_key("PMBOT_FSUB")
# chat_id -> resolved invite link; filled lazily by on_new_mssg().
CACHE = {}
from . import *
def inline_mention(user, custom=None, html=False):
    """Return a clickable mention string for *user* in HTML or Markdown."""
    if custom:
        mention_text = custom
    else:
        mention_text = get_display_name(user) or "Deleted Account"
    if isinstance(user, types.User):
        link = f"tg://user?id={user.id}"
        return (
            f"<a href={link}>{mention_text}</a>"
            if html
            else f"[{mention_text}]({link})"
        )
    if isinstance(user, types.Channel) and user.username:
        link = f"https://t.me/{user.username}"
        return (
            f"<a href={link}>{mention_text}</a>"
            if html
            else f"[{mention_text}]({link})"
        )
    # Channels without a username (and anything else) get plain text.
    return mention_text
async def on_new_mssg(event):
    """PM-bot inbox: forward a user's message to the owner, enforcing force-sub.

    Skips bot commands and the owner's own messages; if FSUB is configured,
    the user must be a member of every listed chat before forwarding happens.
    """
    who = event.sender_id
    # doesn't reply to that user anymore
    if event.text.startswith("/") or who == OWNER_ID:
        return
    if FSUB:
        MSG = ""
        BTTS = []
        for chat in FSUB:
            try:
                await event.client.get_permissions(chat, event.sender_id)
            except UserNotParticipantError:
                # Not a member: build a join prompt with a button per chat.
                if not MSG:
                    MSG += get_string("pmbot_1")
                try:
                    uri = ""
                    TAHC_ = await event.client.get_entity(chat)
                    if hasattr(TAHC_, "username") and TAHC_.username:
                        uri = f"t.me/{TAHC_.username}"
                    elif CACHE.get(chat):
                        # Private chat whose invite link was resolved earlier.
                        uri = CACHE[chat]
                    else:
                        # Resolve and cache the exported invite link.
                        if isinstance(TAHC_, Channel):
                            FUGB = await event.client(GetFullChannelRequest(chat))
                        elif isinstance(TAHC_, Chat):
                            FUGB = await event.client(GetFullChatRequest(chat))
                        else:
                            return
                        if FUGB.full_chat.exported_invite:
                            CACHE[chat] = FUGB.full_chat.exported_invite.link
                            uri = CACHE[chat]
                    BTTS.append(Button.url(get_display_name(TAHC_), uri))
                except Exception as er:
                    LOGS.exception(f"Error On PmBot Force Sub!\n - {chat} \n{er}")
        if MSG and BTTS:
            return await event.reply(MSG, buttons=BTTS)
    xx = await event.forward_to(OWNER_ID)
    if event.fwd_from:
        # Forward-of-a-forward hides the origin; attach the real sender.
        await xx.reply(f"From {inline_mention(event.sender)} [`{event.sender_id}`]")
    # Remember which user this forwarded message maps to, for owner replies.
    add_stuff(xx.id, who)
5,585 | import os
from telethon.errors.rpcerrorlist import UserNotParticipantError
from telethon.tl.custom import Button
from telethon.tl.functions.channels import GetFullChannelRequest
from telethon.tl.functions.messages import GetFullChatRequest
from telethon.tl.types import Channel, Chat
from telethon.utils import get_display_name
from pyUltroid.dB.base import KeyManager
from pyUltroid.dB.botchat_db import *
from pyUltroid.fns.helper import inline_mention
from . import *
from . import *
def inline_mention(user, custom=None, html=False):
    """Return a clickable mention string for *user* in HTML or Markdown."""
    if custom:
        mention_text = custom
    else:
        mention_text = get_display_name(user) or "Deleted Account"
    if isinstance(user, types.User):
        link = f"tg://user?id={user.id}"
        return (
            f"<a href={link}>{mention_text}</a>"
            if html
            else f"[{mention_text}]({link})"
        )
    if isinstance(user, types.Channel) and user.username:
        link = f"https://t.me/{user.username}"
        return (
            f"<a href={link}>{mention_text}</a>"
            if html
            else f"[{mention_text}]({link})"
        )
    # Channels without a username (and anything else) get plain text.
    return mention_text
async def on_out_mssg(event):
    """Owner-side PM-bot handler: relay the owner's replies back to the mapped user."""
    to_user = get_who(event.reply_to_msg_id)
    if event.text.startswith("/who"):
        # /who: reveal which user the replied-to forwarded message came from.
        try:
            entity = await asst.get_entity(to_user)
            photo = await event.client.download_profile_photo(entity.id)
            await event.reply(
                f"• **Name :** {get_display_name(entity)}\n• **ID :** `{entity.id}`\n• **Link :** {inline_mention(entity)}",
                file=photo,
            )
            if photo:
                os.remove(photo)
            return
        except BaseException as er:
            return await event.reply(f"**ERROR : **{str(er)}")
    elif event.text.startswith("/"):
        # Any other command is not a relay message.
        return
    if to_user:
        await asst.send_message(to_user, event.message)
5,586 | import os
from telethon.errors.rpcerrorlist import UserNotParticipantError
from telethon.tl.custom import Button
from telethon.tl.functions.channels import GetFullChannelRequest
from telethon.tl.functions.messages import GetFullChatRequest
from telethon.tl.types import Channel, Chat
from telethon.utils import get_display_name
from pyUltroid.dB.base import KeyManager
from pyUltroid.dB.botchat_db import *
from pyUltroid.fns.helper import inline_mention
from . import *
# Persistent blacklist of user ids banned from the PM bot.
botb = KeyManager("BOTBLS", cast=list)
async def banhammer(event):
    """`/ban` (as a reply): blacklist the mapped user from the PM bot and notify them."""
    if not event.is_reply:
        return await event.reply(get_string("pmbot_2"))
    user_id = get_who(event.reply_to_msg_id)
    if botb.contains(user_id):
        # Already banned - nothing to do.
        return await event.reply(get_string("pmbot_3"))
    botb.add(user_id)
    await event.reply(f"#BAN\nUser : {user_id}")
    await asst.send_message(user_id, get_string("pmbot_4"))
5,587 | import os
from telethon.errors.rpcerrorlist import UserNotParticipantError
from telethon.tl.custom import Button
from telethon.tl.functions.channels import GetFullChannelRequest
from telethon.tl.functions.messages import GetFullChatRequest
from telethon.tl.types import Channel, Chat
from telethon.utils import get_display_name
from pyUltroid.dB.base import KeyManager
from pyUltroid.dB.botchat_db import *
from pyUltroid.fns.helper import inline_mention
from . import *
# Persistent blacklist of user ids banned from the PM bot.
botb = KeyManager("BOTBLS", cast=list)
async def unbanhammer(event):
    """`/unban` (as a reply): lift the PM-bot ban on the mapped user."""
    if not event.is_reply:
        return await event.reply(get_string("pmbot_5"))
    user_id = get_who(event.reply_to_msg_id)
    if not botb.contains(user_id):
        # Not banned - nothing to lift.
        return await event.reply(get_string("pmbot_6"))
    botb.remove(user_id)
    await event.reply(f"#UNBAN\nUser : {user_id}")
    await asst.send_message(user_id, get_string("pmbot_7"))
5,588 | from datetime import datetime
from pytz import timezone as tz
from telethon import Button, events
from telethon.errors.rpcerrorlist import MessageDeleteForbiddenError
from telethon.utils import get_display_name
from pyUltroid._misc import SUDO_M, owner_and_sudos
from pyUltroid.dB.base import KeyManager
from pyUltroid.fns.helper import inline_mention
from strings import get_string
from . import *
# Custom start-info text set by the owner; falls back to a generated card.
Owner_info_msg = udB.get_key("BOT_INFO_START")
# True only when the owner supplied custom text (own() appends branding then).
custom_info = True
if Owner_info_msg is None:
    custom_info = False
    Owner_info_msg = f"""
**Owner** - {OWNER_NAME}
**OwnerID** - `{OWNER_ID}`
**Message Forwards** - {udB.get_key("PMBOT")}
**Ultroid [v{ultroid_version}](https://github.com/TeamUltroid/Ultroid), powered by @TeamUltroid**
"""
from . import *
def inline_mention(user, custom=None, html=False):
    """Return a clickable mention string for *user* in HTML or Markdown."""
    if custom:
        mention_text = custom
    else:
        mention_text = get_display_name(user) or "Deleted Account"
    if isinstance(user, types.User):
        link = f"tg://user?id={user.id}"
        return (
            f"<a href={link}>{mention_text}</a>"
            if html
            else f"[{mention_text}]({link})"
        )
    if isinstance(user, types.Channel) and user.username:
        link = f"https://t.me/{user.username}"
        return (
            f"<a href={link}>{mention_text}</a>"
            if html
            else f"[{mention_text}]({link})"
        )
    # Channels without a username (and anything else) get plain text.
    return mention_text
async def own(event):
    """'Info.' button callback: render and show the owner/bot info card."""
    msg = Owner_info_msg.format(
        mention=event.sender.mention, me=inline_mention(ultroid_bot.me)
    )
    # Branding is appended only when the owner supplied custom info text.
    if custom_info:
        msg += "\n\n• Powered by **@TeamUltroid**"
    await event.edit(
        msg,
        link_preview=False,
        buttons=[Button.inline("Close", data="closeit")],
    )
5,589 | from datetime import datetime
from pytz import timezone as tz
from telethon import Button, events
from telethon.errors.rpcerrorlist import MessageDeleteForbiddenError
from telethon.utils import get_display_name
from pyUltroid._misc import SUDO_M, owner_and_sudos
from pyUltroid.dB.base import KeyManager
from pyUltroid.fns.helper import inline_mention
from strings import get_string
from . import *
async def closet(event):
    """'Close' button callback: delete the message; alert if it is too old to delete."""
    try:
        await event.delete()
    except MessageDeleteForbiddenError:
        await event.answer("MESSAGE_TOO_OLD", alert=True)
5,590 | from datetime import datetime
from pytz import timezone as tz
from telethon import Button, events
from telethon.errors.rpcerrorlist import MessageDeleteForbiddenError
from telethon.utils import get_display_name
from pyUltroid._misc import SUDO_M, owner_and_sudos
from pyUltroid.dB.base import KeyManager
from pyUltroid.fns.helper import inline_mention
from strings import get_string
from . import *
# Custom start-info text set by the owner; falls back to a generated card.
Owner_info_msg = udB.get_key("BOT_INFO_START")
# NOTE(review): unlike the earlier copy of this block, custom_info is only
# bound in the None branch here - later readers of custom_info may hit a
# NameError when a custom info text exists. TODO confirm intended.
if Owner_info_msg is None:
    custom_info = False
    Owner_info_msg = f"""
**Owner** - {OWNER_NAME}
**OwnerID** - `{OWNER_ID}`
**Message Forwards** - {udB.get_key("PMBOT")}
**Ultroid [v{ultroid_version}](https://github.com/TeamUltroid/Ultroid), powered by @TeamUltroid**
"""
# Settings submenu keyboard; each "cbs_*" callback opens that settings page.
_settings = [
    [
        Button.inline("API Kᴇʏs", data="cbs_apiset"),
        Button.inline("Pᴍ Bᴏᴛ", data="cbs_chatbot"),
    ],
    [
        Button.inline("Aʟɪᴠᴇ", data="cbs_alvcstm"),
        Button.inline("PᴍPᴇʀᴍɪᴛ", data="cbs_ppmset"),
    ],
    [
        Button.inline("Fᴇᴀᴛᴜʀᴇs", data="cbs_otvars"),
        Button.inline("VC Sᴏɴɢ Bᴏᴛ", data="cbs_vcb"),
    ],
    [Button.inline("« Bᴀᴄᴋ", data="mainmenu")],
]
# Main start-menu keyboard shown to owner/sudo users.
_start = [
    [
        Button.inline("Lᴀɴɢᴜᴀɢᴇ 🌐", data="lang"),
        Button.inline("Sᴇᴛᴛɪɴɢs ⚙️", data="setter"),
    ],
    [
        Button.inline("Sᴛᴀᴛs ✨", data="stat"),
        Button.inline("Bʀᴏᴀᴅᴄᴀsᴛ 📻", data="bcast"),
    ],
    [Button.inline("TɪᴍᴇZᴏɴᴇ 🌎", data="tz")],
]
from .. import *
# NOTE(review): _SudoManager is not defined in this view - presumably supplied
# by the wildcard import above; verify it resolves at import time.
SUDO_M = _SudoManager()
# Convenience alias: called as owner_and_sudos() to get owner + sudo user ids.
owner_and_sudos = SUDO_M.owner_and_sudos
class KeyManager:
    """Thin helper around a single udB key holding a list or dict.

    `cast` declares the expected container type; `get()` normalises whatever
    is stored into that type so callers can treat the key uniformly.
    """

    def __init__(self, key, cast=None) -> None:
        self._key = key
        self._cast = cast

    def get(self):
        """Return the stored value coerced to `cast` (empty container if unset)."""
        _data = udB.get_key(self._key)
        # Fix: a missing key used to be wrapped as [None] for list casts,
        # which then leaked None entries into storage via add().
        if _data is None:
            return self._cast() if callable(self._cast) else self._cast
        if self._cast and not isinstance(_data, self._cast):
            return [_data] if self._cast == list else self._cast(_data)
        return _data or (self._cast() if callable(self._cast) else self._cast)

    def get_child(self, key):
        """Return a single entry from the stored container."""
        return self.get()[key]

    def count(self):
        """Number of entries stored under the key."""
        return len(self.get())

    def add(self, item):
        """Append `item` (list) or merge it (dict), then persist."""
        content = self.get()
        if content is None and callable(type(item)):
            content = type(item)()
        if isinstance(content, dict) and isinstance(item, dict):
            content.update(item)
        elif isinstance(content, list) and item not in content:
            content.append(item)
        else:
            # Mismatched container / duplicate list item - leave storage untouched.
            return
        udB.set_key(self._key, content)

    def remove(self, item):
        """Drop `item` from the stored container and persist, if present."""
        content = self.get()
        if isinstance(content, list) and item in content:
            content.remove(item)
        elif isinstance(content, dict) and content.get(item):
            del content[item]
        else:
            return
        udB.set_key(self._key, content)

    def contains(self, item):
        """True if `item` is present in the stored container."""
        return item in self.get()
from . import *
def inline_mention(user, custom=None, html=False):
    """Return a clickable mention string for *user* in HTML or Markdown."""
    if custom:
        mention_text = custom
    else:
        mention_text = get_display_name(user) or "Deleted Account"
    if isinstance(user, types.User):
        link = f"tg://user?id={user.id}"
        return (
            f"<a href={link}>{mention_text}</a>"
            if html
            else f"[{mention_text}]({link})"
        )
    if isinstance(user, types.Channel) and user.username:
        link = f"https://t.me/{user.username}"
        return (
            f"<a href={link}>{mention_text}</a>"
            if html
            else f"[{mention_text}]({link})"
        )
    # Channels without a username (and anything else) get plain text.
    return mention_text
def get_string(key: str, _res: bool = True) -> Any:
    """Fetch the UI string `key` in the configured language.

    Missing translations fall back to machine-translating the English
    source and caching the result in `languages`. With _res=False, failures
    return None instead of a placeholder message.
    """
    lang = ULTConfig.lang or "en"
    try:
        return languages[lang][key]
    except KeyError:
        try:
            en_ = languages["en"][key]
            # "\ N" is a common translation artifact of "\n"; restore newlines.
            tr = translate(en_, lang_tgt=lang).replace("\ N", "\n")
            # A translation that gained or lost {} placeholders is unusable.
            if en_.count("{}") != tr.count("{}"):
                tr = en_
            # Cache for subsequent lookups.
            if languages.get(lang):
                languages[lang][key] = tr
            else:
                languages.update({lang: {key: tr}})
            return tr
        except KeyError:
            # Key missing even from English.
            if not _res:
                return
            return f"Warning: could not load any string with the key `{key}`"
        except TypeError:
            # Translation failed; fall through to the English fallback below.
            pass
    except Exception as er:
        LOGS.exception(er)
    if not _res:
        return None
    return languages["en"].get(key) or f"Failed to load language string '{key}'"
async def ultroid(event):
    """Assistant /start handler: log first-time users and show the start message.

    Regular users get the public start text (custom or generated, optionally
    mentioning the PM bot); owner/full sudos get the settings or main menu.
    """
    args = event.pattern_match.group(1).strip()
    keym = KeyManager("BOT_USERS", cast=list)
    # First contact: remember the user and (unless disabled) log it.
    if not keym.contains(event.sender_id) and event.sender_id not in owner_and_sudos():
        keym.add(event.sender_id)
        kak_uiw = udB.get_key("OFF_START_LOG")
        if not kak_uiw or kak_uiw != True:
            msg = f"{inline_mention(event.sender)} `[{event.sender_id}]` started your [Assistant bot](@{asst.me.username})."
            buttons = [[Button.inline("Info", "itkkstyo")]]
            if event.sender.username:
                buttons[0].append(
                    Button.mention(
                        "User", await event.client.get_input_entity(event.sender_id)
                    )
                )
            await event.client.send_message(
                udB.get_key("LOG_CHANNEL"), msg, buttons=buttons
            )
    if event.sender_id not in SUDO_M.fullsudos:
        # Ordinary user flow.
        ok = ""
        me = inline_mention(ultroid_bot.me)
        mention = inline_mention(event.sender)
        if args and args != "set":
            # Deep-link payload: deliver the stored file it refers to.
            await get_stored_file(event, args)
        if not udB.get_key("STARTMSG"):
            if udB.get_key("PMBOT"):
                ok = "You can contact my master using this bot!!\n\nSend your Message, I will Deliver it To Master."
            await event.reply(
                f"Hey there {mention}, this is Ultroid Assistant of {me}!\n\n{ok}",
                file=udB.get_key("STARTMEDIA"),
                buttons=[Button.inline("Info.", data="ownerinfo")]
                if Owner_info_msg
                else None,
            )
        else:
            # Owner-defined start message with {me}/{mention} placeholders.
            await event.reply(
                udB.get_key("STARTMSG").format(me=me, mention=mention),
                file=udB.get_key("STARTMEDIA"),
                buttons=[Button.inline("Info.", data="ownerinfo")]
                if Owner_info_msg
                else None,
            )
    else:
        # Owner / full-sudo flow.
        name = get_display_name(event.sender)
        if args == "set":
            await event.reply(
                "Choose from the below options -",
                buttons=_settings,
            )
        elif args:
            await get_stored_file(event, args)
        else:
            await event.reply(
                get_string("ast_3").format(name),
                buttons=_start,
            )
5,591 | from datetime import datetime
from pytz import timezone as tz
from telethon import Button, events
from telethon.errors.rpcerrorlist import MessageDeleteForbiddenError
from telethon.utils import get_display_name
from pyUltroid._misc import SUDO_M, owner_and_sudos
from pyUltroid.dB.base import KeyManager
from pyUltroid.fns.helper import inline_mention
from strings import get_string
from . import *
async def ekekdhdb(e):
    """'Info' callback on the visitor-log message: explain the log and how to disable it."""
    await e.answer(
        f"When New Visitor will visit your Assistant Bot. You will get this log message!\n\nTo Disable : {HNDLR}setdb OFF_START_LOG True",
        alert=True,
    )
5,592 | from datetime import datetime
from pytz import timezone as tz
from telethon import Button, events
from telethon.errors.rpcerrorlist import MessageDeleteForbiddenError
from telethon.utils import get_display_name
from pyUltroid._misc import SUDO_M, owner_and_sudos
from pyUltroid.dB.base import KeyManager
from pyUltroid.fns.helper import inline_mention
from strings import get_string
from . import *
# Main start-menu keyboard shown to owner/sudo users.
_start = [
    [
        Button.inline("Lᴀɴɢᴜᴀɢᴇ 🌐", data="lang"),
        Button.inline("Sᴇᴛᴛɪɴɢs ⚙️", data="setter"),
    ],
    [
        Button.inline("Sᴛᴀᴛs ✨", data="stat"),
        Button.inline("Bʀᴏᴀᴅᴄᴀsᴛ 📻", data="bcast"),
    ],
    [Button.inline("TɪᴍᴇZᴏɴᴇ 🌎", data="tz")],
]
def get_string(key: str, _res: bool = True) -> Any:
    """Fetch the UI string `key` in the configured language.

    Missing translations fall back to machine-translating the English
    source and caching the result in `languages`. With _res=False, failures
    return None instead of a placeholder message.
    """
    lang = ULTConfig.lang or "en"
    try:
        return languages[lang][key]
    except KeyError:
        try:
            en_ = languages["en"][key]
            # "\ N" is a common translation artifact of "\n"; restore newlines.
            tr = translate(en_, lang_tgt=lang).replace("\ N", "\n")
            # A translation that gained or lost {} placeholders is unusable.
            if en_.count("{}") != tr.count("{}"):
                tr = en_
            # Cache for subsequent lookups.
            if languages.get(lang):
                languages[lang][key] = tr
            else:
                languages.update({lang: {key: tr}})
            return tr
        except KeyError:
            # Key missing even from English.
            if not _res:
                return
            return f"Warning: could not load any string with the key `{key}`"
        except TypeError:
            # Translation failed; fall through to the English fallback below.
            pass
    except Exception as er:
        LOGS.exception(er)
    if not _res:
        return None
    return languages["en"].get(key) or f"Failed to load language string '{key}'"
async def ultroid(event):
    """Main-menu callback: redraw the assistant start menu in place."""
    await event.edit(
        get_string("ast_3").format(OWNER_NAME),
        buttons=_start,
    )
5,593 | from datetime import datetime
from pytz import timezone as tz
from telethon import Button, events
from telethon.errors.rpcerrorlist import MessageDeleteForbiddenError
from telethon.utils import get_display_name
from pyUltroid._misc import SUDO_M, owner_and_sudos
from pyUltroid.dB.base import KeyManager
from pyUltroid.fns.helper import inline_mention
from strings import get_string
from . import *
async def botstat(event):
    """'Stats' callback: popup showing how many users have started the bot."""
    total_users = len(udB.get_key("BOT_USERS") or [])
    msg = """Ultroid Assistant - Stats
Total Users - {}""".format(total_users)
    await event.answer(msg, cache_time=0, alert=True)
5,594 | from datetime import datetime
from pytz import timezone as tz
from telethon import Button, events
from telethon.errors.rpcerrorlist import MessageDeleteForbiddenError
from telethon.utils import get_display_name
from pyUltroid._misc import SUDO_M, owner_and_sudos
from pyUltroid.dB.base import KeyManager
from pyUltroid.fns.helper import inline_mention
from strings import get_string
from . import *
class KeyManager:
    """Thin helper around a single udB key holding a list or dict.

    `cast` declares the expected container type; `get()` normalises whatever
    is stored into that type so callers can treat the key uniformly.
    """

    def __init__(self, key, cast=None) -> None:
        self._key = key
        self._cast = cast

    def get(self):
        """Return the stored value coerced to `cast` (empty container if unset)."""
        _data = udB.get_key(self._key)
        # Fix: a missing key used to be wrapped as [None] for list casts,
        # which then leaked None entries into storage via add().
        if _data is None:
            return self._cast() if callable(self._cast) else self._cast
        if self._cast and not isinstance(_data, self._cast):
            return [_data] if self._cast == list else self._cast(_data)
        return _data or (self._cast() if callable(self._cast) else self._cast)

    def get_child(self, key):
        """Return a single entry from the stored container."""
        return self.get()[key]

    def count(self):
        """Number of entries stored under the key."""
        return len(self.get())

    def add(self, item):
        """Append `item` (list) or merge it (dict), then persist."""
        content = self.get()
        if content is None and callable(type(item)):
            content = type(item)()
        if isinstance(content, dict) and isinstance(item, dict):
            content.update(item)
        elif isinstance(content, list) and item not in content:
            content.append(item)
        else:
            # Mismatched container / duplicate list item - leave storage untouched.
            return
        udB.set_key(self._key, content)

    def remove(self, item):
        """Drop `item` from the stored container and persist, if present."""
        content = self.get()
        if isinstance(content, list) and item in content:
            content.remove(item)
        elif isinstance(content, dict) and content.get(item):
            del content[item]
        else:
            return
        udB.set_key(self._key, content)

    def contains(self, item):
        """True if `item` is present in the stored container."""
        return item in self.get()
async def bdcast(event):
    """'Broadcast' callback: interactively send one message to every bot user."""
    keym = KeyManager("BOT_USERS", cast=list)
    total = keym.count()
    await event.edit(f"• Broadcast to {total} users.")
    # Collect the broadcast content from the owner in a private conversation.
    async with event.client.conversation(OWNER_ID) as conv:
        await conv.send_message(
            "Enter your broadcast message.\nUse /cancel to stop the broadcast.",
        )
        response = await conv.get_response()
        if response.message == "/cancel":
            return await conv.send_message("Cancelled!!")
        success = 0
        fail = 0
        await conv.send_message(f"Starting a broadcast to {total} users...")
        start = datetime.now()
        # Send to every stored user id; any failure (blocked bot, deleted
        # account, bad id) is counted rather than raised.
        for i in keym.get():
            try:
                await asst.send_message(int(i), response)
                success += 1
            except BaseException:
                fail += 1
        end = datetime.now()
        time_taken = (end - start).seconds
        await conv.send_message(
            f"""
**Broadcast completed in {time_taken} seconds.**
Total Users in Bot - {total}
**Sent to** : `{success} users.`
**Failed for** : `{fail} user(s).`""",
        )
5,595 | from datetime import datetime
from pytz import timezone as tz
from telethon import Button, events
from telethon.errors.rpcerrorlist import MessageDeleteForbiddenError
from telethon.utils import get_display_name
from pyUltroid._misc import SUDO_M, owner_and_sudos
from pyUltroid.dB.base import KeyManager
from pyUltroid.fns.helper import inline_mention
from strings import get_string
from . import *
# Settings submenu keyboard; each "cbs_*" callback opens that settings page.
_settings = [
    [
        Button.inline("API Kᴇʏs", data="cbs_apiset"),
        Button.inline("Pᴍ Bᴏᴛ", data="cbs_chatbot"),
    ],
    [
        Button.inline("Aʟɪᴠᴇ", data="cbs_alvcstm"),
        Button.inline("PᴍPᴇʀᴍɪᴛ", data="cbs_ppmset"),
    ],
    [
        Button.inline("Fᴇᴀᴛᴜʀᴇs", data="cbs_otvars"),
        Button.inline("VC Sᴏɴɢ Bᴏᴛ", data="cbs_vcb"),
    ],
    [Button.inline("« Bᴀᴄᴋ", data="mainmenu")],
]
async def setting(event):
    """'Settings' callback: swap the message over to the settings submenu."""
    await event.edit("Choose from the below options -", buttons=_settings)
5,596 | from datetime import datetime
from pytz import timezone as tz
from telethon import Button, events
from telethon.errors.rpcerrorlist import MessageDeleteForbiddenError
from telethon.utils import get_display_name
from pyUltroid._misc import SUDO_M, owner_and_sudos
from pyUltroid.dB.base import KeyManager
from pyUltroid.fns.helper import inline_mention
from strings import get_string
from . import *
async def timezone_(event):
    """'TimeZone' callback: ask the owner for a tz name, validate and store it."""
    await event.delete()
    pru = event.sender_id
    var = "TIMEZONE"
    name = "Timezone"
    async with event.client.conversation(pru) as conv:
        await conv.send_message(
            "Send Your TimeZone From This List [Check From Here](http://www.timezoneconverter.com/cgi-bin/findzone.tzc)"
        )
        response = conv.wait_event(events.NewMessage(chats=pru))
        response = await response
        themssg = response.message.message
        if themssg == "/cancel":
            return await conv.send_message(
                "Cancelled!!",
                buttons=get_back_button("mainmenu"),
            )
        try:
            # pytz.timezone raises for unknown names - used here as validation.
            tz(themssg)
            await setit(event, var, themssg)
            await conv.send_message(
                f"{name} changed to {themssg}\n",
                buttons=get_back_button("mainmenu"),
            )
        except BaseException:
            await conv.send_message(
                "Wrong TimeZone, Try again",
                buttons=get_back_button("mainmenu"),
            )
5,597 | from datetime import datetime as dt
from telethon.events import NewMessage
from telethon.tl.types import (
Message,
MessageEntityMention,
MessageEntityMentionName,
User,
)
from telethon.utils import get_display_name
from pyUltroid.fns.helper import inline_mention, time_formatter
from . import asst, asst_cmd
# chat_id -> {user_id: {"reason": str | Message | None, "time": datetime}}
AFK = {}
from . import *
def inline_mention(user, custom=None, html=False):
    """Return a clickable mention string for *user* in HTML or Markdown."""
    if custom:
        mention_text = custom
    else:
        mention_text = get_display_name(user) or "Deleted Account"
    if isinstance(user, types.User):
        link = f"tg://user?id={user.id}"
        return (
            f"<a href={link}>{mention_text}</a>"
            if html
            else f"[{mention_text}]({link})"
        )
    if isinstance(user, types.Channel) and user.username:
        link = f"https://t.me/{user.username}"
        return (
            f"<a href={link}>{mention_text}</a>"
            if html
            else f"[{mention_text}]({link})"
        )
    # Channels without a username (and anything else) get plain text.
    return mention_text
async def go_afk(event):
    """/afk command: record the sender as AFK in this chat, with optional reason."""
    sender = await event.get_sender()
    # Only human accounts can go AFK.
    if (not isinstance(sender, User)) or sender.bot:
        return
    try:
        reason = event.text.split(" ", maxsplit=1)[1]
    except IndexError:
        reason = None
    if event.is_reply and not reason:
        replied = await event.get_reply_message()
        # A plain-text reply becomes the reason string; media replies are
        # stored as the whole Message so they can be re-sent later.
        if not reason and replied.text and not replied.media:
            reason = replied.text
        else:
            reason = replied
    time_ = dt.now()
    # AFK state is keyed per chat, then per user.
    if AFK.get(event.chat_id):
        AFK[event.chat_id].update({event.sender_id: {"reason": reason, "time": time_}})
    else:
        AFK.update(
            {event.chat_id: {event.sender_id: {"reason": reason, "time": time_}}}
        )
    mention = inline_mention(sender)
    msg = f"**{mention} went AFK Now!**"
    if reason and not isinstance(reason, str):
        # Message/media reason: send it alongside the announcement.
        await event.reply(reason)
    else:
        msg += f"\n\n**Reason : ** `{reason}`"
    await event.reply(msg)
5,598 | from datetime import datetime as dt
from telethon.events import NewMessage
from telethon.tl.types import (
Message,
MessageEntityMention,
MessageEntityMentionName,
User,
)
from telethon.utils import get_display_name
from pyUltroid.fns.helper import inline_mention, time_formatter
from . import asst, asst_cmd
# chat_id -> {user_id: {"reason": str | Message | None, "time": datetime}}
AFK = {}
from . import *
def time_formatter(milliseconds):
async def make_change(event):
    """Incoming-message watcher: clear a returning sender's AFK and announce AFK users.

    Handles three cases: the sender coming back from AFK, a reply to an AFK
    user, and mentions (@username / text-mention) of AFK users in the text.
    """
    if event.text.startswith("/afk"):
        return
    sender = await event.get_sender()
    if (not isinstance(sender, User)) or sender.bot:
        return
    # NOTE(review): raises KeyError when this chat has no AFK entries -
    # presumably the handler is only triggered for tracked chats; confirm.
    chat_ = AFK[event.chat_id]
    if event.sender_id in chat_.keys():
        # Sender was AFK: announce their return and drop the record.
        name = get_display_name(event.sender)
        cha_send = chat_[event.sender_id]
        time_ = time_formatter((dt.now() - cha_send["time"]).seconds * 1000)
        msg = f"**{name}** is No Longer AFK!\n**Was AFK for** {time_}"
        await event.reply(msg)
        del chat_[event.sender_id]
        if not chat_:
            del AFK[event.chat_id]
    # Users already announced for this message (avoids duplicate notices).
    ST_SPAM = []
    replied = await event.get_reply_message()
    if replied:
        name = get_display_name(replied.sender)
        if replied.sender_id in chat_.keys():
            s_der = chat_[replied.sender_id]
            res_ = s_der["reason"]
            time_ = time_formatter((dt.now() - s_der["time"]).seconds * 1000)
            msg = f"**{name}** is AFK Currently!\n**From :** {time_}"
            if res_ and isinstance(res_, str):
                msg += f"\n**Reason :** {res_}"
            elif res_ and isinstance(res_, Message):
                # Media reason stored as a whole Message: resend it.
                await event.reply(res_)
            await event.reply(msg)
            ST_SPAM.append(replied.sender_id)
    # Scan textual mentions for AFK users.
    for ent, text in event.get_entities_text():
        dont_send, entity = None, None
        if isinstance(ent, MessageEntityMentionName):
            c_id = ent.user_id
        elif isinstance(ent, MessageEntityMention):
            c_id = text
        else:
            c_id = None
        if c_id:
            entity = await event.client.get_entity(c_id)
        if entity and entity.id in chat_.keys() and entity.id not in ST_SPAM:
            ST_SPAM.append(entity.id)
            s_der = chat_[entity.id]
            name = get_display_name(entity)
            res_ = s_der["reason"]
            time_ = time_formatter((dt.now() - s_der["time"]).seconds * 1000)
            msg = f"**{name}** is AFK Currently!\n**From :** {time_}"
            if res_ and isinstance(res_, str):
                msg += f"\n**Reason :** {res_}"
            elif res_ and isinstance(res_, Message):
                await event.reply(res_)
            await event.reply(msg)
5,599 | from telethon import events
from . import *
async def dueha(e):
    """Greet the group when the assistant bot itself is added, unless the
    ON_MNGR_ADD setting is "OFF"; falls back to a default greeting."""
    joined = await e.get_user()
    if not joined.is_self:
        return
    greeting = udB.get_key("ON_MNGR_ADD")
    if greeting == "OFF":
        return
    await e.reply(greeting or "Thanks for Adding me :)", link_preview=False)
5,600 | import random
import aiohttp
from pyUltroid.dB import DEVLIST
from pyUltroid.fns.admins import admin_check
from . import *
async def dheh(e):
    """Send a random yes/no-style verdict, threading onto the message the
    command replied to (or onto the command itself)."""
    verdict = random.choice(("Yes", "NoU", "Maybe", "IDK"))
    target = e.reply_to_msg_id or e.id
    await e.client.send_message(e.chat_id, verdict, reply_to=target)
5,601 | import random
import aiohttp
from pyUltroid.dB import DEVLIST
from pyUltroid.fns.admins import admin_check
from . import *
async def admin_check(event, require=None, silent: bool = False):
async def oqha(e):
    """Echo command (admins only): repeats the given text, or the replied
    message's text, as the bot; the triggering command is deleted."""
    if not await admin_check(e):
        return
    arg = e.pattern_match.group(1).strip()
    if arg:
        text, reply_to = arg, e
    elif e.is_reply:
        replied = await e.get_reply_message()
        text, reply_to = replied.text, e.reply_to_msg_id
    else:
        return await e.eor("What to Echo?", time=5)
    # Best-effort delete of the triggering command; log but don't abort.
    try:
        await e.delete()
    except BaseException as exc:
        LOGS.error(exc)
    await e.client.send_message(e.chat_id, text, reply_to=reply_to)
5,602 | import random
import aiohttp
from pyUltroid.dB import DEVLIST
from pyUltroid.fns.admins import admin_check
from . import *
from .. import *
# Telegram user IDs of core Ultroid developers, exempted from moderation
# commands in this module.
# NOTE(review): this re-definition shadows the DEVLIST imported from
# pyUltroid.dB above — confirm which list is canonical.
DEVLIST = [
    719195224,  # @xditya
    1322549723,  # @danish_00
    1903729401,  # @its_buddhhu
    1303895686,  # @Sipak_OP
    611816596,  # @Arnab431
    1318486004,  # @sppidy
    803243487,  # @hellboi_atul
]
async def doit(e):
    """Kick the command's own sender from the chat; developers are spared."""
    if e.sender_id in DEVLIST:
        return await eod(e, "`I will Not Kick You, my Developer..`")
    try:
        await e.client.kick_participant(e.chat_id, e.sender_id)
    except Exception as err:
        # Surface the failure (e.g. missing admin rights) briefly.
        return await e.eor(str(err), time=5)
    await e.eor("Yes, You are right, get out.", time=5)
5,603 | import random
import aiohttp
from pyUltroid.dB import DEVLIST
from pyUltroid.fns.admins import admin_check
from . import *
async def do_joke(e):
    """Fetch one safe single-part joke from JokeAPI and reply with it,
    targeting the replied message when the command is a reply."""
    target = await e.get_reply_message() if e.is_reply else e
    url = (
        "https://v2.jokeapi.dev/joke/Any"
        "?blacklistFlags=nsfw,religious,political,racist,sexist,explicit&type=single"
    )
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            payload = await resp.json()
    await target.reply(payload["joke"])
5,604 | import re
from telethon.errors.rpcerrorlist import UserNotParticipantError
from pyUltroid import _ult_cache
from . import *
async def dowj(e):
    """Delete the replied message and kick or ban its sender, depending on
    whether the command argument is "kick"."""
    replied = await e.get_reply_message()
    if not replied:
        return await e.eor("Reply to a message...")
    target = replied.sender_id
    try:
        await replied.delete()
        if e.pattern_match.group(1).strip() == "kick":
            await e.client.kick_participant(e.chat_id, target)
            action = "Kicked"
        else:
            # Any other argument bans: revoke the view-messages permission.
            await e.client.edit_permissions(e.chat_id, target, view_messages=False)
            action = "Banned"
        await e.eor(f"{action} Successfully!")
    except Exception as err:
        await e.eor(str(err))
5,605 | import re
from telethon.errors.rpcerrorlist import UserNotParticipantError
from pyUltroid import _ult_cache
from . import *
async def callback_(event):
    """Admin-verification button callback: records the pressing admin (and
    their permissions) in the shared cache, then removes the button."""
    token = event.data_match.group(1).decode("utf-8")
    if token not in _ult_cache.get("admin_callback", {}):
        return
    try:
        perm = await event.client.get_permissions(event.chat_id, event.sender_id)
    except UserNotParticipantError:
        return await event.answer("Join the Group First!", alert=True)
    if not perm.is_admin:
        return await event.answer("You are not an Admin!", alert=True)
    _ult_cache["admin_callback"][token] = (event.sender, perm)
    await event.answer("Verification Done!")
    await event.delete()
5,606 | import random
from telethon import errors
from telethon.errors.rpcerrorlist import StickersetInvalidError
from telethon.tl.functions.messages import GetStickerSetRequest as GetSticker
from telethon.tl.functions.messages import UploadMediaRequest
from telethon.tl.functions.stickers import AddStickerToSetRequest as AddSticker
from telethon.tl.functions.stickers import CreateStickerSetRequest
from telethon.tl.types import InputPeerSelf
from telethon.tl.types import InputStickerSetItem as SetItem
from telethon.tl.types import InputStickerSetShortName, User
from telethon.utils import get_display_name, get_input_document
from pyUltroid.fns.misc import Quotly
from pyUltroid.fns.tools import TgConverter
from . import LOGS, asst, asst_cmd, udB
from .. import *
class Quotly:
    """Render Telegram messages as "quote" sticker images via a quotly API."""

    # Fallback/alternate quote-rendering endpoint.
    _API = "https://quoteampi.onrender.com/generate"
    # Maps Telethon entity classes to the type names the quote API expects.
    _entities = {
        types.MessageEntityPhone: "phone_number",
        types.MessageEntityMention: "mention",
        types.MessageEntityBold: "bold",
        types.MessageEntityCashtag: "cashtag",
        types.MessageEntityStrike: "strikethrough",
        types.MessageEntityHashtag: "hashtag",
        types.MessageEntityEmail: "email",
        types.MessageEntityMentionName: "text_mention",
        types.MessageEntityUnderline: "underline",
        types.MessageEntityUrl: "url",
        types.MessageEntityTextUrl: "text_link",
        types.MessageEntityBotCommand: "bot_command",
        types.MessageEntityCode: "code",
        types.MessageEntityPre: "pre",
        types.MessageEntitySpoiler: "spoiler",
    }

    async def _format_quote(self, event, reply=None, sender=None, type_="private"):
        """Build the per-message JSON payload consumed by the quote API."""

        async def telegraph(file_):
            # Re-encode the downloaded thumb as PNG and upload it to
            # graph.org, returning its public URL.
            # NOTE(review): the file handle opened here is never closed.
            file = file_ + ".png"
            Image.open(file_).save(file, "PNG")
            files = {"file": open(file, "rb").read()}
            uri = (
                "https://graph.org"
                + (
                    await async_searcher(
                        "https://graph.org/upload", post=True, data=files, re_json=True
                    )
                )[0]["src"]
            )
            os.remove(file)
            os.remove(file_)
            return uri

        # Convert the reply target (if any) into the API's reply structure.
        if reply and reply.raw_text:
            reply = {
                "name": get_display_name(reply.sender) or "Deleted Account",
                "text": reply.raw_text,
                "chatId": reply.chat_id,
            }
        else:
            reply = {}
        is_fwd = event.fwd_from
        name = None
        last_name = None
        # Resolve who the quote should be attributed to: explicit sender,
        # the event's sender, or the forward origin.
        if sender and sender.id not in DEVLIST:
            id_ = get_peer_id(sender)
        elif not is_fwd:
            id_ = event.sender_id
            sender = await event.get_sender()
        else:
            id_, sender = None, None
            name = is_fwd.from_name
            if is_fwd.from_id:
                id_ = get_peer_id(is_fwd.from_id)
                try:
                    sender = await event.client.get_entity(id_)
                except ValueError:
                    # Entity not resolvable (e.g. hidden forward origin).
                    pass
        if sender:
            name = get_display_name(sender)
            if hasattr(sender, "last_name"):
                last_name = sender.last_name
        # Translate Telethon formatting entities into the API's dialect.
        entities = []
        if event.entities:
            for entity in event.entities:
                if type(entity) in self._entities:
                    enti_ = entity.to_dict()
                    del enti_["_"]
                    enti_["type"] = self._entities[type(entity)]
                    entities.append(enti_)
        text = event.raw_text
        # Service messages have no raw text; synthesize a description.
        if isinstance(event, types.MessageService):
            if isinstance(event.action, types.MessageActionGameScore):
                text = f"scored {event.action.score}"
                rep = await event.get_reply_message()
                if rep and rep.game:
                    text += f" in {rep.game.title}"
            elif isinstance(event.action, types.MessageActionPinMessage):
                text = "pinned a message."
        # TODO: Are there any more events with sender?
        message = {
            "entities": entities,
            "chatId": id_,
            "avatar": True,
            "from": {
                "id": id_,
                "first_name": (name or (sender.first_name if sender else None))
                or "Deleted Account",
                "last_name": last_name,
                "username": sender.username if sender else None,
                "language_code": "en",
                "title": name,
                "name": name or "Deleted Account",
                "type": type_,
            },
            "text": text,
            "replyMessage": reply,
        }
        # Attach the document thumbnail (uploaded to graph.org) as media.
        if event.document and event.document.thumbs:
            file_ = await event.download_media(thumb=-1)
            uri = await telegraph(file_)
            message["media"] = {"url": uri}
        return message

    async def create_quotly(
        self,
        event,
        url="https://bot.lyo.su/quote/generate",
        reply={},
        bg=None,
        sender=None,
        file_name="quote.webp",
    ):
        """Create quotely's quote.

        Renders ``event`` (a message or list of messages) through the quote
        API and writes the resulting webp to ``file_name``.
        NOTE(review): ``reply={}`` is a mutable default argument; it is only
        read here (never mutated), so it is benign but worth cleaning up.
        """
        if not isinstance(event, list):
            event = [event]
        from .. import udB

        # Allow switching to the alternate API via the OQAPI setting.
        if udB.get_key("OQAPI"):
            url = Quotly._API
        if not bg:
            bg = "#1b1429"
        content = {
            "type": "quote",
            "format": "webp",
            "backgroundColor": bg,
            "width": 512,
            "height": 768,
            "scale": 2,
            "messages": [
                await self._format_quote(message, reply=reply, sender=sender)
                for message in event
            ],
        }
        try:
            request = await async_searcher(url, post=True, json=content, re_json=True)
        except ContentTypeError as er:
            # Primary endpoint returned a non-JSON response — retry once
            # against the fallback API, otherwise re-raise.
            if url != self._API:
                return await self.create_quotly(
                    event,
                    url=self._API,
                    bg=bg,
                    sender=sender,
                    reply=reply,
                    file_name=file_name,
                )
            raise er
        if request.get("ok"):
            with open(file_name, "wb") as file:
                image = base64.decodebytes(request["result"]["image"].encode("utf-8"))
                file.write(image)
            return file_name
        raise Exception(str(request))
from .. import *
class TgConverter:
    """Convert files related to Telegram (stickers, webm, gif, images)."""

    @staticmethod
    async def animated_sticker(file, out_path="sticker.tgs", throw=False, remove=False):
        """Convert to/from animated sticker via lottie_convert.py.

        Raises LottieException on converter stderr when ``throw`` is set,
        optionally removes the input, and returns ``out_path`` only if the
        output file was actually produced (else implicitly None).
        """
        if out_path.endswith("webp"):
            er, out = await bash(
                f"lottie_convert.py --webp-quality 100 --webp-skip-frames 100 '{file}' '{out_path}'"
            )
        else:
            er, out = await bash(f"lottie_convert.py '{file}' '{out_path}'")
        if er and throw:
            raise LottieException(er)
        if remove:
            os.remove(file)
        if os.path.exists(out_path):
            return out_path

    @staticmethod
    async def animated_to_gif(file, out_path="gif.gif"):
        """Convert animated sticker to gif."""
        await bash(
            f"lottie_convert.py '{_unquote_text(file)}' '{_unquote_text(out_path)}'"
        )
        return out_path

    @staticmethod
    def resize_photo_sticker(photo):
        """Resize the given photo to fit 512x512 (for creating a Telegram sticker)."""
        image = Image.open(photo)
        # BUG FIX: original condition was ``(image.width and image.height) < 512``,
        # which (via short-circuit `and`) compared only image.height.  The
        # intent is "both dimensions below 512": upscale so the larger side
        # becomes 512; otherwise shrink with thumbnail().
        if image.width < 512 and image.height < 512:
            size1 = image.width
            size2 = image.height
            if image.width > image.height:
                scale = 512 / size1
                size1new = 512
                size2new = size2 * scale
            else:
                scale = 512 / size2
                size1new = size1 * scale
                size2new = 512
            size1new = math.floor(size1new)
            size2new = math.floor(size2new)
            image = image.resize((size1new, size2new))
        else:
            # thumbnail() shrinks in place, preserving aspect ratio.
            image.thumbnail((512, 512))
        return image

    @staticmethod
    async def ffmpeg_convert(input_, output, remove=False):
        """Convert media with ffmpeg; returns ``output`` if it was created."""
        if output.endswith(".webm"):
            return await TgConverter.create_webm(
                input_, name=output[:-5], remove=remove
            )
        if output.endswith(".gif"):
            # NOTE(review): this writes to '<output>.mp4' while the existence
            # check below looks at '<output>', so the gif branch can return
            # None even when ffmpeg succeeded — preserved pending confirmation.
            await bash(f"ffmpeg -i '{input_}' -an -sn -c:v copy '{output}.mp4' -y")
        else:
            await bash(f"ffmpeg -i '{input_}' '{output}' -y")
        if remove:
            os.remove(input_)
        if os.path.exists(output):
            return output

    @staticmethod
    async def create_webm(file, name="video", remove=False):
        """Encode ``file`` as a <=3s 512px VP9 webm (Telegram video sticker)."""
        meta = await metadata(file)
        name += ".webm"
        h, w = meta["height"], meta["width"]
        # Square videos snap to 512x512; otherwise scale the longer side to
        # 512 and let ffmpeg keep the aspect ratio (-1).
        if h == w and h != 512:
            h, w = 512, 512
        if h != 512 or w != 512:
            if h > w:
                h, w = 512, -1
            if w > h:
                h, w = -1, 512
        await bash(
            f'ffmpeg -i "{file}" -preset fast -an -to 00:00:03 -crf 30 -bufsize 256k -b:v {meta["bitrate"]} -vf "scale={w}:{h},fps=30" -c:v libvpx-vp9 "{name}" -y'
        )
        if remove:
            os.remove(file)
        return name

    @staticmethod
    def to_image(input_, name, remove=False):
        """Grab the first frame of a video as an image file via OpenCV."""
        try:
            import cv2
        except ImportError:
            raise DependencyMissingError("This function needs 'cv2' to be installed.")
        capture = cv2.VideoCapture(input_)
        ok, frame = capture.read()
        cv2.imwrite(name, frame)
        if remove:
            os.remove(input_)
        return name

    @staticmethod
    async def convert(
        input_file,
        outname="converted",
        convert_to=None,
        allowed_formats=(),
        remove_old=True,
    ):
        """Convert ``input_file`` into ``convert_to`` (or any of
        ``allowed_formats``), dispatching on the source extension.

        Returns the new file path, or ``input_file`` unchanged when no
        conversion is needed.  ``allowed_formats`` default changed from a
        mutable ``[]`` to ``()`` (behavior-identical; avoids the shared
        mutable-default pitfall).
        """
        if "." in input_file:
            ext = input_file.split(".")[-1].lower()
        else:
            # No extension — nothing we can dispatch on.
            return input_file
        if (
            ext in allowed_formats
            or ext == convert_to
            or not (convert_to or allowed_formats)
        ):
            return input_file

        def recycle_type(exte):
            # True when ``exte`` is an acceptable target format.
            return convert_to == exte or exte in allowed_formats

        # Sticker to Something
        if ext == "tgs":
            for extn in ["webp", "json", "png", "mp4", "gif"]:
                if recycle_type(extn):
                    name = outname + "." + extn
                    return await TgConverter.animated_sticker(
                        input_file, name, remove=remove_old
                    )
            if recycle_type("webm"):
                # tgs -> gif -> webm (two-step).
                input_file = await TgConverter.convert(
                    input_file, convert_to="gif", remove_old=remove_old
                )
                return await TgConverter.create_webm(input_file, outname, remove=True)
        # Json -> Tgs
        elif ext == "json":
            if recycle_type("tgs"):
                name = outname + ".tgs"
                return await TgConverter.animated_sticker(
                    input_file, name, remove=remove_old
                )
        # Video to Something
        elif ext in ["webm", "mp4", "gif"]:
            for exte in ["webm", "mp4", "gif"]:
                if recycle_type(exte):
                    name = outname + "." + exte
                    return await TgConverter.ffmpeg_convert(
                        input_file, name, remove=remove_old
                    )
            for exte in ["png", "jpg", "jpeg", "webp"]:
                if recycle_type(exte):
                    name = outname + "." + exte
                    return TgConverter.to_image(input_file, name, remove=remove_old)
        # Image to Something
        elif ext in ["jpg", "jpeg", "png", "webp"]:
            for extn in ["png", "webp", "ico"]:
                if recycle_type(extn):
                    img = Image.open(input_file)
                    name = outname + "." + extn
                    img.save(name, extn.upper())
                    if remove_old:
                        os.remove(input_file)
                    return name
            for extn in ["webm", "gif", "mp4"]:
                if recycle_type(extn):
                    name = outname + "." + extn
                    if extn == "webm":
                        # Normalize to png first for the webm encoder.
                        input_file = await TgConverter.convert(
                            input_file,
                            convert_to="png",
                            remove_old=remove_old,
                        )
                    return await TgConverter.ffmpeg_convert(
                        input_file, name, remove=True if extn == "webm" else remove_old
                    )
async def kang_cmd(ult):
    """Kang handler: copy the replied sticker/photo/text (rendered as a
    quote) into the sender's personal sticker pack, creating the pack on
    first use or when the current one rejects additions."""
    sender = await ult.get_sender()
    if not isinstance(sender, User):
        # Channels / anonymous admins cannot own sticker packs.
        return
    if not ult.is_reply:
        return await ult.eor("`Reply to a sticker/photo..`", time=5)
    reply = await ult.get_reply_message()
    # Short-name prefix: first 4 chars of the username, else random.
    if sender.username:
        pre = sender.username[:4]
    else:
        # NOTE(review): stdlib ``random`` has no ``random_string`` — this
        # raises AttributeError unless pyUltroid patches the module; confirm.
        pre = random.random_string(length=3)
    animated, dl, video = None, None, None
    try:
        # Optional emoji passed as the command argument.
        emoji = ult.text.split(maxsplit=1)[1]
    except IndexError:
        emoji = None
    if reply.sticker:
        file = get_input_document(reply.sticker)
        emoji = emoji or reply.file.emoji
        name = reply.file.name
        if name.endswith(".tgs"):
            animated = True
            dl = await reply.download_media()
        elif name.endswith(".webm"):
            video = True
            dl = await reply.download_media()
    elif reply.photo:
        dl = await reply.download_media()
        name = "sticker.webp"
        image = TgConverter.resize_photo_sticker(dl)
        image.save(name, "WEBP")
        # NOTE(review): the resized "sticker.webp" is written, but ``dl``
        # (the raw download) is what gets uploaded below — confirm intended.
    elif reply.text:
        # Render plain text as a quote image.
        dl = await Quotly().create_quotly(reply)
    else:
        return await ult.eor("`Reply to sticker or text to add it in your pack...`")
    if not emoji:
        emoji = "🏵"
    if dl:
        # Re-upload the local file so Telegram returns a document usable in
        # sticker requests.
        upl = await ult.client.upload_file(dl)
        file = get_input_document(
            await ult.client(UploadMediaRequest(InputPeerSelf(), upl))
        )
    # Per-user pack registry: {sender_id: {"static"/"anim"/"vid": [short_names]}}
    get_ = udB.get_key("STICKERS") or {}
    type_ = "anim" if animated else "static"
    if not get_.get(ult.sender_id) or not get_.get(ult.sender_id, {}).get(type_):
        # No pack of this type yet — create one.
        sn = f"{pre}_{ult.sender_id}"
        title = f"{get_display_name(sender)}'s Kang Pack"
        if animated:
            type_ = "anim"
            sn += "_anim"
            title += " (Animated)"
        elif video:
            type_ = "vid"
            sn += "_vid"
            title += " (Video)"
        sn += f"_by_{asst.me.username}"
        try:
            # If the short name already exists, disambiguate with the msg id.
            await asst(GetSticker(InputStickerSetShortName(sn), hash=0))
            sn = sn.replace(str(ult.sender_id), f"{ult.sender_id}_{ult.id}")
        except StickersetInvalidError:
            # Short name is free — use it as-is.
            pass
        try:
            pack = await ult.client(
                CreateStickerSetRequest(
                    user_id=sender.id,
                    title=title,
                    short_name=sn,
                    stickers=[SetItem(file, emoji=emoji)],
                    videos=video,
                    animated=animated,
                    software="@TeamUltroid",
                )
            )
        except Exception as er:
            return await ult.eor(str(er))
        sn = pack.set.short_name
        if not get_.get(ult.sender_id):
            get_.update({ult.sender_id: {type_: [sn]}})
        else:
            get_[ult.sender_id].update({type_: [sn]})
        udB.set_key("STICKERS", get_)
        return await ult.reply(
            f"**Kanged Successfully!\nEmoji :** {emoji}\n**Link :** [Click Here](https://t.me/addstickers/{sn})"
        )
    # Pack exists — append to the most recent pack of this type.
    name = get_[ult.sender_id][type_][-1]
    try:
        await asst(GetSticker(InputStickerSetShortName(name), hash=0))
    except StickersetInvalidError:
        # Pack was deleted on Telegram's side; forget it.
        # NOTE(review): ``name`` still points at the removed pack below.
        get_[ult.sender_id][type_].remove(name)
    try:
        await asst(
            AddSticker(InputStickerSetShortName(name), SetItem(file, emoji=emoji))
        )
    except (errors.StickerpackStickersTooMuchError, errors.StickersTooMuchError):
        # Pack is full — start a fresh one.
        sn = f"{pre}{ult.sender_id}_{ult.id}"
        title = f"{get_display_name(sender)}'s Kang Pack"
        if animated:
            sn += "_anim"
            title += " (Animated)"
        elif video:
            sn += "_vid"
            title += "(Video)"
        sn += f"_by_{asst.me.username}"
        try:
            pack = await ult.client(
                CreateStickerSetRequest(
                    user_id=sender.id,
                    title=title,
                    short_name=sn,
                    stickers=[SetItem(file, emoji=emoji)],
                    animated=animated,
                )
            )
        except Exception as er:
            return await ult.eor(str(er))
        get_[ult.sender_id][type_].append(pack.set.short_name)
        udB.set_key("STICKERS", get_)
        return await ult.reply(
            f"**Created New Kang Pack!\nEmoji :** {emoji}\n**Link :** [Click Here](https://t.me/addstickers/{sn})"
        )
    except Exception as er:
        LOGS.exception(er)
        return await ult.reply(str(er))
    await ult.reply(
        f"Sticker Added to Pack Successfully\n**Link :** [Click Here](https://t.me/addstickers/{name})"
    )
5,607 | import random
from telethon import errors
from telethon.errors.rpcerrorlist import StickersetInvalidError
from telethon.tl.functions.messages import GetStickerSetRequest as GetSticker
from telethon.tl.functions.messages import UploadMediaRequest
from telethon.tl.functions.stickers import AddStickerToSetRequest as AddSticker
from telethon.tl.functions.stickers import CreateStickerSetRequest
from telethon.tl.types import InputPeerSelf
from telethon.tl.types import InputStickerSetItem as SetItem
from telethon.tl.types import InputStickerSetShortName, User
from telethon.utils import get_display_name, get_input_document
from pyUltroid.fns.misc import Quotly
from pyUltroid.fns.tools import TgConverter
from . import LOGS, asst, asst_cmd, udB
async def do_magic(ult):
    """Reply with the sender's kang sticker packs, pruning packs that
    Telegram no longer recognizes from the STICKERS registry."""
    store = udB.get_key("STICKERS") or {}
    if not store.get(ult.sender_id):
        return await ult.reply("No Sticker Pack Found!")
    owned = store[ult.sender_id]
    # Flatten the per-type lists ("static"/"anim"/"vid") into one list.
    short_names = []
    for kind in owned.keys():
        short_names.extend(owned[kind])
    msg = "• **Stickers Owned by You!**\n\n"
    for sn in short_names:
        try:
            pack = await ult.client(GetSticker(InputStickerSetShortName(sn), hash=0))
            msg += f"• [{pack.set.title}](https://t.me/addstickers/{sn})\n"
        # BUG FIX: the original caught the undefined name
        # ``StickerSetInvalidError`` (the import above is
        # ``StickersetInvalidError``), so a dead pack raised NameError
        # instead of being pruned.
        except StickersetInvalidError:
            if owned.get("anim") and sn in owned["anim"]:
                owned["anim"].remove(sn)
            elif owned.get("vid") and sn in owned["vid"]:
                owned["vid"].remove(sn)
            # Guard the static branch too — the original unconditionally did
            # ``owned["static"].remove(sn)``, risking KeyError/ValueError.
            elif owned.get("static") and sn in owned["static"]:
                owned["static"].remove(sn)
    udB.set_key("STICKERS", store)
    await ult.reply(msg)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.