blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6e6ab7d02971a974af8aa461a3f148f5f4eb1a9a | d0ee0d199ee5595ba38615ecd89ada5be334a0f9 | /button.py | 886e4b542c33024d533ee064950f15c00beb24ca | [] | no_license | davidChibueze/Alien-Force-Invasion | ab045388aee61a0e9c3c6ced726a83cb8e50531a | 68fe0634273a1a019a0a3dfe06c34b9a7e0603de | refs/heads/main | 2023-04-22T06:06:52.932349 | 2021-05-14T12:06:19 | 2021-05-14T12:06:19 | 367,349,070 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,099 | py | import pygame.font
class Button:
def __init__(self, ai_game, msg):
"""Initialize button attributes."""
self.screen = ai_game.screen
self.screen_rect = self.screen.get_rect()
# Set the dimensions and properties of the button.
self.width, self.height = 200, 50
self.button_color = (0, 255, 0)
self.text_color = (255, 255, 255)
self.font = pygame.font.SysFont(None, 48)
# Build the button's rect object and center it.
self.rect = pygame.Rect(0, 0, self.width, self.height)
self.rect.center = self.screen_rect.center
# The button message needs to be prepped only once.
self._prep_msg(msg)
def _prep_msg(self, msg):
"""Turn msg into a rendered image and center text on the button."""
self.msg_image = self.font.render(msg, True, self.text_color, self.button_color)
self.msg_image_rect = self.msg_image.get_rect()
self.msg_image_rect.center = self.rect.center
def draw_button(self):
# Draw blank button and then draw message.
self.screen.fill(self.button_color, self.rect)
self.screen.blit(self.msg_image, self.msg_image_rect) | [
"noreply@github.com"
] | noreply@github.com |
9258811529068e0ef737d4531c5f0d6ea7426561 | 4d6975caece0acdc793a41e8bc6d700d8c2fec9a | /leetcode/1692.number-of-ways-to-reorder-array-to-get-same-bst/1692.number-of-ways-to-reorder-array-to-get-same-bst.py | 00644cb83012c5b2e15d2232e9f6f7f861427b4f | [] | no_license | guiconti/workout | 36a3923f2381d6e7023e127100409b3a2e7e4ccb | 5162d14cd64b720351eb30161283e8727cfcf376 | refs/heads/master | 2021-08-03T10:32:02.108714 | 2021-07-26T04:38:14 | 2021-07-26T04:38:14 | 221,025,113 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | class Solution:
def numOfWays(self, nums: List[int]) -> int:
| [
"guibasconti@gmail.com"
] | guibasconti@gmail.com |
0ab0e12de59845ee780146749d355e57acb66977 | 1783102b22fae04b4c32552d9b6191cc18ef6eee | /app/map_maker_app.py | 51fbfc331ab15d54e43e89c3068c288cbbe14ab8 | [] | no_license | ryanbeales/photo_library | 61b3f13db19ccd3e4cd45441aceced9793ce2b5f | 41295b26d7fde5a1783fce72e932472ec704bd3b | refs/heads/main | 2023-08-16T22:00:24.539270 | 2021-10-15T14:58:09 | 2021-10-15T14:58:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,441 | py | from config import config
from processed_images.processed_images import LockingProcessedImages
from progress.bar import Bar
from datetime import datetime
import folium
import folium.plugins as folium_plugins
import os
import base64
import io
from PIL import Image
from concurrent.futures import ThreadPoolExecutor, wait, ALL_COMPLETED
import logging
logger = logging.getLogger(__name__)
def make_popup(imagedata):
img = Image.open(io.BytesIO(base64.b64decode(imagedata)))
width, height = 128, 128
img.thumbnail((width, height, ))
buffered = io.BytesIO()
img.save(buffered, format="JPEG")
result = base64.b64encode(buffered.getvalue()).decode('utf-8')
html = '<img src="data:image/jpeg;base64,{}">'.format
iframe = folium.IFrame(html(result), width=width+20, height=height+20)
return folium.Popup(iframe, max_width=width+20)
def single_image_process(photos, photo, progress_callback):
p = photos.retrieve(photo)
location = [p.latitude, p.longitude]
popup = make_popup(p.thumbnail)
icon = folium.Icon(color='red', icon='ok')
progress_callback()
return location, popup, icon
def date_range_map(photos, start_date, end_date):
print(f'Generating marker cluster map for date range: {start_date} - {end_date}')
photodaterange = photos.get_file_list_date_range(start_date, end_date)
mapdata = []
mappopups = []
mapicons = []
print('Launching threads to process markers')
progress = Bar('Making markers', width=110, max=len(photodaterange), suffix='%(index)d/%(max)d - %(eta)ds')
with ThreadPoolExecutor() as executor:
results = [
executor.submit(
single_image_process,
photos,
photo,
progress.next
)
for photo in photodaterange
]
wait(results, return_when=ALL_COMPLETED)
print('Threads completed, getting results')
for result in results:
if result.result():
location, popup, icon = result.result()
mapdata.append(location)
mappopups.append(popup)
mapicons.append(icon)
progress.finish()
print('Adding points to map...')
mc = folium_plugins.MarkerCluster(
locations = mapdata,
popups = mappopups,
icons = mapicons
)
m = folium.Map(control_scale=True)
m.add_child(mc)
m.save(config['DEFAULT']['output_dir'] + os.sep + 'marker_cluster.html')
print('Marker cluster map generated!')
def heatmap(photos):
print('Generating heat map')
m = folium.Map(control_scale=True)
locations = photos.get_locations()
data = [[r[1],r[2]] for r in locations]
heatmap = folium_plugins.HeatMap(data)
m.add_child(heatmap)
m.save(config['DEFAULT']['output_dir'] + os.sep + 'heatmap.html')
print('Done generating heat map')
if __name__ == '__main__':
photos = LockingProcessedImages(db_dir=config['photo_database']['database_dir'])
photos.load()
if config['map_maker'].getboolean('heatmap'):
heatmap(photos)
if config['map_maker'].getboolean('date_range_map'):
start_date = datetime.strptime(config['map_maker']['date_range_start'], '%d-%m-%Y')
end_date = datetime.strptime(config['map_maker']['date_range_end'], '%d-%m-%Y')
date_range_map(photos, start_date, end_date)
photos.close() | [
"ryanbeales@gmail.com"
] | ryanbeales@gmail.com |
e30926a419b5d166b02a76f3f5c8ed329de20e60 | ff9fedd28f7436ba9945421e061fd2e1dadbf5c3 | /Alogithms/Dijkstra/dijkstra.py | 3d1510e8e6c59b494d2b934513ca7381f575586b | [] | no_license | ritwikbadola/Empirical-Analysis-Of-Algorithms | 0ed1b9c2c92813d11af33405527a4ecced8b2845 | 7ffb7a03e9d356d5368d2d79a49a8dabf49ed6c7 | refs/heads/master | 2022-08-19T12:39:24.875859 | 2020-05-16T03:53:35 | 2020-05-16T03:53:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,979 | py | # Python program for Dijkstra's single
# source shortest path algorithm. The program is
# for adjacency matrix representation of the graph
# Library for INT_MAX
import sys
class Graph():
def __init__(self, vertices):
self.V = vertices
self.graph = [[0 for column in range(vertices)]
for row in range(vertices)]
def printSolution(self, dist):
print "Vertex \tDistance from Source"
for node in range(self.V):
print node, "\t", dist[node]
# A utility function to find the vertex with
# minimum distance value, from the set of vertices
# not yet included in shortest path tree
def minDistance(self, dist, sptSet):
# Initilaize minimum distance for next node
min = sys.maxint
# Search not nearest vertex not in the
# shortest path tree
for v in range(self.V):
if dist[v] < min and sptSet[v] == False:
min = dist[v]
min_index = v
return min_index
# Funtion that implements Dijkstra's single source
# shortest path algorithm for a graph represented
# using adjacency matrix representation
def dijkstra(self, src):
dist = [sys.maxint] * self.V
dist[src] = 0
sptSet = [False] * self.V
for cout in range(self.V):
# Pick the minimum distance vertex from
# the set of vertices not yet processed.
# u is always equal to src in first iteration
u = self.minDistance(dist, sptSet)
# Put the minimum distance vertex in the
# shotest path tree
sptSet[u] = True
# Update dist value of the adjacent vertices
# of the picked vertex only if the current
# distance is greater than new distance and
# the vertex in not in the shotest path tree
for v in range(self.V):
if self.graph[u][v] > 0 and sptSet[v] == False and \
dist[v] > dist[u] + self.graph[u][v]:
dist[v] = dist[u] + self.graph[u][v]
# self.printSolution(dist)
# Driver program
g = Graph(25)
g.graph = [ [0, 156, 0, 0, 246, 0, 184, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 462, 0, 0, 171, 0, 157, 0, 363],
[156, 0, 323, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 323, 0, 151, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 151, 0, 0, 545, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[246, 0, 0, 0, 0, 174, 0, 100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 545, 174, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[184, 0, 0, 0, 0, 0, 0, 83, 224, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 192, 0, 0, 0],
[0, 0, 0, 0, 100, 0, 83, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 224, 0, 0, 209, 0, 0, 0, 0, 217, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 209, 0, 116, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 116, 0, 180, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 180, 0, 157, 251, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 157, 0, 342, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 251, 342, 0, 111, 208, 0, 0, 0, 0, 0, 382, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 217, 0, 0, 0, 0, 111, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 208, 0, 0, 335, 462, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 335, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[462, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 462, 0, 0, 212, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 212, 0, 135, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 135, 0, 174, 0, 0, 0, 0],
[171, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 174, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 192, 0, 0, 0, 0, 0, 0, 382, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[157, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 171, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 171, 0, 0],
[363, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] ];
g.dijkstra(0);
# This code is contributed by Divyanshu Mehta
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
fc72058027cff3d6df1073e107bb3a426e164f7b | 85b6e009c45f2dd530d8ae186feb7e6e67d076a8 | /cohesity_management_sdk/models/protection_job_request.py | 3109e3d98f4406b033242dbb266e3567bd18c46e | [
"MIT"
] | permissive | priyambiswas0/management-sdk-python | 4a60153b038d0a04de02f2308362a2531b0ff9cb | 5807c85e003f271ce069b52529b31abfd08ec153 | refs/heads/master | 2021-10-20T05:43:34.626369 | 2018-05-22T06:04:20 | 2019-02-25T23:56:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,335 | py | # Copyright 2019 Cohesity Inc.
# -*- coding: utf-8 -*-
import cohesity_management_sdk.models.alerting_config
import cohesity_management_sdk.models.cloud_parameters
import cohesity_management_sdk.models.environment_specific_common_job_parameters
import cohesity_management_sdk.models.time_of_day
import cohesity_management_sdk.models.indexing_policy
import cohesity_management_sdk.models.backup_script
import cohesity_management_sdk.models.remote_adapter
import cohesity_management_sdk.models.source_special_parameters
class ProtectionJobRequest(object):
"""Implementation of the 'Protection Job Request.' model.
Specifies information about a Protection Job.
Attributes:
abort_in_blackout_period (bool): If true, the Cohesity Cluster aborts
any currently executing Job Runs of this Protection Job when a
blackout period specified for this Job starts, even if the Job Run
started before the blackout period began. If false, a Job Run
continues to execute, if the Job Run started before the blackout
period starts.
alerting_config (AlertingConfig): Specifies optional settings for
alerting.
alerting_policy (list of AlertingPolicyEnum): Array of Job Events.
During Job Runs, the following Job Events are generated: 1) Job
succeeds 2) Job fails 3) Job violates the SLA These Job Events can
cause Alerts to be generated. 'kSuccess' means the Protection Job
succeeded. 'kFailure' means the Protection Job failed.
'kSlaViolation' means the Protection Job took longer than the time
period specified in the SLA.
cloud_parameters (CloudParameters): Specifies Cloud parameters that
are applicable to all Protection Sources in a Protection Job in
certain scenarios.
continue_on_quiesce_failure (bool): Whether to continue backing up on
quiesce failure.
dedup_disabled_source_ids (list of long|int): List of source ids for
which source side dedup is disabled from the backup job.
description (string): Specifies a text description about the
Protection Job.
end_time_usecs (long|int): Specifies the epoch time (in microseconds)
after which the Protection Job becomes dormant.
environment (Environment10Enum): Specifies the environment type (such
as kVMware or kSQL) of the Protection Source this Job is
protecting. Supported environment types such as 'kView', 'kSQL',
'kVMware', etc. NOTE: 'kPuppeteer' refers to Cohesity's Remote
Adapter. 'kVMware' indicates the VMware Protection Source
environment. 'kHyperV' indicates the HyperV Protection Source
environment. 'kSQL' indicates the SQL Protection Source
environment. 'kView' indicates the View Protection Source
environment. 'kPuppeteer' indicates the Cohesity's Remote Adapter.
'kPhysical' indicates the physical Protection Source environment.
'kPure' indicates the Pure Storage Protection Source environment.
'kAzure' indicates the Microsoft's Azure Protection Source
environment. 'kNetapp' indicates the Netapp Protection Source
environment. 'kAgent' indicates the Agent Protection Source
environment. 'kGenericNas' indicates the Genreric Network Attached
Storage Protection Source environment. 'kAcropolis' indicates the
Acropolis Protection Source environment. 'kPhsicalFiles' indicates
the Physical Files Protection Source environment. 'kIsilon'
indicates the Dell EMC's Isilon Protection Source environment.
'kKVM' indicates the KVM Protection Source environment. 'kAWS'
indicates the AWS Protection Source environment. 'kExchange'
indicates the Exchange Protection Source environment. 'kHyperVVSS'
indicates the HyperV VSS Protection Source environment. 'kOracle'
indicates the Oracle Protection Source environment. 'kGCP'
indicates the Google Cloud Platform Protection Source environment.
'kFlashBlade' indicates the Flash Blade Protection Source
environment. 'kAWSNative' indicates the AWS Native Protection
Source environment. 'kVCD' indicates the VMware's Virtual cloud
Director Protection Source environment. 'kO365' indicates the
Office 365 Protection Source environment. 'kO365Outlook' indicates
Office 365 outlook Protection Source environment. 'kHyperFlex'
indicates the Hyper Flex Protection Source environment.
'kGCPNative' indicates the GCP Native Protection Source
environment. 'kAzureNative' indicates the Azure Native Protection
Source environment.
environment_parameters (EnvironmentSpecificCommonJobParameters):
Specifies additional parameters that are common to all Protection
Sources in a Protection Job created for a particular environment
type.
exclude_source_ids (list of long|int): Array of Excluded Source
Objects. List of Object ids from a Protection Source that should
not be protected and are excluded from being backed up by the
Protection Job. Leaf and non-leaf Objects may be in this list and
an Object in this list must have an ancestor in the sourceId
list.
exclude_vm_tag_ids (list of long|int): Array of Arrays of VM Tag Ids
that Specify VMs to Exclude. Optionally specify a list of VMs to
exclude from protecting by listing Protection Source ids of VM
Tags in this two dimensional array. Using this two dimensional
array of Tag ids, the Cluster generates a list of VMs to exclude
from protecting, which are derived from intersections of the inner
arrays and union of the outer array, as shown by the following
example. For example a Datacenter is selected to be protected but
you want to exclude all the 'Former Employees' VMs in the East and
West but keep all the VMs for 'Former Employees' in the South
which are also stored in this Datacenter, by specifying the
following tag id array: [ [1000, 2221], [1000, 3031] ], where 1000
is the 'Former Employee' VM Tag id, 2221 is the 'East' VM Tag id
and 3031 is the 'West' VM Tag id. The first inner array [1000,
2221] produces a list of VMs that are both tagged with 'Former
Employees' and 'East' (an intersection). The second inner array
[1000, 3031] produces a list of VMs that are both tagged with
'Former Employees' and 'West' (an intersection). The outer array
combines the list of VMs from the two inner arrays. The list of
resulting VMs are excluded from being protected this Job.
full_protection_sla_time_mins (long|int): If specified, this setting
is number of minutes that a Job Run of a Full (no CBT) backup
schedule is expected to complete, which is known as a
Service-Level Agreement (SLA). A SLA violation is reported when
the run time of a Job Run exceeds the SLA time period specified
for this backup schedule.
full_protection_start_time (TimeOfDay): Specifies the time of day to
start the Full Protection Schedule. This is optional and only
applicable if the Protection Policy defines a monthly or a daily
Full (no CBT) Protection Schedule. Default value is 02:00 AM.
deprecated: true
incremental_protection_sla_time_mins (long|int): If specified, this
setting is number of minutes that a Job Run of a CBT-based backup
schedule is expected to complete, which is known as a
Service-Level Agreement (SLA). A SLA violation is reported when
the run time of a Job Run exceeds the SLA time period specified
for this backup schedule.
incremental_protection_start_time (TimeOfDay): Specifies the time of
day to start the CBT-based Protection Schedule. This is optional
and only applicable if the Protection Policy defines a monthly or
a daily CBT-based Protection Schedule. Default value is 02:00 AM.
deprecated: true
indexing_policy (IndexingPolicy): Specifies settings for indexing
files found in an Object (such as a VM) so these files can be
searched and recovered. This also specifies inclusion and
exclusion rules that determine the directories to index.
leverage_storage_snapshots (bool): Specifies whether to leverage the
storage array based snapshots for this backup job. To leverage
storage snapshots, the storage array has to be registered as a
source. If storage based snapshots can not be taken, job will
fallback to the default backup method.
leverage_storage_snapshots_for_hyperflex (bool): Specifies whether to
leverage Hyperflex as the storage snapshot array
name (string): Specifies the name of the Protection Job.
parent_source_id (long|int): Specifies the id of the registered
Protection Source that is the parent of the Objects that may be
protected by this Job. For example when a vCenter Server is
registered on a Cohesity Cluster, the Cohesity Cluster assigns a
unique id to this field that represents the vCenter Server.
perform_source_side_dedup (bool): Specifies whether source side dedupe
should be performed or not.
policy_id (string): Specifies the unique id of the Protection Policy
associated with the Protection Job. The Policy provides retry
settings, Protection Schedules, Priority, SLA, etc. The Job
defines the Storage Domain (View Box), the Objects to Protect (if
applicable), Start Time, Indexing settings, etc.
post_backup_script (BackupScript): Specifies the script associated
with the backup job. This field must be specified for 'kPhysical'
jobs. This script will be executed post backup run.
pre_backup_script (BackupScript): Specifies the script associated with
the backup job. This field must be specified for 'kPhysical' jobs.
This script will be executed pre backup run. The 'remoteScript'
field will be used for remote adapter jobs and 'preBackupScript'
field will be used for 'kPhysical' jobs.
priority (PriorityEnum): Specifies the priority of execution for a
Protection Job. Cohesity supports concurrent backups but if the
number of Jobs exceeds the ability to process Jobs, the specified
priority determines the execution Job priority. This field also
specifies the replication priority. 'kLow' indicates lowest
execution priority for a Protection job. 'kMedium' indicates
medium execution priority for a Protection job. 'kHigh' indicates
highest execution priority for a Protection job.
qos_type (QosTypeEnum): Specifies the QoS policy type to use for this
Protection Job. 'kBackupHDD' indicates the Cohesity Cluster writes
data directly to the HDD tier for this Protection Job. This is the
recommended setting. 'kBackupSSD' indicates the Cohesity Cluster
writes data directly to the SSD tier for this Protection Job. Only
specify this policy if you need fast ingest speed for a small
number of Protection Jobs.
quiesce (bool): Indicates if the App-Consistent option is enabled for
this Job. If the option is enabled, the Cohesity Cluster quiesces
the file system and applications before taking
Application-Consistent Snapshots. VMware Tools must be installed
on the guest Operating System.
remote_script (RemoteAdapter): For a Remote Adapter 'kPuppeteer' Job,
this field specifies the settings about the remote script that
will be executed by this Job. Only specify this field for Remote
Adapter 'kPuppeteer' Jobs.
source_ids (list of long|int): Array of Protected Source Objects.
Specifies the list of Object ids from the Protection Source to
protect (or back up) by the Protection Job. An Object in this list
may be descendant of another Object in this list. For example a
Datacenter could be selected but its child Host excluded. However,
a child VM under the Host could be explicitly selected to be
protected. Both the Datacenter and the VM are listed.
source_special_parameters (list of SourceSpecialParameters): Array of
Special Source Parameters. Specifies additional settings that can
apply to a subset of the Sources listed in the Protection Job. For
example, you can specify a list of files and folders to protect
instead of protecting the entire Physical Server. If this field's
setting conflicts with environmentParameters, then this setting
will be used.
start_time (TimeOfDay): Specifies the time of day to start the
Protection Schedule. This is optional and only applicable if the
Protection Policy defines a monthly or a daily Protection
Schedule. Default value is 02:00 AM.
timezone (string): Specifies the timezone to use when calculating time
for this Protection Job such as the Job start time. Specify the
timezone in the following format: "Area/Location", for example:
"America/New_York".
view_box_id (long|int): Specifies the Storage Domain (View Box) id
where this Job writes data.
view_name (string): For a Remote Adapter 'kPuppeteer' Job or a 'kView'
Job, this field specifies a View name that should be protected.
Specify this field when creating a Protection Job for the first
time for a View. If this field is specified, ParentSourceId,
SourceIds, and ExcludeSourceIds should not be specified.
vm_tag_ids (list of long|int): Array of Arrays of VMs Tags Ids that
Specify VMs to Protect. Optionally specify a list of VMs to
protect by listing Protection Source ids of VM Tags in this two
dimensional array. Using this two dimensional array of Tag ids,
the Cluster generates a list of VMs to protect which are derived
from intersections of the inner arrays and union of the outer
array, as shown by the following example. To protect only 'Eng'
VMs in the East and all the VMs in the West, specify the following
tag id array: [ [1101, 2221], [3031] ], where 1101 is the 'Eng' VM
Tag id, 2221 is the 'East' VM Tag id and 3031 is the 'West' VM Tag
id. The inner array [1101, 2221] produces a list of VMs that are
both tagged with 'Eng' and 'East' (an intersection). The outer
array combines the list from the inner array with list of VMs
tagged with 'West' (a union). The list of resulting VMs are
protected by this Job.
"""
# Create a mapping from Model property names to API property names
_names = {
"name":'name',
"policy_id":'policyId',
"view_box_id":'viewBoxId',
"abort_in_blackout_period":'abortInBlackoutPeriod',
"alerting_config":'alertingConfig',
"alerting_policy":'alertingPolicy',
"cloud_parameters":'cloudParameters',
"continue_on_quiesce_failure":'continueOnQuiesceFailure',
"dedup_disabled_source_ids":'dedupDisabledSourceIds',
"description":'description',
"end_time_usecs":'endTimeUsecs',
"environment":'environment',
"environment_parameters":'environmentParameters',
"exclude_source_ids":'excludeSourceIds',
"exclude_vm_tag_ids":'excludeVmTagIds',
"full_protection_sla_time_mins":'fullProtectionSlaTimeMins',
"full_protection_start_time":'fullProtectionStartTime',
"incremental_protection_sla_time_mins":'incrementalProtectionSlaTimeMins',
"incremental_protection_start_time":'incrementalProtectionStartTime',
"indexing_policy":'indexingPolicy',
"leverage_storage_snapshots":'leverageStorageSnapshots',
"leverage_storage_snapshots_for_hyperflex":'leverageStorageSnapshotsForHyperflex',
"parent_source_id":'parentSourceId',
"perform_source_side_dedup":'performSourceSideDedup',
"post_backup_script":'postBackupScript',
"pre_backup_script":'preBackupScript',
"priority":'priority',
"qos_type":'qosType',
"quiesce":'quiesce',
"remote_script":'remoteScript',
"source_ids":'sourceIds',
"source_special_parameters":'sourceSpecialParameters',
"start_time":'startTime',
"timezone":'timezone',
"view_name":'viewName',
"vm_tag_ids":'vmTagIds'
}
def __init__(self,
name=None,
policy_id=None,
view_box_id=None,
abort_in_blackout_period=None,
alerting_config=None,
alerting_policy=None,
cloud_parameters=None,
continue_on_quiesce_failure=None,
dedup_disabled_source_ids=None,
description=None,
end_time_usecs=None,
environment=None,
environment_parameters=None,
exclude_source_ids=None,
exclude_vm_tag_ids=None,
full_protection_sla_time_mins=None,
full_protection_start_time=None,
incremental_protection_sla_time_mins=None,
incremental_protection_start_time=None,
indexing_policy=None,
leverage_storage_snapshots=None,
leverage_storage_snapshots_for_hyperflex=None,
parent_source_id=None,
perform_source_side_dedup=None,
post_backup_script=None,
pre_backup_script=None,
priority=None,
qos_type=None,
quiesce=None,
remote_script=None,
source_ids=None,
source_special_parameters=None,
start_time=None,
timezone=None,
view_name=None,
vm_tag_ids=None):
"""Constructor for the ProtectionJobRequest class"""
# Initialize members of the class
self.abort_in_blackout_period = abort_in_blackout_period
self.alerting_config = alerting_config
self.alerting_policy = alerting_policy
self.cloud_parameters = cloud_parameters
self.continue_on_quiesce_failure = continue_on_quiesce_failure
self.dedup_disabled_source_ids = dedup_disabled_source_ids
self.description = description
self.end_time_usecs = end_time_usecs
self.environment = environment
self.environment_parameters = environment_parameters
self.exclude_source_ids = exclude_source_ids
self.exclude_vm_tag_ids = exclude_vm_tag_ids
self.full_protection_sla_time_mins = full_protection_sla_time_mins
self.full_protection_start_time = full_protection_start_time
self.incremental_protection_sla_time_mins = incremental_protection_sla_time_mins
self.incremental_protection_start_time = incremental_protection_start_time
self.indexing_policy = indexing_policy
self.leverage_storage_snapshots = leverage_storage_snapshots
self.leverage_storage_snapshots_for_hyperflex = leverage_storage_snapshots_for_hyperflex
self.name = name
self.parent_source_id = parent_source_id
self.perform_source_side_dedup = perform_source_side_dedup
self.policy_id = policy_id
self.post_backup_script = post_backup_script
self.pre_backup_script = pre_backup_script
self.priority = priority
self.qos_type = qos_type
self.quiesce = quiesce
self.remote_script = remote_script
self.source_ids = source_ids
self.source_special_parameters = source_special_parameters
self.start_time = start_time
self.timezone = timezone
self.view_box_id = view_box_id
self.view_name = view_name
self.vm_tag_ids = vm_tag_ids
@classmethod
def from_dictionary(cls,
                    dictionary):
    """Creates an instance of this model from a dictionary.

    Args:
        dictionary (dict): A dictionary representation of the object as
            obtained from the deserialization of the server's response.
            The keys MUST match property names in the API description.

    Returns:
        object: An instance of this structure class, or None when
        `dictionary` is None.
    """
    if dictionary is None:
        return None

    # Extract variables from the dictionary.  Scalar fields come straight
    # from .get(); nested models are deserialized through their own
    # from_dictionary() and default to None when the key is absent.
    name = dictionary.get('name')
    policy_id = dictionary.get('policyId')
    view_box_id = dictionary.get('viewBoxId')
    abort_in_blackout_period = dictionary.get('abortInBlackoutPeriod')
    alerting_config = cohesity_management_sdk.models.alerting_config.AlertingConfig.from_dictionary(dictionary.get('alertingConfig')) if dictionary.get('alertingConfig') else None
    alerting_policy = dictionary.get('alertingPolicy')
    cloud_parameters = cohesity_management_sdk.models.cloud_parameters.CloudParameters.from_dictionary(dictionary.get('cloudParameters')) if dictionary.get('cloudParameters') else None
    continue_on_quiesce_failure = dictionary.get('continueOnQuiesceFailure')
    dedup_disabled_source_ids = dictionary.get('dedupDisabledSourceIds')
    description = dictionary.get('description')
    end_time_usecs = dictionary.get('endTimeUsecs')
    environment = dictionary.get('environment')
    environment_parameters = cohesity_management_sdk.models.environment_specific_common_job_parameters.EnvironmentSpecificCommonJobParameters.from_dictionary(dictionary.get('environmentParameters')) if dictionary.get('environmentParameters') else None
    exclude_source_ids = dictionary.get('excludeSourceIds')
    exclude_vm_tag_ids = dictionary.get('excludeVmTagIds')
    full_protection_sla_time_mins = dictionary.get('fullProtectionSlaTimeMins')
    full_protection_start_time = cohesity_management_sdk.models.time_of_day.TimeOfDay.from_dictionary(dictionary.get('fullProtectionStartTime')) if dictionary.get('fullProtectionStartTime') else None
    incremental_protection_sla_time_mins = dictionary.get('incrementalProtectionSlaTimeMins')
    incremental_protection_start_time = cohesity_management_sdk.models.time_of_day.TimeOfDay.from_dictionary(dictionary.get('incrementalProtectionStartTime')) if dictionary.get('incrementalProtectionStartTime') else None
    indexing_policy = cohesity_management_sdk.models.indexing_policy.IndexingPolicy.from_dictionary(dictionary.get('indexingPolicy')) if dictionary.get('indexingPolicy') else None
    leverage_storage_snapshots = dictionary.get('leverageStorageSnapshots')
    leverage_storage_snapshots_for_hyperflex = dictionary.get('leverageStorageSnapshotsForHyperflex')
    parent_source_id = dictionary.get('parentSourceId')
    perform_source_side_dedup = dictionary.get('performSourceSideDedup')
    post_backup_script = cohesity_management_sdk.models.backup_script.BackupScript.from_dictionary(dictionary.get('postBackupScript')) if dictionary.get('postBackupScript') else None
    pre_backup_script = cohesity_management_sdk.models.backup_script.BackupScript.from_dictionary(dictionary.get('preBackupScript')) if dictionary.get('preBackupScript') else None
    priority = dictionary.get('priority')
    qos_type = dictionary.get('qosType')
    quiesce = dictionary.get('quiesce')
    remote_script = cohesity_management_sdk.models.remote_adapter.RemoteAdapter.from_dictionary(dictionary.get('remoteScript')) if dictionary.get('remoteScript') else None
    source_ids = dictionary.get('sourceIds')
    # 'is not None' (was '!= None', see PEP 8): an empty list must still
    # round-trip as an empty list rather than None.
    source_special_parameters = None
    if dictionary.get('sourceSpecialParameters') is not None:
        source_special_parameters = [
            cohesity_management_sdk.models.source_special_parameters.SourceSpecialParameters.from_dictionary(structure)
            for structure in dictionary.get('sourceSpecialParameters')]
    start_time = cohesity_management_sdk.models.time_of_day.TimeOfDay.from_dictionary(dictionary.get('startTime')) if dictionary.get('startTime') else None
    timezone = dictionary.get('timezone')
    view_name = dictionary.get('viewName')
    vm_tag_ids = dictionary.get('vmTagIds')

    # Return an object of this model; positional order must match __init__.
    return cls(name,
               policy_id,
               view_box_id,
               abort_in_blackout_period,
               alerting_config,
               alerting_policy,
               cloud_parameters,
               continue_on_quiesce_failure,
               dedup_disabled_source_ids,
               description,
               end_time_usecs,
               environment,
               environment_parameters,
               exclude_source_ids,
               exclude_vm_tag_ids,
               full_protection_sla_time_mins,
               full_protection_start_time,
               incremental_protection_sla_time_mins,
               incremental_protection_start_time,
               indexing_policy,
               leverage_storage_snapshots,
               leverage_storage_snapshots_for_hyperflex,
               parent_source_id,
               perform_source_side_dedup,
               post_backup_script,
               pre_backup_script,
               priority,
               qos_type,
               quiesce,
               remote_script,
               source_ids,
               source_special_parameters,
               start_time,
               timezone,
               view_name,
               vm_tag_ids)
| [
"ashish@cohesity.com"
] | ashish@cohesity.com |
e17345f6cf00a1d2eedbf04969bc6da4e66e9878 | cce1b624c5d41d8a5e832217a928225b45f62b15 | /mysite/polls/models.py | 93d1e0a3856579251dbc219036c60bdf13f0f39c | [] | no_license | SauravL3010/Django | 8b192c9d57606ebd3d1f0f310689b3c14ad6e2f6 | 021c80357e98ebafc2bbb6e60b1a5a473a79f20f | refs/heads/main | 2023-02-06T15:34:11.876571 | 2020-12-31T01:56:38 | 2020-12-31T01:56:38 | 324,661,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | import datetime
from django.db import models
from django.utils import timezone
class Question(models.Model):
    """A poll question with its publication timestamp."""

    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')

    def recently_pub(self):
        """Return True if the question was published within the last day.

        Bug fix: the original `self.pub_date >= now - 1 day` also returned
        True for questions whose pub_date is in the future; bound the check
        on both sides.
        """
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.pub_date <= now

    def __str__(self):
        # Human-readable representation used by the admin and the shell.
        return self.question_text
class Choice(models.Model):
    """One selectable answer for a poll Question, with a running vote tally."""

    # Each choice belongs to exactly one question; deleting the question
    # cascades and removes its choices.
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    # Vote counter, starts at zero for a fresh choice.
    votes = models.IntegerField(default=0)

    def __str__(self):
        # Human-readable representation used by the admin and the shell.
        return self.choice_text
| [
"slgurhal@uwaterloo.ca"
] | slgurhal@uwaterloo.ca |
7b97ec507375533d56ca683bf1a913138e0a7955 | e52bf115107bc31cd812cb5573bfa85900ecfaff | /eval-parcial-primer-bimestre/Ejercicio_1.py | e12c51a086dfa575494f7ea564cfe9d2509f06cc | [] | no_license | cfjimbo/fp-utpl-18-evaluaciones | cd0cbc793cb11f0d297c9dd2c445991d2b183e9a | 0353656bae322848d7732edb39d7d7f25e1bb275 | refs/heads/master | 2020-03-14T04:43:58.676649 | 2018-05-01T06:36:55 | 2018-05-01T06:36:55 | 131,448,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | """Realizar un programa en Java que permita ingresar por teclado la longitud y la anchura de una
habitación, realizar los procesos respectivos que permita obtener la superficie de la misma, además
se debe presentar en pantalla el valor de la superficie, finalmente tomar en consideración que se
debe presentar el valor con 3 decimales
"""
def calcular_superficie(longitud, ancho):
    """Devuelve la superficie (longitud * ancho) de la habitacion."""
    return longitud * ancho


if __name__ == "__main__":
    # Read the room dimensions from the keyboard.
    longitud = float(input("Longitud de la habitacion: "))
    ancho = float(input("Ancho de la habitacion: "))
    superficie = calcular_superficie(longitud, ancho)
    # The exercise requires exactly three decimals: '{:.3f}' always prints
    # three, whereas round(x, 3) drops trailing zeros (e.g. 12.5, not 12.500).
    print("La superficie de la habitacion es: {:.3f}\n".format(superficie))
"noreply@github.com"
] | noreply@github.com |
15cfbda2f912b5560429a9729b5ac0d60497f097 | 97495220db95d0ba4a4a7e7ad1863f8a49fc97df | /feat_ext/nets/resnet_v1.py | ee92afabd43407f356494d1605f8b9191ecc746a | [
"Apache-2.0"
] | permissive | forwchen/HVTG | cfdbc7a774fb3b911519a0e83b9edf88f772653a | b800ea3e1b9067389db98e4a9f6de3ce702aa081 | refs/heads/master | 2022-12-07T03:48:45.334685 | 2020-08-25T03:01:24 | 2020-08-25T03:01:24 | 278,540,659 | 17 | 2 | null | null | null | null | UTF-8 | Python | false | false | 16,634 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for the original form of Residual Networks.
The 'v1' residual networks (ResNets) implemented in this module were proposed
by:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
Other variants were introduced in:
[2] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Identity Mappings in Deep Residual Networks. arXiv: 1603.05027
The networks defined in this module utilize the bottleneck building block of
[1] with projection shortcuts only for increasing depths. They employ batch
normalization *after* every weight layer. This is the architecture used by
MSRA in the Imagenet and MSCOCO 2016 competition models ResNet-101 and
ResNet-152. See [2; Fig. 1a] for a comparison between the current 'v1'
architecture and the alternative 'v2' architecture of [2] which uses batch
normalization *before* every weight layer in the so-called full pre-activation
units.
Typical use:
from tensorflow.contrib.slim.nets import resnet_v1
ResNet-101 for image classification into 1000 classes:
# inputs has shape [batch, 224, 224, 3]
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
net, end_points = resnet_v1.resnet_v1_101(inputs, 1000, is_training=False)
ResNet-101 for semantic segmentation into 21 classes:
# inputs has shape [batch, 513, 513, 3]
with slim.arg_scope(resnet_v1.resnet_arg_scope()):
net, end_points = resnet_v1.resnet_v1_101(inputs,
21,
is_training=False,
global_pool=False,
output_stride=16)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import resnet_utils
resnet_arg_scope = resnet_utils.resnet_arg_scope
slim = tf.contrib.slim
@slim.add_arg_scope
def bottleneck(inputs,
               depth,
               depth_bottleneck,
               stride,
               rate=1,
               outputs_collections=None,
               scope=None,
               use_bounded_activations=False):
  """Bottleneck residual unit variant with BN after convolutions.

  This is the original residual unit proposed in [1]. See Fig. 1(a) of [2] for
  its definition. Note that we use here the bottleneck variant which has an
  extra bottleneck layer.

  When putting together two consecutive ResNet blocks that use this unit, one
  should use stride = 2 in the last unit of the first block.

  Args:
    inputs: A tensor of size [batch, height, width, channels].
    depth: The depth of the ResNet unit output.
    depth_bottleneck: The depth of the bottleneck layers.
    stride: The ResNet unit's stride. Determines the amount of downsampling of
      the units output compared to its input.
    rate: An integer, rate for atrous convolution.
    outputs_collections: Collection to add the ResNet unit output.
    scope: Optional variable_scope.
    use_bounded_activations: Whether or not to use bounded activations. Bounded
      activations better lend themselves to quantized inference.

  Returns:
    The ResNet unit's output.
  """
  with tf.variable_scope(scope, 'bottleneck_v1', [inputs]) as sc:
    depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
    if depth == depth_in:
      # Identity shortcut: input depth already matches, only subsample.
      shortcut = resnet_utils.subsample(inputs, stride, 'shortcut')
    else:
      # Projection shortcut: 1x1 conv to match the output depth.
      shortcut = slim.conv2d(
          inputs,
          depth, [1, 1],
          stride=stride,
          activation_fn=tf.nn.relu6 if use_bounded_activations else None,
          scope='shortcut')

    # Residual branch: 1x1 reduce -> 3x3 (carries stride/rate) -> 1x1 expand.
    residual = slim.conv2d(inputs, depth_bottleneck, [1, 1], stride=1,
                           scope='conv1')
    residual = resnet_utils.conv2d_same(residual, depth_bottleneck, 3, stride,
                                        rate=rate, scope='conv2')
    residual = slim.conv2d(residual, depth, [1, 1], stride=1,
                           activation_fn=None, scope='conv3')

    if use_bounded_activations:
      # Use clip_by_value to simulate bandpass activation.
      residual = tf.clip_by_value(residual, -6.0, 6.0)
      output = tf.nn.relu6(shortcut + residual)
    else:
      output = tf.nn.relu(shortcut + residual)

    return slim.utils.collect_named_outputs(outputs_collections,
                                            sc.name,
                                            output)
def resnet_v1(inputs,
              blocks,
              num_classes=None,
              is_training=True,
              global_pool=True,
              output_stride=None,
              include_root_block=True,
              spatial_squeeze=True,
              store_non_strided_activations=False,
              reuse=None,
              scope=None):
  """Generator for v1 ResNet models.

  This function generates a family of ResNet v1 models. See the resnet_v1_*()
  methods for specific model instantiations, obtained by selecting different
  block instantiations that produce ResNets of various depths.

  Training for image classification on Imagenet is usually done with [224, 224]
  inputs, resulting in [7, 7] feature maps at the output of the last ResNet
  block for the ResNets defined in [1] that have nominal stride equal to 32.
  However, for dense prediction tasks we advise that one uses inputs with
  spatial dimensions that are multiples of 32 plus 1, e.g., [321, 321]. In
  this case the feature maps at the ResNet output will have spatial shape
  [(height - 1) / output_stride + 1, (width - 1) / output_stride + 1]
  and corners exactly aligned with the input image corners, which greatly
  facilitates alignment of the features to the image. Using as input [225, 225]
  images results in [8, 8] feature maps at the output of the last ResNet block.

  For dense prediction tasks, the ResNet needs to run in fully-convolutional
  (FCN) mode and global_pool needs to be set to False. The ResNets in [1, 2] all
  have nominal stride equal to 32 and a good choice in FCN mode is to use
  output_stride=16 in order to increase the density of the computed features at
  small computational and memory overhead, cf. http://arxiv.org/abs/1606.00915.

  Args:
    inputs: A tensor of size [batch, height_in, width_in, channels].
    blocks: A list of length equal to the number of ResNet blocks. Each element
      is a resnet_utils.Block object describing the units in the block.
    num_classes: Number of predicted classes for classification tasks.
      If 0 or None, we return the features before the logit layer.
    is_training: whether batch_norm layers are in training mode.
    global_pool: If True, we perform global average pooling before computing the
      logits. Set to True for image classification, False for dense prediction.
    output_stride: If None, then the output will be computed at the nominal
      network stride. If output_stride is not None, it specifies the requested
      ratio of input to output spatial resolution.
    include_root_block: If True, include the initial convolution followed by
      max-pooling, if False excludes it.
    spatial_squeeze: if True, logits is of shape [B, C], if false logits is
      of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
      To use this parameter, the input images must be smaller than 300x300
      pixels, in which case the output logit layer does not contain spatial
      information and can be removed.
    store_non_strided_activations: If True, we compute non-strided (undecimated)
      activations at the last unit of each block and store them in the
      `outputs_collections` before subsampling them. This gives us access to
      higher resolution intermediate activations which are useful in some
      dense prediction problems but increases 4x the computation and memory cost
      at the last unit of each block.
    reuse: whether or not the network and its variables should be reused. To be
      able to reuse 'scope' must be given.
    scope: Optional variable_scope.

  Returns:
    net: A rank-4 tensor of size [batch, height_out, width_out, channels_out].
      If global_pool is False, then height_out and width_out are reduced by a
      factor of output_stride compared to the respective height_in and width_in,
      else both height_out and width_out equal one. If num_classes is 0 or None,
      then net is the output of the last ResNet block, potentially after global
      average pooling. If num_classes a non-zero integer, net contains the
      pre-softmax activations.
    end_points: A dictionary from components of the network to the corresponding
      activation.

  Raises:
    ValueError: If the target output_stride is not valid.
  """
  with tf.variable_scope(scope, 'resnet_v1', [inputs], reuse=reuse) as sc:
    end_points_collection = sc.original_name_scope + '_end_points'
    with slim.arg_scope([slim.conv2d, bottleneck,
                         resnet_utils.stack_blocks_dense],
                        outputs_collections=end_points_collection):
      with slim.arg_scope([slim.batch_norm], is_training=is_training):
        net = inputs
        if include_root_block:
          if output_stride is not None:
            if output_stride % 4 != 0:
              raise ValueError('The output_stride needs to be a multiple of 4.')
            # The root block (7x7/2 conv + 3x3/2 pool) already downsamples 4x.
            output_stride /= 4
          net = resnet_utils.conv2d_same(net, 64, 7, stride=2, scope='conv1')
          net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
        net = resnet_utils.stack_blocks_dense(net, blocks, output_stride,
                                              store_non_strided_activations)
        # Convert end_points_collection into a dictionary of end_points.
        end_points = slim.utils.convert_collection_to_dict(
            end_points_collection)

        if global_pool:
          # NOTE(review): 'pre_pool' / 'pre_pool_7x7' are additions to the
          # upstream slim implementation, presumably exposed for feature
          # extraction — confirm with callers before removing.
          end_points['pre_pool'] = net
          end_points['pre_pool_7x7'] = slim.avg_pool2d(net, [7, 7], stride=1, scope='pool1')
          # Global average pooling.
          net = tf.reduce_mean(net, [1, 2], name='pool5', keep_dims=True)
          end_points['global_pool'] = net
        if num_classes:
          # Final 1x1 conv acts as the fully-connected classification layer.
          net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
                            normalizer_fn=None, scope='logits')
          end_points[sc.name + '/logits'] = net
          if spatial_squeeze:
            net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
            end_points[sc.name + '/spatial_squeeze'] = net
          end_points['predictions'] = slim.softmax(net, scope='predictions')
        return net, end_points
resnet_v1.default_image_size = 224
def resnet_v1_block(scope, base_depth, num_units, stride):
  """Helper function for creating a resnet_v1 bottleneck block.

  Builds a block of `num_units` bottleneck units. Every unit expands to
  4 * base_depth output channels; only the final unit carries `stride`,
  all preceding units use stride 1.

  Args:
    scope: The scope of the block.
    base_depth: The depth of the bottleneck layer for each unit.
    num_units: The number of units in the block.
    stride: The stride of the block, implemented as a stride in the last unit.
      All other units have stride=1.

  Returns:
    A resnet_v1 bottleneck block.
  """
  def unit_args(unit_stride):
    # Per-unit keyword arguments passed through to bottleneck().
    return {
        'depth': base_depth * 4,
        'depth_bottleneck': base_depth,
        'stride': unit_stride,
    }

  units = [unit_args(1) for _ in range(num_units - 1)]
  units.append(unit_args(stride))
  return resnet_utils.Block(scope, bottleneck, units)
def resnet_v1_50(inputs,
                 num_classes=None,
                 is_training=True,
                 global_pool=True,
                 output_stride=None,
                 spatial_squeeze=True,
                 store_non_strided_activations=False,
                 reuse=None,
                 scope='resnet_v1_50'):
  """ResNet-50 model of [1]. See resnet_v1() for arg and return description."""
  # Unit counts (3, 4, 6, 3) are what make this the 50-layer variant.
  blocks = [
      resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
      resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
      resnet_v1_block('block3', base_depth=256, num_units=6, stride=2),
      resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
  ]
  return resnet_v1(inputs, blocks, num_classes, is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   store_non_strided_activations=store_non_strided_activations,
                   reuse=reuse, scope=scope)
resnet_v1_50.default_image_size = resnet_v1.default_image_size
def resnet_v1_101(inputs,
                  num_classes=None,
                  is_training=True,
                  global_pool=True,
                  output_stride=None,
                  spatial_squeeze=True,
                  store_non_strided_activations=False,
                  reuse=None,
                  scope='resnet_v1_101'):
  """ResNet-101 model of [1]. See resnet_v1() for arg and return description."""
  # Unit counts (3, 4, 23, 3) are what make this the 101-layer variant.
  blocks = [
      resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
      resnet_v1_block('block2', base_depth=128, num_units=4, stride=2),
      resnet_v1_block('block3', base_depth=256, num_units=23, stride=2),
      resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
  ]
  return resnet_v1(inputs, blocks, num_classes, is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   store_non_strided_activations=store_non_strided_activations,
                   reuse=reuse, scope=scope)
resnet_v1_101.default_image_size = resnet_v1.default_image_size
def resnet_v1_152(inputs,
                  num_classes=None,
                  is_training=True,
                  global_pool=True,
                  output_stride=None,
                  store_non_strided_activations=False,
                  spatial_squeeze=True,
                  reuse=None,
                  scope='resnet_v1_152'):
  """ResNet-152 model of [1]. See resnet_v1() for arg and return description."""
  # Unit counts (3, 8, 36, 3) are what make this the 152-layer variant.
  blocks = [
      resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
      resnet_v1_block('block2', base_depth=128, num_units=8, stride=2),
      resnet_v1_block('block3', base_depth=256, num_units=36, stride=2),
      resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
  ]
  return resnet_v1(inputs, blocks, num_classes, is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   store_non_strided_activations=store_non_strided_activations,
                   reuse=reuse, scope=scope)
resnet_v1_152.default_image_size = resnet_v1.default_image_size
def resnet_v1_200(inputs,
                  num_classes=None,
                  is_training=True,
                  global_pool=True,
                  output_stride=None,
                  store_non_strided_activations=False,
                  spatial_squeeze=True,
                  reuse=None,
                  scope='resnet_v1_200'):
  """ResNet-200 model of [2]. See resnet_v1() for arg and return description."""
  # Unit counts (3, 24, 36, 3) are what make this the 200-layer variant.
  blocks = [
      resnet_v1_block('block1', base_depth=64, num_units=3, stride=2),
      resnet_v1_block('block2', base_depth=128, num_units=24, stride=2),
      resnet_v1_block('block3', base_depth=256, num_units=36, stride=2),
      resnet_v1_block('block4', base_depth=512, num_units=3, stride=1),
  ]
  return resnet_v1(inputs, blocks, num_classes, is_training,
                   global_pool=global_pool, output_stride=output_stride,
                   include_root_block=True, spatial_squeeze=spatial_squeeze,
                   store_non_strided_activations=store_non_strided_activations,
                   reuse=reuse, scope=scope)
resnet_v1_200.default_image_size = resnet_v1.default_image_size
| [
"forwchen@gmail.com"
] | forwchen@gmail.com |
5574fa697c20b9926bc62f49277b71d1dcd3a57d | 672fa6128c88e43bf14b4168c7c08c60061477bd | /day5/page_object/loginPage.py | 89fcacd15f93afadc07573b5f125d363b627bcad | [] | no_license | zuhui940615/selenium7th | 80128efc75f58bfa1e296506e60c3102d871b8e0 | a752edd16311424360cef4d0b746259b4e424cc4 | refs/heads/master | 2020-03-21T10:42:49.519768 | 2018-06-24T08:46:12 | 2018-06-24T08:46:12 | 138,466,923 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,991 | py | #这种框架的设计思想,叫做page—object设计模式,是一种高级框架设计思想
#这种思想的主旨是把业务逻辑和代码技术分离开
#测试用例的类,专门负责业务逻辑
#元素定位和操作交给 网页对象
#在pageObiect这个类中,把每个网页看成一个类
#其中,网页中的每个元素看成类中的一个属性
#针对这个元素的操作,看成类中的一个方法
#元素的信息,定位是名词性,所以可以看成属性(成员变量)
#元素的操作是动词性的,所以可以看成是方法
#那么,下面我们封装一下登录这个网页
#这个类主要做的就是把元素定位,改一个易于理解的名字
'''driver.get("http://localhost/index.php?m=user&c=public&a=login")
driver.find_element(By.NAME,"username").send_keys("huohuozu")
driver.find_element(By.NAME, "password").send_keys("123456")
old_title = driver.title
driver.find_element(By.CLASS_NAME, "login_btn").click()'''
#把上面的代码封装成下面的样子
from selenium import webdriver
from selenium.webdriver.common.by import By
class LoginPage:
    """Page object for the login page (Page Object design pattern).

    Business logic stays in the test cases; this class only knows how to
    locate and operate the login page's elements. Locators are class
    attributes (nouns), element actions are methods (verbs), so a markup
    change only requires editing the locator in one place.
    """

    # Locator tuples: (strategy, value) as expected by find_element.
    username_input_loc = (By.ID, "username")
    password_input_loc = (By.ID, "password")
    login_button_loc = (By.CLASS_NAME, "login_btn")

    def __init__(self, driver):
        # Reuse the browser created by the test fixture (setUp) rather
        # than opening a new WebDriver instance here.
        self.driver = driver
        self.url = "http://localhost/index.php?m=user&c=public&a=login"

    def open(self):
        """Navigate the shared driver to the login page URL."""
        self.driver.get(self.url)

    def input_username(self, username="huohuozu"):
        """Type the user name; defaults to the standard test account.

        The *loc syntax unpacks the (strategy, value) locator tuple into
        the two positional arguments of find_element.
        """
        self.driver.find_element(*self.username_input_loc).send_keys(username)

    def input_password(self, password='123456'):
        """Type the password; defaults to the standard test password."""
        self.driver.find_element(*self.password_input_loc).send_keys(password)

    def click_login_button(self):
        """Click the login button to submit the form."""
        self.driver.find_element(*self.login_button_loc).click()
| [
"15032683126@163.com"
] | 15032683126@163.com |
d42d3fd35bef84a4bc8c882075bcd8e35c62b2e5 | 3d2d7c223314acf338d9e1aedb9463ac780ed8aa | /fujiblog/urls.py | 49f160e2124df99f58dd2dfb8b7729694d1f2342 | [] | no_license | fuji97/fujiblog | 79b4d751c466e8fd6a82d99385f6bf3d7248258d | d00b6652cad3673a9b4ecc6ca37c0e17ecebc58c | refs/heads/master | 2021-01-20T10:05:12.595966 | 2017-06-08T22:54:54 | 2017-06-08T22:54:54 | 90,153,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | """fujiblog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
# URL routes for the fujiblog project; see the module docstring above.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Zinnia weblog mounted at the site root.
    url(r'^', include('zinnia.urls')),
    #url(r'^weblog/', include('zinnia.urls')),
    url(r'^comments/', include('django_comments.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# static() appends routes that serve uploaded media files (development only).
| [
"fuji1097@gmail.com"
] | fuji1097@gmail.com |
d3a7bdf0bdf102bef366ae9b245a8a5f800eb96d | 8def256b361cb117e291d435f20ee9b4b27fe9f7 | /getpubmed.py | 50d5e9323f6ec327c212519d69d01d401f6dd248 | [] | no_license | dvdmrn/citation_scraper | 44011afc4cda515b512ce6aceb32fc7412e6c292 | ff5a81dca7e31463c1793755863dedc11c7f7215 | refs/heads/master | 2021-05-13T23:51:09.950508 | 2018-01-14T04:18:42 | 2018-01-14T04:18:42 | 116,526,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,667 | py | import metapub
import confidence as c
import helpers
# Module-wide PubMed client shared by the lookup helpers below.
fetch = metapub.PubMedFetcher()
listOfCitations = []  # NOTE(review): appears unused in this module — candidate for removal
def get_pmid(descriptor):
    """Look up the PubMed id of an article from a descriptor.

    descriptor: free-text query — an article title or a DOI.
    returns: the first matching pmid, or 0 when nothing matched
             (the docstring previously claimed False; callers test
             truthiness, so 0 is equivalent).
    """
    candidates = fetch.pmids_for_query(descriptor)
    if not candidates:
        # Couldn't find anything.
        print("SAD: no results found! (TT-TT)")
        return 0
    if len(candidates) > 1:
        # TODO(review): determine the most viable match instead of
        # blindly taking the first candidate.
        print("WARNING: multiple matches found, selecting first candidate")
    return candidates[0]
def lookup_pmid(pmid):
    """Fetch the article record for a given PubMed id.

    pmid: a PubMed id.
    returns: a PubMedArticle, or 0 when the fetch fails.
    """
    try:
        return fetch.article_by_pmid(pmid)
    except Exception:
        # Was a bare 'except:', which also swallowed SystemExit and
        # KeyboardInterrupt; narrow to Exception and report the failure.
        print(" SAD: could not fetch pubmed data! (TT-TT)")
        return 0
def create_citation(pm_article):
    """Build an NF-friendly "Journal Volume(Issue):Pages" citation string.

    pm_article: a metapub PubMedArticle (needs .volume, .issue, .journal,
        .pages and .book attributes).
    returns: the citation; when two or more fields are missing the string
        is suffixed with "!!!" so it can be flagged for manual review.
    """
    volume = pm_article.volume
    issue = pm_article.issue
    journal = pm_article.journal
    pages = pm_article.pages

    missing_data = 0
    if issue:
        issue = "(" + issue + ")"
    else:
        missing_data += 1
        issue = ""
    if not journal:
        # Fall back to the book title (book chapters have no journal).
        missing_data += 1
        journal = pm_article.book
    if not journal:
        missing_data += 1
        journal = "COULD_NOT_FIND_JOURNAL_SORRY_BUB"
    if not volume:
        missing_data += 1
        volume = ""
    if not pages:
        missing_data += 1
        pages = ""

    citation = journal + " " + volume + issue + ":" + pages
    if missing_data >= 2:
        # Flag low-quality records; print() form works on Python 2 and 3
        # (the rest of this file used Python 2 print statements).
        citation = citation + "!!! missing quite a bit of data"
        print(" WARNING: " + str(missing_data) + " missing fields. Citation flagged.")
    return citation
def process_pubs(dois):
    """Resolve a citation for each publication descriptor.

    dois: list of dicts with keys "doi" and "file".
    returns: list of row dicts with keys: file, title, citation, confidence.
        Entries without a DOI (or whose lookup fails) keep empty
        title/citation and confidence 0.
    """
    write_data = []  # list of result rows
    for pub in dois:
        print("\n---")
        title = ""
        citation = ""
        conf = 0
        if pub["doi"]:
            # print() form works on Python 2 and 3 (file predates Python 3).
            print("+ searching for doi: " + pub["doi"] + "; file: " + pub["file"])
            pmid = get_pmid(pub["doi"])
            if pmid:
                article = lookup_pmid(pmid)
                if article:
                    citation = create_citation(article)
                    title = article.title
                    conf = c.confidence_metric(article, "pdfs_to_analyze/" + pub["file"])
                    if conf < 0.6:
                        print("\!/ WARNING \!/ pubmed data below critical confidence levels")
                        citation = citation + "!!! VERIFY"
                    print("writing citation: " + citation)
        else:
            print(" No doi found for: " + pub["file"] + "; ignoring file")
        # Every input row produces an output row, even when lookups failed.
        write_data.append({"file": pub["file"], "title": title,
                           "citation": citation, "confidence": conf})
    return write_data
# ==================================================
# id = get_pmid("10.1039/c4fo00570h")
# article = lookup_pmid(id)
| [
"damarino@cs.ubc.ca"
] | damarino@cs.ubc.ca |
f4005d99185dc2e01e9b1daf8d65d901d29911ca | b7d155502d3494866becbfbd5237a45425054b5d | /DAY_9/Face detection using HAAR CLASSIFIERS/Face_Eye_Detection_in_OPENCV.py | 955641f2f9d7c0002059b15cac205a066bebffc3 | [] | no_license | IEEESFIT1/31DaysOfCode | 1b1f01fb73efde32ab68d170a4ecb1dc18824cff | 2eac7a720ad15734a7020dcb3aab31a2d6d55cc8 | refs/heads/main | 2023-08-06T09:20:49.980701 | 2021-10-01T14:03:09 | 2021-10-01T14:03:09 | 317,566,761 | 7 | 3 | null | 2021-10-01T14:03:10 | 2020-12-01T14:27:11 | Python | UTF-8 | Python | false | false | 870 | py | import cv2
from numpy.lib.type_check import imag
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# Fixed filename: the OpenCV data file is 'haarcascade_eye_tree_eyeglasses.xml'
# (was 'haarcascade_eye_tree_eye_glasses.xml', which loads an empty classifier).
eye_cascade = cv2.CascadeClassifier('haarcascade_eye_tree_eyeglasses.xml')

frame = cv2.VideoCapture(0)  # default webcam
while frame.isOpened():
    _, img = frame.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # scaleFactor=1.1, minNeighbors=4
    faces = face_cascade.detectMultiScale(gray, 1.1, 4)
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 3)
        # Restrict the eye search to the detected face region.
        roi_gray = gray[y:y + h, x:x + w]
        roi_color = img[y:y + h, x:x + w]
        # Fixed typo: was 'detectdetectMultiScale', which raised AttributeError.
        eyes = eye_cascade.detectMultiScale(roi_gray)
        for (ex, ey, ew, eh) in eyes:
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 5)
    cv2.imshow('img', img)
    # Fixed case: the function is cv2.waitKey ('waitkey' does not exist).
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
frame.release()
cv2.destroyAllWindows()  # close the preview window on exit
"noreply@github.com"
] | noreply@github.com |
c4b60be269cb804c222514ca84f971ba53fe0a2b | 7590d16f6db2c0b16982fc644b5d536ab1f98c7e | /src/webapp/apps/profiles/management/commands/followers_from_csv.py | 7da4fdec687f5479e19b365a51ac2a350a0c6591 | [] | no_license | GeoRemindMe/GeoRemindMe_Platform | 33444bd8e2fcbf1c8fc42a78140fa5848441ae84 | 30436fba4f16cd787903a667302a3b34a2b8a8e2 | refs/heads/master | 2016-09-05T22:02:51.526975 | 2012-07-12T20:08:26 | 2012-07-12T20:08:26 | 2,743,081 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 838 | py | # coding=utf-8
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from apps.timelines.models import Follower
import csv
import sys
class Command(BaseCommand):
    """Import follower relations from a '#'-delimited CSV file.

    Each data row is expected to be: follower_username#followee_username.
    The first row is treated as a header and skipped. Rows referencing
    unknown usernames are skipped silently (best effort import).
    """
    args = '.csv'

    def handle(self, *args, **options):
        # 'with' guarantees the file handle is closed (the original
        # opened the file and never closed it).
        with open(args[0], 'r') as csv_file:
            reader = csv.reader(csv_file, delimiter='#')
            next(reader, None)  # skip the header row
            for row in reader:
                try:
                    follower = User.objects.get(username=row[0])
                    followee = User.objects.get(username=row[1])
                    if not Follower.objects.is_follower(follower, followee):
                        Follower.objects.toggle_follower(follower, followee)
                except User.DoesNotExist:
                    # Best-effort: ignore rows with unknown usernames.
                    pass
        sys.exit(0)
"javier@georemindme.com"
] | javier@georemindme.com |
24b6a392193af3ed499ed5481be0d574615aa635 | fa0f12a6d63be22b588133bfb9c130f1eeecab3d | /myvenv/lib/python3.7/site-packages/pip/_internal/cli/autocompletion.py | 1295e23141c110930d3bf02637af4990d0143b8e | [] | no_license | 8th-caulion/high-hat | 6b2c455be14b5e617bf993cfb67c68975df3aa65 | fc1f9793747892b7b58f066c45ab95d3f0269db9 | refs/heads/master | 2023-08-02T12:07:36.540488 | 2020-06-03T17:36:32 | 2020-06-03T17:36:32 | 267,542,957 | 0 | 6 | null | 2021-09-22T19:09:26 | 2020-05-28T09:04:29 | Python | UTF-8 | Python | false | false | 8,237 | py | """Logic that powers autocompletion installed by ``pip completion``.
"""
import optparse
import os
import sys
from itertools import chain

from pip._internal.cli.main_parser import create_main_parser
from pip._internal.commands import commands_dict, create_command
from pip._internal.utils.misc import get_installed_distributions
from pip._internal.utils.typing import MYPY_CHECK_RUNNING

if MYPY_CHECK_RUNNING:
    from typing import Any, Iterable, List, Optional


def autocomplete():
    # type: () -> None
    """Entry Point for completion of main and subcommand options.

    NOTE: this block contained unresolved git merge-conflict markers
    (``<<<<<<<``/``=======``/``>>>>>>>``) that made the module a
    SyntaxError; it is resolved here toward the HEAD (newer pip) side.
    """
    # Don't complete if user hasn't sourced bash_completion file.
    if 'PIP_AUTO_COMPLETE' not in os.environ:
        return
    cwords = os.environ['COMP_WORDS'].split()[1:]
    cword = int(os.environ['COMP_CWORD'])
    try:
        current = cwords[cword - 1]
    except IndexError:
        current = ''

    parser = create_main_parser()
    subcommands = list(commands_dict)
    options = []

    # subcommand
    subcommand_name = None  # type: Optional[str]
    for word in cwords:
        if word in subcommands:
            subcommand_name = word
            break
    # subcommand options
    if subcommand_name is not None:
        # special case: 'help' subcommand has no options
        if subcommand_name == 'help':
            sys.exit(1)
        # special case: list locally installed dists for show and uninstall
        should_list_installed = (
            subcommand_name in ['show', 'uninstall'] and
            not current.startswith('-')
        )
        if should_list_installed:
            installed = []
            lc = current.lower()
            for dist in get_installed_distributions(local_only=True):
                if dist.key.startswith(lc) and dist.key not in cwords[1:]:
                    installed.append(dist.key)
            # if there are no dists installed, fall back to option completion
            if installed:
                for dist in installed:
                    print(dist)
                sys.exit(1)

        subcommand = create_command(subcommand_name)

        for opt in subcommand.parser.option_list_all:
            if opt.help != optparse.SUPPRESS_HELP:
                for opt_str in opt._long_opts + opt._short_opts:
                    options.append((opt_str, opt.nargs))

        # filter out previously specified options from available options
        prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
        options = [(x, v) for (x, v) in options if x not in prev_opts]
        # filter options by current input
        options = [(k, v) for k, v in options if k.startswith(current)]
        # get completion type given cwords and available subcommand options
        completion_type = get_path_completion_type(
            cwords, cword, subcommand.parser.option_list_all,
        )
        # get completion files and directories if ``completion_type`` is
        # ``<file>``, ``<dir>`` or ``<path>``
        if completion_type:
            paths = auto_complete_paths(current, completion_type)
            options = [(path, 0) for path in paths]

        for option in options:
            opt_label = option[0]
            # append '=' to options which require args
            if option[1] and option[0][:2] == "--":
                opt_label += '='
            print(opt_label)
    else:
        # show main parser options only when necessary
        opts = [i.option_list for i in parser.option_groups]
        opts.append(parser.option_list)
        flattened_opts = chain.from_iterable(opts)
        if current.startswith('-'):
            for opt in flattened_opts:
                if opt.help != optparse.SUPPRESS_HELP:
                    subcommands += opt._long_opts + opt._short_opts
        else:
            # get completion type given cwords and all available options
            completion_type = get_path_completion_type(cwords, cword,
                                                       flattened_opts)
            if completion_type:
                subcommands = list(auto_complete_paths(current,
                                                       completion_type))

    print(' '.join([x for x in subcommands if x.startswith(current)]))
    sys.exit(1)
def get_path_completion_type(cwords, cword, opts):
    # type: (List[str], int, Iterable[Any]) -> Optional[str]
    """Get the type of path completion (``file``, ``dir``, ``path`` or None)

    Unresolved merge-conflict markers in this block are resolved toward
    the HEAD side (explicit ``return None``, type comments kept).

    :param cwords: same as the environmental variable ``COMP_WORDS``
    :param cword: same as the environmental variable ``COMP_CWORD``
    :param opts: The available options to check
    :return: path completion type (``file``, ``dir``, ``path`` or None)
    """
    if cword < 2 or not cwords[cword - 2].startswith('-'):
        return None
    for opt in opts:
        if opt.help == optparse.SUPPRESS_HELP:
            continue
        for o in str(opt).split('/'):
            if cwords[cword - 2].split('=')[0] == o:
                if not opt.metavar or any(
                        x in ('path', 'file', 'dir')
                        for x in opt.metavar.split('/')):
                    return opt.metavar
    return None


def auto_complete_paths(current, completion_type):
    # type: (str, str) -> Iterable[str]
    """If ``completion_type`` is ``file`` or ``path``, list all regular files
    and directories starting with ``current``; otherwise only list directories
    starting with ``current``.

    :param current: The word to be completed
    :param completion_type: path completion type (``file``, ``path`` or ``dir``)
    :return: A generator of regular files and/or directories
    """
    directory, filename = os.path.split(current)
    current_path = os.path.abspath(directory)
    # Don't complete paths if they can't be accessed
    if not os.access(current_path, os.R_OK):
        return
    filename = os.path.normcase(filename)
    # list all files that start with ``filename``
    file_list = (x for x in os.listdir(current_path)
                 if os.path.normcase(x).startswith(filename))
    for f in file_list:
        opt = os.path.join(current_path, f)
        comp_file = os.path.normcase(os.path.join(directory, f))
        # complete regular files when there is not ``<dir>`` after option
        # complete directories when there is ``<file>``, ``<path>`` or
        # ``<dir>`` after option
        if completion_type != 'dir' and os.path.isfile(opt):
            yield comp_file
        elif os.path.isdir(opt):
            yield os.path.join(comp_file, '')
| [
"rldnjs9347@gmail.com"
] | rldnjs9347@gmail.com |
186e04c580756ed5fcd2b7e91ca54ec476d908a3 | 017b95b21359aedb77b5a1df390ecb4130c2a9ea | /django_blog/myblog/models.py | 1dfdff15fced66ab2951b1a2b5374413de70c0a9 | [] | no_license | havidri/Django-Blog | 721880a1eddc7d62a9b75f34d8a039e5b404dee9 | db79e155bf326ede2b88ae120356d8def2a30d97 | refs/heads/main | 2023-07-04T12:44:59.372176 | 2021-08-12T06:35:00 | 2021-08-12T06:35:00 | 394,821,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,665 | py | import reserve as reserve
from django.contrib.auth.models import User
from django.db import models
from django.contrib.auth import get_user_model
from tinymce import HTMLField
from django.urls import reverse
User = get_user_model()
class Author(models.Model):
    """Blog author: a one-to-one extension of the auth User with an avatar."""
    # Deleting the User cascades to the Author row.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    profile_pic = models.ImageField()

    def __str__(self):
        # Display the underlying account name in admin/listings.
        return self.user.username
class Category(models.Model):
    """A post category, identified only by its short title."""
    title = models.CharField(max_length=20)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        # After create/update, redirect to the site index.
        return reverse('index')
class Post(models.Model):
    """A blog post with rich-text content, author, categories and comments."""
    title = models.CharField(max_length=100)
    description = models.CharField(max_length=200)
    # Rich-text body edited via TinyMCE.
    content = HTMLField()
    # auto_now: updated on every save (acts as "last modified").
    date = models.DateTimeField(auto_now=True)
    author = models.ForeignKey(Author, on_delete=models.CASCADE)
    thumbnail = models.ImageField(null=True, blank=True)
    categories = models.ManyToManyField(Category)
    featured = models.BooleanField()

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        # Detail view keyed by this post's primary key.
        return reverse('blog', kwargs={
            'blog_id': self.id
        })

    @property
    def get_comments(self):
        # NOTE(review): order_by() with no arguments clears any default
        # ordering; presumably a field name (e.g. '-date') was intended —
        # confirm before relying on comment order.
        return self.comments.all().order_by()
class Comment(models.Model):
    """A user comment attached to a Post (reverse accessor: post.comments)."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # auto_now_add: set once at creation time.
    date = models.DateTimeField(auto_now_add=True)
    content = models.TextField()
    # Optional link to an Author profile (nullable).
    author = models.ForeignKey(Author, on_delete=models.CASCADE, null=True)
    post = models.ForeignKey(Post, related_name='comments', on_delete=models.CASCADE)

    def __str__(self):
        return self.user.username
"havidriyono@yahoo.com"
] | havidriyono@yahoo.com |
1fceb20404030a5fec787e594da373dd6185278b | 20dc3427454e86c949e4d0e44c89a9f0ec0ff76a | /tests/watcher.py | 9ce156b16248902bfd948f5e076ea084b2367399 | [
"MIT"
] | permissive | simonwittber/fibra | a0d01e1b1a040ea8d8d84c6150c781bfa63b4ebe | 1761ba79cb643b0392bb82d7e80ce9e55bb75275 | refs/heads/master | 2021-01-23T13:16:53.985148 | 2011-05-31T02:21:14 | 2011-05-31T02:21:14 | 1,824,259 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | import fibra
# Demonstrates fibra's task-watching: an exception raised inside a scheduled
# task is delivered to the registered watcher callback. (Python 2 syntax.)
def task():
    # Yield once so the scheduler runs us, then blow up deliberately.
    yield None
    print 'raising'
    raise Exception('ARGH')

def watcher(e):
    # Called by the scheduler with the exception raised by the watched task.
    print "watcher received:", type(e), e

schedule = fibra.schedule()
t = task()
schedule.install(t)
# Route exceptions from task t to the watcher callback.
schedule.watch(t, watcher)
schedule.run()
| [
"simonwittber@gmail.com"
] | simonwittber@gmail.com |
c33583ccd6b33f5b384c1373a54f70a46388cc88 | 9888ef3bb4408a4cef8b2ad49d3b6eb873056694 | /multiclass_allH5_data/step2_write_all_H5_tfrecord.py | f34eb929cb07fbe2d0d0722c4d6dff8cd268ecc1 | [] | no_license | MeiliLiu-STEM/TFSeg_BraTS | bd3d52a8cbfeeea1b188fc268f0dfb74c0171efa | d6f482a57a859b59a5c507094efa928a21239198 | refs/heads/master | 2021-04-08T02:41:10.564845 | 2018-08-20T14:45:17 | 2018-08-20T14:45:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,577 | py | # -*- coding: utf-8 -*-
# @__ramraj__
from __future__ import division, print_function, absolute_import
import tensorflow as tf
import sys
import numpy as np
import cv2
import os
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
import config
import h5py
def _int64_feature(value):
    # Wrap a single integer as a TFRecord int64 Feature.
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _bytes_feature(value):
    # Wrap a byte string as a TFRecord bytes Feature.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def write_record(imgs, lbls, IDs, tfrecord_name='./train.tfrecords', lbl='train'):
    """Serialize (image, label, id) triples into a TFRecord file.

    :param imgs: rank-4 array indexed as imgs[i, :, :, :]
    :param lbls: rank-3 array indexed as lbls[i, :, :]
    :param IDs: per-sample identifier strings
    :param tfrecord_name: output file path
    :param lbl: progress-report tag ('train' / 'test')
    """
    writer = tf.python_io.TFRecordWriter(tfrecord_name)
    n_obs = imgs.shape[0]
    for i in range(n_obs):
        if not i % 100:
            # Bug fix: the original rebound the 'lbl' tag to the label
            # array inside the loop, so every progress line after the
            # first printed an ndarray instead of 'train'/'test'.
            print('{} data: {}/{}'.format(lbl, i, n_obs))
            sys.stdout.flush()
        # Load the image / label / id for this sample.
        img = imgs[i, :, :, :]
        lbl_arr = lbls[i, :, :]
        ID = IDs[i]
        # Create a feature (keys intentionally kept as 'train/*' for both
        # splits — readers depend on these names).
        feature = {
            'train/image': _bytes_feature(tf.compat.as_bytes(img.tostring())),
            'train/label': _bytes_feature(tf.compat.as_bytes(lbl_arr.tostring())),
            'train/id': _bytes_feature(tf.compat.as_bytes(ID))}
        example = tf.train.Example(features=tf.train.Features(feature=feature))
        writer.write(example.SerializeToString())
    writer.close()
    sys.stdout.flush()
def binarize_targets(y_train, task='all'):
    """Binarize a BraTS label map into a 0/1 mask for the given task.

    Generalized: ``task`` was a hard-coded local ('all') that made the
    other branches dead code; it is now a parameter with the same default,
    so existing callers are unaffected.

    :param y_train: integer label array (labels used below: 1, 2, 4; 0 = background)
    :param task: 'all' (any non-zero label), 'necrotic' (1), 'edema' (2)
        or 'enhance' (4)
    :return: int array of the same shape, 1 where the selected label matches
    :raises ValueError: for an unknown task name (was a hard process exit)
    """
    if task == 'all':
        y_train = (y_train > 0).astype(int)
    elif task == 'necrotic':
        y_train = (y_train == 1).astype(int)
    elif task == 'edema':
        y_train = (y_train == 2).astype(int)
    elif task == 'enhance':
        y_train = (y_train == 4).astype(int)
    else:
        raise ValueError("Unknow task %s" % task)
    # print('uniques elements in y_Train : ', np.unique(y_train))
    return y_train
def load_data(path, do_test=False):
    """Load per-slice HDF5 files from *path* into arrays.

    File names are expected to split on '_' with the patient number at
    index 2 and the slice index at index 3. Returns
    (images, labels, ids): images is (N, H, W, 4) float32 after the
    transpose below, labels is the binarized ground truth, ids the
    source file names. ``do_test`` is currently unused.
    """
    images = []
    labels = []
    ids = []
    print('Reading images')
    # Invert modality->index mapping so index -> modality name. (Python 2:
    # dict.iteritems.)
    mod_dict = dict((v, k) for k, v in config.MODALITY_DICT.iteritems())
    for i in os.listdir(path):
        tmp_list = i.split('_')
        patient_num = tmp_list[2]
        slice_ix = tmp_list[3]
        h5f = h5py.File(os.path.join(path, i), 'r')
        # +++++++++++++++++++++++++ IMAGE +++++++++++++++++++++++++
        # One 2-D image per modality (4 modalities), stacked per sample.
        mod_images = []
        for mod in range(4):
            dataset_name = '{}_{}_{}'.format(mod_dict[mod],
                                             patient_num, slice_ix)
            img = h5f[dataset_name][:]
            mod_images.append(img)
        images.append(mod_images)
        # +++++++++++++++++++++++++ LABEL +++++++++++++++++++++++++
        lbl = h5f['gt_{}_{}'.format(patient_num, slice_ix)][:]
        lbl = binarize_targets(lbl)
        labels.append(lbl)
        h5f.close()
        # +++++++++++++++++++++++++++ ID ++++++++++++++++++++++++++
        ids.append(i)
    images = np.array(images, dtype=np.float32)
    # (N, modality, H, W) -> (N, H, W, modality) channels-last.
    images = images.transpose((0, 2, 3, 1))
    labels = np.array(labels, dtype=np.int32)
    ids = np.array(ids)
    print('images shape : ', images.shape)
    print('labels shape : ', labels.shape)
    return images, labels, ids
def creat_tf_records():
    """Load the H5 dataset, split train/test, shuffle, and write TFRecords."""
    images_data, labels_data, ids_data = load_data(config.H5_SRC)
    print('Data Loaded.')
    print(' Data : ', images_data.shape, '\n')
    # Deterministic split (random_state=42), test fraction from config.
    train_images, test_images, train_labels, test_labels, \
        train_ids, test_ids = train_test_split(images_data, labels_data, ids_data,
                                               test_size=config.TEST_SPLIT,
                                               random_state=42)
    print('Train data : ')
    print(train_images.shape)
    print(train_labels.shape)
    print(train_ids.shape)
    print(test_images.shape)
    print(test_labels.shape)
    print(test_ids.shape)
    print('++++++++++++++++++++++++++++++++')
    # ========================================
    # Shuffle
    train_images, train_labels, train_ids = shuffle(train_images, train_labels, train_ids)
    test_images, test_labels, test_ids = shuffle(test_images, test_labels, test_ids)
    TFRECORD_ROOT = './record/'
    if not os.path.exists(TFRECORD_ROOT):
        os.makedirs(TFRECORD_ROOT)
    # Write Train TFRecords
    write_record(train_images, train_labels, train_ids,
                 tfrecord_name=TFRECORD_ROOT + 'train.tfrecords', lbl='train')
    print('\n')
    # Write Test TFRecords
    write_record(test_images, test_labels, test_ids,
                 tfrecord_name=TFRECORD_ROOT + 'test.tfrecords', lbl='test')
if __name__ == '__main__':
creat_tf_records()
| [
"cramraj8@gmail.com"
] | cramraj8@gmail.com |
e257d259dbfc021d53cf6ad1b76045bdfbe6eb01 | 1567a3af5e8bec0735cde692a2ed9e25614b3625 | /TestEnv.py | 788e1611dfe604cdb92aea8610742f919662db88 | [] | no_license | lroin/Py_Cralwer | bbae9022299ffa28d8ef3833af7d67585ffe6bf6 | 84ccab0ecdc260e59e149893ff12871b7ba9951b | refs/heads/master | 2023-03-21T02:43:08.184180 | 2016-12-19T09:03:18 | 2016-12-19T09:03:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,200 | py | from MajorCrawler import *
# ==============================<<func>>==============================
def getBJ(): # Beijing court notices; dict -> DB; the site uses a CAPTCHA
    """Scrape Beijing court hearing announcements page by page.

    Most of the pagination / CAPTCHA-retry logic is disabled (kept inside
    the triple-quoted string literals below — they are runtime strings,
    left byte-identical); currently only one hard-coded detail URL is
    fetched per page and the loop breaks after the first page.
    """
    url_i='http://www.bjcourt.gov.cn/ktgg/index.htm?c=&court=&start=&end=&type=&p='
    # Record header: province / URL / content (keys are used verbatim below).
    header=['省级行政区','网址','内容']
    flag=True
    for i in range(1,10):
        #url=url_i+str(i)
        #buf=getContent(url)
        Links=[]
        result=[]
        """
        for ele in re.findall('<a href="(/ktgg/ktggDetailInfo.htm?[\s\S]+?)"',buf):
            Links.append('http://www.bjcourt.gov.cn'+ele)
        """
        # Hard-coded single detail page while the listing scrape is disabled.
        Links=['http://www.bjcourt.gov.cn/ktgg/ktggDetailInfo.htm?NId=58109&NAjbh=8755026']
        for fwd in Links:
            node={}
            node=node.fromkeys(header)
            page=getContent(fwd)
            soup=BeautifulSoup(page,'html.parser')
            """
            if re.search('定于二〇一五年',page): # 如果本页有2015的资料, 则到此为止
                print('[北京] 2016 end.')
                flag=False
                break
            elif re.search('验证码',page):
                print('[北京] ',fwd)
                print('[北京] 验证码,等待90秒后重试.')
                time.sleep(90)
            """
            try:
                node['省级行政区']='北京市'
                node['网址']=fwd
                #node['内容']=
                # for/else: the else runs when the loop finishes without break.
                for x in soup.find_all(class_='article_con'):
                    writeText(x,'test.txt')
                else:
                    print('class failed')
                result.append(node)
                print(node)
            except AttributeError:
                # Log the failing URL and stack trace, keep going.
                print(fwd)
                traceback.print_exc()
                writeText(traceback.format_exc(),'_ErrorLog.txt')
                continue
        if flag==False:
            print('[北京] End at page ',i,'.')
            break
        else:
            #write_DB(result)
            print('[北京] Page ',i,' saved.')
            break
    return;
# ==============================<<Main>>==============================
getBJ() | [
"eyu.yang@gmail.com"
] | eyu.yang@gmail.com |
d6f9576e15f4246ceca27311ec1c907b2dde14b7 | e06a996c9f78bd8767bde431951e91859dc6ae8a | /experimentalComponents/gupta_paper_brian2.py | 8f6e78cb4d1b3abc295fc6460508c62b8b415f08 | [
"MIT"
] | permissive | Jbwasse2/snn-rl | 7dbe8bd5c23837cb76f492e7b911081dee7a4e4a | 29b040655f432bd390bc9d835b86cbfdf1a622e4 | refs/heads/master | 2020-08-07T10:28:16.533162 | 2019-10-07T15:21:02 | 2019-10-07T15:21:02 | 213,411,865 | 0 | 0 | NOASSERTION | 2019-10-07T14:53:54 | 2019-10-07T14:53:54 | null | UTF-8 | Python | false | false | 852 | py | #While reading fninf-08-00006.pdf I copied some useful snippets of code here
#Leaky integrate and fire with refractoriness
G = NeuronGroup ( number_of_neurons,
'dv/dt = -(v - v_0)/tau_m : volt # membrane potential',
threshold='v > v_th',
reset='v = v_0',
refractory='(t-lastspike) <= 2*ms');
#Random initial values for membrane potential
G.v = 'v_0+randn() *3*mV'
#Spike timming dependant plasticity
S = Synapses ( source_group , target_group,
'''w: siemens
dA_source/dt = -A_source/tau_source: siemens (event-driven)
dA_target/dt = -A_target/tau_target: siemens (event-driven)''',
pre='''g_post += w
A_source += deltaA_source
w = clip(w+A_target, 0*siemens, w_max)''',
post='''A_target += deltaA_target
w = clip(w+A_source, 0*siemens, w_max)''')
#Connectivity without self connections
S.connect('i != j')
| [
"tartavull@gmail.com"
] | tartavull@gmail.com |
9023e00f19ae2dd73801a944716c50ce967f6522 | ff99e4847f91a288cec57124d7beb4b672db2f1e | /maestro/providers/aws/vpc_location.py | c79afa0de62d6c133d13b85bc2d0e8b0932e1234 | [
"Apache-2.0"
] | permissive | tunein/Maestro | e50afdbc9fe61340a2c4e82511bcec4d50957567 | 789205fdbe85242189c50e407445c57ca916e42c | refs/heads/development | 2021-09-29T23:21:14.327896 | 2021-09-21T21:06:31 | 2021-09-21T21:06:31 | 130,405,209 | 12 | 2 | Apache-2.0 | 2021-09-21T21:06:31 | 2018-04-20T19:26:32 | Python | UTF-8 | Python | false | false | 2,463 | py | #Import external libs
import boto3
import sys
import json
import os
from botocore.exceptions import ClientError
#This is only here for printing pretty colors
class color:
    """ANSI escape sequences for colourised terminal output."""
    # text styles
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    # foreground colours
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    # reset back to the terminal default
    END = '\033[0m'
#Establish our boto resources
# Module-level EC2 handles shared by every helper below (default
# credentials/region from the environment).
ec2 = boto3.resource('ec2')
client = boto3.client('ec2')
def get_vpc_id(vpc_name):
    '''
    Gets the unique ID of the given VPC by AWS and returns it
    ex: vpc-a1b2c4d4

    Returns False (after printing an error) when the lookup yields an
    empty id; returns None implicitly when no VPC matches the Name tag.

    args:
        vpc_name: name of the vpc
    '''
    # Match on the Name tag exactly.
    filters = [{'Name': 'tag:Name', 'Values': ['%s' % vpc_name]}]
    vpcs = list(ec2.vpcs.filter(Filters=filters))
    for vpc in vpcs:
        try:
            response = client.describe_vpcs(
                VpcIds=[
                    vpc.id,
                ]
            )
            # First (and only) description for this id.
            vpc_id = response['Vpcs'][0]['VpcId']
            if len(vpc_id)!=0:
                return vpc_id
            else:
                print(color.RED + "Couldn't find the ID for your vpc, check the name and try again" + color.END)
                return False
        except ClientError as error:
            # Print the AWS error message and fall through to the next match.
            print(color.RED + error.response['Error']['Message'] + color.END)
def get_subnets(vpc_id):
    '''
    Takes the ID from "get_vpc_id" and gathers all private subnets
    then it puts them in a list and returns them for the lambda config

    NOTE(review): subnets are keyed by their tag value in ``ids``, so two
    subnets sharing the same 'private*' tag value would overwrite each
    other — confirm tag uniqueness in the target account.

    args:
        vpc_id: the unique ID given to the VPC by aws
    '''
    vpc = ec2.Vpc(vpc_id)
    subnets = list(vpc.subnets.all())
    ids = {}
    list_id = []
    for subnet in subnets:
        try:
            info = ec2.Subnet(subnet.id)
            get_tags = list(info.tags)
            # Round-trip through JSON to get plain dicts for the tag list.
            dumper = json.dumps(get_tags, indent=4)
            loader = json.loads(dumper)
            for item in loader:
                private_tag = item['Value']
                # Keep only subnets whose tag value contains 'private'.
                if 'private' in private_tag:
                    ids.update({private_tag: subnet.id})
        except ClientError as error:
            print(color.RED + error.response['Error']['Message'] + color.END)
    for key, value in ids.items():
        list_id.append(value)
    return list_id
def main(vpc_name):
    '''
    Entry point for this module: resolve the named VPC to its AWS id
    and return the ids of its private subnets.

    args:
        vpc_name: taken from the config
    '''
    return get_subnets(get_vpc_id(vpc_name))
"mmoon@tunein.com"
] | mmoon@tunein.com |
8b3bfe75a888d9cb49b2f4c56c83b47f04bfaa01 | 7de174ec684fe60717b2757fe5e194cc597fee38 | /plugins/plugin_clone.py | 312d2f5ae7f840bba90736e8b0d4e3c654b8abc7 | [] | no_license | oma256/repo_scan | 41ad4972908859f947d7226dd80f09a6f582301c | 03971eea701a0079d9a824261f6ee0e21c1d2f79 | refs/heads/master | 2022-03-26T06:40:20.388127 | 2019-12-05T15:09:11 | 2019-12-05T15:09:11 | 222,365,319 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | import os
import subprocess
import sys
from loguru import logger
from plugins.base import Command
from utils.parser import create_parser
class Clone(Command):
    """Clone the repository given on the command line into ./sandbox."""

    def execute(self) -> None:
        parser = create_parser()
        args = parser.parse_args()

        # Without a repository argument there is nothing to do: show help.
        if not args.repository:
            create_parser().print_help()
            sys.exit(0)

        # exist_ok avoids the check-then-create race of the original
        # os.path.exists() guard.
        os.makedirs('sandbox', exist_ok=True)

        logger.info("Downloading repository")
        # Run git in the sandbox dir; discard stdout, merge stderr into it.
        subprocess.Popen(cwd='./sandbox',
                         args=['git', 'clone', args.repository],
                         stderr=subprocess.STDOUT,
                         stdout=subprocess.DEVNULL).communicate()
        logger.info("Done")
| [
"oma.dulatov@gmail.com"
] | oma.dulatov@gmail.com |
950cc3ec633927641e6bc1b3f51f4408ecff16e7 | 5f5ea1011786269376ec09f43c3b9bb246e9d98b | /login-robot/src/services/user_service.py | 4a581e0d11c439c7d0e7adfb1f74d0ea2493329a | [] | no_license | tholsti/hy-ohtu-syksy-2021-tehtavat | 30561b84e0da768f15b9f6787e34136f60bf6d00 | 5613e033fbddb5833f7e69b3c148204554c5dd3a | refs/heads/main | 2023-04-04T23:44:26.248025 | 2021-04-18T11:10:48 | 2021-04-18T11:10:48 | 334,141,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,501 | py | from entities.user import User
import re
class UserInputError(Exception):
    """Raised when a required username or password is missing."""
    pass


class AuthenticationError(Exception):
    """Raised when the username/password pair does not match a user."""
    pass


class RegistrationError(Exception):
    """Raised when a new username/password fails validation."""
    pass
class UserService:
    """Application logic for registering and authenticating users."""

    def __init__(self, user_repository):
        # Repository must provide find_by_username(username) and create(user).
        self._user_repository = user_repository

    def check_credentials(self, username, password):
        """Return the matching user or raise on missing/invalid credentials."""
        if not username or not password:
            raise UserInputError("Username and password are required")

        user = self._user_repository.find_by_username(username)

        if not user or user.password != password:
            raise AuthenticationError("Invalid username or password")

        return user

    def create_user(self, username, password):
        """Validate the input and persist a new user; returns the user."""
        self.validate(username, password)

        user = self._user_repository.create(
            User(username, password)
        )

        return user

    def validate(self, username, password):
        """Raise UserInputError/RegistrationError if the input is invalid."""
        if not username or not password:
            raise UserInputError("Username and password are required")

        if self._user_repository.find_by_username(username):
            raise RegistrationError("Username already exists")

        if not re.match(r'^[a-z]{3,}$', username):
            raise RegistrationError("Username is invalid")

        if not re.match(r'^[\S]{8,}$', password):
            raise RegistrationError("Password is too short")

        # Bug fix: the original pattern '[^a-z]$' inspected only the LAST
        # character, so e.g. '1password' was rejected as letters-only.
        # Require a non-lowercase-letter character anywhere instead.
        if not re.search(r'[^a-z]', password):
            raise RegistrationError("Password contains only letters")
| [
"tomi.holstila@gmail.com"
] | tomi.holstila@gmail.com |
58bb65a58ddad2e7ba4755e15c3698f3ff9b3301 | cb33113c4063867fa41cb74943d0a056a383b6a1 | /codexpert/Snake.py | bf0365b45c2712a8fdc2e057e76157dea480dae5 | [] | no_license | manuck/Algorithm | 9c6280095da6b88473460da52d07fb23ee6c3f9f | 4c15ff42f39224eb9b29728544c92dce9341fdfa | refs/heads/master | 2020-04-18T02:06:53.437576 | 2019-06-26T08:59:16 | 2019-06-26T08:59:16 | 167,148,058 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | import sys
sys.stdin = open("Snake_input.txt")
| [
"snc9000@naver.com"
] | snc9000@naver.com |
d871c5cfc9ab2fb5f9fd61aa0dca96c2093b5d22 | d15db6af7db42745262775a7402877bcee37e22b | /HaiZhiTestEngine.py | 86915fb8c4b7fb93d612043b3e5712e3833f47a9 | [] | no_license | NotTodayNotMe/HaiZhiInterface | 0838916245f56ae369a7de3a64d597cc40065b7d | 7ad1c555fbc9b3bf53a1235c523c24910d1cf71a | refs/heads/master | 2020-03-28T22:36:16.689070 | 2018-08-27T09:28:34 | 2018-08-27T09:28:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,519 | py | #coding:utf-8
import datetime
import json
from HistoryTrading import HistoryTrading
from RealTimeTrading import RealTimeTrading
from HaizhiData import HaizhiData
'''装饰器'''
def input_checker(func):
    '''
    Validate the wrapped trade call's keyword arguments; ``code`` and
    ``volume`` are both normalised to str before the call, and a ``date``
    is defaulted/normalised when the backtesting engine is active.
    (Python 2 syntax throughout — do not add annotations.)
    :param func: the trade method being wrapped (called as func(self, **kwargs))
    :return: the wrapping function
    '''
    def _input_checker(self,**kwargs):
        # check the stock code: pad ints to the 6-digit code format
        if isinstance(kwargs['code'], str):
            pass
        elif isinstance(kwargs['code'], int):
            kwargs['code'] = str(kwargs['code'])
            while len(kwargs['code']) < 6:
                kwargs['code'] = '0' + kwargs['code']
        else:
            raise TypeError, 'code must be str or int'
        # check the trade volume
        if isinstance(kwargs['volume'], str):
            pass
        elif isinstance(kwargs['volume'], int):
            kwargs['volume'] = str(kwargs['volume'])
        else:
            raise TypeError, 'volume must be str or int'
        # check the backtest date: default to the engine's current time
        if isinstance(self._core,HistoryTrading):
            if 'date' not in kwargs:
                kwargs['date'] = self._current_time.strftime('%Y-%m-%d')
            elif isinstance(kwargs['date'],datetime.datetime):
                kwargs['date'] = kwargs['date'].strftime('%Y-%m-%d')
            elif isinstance(kwargs['date'],str):
                pass
            else:
                raise TypeError,'date must be str or datetime object'
        # call through with the normalised arguments
        #print kwargs
        res = func(self, **kwargs)
        return res
    return _input_checker
class HaiZhiTestEngine(object):
    """Facade over three trading cores: live trading (RealTimeTrading),
    backtesting (HistoryTrading) and market data (HaizhiData). Most methods
    dispatch on the active core's type. (Python 2 code.)"""
    def __init__(self,user_id='',password = '',type = 'RealTimeTrading'):
        '''
        Example order dict handled by the engine:
        {'buy_sell': 'sell', 'code': '000006', 'volume': '100', 'price': '1', 'price_type': 'now_price', 'effect_term': '2'}
        :param user_id: user id
        :param password: user password
        :param type: trading-engine type; defaults to the live-trading engine
        :param stratagy_name: strategy name; empty by default, required when
            the backtesting engine is selected (backtest uses user_id here)
        '''
        if type == 'RealTimeTrading':
            self._core = RealTimeTrading(userid=user_id, password=password)
        elif type == 'HistoryTrading':
            stratagy_name = user_id
            self._current_time = datetime.datetime.today()-datetime.timedelta(days=1)
            self._core = HistoryTrading(userid=user_id,password=password,strategy_name = stratagy_name)
            self._core.create_strategy(stratagy_name)
        elif type == 'HaizhiData':
            self._core = HaizhiData(userid=user_id, password=password)
        else:
            raise ValueError,'type must be "RealTimeTrading" or "HistoryTrading"'
    # show the current trading-engine type
    @property
    def core(self):
        '''
        Return the class of the active core engine.
        :return:
        '''
        return self._core.__class__
    # show the current backtest-engine time
    @property
    def current_time(self):
        '''
        Return the current engine time; mainly used for backtesting.
        :return:
        '''
        if isinstance(self._core,RealTimeTrading):
            return datetime.datetime.now().strftime('%Y-%m-%d,%H:%M:%S')
        elif isinstance(self._core,HistoryTrading):
            return self._current_time.strftime('%Y-%m-%d')
    @current_time.setter
    def current_time(self,date):
        '''
        Set the engine time directly (backtesting only).
        :param date:
        :return:
        '''
        if isinstance(self._core,HistoryTrading):
            if isinstance(date,str):
                self._current_time = datetime.datetime.strptime(date,'%Y-%m-%d')
            elif isinstance(date,datetime.datetime):
                self._current_time = date
        else:
            raise TypeError, '%s can not operate on current_time' % (self._core.__class__)
    def shift_current_time(self,days):
        '''
        Shift the engine time by a step of ``days`` (backtesting only).
        :param days:
        :return:
        '''
        if isinstance(self._core,RealTimeTrading):
            raise TypeError,'RealTimeTrading can not operate on current_time'
        elif isinstance(self._core,HistoryTrading):
            self._current_time += datetime.timedelta(days=days)
            return self._current_time.strftime('%Y-%m-%d')
    # buy
    @input_checker
    def buy(self,code,volume,price_type='now_price',price=None,date=None,effect_term = 1):
        # Live: forward the order dict; backtest: force average_price fills.
        if isinstance(self._core,RealTimeTrading):
            dic = {'code':code,
                   'volume':volume,
                   'price_type': price_type,
                   'price': price,
                   'effect_term':str(effect_term)}
            self._core.set_stock_dic(dic)
            res = self._core.buy()
            return json.loads(res)
        elif isinstance(self._core,HistoryTrading):
            if not date:
                date = self._current_time.strftime("%Y-%m-%d")
            dic = {'date':date,
                   'code': code,
                   'volume': volume,
                   'price_type': 'average_price',
                   }
            self._core.set_stock_dic(dic)
            res = self._core.bt_buy()
            return json.loads(res)
    # sell
    @input_checker
    def sell(self,code,volume,price_type='now_price',price=None,date=None,effect_term = 1):
        if isinstance(self._core,RealTimeTrading):
            dic = {'code':code,
                   'volume':volume,
                   'price_type': price_type,
                   'price': price,
                   'effect_term':str(effect_term)}
            self._core.set_stock_dic(dic)
            res = self._core.sell()
            return json.loads(res)
        elif isinstance(self._core,HistoryTrading):
            if not date:
                date = self._current_time.strftime("%Y-%m-%d")
            dic = {'date': date,
                   'code': code,
                   'volume': volume,
                   'price_type': 'average_price',
                   }
            self._core.set_stock_dic(dic)
            res = self._core.bt_sell()
            return json.loads(res)
    # cancel an order (live trading only)
    def cancel_order(self,pre_id):
        if isinstance(self._core,RealTimeTrading):
            return self._core.cancel_order(pre_id)
        else:
            raise TypeError
    # assets and positions
    def query_profit(self):
        if isinstance(self._core, RealTimeTrading):
            return json.loads(self._core.query_profit())
        elif isinstance(self._core,HistoryTrading):
            pass
    # query pending orders
    def query_records(self,start="2018-4-4", end="2018-04-05"):
        if isinstance(self._core,RealTimeTrading):
            return json.loads(self._core.query_records(start,end))
    # query historical settlement records
    def query_history_records(self,start='',end=''):
        if isinstance(self._core,RealTimeTrading):
            return json.loads(self._core.query_history_records(start,end))
        elif isinstance(self._core,HistoryTrading):
            return json.loads(self._core.bt_query_history_records(start, end))
    # export historical settlement records to a csv file (backtest only)
    def history_to_csv(self,path='history_record'):
        if isinstance(self._core,RealTimeTrading):
            pass
        elif isinstance(self._core,HistoryTrading):
            return self._core.get_history_csv(path)
    # list strategies (backtest only)
    def list_stratagy(self):
        if isinstance(self._core,HistoryTrading):
            return json.loads(self._core.get_strategy())
        else:
            raise AttributeError, '%s has no attribute stratagy_name' % (self._core.__class__)
    # set the strategy name (backtest only)
    def set_stratagy(self, stratagy_name):
        if isinstance(self._core, HistoryTrading):
            self._core.set_strategy_name(stratagy_name)
        else:
            raise AttributeError, '%s has no attribute stratagy_name' % (self._core.__class__)
    # create a strategy (backtest only)
    def create_stratagy(self,stratagy_name):
        if isinstance(self._core,HistoryTrading):
            return self._core.create_strategy(stratagy_name)
        else:
            raise AttributeError
    # delete a strategy (backtest only)
    def del_stratagy(self,stratagy_name):
        if isinstance(self._core,HistoryTrading):
            return self._core.del_strategy(stratagy_name)
        else:
            raise AttributeError, '%s has no attribute stratagy_name' % (self._core.__class__)
    # get selected attributes of one stock over a period (data engine only)
    # NOTE(review): mutable default argument ``args=[]`` is shared across
    # calls; harmless here since it is only passed through, never mutated.
    def get_stock_args(self, code, startday="", endday="", args=[]):
        if isinstance(self._core, HaizhiData):
            return self._core.get_stock_args(code, startday, endday, args)
        else:
            raise TypeError
    # get one attribute of all stocks over a period (data engine only)
    def get_stocks_arg(self, startday="", endday="", arg=""):
        if isinstance(self._core, HaizhiData):
            return self._core.get_stocks_arg(startday, endday, arg)
        else:
            raise TypeError
    # get all stock codes on the Shanghai/Shenzhen exchange over a period
    def get_exchange_stocks(self, startday="", endday="", exchange="all"):
        if isinstance(self._core, HaizhiData):
            return self._core.get_exchange_stocks(startday, endday, exchange)
        else:
            raise TypeError
    # get all stock codes in a given sector over a period (data engine only)
    def get_plate_stocks(self,startday="", endday="", plate=""):
        if isinstance(self._core, HaizhiData):
            return self._core.get_plate_stocks(startday, endday, plate)
        else:
            raise TypeError
| [
"787162506@qq.com"
] | 787162506@qq.com |
06d8642d821b8be29fbef654e1e24ef1fe4d3a1e | f480589c6f8c1d33fccb0dad4380dada77340660 | /migrations/versions/ac3f2179013d_.py | 90ab319f620ac1efe4ff2779223877201e2f8817 | [] | no_license | carlosribas/backend-coding-challenge | 519cb35bfd57caf014dfcd4505a7a84da6ae9bda | 32b9fa7dd7940a27a10eff3af01f4ce2e93ccdbd | refs/heads/master | 2020-04-05T04:29:42.085218 | 2018-11-12T12:28:26 | 2018-11-12T12:28:26 | 156,553,812 | 0 | 0 | null | 2018-11-07T13:53:30 | 2018-11-07T13:53:30 | null | UTF-8 | Python | false | false | 926 | py | """empty message
Revision ID: ac3f2179013d
Revises:
Create Date: 2018-11-07 20:12:02.753303
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ac3f2179013d'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    # Creates the 'translator' table (auto-generated Alembic migration;
    # applied history must not change — comments only).
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('translator',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('text', sa.String(length=255), nullable=False),
    sa.Column('text_translated', sa.String(length=255), nullable=True),
    sa.Column('uid', sa.String(length=50), nullable=True),
    sa.Column('status', sa.String(length=10), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    # Reverses upgrade() by dropping the 'translator' table.
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('translator')
    # ### end Alembic commands ###
| [
"caduribas@gmail.com"
] | caduribas@gmail.com |
84b0c5269b2439b5b470b7c84eed3eb96c57cd6d | 28979d6e7873687ee5dd2ff3b838629d03baaa58 | /djangoTutorials/djangoTutorials/wsgi.py | 6aa24c0f51532e0f8900408e467b4426e5ffbffd | [] | no_license | NSNSingh/tryingDjango | 469768d92b5b2398042aef27016ae3d3078233d4 | 22acaa189c5ce2f22bb861a41a72f970a52706df | refs/heads/master | 2021-01-02T08:44:40.961863 | 2017-08-03T12:35:46 | 2017-08-03T12:35:46 | 99,059,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | """
WSGI config for djangoTutorials project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoTutorials.settings")
application = get_wsgi_application()
| [
"sachinsngh64@gmail.com"
] | sachinsngh64@gmail.com |
e798b57fa3a276c7acb65be428cc91e5a58aca43 | e3f2ab2999a851121897c02ee81bd85c2543bb96 | /ketan/codes/ee18btech11030/ee18btech11030_1.py | 7034225e0dcac1c1afe24ced57259387f4318dfb | [] | no_license | yashwanthguguloth24/control | ee38822c00d709ab63a35a9ebf7be886abae7eb7 | cff91230294686a4ee9432b04aea4333198512c1 | refs/heads/master | 2022-09-16T14:49:10.111030 | 2020-06-01T03:21:08 | 2020-06-01T03:21:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,349 | py | ###################################################################
# This is python code for Bode plots.
# By Moparthi Varun Sankar
# April 28 , 2020
# Released under GNU GPL
###################################################################
from scipy import signal
import matplotlib.pyplot as plt
from pylab import*
#if using termux
import subprocess
import shlex
#end if
#Defining the transfer function
s1 = signal.lti([16200,21*16200,110*16200], [11, 18*11 ,99*11,162*11,0]) #G(s)
s2 = signal.lti([1,0.121], [754.223*1,754.223*0.0001604]) #Gc(s)
s3 = signal.lti([16200,342160.2,1823164.2,215622],[8296.2,149333,821522,1344116.2,215.6,0]) #G(s)*Gc(s)
#signal.bode takes transfer function as input and returns frequency,magnitude and phase arrays
w1,mag1,phase1 = signal.bode(s1,n=1000)
w2,mag2,phase2 = signal.bode(s2,n=1000)
w3,mag3,phase3 = signal.bode(s3,n=1000)
plt.figure()
plt.subplot(2,1,1)
plt.grid()
plt.xlabel('Frequency(rad/s)')
plt.ylabel('Magnitude(db)')
plt.semilogx(w1, mag1,label='Uncompensated') # Magnitude plot for G(s)
plt.semilogx(w2, mag2,label='Compensator') # Magnitude plot for Gc(s)
plt.semilogx(w3, mag3,label='Compensated') # Magnitude plot for G(s)*Gc(s)
plt.plot(38.95,0,'o')
plt.text(38.95,0, '({}, {})'.format(38.95,0))
plt.plot(0.0001604,0,'o')
plt.text(0.0001604,0, '({}, {})'.format(0.0001604,0))
plt.plot(0.121,-57.55,'o')
plt.text(0.121,-57.55, '({}, {})'.format(0.121,-57.55))
plt.plot(1.21,0,'o')
plt.text(1.21,0, '({}, {})'.format(1.21,0))
plt.legend()
plt.subplot(2,1,2)
plt.grid()
plt.xlabel('Frequency(rad/s)')
plt.ylabel('Phase(degree)')
plt.semilogx(w1, phase1,label='Uncompensated') # Phase plot for G(s)
plt.semilogx(w2, phase2,label='Compensator') # Phase plot for Gc(s)
plt.semilogx(w3, phase3,label='Compensated') # Phase plot for G(s)*Gc(s)
plt.annotate('', (1.21,-117), (1.21,-127), arrowprops=dict(facecolor='red',arrowstyle='<|-|>',mutation_scale=15))
plt.annotate("Lag in Phase",(1.21,-117))
plt.plot(38.95,-184,'o')
plt.text(38.95,-184, '({}, {})'.format(38.95,-184))
plt.legend()
#if using termux
plt.savefig('./figs/ee18btech11030/ee18btech11030_2.pdf')
plt.savefig('./figs/ee18btech11030/ee18btech11030_2.eps')
subprocess.run(shlex.split("termux-open ./figs/ee18btech11030/ee18btech11030_2.pdf"))
#else
#plt.show()
| [
"gadepall@gmail.com"
] | gadepall@gmail.com |
06c79cf2ab054537d61dc9f297aec93bfa26b767 | 4f43cb4a2cbdafde4d9070aace0edca633cb6ab4 | /stats.py | bfb89069fc6f8e55e8d7ab98671bde3a69d70d0a | [] | no_license | trevorc/blackscholes | 72a05ec97b52e2c4d15b2bfd5db86991724ffda3 | 2b927a5f0c469ea52b7de6709572a3a8f2105ffc | refs/heads/master | 2016-09-05T19:20:58.816374 | 2010-08-09T00:53:14 | 2010-08-09T00:54:53 | 825,545 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | import collections
import math
import scipy.optimize
# Fit-quality summary: residual sum of squares, coefficient of
# determination, and root-mean-square error.
Errors = collections.namedtuple('Errors', ['rss', 'r_squared', 'rmse'])

def lm(f, y, x, b0, **kwargs):
    """Fit the model ``f(x, b)`` to observations ``y`` by least squares.

    f      -- model function of (x, parameter vector b)
    y, x   -- observed outputs and inputs (array-like)
    b0     -- initial parameter guess
    kwargs -- forwarded to scipy.optimize.leastsq
    Returns the (solution, info) tuple from scipy.optimize.leastsq.
    """
    def compute_residuals(b):
        return y - f(x, b)
    return scipy.optimize.leastsq(compute_residuals, b0, **kwargs)

def errors(f, y, x, b):
    """Return an ``Errors(rss, r_squared, rmse)`` summary of fit ``b``."""
    Y = f(x, b)
    y_mean = sum(y, 0.0) / len(y)
    rss = sum((y - Y) ** 2)
    ss_tot = sum((y - y_mean) ** 2)
    r_squared = 1 - rss / ss_tot
    rmse = math.sqrt(rss / len(y))
    # Reuse the rmse computed above; the original recomputed
    # math.sqrt(rss / len(y)) in the return and left `rmse` unused.
    return Errors(rss, r_squared, rmse)
| [
"trevor@caira.com"
] | trevor@caira.com |
38a31c1facdab62182e07fb91d89b3bbd83243e6 | 15961f319555c38ebb80b4801edcc84a24f9415b | /loader.py | 13c80e259f87a77668787c28d9ab90ed9859d98e | [] | no_license | elifiner/crmz | f4a7897cbbcc6b7d0fac4fc8ad7705bc55e81c5f | cd15ff574a1e523aa1594f2929b62c3f3f75d153 | refs/heads/master | 2020-12-26T02:49:29.500188 | 2015-06-29T12:56:44 | 2015-06-29T12:56:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | import sys
# Read one Python literal per line from the file named on the command line
# and collect the parsed values.
# WARNING: eval() executes arbitrary code from the input file, so this is
# only safe for fully trusted data -- consider ast.literal_eval instead.
companies = []
for line in open(sys.argv[1]):
    companies.append(eval(line.strip()))
print(companies)
"eli.finer@gmail.com"
] | eli.finer@gmail.com |
64b7d70b08eb9417de61cee04e088e8fa686e7e3 | 9f4705a8472dc42427c5509ccce94106c38bd9ee | /Q3.py | dc28bed1e17d458d14072f3e05e63b542a27a38d | [] | no_license | Aniket-Bhagat/Computing_tools_8 | 305382aaa6bb3d6020ccf00d10aa3e2783c5de67 | 109a21b6647fadd3c5e7e9503b517b3c010a0d8f | refs/heads/master | 2020-04-22T04:13:18.329327 | 2019-02-11T11:12:44 | 2019-02-11T11:12:44 | 170,115,525 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | import numpy, matplotlib.pyplot as plt
from scipy.stats.kde import gaussian_kde
data = numpy.loadtxt('data.dat', dtype='float', delimiter='\t', unpack=True)
def probdist(colno):
    """Plot a Gaussian-KDE density estimate of column ``colno`` of the
    module-level ``data`` array into subplot ``colno + 1`` of a 2x2 grid.
    """
    # `global` is unnecessary here (data is only read, never rebound).
    global data
    plt.subplot(2,2,colno+1)
    # Kernel density estimate of the column's values.
    kde = gaussian_kde( data[colno] )
    # Evaluate the density on 100 evenly spaced points over the data range.
    dist_space = numpy.linspace( min(data[colno]), max(data[colno]), 100 )
    plt.plot( dist_space, kde(dist_space) )
    plt.title('Distribution for column %d'%(colno+1))
plt.suptitle('Probability Distribution of Data in file')
probdist(0)
probdist(1)
probdist(2)
probdist(3)
mng = plt.get_current_fig_manager()
mng.resize(*mng.window.maxsize())
plt.show()
# import numpy as np
# import scipy.stats as stats
# import matplotlib.pyplot as plt
# data = np.loadtxt('data.dat', dtype='float', delimiter='\t', unpack=True)
# data = sorted(data[0])
# fit = stats.norm.pdf(data, np.mean(data), np.std(data))
# plt.plot(data,fit,'-')
# # plt.hist(data,normed=True)
# plt.show() | [
"noreply@github.com"
] | noreply@github.com |
31af98181d7dd6927e3d2117e9d1f89471704af2 | 23a8b03599b0e97157739c367168b481f046726f | /vocabchallenge/dictionaries/french_dict_edit.py | 2861d4fd9f9037fe02a7f792bde2f9ad0a532a0f | [] | no_license | lzbotha/VocabChallenge | 04be101d5a6bf49dcaf895417a9ecf095b69fd14 | 5abff66a397c66c9ce79aaf2fd27925289013db9 | refs/heads/master | 2021-01-19T18:11:07.222922 | 2014-03-17T07:59:59 | 2014-03-17T07:59:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,132 | py |
f = open('french_dictionary.tsv','r')
outpt = open('french_dictionary_v1.tsv','w')
def remove_formatting(text):
    """Return ``text`` with the wiki-markup characters '[', ']' and '#' removed.

    Uses str.translate, which deletes the characters in a single C-level
    pass instead of rebuilding the string one character at a time.
    """
    return text.translate(str.maketrans('', '', '[]#'))
# as it stands now this seperates all words with a direct translation
for line in f:
dictitem = line.split('\t')
if dictitem[2]!='Suffix' and dictitem[2]!='Prefix' and dictitem[2]!='Proper noun' and dictitem[2]!='Symbol' and dictitem[2]!='Proverb' and dictitem[2]!='Abbreviation' and dictitem[2]!='Initialism':
if not 'initialism' in dictitem[-1] and not 'Arabic spelling' in dictitem[-1] and 'initialism' not in line and 'abbreviation' not in line:
if '[[' in dictitem[-1] and not '{{' in dictitem[-1] and not '[[#English' in dictitem[-1]:
if len(dictitem[-1].split(' ')) == 2:
if not '|' in dictitem[-1].split(' ')[1]:
outpt.write(dictitem[1]+'\t'+remove_formatting(dictitem[-1]))
else:
outpt.write(dictitem[1]+'\t'+remove_formatting(dictitem[-1]))
f.close() | [
"leonardzbotha@gmail.com"
] | leonardzbotha@gmail.com |
c9f81bef1f3181735e2d92ff5e734356f7d6e16f | 14373275670c1f3065ce9ae195df142146e2c1a4 | /stubs/SQLAlchemy/sqlalchemy/cimmutabledict.pyi | 1a1a3006afc360bf3f13c4a33677a997d14fb729 | [
"Apache-2.0",
"MIT"
] | permissive | sobolevn/typeshed | eb7af17c06a9722f23c337e6b9a4726223155d58 | d63a82640390a9c130e0fe7d409e8b0b836b7c31 | refs/heads/master | 2023-08-04T05:59:29.447015 | 2023-06-14T21:27:53 | 2023-06-14T21:27:53 | 216,265,622 | 2 | 0 | Apache-2.0 | 2022-02-08T10:40:53 | 2019-10-19T20:21:25 | Python | UTF-8 | Python | false | false | 737 | pyi | from _typeshed import SupportsKeysAndGetItem
from collections.abc import Iterable
from typing import Generic, TypeVar, overload
from typing_extensions import final
_KT = TypeVar("_KT")
_KT2 = TypeVar("_KT2")
_VT = TypeVar("_VT")
_VT2 = TypeVar("_VT2")
@final
class immutabledict(dict[_KT, _VT], Generic[_KT, _VT]):
@overload
def union(self, __dict: dict[_KT2, _VT2]) -> immutabledict[_KT | _KT2, _VT | _VT2]: ...
@overload
def union(self, __dict: None = None, **kw: SupportsKeysAndGetItem[_KT2, _VT2]) -> immutabledict[_KT | _KT2, _VT | _VT2]: ...
def merge_with(
self, *args: SupportsKeysAndGetItem[_KT | _KT2, _VT2] | Iterable[tuple[_KT2, _VT2]] | None
) -> immutabledict[_KT | _KT2, _VT | _VT2]: ...
| [
"noreply@github.com"
] | noreply@github.com |
68d7bf5e288f8d694bff5efa73f76d5d085e833e | 24c8c76dee0cfbeaa3bc61666e63801aaf86afbc | /acmicpc.net/2798/test/script.py | 3b02d2dfb3a6b5edce5714ff49fb0809cb7d0a71 | [] | no_license | developers-algorithm-study/mjy9088 | e018f93f87394e106163259659a913b2ec2dd7f5 | 04f8fa0611a76aba45956ef82218b0e15152faa9 | refs/heads/master | 2022-09-16T17:55:21.387115 | 2022-08-18T14:32:35 | 2022-08-18T14:33:46 | 173,717,731 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | import sys
from os import path, sep
from importlib import import_module
base_path = path.dirname(path.dirname(path.abspath(__file__)))
root_path = path.dirname(path.dirname(base_path))
sys.path.append(root_path + sep + 'test')
if import_module('pipe').test(
prelaunch_tasks=[['cargo', 'build']],
popen_params=sep.join([base_path, 'target', 'debug', 'acmicpc_2798']),
path_to_cases_json=sep.join([base_path, 'test', 'cases.json'])
):
print('Passed all cases')
else:
exit(1)
| [
"mjy9088@naver.com"
] | mjy9088@naver.com |
96a1a69d636663d00ed646ff53f6c1fde2ee639b | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /9zsDKijmBffmnk9AP_4.py | f1bdc518936ccc7193328054c14e0aff9757174a | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py |
def programmers(one, two, three):
    """Return the spread (largest minus smallest) of the three values."""
    salaries = (one, two, three)
    return max(salaries) - min(salaries)
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
4b2a96ef55a632d0fcb49ebd4f0e74caf63bdc4b | 6804c33960e66afa43c54f147e4ca86f32b2ee70 | /stands/recognizer/gender_detect.py | 76806f349f582ea4cc556999d2079cddb56533c8 | [] | no_license | ApiProdject/apiproject | 9cd6be2781f75338a41c2376e8303b910bbc4307 | 6e35244e0c003b6b5331def17c8c1d7ad671dab2 | refs/heads/master | 2022-07-10T16:44:50.954386 | 2020-05-17T05:37:54 | 2020-05-17T05:37:54 | 244,185,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,047 | py | import os
import cv2
import numpy as np
from apiproject.settings import STATIC_DIR
from infoPoints.models import InfoPoint
from stands.recognizer.SSRNET_model import SSR_net_general
class GenderRecognizer:
    """Singleton wrapper around an SSR-Net gender-classification model.

    Obtain the shared instance via :meth:`get_instance`; constructing a
    second instance directly raises.
    """
    # Shared singleton instance (None until first construction).
    __instance__ = None
    @staticmethod
    def get_instance():
        """Return the shared instance, creating it on first use."""
        if not GenderRecognizer.__instance__:
            GenderRecognizer()
        return GenderRecognizer.__instance__
    def __init__(self):
        if GenderRecognizer.__instance__ is None:
            # Build the SSR-Net model and load the pre-trained gender
            # weights shipped under the project's static files.
            self.gender_net = SSR_net_general(64, [3, 3, 3], 1, 1)()
            self.gender_net.load_weights(os.path.join(STATIC_DIR, 'models/ssrnet_gender_3_3_3_64_1.0_1.0.h5'))
            GenderRecognizer.__instance__ = self
        else:
            raise Exception("This class is a singleton!")
    def gender(self, face):
        """Classify the gender of a face crop.

        face -- face image as a numpy array (any size; resized below).
        Returns 1 when the network score is >= 0.5, else 2
        (presumably 1 = male, 2 = female -- confirm against callers).
        """
        # Resize to the 64x64 input the network was built with, then
        # rescale pixel values into [0, 255].
        blob = cv2.resize(face, (64, 64))
        blob = cv2.normalize(blob, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
        gender = self.gender_net.predict(np.expand_dims(blob, axis=0))
        return 1 if (gender >= 0.5) else 2
| [
"egorgavrilenko6@gmail.com"
] | egorgavrilenko6@gmail.com |
c23e2c7b72b4b8a49f79f3091cb3eb3ca241fb0f | a3123495438f8054f15711be138e10bef4b8c8ce | /parallax_eddie_robot/src/parallax_eddie_robot/msg/_Velocity.py | c57d36e43ccd2d56f604a02ce4b991f1402545b3 | [] | no_license | haikalpribadi/haikalpribadi-ros-pkg | 53aeb2335dee131bcc626afb13ceb39a56f00556 | a3f9c2580e82beabe83a463cdbe46c40dc70ea1b | refs/heads/master | 2021-01-10T20:32:15.132608 | 2020-05-20T13:41:45 | 2020-05-20T13:41:45 | 3,742,934 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,195 | py | """autogenerated by genmsg_py from Velocity.msg. Do not edit."""
import roslib.message
import struct
class Velocity(roslib.message.Message):
_md5sum = "9d5c2dcd348ac8f76ce2a4307bd63a13"
_type = "parallax_eddie_robot/Velocity"
_has_header = False #flag to mark the presence of a Header object
_full_text = """float32 linear
float32 angular
"""
__slots__ = ['linear','angular']
_slot_types = ['float32','float32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
linear,angular
@param args: complete set of field values, in .msg order
@param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Velocity, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.linear is None:
self.linear = 0.
if self.angular is None:
self.angular = 0.
else:
self.linear = 0.
self.angular = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
@param buff: buffer
@type buff: StringIO
"""
try:
_x = self
buff.write(_struct_2f.pack(_x.linear, _x.angular))
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
@param str: byte array of serialized message
@type str: str
"""
try:
end = 0
_x = self
start = end
end += 8
(_x.linear, _x.angular,) = _struct_2f.unpack(str[start:end])
return self
except struct.error as e:
raise roslib.message.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
@param buff: buffer
@type buff: StringIO
@param numpy: numpy python module
@type numpy module
"""
try:
_x = self
buff.write(_struct_2f.pack(_x.linear, _x.angular))
except struct.error as se: self._check_types(se)
except TypeError as te: self._check_types(te)
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
@param str: byte array of serialized message
@type str: str
@param numpy: numpy python module
@type numpy: module
"""
try:
end = 0
_x = self
start = end
end += 8
(_x.linear, _x.angular,) = _struct_2f.unpack(str[start:end])
return self
except struct.error as e:
raise roslib.message.DeserializationError(e) #most likely buffer underfill
_struct_I = roslib.message.struct_I
_struct_2f = struct.Struct("<2f")
| [
"haikal.pribadi@gmail.com"
] | haikal.pribadi@gmail.com |
6399e8279ba126093ebb5008d5f3db2f7f0e9f0f | e4c1cc89b0014ec932014eb25302c0f431800017 | /polls/models.py | 0264edf31be5314445db6416b6f03c6d2ca17f62 | [] | no_license | jungdoyoon/polls_example | f3b5f98adb68a0fc87d6b591fffcc1ada4baf63b | c05f5841867dbf08a4c21bf5ea1864a5f94c0484 | refs/heads/master | 2022-07-27T17:45:52.365080 | 2020-05-20T05:57:48 | 2020-05-20T05:57:48 | 265,464,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | from django.db import models
# Create your models here.
class Question(models.Model):
    """A poll question together with the date it was published."""
    question_text = models.CharField(max_length=200)
    # Verbose name 'date published'; DateField stores a date only (no time).
    pub_date=models.DateField('date published')
    def __str__(self):
        """Use the question text as the human-readable representation."""
        return self.question_text
class Choice(models.Model):
    """One selectable answer for a Question, with its running vote tally."""
    # Deleting the parent Question also deletes its choices (CASCADE).
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text=models.CharField(max_length=200)
    votes= models.IntegerField(default=0)
    def __str__(self):
        """Use the choice text as the human-readable representation."""
        return self.choice_text
"jungdo8016@naver.com"
] | jungdo8016@naver.com |
f96decde8d13d57b8a17e6a18603082d48517ed8 | 091d605dcd15b61abb88b7e7c00fc2ccadc5c51a | /KIM_dipole.py | a27f6fed3ffb721d594d6ec9241146fad23d6f0b | [] | no_license | yqian1/OpenKIM | cb0abeee3f5e3d3b9d649118d8c85a5e283c8ab6 | 2f8d7469b4f67eac4c20265ef11815838307c04e | refs/heads/main | 2023-01-06T09:28:46.923355 | 2020-10-28T23:51:23 | 2020-10-28T23:51:23 | 308,100,239 | 0 | 0 | null | 2020-10-28T18:40:50 | 2020-10-28T18:01:31 | Python | UTF-8 | Python | false | false | 6,077 | py | import numpy as np
import itertools
'''
This script generates MD++ supercells for core energy calculations
The dislocation plane normal is along the x-direction
The dislocation line is along the z-direction
Nicolas Bertin, 08/31/2020
'''
def find_burgers_coord(theta, c2, n2, c3, n3):
    """Return the Burgers vector in scaled (supercell) coordinates.

    With d2 = n2*c2 and d3 = n3*c3 the supercell repeat vectors, solves
    a*d2[i] + b*d3[i] = 0.5 for two Cartesian components i and returns
    [0, a, b].

    theta  -- dislocation character angle (degrees); a negative angle
              flips the sign so an atom is always removed, not inserted
    c2, c3 -- unit-cell repeat directions (length-3 arrays)
    n2, n3 -- repeat counts along c2 and c3
    """
    d2 = n2*c2
    d3 = n3*c3
    # Solve with the (x, y) components first; fall back to the (z, y)
    # components when that 2x2 system is singular.  NOTE: the original
    # tested np.abs(det) > 1e20, which is never true, so the first branch
    # was unreachable and the fallback divided by zero whenever its own
    # determinant vanished; a small epsilon is the intended test.
    det = d2[0]*d3[1]-d2[1]*d3[0]
    if np.abs(det) > 1e-10:
        a = 0.5*(d3[1]-d3[0])/det
        b = 0.5*(d2[0]-d2[1])/det
    else:
        det = d2[2]*d3[1]-d2[1]*d3[2]
        a = 0.5*(d3[1]-d3[2])/det
        b = 0.5*(d2[2]-d2[1])/det
    # make sure always remove atom, not insert atom
    if theta < 0:
        a = -a
        b = -b
    return np.array([0.,a,b])
def generate_script(celldata, theta):
    """Build an MD++ input script for a dislocation-dipole core-energy run.

    celldata -- dict of parallel arrays ('theta', 'c1', 'n1', 'c2', 'n2',
                'c3', 'n3', 'bs') describing candidate supercells
    theta    -- desired character angle (degrees); matched to within 0.01
    Returns the script text.  Raises if no cell matches the angle.
    """
    # Find corresponding cell
    ind = np.argwhere(np.abs(celldata['theta']-theta)<1e-2)
    if ind.size == 0:
        raise Exception('Cannot find character angle in the cell data')
    ind = ind[0]
    angle = celldata['theta'][ind]
    c1 = celldata['c1'][ind][0]
    n1 = celldata['n1'][ind]
    c2 = celldata['c2'][ind][0]
    n2 = celldata['n2'][ind]
    c3 = celldata['c3'][ind][0]
    n3 = celldata['n3'][ind]
    bs = celldata['bs'][ind][0]
    # Generate MD++ script
    script = '# -*-shell-script-*-\n'
    script += '#MD++ script to compute core energies\n'
    script += 'setnolog\n'
    script += 'setoverwrite\n'
    script += 'dirname = runs/KIM/dipole_%.2f_ref\n' % angle
    script += '#------------------------------------------------------------\n'
    script += '#Read in EAM/MEAM potential\n'
    script += '#potfile = "~/Potentials/w_version3.eam" eamgrid = 5000 readeam\n'
    script += 'potfile = ~/Documents/Codes/MD++/potentials/EAMDATA/eamdata.W.Marinica13 eamgrid = 80001 readeam\n'
    script += 'NNM = 100\n'
    script += '#------------------------------------------------------------\n'
    script += 'latticestructure = body-centered-cubic\n'
    script += 'latticeconst = 3.14339 # (A) for W_cea\n'
    script += '\n'
    # Supercell repeat vectors: rows are x (dipole direction), y, z (line).
    script += 'makecnspec = [%4d %4d %4d %4d #(x) dipole direction\n' % (c1[0], c1[1], c1[2], n1)
    script += '              %4d %4d %4d %4d #(y)\n' % (c2[0], c2[1], c2[2], n2)
    script += '              %4d %4d %4d %4d ] #(z) dislocation line\n' % (c3[0], c3[1], c3[2], n3)
    script += '\n'
    script += 'makecn finalcnfile = perf.cn writecn\n'
    script += '#-------------------------------------------------------------\n'
    script += '#Create Dislocation Dipole by using linear elastic solutions\n'
    script += '\n'
    script += 'mkdipole = [ 3 1 #z(dislocation line), y(dipole direction)\n'
    script += '             %12.8f %12.8f %12.8f #(bx,by,bz)\n' % (bs[0], bs[1], bs[2])
    script += '             -0.01 -0.2499 0.251 #(x0,y0,y1) #type (2)\n'
    script += '             0.278 -10 10 -10 10 1 ] #nu, number of images, shiftbox\n'
    script += '\n'
    script += 'makedipole finalcnfile = makedp_%.2f.lammps writeLAMMPS\n' % angle
    script += '#-------------------------------------------------------------\n'
    script += '#Conjugate-Gradient relaxation\n'
    script += 'conj_ftol = 1e-7 conj_fevalmax = 3000\n'
    script += 'conj_fixbox = 1 conj_dfpred = 1e-4\n'
    script += 'relax\n'
    script += 'eval\n'
    script += 'finalcnfile = dipole_%.2f.lammps writeLAMMPS\n' % angle
    script += 'quit\n'
    return script
# supercell size
ar = 1.5 # aspect ratio x/y
n2 = 10.0 # supercell size along the y-direction
n3 = 3.0 # supercell size along the z-direction
mult = 3.0 # multiplication factor
bv = np.array([1,1,1]) # Burgers vector direction
c1 = np.array([-1,1,0]) # dislocation plane index
# maximum Miller index of repeat vectors allowed to
# generate supercells of various character angles
nmax = 10
# generate in-plane discrete directions
p = np.array(list(itertools.permutations(range(1, nmax+1), 2)))
p = np.vstack(([1,0], [1,1], [0,1], p))
m = np.gcd(p[:,0], p[:,1])
p = p / m[:, np.newaxis]
# generate global supercell repeat vectors
if np.abs(np.dot(bv, c1)) > 1e-5:
raise Exception('Burgers vector and dislocation plane must be orthogonal')
y0 = np.cross(c1, bv)
my = np.gcd(y0[0], np.gcd(y0[1], y0[2]))
y0 = y0 / my
x = bv / np.linalg.norm(bv)
y = y0 / np.linalg.norm(y0)
c3plus = np.outer(p[:,0], bv) + np.outer(p[:,1], y0)
c3minus = np.outer(p[:,0], bv) - np.outer(p[:,1], y0)
c3 = np.unique(np.vstack((c3plus, c3minus)), axis=0)
# compute character angles
c3n = np.linalg.norm(c3, axis=1)
c3x = np.dot(c3, x)
c3y = np.dot(c3, y)
angle = np.arctan2(c3y, c3x)*180.0/np.pi
ia = np.argsort(angle)
angle = angle[ia]
c3 = c3[ia]
# compute complementary supercell repeat vector
c2 = np.cross(c3, c1)
# determine supercell size
m2 = np.gcd(c2[:,0], np.gcd(c2[:,1], c2[:,2]))
cm2 = c2 / m2[:, np.newaxis]
l2 = np.linalg.norm(cm2, axis=1)
n2 = np.ceil(mult*n2/l2)
m3 = np.gcd(c3[:,0], np.gcd(c3[:,1], c3[:,2]))
cm3 = c3 / m3[:, np.newaxis]
l3 = np.linalg.norm(cm3, axis=1)
n3 = np.ceil(mult*n3/l3)
# adjust aspect ratio
cm1 = np.tile(c1, (c3.shape[0], 1))
l1 = np.linalg.norm(cm1, axis=1)
n1 = np.round(ar*n2*l2/l1)
# select orientations with acceptable Miller indices
cmax = np.max(np.abs(np.hstack((cm1,cm2,cm3))), axis=1)
ind = (cmax<=nmax)
# Burgers vector in scaled coordinates
bs = np.zeros((angle.size,3))
for i in range(angle.size):
bs[i] = find_burgers_coord(angle[i], cm2[i], n2[i], cm3[i], n3[i])
# all supercells data
celldata = {
"theta": angle[ind],
"c1": cm1[ind],
"n1": n1[ind],
"c2": cm2[ind],
"n2": n2[ind],
"c3": cm3[ind],
"n3": n3[ind],
"bs": bs[ind]
}
# Generate MD++ script for a given character angle
theta = 90.0 # edge dislocation
#theta = 0.0 # screw dislocation
#theta = 70.53 # M111 dislocation
script = generate_script(celldata, theta)
print(script)
if 0:
# Print MD++ script into file
script_file = open('/Users/bertin1/Documents/Codes/MD++/scripts/KIM/W-dipole_test.script', 'w')
script_file.write(script)
script_file.close() | [
"noreply@github.com"
] | noreply@github.com |
7ab15aaa9ab86f2169fe7e677124ec25d9b66c6e | 6071edc45eace43e4c9335650986c7588652a714 | /NER-AV.py | 7094e5d9ba46880ceadb3e9846542cde72daefe5 | [] | no_license | Chadni-Islam/cybersecproject1 | 47e685ab34e544f2229bac39895a6bac3275a9d8 | e3b5892295637f814924c66e9e9bfe37dea27bca | refs/heads/master | 2020-04-12T00:41:51.842388 | 2018-12-16T06:09:56 | 2018-12-16T06:09:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,170 | py | # Named Entity Recognintion on Alien Vault blog posts
# Scrapes info about malware/threats from page and attempts to extract threat data
import utils
import spacy
import pandas as pd
import csv
from bs4 import BeautifulSoup
import requests
import pprint
from OTXv2 import OTXv2
from OTXv2 import IndicatorTypes
from googleapiclient.discovery import build
# Initialise nlp corpus
nlp = spacy.load('en_core_web_sm')
# Google api key and search engine id
my_api_key = 'AIzaSyAVfCR6inp74mBkr7w12TVLH3l4vkWwsiw'
my_cse_id = '003774403560526411905:wsb8ncz3hw4'
# Performs google search using google custom search API
def google_search(search_term, api_key, cse_id, **kwargs):
    """Run a Google Custom Search query and return the result items.

    Extra keyword arguments (e.g. num=10) are forwarded unchanged to the
    CSE ``list`` call.
    """
    client = build("customsearch", "v1", developerKey=api_key)
    request = client.cse().list(q=search_term, cx=cse_id, **kwargs)
    response = request.execute()
    return response['items']
# initialise Open Threat Exchange API
otx = OTXv2("1bc976440bad33a81703fcec442f158153fe93976770874ea1af79680a84f0c7")
# open a list of countries
f = open("other/country.csv", "rb")
# Keywords to detect if attack vector or asset
# Add space in front of try so it isn't picked up as a part of a word
attackKeys = ["attacker", "trick", " try ", " tries ", "attempt", "launch"]
assetKeys = ["result", "ability", "grant", "installation", "corrupt", "poison", "after ", "information"]
# Get some blog pages
links = utils.getAlienVaultPage(3)
otxurl = "https://otx.alienvault.com/pulse/"
for link in links:
# Scrape blog page
text = utils.scrapeAlienVault(link)
# Information we can extract
title = [] # Extracted from TITLE
rawtext = [] # Raw paragraph text, so accuracy can be checked
names = [] # NER - ORG
country = [] # NER - GPE
date = [] # NER - DATE
attackvectors = [] # Attack Vectors (found with keywords)
assets = [] # Assets (found with keywords)
capeclink = [] # CAPEC Article found (to see accuraccy)
likelihood = [] # Obtain from CAPEC
severity = [] # Obtain from CAPEC
risk = [] # Obtain from likelihood and severity
maliciousness = [] # 1 - least, 5 - maximum
indicators = [] # For now extracted from OTX links
# Extracting csv name
temp = link.split('-')
csvName = temp[-5] + '-' + temp[-4] + '-' + temp[-3] + '-' + temp[-2] + '-' + temp[-1] + ".csv"
# Iterates through pparagraphs of blog post
for count, t in enumerate(text):
# Create a list of sentences abstraction
sents = t.split(". ")
# Ignore paragraphs with only one sentence
if len(sents) < 3:
continue
# TITLE
# Extracting title based on delimeters
temp = t.split('%')[0]
temp = temp.split('-')
# If no '-', then no relevant title/category (for now)
if len(temp) == 1:
continue
else:
title.append(temp[-1])
# Cut title, perform nlp
t = t.split('%')[1]
doc = nlp(t)
# Append Raw text
rawtext.append(t)
# NAMES COUNTRIES DATES
# Extracting names/countries/dates
tempN, tempC, tempD = "", "", ""
for X in doc.ents:
# Threat name
if X.label_ == 'ORG':
# Ignoring 'Open Threat Exchange'
if (X.text == "Open Threat Exchange"):
continue
tempN += X.text + ', '
# Country/Area
elif X.label_ == 'GPE':
# Check that entity is actually a country
isCountry = False
for row in f:
row = str(row)
if X.text.lower() in row.lower():
isCountry = True
tempC += row.split(",")[2] + ' '
# Else not a country, so assume ORG
if not isCountry:
tempN += X.text + ', '
# Date
elif X.label_ == 'DATE':
tempD += X.text + ', '
names.append(tempN)
country.append(tempC)
date.append(tempD)
# INDICATORS
# Extracting OTX links for indicators
if (otxurl in t):
pulseID = t.split(otxurl)[-1]
tempI = ""
# Get all indicators for a specific pulse
results = otx.get_pulse_indicators(pulseID)
for count, indicator in enumerate(results):
# Only get first 5 for now, some have too many
if count > 5:
break
tempI += indicator["indicator"] + " (" + indicator["type"] + ")\n"
indicators.append(tempI)
else:
indicators.append("")
# MALICIOUSNESS
# Identify maliciousness by keywords which follow mitre rules from: www.mitre.org/sites/default/files/pdf/10_2914.pdf
malic = 0
key2 = ["target", "data", "information", "access"]
key3 = ["backdoor", "install"]
key4 = ["military", "government", "nation", "defense", "defence"]
for k in key2:
if k in t:
malic = 2
break
for k in key3:
if k in t:
malic = 3
break
for k in key4:
if k in t:
malic += 1
break
# If still 0, couldn't identify
if malic == 0:
malic = '-'
maliciousness.append(malic)
# ATTACKVECTORS ASSETS LIKELIHOOD SEVERITY
asses = ""
attacks = ""
caplink = ""
likeli = ""
sev = ""
# iterate through sentences
for i in sents:
# apply nlp
doc = nlp(i)
# Iterate through attack keywords
for j in attackKeys:
# If keyword in sentence
if j in i.lower():
# Iterate through nlp tokens
for count, token in enumerate(doc):
# Only keep nouns and verbs
if token.pos_ == "NOUN" or token.pos_ == "VERB":
attacks += token.text + ' '
# Break after first keyword found
break
# Iterate through asset keywords
for j in assetKeys:
# if keyword in sentence
if j in i.lower():
short = "" # A shorter version of the sentence
# Iterate through nlp tokens
c = 0
for count, token in enumerate(doc):
# Only keep nouns and verbs
if token.pos_ == "NOUN" or token.pos_ == "VERB":
asses += token.text + ' '
c += 1
# Only take 3 for best search results
if c < 4:
short += token.text + ' '
# Search for a CAPEC resource
query = "capec.mitre.org " + short
res = google_search(query, my_api_key, my_cse_id, num=10)
# Get first relevant link
for r in res:
# Only take capec data definitions
if "capec.mitre.org/data/definition" in r['link']:
caplink = r['title']
# Get page
page = requests.get(r['link'])
soup = BeautifulSoup(page.text, 'html.parser')
# Take first two detail parameters
for count, rf in enumerate(soup.find_all(id="Detail")):
tex = rf.find('p')
if count == 0:
try:
likeli = tex.get_text()
except AttributeError:
pass
elif count == 1:
try:
sev = tex.get_text()
except AttributeError:
pass
else:
break
break
break
attackvectors.append(attacks)
assets.append(asses)
capeclink.append(caplink)
likelihood.append(likeli)
severity.append(sev)
# RISK
# Calculated from likelihood and severity
# Options Very Low, Low, Medium, High, Very High
# Risk Matrix taken from https://itsecurity.uiowa.edu/resources/everyone/determining-risk-levels
ris = ""
if ((sev == "Very Low") or (sev == "Low" and (likeli == "Medium" or likeli == "Low" or likeli == "Very Low")) or (sev == "Medium" and likeli == "Very Low")):
ris = "Low"
elif ((sev == "Low" and (likeli == "Very High" or likeli == "High")) or (sev == "Medium" and (likeli == "High" or likeli == "Medium" or likeli == "Low")) or (sev == "High" and (likeli == "Medium" or likeli == "Low" or likeli == "Very Low")) or (sev == "Very High" and (likeli == "Low" or likeli == "Very Low"))):
ris = "Medium"
elif ((sev == "Medium" and likeli == "Very High") or (sev == "High" and (likeli == "Very High" or likeli == "High")) or (sev == "Very High" and (likeli == "Very High" or likeli == "High" or likeli == "Medium"))):
ris = "High"
risk.append(ris)
# Combine data into a pandas dataframe
ThreatInfo = pd.DataFrame({
"Title": title,
"RawText": rawtext,
"Names": names,
"Country": country,
"Date": date,
"Attack Vectors": attackvectors,
"Assets": assets,
"Likelihood": likelihood,
"Severity": severity,
"Risk": risk,
"Maliciousness": maliciousness,
"Indicators": indicators
})
ThreatInfo.to_csv("output/3/" + csvName, encoding='utf-8', columns=["Title", "RawText", "Date", "Names", "Country", "Attack Vectors", "Assets", "Likelihood", "Severity", "Risk", "Maliciousness", "Indicators"])
| [
"a1706489@student.adelaide.edu.au"
] | a1706489@student.adelaide.edu.au |
7ab25735908dffad4ff145d77a16b3adf7334ef5 | d094ba0c8a9b1217fbf014aa79a283a49aabe88c | /env/lib/python3.6/site-packages/djcelery/tests/_compat.py | 4969b5c033405ba7bf924e2166b838b11922e304 | [
"Apache-2.0"
] | permissive | Raniac/NEURO-LEARN | d9274e0baadd97bb02da54bdfcf6ca091fc1c703 | 3c3acc55de8ba741e673063378e6cbaf10b64c7a | refs/heads/master | 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 | Apache-2.0 | 2022-12-09T21:01:00 | 2019-04-18T03:57:00 | CSS | UTF-8 | Python | false | false | 113 | py | # coding: utf-8
try:
from unittest.mock import patch
except ImportError:
from mock import patch # noqa
| [
"leibingye@outlook.com"
] | leibingye@outlook.com |
b9132f16bfc5b5e0cc2704d85af65a089cffd7cb | eee647635af1583d9b1150b7cd3195336291e1d2 | /ABC133/c.py | eb49ffdc05d6db403c85c8227196668dd8d288ac | [] | no_license | lilium513/competition_programing | 42f69222290b09b491477b8a2b9c2d4513ebe301 | 45082bf542224b667e753ad357cf145f683fde54 | refs/heads/master | 2020-06-22T03:16:34.510906 | 2019-07-31T18:22:31 | 2019-07-31T18:22:31 | 197,619,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py |
def do():
L, R = list(map(int, input().split(" ")))
ans = 10 ** 15
if R - L < 5000: #差が小さい場合は全探索
for i in range(L,R + 1):
for j in range(i+1,R + 1):
if (i*j) % 2019 < ans:
ans = (i*j) % 2019
else:#そうでなければ確実に一つ2019の倍数がある
ans = 0
print(ans)
| [
"lim.intefx@gmail.com"
] | lim.intefx@gmail.com |
6e0e7be32af312f6e4e5c22864d619f58343b46b | 07ec5a0b3ba5e70a9e0fb65172ea6b13ef4115b8 | /lib/python3.6/site-packages/qtconsole/usage.py | 9748f0e934f04e3c18259feed28ecd2d79a87874 | [] | no_license | cronos91/ML-exercise | 39c5cd7f94bb90c57450f9a85d40c2f014900ea4 | 3b7afeeb6a7c87384049a9b87cac1fe4c294e415 | refs/heads/master | 2021-05-09T22:02:55.131977 | 2017-12-14T13:50:44 | 2017-12-14T13:50:44 | 118,736,043 | 0 | 0 | null | 2018-01-24T08:30:23 | 2018-01-24T08:30:22 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:4999bdffa49207a0fc3b0d1a32be17cab386bc93cb7e7f592a5154ee85dcc4e9
size 8349
| [
"seokinj@jangseog-in-ui-MacBook-Pro.local"
] | seokinj@jangseog-in-ui-MacBook-Pro.local |
abc4485dd7fd0ee1e358442f4b46caf996041df3 | 7c9425e73f12622042bdc783b014976e8e8498dd | /django/pages/views.py | 38dcfa4cb9e002ace8e4c2f9a3a79d003575ad7b | [] | no_license | SameerKhan5669/python-WebFramworks | 7f6822780ac7d133e2f2f54d3ad53c816db37943 | 62d1d1d61d698d9dd4b4aba386b9e75aa14cf676 | refs/heads/main | 2023-08-31T18:17:27.791069 | 2021-10-04T17:22:16 | 2021-10-04T17:22:16 | 407,669,507 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | from django.shortcuts import render
# Create your views here.
# pages/views.py
from django.http import HttpResponse
def homePageView(request):
    """Respond to every request on this route with a fixed plain-text greeting."""
    return HttpResponse('Hello, World!') | [
"sameer.khan@freshbooks.com"
] | sameer.khan@freshbooks.com |
fd2de93e61b8ec7175144413c12aa76acc68faf3 | 16841aa873355de23833f4a78e77cf7440345f6d | /code/server/es_run_all.py | e6aba881549458302a29449b1c089f8ed433b300 | [] | no_license | ShyGuyPy/Shiny_Forecasting_Automation | bf42ad603fbcd3c82849c79e4667e0c4391d428d | 60f060bacf90ed593c92e2f17092c44480b2fd77 | refs/heads/master | 2023-05-04T22:05:06.680250 | 2023-04-18T19:50:03 | 2023-04-18T19:50:03 | 224,223,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py | ##this is a workaronud to an issue were win32com module would not
#import properly whe run through r reticulate module
import win32com.client
import win32gui as wg
import win32con
import time
#
# if (__name__ == '__main__'):
def open_and_ID(prog_ID, win_ID):
    """Attach to a COM application and bring its top-level window to the front.

    Args:
        prog_ID: COM ProgID to dispatch (e.g. "Extend.application").
        win_ID: exact window title to look up (e.g. "ExtendSim").

    Returns:
        The COM automation handle returned by win32com Dispatch.

    NOTE(review): FindWindow matches the title exactly and returns 0 when no
    window is found; ShowWindow/SetActiveWindow would then act on handle 0 —
    confirm the application window already exists before this is called.
    """
    program_handle = win32com.client.Dispatch(prog_ID)
    app_ID = wg.FindWindow(None, win_ID)
    print(app_ID)  # debug trace: 0 means the window title was not found
    # wait_time(2)
    wg.ShowWindow(app_ID, win32con.SW_MAXIMIZE)  # maximize the application window
    wg.SetActiveWindow(app_ID)
    #wg.SetForegroundWindow(app_ID)
    #print(program_handle)
    return program_handle
def run_by_id(prog_ID, win_ID):
    """Dispatch the COM application and trigger its "run" menu command.

    `app_ID` is looked up but never used — only the COM Execute call drives
    the application.  Menu command id 6000 presumably maps to ExtendSim's
    Run action; confirm against the ExtendSim scripting documentation.
    """
    program_handle = win32com.client.Dispatch(prog_ID)
    app_ID = wg.FindWindow(None, win_ID)
    program_handle.Execute("""ExecuteMenuCommand(6000)""")
def set_and_run(prog_ID, win_ID, SetEndTime, SetStartTime, SetNumSim, SetNumStep):
    """Push run parameters (end/start time, simulation and step counts) into the model.

    Despite the name, only the parameters are set here — the actual run is
    started separately by run_by_id().  `app_ID` is looked up but unused.
    """
    program_handle = win32com.client.Dispatch(prog_ID)
    app_ID = wg.FindWindow(None, win_ID)
    # sets the setting parameters into a string that can be fed into the MODL execute
    execute_input = """SetRunParameters({}, {}, {}, {})""".format(SetEndTime, SetStartTime, SetNumSim, SetNumStep)
    program_handle.Execute(execute_input)
def wait_time(x):
    """Block the calling thread for x seconds (thin wrapper over time.sleep)."""
    time.sleep(x)
def test_click():
    """Smoke-check helper: report on stdout that the click path was reached."""
    print("click works")
def run_all():
    """Drive one full ExtendSim session: open the app, set parameters, run the model."""
    prog, win = "Extend.application", "ExtendSim"
    # Launch/attach ExtendSim and bring its window forward.
    open_and_ID(prog, win)
    wait_time(20)
    # Feed the run parameters into the model.
    set_and_run(prog, win, 1000, 0, 1, 1)
    wait_time(30)
    # Kick off the simulation run.
    run_by_id(prog, win)
# Module entry point: importing/executing this file immediately drives a full ExtendSim run.
run_all()
| [
"luke.vawter1@gmail.com"
] | luke.vawter1@gmail.com |
c58f1c2970ecc1f52452603ec752fee605c737c0 | 053221e1d90b365f68701dbd5b6466f30d1f6fd7 | /Day2/vd9.py | fd7cce53fa7b1ae816f5b6dbeb603d15b41e478e | [] | no_license | pytutorial/py2011E | eceb4d563cc807294b08b818edadd521ed8da488 | 306437369b0bfe55a2fa827b098283856242e731 | refs/heads/main | 2023-02-28T23:57:32.851536 | 2021-01-30T14:56:12 | 2021-01-30T14:56:12 | 318,186,117 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | # vd9.py
# Weather-forecast exercise.
# Inputs: temperature T (deg C), wind speed w (km/h), pressure p (atm).
# Output: whether rain is expected (True/False).
T = float(input('Nhiệt độ (C):'))
w = float(input('Tốc độ gió (km/h):'))
p = float(input('Áp suất khí quyển(atm):'))
if T >= 21:
    # Warm day: rain requires both noticeable wind and pressure above 0.87 atm.
    rain = w >= 3 and p > 0.87
else:
    # Cool day: either strong wind or high pressure is enough.
    rain = w >= 7 or p > 1.04
print(rain)
| [
"duongthanhtungvn01@gmail.com"
] | duongthanhtungvn01@gmail.com |
586bebcb89179bee8bb4bb171079ab83e1625aa4 | e0da81d30c5178cee999801c8d6673d782878bfa | /create_tables.py | b7b8e401daa01509b4388a3ae021767ff7398c3c | [] | no_license | as234545/Sparkify_ETL | 22bf7c233dcf7f53e1870c304fd564f5e16a0628 | ecc6758e44e79736f4ad8d3b0d5bfb86be9234e9 | refs/heads/master | 2022-10-17T08:50:40.238072 | 2020-06-11T18:16:31 | 2020-06-11T18:16:31 | 262,396,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,713 | py | import psycopg2
from sql_queries import create_table_queries, drop_table_queries
def create_database():
    """Drop and recreate the `sparkifydb` database, then connect to it.

    Connects first to the default `postgres` database with autocommit (CREATE
    DATABASE cannot run inside a transaction), recreates sparkifydb with UTF8
    encoding, then opens a fresh connection to the new database.

    Returns:
        (cur, conn): an open cursor and connection to sparkifydb.

    NOTE(review): the first connection string uses placeholder credentials
    `user=[] password=[]` — fill in real credentials before running.
    """
    # connect to default database
    conn = psycopg2.connect("host=127.0.0.1 dbname=postgres user=[] password=[]")
    conn.set_session(autocommit=True)
    cur = conn.cursor()
    # create sparkify database with UTF8 encoding
    cur.execute("DROP DATABASE IF EXISTS sparkifydb")
    cur.execute("CREATE DATABASE sparkifydb WITH ENCODING 'utf8' TEMPLATE template0")
    # close connection to default database
    conn.close()
    # connect to sparkify database
    conn = psycopg2.connect("host=127.0.0.1 dbname=sparkifydb user=postgres password=postgres")
    cur = conn.cursor()
    return cur, conn
def drop_tables(cur, conn):
    """Drop every table listed in `drop_table_queries`, committing after each statement."""
    for sql in drop_table_queries:
        cur.execute(sql)
        conn.commit()
def create_tables(cur, conn):
    """Create every table listed in `create_table_queries`, committing after each statement."""
    for sql in create_table_queries:
        cur.execute(sql)
        conn.commit()
def main():
    """Rebuild the sparkify schema end to end.

    Recreates the sparkifydb database, drops any leftover tables, creates
    the fresh set of tables, and finally closes the connection.
    """
    cursor, connection = create_database()
    drop_tables(cursor, connection)
    create_tables(cursor, connection)
    connection.close()
# Run this module directly for a one-shot schema rebuild.
if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | noreply@github.com |
5f7755aabf8fbe67914c3bbf540ddcfad5fe2dca | 036fb4fc50bb1fab2cca125484bfe3a0726894bc | /note.py | 8f21429c0e16dfa22801f6af73dc70acbbde5a8c | [] | no_license | SWC-Painist/Backend_Api | 8df5ebc46bde9831dbd04e63be47bf4815868d16 | 34924745985780eb832edaf5d3c4f809c86b1d30 | refs/heads/main | 2023-04-14T04:17:34.969567 | 2021-03-29T10:58:22 | 2021-03-29T10:58:22 | 351,360,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,986 | py | NUM_NOTENAME_LIST = ['Not Piano Key' for i in range(0,109)]
# Lookup table: MIDI number -> note name for the 88 piano keys (21..108);
# every other index stays 'Not Piano Key'.  (Re-initialized here so the
# table build below is self-contained.)
NUM_NOTENAME_LIST = ['Not Piano Key' for i in range(0, 109)]
# One octave of note-name templates; '_' is replaced by the octave number.
# Fixed: 'c#_/db_,' had a stray trailing comma inside the string, which
# leaked ','-suffixed keys such as 'db1,' into STR_TO_MIDI_MAP.
TELVE_TONE_TEMPERAMENT = ['c_', 'c#_/db_', 'd_', 'd#_/eb_', 'e_', 'f_',
                          'f#_/gb_', 'g_', 'g#_/ab_', 'a_', 'a#_/bb_', 'b_']
# Generate the note-name list: MIDI 24 is c1 and each octave spans 12 keys.
for i in range(24, 109):
    octa, num = (i - 24) // 12, (i - 24) % 12
    NUM_NOTENAME_LIST[i] = TELVE_TONE_TEMPERAMENT[num].replace('_', str(octa + 1))
# The three lowest piano keys (octave 0) are filled in by hand.
NUM_NOTENAME_LIST[21] = 'a0'   # fixed: the lowest piano key (MIDI 21) is A0, not C0
NUM_NOTENAME_LIST[22] = 'a#0/bb0'
NUM_NOTENAME_LIST[23] = 'b0'
# STR_TO_MIDI_MAP: note name -> MIDI number; enharmonic spellings such as
# 'c#4' and 'db4' each get their own entry.
STR_TO_MIDI_MAP = {}
for i, s in enumerate(NUM_NOTENAME_LIST):
    if s == 'Not Piano Key':
        continue
    if s.find('/') != -1:
        sharp_name, flat_name = s.split('/')
        STR_TO_MIDI_MAP.update({sharp_name: i, flat_name: i})
    else:
        STR_TO_MIDI_MAP.update({s: i})
class pianoNote:
    '''
    Note class.
    Holds pitch (MIDI number), timing (start/end), velocity and the
    human-readable note name.
    '''
    def __init__(self, __mNum : int, __start : int, __end, __velo : int):
        '''
        Args:
            __mNum : MIDI note number (21..108 for piano keys)
            __start : start time
            __end : end time
            __velo : note velocity
        Constructor for a MIDI note event.
        '''
        self.MidiNum = __mNum
        self.start = __start
        self.end = __end
        self.velocity = __velo
        self.TimeDiv = 0     # rhythmic division (e.g. 4 = quarter), set by fromStr()
        self.chord = False   # True when this note sounds together with others
        self.Modifier = ''   # accidental: '', '#', '##', 'b', 'bb' or 'n'
        self.dot = 0         # number of augmentation dots parsed by fromStr()
        self.name = NUM_NOTENAME_LIST[self.MidiNum]
    def find_modifier(self, note_str : str):
        """Return the accidental encoded in note_str.

        '#'/'##' mean sharp/double-sharp, '&'/'&&' mean flat/double-flat,
        'n' means natural; '' when no accidental marker is present.
        """
        sharp = note_str.find('#')
        flat = note_str.find('&')
        if sharp != -1:
            if note_str.find('##') != -1:
                return '##'
            else:
                return '#'
        elif flat != -1:
            if note_str.find('&&') != -1:
                return 'bb'
            else:
                return 'b'
        elif note_str.find('n') != -1:
            return 'n'
        return ''
    def fromStr(self, from_str : str):
        """Parse a text note such as 'c#4/8..' into pitch, name and duration.

        Format: <letter><accidental?><octave>/<division>, followed by optional
        augmentation dots.  Duration is 1000/<division>, multiplied by 1.5 per
        dot, and added to self.start to obtain self.end.
        """
        # Count (and strip) trailing augmentation dots.
        back_index = len(from_str) - 1
        while back_index >= 0 and from_str[back_index] == '.':
            self.dot = self.dot + 1
            back_index = back_index - 1
        from_str = from_str[0:back_index + 1].split('/')
        self.TimeDiv = int(from_str[1])
        note_len = 1000 / self.TimeDiv
        note_len = note_len * 1.5 ** self.dot
        self.end = self.start + note_len
        self.Modifier = self.find_modifier(from_str[0])
        if self.Modifier == '##':
            # Double sharp: look up the natural note and raise it two semitones.
            self.name = from_str[0][0] + self.Modifier + from_str[0][-1]
            fake_name = self.name[0] + self.name[-1]
            self.MidiNum = STR_TO_MIDI_MAP.get(fake_name) + 2
        elif self.Modifier == 'bb':
            # Double flat: look up the natural note and lower it two semitones.
            self.name = from_str[0][0] + self.Modifier + from_str[0][-1]
            fake_name = self.name[0] + self.name[-1]
            self.MidiNum = STR_TO_MIDI_MAP.get(fake_name) - 2
        elif self.Modifier == 'n':
            # Natural sign: drop the accidental from the stored name entirely.
            self.name = from_str[0][0] + from_str[0][-1]
            self.MidiNum = STR_TO_MIDI_MAP.get(self.name)
        else:
            self.name = from_str[0][0] + self.Modifier + from_str[0][-1]
            self.MidiNum = STR_TO_MIDI_MAP.get(self.name)
    def setChord(self, flag : bool):
        """Mark/unmark this note as part of a chord."""
        self.chord = flag
    def setModifier(self, modifier : str):
        """Replace the note's accidental, transposing MidiNum and name to match."""
        if self.Modifier == modifier:
            return
        # Undo the pitch offset of the current accidental.
        if self.Modifier == '#':
            self.MidiNum = self.MidiNum - 1
        elif self.Modifier == '##':
            self.MidiNum = self.MidiNum - 2
        elif self.Modifier == 'b':
            self.MidiNum = self.MidiNum + 1
        elif self.Modifier == 'bb':
            self.MidiNum = self.MidiNum + 2
        # Apply the pitch offset of the new accidental.
        if modifier == '#':
            self.MidiNum = self.MidiNum + 1
        elif modifier == '##':
            self.MidiNum = self.MidiNum + 2
        elif modifier == 'b':
            self.MidiNum = self.MidiNum - 1
        elif modifier == 'bb':
            self.MidiNum = self.MidiNum - 2
        self.Modifier = modifier
        self.name = self.name[0] + modifier + self.name[-1]
    def __eq__(self, __rhs) -> bool:
        '''
        operator==.
        True only if the two notes share pitch, start, end and velocity.
        '''
        # Fixed: previously compared self.velocity against __rhs.length, an
        # attribute that does not exist on pianoNote (AttributeError).
        return (self.MidiNum == __rhs.MidiNum and self.start == __rhs.start
                and self.end == __rhs.end and self.velocity == __rhs.velocity)
    def SamePitch(self, __cmp) -> bool:
        '''
        True if the two notes have the same pitch.
        '''
        return self.MidiNum == __cmp.MidiNum
    def __str__(self) -> str:
        return('Note: {}, Name: {}, Start: {}ms, End: {}ms, Velo: {}'.format(self.MidiNum,self.name,self.start,self.end,self.velocity))
if __name__ == '__main__':
    # Manual smoke check: dump the generated note-name table to stdout.
    print('not the main module only for temperament check')
    for note_name in NUM_NOTENAME_LIST:
        print(note_name, end=', ')
| [
"noreply@github.com"
] | noreply@github.com |
6daa8b703512b7e2e3c7130d9d9b62f52575be17 | 5d049b79b10480e0e03dbd699c496b5eaa050eb5 | /tests/test_preprocessing.py | 26b4df9beff58bd1556114ab91b4a684dcb652f5 | [] | no_license | isseychua/type-hints-demo | 96bd46fd3c54acd5fa2f14913a2e1c1522d109dd | 2e2ae03e9b2c1254807a7f01bc47869ecea6bb24 | refs/heads/master | 2023-08-02T16:36:36.479644 | 2021-10-10T11:26:19 | 2021-10-10T11:26:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,073 | py | import unittest
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from pandas.testing import assert_frame_equal, assert_series_equal
from src.preprocessing import (add_derived_title, add_is_alone_column,
categorize_column, impute_nans, train_model)
class TestProcessing(unittest.TestCase):
    """Unit tests for the Titanic preprocessing helpers in src.preprocessing.

    Covers title derivation, binning, family-size flagging, NaN imputation
    and the train_model wrapper.
    """
    def test_add_derived_title(self):
        """Common titles map to themselves (Mlle/Ms -> Miss, Mme -> Mrs); all others become 'Rare'."""
        df = pd.DataFrame({
            'Name': ['Smith, Mr. Owen Harris ', 'Heikkinen, Miss. Laina ', 'Allen, Mlle. Maisie',
                'Allen, Ms. Maisie', 'Allen, Mme. Maisie',
                # rare titles
                'Smith, Lady. Owen Harris ', 'Heikkinen, Countess. X ', 'Allen, Capt. Maisie',
                'Smith, Col. Owen Harris ', 'Heikkinen, Don. Laina ', 'Allen, Dr. Maisie',
                'Smith, Major. Owen Harris ', 'Heikkinen, Rev. Laina ', 'Allen, Sir. Maisie',
                'Smith, Jonkheer. Owen Bob ', 'Heikkinen, Dona. Laina '
            ],
        })
        expected = pd.DataFrame({
            'Name': ['Smith, Mr. Owen Harris ', 'Heikkinen, Miss. Laina ', 'Allen, Mlle. Maisie',
                'Allen, Ms. Maisie', 'Allen, Mme. Maisie',
                'Smith, Lady. Owen Harris ', 'Heikkinen, Countess. X ', 'Allen, Capt. Maisie',
                'Smith, Col. Owen Harris ', 'Heikkinen, Don. Laina ', 'Allen, Dr. Maisie',
                'Smith, Major. Owen Harris ', 'Heikkinen, Rev. Laina ', 'Allen, Sir. Maisie',
                'Smith, Jonkheer. Owen Bob ', 'Heikkinen, Dona. Laina '
            ],
            'Title': ['Mr', 'Miss', 'Miss',
                'Miss', 'Mrs',
                'Rare', 'Rare', 'Rare',
                'Rare', 'Rare', 'Rare',
                'Rare', 'Rare', 'Rare',
                'Rare', 'Rare']
        })
        assert_frame_equal(expected, add_derived_title(df))
    def test_categorize_column_into_2_categories(self):
        """Values are replaced by their 1-based bin index when split into 2 equal-width bins."""
        series = pd.Series([5, 20, 10, 25]) # bins: [ 4.98 15.   25.  ]
        assert_series_equal(
            pd.Series([1, 2, 1, 2]), categorize_column(series, num_bins=2))
    def test_categorize_column_into_5_categories(self):
        """Same as above, with 5 equal-width bins spanning the value range."""
        # bins: [ -0.1,  20. ,  40. ,  60. ,  80. , 100. ]
        series = pd.Series([0, 30, 50, 80, 100])
        assert_series_equal(
            pd.Series([1, 2, 3, 4, 5]), categorize_column(series, num_bins=5))
    def test_add_is_alone_column(self):
        """IsAlone is 1 exactly when SibSp + Parch == 0 for the row."""
        # df = df['SibSp'] + df['Parch']
        df = pd.DataFrame({
            'SibSp': [0, 1, 2, 0, 0],
            'Parch': [0, 0, 5, 0, 1]
        })
        expected = pd.DataFrame({
            'SibSp': [0, 1, 2, 0, 0],
            'Parch': [0, 0, 5, 0, 1],
            'IsAlone': [1, 0, 0, 1, 0]
        })
        assert_frame_equal(expected, add_is_alone_column(df))
    def test_impute_nans_for_categorical_columns_replaces_na_with_most_frequent_mode(self):
        """Categorical NaNs are filled with the column's most frequent value ('A' here)."""
        df = pd.DataFrame({
            'some_categorical_column': ['A', 'A', 'B', np.nan, 'A', np.nan]
        })
        expected = pd.DataFrame({
            'some_categorical_column': ['A', 'A', 'B', 'A', 'A', 'A']
        })
        assert_frame_equal(expected, impute_nans(
            df, categorical_columns=['some_categorical_column']))
    def test_impute_nans_for_continuous_columns_replaces_na_with_median(self):
        """Continuous NaNs are filled with the column median (20 here)."""
        df = pd.DataFrame({
            # median value: 20
            'some_continuous_column': [10, 20, np.nan, np.nan, 30]
        })
        expected = pd.DataFrame({
            'some_continuous_column': [10, 20, 20, 20, 30]
        })
        assert_frame_equal(expected, impute_nans(df, continuous_columns=[
            'some_continuous_column']), check_dtype=False)
    def test_train_model_should_return_instance_of_model_and_accuracy_score(self):
        """train_model returns (fitted model instance, float accuracy) for the given class."""
        model, accuracy = train_model(DecisionTreeClassifier, [[1, 1, 1], [1, 1, 1]], [0, 1])
        self.assertIsInstance(model, DecisionTreeClassifier)
        self.assertIsInstance(accuracy, float)
| [
"davidtan@thoughtworks.com"
] | davidtan@thoughtworks.com |
c830596b2f898d2ead4f94528ad2f3100de2be7b | 7786de317489fa258c7504b2fc96341e970e45db | /tests/unit/test_cf_storage_object.py | 40cecc402ed6e56b9c96465a85a7524220df10d6 | [
"MIT"
] | permissive | tvaught/pyrax | 7207158d832721ca6ccde2e9c328855155a60915 | 8a310435239c536921490e04a984ff8a82b18eb8 | refs/heads/master | 2020-12-25T10:10:54.714401 | 2013-05-30T19:56:21 | 2013-05-30T19:56:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,903 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import random
import unittest
from mock import patch
from mock import MagicMock as Mock
import pyrax
from pyrax.cf_wrapper.storage_object import StorageObject
import pyrax.exceptions as exc
from tests.unit.fakes import FakeContainer
from tests.unit.fakes import FakeIdentity
from tests.unit.fakes import FakeResponse
class CF_StorageObjectTest(unittest.TestCase):
    """Unit tests for pyrax's cloudfiles StorageObject wrapper.

    All pyrax connect_* entry points are replaced with mocks and the
    container/identity layers with fakes, so every test runs fully offline
    against an in-memory client; tearDown restores the originals captured
    in __init__.
    """
    def __init__(self, *args, **kwargs):
        # Capture the real connect_* callables before mocking, so tearDown
        # can restore them and leave the pyrax module unharmed.
        reload(pyrax)
        self.orig_connect_to_cloudservers = pyrax.connect_to_cloudservers
        self.orig_connect_to_cloudfiles = pyrax.connect_to_cloudfiles
        self.orig_connect_to_cloud_databases = pyrax.connect_to_cloud_databases
        ctclb = pyrax.connect_to_cloud_loadbalancers
        self.orig_connect_to_cloud_loadbalancers = ctclb
        ctcbs = pyrax.connect_to_cloud_blockstorage
        self.orig_connect_to_cloud_blockstorage = ctcbs
        super(CF_StorageObjectTest, self).__init__(*args, **kwargs)
        self.obj_name = "testobj"
        self.container_name = "testcont"
        pyrax.connect_to_cloudservers = Mock()
        pyrax.connect_to_cloud_loadbalancers = Mock()
        pyrax.connect_to_cloud_databases = Mock()
        pyrax.connect_to_cloud_blockstorage = Mock()
    @patch('pyrax.cf_wrapper.client.Container', new=FakeContainer)
    def setUp(self):
        """Build a fresh fake client/container and the storage object under test."""
        pyrax.connect_to_cloudservers = Mock()
        pyrax.connect_to_cloud_loadbalancers = Mock()
        pyrax.connect_to_cloud_databases = Mock()
        pyrax.connect_to_cloud_blockstorage = Mock()
        pyrax.clear_credentials()
        pyrax.identity = FakeIdentity()
        pyrax.set_credentials("fakeuser", "fakeapikey")
        pyrax.connect_to_cloudfiles()
        self.client = pyrax.cloudfiles
        self.container = FakeContainer(self.client, self.container_name, 0, 0)
        self.container.name = self.container_name
        self.client.get_container = Mock(return_value=self.container)
        self.client.connection.get_container = Mock()
        self.client.connection.head_object = Mock()
        # Canned object listing returned by the mocked connection calls.
        objs = [{"name": self.obj_name, "content_type": "test/test",
                "bytes": 444, "hash": "abcdef0123456789"}]
        self.client.connection.head_object.return_value = ({}, objs)
        self.client.connection.get_container.return_value = ({}, objs)
        self.storage_object = self.client.get_object(self.container, "testobj")
        self.client._container_cache = {}
        self.container.object_cache = {}
    def tearDown(self):
        """Drop the fixtures and restore the original pyrax connect_* functions."""
        self.client = None
        self.container = None
        self.storage_object = None
        pyrax.connect_to_cloudservers = self.orig_connect_to_cloudservers
        pyrax.connect_to_cloudfiles = self.orig_connect_to_cloudfiles
        pyrax.connect_to_cloud_databases = self.orig_connect_to_cloud_databases
        octclb = self.orig_connect_to_cloud_loadbalancers
        pyrax.connect_to_cloud_loadbalancers = octclb
        octcbs = self.orig_connect_to_cloud_blockstorage
        pyrax.connect_to_cloud_blockstorage = octcbs
    def test_read_attdict(self):
        """_read_attdict copies name/content_type/bytes/last_modified/hash into attributes."""
        tname = "something"
        ttype = "foo/bar"
        tbytes = 12345
        tlastmodified = "2222-02-22T22:22:22.222222"
        tetag = "123123123"
        dct = {"name": tname, "content_type": ttype, "bytes": tbytes,
                "last_modified": tlastmodified, "hash": tetag}
        obj = self.storage_object
        obj._read_attdict(dct)
        self.assertEqual(obj.name, tname)
        self.assertEqual(obj.content_type, ttype)
        self.assertEqual(obj.total_bytes, tbytes)
        self.assertEqual(obj.last_modified, tlastmodified)
        self.assertEqual(obj.etag, tetag)
    def test_subdir(self):
        """A dict with only a 'subdir' key sets the object's name to that value."""
        tname = "something"
        dct = {"subdir": tname}
        obj = self.storage_object
        obj._read_attdict(dct)
        self.assertEqual(obj.name, tname)
    def test_get(self):
        """get() returns just the data, or (meta, data) when include_meta=True."""
        obj = self.storage_object
        obj.client.connection.get_object = Mock()
        meta = {"a": "b"}
        data = "This is the contents of the file"
        obj.client.connection.get_object.return_value = (meta, data)
        ret = obj.get()
        self.assertEqual(ret, data)
        ret = obj.get(include_meta=True)
        self.assertEqual(ret, (meta, data))
    def test_delete(self):
        """delete() forwards the container name and object name to the connection."""
        obj = self.storage_object
        obj.client.connection.delete_object = Mock()
        obj.delete()
        obj.client.connection.delete_object.assert_called_with(
                obj.container.name, obj.name)
    def test_purge(self):
        """purge() raises NotCDNEnabled without a CDN URI, else issues a CDN DELETE."""
        obj = self.storage_object
        cont = obj.container
        cont.cdn_uri = None
        self.assertRaises(exc.NotCDNEnabled, obj.purge)
        cont.cdn_uri = "http://example.com"
        obj.client.connection.cdn_request = Mock()
        obj.purge()
        obj.client.connection.cdn_request.assert_called_with("DELETE",
                cont.name, obj.name, hdrs={})
    def test_get_metadata(self):
        """get_metadata() keeps only the X-Object-Meta-* headers."""
        obj = self.storage_object
        obj.client.connection.head_object = Mock()
        obj.client.connection.head_object.return_value = {
                "X-Object-Meta-Foo": "yes",
                "Some-Other-Key": "no"}
        meta = obj.get_metadata()
        self.assertEqual(meta, {"X-Object-Meta-Foo": "yes"})
    def test_set_metadata(self):
        """set_metadata() lower-cases keys and prefixes them with x-object-meta-."""
        obj = self.storage_object
        obj.client.connection.post_object = Mock()
        obj.client.connection.head_object = Mock(return_value={})
        obj.set_metadata({"newkey": "newval"})
        obj.client.connection.post_object.assert_called_with(obj.container.name,
                obj.name, {"x-object-meta-newkey": "newval"})
    def test_remove_metadata_key(self):
        """remove_metadata_key() posts back the metadata dict without the removed key."""
        obj = self.storage_object
        obj.client.connection.post_object = Mock()
        obj.client.connection.head_object = Mock(return_value={})
        obj.remove_metadata_key("newkey")
        obj.client.connection.post_object.assert_called_with(obj.container.name,
                obj.name, {})
    def test_change_content_type(self):
        """change_content_type() delegates to the client with guess=False."""
        obj = self.storage_object
        obj.client.change_object_content_type = Mock()
        obj.change_content_type("foo")
        obj.client.change_object_content_type.assert_called_once_with(
                obj.container, obj, new_ctype="foo", guess=False)
    def test_get_temp_url(self):
        """get_temp_url() forwards container, object, seconds and method to the client."""
        obj = self.storage_object
        obj.client.get_temp_url = Mock()
        secs = random.randint(1, 1000)
        obj.get_temp_url(seconds=secs)
        obj.client.get_temp_url.assert_called_with(obj.container, obj,
                seconds=secs, method="GET")
    def test_repr(self):
        """__repr__ includes the class tag, object name and content type."""
        obj = self.storage_object
        rep = obj.__repr__()
        self.assert_("<Object " in rep)
        self.assert_(obj.name in rep)
        self.assert_(obj.content_type in rep)
# Allow running this test module directly with the stdlib unittest runner.
if __name__ == "__main__":
    unittest.main()
| [
"ed@leafe.com"
] | ed@leafe.com |
00158ec2eee07a649a6064356b537bc1f351953c | d292e8094cdfbd2dd6e35d775f6edfa8e209db47 | /src/tests/test_profiled_forward.py | 8d869d8d2f7ad953dbfc20f8331029c0c13c6f71 | [
"MIT"
] | permissive | SpookyWoogin/robot2018 | 81562d96ffd42aa3642f8f746a8126a2940827ec | a8ddf6a64b883904b15031e0ae13b2056faed4f5 | refs/heads/master | 2020-09-02T01:05:00.897963 | 2019-01-26T05:00:02 | 2019-01-26T05:00:02 | 219,100,518 | 0 | 0 | MIT | 2019-11-02T03:55:26 | 2019-11-02T03:55:26 | null | UTF-8 | Python | false | false | 2,129 | py | from unittest.mock import MagicMock
from data_logger import DataLogger
from robot import Rockslide
from commands.profiled_forward import ProfiledForward
def test_ProfiledForward1(Notifier):
    """Smoke test: a ProfiledForward command survives one init/execute/isFinished/end cycle."""
    robot = Rockslide()
    robot.robotInit()
    command = ProfiledForward(10)  # distance argument; presumably feet (cf. pos_ft below) — TODO confirm
    command.initialize()
    command.execute()
    command.isFinished()
    command.end()
# When True, test_ProfiledForward2 also writes its simulated trajectory to a CSV via DataLogger.
log_trajectory = True
def test_ProfiledForward2(Notifier, sim_hooks):
    """Simulate a 10 ft ProfiledForward run by integrating the profiler's target velocity.

    Encoders are mocked; each iteration feeds the integrated position back
    into the encoders, steps the command, and advances simulated time until
    the command reports finished or 10 s elapse.  Optionally logs the whole
    trajectory to test_profiled_forward2.csv.
    """
    global log_trajectory
    robot = Rockslide()
    robot.robotInit()
    DT = robot.getPeriod()
    # Replace the real encoders so we can drive their readings from the sim loop.
    robot.drivetrain.getLeftEncoder = getLeftEncoder = MagicMock()
    robot.drivetrain.getRightEncoder = getRightEncoder = MagicMock()
    getLeftEncoder.return_value = 0
    getRightEncoder.return_value = 0
    command = ProfiledForward(10)
    command.initialize()
    t = 0
    pos_ft = 0
    if log_trajectory:
        # Each column is captured lazily via a closure over the loop variables.
        logger = DataLogger("test_profiled_forward2.csv")
        logger.log_while_disabled = True
        logger.do_print = True
        logger.add('t', lambda: t)
        logger.add('pos', lambda: pos_ft)
        logger.add('target_pos', lambda: command.dist_ft)
        logger.add('v', lambda: command.profiler_l.current_target_v)
        logger.add('max_v', lambda: command.max_v_encps)
        logger.add('a', lambda: command.profiler_l.current_a)
        logger.add('max_a', lambda: command.max_acceleration)
        logger.add('voltage', lambda: command.drivetrain.getVoltage())
        logger.add('vpl', lambda: command.drivetrain.motor_lb.get())
        logger.add('adist', lambda: command.profiler_l.adist)
        logger.add('err', lambda: command.profiler_l.err)
    while t < 10:
        if log_trajectory:
            logger.log()
        # Feed the integrated position back as encoder counts (right side inverted).
        getLeftEncoder.return_value = pos_ft * command.drivetrain.ratio
        getRightEncoder.return_value = -pos_ft * command.drivetrain.ratio
        command.execute()
        # Euler-integrate the profiler's target velocity over one period.
        v = command.profiler_l.current_target_v
        pos_ft += v * DT
        t += DT
        sim_hooks.time = t
        if command.isFinished():
            break
    command.end()
    if log_trajectory:
        logger.log()
        logger.close()
| [
"ellery-newcomer@utulsa.edu"
] | ellery-newcomer@utulsa.edu |
8be8b9d514ef8af40f16b0f5750beca00056be18 | 661ccc272af5d72a4aea6cecebd59879ab8458f5 | /test_scores.py | 8bbbb94312640aef4bdc7d5c8d9fba98e5442c39 | [] | no_license | Monitor-Wang/ERMDA | 9e03718292404f5a0a8cf0bb29974ef2ea981675 | cdafa1e3bba24b16f81c427c29009ecbbc716a88 | refs/heads/main | 2023-08-30T19:32:34.156473 | 2021-11-05T12:52:03 | 2021-11-05T12:52:03 | 424,944,508 | 0 | 0 | null | 2021-11-05T12:42:11 | 2021-11-05T12:42:11 | null | UTF-8 | Python | false | false | 1,979 | py | # -*- coding: utf-8 -*-
from sklearn.metrics import roc_auc_score
import numpy as np
def calculate_performace(num, y_pred, y_prob, y_test):
    """Compute confusion-matrix counts and summary metrics for binary labels.

    Args:
        num: number of samples to score (indexes 0..num-1 are examined).
        y_pred: predicted class labels.
        y_prob: predicted scores/probabilities (used only for the AUC).
        y_test: ground-truth labels (positive class is 1).

    Returns:
        (tp, fp, tn, fn, acc, precision, recall, f1_score, AUC); when a
        metric's denominator is zero, precision/recall/f1 fall back to the
        sentinel value 100.
    """
    tp = fp = tn = fn = 0
    for idx in range(num):
        truth = y_test[idx]
        guess = y_pred[idx]
        if truth == 1:
            if guess == truth:
                tp += 1
            else:
                fn += 1
        elif guess == truth:
            tn += 1
        else:
            fp += 1
    acc = float(tp + tn) / num
    try:
        precision = float(tp) / (tp + fp)
        recall = float(tp) / (tp + fn)
        f1_score = float((2 * precision * recall) / (precision + recall))
    except ZeroDivisionError:
        print("You can't divide by 0.")
        precision = recall = f1_score = 100
    AUC = roc_auc_score(y_test, y_prob)
    return tp, fp, tn, fn, acc, precision, recall, f1_score, AUC
def base_learners_results(metric_dict, fold_num, group_num, f):
    """Average each base learner's per-fold metrics and emit one summary line per learner.

    metric_dict maps fold index -> list of per-learner metric rows
    [acc, precision, recall, f1, auc, sum]; the means over all folds are
    written to the open file-like object `f`.
    """
    for i in range(group_num):
        # Stack learner i's metric row from every fold: shape (fold_num, 6).
        bl_metric_list = np.array([metric_dict[fold][i] for fold in range(fold_num)])
        # Column-wise means, in the order expected by the summary line.
        ave_acc = np.mean(bl_metric_list[:, 0])
        ave_prec = np.mean(bl_metric_list[:, 1])
        ave_recall = np.mean(bl_metric_list[:, 2])
        ave_f1_score = np.mean(bl_metric_list[:, 3])
        ave_auc = np.mean(bl_metric_list[:, 4])
        ave_sum = np.mean(bl_metric_list[:, 5])
f.write('the '+ str(i+1)+ ' base learner proformance: \tAcc\t'+ str(ave_acc)+'\tprec\t'+ str(ave_prec)+ '\trecall\t'+str(ave_recall)+'\tf1_score\t'+str(ave_f1_score)+'\tAUC\t'+ str(ave_auc)+'\tSum\t'+ str(ave_sum)+'\n') | [
"noreply@github.com"
] | noreply@github.com |
449d5c2f3a0a020d0c74ca688990cf14ec87f350 | c99b89e8b4d5ebdae4aaaf26c33dd8075e61b5e4 | /AnchorDxLimsApp/RandDTaskAssignment.py | 852174993b8a463ae1c594f0e83a627c6404016d | [] | no_license | ranandrom/Lims | 1afa9f86829b5c09b10bc802501f745c489045c6 | 8a762cad72a334054f4211e46a4b36b403dc06c2 | refs/heads/master | 2020-03-12T00:14:45.192049 | 2018-04-23T09:44:45 | 2018-04-23T09:44:45 | 128,862,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,184 | py | # encoding: utf-8
from django.shortcuts import render
from AnchorDxLimsApp import models
from itertools import chain
# Create your views here.
#coding:utf-8
from django.shortcuts import render,HttpResponse
# Home page view for R&D sample experimental task assignment.
def RandDExperimentalTaskAssignmentHomePage(request):
    """Render the R&D task-assignment dashboard.

    Redirects to index.html when the session has no logged-in user;
    otherwise gathers, for every pipeline stage (pretreatment, DNA
    extraction, pre-/final library construction, sequencing), the querysets
    of unassigned/assigned tasks plus suspended/terminated tasks and unread
    system messages, and renders them into the dashboard template.
    """
    try:
        username = request.session['username']
        department = request.session['department']
    except Exception:
        return render(request, "index.html")
    else:
        print(r'首页,username: ', username, department)
        temp = {"username": username, "department": department}
        temp_myInfo = models.UserInfo.objects.filter(username=username)  # user info
        # temp_SystemMessage = models.UserSystemMessage.objects.filter(Receiver=username)  # user info
        temp_SystemMessage_Unread = models.UserSystemMessage.objects.filter(Receiver=username,
                                                                            ReadingState='未读')  # unread messages for this user
        num_SystemMessage_Unread = len(temp_SystemMessage_Unread)
        # Pretreatment task list
        Pretreatment_not_audited = models.RandDSampleInfo.objects.filter(Next_TaskProgress_Sign=0, sample_review='1',
                                                                         TissueSampleSign=0)  # tasks not yet assigned
        Pretreatment_audited = models.RandDSampleInfo.objects.filter(Next_TaskProgress_Sign=1, sample_review='1',
                                                                     TissueSampleSign=0)  # tasks already assigned
        # DNA extraction task list
        # DNA_not_audited = models.RandDSampleInfo.objects.filter(Next_TaskProgress_Sign=0, TissueSampleSign=1)  # tasks not yet assigned
        temp_not_Pretreatment = models.RandDSampleInfo.objects.filter(Next_TaskProgress_Sign=0, sample_review='1',
                                                                      TissueSampleSign=1)  # tasks not yet assigned
        temp_Pretreatment = models.RandDSamplePretreatmentInfo.objects.filter(Next_TaskProgress_Sign=0)  # tasks not yet assigned
        DNA_not_audited = chain(temp_not_Pretreatment, temp_Pretreatment)  # merge rows from all tables
        # DNA_audited = models.RandDSampleInfo.objects.filter(Next_TaskProgress_Sign=1, TissueSampleSign=1)  # tasks already assigned
        temp_not_Pretreatment_audited = models.RandDSampleInfo.objects.filter(Next_TaskProgress_Sign=1,
                                                                              sample_review='1',
                                                                              TissueSampleSign=1)  # tasks already assigned
        temp_Pretreatment_audited = models.RandDSamplePretreatmentInfo.objects.filter(
            Next_TaskProgress_Sign=1)  # tasks already assigned
        DNA_audited = chain(temp_not_Pretreatment_audited, temp_Pretreatment_audited)  # merge rows from all tables
        # Pre-library construction task list
        temp_Fin_unaud = models.clinicalSampleInfo.objects.filter(contract_review=0)  # finance: not yet reviewed
        temp_Fin_NoPass = models.clinicalSampleInfo.objects.filter(contract_review=2)  # finance: review rejected
        PreLibCon_not_audited = models.RandDSampleDNAExtractInfo.objects.filter(Next_TaskProgress_Sign=0)  # tasks not yet assigned
        PreLibCon_audited = models.RandDSampleDNAExtractInfo.objects.filter(Next_TaskProgress_Sign=1)  # tasks already assigned
        # Final library construction task list
        FinLibCon_not_audited = models.RandDSamplePreLibConInfo.objects.filter(Next_TaskProgress_Sign=0)  # tasks not yet assigned
        FinLibCon_audited = models.RandDSamplePreLibConInfo.objects.filter(Next_TaskProgress_Sign=1)  # tasks already assigned
        # Sequencing task list
        ComputerSeq_not_audited = models.RandDSampleFinLibConInfo.objects.filter(Next_TaskProgress_Sign=0)  # tasks not yet assigned
        ComputerSeq_audited = models.RandDSampleFinLibConInfo.objects.filter(Next_TaskProgress_Sign=1)  # tasks already assigned
        # Other info lists
        # Suspended tasks
        temp_Pretreatment = models.RandDSampleInfo.objects.filter(Next_TaskProgress_Sign=2, sample_review='1')  # pretreatment suspended
        temp_DNAExtract = models.RandDSamplePretreatmentInfo.objects.filter(Next_TaskProgress_Sign=2)  # DNA extraction suspended
        temp_PreLibCon = models.RandDSampleDNAExtractInfo.objects.filter(Next_TaskProgress_Sign=2)  # pre-library construction suspended
        temp_FinLibCon = models.RandDSamplePreLibConInfo.objects.filter(Next_TaskProgress_Sign=2)  # final library construction suspended
        temp_SeqCom = models.RandDSampleFinLibConInfo.objects.filter(Next_TaskProgress_Sign=2)  # sequencing suspended
        temp_suspend = chain(temp_Pretreatment, temp_DNAExtract, temp_PreLibCon, temp_FinLibCon, temp_SeqCom)  # merge rows from all tables
        # Terminated tasks
        # temp_stop = models.clinicalSampleInfo.objects.filter(Next_TaskProgress_Sign=3)  # terminated tasks
        temp_Pretreatment_stop = models.RandDSampleInfo.objects.filter(Next_TaskProgress_Sign=3 , sample_review='1')  # pretreatment terminated
        temp_DNAExtract_stop = models.RandDSamplePretreatmentInfo.objects.filter(Next_TaskProgress_Sign=3)  # DNA extraction terminated
        temp_PreLibCon_stop = models.RandDSampleDNAExtractInfo.objects.filter(Next_TaskProgress_Sign=3)  # pre-library construction terminated
        temp_FinLibCon_stop = models.RandDSamplePreLibConInfo.objects.filter(Next_TaskProgress_Sign=3)  # final library construction terminated
        temp_SeqCom_stop = models.RandDSampleFinLibConInfo.objects.filter(Next_TaskProgress_Sign=3)  # sequencing terminated
        temp_stop = chain(temp_Pretreatment_stop, temp_DNAExtract_stop, temp_PreLibCon_stop, temp_FinLibCon_stop,
                          temp_SeqCom_stop)  # merge rows from all tables
        return render(request, "modelspage/RandDExperimentalTaskAssignment.html", {"userinfo": temp,
                                                                                   "Pretreatment_not_audited": Pretreatment_not_audited,
                                                                                   "Pretreatment_audited": Pretreatment_audited,
                                                                                   "DNA_not_audited": DNA_not_audited,
                                                                                   "DNA_audited": DNA_audited,
                                                                                   "PreLibCon_not_audited": PreLibCon_not_audited,
                                                                                   "PreLibCon_audited": PreLibCon_audited,
                                                                                   "FinLibCon_not_audited": FinLibCon_not_audited,
                                                                                   "FinLibCon_audited": FinLibCon_audited,
                                                                                   "ComputerSeq_not_audited": ComputerSeq_not_audited,
                                                                                   "ComputerSeq_audited": ComputerSeq_audited,
                                                                                   "Fin_unaud": temp_Fin_unaud,
                                                                                   "Fin_NoPass": temp_Fin_NoPass,
                                                                                   "suspend": temp_suspend,
                                                                                   "stop": temp_stop,
                                                                                   "myInfo": temp_myInfo,
                                                                                   "SystemMessage": temp_SystemMessage_Unread,
                                                                                   "num_SystemMessage_Unread": num_SystemMessage_Unread})
| [
"ramandrom@139.com"
] | ramandrom@139.com |
2b2292edfd105992c36aa4fca01ce951238696ab | 439add47001009e173418b30cfb820b0e92989ed | /apps/users/urls.py | 03905571d8e624a6de7bd52b2762f03ca522ec43 | [] | no_license | AngelMercado/primeTed | 31b410d7c64da1001f40bae824f7a700f46dcd40 | 7a05d2ea257334cb726d39e80e2209f9cdbf0578 | refs/heads/master | 2021-01-01T17:59:28.360220 | 2017-07-24T17:27:57 | 2017-07-24T17:27:57 | 98,215,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | from django.conf.urls import patterns, include, url
from .views import PanelView,RegistrateView,LogOut,LoginView
from apps.home.views import HomeView
# Route table for the users app (legacy Django `patterns()` syntax):
# dashboard, login, free registration, logout and home.
urlpatterns = patterns('',
    url(r'^$',PanelView.as_view(),name='panel'),
    url(r'^login$',LoginView.as_view(),name='login'),
    url(r'^registrateGratis$',RegistrateView.as_view(),name='registrate'),
    url(r'^inicio$',LogOut,name='logout'),
    url(r'^home$',HomeView.as_view(),name='home'),
) | [
"myjava@outlook.es"
] | myjava@outlook.es |
9f17a97976b8031844c5b47af19eedcf16363869 | e03250b86ba042c55f05882998c6a19cd4f39c31 | /sicknote_app_v00_01.py | d3a1e941ef6455949b2f67cfab4e0366544cf4a2 | [] | no_license | nzwi/sicknote-flask-endpoint | a467d519a0fd31b5ff9d45b8dfd8306cb88eadc7 | cf7241c951b04292df9a4b8161446e30db8b4f84 | refs/heads/master | 2020-03-09T06:50:36.832533 | 2018-04-08T14:32:04 | 2018-04-08T14:32:04 | 128,649,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | ##
# Title: Python Flask endpoint within Amazon Virtual Private Cloud (VPC)
# to allow lambda to communicate with ethereum helper functions
# Version: v00_01
# Author: Nzwisisa Chidembo <nzwisisa@gmail.com>
##
from flask import Flask, jsonify, request
# Replace <helper function file> with your helper python file
import <helper function file> as sk
# Flask application served inside the VPC; Lambda POSTs JSON to "/".
app = Flask(__name__)
@app.route('/', methods=["POST"])
def post():
    """Accept a JSON POST, forward the payload to the lambda helper, and return its JSON result."""
    if not request.is_json:
        return jsonify(state='Request was not JSON')
    payload = request.get_json()
    result = sk.lambda_handler(payload, [])
    return jsonify(result)
# Include the internal VPC ip address of your AWS EC2 instant
# Development entry point; host is the EC2 instance's internal VPC IP (placeholder below).
if __name__ == '__main__':
    app.run(host='xxxxxxxxx',debug=True)
| [
"nzwisisa@gmail.com"
] | nzwisisa@gmail.com |
20a079bd1af4db6c499e81e182bb3635f71069b9 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/doc3d/I_to_M_Gk3_no_pad/pyr_Tcrop256_pad20_jit15/pyr_3s/L8/step09_3side_L8.py | 492f375600b24d9111789d8a77bc4776a8444e6d | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82,606 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### 把 kong_model2 加入 sys.path
# Locate the "kong_model2" project root from this file's absolute path and put
# it on sys.path so the project-local "stepXX_*" modules below can be imported.
# NOTE(review): the path handling assumes Windows "\\" separators — confirm the
# script is only ever run on Windows before porting.
import os
from tkinter import S
# NOTE(review): "from tkinter import S" appears unused (likely an accidental
# IDE auto-import) — confirm nothing else relies on it before removing.
code_exe_path = os.path.realpath(__file__) ### absolute path of the currently executing script
code_exe_path_element = code_exe_path.split("\\") ### split the path into components to find where kong_model2 sits
kong_layer = code_exe_path_element.index("kong_model2") ### index (depth) of the kong_model2 component in the path
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### reassemble the path up to and including kong_model2
import sys ### append the kong_model2 dir to sys.path
sys.path.append(kong_model2_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
from step08_b_use_G_generate_I_to_M import I_to_M
from step08_b_use_G_generate_0_util import Tight_crop
from step09_c_train_step import Train_step_I_to_M
from step09_d_KModel_builder_combine_step789 import KModel_builder, MODEL_NAME
# Both ops crop tightly with a 20 px pad and resize to 256x256.
# Generation uses a deterministic crop (jit_scale=0); the train step jitters
# the crop by up to 15 px as augmentation.
_gen_crop = Tight_crop(pad_size=20, resize=(256, 256), jit_scale=0)
_train_crop = Tight_crop(pad_size=20, resize=(256, 256), jit_scale=15)
use_what_gen_op = I_to_M(_gen_crop)
use_what_train_step = Train_step_I_to_M(_train_crop)
import time
# Wall-clock timestamp taken before the model builders below are constructed —
# presumably read later (outside this chunk) to report total elapsed time.
start_time = time.time()
###############################################################################################################################################################################################
###############################################################################################################################################################################################
########################################################### Block1
### Block1
#########################################################################################
def _pyramid_conv_block_nums(side1, side2, side3):
    """Return the 17-entry per-level conv-block-count list for one pyramid.

    Indices 0..7 are the down levels, index 8 the bottleneck, and indices
    9..16 mirror 7..0 (the list is symmetric).  With side3 <= side2 <= side1,
    level ``i`` gets:

      * 3 conv blocks while i < side3,
      * 2 conv blocks while i < side2,
      * 1 conv block  while i < side1,
      * 0 conv blocks otherwise.

    This reproduces the previously hand-written tables exactly, e.g.
    ``1side_5__2side_3__3side_2`` -> ``[3,3,2,1,1,0,...,0,1,1,2,3,3]``.
    """
    half = [3 if i < side3 else 2 if i < side2 else 1 if i < side1 else 0
            for i in range(8)]
    mid = 3 if 8 < side3 else 2 if 8 < side2 else 1 if 8 < side1 else 0
    return half + [mid] + half[::-1]

# Generate every pyramid_1side_A__2side_B__3side_C constant for
# 1 <= C <= B <= A <= 9 (165 lists in total) under its original module-level
# name, so all downstream references keep working unchanged.
for _s1 in range(1, 10):
    for _s2 in range(1, _s1 + 1):
        for _s3 in range(1, _s2 + 1):
            globals()["pyramid_1side_%d__2side_%d__3side_%d" % (_s1, _s2, _s3)] = \
                _pyramid_conv_block_nums(_s1, _s2, _s3)
#########################################################################################
def _build_ch032_limit(conv_block_num):
    """Build one ch032 model spec: a flow_unet2 / unet3 (kernel 3, valid
    padding, hid_ch=32, depth 8, sigmoid single-channel output, channel upper
    bound 2**9) with the given per-level conv-block counts, wired to the
    shared I->M gen op and train step.  All 46 builders below differed only in
    ``conv_block_num``, so the shared configuration is hoisted here."""
    return (KModel_builder()
            .set_model_name(MODEL_NAME.flow_unet2)
            .set_unet3(out_conv_block=True, concat_before_down=True,
                       kernel_size=3, padding="valid", hid_ch=32,
                       depth_level=8, out_ch=1, unet_acti="sigmoid",
                       conv_block_num=conv_block_num, ch_upper_bound=2 ** 9)
            .set_gen_op(use_what_gen_op)
            .set_train_step(use_what_train_step))

ch032_limit_pyramid_1side_1__2side_1__3side_1 = _build_ch032_limit(pyramid_1side_1__2side_1__3side_1)
ch032_limit_pyramid_1side_2__2side_1__3side_1 = _build_ch032_limit(pyramid_1side_2__2side_1__3side_1)
ch032_limit_pyramid_1side_2__2side_2__3side_1 = _build_ch032_limit(pyramid_1side_2__2side_2__3side_1)
ch032_limit_pyramid_1side_2__2side_2__3side_2 = _build_ch032_limit(pyramid_1side_2__2side_2__3side_2)
ch032_limit_pyramid_1side_3__2side_1__3side_1 = _build_ch032_limit(pyramid_1side_3__2side_1__3side_1)
ch032_limit_pyramid_1side_3__2side_2__3side_1 = _build_ch032_limit(pyramid_1side_3__2side_2__3side_1)
ch032_limit_pyramid_1side_3__2side_2__3side_2 = _build_ch032_limit(pyramid_1side_3__2side_2__3side_2)
ch032_limit_pyramid_1side_3__2side_3__3side_1 = _build_ch032_limit(pyramid_1side_3__2side_3__3side_1)
ch032_limit_pyramid_1side_3__2side_3__3side_2 = _build_ch032_limit(pyramid_1side_3__2side_3__3side_2)
ch032_limit_pyramid_1side_3__2side_3__3side_3 = _build_ch032_limit(pyramid_1side_3__2side_3__3side_3)
ch032_limit_pyramid_1side_4__2side_1__3side_1 = _build_ch032_limit(pyramid_1side_4__2side_1__3side_1)
ch032_limit_pyramid_1side_4__2side_2__3side_1 = _build_ch032_limit(pyramid_1side_4__2side_2__3side_1)
ch032_limit_pyramid_1side_4__2side_2__3side_2 = _build_ch032_limit(pyramid_1side_4__2side_2__3side_2)
ch032_limit_pyramid_1side_4__2side_3__3side_1 = _build_ch032_limit(pyramid_1side_4__2side_3__3side_1)
ch032_limit_pyramid_1side_4__2side_3__3side_2 = _build_ch032_limit(pyramid_1side_4__2side_3__3side_2)
ch032_limit_pyramid_1side_4__2side_3__3side_3 = _build_ch032_limit(pyramid_1side_4__2side_3__3side_3)
ch032_limit_pyramid_1side_4__2side_4__3side_1 = _build_ch032_limit(pyramid_1side_4__2side_4__3side_1)
ch032_limit_pyramid_1side_4__2side_4__3side_2 = _build_ch032_limit(pyramid_1side_4__2side_4__3side_2)
ch032_limit_pyramid_1side_4__2side_4__3side_3 = _build_ch032_limit(pyramid_1side_4__2side_4__3side_3)
ch032_limit_pyramid_1side_4__2side_4__3side_4 = _build_ch032_limit(pyramid_1side_4__2side_4__3side_4)
ch032_limit_pyramid_1side_5__2side_1__3side_1 = _build_ch032_limit(pyramid_1side_5__2side_1__3side_1)
ch032_limit_pyramid_1side_5__2side_2__3side_1 = _build_ch032_limit(pyramid_1side_5__2side_2__3side_1)
ch032_limit_pyramid_1side_5__2side_2__3side_2 = _build_ch032_limit(pyramid_1side_5__2side_2__3side_2)
ch032_limit_pyramid_1side_5__2side_3__3side_1 = _build_ch032_limit(pyramid_1side_5__2side_3__3side_1)
ch032_limit_pyramid_1side_5__2side_3__3side_2 = _build_ch032_limit(pyramid_1side_5__2side_3__3side_2)
ch032_limit_pyramid_1side_5__2side_3__3side_3 = _build_ch032_limit(pyramid_1side_5__2side_3__3side_3)
ch032_limit_pyramid_1side_5__2side_4__3side_1 = _build_ch032_limit(pyramid_1side_5__2side_4__3side_1)
ch032_limit_pyramid_1side_5__2side_4__3side_2 = _build_ch032_limit(pyramid_1side_5__2side_4__3side_2)
ch032_limit_pyramid_1side_5__2side_4__3side_3 = _build_ch032_limit(pyramid_1side_5__2side_4__3side_3)
ch032_limit_pyramid_1side_5__2side_4__3side_4 = _build_ch032_limit(pyramid_1side_5__2side_4__3side_4)
ch032_limit_pyramid_1side_5__2side_5__3side_1 = _build_ch032_limit(pyramid_1side_5__2side_5__3side_1)
ch032_limit_pyramid_1side_5__2side_5__3side_2 = _build_ch032_limit(pyramid_1side_5__2side_5__3side_2)
ch032_limit_pyramid_1side_5__2side_5__3side_3 = _build_ch032_limit(pyramid_1side_5__2side_5__3side_3)
ch032_limit_pyramid_1side_5__2side_5__3side_4 = _build_ch032_limit(pyramid_1side_5__2side_5__3side_4)
ch032_limit_pyramid_1side_5__2side_5__3side_5 = _build_ch032_limit(pyramid_1side_5__2side_5__3side_5)
ch032_limit_pyramid_1side_6__2side_1__3side_1 = _build_ch032_limit(pyramid_1side_6__2side_1__3side_1)
ch032_limit_pyramid_1side_6__2side_2__3side_1 = _build_ch032_limit(pyramid_1side_6__2side_2__3side_1)
ch032_limit_pyramid_1side_6__2side_2__3side_2 = _build_ch032_limit(pyramid_1side_6__2side_2__3side_2)
ch032_limit_pyramid_1side_6__2side_3__3side_1 = _build_ch032_limit(pyramid_1side_6__2side_3__3side_1)
ch032_limit_pyramid_1side_6__2side_3__3side_2 = _build_ch032_limit(pyramid_1side_6__2side_3__3side_2)
ch032_limit_pyramid_1side_6__2side_3__3side_3 = _build_ch032_limit(pyramid_1side_6__2side_3__3side_3)
ch032_limit_pyramid_1side_6__2side_4__3side_1 = _build_ch032_limit(pyramid_1side_6__2side_4__3side_1)
ch032_limit_pyramid_1side_6__2side_4__3side_2 = _build_ch032_limit(pyramid_1side_6__2side_4__3side_2)
ch032_limit_pyramid_1side_6__2side_4__3side_3 = _build_ch032_limit(pyramid_1side_6__2side_4__3side_3)
ch032_limit_pyramid_1side_6__2side_4__3side_4 = _build_ch032_limit(pyramid_1side_6__2side_4__3side_4)
ch032_limit_pyramid_1side_6__2side_5__3side_1 = _build_ch032_limit(pyramid_1side_6__2side_5__3side_1)
ch032_limit_pyramid_1side_6__2side_5__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_5__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_5__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_5__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_5__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_5__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_5__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_5__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_6__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_6__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_6__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_6__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_6__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_6__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_6__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_6__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_6__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_6__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_6__2side_6__3side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_6__2side_6__3side_6, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_1__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_1__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_2__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_2__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_2__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_2__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_3__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_3__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_3__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_3__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_3__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_3__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_4__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_4__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_4__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_4__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_4__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_4__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_4__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_4__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_5__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_5__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_5__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_5__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_5__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_5__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_5__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_5__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_5__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_5__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_6__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_6__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_6__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_6__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_6__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_6__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_6__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_6__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_6__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_6__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_6__3side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_6__3side_6, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_7__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_7__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_7__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_7__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_7__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_7__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_7__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_7__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_7__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_7__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_7__3side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_7__3side_6, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_7__2side_7__3side_7 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_7__2side_7__3side_7, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_1__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_1__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_2__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_2__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_2__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_2__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_3__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_3__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_3__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_3__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_3__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_3__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_4__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_4__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_4__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_4__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_4__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_4__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_4__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_4__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_5__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_5__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_5__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_5__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_5__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_5__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_5__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_5__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_5__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_5__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_6__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_6__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_6__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_6__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_6__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_6__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_6__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_6__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_6__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_6__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_6__3side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_6__3side_6, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_7__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_7__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_7__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_7__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_7__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_7__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_7__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_7__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_7__3side_5 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_7__3side_5, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_7__3side_6 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_7__3side_6, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_7__3side_7 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_7__3side_7, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_8__3side_1 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_8__3side_1, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_8__3side_2 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_8__3side_2, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_8__3side_3 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_8__3side_3, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
ch032_limit_pyramid_1side_8__2side_8__3side_4 = KModel_builder().set_model_name(MODEL_NAME.flow_unet2).set_unet3(out_conv_block=True, concat_before_down=True, kernel_size=3, padding="valid", hid_ch= 32, depth_level=8, out_ch=1, unet_acti="sigmoid", conv_block_num=pyramid_1side_8__2side_8__3side_4, ch_upper_bound= 2 ** 9).set_gen_op( use_what_gen_op ).set_train_step( use_what_train_step )
# ---------------------------------------------------------------------------
# ch032 "limit" pyramid models: hid_ch=32, depth_level=8, ch_upper_bound=2**9.
# Every model in this section shares the exact same builder call and differs
# only in which pyramid_1side_A__2side_B__3side_C spec is passed as
# conv_block_num, so build them programmatically instead of one hand-written
# line per combination (49 copy-pasted lines in the original).
def _build_ch032_limit_model(conv_block_num):
    """Return a KModel_builder configured for one ch032 'limit' pyramid spec."""
    return (KModel_builder()
            .set_model_name(MODEL_NAME.flow_unet2)
            .set_unet3(out_conv_block=True, concat_before_down=True,
                       kernel_size=3, padding="valid",
                       hid_ch=32, depth_level=8, out_ch=1, unet_acti="sigmoid",
                       conv_block_num=conv_block_num, ch_upper_bound=2 ** 9)
            .set_gen_op(use_what_gen_op)
            .set_train_step(use_what_train_step))

# (1side, 2side, 3side) combinations covered by this section, in the original
# declaration order: first the tail of the 1side=8, 2side=8 series
# (3side = 5..8), then the full triangular 1side=9 series where 3side runs
# from 1 up to the 2side value.
_ch032_limit_combos = [(8, 8, s3) for s3 in range(5, 9)]
_ch032_limit_combos += [(9, s2, s3) for s2 in range(1, 10) for s3 in range(1, s2 + 1)]

for _s1, _s2, _s3 in _ch032_limit_combos:
    _suffix = "1side_%d__2side_%d__3side_%d" % (_s1, _s2, _s3)
    # pyramid_<suffix> is the conv_block_num spec already defined at module
    # level earlier in this file; the built model is published under the same
    # ch032_limit_pyramid_<suffix> module-level name the hand-written lines
    # created, so existing imports/lookups keep working.
    globals()["ch032_limit_pyramid_" + _suffix] = \
        _build_ch032_limit_model(globals()["pyramid_" + _suffix])
#########################################################################################
###############################################################################################################################################################################################
if __name__ == "__main__":
    # Quick smoke test: build one model and push a dummy 512x512 single-channel
    # image batch through its generator, then dump weights and a summary.
    import numpy as np

    print("build_model cost time:", time.time() - start_time)

    dummy_batch = np.zeros(shape=(1, 512, 512, 1))
    built = ch032_limit_pyramid_1side_1__2side_1__3side_1.build()
    prediction = built.generator(dummy_batch)
    print(prediction.shape)

    from kong_util.tf_model_util import Show_model_weights
    Show_model_weights(built.generator)
    built.generator.summary()
    print(built.model_describe)
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
d59306979796aafc1ce71802b7397335571e7929 | e4df66483ef535aa89e6348b72a7d683f472b034 | /products/migrations/0004_auto_20210703_1331.py | b7d13cbbb8a1cea2c4dcab80894d85b6efbcce80 | [] | no_license | Summersby95/james-boutique | 595b083c996dfd2f78f6912058b83118e77627a2 | ceeeddd796fe9a807d24d4ed222536762e565cf1 | refs/heads/master | 2023-06-20T09:27:54.979372 | 2021-07-14T21:52:42 | 2021-07-14T21:52:42 | 381,137,290 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | # Generated by Django 3.2.4 on 2021-07-03 12:31
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations) on 2021-07-03.
    # Gives Category a proper plural name in the admin and adds an optional
    # "has_sizes" flag to Product.

    dependencies = [
        ('products', '0003_auto_20210629_1032'),
    ]

    operations = [
        # Display-only change: admin shows "Categories" instead of "Categorys".
        migrations.AlterModelOptions(
            name='category',
            options={'verbose_name_plural': 'Categories'},
        ),
        # New optional boolean column on Product; existing rows get False.
        migrations.AddField(
            model_name='product',
            name='has_sizes',
            field=models.BooleanField(blank=True, default=False, null=True),
        ),
    ]
| [
"47246572+BigbyWolf95@users.noreply.github.com"
] | 47246572+BigbyWolf95@users.noreply.github.com |
fed740e3a86c5c0992ca482c58875e9b14269012 | 1bfad01139237049eded6c42981ee9b4c09bb6de | /RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/pimsm/router/interface/learnedmdtinfo/learnedmdtinfo.py | b27f8bb6f94a4485f17effd4ef1a42a2e0f065ba | [
"MIT"
] | permissive | kakkotetsu/IxNetwork | 3a395c2b4de1488994a0cfe51bca36d21e4368a5 | f9fb614b51bb8988af035967991ad36702933274 | refs/heads/master | 2020-04-22T09:46:37.408010 | 2019-02-07T18:12:20 | 2019-02-07T18:12:20 | 170,284,084 | 0 | 0 | MIT | 2019-02-12T08:51:02 | 2019-02-12T08:51:01 | null | UTF-8 | Python | false | false | 4,210 | py |
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class LearnedMdtInfo(Base):
    """The LearnedMdtInfo class encapsulates a system managed learnedMdtInfo node in the ixnetwork hierarchy.

    An instance of the class can be obtained by accessing the LearnedMdtInfo property from a parent instance.
    The internal properties list will be empty when the property is accessed and is populated from the server by using the find method.
    """

    # NOTE: auto-generated IxNetwork RestPy wrapper -- a read-only view of the
    # PIM-SM data MDT TLVs learned on an interface; do not edit by hand.

    # Name of this node in the server-side session data model hierarchy.
    _SDM_NAME = 'learnedMdtInfo'

    def __init__(self, parent):
        super(LearnedMdtInfo, self).__init__(parent)

    @property
    def Age(self):
        """The amount of time (in seconds) remaining before this TLV times out.

        Returns:
            number
        """
        return self._get_attribute('age')

    @property
    def CeGroupAddress(self):
        """The CE group address contained in this data MDT TLV.

        Returns:
            str
        """
        return self._get_attribute('ceGroupAddress')

    @property
    def CeSourceAddress(self):
        """The CE source address contained in this data MDT TLV.

        Returns:
            str
        """
        return self._get_attribute('ceSourceAddress')

    @property
    def MdtGroupAddress(self):
        """The MDT (PE) group address contained in this data MDT TLV.

        Returns:
            str
        """
        return self._get_attribute('mdtGroupAddress')

    @property
    def MdtSourceAddress(self):
        """The MDT (PE) source address contained in this data MDT TLV.

        Returns:
            str
        """
        return self._get_attribute('mdtSourceAddress')

    def find(self, Age=None, CeGroupAddress=None, CeSourceAddress=None, MdtGroupAddress=None, MdtSourceAddress=None):
        """Finds and retrieves learnedMdtInfo data from the server.

        All named parameters support regex and can be used to selectively retrieve learnedMdtInfo data from the server.
        By default the find method takes no parameters and will retrieve all learnedMdtInfo data from the server.

        Args:
            Age (number): The amount of time (in seconds) remaining before this TLV times out.
            CeGroupAddress (str): The CE group address contained in this data MDT TLV.
            CeSourceAddress (str): The CE source address contained in this data MDT TLV.
            MdtGroupAddress (str): The MDT (PE) group address contained in this data MDT TLV.
            MdtSourceAddress (str): The MDT (PE) source address contained in this data MDT TLV.

        Returns:
            self: This instance with matching learnedMdtInfo data retrieved from the server available through an iterator or index

        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # locals() deliberately captures every filter argument (plus self,
        # which the base-class _select() helper ignores) -- the parameter
        # names must match the server-side attribute names, so never rename.
        return self._select(locals())

    def read(self, href):
        """Retrieves a single instance of learnedMdtInfo data from the server.

        Args:
            href (str): An href to the instance to be retrieved

        Returns:
            self: This instance with the learnedMdtInfo data from the server available through an iterator or index

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
| [
"hubert.gee@keysight.com"
] | hubert.gee@keysight.com |
5088ff9a441d0a89a9acc0d64fff0a8dc6f8e028 | 9c0f298d56ef554b6bb004545dcd02988211df7d | /uebung07/uebung07-examples/tasks-show.py | fde8acdb8d610cd2ba32c22be0559d10cc9a70d5 | [] | no_license | n1tr0-5urf3r/InTech-2020 | 96d418360b47c17a7c2e4f00d32680fcb603a802 | 43d5659907586e6f5b55eb872cc8136c0b059678 | refs/heads/master | 2022-11-17T20:58:31.782540 | 2020-07-14T12:05:45 | 2020-07-14T12:05:45 | 259,252,184 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | #!/usr/bin/python3
# coding=utf-8
from tasks_lib import read_all_tasks, get_done_tasks, get_open_tasks
from tasks_lib import print_header, print_tasks, print_footer, print_form, print_navigation
import cgi
form = cgi.FieldStorage(encoding='utf8')

# Which state should the displayed tasks have? Default value: all
requested_state = form.getfirst('state', 'all')

all_tasks = read_all_tasks()

# Map each known state to its filter function and its (German) heading prefix;
# any other state falls back to showing every task with an empty prefix.
state_views = {
    "open": (get_open_tasks, "offene"),
    "done": (get_done_tasks, "erledigte"),
}
select_tasks, prefix = state_views.get(requested_state, (lambda t: t, ""))
tasks = select_tasks(all_tasks)

# From here on: emit the HTML page.
print_header("{} {} Aufgaben".format(len(tasks), prefix))
print_navigation()
print_tasks(tasks)
print_form()
print_footer()
"fabi@ihlecloud.de"
] | fabi@ihlecloud.de |
c3269a9d2921b1dd7aedb9e987d48a9a1cb04198 | 99a5e59f1f6dccd580989e92fc148143bef9ae23 | /store/models/customer.py | b9b79996b4d8e12facf7e9df4adc870b19fb17d9 | [] | no_license | Sachin-Kahandal/eshop | 743ce2c48c913f6aa41c6388395478b3fc01c1aa | c58b7f959ff4294c069bba1f1bca8f78294a4483 | refs/heads/master | 2023-02-22T13:01:22.113343 | 2021-01-26T14:56:57 | 2021-01-26T14:56:57 | 331,246,562 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py | from django.db import models
from django.contrib.auth.hashers import make_password, check_password
class Customer(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
phone = models.CharField(max_length=10)
address = models.CharField(max_length=100)
email = models.EmailField()
password = models.CharField(max_length=500)
def __str__(self):
return self.first_name + ' ' + self.last_name
def register(self):
self.save()
# checks if email exists
def emailExists(self):
if Customer.objects.filter(email = self.email):
return True
else:
return False
# checks if phone exists
def phoneExists(self):
if Customer.objects.filter(phone = self.phone):
return True
else:
return False
@staticmethod
def get_customer_email(email):
try:
customer = Customer.objects.get(email = email)
return customer
except:
return None
| [
"54132749+SachinKahandal@users.noreply.github.com"
] | 54132749+SachinKahandal@users.noreply.github.com |
a5a11cfef9f4349cd1bbbda6164070d5f154324b | ad682d2145f440c078a431a40d2153a204771026 | /method/DepBased/WM_OLPDM.py | 7889685fa719f8816d1f5051b2aece6f7cb45c2f | [] | no_license | barry800414/NewsCrawler | d81f1ee4b0e0c4a997dda1efd24d1430e222d318 | 18c10f10508558600f734d659e724d4e27f071a3 | refs/heads/master | 2021-05-03T13:11:29.696108 | 2015-07-01T16:38:05 | 2015-07-01T16:38:05 | 26,075,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,697 | py | #!/usr/bin/env python3
import sys
import json
import math
from collections import defaultdict
import numpy as np
from scipy.sparse import csr_matrix, hstack
from sklearn.grid_search import ParameterGrid
import WordModelImproved as WM
import OneLayerPhraseDepModel as OLPDM
from PhraseDepTree import loadPhraseFile
from sentiDictSum import readSentiDict
from RunExperiments import *
import ErrorAnalysis as EA
from misc import *
import dataTool
import Parameter
'''
This code implements the baseline (tf, tf-idf) features
for training and testing (supervised document-level learning)
Author: Wei-Ming Chen
Date: 2015/02/16
'''
# Deprecated: superseded by genXY() below (word-model + OLPDM feature merge);
# kept for reference only.
def mainProcedure(labelNewsList, paramsIter, clfList, allowedFirstLayerWord,
        allowedRel, topicMap=None, topicId=None):
    """For each parameter setting, build tfidf + OLPDM features and run the
    self-train / all-train / leave-one-out experiments.

    NOTE(review): `tfidf`, `topicSet`, `topicPhraseList`, `RunExp`, `DataTool`
    and `toStr` are module-level names not visible in this excerpt --
    presumably provided by the wildcard imports at the top of the file;
    verify before reusing this function.
    """
    # Cache of OneLayerPhraseDepModel instances keyed by seed-word POS type,
    # so each (expensive) model is constructed only once per configuration.
    # Assumes p['seedWordPOSType'] is hashable (e.g. a tuple/str) -- TODO confirm.
    oldms = dict()
    for p in paramsIter:
        # generate tfidf features
        print('generating tfidf features...', file=sys.stderr)
        (X1, y1) = tfidf.generateXY(labelNewsList, newsCols=p['columnSource'],
                statCol=p['statementCol'], feature=p['feature'])
        print('X1: (%d, %d)' % (X1.shape[0], X1.shape[1]), file=sys.stderr)

        # generate OLPDM features
        print('generating OLPDM features...', file=sys.stderr)
        # saving model for speed up
        if p['seedWordPOSType'] not in oldms:
            # NOTE(review): the comprehension variable deliberately(?) shadows
            # the `topicId` parameter inside the comprehension scope only.
            allowedSeedWord = { topicId: set(p['seedWordPOSType']) for topicId in topicSet }
            oldm = OLPDM.OneLayerPhraseDepModel(labelNewsList, topicPhraseList, allowedSeedWord,
                    'tag', allowedFirstLayerWord, 'word', allowedRel)
            oldms[p['seedWordPOSType']] = oldm
        else:
            oldm = oldms[p['seedWordPOSType']]
        (X2, y2) = oldm.genXY()
        print('X2: (%d, %d)' % (X2.shape[0], X2.shape[1]), file=sys.stderr)

        # merge (horizontally align) the two feature matrices
        X = DataTool.hstack(X1, X2)
        print('X: %d %d' % (X.shape[0], X.shape[1]), file=sys.stderr)

        if topicMap == None: #self train -> self test  (style nit: `is None` preferred)
            prefix = "%d, %s, %s, %s" % (topicId, 'OLPDM+' + str(p['feature']),
                    toStr(p['columnSource']), p['statementCol'])
            RunExp.selfTrainTest(X, y1, clfList, "MacroF1", testSize=0.2, prefix=prefix)
        else: # all-train-and-test and leave-one-test
            prefix = "all, %s, %s, %s" % ('OLPDM+' + str(p['feature']),
                    toStr(p['columnSource']), p['statementCol'])
            RunExp.allTrainTest(X, y1, topicMap, clfList, "MacroF1", testSize=0.2, prefix=prefix)
            RunExp.leaveOneTest(X, y1, topicMap, clfList, "MacroF1", prefix=prefix)
# generate word model features and dependency model features, then merge them
def genXY(labelNewsList, olpdm, topicSet, sentiDict, params, volc):
    """Build word-model (WM) and one-layer-phrase-dependency (OLPDM) feature
    matrices for the labeled news and merge them horizontally.

    Args:
        labelNewsList: list of labeled news entries (parsed JSON).
        olpdm: a pre-constructed OneLayerPhraseDepModel instance.
        topicSet: set of topic (statement) ids present in labelNewsList.
        sentiDict: sentiment dictionary used to build allowed first-layer words.
        params: dict with 'WM' and 'OLPDM' entries, each containing a
            'model settings' dict.
        volc: initial vocabulary handed to the word model.

    Returns:
        (X, y, volc) -- the horizontally stacked feature matrix, the label
        vector, and the merged vocabulary of both models.
    """
    # ----- word-model (bag-of-words / tfidf) features -----
    print('generating word features...', file=sys.stderr)
    p = params['WM']['model settings']
    allowedPOS = set(['VA', 'VV', 'NN', 'NR', 'AD', 'JJ', 'FW'])
    wm = WM.WordModel(labelNewsList, newsCols=p['col'], statCol=p['stat'],
            feature=p['feature'], allowedPOS=allowedPOS, volc=volc)
    (X1, y1) = wm.genXY(p['minCnt'])
    # FIX: the vocabulary belongs to the WordModel *instance*; the original
    # called WM.getVolc() on the module.  The parallel olpdm.getVolc() call
    # below shows the instance method is what is intended.
    volc1 = wm.getVolc()
    print('X1: (%d, %d)' % (X1.shape[0], X1.shape[1]), file=sys.stderr)

    # ----- OLPDM (dependency) features -----
    print('generating OLPDM features...', file=sys.stderr)
    p = params['OLPDM']['model settings']
    allowedSeedWord = initAllowedSet(topicSet, p['seedWordType'])
    allowedFirstLayerWord = initAllowedSet(topicSet, p['firstLayerType'], sentiDict)
    allowedRel = { t: None for t in topicSet }
    olpdm.setModel(allowedSeedWord, p['seedWordType']['type'],
            allowedFirstLayerWord, p['firstLayerType']['type'],
            allowedRel, p['minCnt'])
    (X2, y2) = olpdm.genXY()
    volc2 = olpdm.getVolc()
    print('X2: (%d, %d)' % (X2.shape[0], X2.shape[1]), file=sys.stderr)
    # Both models iterate the same news list in the same order, so the label
    # vectors must agree.
    assert np.array_equal(y1, y2)

    # ----- merge (horizontally stack) the two feature matrices -----
    X = DataTool.hstack(X1, X2)
    volc3 = mergeVolc(volc1, volc2)
    print('X: (%d, %d)' % (X.shape[0], X.shape[1]), file=sys.stderr)
    return (X, y1, volc3)
if __name__ == '__main__':
    # Command-line driver: loads data, then runs Self-Train-Test,
    # All-Train-Test, and Leave-One-Test experiments, pickling the best
    # result (by MacroF1) of each run.
    if len(sys.argv) != 6:
        print('Usage:', sys.argv[0], 'TagAndDepLabelNewsJson phraseJson sentiDict WMParamsJson OLPDMParamsJson', file=sys.stderr)
        exit(-1)
    # arguments
    labelNewsJson = sys.argv[1]
    phraseJson = sys.argv[2]
    sentiDictFile = sys.argv[3]
    WMParamsJson = sys.argv[4]
    OLPDMParamsJson = sys.argv[5]
    # load labels and news
    with open(labelNewsJson, 'r') as f:
        labelNewsList = json.load(f)
    # ====== initialization ======
    # load phrases
    topicPhraseList = loadPhraseFile(phraseJson)
    # load sentiment dictionary
    sentiDict = readSentiDict(sentiDictFile)
    # get the set of all possible topics
    topicSet = set([labelNews['statement_id'] for labelNews in labelNewsList])
    # constructed in the process of building the phrase dependency tree
    allowedFirstLayerWord = { topicId: set(sentiDict.keys()) for topicId in topicSet }
    allowedRel = { topicId: None for topicId in topicSet }
    # per-document topic id, aligned with labelNewsList
    topicMap = [ labelNewsList[i]['statement_id'] for i in range(0, len(labelNewsList)) ]
    # ====== initializing parameters ======
    clfList = ['NaiveBayes', 'MaxEnt', 'SVM']
    randSeedList = [1, 2, 3, 4, 5]
    # print result of first line (CSV header)
    ResultPrinter.printFirstLine()
    # ==================================================================== #
    #             Run experiments on given list of parameters              #
    # ==================================================================== #
    # read best parameters of the two models
    WMParams = Parameter.loadFrameworkTopicParams(WMParamsJson)
    OLPDMParams = Parameter.loadFrameworkTopicParams(OLPDMParamsJson)
    # ============= Run for self-train-test ===============
    print('Self-Train-Test...', file=sys.stderr)
    labelNewsInTopic = dataTool.divideLabel(labelNewsList)
    for t in topicSet:
        bestR = None
        # per-topic OLPDM model, built only from that topic's documents
        olpdm = OLPDM.OneLayerPhraseDepModel(labelNewsInTopic[t], topicPhraseList)
        paramsIter = Parameter.getParamsIter(WMParams['SelfTrainTest'][t], 'WM',
                OLPDMParams['SelfTrainTest'][t], 'OLPDM')
        for p in paramsIter:
            (X, y, volc) = genXY(labelNewsInTopic[t], olpdm, topicSet, sentiDict, p)
            rsList = RunExp.runTask(X, y, volc, 'SelfTrainTest', p,
                    clfList, topicId=t, randSeedList=randSeedList)
            for rs in rsList:
                if rs != None:
                    bestR = keepBestResult(bestR, rs, 'MacroF1')
        with open('WM_OLPDM_SelfTrainTest_topic%d.pickle' % t, 'w+b') as f:
            pickle.dump(bestR, f)
    # from here on, a single OLPDM model over the full corpus
    olpdm = OLPDM.OneLayerPhraseDepModel(labelNewsList, topicPhraseList)
    # ============= Run for all-train-test ================
    print('All-Train-Test...', file=sys.stderr)
    paramsIter = Parameter.getParamsIter(WMParams['AllTrainTest'], 'WM',
            OLPDMParams['AllTrainTest'], 'OLPDM')
    bestR = None
    for p in paramsIter:
        (X, y, volc) = genXY(labelNewsList, olpdm, topicSet,
                sentiDict, p)
        rsList = RunExp.runTask(X, y, volc, 'AllTrainTest', p, clfList,
                topicMap=topicMap, randSeedList=randSeedList)
        for rs in rsList:
            if rs != None:
                bestR = keepBestResult(bestR, rs, 'MacroF1')
    with open('WM_OLPDM_AllTrainTest.pickle', 'w+b') as f:
        pickle.dump(bestR, f)
    # ============= Run for leave-one-test ================
    print('Leave-One-Test...', file=sys.stderr)
    for t in topicSet:
        bestR = None
        # NOTE(review): label here is 'tfidf' while the other two phases use
        # 'WM' — confirm whether this is intentional or a copy-paste slip.
        paramsIter = Parameter.getParamsIter(WMParams['LeaveOneTest'][t], 'tfidf',
                OLPDMParams['LeaveOneTest'][t], 'OLPDM')
        for p in paramsIter:
            (X, y, volc) = genXY(labelNewsList, olpdm, topicSet, sentiDict, p)
            rsList = RunExp.runTask(X, y, volc, 'LeaveOneTest', p, clfList,
                    topicMap=topicMap, topicId=t, randSeedList=randSeedList)
            for rs in rsList:
                if rs != None:
                    # NOTE(review): indexes rs[t] here, unlike the plain rs
                    # used above — presumably runTask returns a per-topic
                    # mapping for LeaveOneTest; verify against RunExp.
                    bestR = keepBestResult(bestR, rs[t], 'MacroF1')
        with open('WM_OLPDM_LeaveOneTest_topic%d.pickle' % t, 'w+b') as f:
            pickle.dump(bestR, f)
'''
# run all combination
params = { 'feature': ['0/1', 'tf', 'tfidf'],
'column': [['content'], ['title'], ['title', 'content']],
'statement': [False, True],
'seedWordPOSType': [('NP',), ('NP', 'NR'), ('NP', 'NN', 'NR')]
}
paramsIter = ParameterGrid(params)
mainProcedure(labelNewsList, paramsIter, clfList, allowedFirstLayerWord,
allowedRel, topicMap=topicMap, topicId=None)
topicLabelNewsList = dataTool.divideLabel(labelNewsList)
for topicId, labelNewsList in topicLabelNewsList.items():
mainProcedure(labelNewsList, paramsIter, clfList, allowedFirstLayerWord,
allowedRel, topicMap=None, topicId=topicId)
'''
'''
oldms = dict()
# all topic are mixed to train and predict/ leave-one-test
for p in paramsIter:
# generate tfidf features
print('generating tfidf features...', file=sys.stderr)
(X1, y1) = tfidf.generateXY(labelNewsList, newsCols=p['column'],
statementCol=p['statement'], feature=p['feature'])
print('X1: (%d, %d)' % (X1.shape[0], X1.shape[1]), file=sys.stderr)
# generate OLPDM features
print('generating OLPDM features...', file=sys.stderr)
# saving model for speed up
if p['seedWordPOSType'] not in oldms:
allowedSeedWord = { topicId: set(p['seedWordPOSType']) for topicId in topicSet }
print(allowedSeedWord)
oldm = OLPDM.OneLayerPhraseDepModel(labelNewsList, topicPhraseList, allowedSeedWord,
'tag', allowedFirstLayerWord, 'word', allowedRel)
oldms[p['seedWordPOSType']] = oldm
else:
oldm = oldms[p['seedWordPOSType']]
(X2, y2) = oldm.genXY()
print('X2: (%d, %d)' % (X2.shape[0], X2.shape[1]), file=sys.stderr)
# merge (horozontally align) two matrix
X = DataTool.hstack(X1, X2)
print('X: %d %d' % (X.shape[0], X.shape[1]), file=sys.stderr)
# all train and test
prefix = "all, %s, %s, %s" % ('OLPDM+' + str(p['feature']), list2Str(p['column']), p['statement'])
RunExp.allTrainTest(X, y1, topicMap, clfList, "MacroF1", testSize=0.2, prefix=prefix)
# leave one test
RunExp.leaveOneTest(X, y1, topicMap, clfList, "MacroF1", prefix=prefix)
'''
| [
"barry800414@gmail.com"
] | barry800414@gmail.com |
047660a9b15f645d34c790dbd31c938415f1e740 | bad62c2b0dfad33197db55b44efeec0bab405634 | /sdk/storage/azure-mgmt-storage/azure/mgmt/storage/v2021_02_01/models/__init__.py | 82c172aa1eb5e798e13af3d8f39e6216f291614d | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | test-repo-billy/azure-sdk-for-python | 20c5a2486456e02456de17515704cb064ff19833 | cece86a8548cb5f575e5419864d631673be0a244 | refs/heads/master | 2022-10-25T02:28:39.022559 | 2022-10-18T06:05:46 | 2022-10-18T06:05:46 | 182,325,031 | 0 | 0 | MIT | 2019-07-25T22:28:52 | 2019-04-19T20:59:15 | Python | UTF-8 | Python | false | false | 12,525 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._models_py3 import AccountSasParameters
from ._models_py3 import ActiveDirectoryProperties
from ._models_py3 import AzureEntityResource
from ._models_py3 import AzureFilesIdentityBasedAuthentication
from ._models_py3 import BlobContainer
from ._models_py3 import BlobInventoryPolicy
from ._models_py3 import BlobInventoryPolicyDefinition
from ._models_py3 import BlobInventoryPolicyFilter
from ._models_py3 import BlobInventoryPolicyRule
from ._models_py3 import BlobInventoryPolicySchema
from ._models_py3 import BlobRestoreParameters
from ._models_py3 import BlobRestoreRange
from ._models_py3 import BlobRestoreStatus
from ._models_py3 import BlobServiceItems
from ._models_py3 import BlobServiceProperties
from ._models_py3 import ChangeFeed
from ._models_py3 import CheckNameAvailabilityResult
from ._models_py3 import CloudErrorBody
from ._models_py3 import CorsRule
from ._models_py3 import CorsRules
from ._models_py3 import CustomDomain
from ._models_py3 import DateAfterCreation
from ._models_py3 import DateAfterModification
from ._models_py3 import DeleteRetentionPolicy
from ._models_py3 import DeletedAccount
from ._models_py3 import DeletedAccountListResult
from ._models_py3 import DeletedShare
from ._models_py3 import Dimension
from ._models_py3 import Encryption
from ._models_py3 import EncryptionIdentity
from ._models_py3 import EncryptionScope
from ._models_py3 import EncryptionScopeKeyVaultProperties
from ._models_py3 import EncryptionScopeListResult
from ._models_py3 import EncryptionService
from ._models_py3 import EncryptionServices
from ._models_py3 import Endpoints
from ._models_py3 import ErrorResponse
from ._models_py3 import ErrorResponseBody
from ._models_py3 import ExtendedLocation
from ._models_py3 import FileServiceItems
from ._models_py3 import FileServiceProperties
from ._models_py3 import FileShare
from ._models_py3 import FileShareItem
from ._models_py3 import FileShareItems
from ._models_py3 import GeoReplicationStats
from ._models_py3 import IPRule
from ._models_py3 import Identity
from ._models_py3 import ImmutabilityPolicy
from ._models_py3 import ImmutabilityPolicyProperties
from ._models_py3 import KeyCreationTime
from ._models_py3 import KeyPolicy
from ._models_py3 import KeyVaultProperties
from ._models_py3 import LastAccessTimeTrackingPolicy
from ._models_py3 import LeaseContainerRequest
from ._models_py3 import LeaseContainerResponse
from ._models_py3 import LegalHold
from ._models_py3 import LegalHoldProperties
from ._models_py3 import ListAccountSasResponse
from ._models_py3 import ListBlobInventoryPolicy
from ._models_py3 import ListContainerItem
from ._models_py3 import ListContainerItems
from ._models_py3 import ListQueue
from ._models_py3 import ListQueueResource
from ._models_py3 import ListQueueServices
from ._models_py3 import ListServiceSasResponse
from ._models_py3 import ListTableResource
from ._models_py3 import ListTableServices
from ._models_py3 import ManagementPolicy
from ._models_py3 import ManagementPolicyAction
from ._models_py3 import ManagementPolicyBaseBlob
from ._models_py3 import ManagementPolicyDefinition
from ._models_py3 import ManagementPolicyFilter
from ._models_py3 import ManagementPolicyRule
from ._models_py3 import ManagementPolicySchema
from ._models_py3 import ManagementPolicySnapShot
from ._models_py3 import ManagementPolicyVersion
from ._models_py3 import MetricSpecification
from ._models_py3 import Multichannel
from ._models_py3 import NetworkRuleSet
from ._models_py3 import ObjectReplicationPolicies
from ._models_py3 import ObjectReplicationPolicy
from ._models_py3 import ObjectReplicationPolicyFilter
from ._models_py3 import ObjectReplicationPolicyRule
from ._models_py3 import Operation
from ._models_py3 import OperationDisplay
from ._models_py3 import OperationListResult
from ._models_py3 import PrivateEndpoint
from ._models_py3 import PrivateEndpointConnection
from ._models_py3 import PrivateEndpointConnectionListResult
from ._models_py3 import PrivateLinkResource
from ._models_py3 import PrivateLinkResourceListResult
from ._models_py3 import PrivateLinkServiceConnectionState
from ._models_py3 import ProtocolSettings
from ._models_py3 import ProxyResource
from ._models_py3 import QueueServiceProperties
from ._models_py3 import Resource
from ._models_py3 import ResourceAccessRule
from ._models_py3 import RestorePolicyProperties
from ._models_py3 import Restriction
from ._models_py3 import RoutingPreference
from ._models_py3 import SKUCapability
from ._models_py3 import SasPolicy
from ._models_py3 import ServiceSasParameters
from ._models_py3 import ServiceSpecification
from ._models_py3 import Sku
from ._models_py3 import SkuInformation
from ._models_py3 import SmbSetting
from ._models_py3 import StorageAccount
from ._models_py3 import StorageAccountCheckNameAvailabilityParameters
from ._models_py3 import StorageAccountCreateParameters
from ._models_py3 import StorageAccountInternetEndpoints
from ._models_py3 import StorageAccountKey
from ._models_py3 import StorageAccountListKeysResult
from ._models_py3 import StorageAccountListResult
from ._models_py3 import StorageAccountMicrosoftEndpoints
from ._models_py3 import StorageAccountRegenerateKeyParameters
from ._models_py3 import StorageAccountUpdateParameters
from ._models_py3 import StorageQueue
from ._models_py3 import StorageSkuListResult
from ._models_py3 import SystemData
from ._models_py3 import Table
from ._models_py3 import TableServiceProperties
from ._models_py3 import TagFilter
from ._models_py3 import TagProperty
from ._models_py3 import TrackedResource
from ._models_py3 import UpdateHistoryProperty
from ._models_py3 import Usage
from ._models_py3 import UsageListResult
from ._models_py3 import UsageName
from ._models_py3 import UserAssignedIdentity
from ._models_py3 import VirtualNetworkRule
from ._storage_management_client_enums import (
AccessTier,
AccountStatus,
BlobInventoryPolicyName,
BlobRestoreProgressStatus,
Bypass,
CorsRuleAllowedMethodsItem,
CreatedByType,
DefaultAction,
DirectoryServiceOptions,
EnabledProtocols,
EncryptionScopeSource,
EncryptionScopeState,
ExpirationAction,
ExtendedLocationTypes,
GeoReplicationStatus,
HttpProtocol,
IdentityType,
ImmutabilityPolicyState,
ImmutabilityPolicyUpdateType,
InventoryRuleType,
KeyPermission,
KeySource,
KeyType,
Kind,
LargeFileSharesState,
LeaseContainerRequestAction,
LeaseDuration,
LeaseState,
LeaseStatus,
ListContainersInclude,
ListSharesExpand,
ManagementPolicyName,
MinimumTlsVersion,
Name,
Permissions,
PrivateEndpointConnectionProvisioningState,
PrivateEndpointServiceConnectionStatus,
ProvisioningState,
PublicAccess,
PutSharesExpand,
Reason,
ReasonCode,
RootSquashType,
RoutingChoice,
RuleType,
Services,
ShareAccessTier,
SignedResource,
SignedResourceTypes,
SkuName,
SkuTier,
State,
StorageAccountExpand,
UsageUnit,
)
from ._patch import __all__ as _patch_all
from ._patch import * # type: ignore # pylint: disable=unused-wildcard-import
from ._patch import patch_sdk as _patch_sdk
__all__ = [
'AccountSasParameters',
'ActiveDirectoryProperties',
'AzureEntityResource',
'AzureFilesIdentityBasedAuthentication',
'BlobContainer',
'BlobInventoryPolicy',
'BlobInventoryPolicyDefinition',
'BlobInventoryPolicyFilter',
'BlobInventoryPolicyRule',
'BlobInventoryPolicySchema',
'BlobRestoreParameters',
'BlobRestoreRange',
'BlobRestoreStatus',
'BlobServiceItems',
'BlobServiceProperties',
'ChangeFeed',
'CheckNameAvailabilityResult',
'CloudErrorBody',
'CorsRule',
'CorsRules',
'CustomDomain',
'DateAfterCreation',
'DateAfterModification',
'DeleteRetentionPolicy',
'DeletedAccount',
'DeletedAccountListResult',
'DeletedShare',
'Dimension',
'Encryption',
'EncryptionIdentity',
'EncryptionScope',
'EncryptionScopeKeyVaultProperties',
'EncryptionScopeListResult',
'EncryptionService',
'EncryptionServices',
'Endpoints',
'ErrorResponse',
'ErrorResponseBody',
'ExtendedLocation',
'FileServiceItems',
'FileServiceProperties',
'FileShare',
'FileShareItem',
'FileShareItems',
'GeoReplicationStats',
'IPRule',
'Identity',
'ImmutabilityPolicy',
'ImmutabilityPolicyProperties',
'KeyCreationTime',
'KeyPolicy',
'KeyVaultProperties',
'LastAccessTimeTrackingPolicy',
'LeaseContainerRequest',
'LeaseContainerResponse',
'LegalHold',
'LegalHoldProperties',
'ListAccountSasResponse',
'ListBlobInventoryPolicy',
'ListContainerItem',
'ListContainerItems',
'ListQueue',
'ListQueueResource',
'ListQueueServices',
'ListServiceSasResponse',
'ListTableResource',
'ListTableServices',
'ManagementPolicy',
'ManagementPolicyAction',
'ManagementPolicyBaseBlob',
'ManagementPolicyDefinition',
'ManagementPolicyFilter',
'ManagementPolicyRule',
'ManagementPolicySchema',
'ManagementPolicySnapShot',
'ManagementPolicyVersion',
'MetricSpecification',
'Multichannel',
'NetworkRuleSet',
'ObjectReplicationPolicies',
'ObjectReplicationPolicy',
'ObjectReplicationPolicyFilter',
'ObjectReplicationPolicyRule',
'Operation',
'OperationDisplay',
'OperationListResult',
'PrivateEndpoint',
'PrivateEndpointConnection',
'PrivateEndpointConnectionListResult',
'PrivateLinkResource',
'PrivateLinkResourceListResult',
'PrivateLinkServiceConnectionState',
'ProtocolSettings',
'ProxyResource',
'QueueServiceProperties',
'Resource',
'ResourceAccessRule',
'RestorePolicyProperties',
'Restriction',
'RoutingPreference',
'SKUCapability',
'SasPolicy',
'ServiceSasParameters',
'ServiceSpecification',
'Sku',
'SkuInformation',
'SmbSetting',
'StorageAccount',
'StorageAccountCheckNameAvailabilityParameters',
'StorageAccountCreateParameters',
'StorageAccountInternetEndpoints',
'StorageAccountKey',
'StorageAccountListKeysResult',
'StorageAccountListResult',
'StorageAccountMicrosoftEndpoints',
'StorageAccountRegenerateKeyParameters',
'StorageAccountUpdateParameters',
'StorageQueue',
'StorageSkuListResult',
'SystemData',
'Table',
'TableServiceProperties',
'TagFilter',
'TagProperty',
'TrackedResource',
'UpdateHistoryProperty',
'Usage',
'UsageListResult',
'UsageName',
'UserAssignedIdentity',
'VirtualNetworkRule',
'AccessTier',
'AccountStatus',
'BlobInventoryPolicyName',
'BlobRestoreProgressStatus',
'Bypass',
'CorsRuleAllowedMethodsItem',
'CreatedByType',
'DefaultAction',
'DirectoryServiceOptions',
'EnabledProtocols',
'EncryptionScopeSource',
'EncryptionScopeState',
'ExpirationAction',
'ExtendedLocationTypes',
'GeoReplicationStatus',
'HttpProtocol',
'IdentityType',
'ImmutabilityPolicyState',
'ImmutabilityPolicyUpdateType',
'InventoryRuleType',
'KeyPermission',
'KeySource',
'KeyType',
'Kind',
'LargeFileSharesState',
'LeaseContainerRequestAction',
'LeaseDuration',
'LeaseState',
'LeaseStatus',
'ListContainersInclude',
'ListSharesExpand',
'ManagementPolicyName',
'MinimumTlsVersion',
'Name',
'Permissions',
'PrivateEndpointConnectionProvisioningState',
'PrivateEndpointServiceConnectionStatus',
'ProvisioningState',
'PublicAccess',
'PutSharesExpand',
'Reason',
'ReasonCode',
'RootSquashType',
'RoutingChoice',
'RuleType',
'Services',
'ShareAccessTier',
'SignedResource',
'SignedResourceTypes',
'SkuName',
'SkuTier',
'State',
'StorageAccountExpand',
'UsageUnit',
]
__all__.extend([p for p in _patch_all if p not in __all__])
_patch_sdk() | [
"noreply@github.com"
] | noreply@github.com |
cf3ee11aac574e0f1e461602f57fd51ffa9135bb | 4fdc839b92bf50d342467d7f453093fa4233af9d | /templateLoader/help/source/conf.py | b2993fe10b57ab82126175183061902aef62b806 | [] | no_license | lpofredc/Qgis-plugin-templateLoader | f8d848192639018d655eb2ca6c8846d608ad2a4d | c3b46eecd5481693315e7d294cd82a513508bdc8 | refs/heads/master | 2020-03-27T14:13:54.580319 | 2017-05-10T13:51:48 | 2017-05-10T13:51:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,047 | py | # -*- coding: utf-8 -*-
#
# templateloader documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 12 17:11:03 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'templateloader'
copyright = u'2013, PnC'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'templateclassdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'templateloader.tex', u'templateloader Documentation',
u'PnC', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'templateclass', u'templateloader Documentation',
[u'PnC'], 1)
]
| [
"amandine.sahl@gmail.com"
] | amandine.sahl@gmail.com |
f5ec3f1b0f0acf25ad487555a7f33120f6d5522a | 63cb8173f398a99b69c6345e05943ec1c5bdccd6 | /main.py | 53e4564a5e1c358618aff9084bc49191c9e348c7 | [] | no_license | Blender3D/Deskboard | 596ff809ae1f7ad15bff0eca4f8e36e44ee8976f | 693361c010c1b1a7489480c406ec92354d8dc766 | refs/heads/master | 2021-01-22T05:24:36.441601 | 2012-09-17T03:07:52 | 2012-09-17T03:07:52 | 5,835,678 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,024 | py | #!/usr/bin/env python2
import os, re, sys, json, datetime, time, glob, ConfigParser, subprocess
from functools import wraps
import psutil
import dbus
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import *
from dbus.mainloop.qt import DBusQtMainLoop
from WebkitQObject import WebkitQObject
from desktop import DesktopLauncher, Desktop
from music import MusicBackend
try:
from cStringIO import StringIO
except:
from StringIO import StringIO
DBusQtMainLoop(set_as_default=True)
def cached_property(function):
    """Memoizing decorator: compute once, then return the cached value.

    The first call's result is stored in the enclosing scope and returned
    on every subsequent call (arguments are ignored after the first call).

    Bugfix: the original assigned ``result`` inside ``wrapper`` without
    declaring it ``nonlocal``, which made ``result`` a local variable and
    raised UnboundLocalError on the very first ``if result:`` read.
    """
    result = None

    @wraps(function)
    def wrapper(*args, **kwargs):
        nonlocal result
        # None doubles as the "not yet computed" sentinel; a None return
        # value from `function` would therefore be recomputed each call.
        if result is None:
            result = function(*args, **kwargs)
        return result

    return wrapper
def debug(function):
    """Decorator that echoes each call's return value for quick tracing."""

    @wraps(function)
    def traced(*args, **kwargs):
        outcome = function(*args, **kwargs)
        print('{}() -> {}'.format(function.__name__, outcome))
        return outcome

    return traced
class WebkitQObject(QObject):
    """QObject that keeps Python references to items handed to WebKit/JS.

    NOTE(review): this local class shadows the ``WebkitQObject`` imported
    from the ``WebkitQObject`` module at the top of the file — confirm
    which definition is actually intended.
    """

    def __init__(self):
        super(WebkitQObject, self).__init__()
        # holds references so stored objects are not garbage-collected
        self.__cache__ = []

    def store(self, item):
        # Keep a reference to `item` for the object's lifetime, then
        # return the same object for convenient chaining.
        self.__cache__.append(item)
        return self.__cache__[-1]
class System(QObject):
    """Exposes host resource statistics (via psutil) to the embedded page."""

    def __init__(self):
        super(System, self).__init__()

    @pyqtProperty(QVariant)
    @debug
    def ram(self):
        # psutil.phymem_usage() returns a namedtuple; expose it as a
        # plain dict so it marshals to JavaScript as an object.
        return dict(psutil.phymem_usage().__dict__)

    @pyqtSlot(QVariant)
    @debug
    def cpu(self):
        # NOTE(review): declared as a slot taking a QVariant but defined
        # with no extra parameters — confirm the intended slot signature.
        return {
            'usage': psutil.cpu_percent(),
            'cores': psutil.cpu_percent(percpu=True)
        }
class Background(QWebView):
    """Full-screen WebKit view drawn as the X11 desktop background.

    Sizes itself to the desktop, enables local/remote content access and
    local storage, and exposes the system/desktop/music backends to the
    page's JavaScript.
    """

    def __init__(self):
        super(Background, self).__init__()
        # cover the whole screen and center on the available geometry
        self.resize(QApplication.desktop().size())
        geometry = self.frameGeometry()
        geometry.moveCenter(QDesktopWidget().availableGeometry().center())
        self.move(geometry.topLeft())
        self.frame = self.page().mainFrame()
        self.settings = QWebSettings.globalSettings()
        # NOTE(review): the next line is duplicated — harmless, but one
        # copy can probably be removed.
        self.settings.setAttribute(QWebSettings.LocalContentCanAccessRemoteUrls, True)
        self.settings.setAttribute(QWebSettings.LocalContentCanAccessRemoteUrls, True)
        self.settings.setAttribute(QWebSettings.LocalContentCanAccessFileUrls, True)
        self.settings.setAttribute(QWebSettings.LocalStorageEnabled, True)
        self.settings.setAttribute(QWebSettings.AutoLoadImages, True)
        # mark this window as the desktop so the WM draws it beneath others
        self.setAttribute(Qt.WA_X11NetWmWindowTypeDesktop)
        # backends exposed to the page's JavaScript under these names
        # NOTE(review): these are locals; confirm Qt keeps them alive
        # after __init__ returns (addToJavaScriptWindowObject usually does).
        system_info = System()
        music_info = MusicBackend()
        desktop_info = Desktop()
        self.frame.addToJavaScriptWindowObject('system', system_info)
        self.frame.addToJavaScriptWindowObject('desktop', desktop_info)
        self.frame.addToJavaScriptWindowObject('music', music_info)

    def load_theme(self, name):
        # Load themes/<name>/index.html; returns False if it doesn't exist.
        path = os.path.abspath('themes/{name}/index.html'.format(name=name))
        if not os.path.exists(path):
            return False
        self.load(QUrl.fromLocalFile(path))
        # NOTE(review): this second load() immediately replaces the theme
        # page with a remote URL — looks like leftover debug code; confirm.
        self.load(QUrl('http://gridster.net/'))
        return True
if __name__ == '__main__':
    # Create the Qt application, show the web-based desktop background
    # with the 'text' theme, and enter the event loop.
    app = QApplication(sys.argv)
    background = Background()
    background.load_theme('text')
    background.show()
    sys.exit(app.exec_())
| [
"452469+Blender3D@users.noreply.github.com"
] | 452469+Blender3D@users.noreply.github.com |
5f32e4fa86ea444a96fde64ff2b9e4259b98b9f7 | 5002037a61b129ade69f675137cd9e16966518a2 | /apps/gallery/migrations/0007_auto_20190801_1340.py | 1f0c84144319147a27c1d5b54a3f020d9da65176 | [
"Apache-2.0"
] | permissive | mrtaalebi/sitigo | e290f1e952a3c47b9fb356177e5c7ea708dcd708 | cce8b4f5299b58d7365789ead416d4568b443743 | refs/heads/master | 2022-12-11T00:09:07.196902 | 2020-11-19T20:34:58 | 2020-11-19T20:34:58 | 194,496,364 | 0 | 0 | Apache-2.0 | 2019-07-05T14:29:39 | 2019-06-30T09:06:47 | JavaScript | UTF-8 | Python | false | false | 489 | py | # Generated by Django 2.2.3 on 2019-08-01 09:10
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: makes Image.city an optional (nullable,
    # blankable) foreign key to gallery.City; rows cascade-delete with
    # their City.

    dependencies = [
        ('gallery', '0006_auto_20190801_1338'),
    ]

    operations = [
        migrations.AlterField(
            model_name='image',
            name='city',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='gallery.City'),
        ),
    ]
| [
"the.doors.are.locked@gmail.com"
] | the.doors.are.locked@gmail.com |
0e5f8dad633573efa423a07619bc2d96e0e6736b | 340e25f879c0f96d9be68882bf986e302c762c9b | /compsocsite/polls/record.py | 1e1975ceda6f28202b364230fe581a1cde9420c3 | [
"MIT"
] | permissive | lengzi/opra | 5a9077a2d7d9f30d5b96f05af8270f7d3f6a54e6 | db4ee82dde5b95a5db4ea76281154be0cec7dfa5 | refs/heads/master | 2020-03-29T22:35:00.919465 | 2018-09-19T01:04:33 | 2018-09-19T01:04:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,200 | py | import datetime
import os
import csv
from django.shortcuts import render, get_object_or_404, redirect
from django.http import HttpResponseRedirect, HttpResponse, HttpRequest, JsonResponse
from django.core.urlresolvers import reverse
from django.views import generic
from django.core.files import File
from .models import *
from django.core.exceptions import ObjectDoesNotExist
from django.utils import timezone
from django.template import RequestContext
from django.shortcuts import render_to_response
from django.contrib.auth import authenticate, login,logout
from django.contrib.auth.decorators import login_required
from django.core import mail
from groups.models import *
from django.conf import settings
import random
import string
import json
def writeUserAction(request, question_id):
    """Persist a UserVoteRecord capturing how a user voted on one question.

    Reads the serialized interaction data from the POST body (initial/final
    orders, device, UI name, timing fields) and saves one record, then
    redirects the browser back to the page it came from.
    """
    question = get_object_or_404(Question, pk=question_id)
    if request.method == 'POST':
        #session_key = request.COOKIES.get(settings.SESSION_COOKIE_NAME, None)
        #str = "/log/" + request.user.username + "_" + session_key + ".txt"
        #f = open(str, 'w+')
        type = 0  # initial_type 0: see interpretRecord, rendered as "recommended order"
        data = request.POST['data']  # serialized interaction log (stored in `col`)
        order1 = request.POST['order1']
        device = request.POST['device']
        final = request.POST['final']
        commentTime = request.POST['commentTime']
        swit = request.POST['swit']  # UI-switch event log
        init = order1
        UI = request.POST['ui']
        submit_time = request.POST['submit_time']
        #print(slider_record)
        if request.user.username == "":
            # NOTE(review): anonymous_name is always "", so all anonymous
            # votes are stored under the same "(Anonymous)" user name.
            anonymous_name = ""
            new_name = "(Anonymous)" + anonymous_name
            r = UserVoteRecord(timestamp=timezone.now(),user=new_name,col=data,question=question,initial_order=init,final_order=final,device=device,initial_type=type,comment_time=commentTime,swit=swit,submit_time=submit_time,ui=UI)
            r.save()
        else:
            r = UserVoteRecord(timestamp=timezone.now(),user=request.user.username,col=data,question=question,initial_order=init,final_order=final,device=device,initial_type=type,comment_time=commentTime,swit=swit,submit_time=submit_time,ui=UI)
            r.save()
        #f.close()
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
# download all data from database, only initial and final rankings
# allow self-sign-up to groups
def interpretRecordForDownload(record):
    """Parse one UserVoteRecord's action log into CSV-ready rows.

    Returns (title_arr, record_arr): a header row describing the vote
    (user, question id, timestamp, initial order, device) and one row per
    UI action. The log format (from `record.record`) separates actions with
    ";;;", drag start/stop pairs with ";;", fields with "::" and item lists
    with "||"; item names carry a 4-character prefix that is stripped here
    via [4:]. Both lists are empty when the record has no action log.
    """
    order = record.initial_order
    r = record.record
    title_arr = []
    record_arr = []
    if r != "":
        action_arr = r.split(";;;")
        title_arr.append(record.user)
        title_arr.append(str(record.question.id))
        title_arr.append(str(record.timestamp))
        if record.initial_order == "":
            title_arr.append("Last user's vote order")
        else:
            title_arr.append(order)
        title_arr.append(record.device)
        for str1 in action_arr:
            each_record = []
            if str1.find(";;") != -1:
                # Two-part action: drag (start/stop) or click (source/target).
                pair = str1.split(";;")
                if len(pair) == 2:
                    t1 = pair[0].split("::")
                    t2 = pair[1].split("::")
                    t1[2] = t1[2][4:]
                    t2[2] = t2[2][4:]
                    if t1[1] == "start":
                        each_record.append("Drag")
                        str2 = ""
                        item_arr = t2[3].split("||")
                        each_record.append(t1[0])
                        each_record.append(t1[3])
                        each_record.append(t2[0])
                        each_record.append(item_arr[0])
                        for index in range(1,len(item_arr)):
                            if(item_arr[index] != ""):
                                str2 += item_arr[index][4:] + ", "
                        each_record.append(str2)
                    else:
                        each_record.append("Click")
                        each_record.append(t1[0])
                        each_record.append(t1[3])
                        each_record.append(t2[0])
                        each_record.append(t2[3])
            else:
                # Single-part action: Submit ("S<time>"), Move All, or Clear All.
                if len(str1) != 0:
                    if str1[0] == "S":
                        each_record.append("Submit")
                        each_record.append(str1[1:])
                    elif str1.find("||") == -1:
                        each_record.append("Move All")
                        each_record.append(str1)
                    else:
                        str2 = ""
                        clear_arr = str1.split("||")
                        each_record.append("Clear All")
                        each_record.append(clear_arr[0])
                        each_record.append("")
                        each_record.append("")
                        each_record.append("")
                        for i in range(1,len(clear_arr)):
                            str2 += clear_arr[i][4:] + "; "
                        each_record.append(str2)
            record_arr.append(each_record)
    return (title_arr,record_arr)
def interpretRecord(record):
    """Render one UserVoteRecord's action log as human-readable sentences.

    Returns a list of strings: a header ("<user> voted at <time>" plus the
    initial order and its origin), the device, then one sentence per UI
    action parsed from `record.record` (same ";;;" / ";;" / "::" / "||"
    encoding as interpretRecordForDownload). When the record has no action
    log, the raw `col` payload (and slider/star/switch fields, if any) is
    returned instead.
    """
    order = record.initial_order
    r = record.record
    record_arr = []
    if r != "":
        action_arr = r.split(";;;")
        temp = ""
        temp += record.user + " voted at " + str(record.timestamp) + "\n"
        if order != "":
            temp += "\nInitial order: " + order
            if record.initial_type == 0:
                temp += " (recommended order)"
            else:
                temp += " (User's last vote order)"
        record_arr.append(temp)
        record_arr.append(record.device)
        for str1 in action_arr:
            if str1.find(";;") != -1:
                # Drag (start/stop pair) or click action.
                pair = str1.split(";;")
                if len(pair) == 2:
                    t1 = pair[0].split("::")
                    t2 = pair[1].split("::")
                    t1[2] = t1[2][4:]
                    t2[2] = t2[2][4:]
                    if t1[1] == "start":
                        str2 = ""
                        item_arr = t2[3].split("||")
                        str2 += "Moved item " + t1[2] + " from tier " + t1[3] + " to tier " + item_arr[0] + " at time " + t1[0] + ", tier " + item_arr[0] + " has items: "
                        for index in range(1,len(item_arr)):
                            if(item_arr[index] != ""):
                                str2 += item_arr[index][4:] + ", "
                        record_arr.append(str2)
                    else:
                        str2 = ""
                        str2 += "Clicked item " + t1[2] + " on the right at tier " +t1[3] + " and moved it to tier " + t2[3] + " on the left at time " + t1[0] + "."
                        record_arr.append(str2)
            else:
                # Submit ("S<time>"), Move All, or Clear All action.
                if len(str1) != 0:
                    if str1[0] == "S":
                        str2 = "Clicked submit at time " + str1[1:] + "."
                        record_arr.append(str2)
                    elif str1.find("||") == -1:
                        str2 = "Clicked move all at time " + str1 + "."
                        record_arr.append(str2)
                    else:
                        clear_arr = str1.split("||")
                        str2 = "Clicked clear at time " + clear_arr[0] + ", order on the right is: "
                        for i in range(1,len(clear_arr)):
                            str2 += clear_arr[i][4:] + "; "
                        record_arr.append(str2)
    else:
        temp = ""
        temp += record.user + " voted at " + str(record.timestamp) + "\n"
        if order != "":
            temp += "\nInitial order: " + order
            if record.initial_type == 0:
                temp += " (recommended order)"
            else:
                temp += " (User's last vote order)"
        record_arr.append(temp)
        record_arr.append(record.device)
        record_arr.append(record.col)
        if record.slider != "":
            record_arr.append(record.one_col)
            record_arr.append(record.slider)
            record_arr.append(record.star)
            record_arr.append(record.swit)
    return record_arr
def interpretRecord1(record):
    """Summarize one vote record as a flat list for CSV export.

    Returns [question id, user, submit time, initial order, initial type,
    final order, comment time]. The submit time comes from the last entry
    of the ";;;"-separated action log (leading marker char stripped), or,
    when there is no action log, from the JSON payload in `record.col`.
    """
    initial = record.initial_order
    # Drop a trailing empty element if present. (For a plain string this
    # never fires, since a single character is never equal to "".)
    if len(initial) > 0 and initial[-1] == "":
        initial = initial[:-1]
    if record.record != "":
        actions = record.record.split(";;;")
        submit_time = actions[-1][1:]
    else:
        submit_time = json.loads(record.col)["submit"]["time"]
    return [
        str(record.question.id),
        record.user,
        submit_time,
        initial,
        str(record.initial_type),
        record.final_order,
        record.comment_time,
    ]
def interpretSliderStar(record):
    """Return the record's slider data serialized as a JSON string.

    Fix: the original also computed ``json.dumps(record.star)`` into a local
    that was never used or returned; that dead work has been removed. The
    return value (the slider field JSON-encoded) is unchanged.
    """
    return json.dumps(record.slider)
def downloadRecord(request, question_id):
    """Export all interaction records of one question as a CSV attachment."""
    response = HttpResponse(content_type='text/csv')
    question = get_object_or_404(Question,pk=question_id)
    records = question.uservoterecord_set.all()
    response['Content-Disposition'] = 'attachment; filename="record.csv"'
    writer = csv.writer(response)
    for r in records:
        # interpretRecordForLearning returns (row, submit_time); only the row
        # is written, the time component is discarded here.
        (data,time) = interpretRecordForLearning(r)
        writer.writerow(data)
    return response
def downloadAllRecord(request, user_id):
    """Export a summary row per vote for every question owned by a user (CSV)."""
    user = get_object_or_404(User,pk=user_id)
    response = HttpResponse(content_type='text/csv')
    records = []  # NOTE(review): unused local, kept for byte-compatibility
    response['Content-Disposition'] = 'attachment; filename="all_record.csv"'
    writer = csv.writer(response)
    for question in user.question_set.order_by('id'):
        for record in question.uservoterecord_set.all():
            writer.writerow(interpretRecord1(record))
        # Blank line between questions.
        writer.writerow([])
    return response
def downloadAllRecord1(request, user_id):
    """Export the detailed per-action logs of every vote on a user's questions (CSV)."""
    user = get_object_or_404(User,pk=user_id)
    response = HttpResponse(content_type='text/csv')
    records = []  # NOTE(review): unused local, kept for byte-compatibility
    response['Content-Disposition'] = 'attachment; filename="record.csv"'
    writer = csv.writer(response)
    writer.writerow(["Username","Question id","Timestamp","Initial order"])
    for question in user.question_set.order_by('id'):
        for record in question.uservoterecord_set.all():
            # One header row per vote, then one row per parsed action.
            (title_arr,record_arr) = interpretRecordForDownload(record)
            writer.writerow(title_arr)
            writer.writerow(["Action type","Start time","Start tier","Stop time","Stop tier","Final order"])
            for r in record_arr:
                writer.writerow(r)
            writer.writerow([])
    return response
class RecordView(generic.DetailView):
    """Detail view listing the stored behavior data of each response to a poll."""
    model = Question
    template_name = 'polls/record.html'
    def get_context_data(self, **kwargs):
        """Add the raw `behavior_data` of every response as 'user_records'."""
        ctx = super(RecordView, self).get_context_data(**kwargs)
        records = self.object.response_set.all()
        interpreted_records = []
        for r in records:
            interpreted_records.append(r.behavior_data)
        ctx['user_records'] = interpreted_records
        return ctx
def interpretRecordForLearning(record):
    """Flatten one record's per-UI event logs into a single CSV-ready row.

    Merges the JSON event lists from the two-column, one-column, slider and
    star UIs with parsed UI-switch events, and returns
    (([user, device, timestamp, submit_time, *events]), submit_time).
    """
    record_time = record.timestamp
    record_user = record.user
    record_device = record.device
    col_dict = json.loads(record.col)
    one_col_dict = json.loads(record.one_col)
    slider_dict = json.loads(record.slider)
    star_dict = json.loads(record.star)
    # Submit time comes from whichever column UI carries a "submit" event.
    submit_time = "0"
    for item in col_dict["two_column"]:
        if item["action"] == "submit":
            submit_time = item["time"]
    for item in one_col_dict["one_column"]:
        if item["action"] == "submit":
            submit_time = item["time"]
    # Switch events are ";;"-separated, with ";"-separated fields:
    # "time;from;to" or just "time;to".
    switch_list = []
    if record.swit != "":
        temp_switch_list = record.swit.split(";;")
        for item in temp_switch_list:
            if item != "":
                each_list = item.split(";")
                temp_dict = {}
                temp_dict["action"] = "switch"
                if len(each_list) > 2:
                    temp_dict["time"] = each_list[0]
                    temp_dict["from"] = each_list[1]
                    temp_dict["to"] = each_list[2]
                else:
                    temp_dict["time"] = each_list[0]
                    temp_dict["to"] = each_list[1]
                switch_list.append(temp_dict)
    total_order = []
    total_order.extend(col_dict["two_column"])
    total_order.extend(one_col_dict["one_column"])
    total_order.extend(slider_dict["slider"])
    total_order.extend(star_dict["star"])
    total_order.extend(switch_list)
    # NOTE(review): sorted_order is computed but never used -- the unsorted
    # total_order is what ends up in the output. Possibly the intent was to
    # extend final_list with sorted_order; confirm before changing.
    sorted_order = sorted(total_order, key=lambda k: k["time"])
    final_list = []
    final_list.append(record_user)
    final_list.append(record_device)
    final_list.append(record_time)
    final_list.append(submit_time)
    final_list.extend(total_order)
    return (final_list, submit_time)
def downloadParticipants(request):
    """Return all Mechanical Turk participants (userprofile.mturk == 1) as JSON."""
    all_par = User.objects.filter(userprofile__mturk=1)
    result = []
    for par in all_par:
        dic = {}
        dic["user_id"] = par.id
        dic["username"] = par.username
        dic["time_creation"] = par.userprofile.time_creation
        dic["age"] = par.userprofile.age
        dic["survey_code"]=par.userprofile.code
        dic["poll_seq"]=par.userprofile.sequence
        dic["current_poll"] = par.userprofile.cur_poll
        result.append(dic)
    # safe=False: top-level JSON value is a list, not an object.
    return JsonResponse(result, safe=False)
def downloadPolls(request):
    """Return all polls owned by the 'opraexp' experiment account as JSON."""
    all_poll = User.objects.get(username="opraexp").question_set.all()
    result = []
    for poll in all_poll:
        dic = {}
        dic["id"] = poll.id
        dic["title"] = poll.question_text
        dic["time_creation"] = str(poll.pub_date)
        dic["UI"] = getUIs(poll)
        dic["description"] = poll.question_desc
        dic["alternatives"] = list(poll.item_set.all().values_list("item_text",flat=True))
        result.append(dic)
    return JsonResponse(result, safe=False)
def downloadRecords(request):
    """Return every UserVoteRecord as a JSON list of vote dictionaries."""
    all_record = UserVoteRecord.objects.all()
    result = []
    for record in all_record:
        dic = {}
        dic["vote_id"] = record.id
        dic["poll_id"] = record.question.id
        # user_id stays 0 when the stored username no longer matches a User.
        dic["user_id"] = 0
        try:
            user = User.objects.get(username=record.user)
            dic["user_id"] = user.id
        except ObjectDoesNotExist:
            pass
        dic["data"] = json.loads(record.col)
        dic["platform"] = record.device
        dic["UI"] = getUIs(record.question)
        # Rankings fall back to [] when the stored field is not valid JSON.
        try:
            dic["initial_ranking"] = json.loads(record.initial_order)
        except ValueError:
            dic["initial_ranking"] = []
        try:
            dic["submitted_ranking"] = json.loads(record.final_order)
        except ValueError:
            dic["submitted_ranking"] = []
        dic["timestamp_submission"] = str(record.timestamp)
        dic["time_submission"] = record.submit_time
        result.append(dic)
    return JsonResponse(result, safe=False)
def downloadSpecificRecords(request):
    """Return active responses to the MTurk experiment polls (after 2018-05-17 23:30) as JSON."""
    all_responses = []
    result = []
    polls = getMturkPollID()
    for poll in polls:
        all_responses += list(get_object_or_404(Question,pk=poll).response_set.filter(active=1,timestamp__gt=datetime.datetime(2018,5,17,23,30)))
    for resp in all_responses:
        dic = {}
        # Start from the stored behavior payload; fall back to {} on bad JSON.
        try:
            dic = json.loads(resp.behavior_data)
        except ValueError:
            dic = {}
        dic["UI"] = getUIs(resp.question)
        dic["vote_id"] = resp.id
        dic["poll_id"] = resp.question.id
        dic["timestamp_submission"] = str(resp.timestamp)
        if hasattr(resp, 'user'):
            dic["user_id"] = resp.user.id
        else:
            dic["user_id"] = 0
        result.append(dic)
    return JsonResponse(result, safe=False)
def getMturkPollID():
    """Return the IDs of the Mechanical Turk experiment polls (180 through 189)."""
    first_id, last_id = 180, 189
    return [poll_id for poll_id in range(first_id, last_id + 1)]
def getUIs(poll):
    """Return the names of the UIs enabled on `poll`, in canonical order."""
    ui_flags = (
        (poll.twocol_enabled, "two_column"),
        (poll.onecol_enabled, "one_column"),
        (poll.slider_enabled, "slider"),
        (poll.star_enabled, "star"),
        (poll.yesno_enabled, "yesno"),
        (poll.yesno2_enabled, "yesno_grid"),
    )
    return [name for enabled, name in ui_flags if enabled]
| [
"tomjmwang@yahoo.com"
] | tomjmwang@yahoo.com |
8ea369755709ea09b07fed508e95099cc47b316a | 406d942b98d15f45393cb864b21ee3345eb9cc8f | /Coursera_Algorithms/max_mult.py | e81e650090f64723bc6c303c457c8ff250116381 | [] | no_license | msekhar12/Algorithms_Exercises | c3804d64f9cf43da92e20b151807952b41ac89c3 | c2454987060f8c0404d4fdb215c7b2eb6f8c677a | refs/heads/master | 2020-04-07T06:17:24.602842 | 2019-02-03T14:34:49 | 2019-02-03T14:34:49 | 158,129,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | # python 3
# Max product of 2 numbers from an array of integers
# Input will be 2 lines.
# The first line will contain the number of elements in the array, and the second line will be space separated numbers:
def find_max_product(n, l):
    """Return (largest, runner-up, their product, comparison count) for list l.

    Scans l once, tracking the two largest values. Returns None when fewer
    than two elements are given. Note: this reports the product of the two
    largest values, which for inputs containing two large negatives is not
    necessarily the maximum pairwise product.
    """
    if n <= 1:
        return None
    comparisons = 1
    if l[0] > l[1]:
        largest, runner_up = l[0], l[1]
    else:
        largest, runner_up = l[1], l[0]
    for idx in range(2, n):
        current = l[idx]
        if current > largest:
            runner_up, largest = largest, current
            comparisons += 1
        elif runner_up < current <= largest:
            runner_up = current
            comparisons += 1
    return largest, runner_up, largest * runner_up, comparisons
# Input: first line is the element count, second line the space-separated integers.
n = int(input())
l = [int(x) for x in input().split()]
# Prints the tuple (largest, runner-up, product, comparison count) or None.
print(find_max_product(n, l))
| [
"sekhar@Sekhars-MacBook-Pro.local"
] | sekhar@Sekhars-MacBook-Pro.local |
565584f78e17bf14cc57d09723b4166b4d3c8e6f | a0a7c9997676217387738f4c89e0665fd403b2c0 | /basic/perceptron.py | aa96363c3aeeb78fe704d83b5dcbfd81f0ff02d7 | [] | no_license | whoisalan/MACHINELEARNING | 86f51bd64fc46c68574b89846d6a11c88cf94272 | 3a658eae6e8818a8286045665b7e3e4d23c284db | refs/heads/master | 2020-04-09T18:15:47.313308 | 2018-12-05T14:23:02 | 2018-12-05T14:23:02 | 160,506,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,213 | py | import random
import numpy as np
import matplotlib.pyplot as plt
def sign(v):
    """Perceptron sign function: +1 when v >= 0, otherwise -1."""
    return 1 if v >= 0 else -1
def train(train_num,train_datas,lr):
    """Train a 2-D perceptron with stochastic gradient descent.

    Picks one random sample per step (train_num steps total); on a
    misclassification (label * prediction <= 0) nudges the weights and bias
    by the learning rate. Returns (weights, bias) as ([w0, w1], b).
    """
    weights = [0, 0]
    bias = 0
    for _ in range(train_num):
        # Stochastic step: sample one training point uniformly at random.
        x1, x2, label = random.choice(train_datas)
        activation = weights[0] * x1 + weights[1] * x2 + bias
        predicted = 1 if activation >= 0 else -1
        if label * predicted <= 0:
            weights[0] += lr * label * x1
            weights[1] += lr * label * x2
            bias += lr * label
    return weights, bias
def plot_points(train_datas,w,b):
    """Plot the samples and the learned decision boundary w.x + b = 0."""
    plt.figure()
    x1 = np.linspace(0, 8, 100)
    # Boundary line: w0*x1 + w1*x2 + b = 0  =>  x2 = (-b - w0*x1) / w1
    x2 = (-b-w[0]*x1)/w[1]
    plt.plot(x1, x2, color='r', label='y1 data')
    datas_len=len(train_datas)
    for i in range(datas_len):
        # Label +1 drawn as dots, label -1 as crosses.
        if(train_datas[i][-1]==1):
            plt.scatter(train_datas[i][0],train_datas[i][1],s=50)
        else:
            plt.scatter(train_datas[i][0],train_datas[i][1],marker='x',s=50)
    plt.show()
if __name__=='__main__':
    train_data1 = [[1, 3, 1], [2, 2, 1], [3, 8, 1], [2, 6, 1]]  # positive samples (label +1)
    train_data2 = [[2, 1, -1], [4, 1, -1], [6, 2, -1], [7, 3, -1]]  # negative samples (label -1)
    train_datas = train_data1 + train_data2  # full training set
    w,b=train(train_num=50,train_datas=train_datas,lr=0.01)
    plot_points(train_datas,w,b)
| [
"alanznala@163.com"
] | alanznala@163.com |
2a632ed951bfbd64396511d30034e84b2fb566c2 | 6a1174a7215cfd4c345cc8723cfb9a3bf053b108 | /conditional_image_manipulation/data/preprocess.py | 5b8fe0e726ad94bbe9b499668959e940bd8b6a00 | [
"MIT"
] | permissive | jlezama/disentangling-jacobian | e55e84bafe3c4f41d76e108bcb1dfb9cd6697b1e | c570945055c735a15b9adba093b7c688c7310aad | refs/heads/master | 2021-06-30T03:09:21.393511 | 2020-10-06T14:35:11 | 2020-10-06T14:35:11 | 169,459,503 | 26 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,488 | py | # Code from FaderNetworks by Facebook
#!/usr/bin/env python
import os
import matplotlib.image as mpimg
import cv2
import numpy as np
import torch
N_IMAGES = 202599
IMG_SIZE = 256
IMG_PATH = 'data/images_%i_%i.pth' % (IMG_SIZE, IMG_SIZE)
ATTR_PATH = 'data/attributes.pth'
def preprocess_images():
    """Read, crop, resize and tensorize the CelebA images into IMG_PATH.

    Skips the work when IMG_PATH already exists. Each aligned image is
    cropped vertically ([20:-20] -> 178x178), resized to IMG_SIZE, stacked
    into an (N_IMAGES, 3, IMG_SIZE, IMG_SIZE) uint8 tensor, and saved with
    torch.save (plus a separate 20000-image subset file).
    """
    if os.path.isfile(IMG_PATH):
        print("%s exists, nothing to do." % IMG_PATH)
        return
    print("Reading images from img_align_celeba/ ...")
    raw_images = []
    for i in range(1, N_IMAGES + 1):
        if i % 10000 == 0:
            print(i)
        # Crop 20 rows top and bottom: 218x178 -> 178x178.
        raw_images.append(mpimg.imread('img_align_celeba/%06i.jpg' % i)[20:-20])
    if len(raw_images) != N_IMAGES:
        raise Exception("Found %i images. Expected %i" % (len(raw_images), N_IMAGES))
    print("Resizing images ...")
    all_images = []
    for i, image in enumerate(raw_images):
        if i % 10000 == 0:
            print(i)
        assert image.shape == (178, 178, 3)
        # AREA interpolation for downscaling, LANCZOS4 for upscaling.
        if IMG_SIZE < 178:
            image = cv2.resize(image, (IMG_SIZE, IMG_SIZE), interpolation=cv2.INTER_AREA)
        elif IMG_SIZE > 178:
            image = cv2.resize(image, (IMG_SIZE, IMG_SIZE), interpolation=cv2.INTER_LANCZOS4)
        assert image.shape == (IMG_SIZE, IMG_SIZE, 3)
        all_images.append(image)
    # HWC -> CHW, plus a leading batch axis per image, then stack.
    data = np.concatenate([img.transpose((2, 0, 1))[None] for img in all_images], 0)
    data = torch.from_numpy(data)
    assert data.size() == (N_IMAGES, 3, IMG_SIZE, IMG_SIZE)
    print("Saving images to %s ..." % IMG_PATH)
    torch.save(data[:20000].clone(), 'data/images_%i_%i_20000.pth' % (IMG_SIZE, IMG_SIZE))
    torch.save(data, IMG_PATH)
def preprocess_attributes():
    """Parse list_attr_celeba.txt into boolean arrays and save to ATTR_PATH.

    Skips the work when ATTR_PATH already exists. Produces a dict mapping
    each of the 40 attribute names to a length-N_IMAGES boolean array.
    """
    if os.path.isfile(ATTR_PATH):
        print("%s exists, nothing to do." % ATTR_PATH)
        return
    attr_lines = [line.rstrip() for line in open('list_attr_celeba.txt', 'r')]
    # File layout: line 0 = image count, line 1 = attribute names, then one
    # line per image.
    assert len(attr_lines) == N_IMAGES + 2
    attr_keys = attr_lines[1].split()
    attributes = {k: np.zeros(N_IMAGES, dtype=np.bool) for k in attr_keys}
    for i, line in enumerate(attr_lines[2:]):
        image_id = i + 1
        split = line.split()
        assert len(split) == 41
        assert split[0] == ('%06i.jpg' % image_id)
        assert all(x in ['-1', '1'] for x in split[1:])
        for j, value in enumerate(split[1:]):
            attributes[attr_keys[j]][i] = value == '1'
    print("Saving attributes to %s ..." % ATTR_PATH)
    torch.save(attributes, ATTR_PATH)
preprocess_images()
preprocess_attributes()
| [
"jlezama@gmail.com"
] | jlezama@gmail.com |
047c84b87840d1b7ec7ee3291c29a71e590f7b89 | 8341678973612363868a36d89b1c464cbe0f4a79 | /app.py | c3b2815b752fcfd98f7940020234f803e7b9c6ed | [] | no_license | sveco86/magiogo-iptv-server | 45c80ce8e21d6488933e6cd4be9ed3873c013581 | 00610e6e73cfd0b79c136c48dcf4f87c08239040 | refs/heads/master | 2023-03-26T12:04:46.038199 | 2021-03-10T12:29:42 | 2021-03-10T12:29:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,348 | py | import atexit
import gzip
from pathlib import Path
import xmltv
from apscheduler.schedulers.background import BackgroundScheduler
from flask import Flask, redirect, render_template
from magiogo import *
from parse_season_number import parse_season_number
app = Flask(__name__, static_url_path="/", static_folder="public")
# Ensure public dir exists (playlist and guide files are written there)
Path("public").mkdir(exist_ok=True)
# Timestamp string of the last successful refresh; shown on the index page.
last_refresh = None
@app.route('/')
def index():
    """Render the landing page, including when the playlist/guide were last refreshed."""
    return render_template("index.html", last_refresh=last_refresh)
@app.route('/channel/<channel_id>')
def channel_redirect(channel_id):
    """Resolve the channel's current stream URL via Magio and 303-redirect to it."""
    stream_info = magio.channel_stream_info(channel_id)
    return redirect(stream_info.url, code=303)
@app.errorhandler(404)
def page_not_found(e):
    """404 fallback: send any unknown URL back to the index page."""
    # Redirect all to index page
    return redirect('/')
def gzip_file(file_path):
    """Write a gzip-compressed copy of `file_path` next to it as `file_path`.gz."""
    with open(file_path, 'rb') as source:
        with gzip.open(f'{file_path}.gz', 'wb') as compressed:
            for chunk in source:
                compressed.write(chunk)
def generate_m3u8(channels):
    """Write public/magioPlaylist.m3u8 with one entry per channel.

    Each entry points back at this server's /channel/<id> redirect endpoint,
    using MAGIO_SERVER_PUBLIC_URL (default http://127.0.0.1:5000) as base.
    """
    magio_iptv_server_public_url = os.environ.get('MAGIO_SERVER_PUBLIC_URL', "http://127.0.0.1:5000")
    with open("public/magioPlaylist.m3u8", "w", encoding="utf-8") as text_file:
        text_file.write("#EXTM3U\n")
        for channel in channels:
            text_file.write(f'#EXTINF:-1 tvg-id="{channel.id}" tvg-logo="{channel.logo}",{channel.name}\n')
            text_file.write(f"{magio_iptv_server_public_url}/channel/{channel.id}\n")
def generate_xmltv(channels):
    """Build public/magioGuide.xmltv (plus a .gz copy) from the Magio EPG.

    Fetches the programme guide for all given channels from today through
    MAGIO_GUIDE_DAYS (default 7) days ahead and writes it in XMLTV format.
    """
    date_from = datetime.datetime.now() - datetime.timedelta(days=0)
    date_to = datetime.datetime.now() + datetime.timedelta(days=int(os.environ.get('MAGIO_GUIDE_DAYS', 7)))
    channel_ids = list(map(lambda c: c.id, channels))
    epg = magio.epg(channel_ids, date_from, date_to)
    with open("public/magioGuide.xmltv", "wb") as guide_file:
        writer = xmltv.Writer(
            date=datetime.datetime.now().strftime("%Y%m%d%H%M%S"),
            generator_info_name="MagioGoIPTVServer",
            generator_info_url="",
            source_info_name="Magio GO Guide",
            source_info_url="https://skgo.magio.tv/v2/television/epg")
        # Write channels
        for channel in channels:
            channel_dict = {'display-name': [(channel.name, u'sk')],
                            'icon': [{'src': channel.logo}],
                            'id': channel.id}
            writer.addChannel(channel_dict)
        # Write programmes
        for (channel_id, programmes) in epg.items():
            for programme in programmes:
                programme_dict = {
                    'category': [(genre, u'en') for genre in programme.genres],
                    'channel': channel_id,
                    'credits': {'producer': [producer for producer in programme.producers],
                                'actor': [actor for actor in programme.actors],
                                'writer': [writer for writer in programme.writers],
                                'director': [director for director in programme.directors]},
                    'date': str(programme.year),
                    'desc': [(programme.description,
                              u'')],
                    'icon': [{'src': programme.poster}, {'src': programme.thumbnail}],
                    'length': {'units': u'seconds', 'length': str(programme.duration)},
                    'start': programme.start_time.strftime("%Y%m%d%H%M%S"),
                    'stop': programme.end_time.strftime("%Y%m%d%H%M%S"),
                    'title': [(programme.title, u'')]}
                # Define episode info only if provided
                if programme.episodeNo is not None:
                    # Since seasonNo seems to be always null, try parsing the season from the title (e.g. Kosti X. = 10)
                    if programme.seasonNo is None:
                        (show_title_sans_season, programme.seasonNo) = parse_season_number(programme.title)
                        programme_dict['title'] = [(show_title_sans_season, u'')]
                    # xmltv_ns numbering is zero-based: "season . episode . part".
                    programme_dict['episode-num'] = [
                        (f'{(programme.seasonNo or 1) - 1} . {(programme.episodeNo or 1) - 1} . 0', u'xmltv_ns')]
                writer.addProgramme(programme_dict)
        writer.write(guide_file, True)
    # Gzip the guide file
    gzip_file("public/magioGuide.xmltv")
def refresh():
    """Regenerate the playlist and XMLTV guide, then stamp the refresh time."""
    channels = magio.channels()
    print("Generating .m3u8 playlist")
    generate_m3u8(channels)
    print("Generating XMLTV guide")
    generate_xmltv(channels)
    print("Refreshing finished!")
    global last_refresh
    last_refresh = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
# Quality config
# Map the MAGIO_QUALITY env var (default "HIGH") onto the MagioQuality enum.
qualityString = os.environ.get('MAGIO_QUALITY', "HIGH")
qualityMapping = {"LOW": MagioQuality.low, "MEDIUM": MagioQuality.medium, "HIGH": MagioQuality.high, "EXTRA": MagioQuality.extra}
quality = qualityMapping[qualityString]  # raises KeyError for an unrecognized value
print(f"Stream quality configured to: {qualityString} ({quality})")
# Initial playlist and xmltv load
print("Logging in to Magio Go TV")
magio = MagioGo(os.environ.get('MAGIO_USERNAME'), os.environ.get('MAGIO_PASSWORD'), quality)
refresh()
# Load new playlist and xmltv everyday
# (interval configurable via MAGIO_GUIDE_REFRESH_HOURS, default 12 hours)
scheduler = BackgroundScheduler()
scheduler.add_job(refresh, 'interval', hours=int(os.environ.get('MAGIO_GUIDE_REFRESH_HOURS', 12)))
scheduler.start()
atexit.register(lambda: scheduler.shutdown())
| [
"lukas.kusik@gmail.com"
] | lukas.kusik@gmail.com |
ce0c8512a2373bffac1635858e730b38b204d9dd | 37bc60b070be22a5e22321655c8490df2285b07c | /translate.py | 5f414fdbd164ef00cfcaa2c3eddd47a0378d4518 | [] | no_license | TheWover/DidierStevensSuite | 2ab56d33472a242a5d49359d643c4e669c7a7e04 | 17f08aee76b98f95fc94b4e9c6131786d62b4716 | refs/heads/master | 2020-07-30T01:00:00.497949 | 2019-09-17T18:46:00 | 2019-09-17T18:46:00 | 210,027,232 | 1 | 0 | null | 2019-09-21T17:32:54 | 2019-09-21T17:32:53 | null | UTF-8 | Python | false | false | 27,454 | py | #!/usr/bin/env python
__description__ = 'Translate bytes according to a Python expression'
__author__ = 'Didier Stevens'
__version__ = '2.5.6'
__date__ = '2019/02/26'
"""
Source code put in public domain by Didier Stevens, no Copyright
https://DidierStevens.com
Use at your own risk
No input validation (neither output) is performed by this program: it contains injection vulnerabilities
Developed with Python 2.7, tested with 2.7 and 3.3
History:
2007/08/20: start
2014/02/24: rewrite
2014/02/27: manual
2015/11/04: added option -f
2015/11/05: continue
2016/02/20: added option -r
2016/04/25: 2.3.0 added StdoutWriteChunked() and option -R
2016/09/07: 2.3.1 added option -e
2016/09/09: continue
2016/09/13: man
2017/02/10: 2.4.0 added input filename # support
2017/02/26: fixed Python 3 str vs bytes bug
2017/06/04: 2.5.0 added #e# support
2017/06/16: continued #e# support
2017/07/29: added -2 option
2017/08/09: 2.5.1 #e# chr can take a second argument
2017/09/09: added functions Sani1 and Sani2 to help with input/output sanitization
2018/01/29: 2.5.2 added functions GzipD and ZlibD; and fixed stdin/stdout for Python 3
2018/02/12: 2.5.3 when the Python expression returns None (in stead of a byte value), no byte is written to output.
2018/03/05: 2.5.4 updated #e# expressions
2018/04/27: added option literalfilenames
2019/02/20: 2.5.5 added ZlibRawD
2019/02/26: 2.5.6 updated help
Todo:
"""
import optparse
import sys
import os
import textwrap
import re
import math
import binascii
import random
import zlib
import gzip
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
def PrintManual():
manual = '''
Manual:
Translate.py is a Python script to perform bitwise operations on files (like XOR, ROL/ROR, ...). You specify the bitwise operation to perform as a Python expression, and pass it as a command-line argument.
translate.py malware -o malware.decoded "byte ^ 0x10"
This will read file malware, perform XOR 0x10 on each byte (this is, expressed in Python: byte ^ 0x10), and write the result to file malware.decoded.
byte is a variable containing the current byte from the input file. Your expression has to evaluate to the modified byte. When your expression evaluates to None, no byte will be written to output. This can be used to delete bytes from the input.
For complex manipulation, you can define your own functions in a script file and load this with translate.py, like this:
translate.py malware -o malware.decoded "Process(byte)" process.py
process.py must contain the definition of function Process. Function Process must return the modified byte.
Another variable is also available: position. This variable contains the position of the current byte in the input file, starting from 0.
If only part of the file has to be manipulated, while leaving the rest unchanged, you can do it like this:
def Process(byte):
if position >= 0x10 and position < 0x20:
return byte ^ 0x10
else:
return byte
This example will perform an XOR 0x10 operation from the 17th byte till the 32nd byte included. All other bytes remain unchanged.
Because Python has built-in shift operators (<< and >>) but no rotate operators, I've defined 2 rotate functions that operate on a byte: rol (rotate left) and ror (rotate right). They accept 2 arguments: the byte to rotate and the number of bit positions to rotate. For example, rol(0x01, 2) gives 0x04.
translate.py malware -o malware.decoded "rol(byte, 2)"
Another function I defined is IFF (the IF Function): IFF(expression, valueTrue, valueFalse). This function allows you to write conditional code without an if statement. When expression evaluates to True, IFF returns valueTrue, otherwise it returns valueFalse.
And yet 2 other functions I defined are Sani1 and Sani2. They can help you with input/output sanitization: Sani1 accepts a byte as input and returns the same byte, except if it is a control character. All control characters (except VT, LF and CR) are replaced by a space character (0x20). Sani2 is like Sani1, but sanitizes even more bytes: it sanitizes control characters like Sani1, and also all bytes equal to 0x80 and higher.
translate.py malware -o malware.decoded "IFF(position >= 0x10 and position < 0x20, byte ^ 0x10, byte)"
By default this program translates individual bytes via the provided Python expression. With option -f (fullread), translate.py reads the input file as one byte sequence and passes it to the function specified by the expression. This function needs to take one string as an argument and return one string (the translated file).
Option -r (regex) uses a regular expression to search through the file and then calls the provided function with a match argument for each matched string. The return value of the function (a string) is used to replace the matched string.
Option -R (filterregex) is similar to option -r (regex), except that it does not operate on the complete file, but on the file filtered for the regex.
Here are 2 examples with a regex. The input file (test-ah.txt) contains the following: 1234&H41&H42&H43&H444321
The first command will search for strings &Hxx and replace them with the character represented in ASCII by hexadecimal number xx:
translate.py -r "&H(..)" test-ah.txt "lambda m: chr(int(m.groups()[0], 16))"
Output: 1234ABCD4321
The second command is exactly the same as the first command, except that it uses option -R in stead or -r:
translate.py -R "&H(..)" test-ah.txt "lambda m: chr(int(m.groups()[0], 16))"
Output: ABCD
Option -e (execute) is used to execute Python commands before the command is executed. This can, for example, be used to import modules.
Here is an example to decompress a Flash file (.swf):
translate.py -f -e "import zlib" sample.swf "lambda b: zlib.decompress(b[8:])"
You can use build in function ZlibD too, and ZlibRawD for inflating without header, and GzipD for gzip decompression.
A second file can be used as input with option -2. The value of the current byte of the second input file is stored in variable byte2 (this too advances byte per byte together with the primary input file).
Example:
translate.py -2 #021230 #Scbpbt "byte + byte2 - 0x30"
Output:
Secret
In stead of using an input filename, the content can also be passed in the argument. To achieve this, prefix the text with character #.
If the text to pass via the argument contains control characters or non-printable characters, hexadecimal (#h#) or base64 (#b#) can be used.
Example:
translate.py #h#89B5B4AEFDB4AEFDBCFDAEB8BEAFB8A9FC "byte ^0xDD"
Output:
This is a secret!
File arguments that start with #e# are a notational convention to use expressions to generate data. An expression is a single function/string or the concatenation of several functions/strings (using character + as concatenation operator).
Strings can be characters enclosed by single quotes ('example') or hexadecimal strings prefixed by 0x (0xBEEF).
4 functions are available: random, loremipsum, repeat and chr.
Function random takes exactly one argument: an integer (with value 1 or more). Integers can be specified using decimal notation or hexadecimal notation (prefix 0x).
The random function generates a sequence of bytes with a random value (between 0 and 255), the argument specifies how many bytes need to be generated. Remark that the random number generator that is used is just the Python random number generator, not a cryptographic random number generator.
Example:
tool.py #e#random(100)
will make the tool process data consisting of a sequence of 100 random bytes.
Function loremipsum takes exactly one argument: an integer (with value 1 or more).
The loremipsum function generates "lorem ipsum" text (fake latin), the argument specifies the number of sentences to generate.
Example: #e#loremipsum(2) generates this text:
Ipsum commodo proin pulvinar hac vel nunc dignissim neque eget odio erat magna lorem urna cursus fusce facilisis porttitor congue eleifend taciti. Turpis duis suscipit facilisi tristique dictum praesent natoque sem mi egestas venenatis per dui sit sodales est condimentum habitasse ipsum phasellus non bibendum hendrerit.
Function chr takes one argument or two arguments.
chr with one argument takes an integer between 0 and 255, and generates a single byte with the value specified by the integer.
chr with two arguments takes two integers between 0 and 255, and generates a byte sequence with the values specified by the integers.
For example #e#chr(0x41,0x45) generates data ABCDE.
Function repeat takes two arguments: an integer (with value 1 or more) and a byte sequence. This byte sequence can be a quoted string of characters (single quotes), like 'ABCDE' or an hexadecimal string prefixed with 0x, like 0x4142434445.
The repeat function will create a sequence of bytes consisting of the provided byte sequence (the second argument) repeated as many times as specified by the first argument.
For example, #e#repeat(3, 'AB') generates byte sequence ABABAB.
When more than one function needs to be used, the byte sequences generated by the functions can be concatenated with the + operator.
For example, #e#repeat(10,0xFF)+random(100) will generate a byte sequence of 10 FF bytes followed by 100 random bytes.
To prevent the tool from processing file arguments with wildcard characters or special initial characters (@ and #) differently, but to process them as normal files, use option --literalfilenames.
'''
for line in manual.split('\n'):
print(textwrap.fill(line))
def rol(byte, count):
    """Rotate the 8-bit value *byte* left by *count* bit positions."""
    wrapped = byte >> (8 - count)
    return ((byte << count) | wrapped) & 0xFF
def ror(byte, count):
    """Rotate the 8-bit value *byte* right by *count* bit positions."""
    wrapped = byte << (8 - count)
    return ((byte >> count) | wrapped) & 0xFF
#Sanitize 1: Sanitize input: return space (0x20) for all control characters, except HT, LF and CR
def Sani1(byte):
    """Map control bytes (other than HT, LF and CR) to a space (0x20)."""
    if byte in (0x09, 0x0A, 0x0D) or byte >= 0x20:
        return byte
    return 0x20
#Sanitize 2: Sanitize input: return space (0x20) for all bytes equal to 0x80 and higher, and all control characters, except HT, LF and CR
def Sani2(byte):
    """Map control bytes (other than HT, LF, CR) and bytes >= 0x80 to a space."""
    if byte in (0x09, 0x0A, 0x0D):
        return byte
    if 0x20 <= byte < 0x80:
        return byte
    return 0x20
def GzipD(data):
    # Gunzip an in-memory byte string: gzip.GzipFile needs a file-like
    # object, so the data is wrapped in StringIO (Python 2 style; on
    # Python 3 this would need BytesIO - TODO confirm target version).
    return gzip.GzipFile('', 'r', fileobj=StringIO(data)).read()
def ZlibD(data):
    """Decompress a standard zlib stream (with header and checksum)."""
    return zlib.decompress(data)
def ZlibRawD(data):
return zlib.decompress(data, -8)
# CIC: Call If Callable
def CIC(expression):
    """Return expression() when it is callable, otherwise the value itself."""
    return expression() if callable(expression) else expression
# IFF: IF Function
def IFF(expression, valueTrue, valueFalse):
    """Ternary helper: pick valueTrue/valueFalse, lazily calling it via CIC."""
    chosen = valueTrue if expression else valueFalse
    return CIC(chosen)
#Convert String To Bytes If Python 3
def CS2BIP3(string):
    """Convert a str to bytes of code-point values on Python 3; identity on Python 2."""
    if sys.version_info[0] <= 2:
        return string
    return bytes(ord(character) for character in string)
def Output(fOut, data):
    """Write *data* to fOut; stdout goes through StdoutWriteChunked instead."""
    if fOut == sys.stdout:
        StdoutWriteChunked(data)
    else:
        fOut.write(data)
def LoremIpsumSentence(minimum, maximum):
    """Build one fake-latin sentence of between minimum and maximum distinct words."""
    words = ['lorem', 'ipsum', 'dolor', 'sit', 'amet', 'consectetur', 'adipiscing', 'elit', 'etiam', 'tortor', 'metus', 'cursus', 'sed', 'sollicitudin', 'ac', 'sagittis', 'eget', 'massa', 'praesent', 'sem', 'fermentum', 'dignissim', 'in', 'vel', 'augue', 'scelerisque', 'auctor', 'libero', 'nam', 'a', 'gravida', 'odio', 'duis', 'vestibulum', 'vulputate', 'quam', 'nec', 'cras', 'nibh', 'feugiat', 'ut', 'vitae', 'ornare', 'justo', 'orci', 'varius', 'natoque', 'penatibus', 'et', 'magnis', 'dis', 'parturient', 'montes', 'nascetur', 'ridiculus', 'mus', 'curabitur', 'nisl', 'egestas', 'urna', 'iaculis', 'lectus', 'maecenas', 'ultrices', 'velit', 'eu', 'porta', 'hac', 'habitasse', 'platea', 'dictumst', 'integer', 'id', 'commodo', 'mauris', 'interdum', 'malesuada', 'fames', 'ante', 'primis', 'faucibus', 'accumsan', 'pharetra', 'aliquam', 'nunc', 'at', 'est', 'non', 'leo', 'nulla', 'sodales', 'porttitor', 'facilisis', 'aenean', 'condimentum', 'rutrum', 'facilisi', 'tincidunt', 'laoreet', 'ultricies', 'neque', 'diam', 'euismod', 'consequat', 'tempor', 'elementum', 'lobortis', 'erat', 'ligula', 'risus', 'donec', 'phasellus', 'quisque', 'vivamus', 'pellentesque', 'tristique', 'venenatis', 'purus', 'mi', 'dictum', 'posuere', 'fringilla', 'quis', 'magna', 'pretium', 'felis', 'pulvinar', 'lacinia', 'proin', 'viverra', 'lacus', 'suscipit', 'aliquet', 'dui', 'molestie', 'dapibus', 'mollis', 'suspendisse', 'sapien', 'blandit', 'morbi', 'tellus', 'enim', 'maximus', 'semper', 'arcu', 'bibendum', 'convallis', 'hendrerit', 'imperdiet', 'finibus', 'fusce', 'congue', 'ullamcorper', 'placerat', 'nullam', 'eros', 'habitant', 'senectus', 'netus', 'turpis', 'luctus', 'volutpat', 'rhoncus', 'mattis', 'nisi', 'ex', 'tempus', 'eleifend', 'vehicula', 'class', 'aptent', 'taciti', 'sociosqu', 'ad', 'litora', 'torquent', 'per', 'conubia', 'nostra', 'inceptos', 'himenaeos']
    # randint is drawn first, then sample - same RNG call order as before.
    word_count = random.randint(minimum, maximum)
    chosen = random.sample(words, word_count)
    chosen[0] = chosen[0].capitalize()
    return ' '.join(chosen) + '.'
def LoremIpsum(sentences):
    """Return *sentences* fake-latin sentences (15-30 words each) joined by spaces."""
    parts = [LoremIpsumSentence(15, 30) for _ in range(sentences)]
    return ' '.join(parts)
# Tokenizer states used by Tokenize/ParseFunction/Parse.
STATE_START = 0
STATE_IDENTIFIER = 1
STATE_STRING = 2
STATE_SPECIAL_CHAR = 3
STATE_ERROR = 4
# Names of the functions recognized by the #e# expression interpreter.
FUNCTIONNAME_REPEAT = 'repeat'
FUNCTIONNAME_RANDOM = 'random'
FUNCTIONNAME_CHR = 'chr'
FUNCTIONNAME_LOREMIPSUM = 'loremipsum'
def Tokenize(expression):
    """Split an #e# expression into [state, text] tokens.

    Produces STATE_IDENTIFIER tokens for runs of alphanumerics,
    STATE_STRING tokens for single-quoted text and STATE_SPECIAL_CHAR
    tokens for everything else.  An unterminated string replaces the
    whole result with a single [STATE_ERROR, message, partial] token.
    """
    result = []
    token = ''
    state = STATE_START
    while expression != '':
        # Consume one character at a time from the head of the expression.
        char = expression[0]
        expression = expression[1:]
        if char == "'":
            # A quote opens a string, or closes the one being built.
            if state == STATE_START:
                state = STATE_STRING
            elif state == STATE_IDENTIFIER:
                result.append([STATE_IDENTIFIER, token])
                state = STATE_STRING
                token = ''
            elif state == STATE_STRING:
                result.append([STATE_STRING, token])
                state = STATE_START
                token = ''
        elif char >= '0' and char <= '9' or char.lower() >= 'a' and char.lower() <= 'z':
            # Alphanumerics start/extend an identifier; inside a string
            # (or identifier) they are simply appended to the token.
            if state == STATE_START:
                token = char
                state = STATE_IDENTIFIER
            else:
                token += char
        elif char == ' ':
            # A space terminates an identifier but is literal inside a string.
            if state == STATE_IDENTIFIER:
                result.append([STATE_IDENTIFIER, token])
                token = ''
                state = STATE_START
            elif state == STATE_STRING:
                token += char
        else:
            # Any other character is a special character token (unless we
            # are inside a string, where it is literal).
            if state == STATE_IDENTIFIER:
                result.append([STATE_IDENTIFIER, token])
                token = ''
                state = STATE_START
                result.append([STATE_SPECIAL_CHAR, char])
            elif state == STATE_STRING:
                token += char
            else:
                result.append([STATE_SPECIAL_CHAR, char])
                token = ''
    # Flush the trailing token; an open string is an error.
    if state == STATE_IDENTIFIER:
        result.append([state, token])
    elif state == STATE_STRING:
        result = [[STATE_ERROR, 'Error: string not closed', token]]
    return result
def ParseFunction(tokens):
    """Parse one function call from the front of the token stream.

    Returns [functioncall, remaining_tokens] on success, where
    functioncall is [name, argument_tokens]; on error a message is
    printed and (None, tokens) is returned.  A bare quoted string or
    0x literal is shorthand for repeat(1, <literal>).
    """
    if len(tokens) == 0:
        print('Parsing error')
        return None, tokens
    if tokens[0][0] == STATE_STRING or tokens[0][0] == STATE_IDENTIFIER and tokens[0][1].startswith('0x'):
        # Literal shorthand: wrap it in an implicit repeat(1, ...).
        return [[FUNCTIONNAME_REPEAT, [[STATE_IDENTIFIER, '1'], tokens[0]]], tokens[1:]]
    if tokens[0][0] != STATE_IDENTIFIER:
        print('Parsing error')
        return None, tokens
    function = tokens[0][1]
    tokens = tokens[1:]
    if len(tokens) == 0:
        print('Parsing error')
        return None, tokens
    # Expect the opening parenthesis of the argument list.
    if tokens[0][0] != STATE_SPECIAL_CHAR or tokens[0][1] != '(':
        print('Parsing error')
        return None, tokens
    tokens = tokens[1:]
    if len(tokens) == 0:
        print('Parsing error')
        return None, tokens
    arguments = []
    while True:
        # Each argument is an identifier or string, followed by ',' or ')'.
        if tokens[0][0] != STATE_IDENTIFIER and tokens[0][0] != STATE_STRING:
            print('Parsing error')
            return None, tokens
        arguments.append(tokens[0])
        tokens = tokens[1:]
        if len(tokens) == 0:
            print('Parsing error')
            return None, tokens
        if tokens[0][0] != STATE_SPECIAL_CHAR or (tokens[0][1] != ',' and tokens[0][1] != ')'):
            print('Parsing error')
            return None, tokens
        if tokens[0][0] == STATE_SPECIAL_CHAR and tokens[0][1] == ')':
            tokens = tokens[1:]
            break
        tokens = tokens[1:]
        if len(tokens) == 0:
            print('Parsing error')
            return None, tokens
    return [[function, arguments], tokens]
def Parse(expression):
    """Parse a full #e# expression into a list of function calls.

    Successive calls may be joined with '+'.  Returns the list of
    [name, arguments] calls, or None (after printing a message) on any
    tokenizer or parser error.
    """
    tokens = Tokenize(expression)
    if len(tokens) == 0:
        print('Parsing error')
        return None
    if tokens[0][0] == STATE_ERROR:
        # Tokenizer reported an unterminated string: show message + context.
        print(tokens[0][1])
        print(tokens[0][2])
        print(expression)
        return None
    functioncalls = []
    while True:
        functioncall, tokens = ParseFunction(tokens)
        if functioncall == None:
            return None
        functioncalls.append(functioncall)
        if len(tokens) == 0:
            return functioncalls
        # Only '+' may join successive function calls.
        if tokens[0][0] != STATE_SPECIAL_CHAR or tokens[0][1] != '+':
            print('Parsing error')
            return None
        tokens = tokens[1:]
def InterpretInteger(token):
    """Return the decimal integer value of an identifier token, or None.

    None is returned for non-identifier tokens and for identifier text
    that is not a plain decimal integer (e.g. '0x41' or a name).
    """
    if token[0] != STATE_IDENTIFIER:
        return None
    try:
        return int(token[1])
    except ValueError:
        # Narrowed from a bare except: only a failed int() conversion
        # should mean "not a decimal number".
        return None
def Hex2Bytes(hexadecimal):
    """Decode a hexadecimal string into bytes, or return None when invalid.

    Odd-length input is left-padded with '0' so '141' decodes as 0x01 0x41.
    """
    if len(hexadecimal) % 2 == 1:
        hexadecimal = '0' + hexadecimal
    try:
        return binascii.a2b_hex(hexadecimal)
    except (binascii.Error, TypeError):
        # Narrowed from a bare except: a2b_hex raises binascii.Error
        # (Python 3) or TypeError (Python 2) on non-hex input.
        return None
def InterpretHexInteger(token):
    """Return the integer value of a '0x...' identifier token, or None."""
    if token[0] != STATE_IDENTIFIER:
        return None
    if not token[1].startswith('0x'):
        return None
    decoded = Hex2Bytes(token[1][2:])  # renamed: no longer shadows builtin 'bytes'
    if decoded == None:
        return None
    integer = 0
    for byte in decoded:
        # Python 2 iterates a byte string as 1-char strings (need ord),
        # Python 3 iterates bytes as ints; the original ord(byte) raised
        # TypeError on Python 3.
        if not isinstance(byte, int):
            byte = ord(byte)
        integer = integer * 0x100 + byte
    return integer
def InterpretNumber(token):
    """Interpret a token as a decimal or hexadecimal number; None on failure."""
    decimal_value = InterpretInteger(token)
    if decimal_value is not None:
        return decimal_value
    return InterpretHexInteger(token)
def InterpretBytes(token):
    """Return the byte-sequence value of a token.

    Quoted strings are returned as-is; '0x...' identifiers are hex-decoded;
    anything else yields None.
    """
    kind = token[0]
    text = token[1]
    if kind == STATE_STRING:
        return text
    if kind != STATE_IDENTIFIER or not text.startswith('0x'):
        return None
    return Hex2Bytes(text[2:])
def CheckFunction(functionname, arguments, countarguments, maxcountarguments=None):
    """Validate the argument count of a function call.

    Without maxcountarguments the count must equal countarguments exactly;
    otherwise it must lie in [countarguments, maxcountarguments].
    Prints a message and returns True on a mismatch, False when OK.
    """
    given = len(arguments)
    if maxcountarguments is None:
        # Dedicated messages for the common no-arg / one-arg cases.
        if countarguments == 0 and given != 0:
            print('Error: function %s takes no arguments, %d are given' % (functionname, given))
            return True
        if countarguments == 1 and given != 1:
            print('Error: function %s takes 1 argument, %d are given' % (functionname, given))
            return True
        if countarguments != given:
            print('Error: function %s takes %d arguments, %d are given' % (functionname, countarguments, given))
            return True
    elif given < countarguments or given > maxcountarguments:
        print('Error: function %s takes between %d and %d arguments, %d are given' % (functionname, countarguments, maxcountarguments, given))
        return True
    return False
def CheckNumber(argument, minimum=None, maximum=None):
    """Interpret *argument* as a number and range-check it.

    Prints a message and returns None when the token is not a number or
    falls outside the optional [minimum, maximum] bounds.
    """
    number = InterpretNumber(argument)
    if number is None:
        print('Error: argument should be a number: %s' % argument[1])
        return None
    if minimum is not None and number < minimum:
        print('Error: argument should be minimum %d: %d' % (minimum, number))
        return None
    if maximum is not None and number > maximum:
        print('Error: argument should be maximum %d: %d' % (maximum, number))
        return None
    return number
def Interpret(expression):
    """Evaluate a parsed #e# expression and return the generated data.

    Supports repeat(count, bytes), random(count), loremipsum(count) and
    chr(value[, value2]); the output of successive '+'-joined calls is
    concatenated.  Returns None on any parse or argument error.
    NOTE(review): the accumulator starts as '' while repeat/chr may append
    the bytes result of Hex2Bytes - this mixing works on Python 2 only;
    confirm before running under Python 3.
    """
    functioncalls = Parse(expression)
    if functioncalls == None:
        return None
    decoded = ''
    for functioncall in functioncalls:
        functionname, arguments = functioncall
        if functionname == FUNCTIONNAME_REPEAT:
            if CheckFunction(functionname, arguments, 2):
                return None
            number = CheckNumber(arguments[0], minimum=1)
            if number == None:
                return None
            bytes = InterpretBytes(arguments[1])  # NOTE: shadows the builtin 'bytes'
            if bytes == None:
                print('Error: argument should be a byte sequence: %s' % arguments[1][1])
                return None
            decoded += number * bytes
        elif functionname == FUNCTIONNAME_RANDOM:
            if CheckFunction(functionname, arguments, 1):
                return None
            number = CheckNumber(arguments[0], minimum=1)
            if number == None:
                return None
            decoded += ''.join([chr(random.randint(0, 255)) for x in range(number)])
        elif functionname == FUNCTIONNAME_LOREMIPSUM:
            if CheckFunction(functionname, arguments, 1):
                return None
            number = CheckNumber(arguments[0], minimum=1)
            if number == None:
                return None
            decoded += LoremIpsum(number)
        elif functionname == FUNCTIONNAME_CHR:
            # chr(a) emits one byte; chr(a, b) emits the range a..b inclusive.
            if CheckFunction(functionname, arguments, 1, 2):
                return None
            number = CheckNumber(arguments[0], minimum=1, maximum=255)
            if number == None:
                return None
            if len(arguments) == 1:
                decoded += chr(number)
            else:
                number2 = CheckNumber(arguments[1], minimum=1, maximum=255)
                if number2 == None:
                    return None
                decoded += ''.join([chr(n) for n in range(number, number2 + 1)])
        else:
            print('Error: unknown function: %s' % functionname)
            return None
    return decoded
def FilenameCheckHash(filename):
    """Decode the special filename prefixes into inline content.

    '#h#' hex, '#b#' base64, '#e#' expression (Interpret), a bare '#'
    means literal text.  Returns '' for a normal filename and None when
    a prefixed payload fails to decode.
    """
    if filename.startswith('#h#'):
        return Hex2Bytes(filename[3:])
    if filename.startswith('#b#'):
        try:
            return binascii.a2b_base64(filename[3:])
        except:
            return None
    if filename.startswith('#e#'):
        return Interpret(filename[3:])
    if filename.startswith('#'):
        return filename[1:]
    return ''
def Transform(fIn, fIn2, fOut, commandPython):
    """Apply the Python expression *commandPython* to each input byte.

    The expression is eval'ed per byte with 'byte', optional 'byte2'
    (from the second stream) and 'position' in scope; a non-None result
    is written to fOut.  WARNING: eval of a user-supplied expression -
    only run trusted commands.
    """
    position = 0
    while True:
        inbyte = fIn.read(1)
        if not inbyte:
            break
        byte = ord(inbyte)
        if fIn2 != None:
            # Second stream is read in lockstep with the first.
            inbyte2 = fIn2.read(1)
            byte2 = ord(inbyte2)
        outbyte = eval(commandPython)
        if outbyte != None:
            fOut.write(chr(outbyte))
        position += 1
#Fix for http://bugs.python.org/issue11395
def StdoutWriteChunked(data):
    # On Python 3 write raw bytes directly to the binary stdout buffer;
    # on Python 2 write in 10000-byte chunks and flush after each one to
    # work around the Windows stdout issue referenced above.
    if sys.version_info[0] > 2:
        sys.stdout.buffer.write(data)
    else:
        while data != '':
            sys.stdout.write(data[0:10000])
            try:
                sys.stdout.flush()
            except IOError:
                # Broken pipe: stop writing silently.
                return
            data = data[10000:]
def Translate(filenameInput, commandPython, options):
    """Open input/output streams, run optional scripts, apply commandPython.

    filenameInput == '' reads stdin (switched to binary mode on Windows);
    otherwise the name may carry the #h#/#b#/#e#/# prefixes decoded by
    FilenameCheckHash.  WARNING: commandPython and the -s/-e options are
    eval/exec'ed with full interpreter access - trusted input only.
    """
    if filenameInput == '':
        if sys.platform == 'win32':
            import msvcrt
            msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
        try:
            # Python 3: binary stdin; Python 2 has no .buffer attribute.
            fIn = sys.stdin.buffer
        except:
            fIn = sys.stdin
    else:
        decoded = FilenameCheckHash(filenameInput)
        if options.literalfilenames or decoded == '':
            fIn = open(filenameInput, 'rb')
        elif decoded == None:
            print('Error parsing filename: ' + filenameInput)
            return
        else:
            fIn = StringIO(decoded)
    if options.secondbytestream != '':
        # Same prefix handling for the optional second byte stream (-2).
        decoded = FilenameCheckHash(options.secondbytestream)
        if options.literalfilenames or decoded == '':
            fIn2 = open(options.secondbytestream, 'rb')
        elif decoded == None:
            print('Error parsing filename: ' + options.secondbytestream)
            return
        else:
            fIn2 = StringIO(decoded)
    else:
        fIn2 = None
    if options.output == '':
        if sys.platform == 'win32':
            import msvcrt
            msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
        fOut = sys.stdout
    else:
        fOut = open(options.output, 'wb')
    if options.script != '':
        # NOTE(review): execfile only exists on Python 2 - confirm target.
        execfile(options.script, globals())
    if options.execute != '':
        exec(options.execute, globals())
    if options.fullread:
        # Whole-file mode: commandPython must name a callable taking bytes.
        Output(fOut, eval(commandPython)(fIn.read()))
    elif options.regex != '' or options.filterregex != '':
        content = fIn.read()
        if options.regex != '':
            Output(fOut, re.sub(options.regex, eval(commandPython), content))
        else:
            Output(fOut, re.sub(options.filterregex, eval(commandPython), ''.join([x.group() for x in re.finditer(options.filterregex, content)])))
    else:
        Transform(fIn, fIn2, fOut, commandPython)
    # NOTE(review): when stdin was read via sys.stdin.buffer, fIn != sys.stdin
    # holds and the buffer is closed here - confirm this is intended.
    if fIn != sys.stdin:
        fIn.close()
    if fIn2 != None:
        fIn2.close()
    if fOut != sys.stdout:
        fOut.close()
def Main():
    """Entry point: build the option parser and dispatch positional args to Translate."""
    # Appended verbatim to the --help usage text.
    moredesc = '''
Example: translate.py -o svchost.exe.dec svchost.exe 'byte ^ 0x10'
"byte" is the current byte in the file, 'byte ^ 0x10' does an X0R 0x10
Extra functions:
rol(byte, count)
ror(byte, count)
IFF(expression, valueTrue, valueFalse)
Sani1(byte)
Sani2(byte)
ZlibD(bytes)
ZlibRawD(bytes)
GzipD(bytes)
Variable "position" is an index into the input file, starting at 0
Source code put in the public domain by Didier Stevens, no Copyright
Use at your own risk
https://DidierStevens.com'''
    oParser = optparse.OptionParser(usage='usage: %prog [options] [file-in] [file-out] command [script]\n' + __description__ + moredesc, version='%prog ' + __version__)
    oParser.add_option('-o', '--output', default='', help='Output file (default is stdout)')
    oParser.add_option('-s', '--script', default='', help='Script with definitions to include')
    oParser.add_option('-f', '--fullread', action='store_true', default=False, help='Full read of the file')
    oParser.add_option('-r', '--regex', default='', help='Regex to search input file for and apply function to')
    oParser.add_option('-R', '--filterregex', default='', help='Regex to filter input file for and apply function to')
    oParser.add_option('-e', '--execute', default='', help='Commands to execute')
    oParser.add_option('-2', '--secondbytestream', default='', help='Second bytestream')
    oParser.add_option('-l', '--literalfilenames', action='store_true', default=False, help='Do not interpret filenames')
    oParser.add_option('-m', '--man', action='store_true', default=False, help='print manual')
    (options, args) = oParser.parse_args()
    if options.man:
        oParser.print_help()
        PrintManual()
        return
    # Positional forms: [command] | [file-in command] |
    # [file-in file-out command] | [file-in file-out command script]
    if len(args) == 0 or len(args) > 4:
        oParser.print_help()
    elif len(args) == 1:
        Translate('', args[0], options)
    elif len(args) == 2:
        Translate(args[0], args[1], options)
    elif len(args) == 3:
        options.output = args[1]
        Translate(args[0], args[2], options)
    elif len(args) == 4:
        options.output = args[1]
        options.script = args[3]
        Translate(args[0], args[2], options)
if __name__ == '__main__':
    Main()
| [
"didier.stevens@gmail.com"
] | didier.stevens@gmail.com |
10235484ece4de8311785cbc4ef11abeb245c5aa | 577aa26c18c7ae5a2be3b1d4ef6cc4a88f0f455a | /bot.py | 379414f1d2d588c384aa3390f91f5be270390603 | [] | no_license | boringcactus/head-receiver-bot | add9f78ee771b597be8e342d914601a1270abcfe | e8012db3c2cae22771e1f68965eff7ff85a14014 | refs/heads/master | 2021-04-18T17:42:25.066518 | 2020-03-24T02:31:15 | 2020-03-24T02:31:15 | 249,567,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,972 | py | import logging
import os
from io import BytesIO
import telegram
from telegram.ext import Updater, MessageHandler, Filters
from dotenv import load_dotenv, find_dotenv
from PIL import Image, ImageDraw, ImageFont
# Load environment variables (e.g. TG_BOT_TOKEN) from a .env file if present.
load_dotenv(find_dotenv())
# Font used to render the user's name onto the composed image.
font = ImageFont.truetype("SourceSansPro-Regular.ttf", 48)
def apply(name, photo):
    """Composite *photo* (raw image bytes) onto the 'orig.png' template and
    stamp *name* over it; return the rendered image as PNG bytes.

    (Note: the name shadows the Python 2 builtin 'apply'; kept for callers.)
    """
    rendered = BytesIO()
    with Image.open('orig.png') as base:
        result = base.convert('RGBA')
        icon = Image.open(BytesIO(photo)).convert('RGBA')
        # Perspective-warp the avatar onto the template's target area.
        icon = icon.transform(base.size, Image.PERSPECTIVE, [1/0.39, 0, -430, 0.01807, 1/0.49, -365, 0, 0, 1])
        result.alpha_composite(icon)
        draw = ImageDraw.Draw(result)
        name_w, name_h = draw.textsize(name, font=font)
        left = 420 - name_w / 2
        top = 100 - name_h / 2
        # Grey background box centered at (420, 100), then the name on top.
        draw.rectangle([(left, top), (420 + name_w / 2, 100 + name_h / 2)], fill=(190, 190, 190, 255))
        draw.text((left, top), name, font=font, fill=(0, 0, 0, 255))
        result.save(rendered, 'PNG')
    return rendered.getvalue()
def process(update: telegram.Update, context):
    """Handle an incoming Telegram message: render the sender's (or forwarded
    sender's) profile picture onto the template and reply with the image."""
    target = update.effective_user
    # Forwarded messages are processed on behalf of the original sender.
    if update.effective_message is not None and update.effective_message.forward_from is not None:
        target = update.effective_message.forward_from
    name = target.full_name
    photos = target.get_profile_photos(limit=1).photos
    if len(photos) == 0:
        error = "Can't find profile picture for {}".format(name)
        context.bot.send_message(chat_id=update.effective_chat.id, text=error)
        return
    # NOTE(review): this repeats the get_profile_photos API call made above.
    photo_all_sizes = target.get_profile_photos(limit=1).photos[0]
    # Pick the widest available size of the first profile photo.
    photo_best_size = max(photo_all_sizes, key=lambda x: x.width)
    photo_file = photo_best_size.get_file()
    photo = photo_file.download_as_bytearray()
    result = apply(name, photo)
    context.bot.send_photo(chat_id=update.effective_chat.id, photo=BytesIO(result))
    log_message = 'Handled request for "{}"'.format(name)
    if target is not update.effective_user:
        log_message += ' on behalf of "{}"'.format(update.effective_user.full_name)
    logger.info(log_message)
if __name__ == "__main__":
# Set these variable to the appropriate values
TOKEN = os.environ.get('TG_BOT_TOKEN')
NAME = "head-receiver-bot"
# Port is given by Heroku
PORT = os.environ.get('PORT')
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
# Set up the Updater
updater = Updater(token=TOKEN, use_context=True)
dp = updater.dispatcher
# Add handlers
dp.add_handler(MessageHandler(Filters.all, process))
# Start the webhook
if PORT is None:
updater.start_polling()
else:
updater.start_webhook(listen="0.0.0.0",
port=int(PORT),
url_path=TOKEN)
updater.bot.setWebhook("https://{}.herokuapp.com/{}".format(NAME, TOKEN))
updater.idle()
| [
"melody@boringcactus.com"
] | melody@boringcactus.com |
b8b058c24e942784ccc2a2b2ef0ed358711175a1 | 400086979e153dea632339ff23e0a2cce3e40d77 | /starting_kit/code/model.py | eb9f26fa61b3eeba03160dc4ff64357707d068ca | [] | no_license | PhamAlexT/MOSQUITO | 99b1c7c3eb2490ec5c073bbf1da1d5697d4032bf | 6c93a49367c62b9159bfa3291b0dd0de9a4558e4 | refs/heads/master | 2020-12-30T05:19:39.235458 | 2020-05-09T12:37:11 | 2020-05-09T12:37:11 | 238,873,909 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,523 | py | '''
Sample predictive model.
You must supply at least 4 methods:
- fit: trains the model.
- predict: uses the model to perform predictions.
- save: saves the model.
- load: reloads the model.
'''
import pickle
import numpy as np
from os.path import isfile
from sklearn.base import BaseEstimator
from sklearn.ensemble import RandomForestClassifier
# Preprocessing de la bibliothèque
from prePro import prepro
# Preprocessing de la bib scikit learn
from sklearn.preprocessing import StandardScaler
class model(BaseEstimator):
    """Classification pipeline: custom preprocessing (prepro) followed by a
    StandardScaler and a classifier (RandomForest by default)."""
    def __init__(self, classifier=None):
        '''
        Constructor of the "model" class.
        param :
            classifier = A classification model (default: RandomForestClassifier)
        '''
        # NOTE: the previous version used a RandomForestClassifier *instance*
        # as the default argument value; Python evaluates defaults once, so
        # every default-constructed model shared (and re-fitted) the very
        # same estimator object.  Building it here gives each instance its own.
        if classifier is None:
            classifier = RandomForestClassifier(random_state=42, n_estimators=100, max_depth=100)
        # Our classifier
        self.classifier = classifier
        # Custom preprocessing step (prepro team code)
        self.preprocessing1 = prepro()
        # Scikit-learn preprocessing step
        self.preprocessing2 = StandardScaler()
    def fit(self, X, y, sample_weights=None):
        """
        Preprocess the training set and build a forest of trees from it
        params:
            X : training dataset
            y : Labels of each data on the dataset
        return :
            Our model 'Trained'
        """
        X = self.preprocessing1.fit_transform(X, y)
        X = self.preprocessing2.fit_transform(X, y)
        self.classifier.fit(X, y)
        return self
    def predict_proba(self, X):
        """
        Predict class probabilities
        param :
            X : The input dataset
        return :
            The class probabilities of the input samples
        """
        X = self.preprocessing1.transform(X)
        X = self.preprocessing2.transform(X)
        return self.classifier.predict_proba(X)
    def predict(self, X):
        """
        Predict the class of a given dataset
        param :
            X : The dataset
        return :
            The predicted classes
        """
        y_proba = self.predict_proba(X)
        return np.argmax(y_proba, axis=1)
    def save(self, path="./"):
        """Serialize this model to <path>_model.pickle."""
        # Use a context manager so the file handle is always closed
        # (the previous version leaked it).
        with open(path + '_model.pickle', "wb") as f:
            pickle.dump(self, f)
    def load(self, path="./"):
        """Reload a model from <path>_model.pickle and return it.

        Note: rebinding 'self' does not mutate the caller's instance -
        callers must use the returned object.
        """
        modelfile = path + '_model.pickle'
        if isfile(modelfile):
            with open(modelfile, 'rb') as f:
                self = pickle.load(f)
            print("Model reloaded from: " + modelfile)
        return self
"liliaizri99@gmail.com"
] | liliaizri99@gmail.com |
27460d30d032a0d1bdc979b8ff2544520320a468 | 240f4b564a53ead9076276258e5f3749fc9efb99 | /yproblem/utils.py | 68982c510dd39d6f80232e5fd0460f3e155bc350 | [
"MIT"
] | permissive | dbojanjac/effective2D | fe7c5fd8d53684274b3ffa28f43723d5d4d8c276 | 8d124a103a5bd8e68d1bc23c4e10fe4d3cd27759 | refs/heads/master | 2023-02-17T12:06:08.483294 | 2020-09-10T17:23:21 | 2020-09-10T17:23:21 | 168,364,828 | 1 | 0 | MIT | 2021-01-13T16:35:32 | 2019-01-30T15:21:40 | GLSL | UTF-8 | Python | false | false | 386 | py | import dolfin as df
import matplotlib.pyplot as plt
def save_field_plots(output_folder, f1, f2):
    """Plot both fields with dolfin and save them as PDFs under <output_folder>/plots."""
    for field, stem in ((f1, "f1"), (f2, "f2")):
        df.plot(field)
        plt.savefig(output_folder + "/plots/" + stem + ".pdf")
def save_pvd(output_folder, f1, f2):
    """Write both fields to ParaView .pvd files under <output_folder>/PVD."""
    for field, stem in ((f1, "f1"), (f2, "f2")):
        writer = df.File(output_folder + "/PVD/" + stem + ".pvd")
        writer << field
| [
"darko.janekovic@fer.hr"
] | darko.janekovic@fer.hr |
76ba16f3571ee2c45140eede623e32fb986c8881 | 25cab1a6c2d5370be53ba54236f5947e2eb9cb28 | /data/audio/__init__.py | bf7c93a9fb836f15bd0979177f3a783ab03b8c24 | [] | no_license | chenliming-1/tr_apiCode | b9aca368e656ae4651dc9e2a4bfae80ecb59ec95 | 36cd39a9d1dac4df5a4ecc57c1bb391a863c0035 | refs/heads/main | 2023-08-26T22:49:25.086745 | 2021-10-20T10:22:55 | 2021-10-20T10:22:55 | 402,780,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | #!Date:2019/02/21 17:02
# !@Author:龚远琪
from .uploadaudio import uploadaudio
__all__ = ['uploadaudio'] | [
"gongyq@histudy.com"
] | gongyq@histudy.com |
35dcdc43f617001a03482a66d869671fe3c327ec | 867b776ad26475b4fffb28cb4a7dbbff167863dc | /src/CsvReader.py | 4f5a987fa9454f377b78579e649bdbeb273161f7 | [] | no_license | knp56/Calculator | 2e108bb144edc98523c5bf055e5daa3c39bad867 | 4e8ff3064ebbc6ecf511c6283b29f84c184c39a1 | refs/heads/main | 2023-06-22T12:04:52.687412 | 2021-07-01T21:02:11 | 2021-07-01T21:02:11 | 382,148,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | import csv
#from pprint import pprint
def ClassFactory(class_name, dictionary):
    """Dynamically create a class called *class_name* whose attributes are
    taken from *dictionary* (e.g. one CSV row)."""
    return type(class_name, (object,), dictionary)
class CsvReader:
    """Read a CSV file with a header row into a list of per-row dicts.

    NOTE: the previous version accumulated rows in a *class-level* ``data``
    list shared by every instance, so each new reader also saw all rows
    read by earlier readers; ``data`` is now a per-instance attribute.
    """
    def __init__(self, filepath):
        """Parse *filepath* with csv.DictReader, storing one dict per row."""
        self.opdata = []
        self.data = []
        # The context manager closes the file even if parsing fails.
        with open(filepath) as text_data:
            for row in csv.DictReader(text_data):
                self.opdata.append(row)
                self.data.append(row)

    def return_data_as_objects(self, class_name):
        """Wrap each row dict in a dynamically created class named *class_name*."""
        return [ClassFactory(class_name, row) for row in self.data]
"knp56@njit.edu"
] | knp56@njit.edu |
20076d99682732c095519240df2c951bfe0aae37 | 55ab64b67d8abc02907eb43a54ff6c326ded6b72 | /scripts/startup/tila_OP_SmartDelete.py | cc9ba649d4972b3487b5351419e9a875b4d2745a | [
"MIT"
] | permissive | Tilapiatsu/blender-custom_config | 2f03b0bb234c3b098d2830732296d199c91147d0 | 00e14fc190ebff66cf50ff911f25cf5ad3529f8f | refs/heads/master | 2023-08-16T14:26:39.990840 | 2023-08-16T01:32:41 | 2023-08-16T01:32:41 | 161,249,779 | 6 | 2 | MIT | 2023-04-12T05:33:59 | 2018-12-10T23:25:14 | Python | UTF-8 | Python | false | false | 2,449 | py | import bpy
# Add-on metadata read by Blender's add-on manager.
bl_info = {
    "name": "Tila : Smart Delete",
    "author": "Tilapiatsu",
    "version": (1, 0, 0, 0),
    "blender": (2, 80, 0),
    "location": "View3D",
    "category": "Object",
}
class TILA_SmartDeleteOperator(bpy.types.Operator):
    bl_idname = "object.tila_smartdelete"
    bl_label = "TILA: Smart Delete"
    bl_options = {'REGISTER', 'UNDO'}
    # When True, open Blender's delete menu instead of deleting directly.
    menu: bpy.props.BoolProperty(name='call_menu', default=False)
    def execute(self, context):
        # Dispatch on the active editor type, then on the interaction mode,
        # to pick the matching delete/dissolve operator.
        if context.space_data.type == 'VIEW_3D':
            if self.menu:
                if context.mode == 'EDIT_MESH':
                    bpy.ops.wm.call_menu(name='VIEW3D_MT_edit_mesh_delete')
                elif context.mode == 'EDIT_CURVE':
                    bpy.ops.wm.call_menu(name='VIEW3D_MT_edit_curve_delete')
            else:
                if context.mode == 'EDIT_MESH':
                    # Select-mode flags are (vertex, edge, face); several may
                    # be enabled at once, so each is handled in turn.
                    current_mesh_mode = context.tool_settings.mesh_select_mode[:]
                    # if vertex mode on
                    if current_mesh_mode[0]:
                        bpy.ops.mesh.dissolve_verts()
                    # if edge mode on
                    if current_mesh_mode[1]:
                        bpy.ops.mesh.dissolve_edges(use_verts=True)
                    # if face mode on
                    if current_mesh_mode[2]:
                        bpy.ops.mesh.delete(type='FACE')
                elif context.mode == 'EDIT_CURVE':
                    bpy.ops.curve.delete(type='VERT')
                elif context.mode == 'EDIT_GPENCIL':
                    try:
                        bpy.ops.gpencil.delete(type='POINTS')
                    except Exception as e:
                        print("Warning: %r" % e)
                elif context.mode == 'EDIT_METABALL':
                    bpy.ops.mball.delete_metaelems('EXEC_DEFAULT')
                elif context.mode == 'OBJECT':
                    bpy.ops.object.delete(use_global=False, confirm=False)
        elif context.space_data.type == 'OUTLINER':
            bpy.ops.outliner.delete()
        elif context.space_data.type == 'FILE_BROWSER':
            bpy.ops.file.delete()
        # elif context.space_data.type == 'IMAGE_EDITOR':
        #     layout.label("No Context! image editor")
        return {'FINISHED'}
# Reserved for keymap entries; currently unused.
addon_keymaps = []
classes = (TILA_SmartDeleteOperator,)
# register_classes_factory returns paired register/unregister callables.
register, unregister = bpy.utils.register_classes_factory(classes)
if __name__ == "__main__":
    register()
| [
"tilapiatsu@hotmail.fr"
] | tilapiatsu@hotmail.fr |
efdf6baebd3af374b832d92ff380d8150baa87ab | 5b41d2e551982784a1e53e49f12c7b058403bca8 | /venv/Scripts/easy_install-script.py | 1bbb1a2552e96f5fe5ac7d0cb9794956d25349a1 | [] | no_license | TsvetkovEvgenij/HelloWorld | 79b67793bc49e9f1fe717e3dc5ec98f2f9ea58cf | 1fa97bbb2015502c1289828fc36e598fb874d29a | refs/heads/master | 2020-09-23T21:33:43.203083 | 2019-12-03T10:27:07 | 2019-12-03T10:27:07 | 225,592,335 | 0 | 0 | null | null | null | null | WINDOWS-1251 | Python | false | false | 469 | py | #!C:\Users\Ковшикова\PycharmProjects\HelloWorld\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
# Auto-generated setuptools console-script shim - regenerate rather than edit.
if __name__ == '__main__':
    # Normalize argv[0]: drop the '-script.py'/'.exe' wrapper suffix so it
    # matches the console-script name registered with setuptools.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
    )
| [
"tsvetkov.evgenij@gmail.com"
] | tsvetkov.evgenij@gmail.com |
f66d8eca2d435b8587e7ca130d23d12400ed0211 | 3fbd28e72606e5358328bfe4b99eb0349ca6a54f | /.history/a_Young_Physicist_20210607193741.py | 863458084f547b6a9bf662840ab4c6ff7880d758 | [] | no_license | Tarun1001/codeforces | f0a2ef618fbd45e3cdda3fa961e249248ca56fdb | 576b505d4b8b8652a3f116f32d8d7cda4a6644a1 | refs/heads/master | 2023-05-13T04:50:01.780931 | 2021-06-07T21:35:26 | 2021-06-07T21:35:26 | 374,399,423 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | n= int(input())
x=[]
for i in range(n):
p=map(int,input().split()))
x.append(p)
a=b=c=0
for i in x:
a+=i[0]
b+=i[1]
c+=i[2]
if a==b==c==0:
print("YES")
else:
print("NO")
| [
"tarunsivasai8@gmail.com"
] | tarunsivasai8@gmail.com |
5570cc0247c6ea3b0cdc3dc2629e40b676e7e7e7 | 02ec15c829f1755fb7981c561a40c8f4a968a028 | /corn/consumption.py | 5e852833107ff9c83d9df7d77a7a7a7763de6f50 | [
"MIT"
] | permissive | UGA-BSAIL/Corn-Segmentation | 0cf2b4beadf99c0f0ecf679264a50892188e6d14 | 79df856e3fc487508e24e9821e5ca49911064c73 | refs/heads/master | 2023-02-19T01:52:48.751927 | 2021-01-17T23:13:57 | 2021-01-17T23:13:57 | 250,403,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,312 | py | # -*- coding: utf-8 -*-
"""
@purpose: This file is used for batch detection of images using all models.
@input: Add all Model paths to "weights" list, and test image directory path "strDirectory".
@output: Masked images along with percent consumption will be saved under output/ directory. A matlab file will be created for all Prediction and Ground Truth values.
Created on Sun Dec 23 03:54:14 2018
@author: shrin
"""
import os
import sys
import numpy as np
import tensorflow as tf
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize
from mrcnn.visualize import display_images
from mrcnn.visualize import save_image
#import 2 different classes
import corn_2class
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
CORN_DIR = os.path.join(ROOT_DIR, "datasets/corn")
config_2class= corn_2class.CornConfig()
CORN_DIR = os.path.join(ROOT_DIR, "datasets/corn")
class InferenceConfig(config_2class.__class__):
# Run detection on one image at a time
GPU_COUNT = 1
IMAGES_PER_GPU = 1
DETECTION_MIN_CONFIDENCE = 0.8
config_2class = InferenceConfig()
config_2class.display()
dataset_2class = corn_2class.CornDataset()
dataset_2class.load_corn(CORN_DIR, "test")
dataset_2class.prepare()
print("Images: {}\nClasses: {}".format(len(dataset_2class.image_ids), dataset_2class.class_names))
# Create model in inference mode
with tf.device("/gpu:0"):
model_2class = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR,
config=config_2class)
def get_ax(rows=1, cols=1, size=16):
    """Return a matplotlib Axes grid of rows x cols for visualization.

    The figure size is scaled per cell so individual panels stay readable.
    """
    # Local import: `plt` is referenced but matplotlib.pyplot is never
    # imported at module level in this file, so the original raised
    # NameError when called.
    import matplotlib.pyplot as plt
    _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
    return ax
# Set path to corn weights file
#weights_path_2class = os.path.join(ROOT_DIR, "logs/appr1/2class_300im_600ep/mask_rcnn_corn_2class_0600.h5")
# Load weights
#print("Loading weights ", weights_path_3class)
#model_3class.load_weights(weights_path_3class, by_name=True)
#print("Loading weights ", weights_path_2class)
#model_2class.load_weights(weights_path_2class, by_name=True)
def get_cornList(r, n_classes, image) :
    """Group Mask R-CNN detections into per-cob records.

    r: detection result dict with 'rois', 'class_ids', 'masks'.
    n_classes: 2 or 3 - class 1 is always the whole cob; class 2 is the
    consumed ('red') region for 2-class models, 'yellow' for 3-class ones
    (where class 3 is then 'red').  image: the input image; its width sets
    the box-matching tolerance.  Returns (cornList, cornMasks,
    redCornMasks): each corn dict gains its contained red/yellow
    detections plus 'percent_eaten', and the left-most cob is moved to
    the end of the list.
    """
    from collections import Counter
    cornList = []
    redCornList=[]
    yellowCornList=[]
    no_of_corns = no_of_red = no_of_yellow = 0
    classes = r['class_ids']
    masks = r['masks']
    regions = r['rois']
    cornMasks = []
    redCornMasks = []
    #print(regions)
    #print(classes)
    #print(masks.shape)
    # Bounding-box tolerance: 7.5% of the image width.
    offset = round(((image.shape)[1])*0.075)
    #print('Offset : ',offset)
    class_detected = Counter(classes)
    no_of_corns = class_detected[1]
    if(n_classes == 2) :
        no_of_red = class_detected[2]
    elif(n_classes == 3) :
        no_of_yellow = class_detected[2]
        no_of_red = class_detected[3]
    #print(no_of_corns, no_of_red, no_of_yellow)
    # Split detections into whole-cob / red / yellow record lists.
    for index, roi, class_id in zip(range(len(regions)), regions, classes):
        mask = masks[:,:,index]
        if(class_id == 1):
            #print(mask.shape)
            cornList.append({'cornRoi' : roi, 'class_id' : class_id, 'mask' : mask, 'mask_pixels' : (mask.sum()), 'redCorns' : [], 'yellowCorns' : []})
            cornMasks.append(mask)
        if(class_id == 2 and n_classes == 2) :
            redCornList.append({'redCornRoi' : roi, 'class_id' : class_id, 'mask' : mask, 'mask_pixels' : (mask.sum())})
            redCornMasks.append(mask)
        elif(class_id == 2 and n_classes == 3) :
            yellowCornList.append({'yellowCornRoi' : roi, 'class_id' : class_id, 'mask' : mask, 'mask_pixels' : (mask.sum())})
        if(class_id == 3 and n_classes == 3) :
            redCornList.append({'redCornRoi' : roi, 'class_id' : class_id, 'mask' : mask, 'mask_pixels' : (mask.sum())})
    #redCornIdx = []
    # Assign each red detection to every cob whose offset-padded box fully
    # contains it, and accumulate the consumed pixel area per cob.
    for corn in cornList:
        corn_y1 = corn['cornRoi'][0] - offset
        corn_x1 = corn['cornRoi'][1] - offset
        corn_y2 = corn['cornRoi'][2] + offset
        corn_x2 = corn['cornRoi'][3] + offset
        corn_area = corn['mask_pixels']
        eaten_area = 0
        # print('RedCorns Before : ', corn['redCorns'])
        for redCorn in redCornList:
            if((corn_y1 <= redCorn['redCornRoi'][0]) and (corn_x1 <= redCorn['redCornRoi'][1])
                and (corn_y2 >= redCorn['redCornRoi'][2]) and (corn_x2 >= redCorn['redCornRoi'][3])):
                corn['redCorns'].append(redCorn)
                eaten_area += redCorn['mask_pixels']
                #redCornIdx.append(redCorn)
                #redCornList.remove(redCorn)
        percent_eaten = round((eaten_area / corn_area) * 100 , 3)
        corn.update({'percent_eaten' : percent_eaten})
        #print('RedCorns After : ', corn['redCorns'])
    #redCornList = [e for e in redCornList if e not in redCornIdx]
    # if len(redCornList) > 0 :
    #     print("There are ", len(redCornList) ," undetected corn cob present which are almost fully consumed.")
    #print('RedCorns After : ', redCornList)
    #print('Final CORNS : \n', cornList)
    # Locate the left-most cob and rotate it to the end of the list.
    # NOTE(review): 'height'/'width' below look swapped relative to the
    # (y1, x1, y2, x2) ROI ordering used above - verify against callers.
    leftCorn={}
    left_idx = len(cornList) - 1
    if(len(cornList) > 1):
        for corn_idx in range(len(cornList)):
            corn = cornList[corn_idx]
            corn_y1 = corn['cornRoi'][0]
            corn_x1 = corn['cornRoi'][1]
            corn_y2 = corn['cornRoi'][2]
            corn_x2 = corn['cornRoi'][3]
            height = corn_x2 - corn_x1
            width = corn_y2 - corn_y1
            replaceLeft = False
            if(len(leftCorn) == 0):
                replaceLeft = True
            else :
                if(height > width) :
                    if(corn_y1 < leftCorn['cornRoi'][0]) :
                        replaceLeft = True
                elif(width > height) :
                    if(corn_x1 < leftCorn['cornRoi'][1]) :
                        replaceLeft = True
            if replaceLeft:
                leftCorn = corn
                left_idx = corn_idx
        cornList.pop(left_idx)
        cornList.append(leftCorn)
    # Restack per-instance masks into (H, W, N) arrays when non-empty;
    # empty lists are returned unchanged.
    if len(cornMasks) > 0:
        ret_cornMasks = np.transpose(np.asarray(cornMasks),(1,2,0))
    else:
        ret_cornMasks = cornMasks
    if len(redCornMasks) > 0:
        ret_redCornMasks = np.transpose(np.asarray(redCornMasks),(1,2,0))
    else:
        ret_redCornMasks = redCornMasks
    return cornList, ret_cornMasks, ret_redCornMasks
def compute_percent_est_accuracy(gt_percent_est, pred_percent_est, thresh):
if (gt_percent_est - thresh) <= pred_percent_est <= (gt_percent_est + thresh) :
error = 0
elif(gt_percent_est > pred_percent_est):
error = (gt_percent_est - thresh) - pred_percent_est
elif(gt_percent_est < pred_percent_est):
error = (gt_percent_est + thresh) - pred_percent_est
return (100 - math.fabs(error))
def compare_corns(cornList):
if cornList[0]['percent_eaten'] < cornList[1]['percent_eaten']:
return 1
else:
return 0
def compare_performance(gt_corns, pred_corns, left_eaten_count):
#make percent acc calculations
percent_est_accuracy = 0
for gt_corn, pred_corn in zip(gt_corns, pred_corns):
#make percent acc calculations
est_accuracy = compute_percent_est_accuracy(gt_corn['percent_eaten'], pred_corn['percent_eaten'], thresh=1.0)
pred_corn.update({'est_accuracy' : est_accuracy})
percent_est_accuracy += est_accuracy
percent_est_accuracy = percent_est_accuracy / len(pred_corns)
#make left vs right predictions
comparison_accuracy = 0
if(len(gt_corns) > 1 and len(pred_corns) > 1):
gt_left_eaten_more = compare_corns(gt_corns)
#print('gt_left_eaten_more : ' , gt_left_eaten_more)
pred_left_eaten_more = compare_corns(pred_corns)
#print('pred_left_eaten_more : ' , pred_left_eaten_more)
if(pred_left_eaten_more == 1) :
#print('Left corn has been eaten more than Right.')
left_eaten_count += 1
#else:
print('Right corn has been eaten more than Left.')
if(gt_left_eaten_more == pred_left_eaten_more):
comparison_accuracy = 1
else:
comparison_accuracy = 1
return percent_est_accuracy, left_eaten_count, comparison_accuracy
def compute_batch_ap(dataset, image_ids, verbose=1):
APs = []
mean_weight_iou = []
for image_id in image_ids:
try:
# Load image
image, image_meta, gt_class_id, gt_bbox, gt_mask =\
modellib.load_image_gt(dataset, config_2class,
image_id, use_mini_mask=False)
# Run object detection
results = model_2class.detect_molded(image[np.newaxis], image_meta[np.newaxis], verbose=0)
# Compute AP over range 0.5 to 0.95
r = results[0]
visualize.save_image(image, "test"+str(image_id), r['rois'], r['masks'],
r['class_ids'],r['scores'],['BG', 'Whole Corn','Bare Cob'],scores_thresh=0.8,mode=0, captions=None, show_mask=True)
gt_r = {"class_ids": gt_class_id,
"rois": gt_bbox,
"masks": gt_mask}
gt_corns, gt_corn_masks, gt_red_corn_masks = get_cornList(gt_r, 2, image)
# print('gt_mask size: ',gt_corn_masks.shape)
pred_corns, pred_cornMasks, pred_redCornMasks = get_cornList(r, 2, image)
#print(pred_corns)
print(image_id, "Image" , os.path.basename(dataset_2class.source_image_link(image_id)))
print(image_id, 'percent_eaten_gt', gt_corns[1]['percent_eaten'])
print(image_id, 'percent_eaten_pred', pred_corns[1]['percent_eaten'])
print(image_id, 'percent_eaten_gt', gt_corns[0]['percent_eaten'])
print(image_id, 'percent_eaten_pred', pred_corns[0]['percent_eaten'])
print("*****************************************************************")
images.append(os.path.basename(dataset_2class.source_image_link(image_id)))
gt_one.append(gt_corns[1]['percent_eaten'])
pred_one.append(pred_corns[1]['percent_eaten'])
gt_two.append(gt_corns[0]['percent_eaten'])
pred_two.append(pred_corns[0]['percent_eaten'])
except:
print("image Id :", image_id)
print(sys.exc_info())
ap = 0
APs.append(ap)
if verbose:
info = dataset.image_info[image_id]
meta = modellib.parse_image_meta(image_meta[np.newaxis,...])
print("{:3} {} AP: {:.2f}".format(
meta["image_id"][0], meta["original_image_shape"][0], ap))
pass
return APs
weights = []
#logs 50 image
weights.append("/home/ssa49593/Mask_RCNN/logs/50im_1/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/50im_2/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/50im_3/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/50im_4/mask_rcnn_corn_2class_0600.h5")
weights.append("/home/ssa49593/Mask_RCNN/logs/50im_5/mask_rcnn_corn_2class_0600.h5")
#logs 100 image
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/100im_1/mask_rcnn_corn_2class_0600.h5")
weights.append("/home/ssa49593/Mask_RCNN/logs/100im_2/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/100im_3/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/100im_4/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/100im_5/mask_rcnn_corn_2class_0600.h5")
#logs 150 image
weights.append("/work/cylilab/Mask_RCNN/logs/150im_1/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/150im_2/mask_rcnn_corn_2class_0600.h5")
weights.append("/work/cylilab/Mask_RCNN/logs/150im_3/mask_rcnn_corn_2class_0600.h5")
weights.append("/home/ssa49593/Mask_RCNN/logs/150im_4/mask_rcnn_corn_2class_0600.h5")
weights.append("/work/cylilab/Mask_RCNN/logs/150im_5/mask_rcnn_corn_2class_0600.h5")
#logs 200 image
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/200im_1/mask_rcnn_corn_2class_0600.h5")
weights.append("/work/cylilab/Mask_RCNN/logs/200im_2/mask_rcnn_corn_2class_0600.h5")
weights.append("/work/cylilab/Mask_RCNN/logs/200im_3/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/200im_4/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/200im_5/mask_rcnn_corn_2class_0600.h5")
#logs 250 image
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/250im_1/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/250im_2/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/250im_5/mask_rcnn_corn_2class_0600.h5")
weights.append("/work/cylilab/Mask_RCNN/logs/250im_4/mask_rcnn_corn_2class_0600.h5")
weights.append("/work/cylilab/Mask_RCNN/logs/appr1/2class_250im_600ep/mask_rcnn_corn_2class_0600.h5")
#logs 300 logs
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/300im_1/mask_rcnn_corn_2class_0600.h5")
weights.append("/scratch/ssa49593/workDir/Corn_detection/logs/300im_2/mask_rcnn_corn_2class_0600.h5")
weights.append("/work/cylilab/Mask_RCNN/logs/300im_4/mask_rcnn_corn_2class_0600.h5")
import scipy.io
import numpy as np
# Run on test set
for weights_path in weights:
# Load weights
#print("Loading weights ", weights_path_3class)
#model_3class.load_weights(weights_path_3class, by_name=True)
images = []
gt_one = []
pred_one = []
gt_two = []
pred_two = []
#weights_path = os.path.join(ROOT_DIR, "logs/appr1/2class_050im_600ep/50im_2/mask_rcnn_corn_2class_0600.h5")
print("Loading weights ", weights_path)
model_2class.load_weights(weights_path, by_name=True)
APs = compute_batch_ap(dataset_2class, dataset_2class.image_ids[5:6])
filename = weights_path[0:len(weights_path)-29] + "PRCurve.mat"
scipy.io.savemat(filename, mdict={'ImageIds': images, 'GT_Left': gt_one, 'Pred_Left': pred_one, 'GT_Right': gt_two, 'Pred_Right': pred_two})
break | [
"noreply@github.com"
] | noreply@github.com |
964276026958767292e65a825344e9d65be28f17 | aedc785e2666674aa638e09b39f990956e01e546 | /src/testoob/running/processed_helper.py | ce9c99487e3afb02b805b3964c4a4035cd146d2f | [
"Apache-2.0"
] | permissive | callmewilko/testoob | 9be21f2b0d6b6a95cb6a0e14918c55e6e9c1593f | b71b53c15d1b0a736ab40dbad4255e0984968373 | refs/heads/master | 2020-04-04T08:00:35.098595 | 2018-11-05T16:41:10 | 2018-11-05T16:41:10 | 155,756,643 | 0 | 0 | null | 2018-11-01T18:20:24 | 2018-11-01T18:13:15 | Python | UTF-8 | Python | false | false | 1,616 | py | # Testoob, Python Testing Out Of (The) Box
# Copyright (C) 2005-2006 The Testoob Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"Helper for processed running"
class ProcessedRunnerHelper:
"A helper class to make ProcessedRunner shorter and clearer."
def __init__(self, max_processes):
self._fixturesList = [[] for i in xrange(max_processes)]
self._load_balance_idx = 0
def register_fixture(self, fixture):
self._fixturesList[self._load_balance_idx].append(fixture)
self._load_balance_idx = (self._load_balance_idx + 1) % len(self._fixturesList)
def start(self, reporter):
from os import fork, pipe, fdopen, waitpid
from sys import exit
children = []
for processFixtures in self._fixturesList:
pid = fork()
if pid == 0:
self._run_fixtures(processFixtures, reporter)
exit()
children.append(pid)
for child in children:
waitpid(child, 0)
def _run_fixtures(self, fixtures, reporter):
[fixture(reporter) for fixture in fixtures]
| [
""
] | |
66e2289930010858f17f6b47cd882273cfaacfe3 | 2d009dceeb7893d7441e42b1944b7ef317e561ab | /Bisection.py | 7ffb910041ff7d3efd04283482a133810206349c | [] | no_license | dweatherstone/calculusdrw | d22a3abf238aa29657ece79c8227411c38e82709 | 9eaf7f059ed219cdfc1f8968dadafdb2387ea059 | refs/heads/master | 2022-04-18T20:24:02.323455 | 2020-04-20T15:44:05 | 2020-04-20T15:44:05 | 257,258,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,793 | py | from Generalroots import RootStatus, Root
class Bisection(Root):
"""The simplest root finding algorithm is the bisection method. The
algorithm applies to any continuous function on an interval where
the value of the function changes sign from to . The idea is simple:
divide the interval in two, a solution must exist within one
subinterval, select the subinterval where the sign of changes
and repeat.
"""
def __init__(self, func):
""" Initialising an object to calculate the root of a function using
the Bisection method.
Parameters
----------
func (function): The function for which we are trying to approximate a
solution.
"""
super().__init__(func)
def find_root(self, start_interval, end_interval, num_iter = 100):
""" Approximate solution of f(x) = 0 on interval [a, b] using the
bisection method.
Parameters
----------
start_interval (number): The lower bound of the interval in which to
search for a solution.
end_interval (number): The upper bound of the interval in which to
search for a solution.
num_iter (positive integer): The number of iterations to implement.
Returns
-------
xn (number): Result of Bisection method. The midpoint of the Nth interval
computed by the bisection method. The intial interval
[a_0,b_0] is given by [a,b]. If f(m_n) == 0 for some
midpoint m_n = (a_n + b_n)/2, then the function returns this
solution. If all signs of values f(a_n), f(b_n) and f(m_n)
are the same at any iteration, the bisection methode fails
and returns None.
"""
assert num_iter > 0
assert end_interval > start_interval
if self.f(start_interval)*self.f(end_interval) >= 0:
self.status = RootStatus.method_fails
return None
a_n = start_interval
b_n = end_interval
for _ in range(1, num_iter+1):
m_n = (a_n + b_n)/2
f_m_n = self.f(m_n)
if self.f(a_n)*f_m_n < 0:
a_n = a_n
b_n = m_n
elif self.f(b_n)*f_m_n < 0:
a_n = m_n
b_n = b_n
elif f_m_n == 0:
self.status = RootStatus.root_found
self.xn.append(m_n)
return self.xn
else:
self.status = RootStatus.method_fails
return None
m_n = (a_n + b_n)/2
self.xn.append(m_n)
self.status = RootStatus.exceeded_max_iter
return self.xn | [
"davidweatherstone@gmail.com"
] | davidweatherstone@gmail.com |
e7af462a10aa2b01aba8dc9f51eaeb8d8f8f1589 | d9df48207e020367a2195bc3381db61c4eee4d9a | /Python/05.plot/01_bar.py | e31d9ea8c39aa23c9988a0b5448584e37556d445 | [] | no_license | surkjin/kosmo41_surkjin | d1872c39784b9c34f3016bf9cc1f347414b61816 | 2a262c4ae44415690034e8ce04e858732aa12c70 | refs/heads/master | 2020-03-21T04:42:09.070599 | 2018-12-12T06:52:33 | 2018-12-12T06:52:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 11 14:38:07 2018
@author: kosmo30
"""
#!/usr/bin/env python3
import matplotlib.pyplot as plt
plt.style.use('ggplot')
customers = ['ABC','DEF','GHI','JKL','MNO']
customers_index = range(len(customers))
sale_amounts =[127, 90, 201, 111, 232]
fig = plt.figure()
ax1 = fig.add_subplot(1,1,1)
ax1.bar(customers_index, sale_amounts, align='center', color='green')
ax1.xaxis.set_ticks_position('bottom')
ax1.yaxis.set_ticks_position('left')
plt.xticks(customers_index, customers, rotation=0, fontsize='small')
plt.xlabel('Customer Name')
plt.ylabel('Sale Amount')
plt.title('Sale Amount per Customer')
plt.savefig('./output/01_bar_plot.png', dpi=400, bbox_inches='tight')
plt.show()
| [
"surkjin@gmail.com"
] | surkjin@gmail.com |
3c3083f149d724f150c0f60864c4c9d6ed10495d | 27856ac3b3311728fe103911f7cbc0f20cbdfa8f | /bot/config.py | 549488d5ab4942dbe9d3762ea0d3e81b3afc860a | [] | no_license | 535521469/crawl_free_ip_proxy | 2c314f5037e45508071593bbcfa27e16751e4078 | 977c7fc422e8d49dd1d195cf8d7d1475da427e04 | refs/heads/master | 2016-09-06T13:25:25.738769 | 2013-05-01T07:28:25 | 2013-05-01T07:28:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | # encoding=utf8
'''
Created on 2013-4-24
@author: corleone
'''
from bot.configutil import ConfigFile
import os
def read_config():
cfg_path = os.sep.join([os.getcwd(), os.curdir, 'fetchproxy.cfg'])
configdata = ConfigFile.readconfig(cfg_path).data
return configdata
configdata = read_config()
| [
"535521469@qq.com"
] | 535521469@qq.com |
ff224afdc46082bd19994708a0dc8289239eb5e4 | 9bc0d33e1c3454393ea74d85b531801d6aa28a55 | /baselines/duet/test_ranking.py | 20ddb3c6a7f5158fc67751c3eb22e468eb15f604 | [
"MIT"
] | permissive | skallumadi/mnsrf_ranking_suggestion | 4c604ce5fc394c6d1d1efebb68af08bd2349c696 | 37cbf55d27e8595b990c0a66449e7bfe3027cc8c | refs/heads/master | 2021-01-25T14:03:23.465568 | 2017-10-09T06:40:10 | 2017-10-09T06:40:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,189 | py | ###############################################################################
# Author: Wasi Ahmad
# Project: https://www.microsoft.com/en-us/research/wp-content/uploads/2016/10/wwwfp0192-mitra.pdf
# Date Created: 7/23/2017
#
# File Description: This script evaluates test ranking performance.
###############################################################################
import torch, helper, util, data, os
from duet import DUET
from ranking_eval_functions import mean_average_precision, NDCG
args = util.get_args()
def compute_ranking_performance(model, test_batch, test_clicks, test_labels):
local_score = model.local_model(test_batch, test_clicks)
distributed_score = model.distributed_model(test_batch, test_clicks)
total_score = local_score + distributed_score
MAP = mean_average_precision(total_score, test_labels)
NDCG_at_1 = NDCG(total_score, test_labels, 1)
NDCG_at_3 = NDCG(total_score, test_labels, 3)
NDCG_at_10 = NDCG(total_score, test_labels, 5)
return MAP, NDCG_at_1, NDCG_at_3, NDCG_at_10
def test_ranking(model, test_batches):
num_batches = len(test_batches)
map, ndcg_1, ndcg_3, ndcg_10 = 0, 0, 0, 0
for batch_no in range(1, num_batches + 1):
test_queries, test_docs, test_labels = helper.batch_to_tensor(test_batches[batch_no - 1], model.dictionary,
model.config.max_query_length,
model.config.max_doc_length)
if model.config.cuda:
test_queries = test_queries.cuda()
test_docs = test_docs.cuda()
test_labels = test_labels.cuda()
ret_val = compute_ranking_performance(model, test_queries, test_docs, test_labels)
map += ret_val[0]
ndcg_1 += ret_val[1]
ndcg_3 += ret_val[2]
ndcg_10 += ret_val[3]
map = map / num_batches
ndcg_1 = ndcg_1 / num_batches
ndcg_3 = ndcg_3 / num_batches
ndcg_10 = ndcg_10 / num_batches
print('MAP - ', map)
print('NDCG@1 - ', ndcg_1)
print('NDCG@3 - ', ndcg_3)
print('NDCG@10 - ', ndcg_10)
if __name__ == "__main__":
dictionary = data.Dictionary(5)
dictionary.load_dictionary(args.save_path, 'vocab.csv', 5000)
model = DUET(dictionary, args)
if 'CUDA_VISIBLE_DEVICES' in os.environ:
cuda_visible_devices = [int(x) for x in os.environ['CUDA_VISIBLE_DEVICES'].split(',')]
if len(cuda_visible_devices) > 1:
model = torch.nn.DataParallel(model, device_ids=cuda_visible_devices)
if args.cuda:
model = model.cuda()
helper.load_model_states_from_checkpoint(model, os.path.join(args.save_path, 'model_best.pth.tar'), 'state_dict')
print('Model and dictionary loaded.')
model.eval()
test_corpus = data.Corpus(args.data, 'session_test.txt', dictionary)
print('Test set size = ', len(test_corpus.data))
test_batches = helper.batchify(test_corpus.data, args.batch_size)
print('Number of test batches = ', len(test_batches))
test_ranking(model, test_batches)
| [
"wasiahmad@ucla.edu"
] | wasiahmad@ucla.edu |
37c86035036c62d52190241df1ef24d041718a06 | 35212726c5c6d60eb48660068c962eeebea3353f | /utils/options/example.py | b1e24cce6a10d3ffa407ad7a1f94a33b54562f5a | [] | no_license | frankfralick/Charted | 5dfaa8c2e56f239c89d107ed5722d3d30cefa074 | 3ee6053549074da03f8c9881baf5b44d8d1c81ac | refs/heads/master | 2021-01-10T19:37:04.746769 | 2013-06-20T15:43:30 | 2013-06-20T15:43:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,136 | py | """
Demonstration code used in, or while writing, the documentation.
"""
from options import Options, attrs, Unset
class ClassicShape(object):
name = 'Shapes Rule!'
color = 'purple'
height = 50
width = 50
def __init__(self, name=None, color='white', height=10, width=10):
self.name = name
self.color = color
self.height = height
self.width = width
def draw(self, **kwargs):
name = kwargs.get('name', self.name)
color = kwargs.get('color', self.color)
height = kwargs.get('height', self.height)
width = kwargs.get('width', self.width)
print "color='{}', width={}, name='{}', height={}".format(color, width, name, height)
def draw2(self, name=None, color=None, height=None, width=None):
name = name or self.name
color = color or self.color
height = height or self.height
width = width or self.width
print "color='{}', width={}, name='{}', height={}".format(color, width, name, height)
def draw3(self, name=None, color=None, height=None, width=None):
name = name or self.name or ClassicShape.name
color = color or self.color or ClassicShape.color
height = height or self.height or ClassicShape.height
width = width or self.width or ClassicShape.width
print "color='{}', width={}, name='{}', height={}".format(color, width, name, height)
oldone = ClassicShape(name='one')
oldone.draw()
oldone.draw(color='red')
oldone.draw(color='green', width=22)
print "--"
oldone.draw2()
oldone.draw2(color='red')
oldone.draw2(color='green', width=22)
print "--"
oldone.draw3()
oldone.draw3(color='red')
oldone.draw3(color='green', width=22)
print '==='
def relative_meta(key):
def setter(v, current):
return int(v) + current[key] if isinstance(v, str) else v
return setter
def relative(value, currently):
return int(value) + currently if isinstance(value, str) else value
def relmath(value, currently):
if isinstance(value, str):
if value.startswith('*'):
return currently * int(value[1:])
elif value.startswith('/'):
return currently / int(value[1:])
else:
return currently + int(value)
else:
return value
class Shape(object):
options = Options(
name = None,
color = 'white',
height = 10,
width = 10,
)
options.magic(
height = lambda v, cur: cur.height + int(v) if isinstance(v, str) else v,
width = lambda v, cur: cur.height + int(v) + cur.width if isinstance(v, str) else v,
)
options.magic(
height = lambda v, cur: relmath(v, cur.height),
width = lambda v, cur: relmath(v, cur.width)
)
def __init__(self, **kwargs):
self.options = Shape.options.push(kwargs)
def _attrs(self, opts):
nicekeys = [ k for k in opts.keys() if not k.startswith('_') ]
return ', '.join([ "{}={}".format(k, repr(opts[k])) for k in nicekeys ])
def draw(self, **kwargs):
opts = self.options.push(kwargs)
print attrs(opts)
def draw2(self, **kwargs):
opts = self.options.push(kwargs)
print self._attrs(opts)
def set(self, **kwargs):
self.options.set(**kwargs)
def is_tall(self, **kwargs):
opts = self.options.push(kwargs)
return opts.height > 100
@options.magical('name')
def capitalize_name(self, v, cur):
return ' '.join(w.capitalize() for w in v.split())
one = Shape(name='one')
one.draw()
one.draw(color='red')
one.draw(color='green', width=22)
print '--'
Shape.options.set(color='blue')
one.draw()
one.draw(height=100)
one.draw(height=44, color='yellow')
print '---'
one.draw(width='+200')
one.draw()
print '----'
one.draw(width='*4', height='/2')
one.draw2(width='*4', height='/2')
print '-----'
one.set(width='*10', color='orange')
one.draw()
one.set(color=Unset)
one.draw()
print "------"
one.set(name='a shape')
one.draw() | [
"frankfralick@gmail.com"
] | frankfralick@gmail.com |
2e0c29033010b2955a644b542267326c9713f927 | 0581b4564d1e9683b49d754565c9b6f21a75d387 | /shop/models.py | 428c095c6557bc850101eca14ab7d66adb31631d | [] | no_license | AndreyIvanyutin/Webshop | 0deffcf5094fb86e74c6fe8b2d06d4dd2cdda93d | 8839824a5513b2e8d1c0d7f73f442095474cc8aa | refs/heads/master | 2020-12-14T09:56:12.219190 | 2017-07-08T17:25:29 | 2017-07-08T17:25:29 | 95,473,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,618 | py | from django.contrib.auth.models import User
from django.db import models
# Модель категории
class Category(models.Model):
name = models.CharField(max_length=200, db_index=True)
slug = models.SlugField(max_length=200, db_index=True, unique=True)
image = models.ImageField(upload_to='category', blank=True, null=True)
class Meta:
ordering = ['name']
verbose_name = 'Категория'
verbose_name_plural = 'Категории'
def __str__(self):
return self.name
class SubCategory(models.Model):
name = models.CharField(max_length=200, db_index=True)
slug = models.SlugField(max_length=200, db_index=True, unique=True)
image = models.ImageField(upload_to='subcategory', blank=True, null=True)
category = models.ForeignKey(Category)
class Meta:
ordering = ['name']
verbose_name = 'Подкатегория'
verbose_name_plural = 'Подкатегории'
def __str__(self):
return self.name
# Модель продукта
class Product(models.Model):
subcategory = models.ForeignKey(SubCategory, verbose_name="Категория", blank=True, null=True)
name = models.CharField(max_length=200, db_index=True, verbose_name="Название")
image = models.ImageField(upload_to='products', blank=True, null=True, verbose_name="Изображение товара")
description = models.TextField(blank=True, verbose_name="Описание")
price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="Цена")
stock = models.PositiveIntegerField(verbose_name="На складе")
available = models.BooleanField(default=True, verbose_name="Доступен")
created = models.DateTimeField(auto_now_add=True, verbose_name="Создан")
updated = models.DateTimeField(auto_now=True, verbose_name="Обновлено")
class Meta:
ordering = ['name']
index_together = [
['id', 'name']
]
verbose_name = 'Продукт'
verbose_name_plural = 'Продукты'
def __str__(self):
return self.name
class FeedBack(models.Model):
content = models.TextField()
product = models.ForeignKey(Product)
pass
class Customer(models.Model):
user = models.OneToOneField(User)
user_name = models.CharField(max_length=200, default='', db_index=True, verbose_name="Name")
#def __unicode__(self):
# return self.user
# first_name = models.CharField(max_length=50, default=True, verbose_name='Имя')
# last_name = models.CharField(max_length=50, default=True, verbose_name='Фамилия')
# password = models.CharField(max_length=100, default=True)
# phone = models.CharField(max_length=10, default=True, verbose_name='Телефон')
# email = models.EmailField(default=True)
# date_of_birth = models.DateField(default=True, verbose_name='Дата рождения')
avatar = models.ImageField(upload_to='customer_avatar', blank=True, null=True, verbose_name="Avatar")
created = models.DateTimeField(auto_now_add=True, blank=True, null=True, verbose_name="Создан")
updated = models.DateTimeField(auto_now=True, blank=True, null=True, verbose_name="Обновлено")
#orders =
#reviews =
#wishes =
def __str__(self):
return self.user_name
class Meta:
verbose_name = 'Профиль'
verbose_name_plural = 'Профили'
#class Orders(models.Model):
#name = models.CharField(max_length=200, db_index=True, verbose_name="Заказы")
#quantity = models.PositiveIntegerField(verbose_name="Колличество")
#created = models.DateTimeField(auto_now_add=True, verbose_name="Создан")
#done = models.BooleanField(default=True, verbose_name="Выполнен")
#canceled = models.BooleanField(default=True, verbose_name="Отменен")
#orders = models.ForeignKey(Customer)
# def __str__(self):
# return self.name
#class Reviews(models.Model):
#name = models.CharField(max_length=200, db_index=True, verbose_name="Отзывы")
#product = models.ManyToManyField(Product)
#created = models.DateTimeField(auto_now_add=True, verbose_name="Создан")
#caption = models.CharField(max_length=200, db_index=True, verbose_name="Заголовок")
#text = models.TextField(blank=True, verbose_name="Текст отзыва")
# reviews = models.ForeignKey(Customer)
# ?? stars = models.CharField(max_length=5)
# def __str__(self):
# return self.name
| [
"andrey.ivanyutin@gmail.com"
] | andrey.ivanyutin@gmail.com |
93fe75d32ccb18339ef6ff1b37d1cfbe0b3c0c1e | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_2/dlymuh001/question2.py | 34d73fd549c0a400164a5301a2e7cc2b38ba5c3b | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,297 | py | def cat():
lick = input("Did the cat lick it? (yes/no)\n")
if (lick == "yes"):
healthy = input("Is your cat healthy? (yes/no)\n")
if (healthy == "yes"):
return "Eat it"
elif (healthy == "no"):
return "Your call"
elif (lick == "no"):
return "Eat it"
print("Welcome to the 30 Second Rule Expert")
print("------------------------------------")
print("Answer the following questions by selecting from among the options.")
decision = ""
seen = input("Did anyone see you? (yes/no)\n")
if (seen == "yes"):
person = input("Was it a boss/lover/parent? (yes/no)\n")
if (person == "yes"):
expensive = input("Was it expensive? (yes/no)\n")
if (expensive == "yes"):
cut_off = input("Can you cut off the part that touched the floor? (yes/no)\n")
if (cut_off == "yes"):
decision = "Eat it"
elif (cut_off == "no"):
decision = "Your call"
elif (expensive == "no"):
chocolate = input("Is it chocolate? (yes/no)\n")
if (chocolate == "yes"):
decision = "Eat it"
elif (chocolate == "no"):
decision = "Don\'t eat it"
elif (person == "no"):
decision = "Eat it"
elif (seen == "no"):
sticky = input("Was it sticky? (yes/no)\n")
if (sticky == "yes"):
raw_steak = input("Is it a raw steak? (yes/no)\n")
if (raw_steak == "yes"):
puma = input("Are you a puma? (yes/no)\n")
if (puma == "yes"):
decision = "Eat it"
elif (puma == "no"):
decision = "Don\'t eat it"
elif (raw_steak == "no"):
decision = cat()
elif (sticky == "no"):
emausaurus = input("Is it an Emausaurus? (yes/no)\n")
if (emausaurus == "yes"):
megalosaurus = input("Are you a Megalosaurus? (yes/no)\n")
if (megalosaurus == "yes"):
decision = "Eat it"
elif (megalosaurus == "no"):
decision = "Don\'t eat it"
elif (emausaurus == "no"):
decision = cat()
##output decision
print ("Decision:", decision, sep = " ", end = ".")
| [
"jarr2000@gmail.com"
] | jarr2000@gmail.com |
954aed060f4c9eddef7723e818e80e360517b911 | 7d6cb605a02dff3031da0bc6a334fdf9f0579412 | /Project/nn trim and preprocessed/muti_layer_nn.py | ec1d62419cb8c0181e7fe1dd5b192f402c5f26f1 | [] | no_license | 18369766918/Matthew_Project | f98fc4aec156e8920521863b25abf805649cf5c5 | 938a5b20865f842ef8695b2a0413ece31766fe00 | refs/heads/master | 2020-12-30T16:58:47.804504 | 2017-05-12T21:16:00 | 2017-05-12T21:16:00 | 91,042,943 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,203 | py | import tensorflow as tf
import numpy as np
from math import exp
from LoadDataSet import load_and_process_training_Data,load_and_process_test_data
tf.set_random_seed(0)
# Get all pre processed data
# load training data, test data
#train_x,train_y= load_and_process_training_Data('targfeatures_train.txt','nontargetFeatures_train.txt')
#test_x,test_y = load_and_process_test_data('testfeatures.txt','testlabels.txt')
train_x,train_y= load_and_process_training_Data('trainfeatures.txt','trainlabels.txt')
test_x,test_y = load_and_process_test_data('testfeatures.txt','testlabels.txt')
# set up parameters we need for nn model
# trained neural network path
save_path = "nn_saved_model/model_compress_samenode/model.ckpt"
# The number of class you want to have in NN. In this case we want NN to determine which dataset belone
# to target signal or non_target signal
n_classes = 2
# Number of node each hidden layer will have
n_nodes_hl1 = 100
n_nodes_hl2 = 100
n_nodes_hl3 = 100
# number of times we iterate through training data
num_epochs = 100
# computer may not have enough memory, so we divide the train into batch each batch have 100 data features.
batch_size = 100
# These are placeholders for some values in graph
# tf.placeholder(dtype, shape=None(optional), name=None(optional))
# It's a tensor to hold our datafeatures
x = tf.placeholder(tf.float32, [None,len(train_x[0])])
# Every row has either [1,0] for targ or [0,1] for non_target. placeholder to hold one hot value
Y_C = tf.placeholder(tf.int8, [None, n_classes])
# variable learning rate
lr = tf.placeholder(tf.float32)
# neural network model
def neural_network_model(data):
# layers contain weights and bias for case like all neurons fired a 0 into the layer, we will need result out
# When using RELUs, make sure biases are initialised with small *positive* values for example 0.1 = tf.ones([K])/10
hidden_1_layer = {'weights':tf.Variable(tf.random_normal([len(train_x[0]), n_nodes_hl1])),'bias':tf.Variable(tf.ones([n_nodes_hl1])/10)}
hidden_2_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),'bias':tf.Variable(tf.ones([n_nodes_hl2])/10)}
hidden_3_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),'bias':tf.Variable(tf.ones([n_nodes_hl3])/10)}
# no more bias when come to the output layer
output_layer = {'weights':tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),'bias':tf.Variable(tf.zeros([n_classes]))}
# multiplication of the raw input data multipled by their unique weights (starting as random, but will be optimized)
l1 = tf.add(tf.matmul(data,hidden_1_layer['weights']), hidden_1_layer['bias'])
l1 = tf.nn.relu(l1)
l2 = tf.add(tf.matmul(l1,hidden_2_layer['weights']), hidden_2_layer['bias'])
l2 = tf.nn.relu(l2)
l3 = tf.add(tf.matmul(l2,hidden_3_layer['weights']), hidden_3_layer['bias'])
l3 = tf.nn.relu(l3)
# We repeat this process for each of the hidden layers, all the way down to our output, where we have the final values still being the multiplication of the input and the weights, plus the output layer's bias values.
Ylogits = tf.matmul(l3,output_layer['weights']) + output_layer['bias']
return Ylogits
# set up the training process
def train_neural_network(x):
    """Train the network, print per-epoch loss and final test accuracy,
    dump raw test logits to nn_prediction.txt, and save the model.

    x: the feature placeholder fed with mini-batches of train_x.
    Uses module-level globals: train_x/train_y, test_x/test_y, Y_C, lr,
    num_epochs, batch_size, save_path.
    """
    # produce the prediction base on output of nn model
    Ylogits = neural_network_model(x)
    # measure the error use build in cross entropy function, the value that we want to minimize
    cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_C))
    # To optimize our cost (cross_entropy), reduce error, default learning_rate is 0.001, but you can change it, this case we use default
    # optimizer = tf.train.GradientDescentOptimizer(0.003)
    optimizer = tf.train.AdamOptimizer(lr)
    train_step = optimizer.minimize(cross_entropy)
    # start the session
    with tf.Session() as sess:
        # We initialize all of our variables first before start
        sess.run(tf.global_variables_initializer())
        # iterate epoch count time (cycles of feed forward and back prop), each epoch means neural see through all train_data once
        for epoch in range(num_epochs):
            # count the total cost per epoch, declining mean better result
            epoch_loss=0
            i=0
            # learning rate decay: exponential from max down toward min as epochs grow
            max_learning_rate = 0.003
            min_learning_rate = 0.0001
            decay_speed = 150
            learning_rate = min_learning_rate + (max_learning_rate - min_learning_rate) * exp(-epoch/decay_speed)
            # divide the dataset in to dataset/batch_size in case run out of memory
            while i < len(train_x):
                # load train data
                start = i
                end = i + batch_size
                batch_x = np.array(train_x[start:end])
                batch_y = np.array(train_y[start:end])
                train_data = {x: batch_x, Y_C: batch_y, lr: learning_rate}
                # train
                # sess.run(train_step,feed_dict=train_data)
                # run optimizer and cost against batch of data.
                _, c = sess.run([train_step, cross_entropy], feed_dict=train_data)
                epoch_loss += c
                i+=batch_size
            print('Epoch', epoch, 'completed out of',num_epochs,'loss:',epoch_loss)
        # how many predictions we made that were perfect matches to their labels
        # test model
        # test data
        test_data = {x:test_x, Y_C:test_y}
        # calculate accuracy
        correct_prediction = tf.equal(tf.argmax(Ylogits, 1), tf.argmax(Y_C, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
        print('Accuracy:',accuracy.eval(test_data))
        # result matrix, return the position of 1 in array
        result = (sess.run(tf.argmax(Ylogits.eval(feed_dict=test_data),1)))
        # Rebuild the integer class labels from the one-hot test targets:
        # [0,1] -> 1 (non-target), [1,0] -> 0 (target).
        answer = []
        for i in range(len(test_y)):
            if test_y[i] == [0,1]:
                answer.append(1)
            elif test_y[i]==[1,0]:
                answer.append(0)
        answer = np.array(answer)
        printResultandCorrectMatrix(result,answer)
        np.savetxt('nn_prediction.txt', Ylogits.eval(feed_dict={x: test_x}), delimiter=',',newline="\r\n")
        # save the nn model for later use again
        # 'Saver' op to save and restore all the variables
        saver = tf.train.Saver()
        saver.save(sess, save_path)
        print("Model saved in file: %s" % save_path)
# load the trained neural network model
def test_loaded_neural_network():
    """Restore the saved model from save_path and evaluate it on the test set.

    Rebuilds the graph (fresh random variables, then overwritten by
    saver.restore), prints accuracy, dumps raw logits to nn_prediction.txt,
    and prints the prediction/answer matrices.
    Uses module-level globals: x, Y_C, test_x, test_y, save_path.
    """
    Ylogits = neural_network_model(x)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # load saved model
        saver.restore(sess, save_path)
        print("Loading variables from ‘%s’." % save_path)
        np.savetxt('nn_prediction.txt', Ylogits.eval(feed_dict={x: test_x}), delimiter=',',newline="\r\n")
        # test model
        # calculate accuracy
        correct_prediction = tf.equal(tf.argmax(Ylogits, 1), tf.argmax(Y_C, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
        print('Accuracy:',accuracy.eval({x:test_x, Y_C:test_y}))
        # result matrix
        result = (sess.run(tf.argmax(Ylogits.eval(feed_dict={x:test_x}),1)))
        # answer matrix: decode one-hot targets back to 0/1 class labels
        answer = []
        for i in range(len(test_y)):
            if test_y[i] == [0,1]:
                answer.append(1)
            elif test_y[i]==[1,0]:
                answer.append(0)
        answer = np.array(answer)
        printResultandCorrectMatrix(result,answer)
        print(Ylogits.eval(feed_dict={x: test_x}).shape)
def printResultandCorrectMatrix(result,answer):
    """Print the prediction vector, the label vector and simple tallies.

    Counts how many predictions are 0 ("positive") or 1 ("negative"),
    then counts predictions that agree with the label on the entries
    whose label is 0. Output format matches the original exactly.
    """
    print("Result matrix: ")
    print(result)
    # Tally the predicted classes in a single pass over the array.
    predicted_values = list(np.nditer(result))
    positiveCount = sum(1 for value in predicted_values if value == 0)
    negativeCount = sum(1 for value in predicted_values if value == 1)
    print("Positive count ", positiveCount)
    print("Negative count ", negativeCount)
    print("Answer matrix: ")
    print(answer)
    # Agreement is only counted where the ground-truth label is 0.
    countCorrectMatch = sum(
        1 for idx in range(len(answer))
        if answer[idx] == 0 and result[idx] == 0
    )
    print("Correct match labels is ", countCorrectMatch)
''' plot result
def plotGraph(s,prediction):
import matplotlib.pyplot as plt
xx = [v[0] for v in test_x]
yy = [v[1] for v in test_y]
x_min, x_max = min(xx) - 0.5, max(xx) + 0.5
y_min, y_max = min(yy) - 0.5, max(yy) + 0.5
xxx, yyy = np.meshgrid(np.arange(x_min, x_max, 0.02), np.arange(y_min, y_max, 0.02))
pts = np.c_[xxx.ravel(), yyy.ravel()].tolist()
# ---> Important
z = s.run(tf.argmax(prediction, 1), feed_dict = {x: pts})
z = np.array(z).reshape(xxx.shape)
plt.pcolormesh(xxx, yyy, z)
plt.scatter(xx, yy, c=['r' if v[0] == 1 else 'b' for v in y_data], edgecolor='k', s=50)
plt.show()
'''
#train_neural_network(x)
test_loaded_neural_network()
| [
"matthew@desktop-jo4saar.algomau.auc.ca"
] | matthew@desktop-jo4saar.algomau.auc.ca |
175fc58cdec9dfa2614265d9e8687f6653571759 | 9ddb76f8bac669e89e2ae0c5de68bef6e81b7dd6 | /GamebotsParser.py | 865a444d3a048ea5f293af25aac7c910f18928bf | [] | no_license | formica-multiuso/ugc | 51598755823c912c9ba0af8fc75fa1c0b26817db | 0aa35de26412bc3855acb2407abafde77a430137 | refs/heads/master | 2016-09-06T16:25:06.158155 | 2013-08-05T10:31:56 | 2013-08-05T10:31:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | import sys
import socket
import threading
import select
class GamebotsParser(threading.Thread):
    """Background thread that reads Gamebots protocol messages from a socket
    and pretty-prints them with ANSI colour codes.

    NOTE(review): this uses the Python 2 print statement, and splits the raw
    recv() buffer with a str pattern -- under Python 3 recv() returns bytes,
    so this class is Python 2 only.
    """
    def __init__(self,socket,name):
        # socket: connected socket to the Gamebots server.
        # name: label shown as a coloured prefix before each batch of output.
        threading.Thread.__init__(self)
        self.socket = socket
        self.name = name
    def run(self):
        # Loop forever: wait until the socket is readable, pull up to 2KB,
        # and print each newline-separated message as "TYPE payload" tokens.
        # NOTE(review): a message larger than 2048 bytes would be split
        # across recv() calls and parsed incorrectly -- confirm upstream
        # message sizes.
        while 1:
            rlist, wlist, elist = select.select( [self.socket], [], [] )
            self.messageBuffer = self.socket.recv(2048)
            messages = self.messageBuffer.split('\n')
            print "\n" + "\033[34m" + "[" + self.name + "] " + "\033[0m"
            for message in messages:
                # First whitespace-separated token is the message type,
                # the remainder is the payload.
                pair = message.split(' ',1)
                if len(pair) > 1:
                    print "\033[33m" + pair[0] + "\033[0m"
                    payload = ''.join(pair[1])
                    # Drop '{' characters, then split on '}' so each
                    # key/value group prints on its own line.
                    tokens = payload.split('{')
                    tokens = ''.join(tokens)
                    tokens = tokens.split('}')
                    # Here I need to return sensors (SEN) information to the IRobot class splitted in dictionary
                    for token in tokens:
                        print token
    def parser(self):
        # Placeholder for a future structured parser (see note above).
        pass
| [
"formica@member.fsf.org"
] | formica@member.fsf.org |
95141bfe82d59cd91b74d094bbf932c628c5c5be | 04d76de80ac3d57c6b7428cfed3d86f85cef3ab5 | /Week-4-Good-Programming-Practices/Problem_Set_4/Problem 2 - Dealing with Hands.py | 89c250947d83b8c8b29ec2b5484cb6273e1737f9 | [] | no_license | ojwills/MIT-6.00.1x-Intro-to-CS-and-Python | 2d1e7bfff3af302ffb7377528e81c5e423dd36aa | e92a8421e8967d4f482868334bdb16e0aaf0fea8 | refs/heads/master | 2022-06-24T19:36:13.595243 | 2020-05-09T16:57:18 | 2020-05-09T16:57:18 | 139,490,535 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,712 | py | #Problem 2 - Dealing with Hands
#10.0/10.0 points (graded)
#**Please read this problem entirely!!** The majority of this problem consists of learning how to read code, which is an incredibly useful and important skill. At the end, you will implement a short function. Be sure to take your time on this problem - it may seem easy, but reading someone else's code can be challenging and this is an important exercise.
#
#
#Representing hands
#A hand is the set of letters held by a player during the game. The player is initially dealt a set of random letters. For example, the player could start out with the following hand: a, q, l, m, u, i, l. In our program, a hand will be represented as a dictionary: the keys are (lowercase) letters and the values are the number of times the particular letter is repeated in that hand. For example, the above hand would be represented as:
#
#hand = {'a':1, 'q':1, 'l':2, 'm':1, 'u':1, 'i':1}
#Notice how the repeated letter 'l' is represented. Remember that with a dictionary, the usual way to access a value is hand['a'], where 'a' is the key we want to find. However, this only works if the key is in the dictionary; otherwise, we get a KeyError. To avoid this, we can use the call hand.get('a',0). This is the "safe" way to access a value if we are not sure the key is in the dictionary. d.get(key,default) returns the value for key if key is in the dictionary d, else default. If default is not given, it returns None, so that this method never raises a KeyError. For example:
#
#>>> hand['e']
#Traceback (most recent call last):
# File "<stdin>", line 1, in <module>
#KeyError: 'e'
#>>> hand.get('e', 0)
#0
#Converting words into dictionary representation
#One useful function we've defined for you is getFrequencyDict, defined near the top of ps4a.py. When given a string of letters as an input, it returns a dictionary where the keys are letters and the values are the number of times that letter is represented in the input string. For example:
#
#>>> getFrequencyDict("hello")
#{'h': 1, 'e': 1, 'l': 2, 'o': 1}
#As you can see, this is the same kind of dictionary we use to represent hands.
#
#Displaying a hand
#Given a hand represented as a dictionary, we want to display it in a user-friendly way. We have provided the implementation for this in the displayHand function. Take a few minutes right now to read through this function carefully and understand what it does and how it works.
#
#Generating a random hand
#The hand a player is dealt is a set of letters chosen at random. We provide you with the implementation of a function that generates this random hand, dealHand. The function takes as input a positive integer n, and returns a new object, a hand containing n lowercase letters. Again, take a few minutes (right now!) to read through this function carefully and understand what it does and how it works.
#
#Removing letters from a hand (you implement this)
#The player starts with a hand, a set of letters. As the player spells out words, letters from this set are used up. For example, the player could start out with the following hand: a, q, l, m, u, i, l. The player could choose to spell the word quail . This would leave the following letters in the player's hand: l, m. Your task is to implement the function updateHand, which takes in two inputs - a hand and a word (string). updateHand uses letters from the hand to spell the word, and then returns a copy of the hand, containing only the letters remaining. For example:
#
#>>> hand = {'a':1, 'q':1, 'l':2, 'm':1, 'u':1, 'i':1}
#>>> displayHand(hand) # Implemented for you
#a q l l m u i
#>>> hand = updateHand(hand, 'quail') # You implement this function!
#>>> hand
#{'a':0, 'q':0, 'l':1, 'm':1, 'u':0, 'i':0}
#>>> displayHand(hand)
#l m
#Implement the updateHand function. Make sure this function has no side effects: i.e., it must not mutate the hand passed in. Before pasting your function definition here, be sure you've passed the appropriate tests in test_ps4a.py.
def updateHand(hand, word):
    """
    Assumes that 'hand' has all the letters in word.
    In other words, this assumes that however many times
    a letter appears in 'word', 'hand' has at least as
    many of that letter in it.
    Updates the hand: uses up the letters in the given word
    and returns the new hand, without those letters in it.
    Has no side effects: does not modify hand.
    word: string
    hand: dictionary (string -> int)
    returns: dictionary (string -> int)
    """
    # Work on a shallow copy so the caller's hand is never mutated.
    remaining = dict(hand)
    for letter in word:
        # Membership test on the dict directly (no need for .keys()).
        if letter in remaining:
            remaining[letter] -= 1
    return remaining
#Correct | [
"noreply@github.com"
] | noreply@github.com |
26f2074d53662e3b75784826d1b3d465efd230e4 | 91397c476203a77c597f80769b9b8ac850a2dedb | /mongodb app/main.py | 2501f63bdb9279a0539f7b97b9511c019bf851c3 | [] | no_license | firdaussalim/Perpustakaan-App | 29cdf2d0963065f6d89901a19a0f258ebc165ac6 | dcf579f725b8ef90a3f6412599b77ac47b7b50a4 | refs/heads/master | 2023-06-06T11:34:05.261274 | 2021-06-25T10:06:23 | 2021-06-25T10:06:23 | 380,193,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | from fastapi import FastAPI
from books_route import router as books_router
app = FastAPI()
app.include_router(books_router)
@app.get("/")
async def read_main():
return {"message": "Hello Bigger Applications!"} | [
"firdaus.salim24@gmail.com"
] | firdaus.salim24@gmail.com |
2b8f12babeff6dcb5935f88b5bbc52db63205ad7 | 3b1a27c72024dc6ac932b39df28d2fb3a6e26a5b | /22_sum_floats/sum_floats.py | ac5da9e7fe7bc8d0012faf56b9f8f6f6c2894334 | [] | no_license | petermoyano/py_ds | 423f95dd5ae308343e52db9a7178936062c8ce36 | 3b1ff9880a8a1c08ee1061c2f239be31c167a0fc | refs/heads/master | 2023-07-18T05:43:07.387039 | 2021-09-07T23:21:58 | 2021-09-07T23:21:58 | 402,805,563 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | def sum_floats(nums):
"""Return sum of floating point numbers in nums.
>>> sum_floats([1.5, 2.4, 'awesome', [], 1])
3.9
>>> sum_floats([1, 2, 3])
0
"""
# hint: to find out if something is a float, you should use the
# "isinstance" function --- research how to use this to find out
# if something is a float!
sol = [num for num in nums if isinstance(num, float)]
sum = 0
for num in sol:
sum += num
return sum | [
"pedromoyano454@gmail.com"
] | pedromoyano454@gmail.com |
d5ccdd17e5f6a90bc2827ddfac825f3f325a9b19 | 11ef04d5323d2972429adc93ca3795f4c9b3ca35 | /blog/views.py | cbf3e15b60fcf326667bb452194741102230c997 | [] | no_license | ZveRuss/my-blog | b976895cc89f2896c39cfb45c2e4bcb13f4a3393 | 46913b368d13d6a724ae25278d3b43d3695c02ca | refs/heads/master | 2020-04-15T16:44:39.905856 | 2019-01-11T08:59:27 | 2019-01-11T08:59:27 | 164,848,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | from django.shortcuts import render
from .models import Post
from django.utils import timezone
# Create your views here.
def post_list(request):
    """Blog index view: render posts published up to now, oldest first."""
    posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('published_date')
    return render(request, 'blog/post_list.html', {'posts': posts})
# NOTE(review): the line below looks like a Django model field pasted at module
# level by mistake -- 'models' is not imported here, so importing this module
# raises NameError. It probably belongs on a model class; confirm and move it.
votes = models.IntegerField(default=0)
| [
"jack8644@yandex.ru"
] | jack8644@yandex.ru |
d7eee39604fef2837377fb81e7d77c58c31765e1 | 635596e8f165717d5b8d0d83b3c434b24eb5629d | /tenkou.py | 7c1e85a0a888cad03474fbf0ef2db84cd857e850 | [
"MIT"
] | permissive | aaray6/bgm_tenkou | 58733ade90148a6c74ff5ef534a55bb04ee6306c | 8b01d98c5ad5479d931ee0096498062ad0dbbc8b | refs/heads/master | 2022-07-19T16:05:40.914407 | 2020-05-23T09:28:55 | 2020-05-23T09:28:55 | 266,247,912 | 0 | 0 | MIT | 2020-05-23T02:27:23 | 2020-05-23T02:27:22 | null | UTF-8 | Python | false | false | 14,139 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# version 0.0.2
import urllib.request, urllib.parse, urllib.error
import argparse, re, os, sys, copy
from bs4 import BeautifulSoup
def ptStrLiterally(str):
    """Print *str* one character at a time, silently skipping characters the
    console encoding cannot represent, then emit a trailing newline."""
    for ch in str:
        try:
            print(ch, end='')
        except UnicodeEncodeError:
            continue
    print('')
def puts(str):
    """print() the string; if the console encoding rejects it, fall back to
    ptStrLiterally(), which drops the unprintable characters."""
    try:
        print(str)
    except UnicodeEncodeError:
        ptStrLiterally(str)
def searchSubStr(str, pattern_start, pattern_end, quiet=False):
    """Return the substring of *str* lying between the first match of
    *pattern_start* and the next match of *pattern_end* after it.

    Returns '' when either pattern is absent; unless *quiet* is set, a
    diagnostic is printed in that case.
    """
    try:
        begin = re.search(pattern_start, str).end()
        length = re.search(pattern_end, str[begin:]).start()
    except AttributeError:
        if not quiet:
            print('AttributeError: Can\'t find substring')
        return ''
    return str[begin:begin + length]
def generateOpener(auth, ua):
    """Build a urllib opener carrying a User-agent header (default
    'Mozilla 5.0') and, when *auth* is given, the bangumi auth cookie."""
    opener = urllib.request.build_opener()
    agent = ua if ua else 'Mozilla 5.0'
    opener.addheaders = [('User-agent', agent)]
    if auth:
        opener.addheaders.append(('Cookie', 'chii_auth=' + auth))
    return opener
def getHtml(url, auth, ua):
    """Fetch *url* through the auth/UA opener and return the raw response
    bytes; on network failure print a notice and return None."""
    opener = generateOpener(auth, ua)
    try:
        return opener.open(url).read()
    except urllib.error.URLError:
        print(url)
        print('No response...')
        return None
def getProgress(url, auth, ua):
    """Fetch a subject page and return the value of its 'watchedeps' input
    (the watched-episodes count) as a string.

    Returns '' and prints a diagnostic when the request fails, or when the
    element is missing -- typically because the auth cookie does not match
    the user id (soup.find returns None, so the subscript raises TypeError).
    """
    opener = generateOpener(auth, ua)
    try:
        html = opener.open(url).read()
        #soup = BeautifulSoup(html.decode('utf-8'))
        soup = BeautifulSoup(html.decode('utf-8'), "lxml")
        p = soup.find('input', id='watchedeps')['value']
    except urllib.error.URLError as e:
        print(url)
        print('No response...')
        return ''
    except TypeError as e:
        print(url)
        # Fixed typo in the diagnostic ('TyepError' -> 'TypeError').
        print('TypeError: NoneType')
        print('Error: the given auth string doesn\'t match the user id')
        return ''
    else:
        return p
def getIDnGh(li):
    """Extract [subject id, gh token] from one collection-list item (a bs4 tag).

    Reads the onclick attribute of the second link inside the item's
    'collectModify' paragraph and slices off the surrounding JS call text.
    NOTE(review): the 20:-2 slice assumes the JS prefix before the arguments
    is exactly 20 characters -- confirm against the live page markup.
    """
    idngh = li.find('p', class_='collectModify').find_all('a')[1]['onclick']
    # [subid, gh]
    return idngh[20:-2].split(", '")
def removeItem(domain, subid, auth, ua, gh):
    """Call the collection-remove endpoint for one subject.

    Returns True on success; on network failure prints a notice and
    returns False.
    """
    opener = generateOpener(auth, ua)
    rmlink = domain + '/subject/' + subid + '/remove?gh=' + gh
    try:
        opener.open(rmlink)
    except urllib.error.URLError:
        print(rmlink)
        print('Cant erase subject %s' % subid)
        return False
    return True
def export(domain, auth, ua, uid, path, wipe):
    """Scrape every (category, collection-type) list page of user *uid* and
    write each non-empty list to path/bangumi_<cat>_<type>.txt.

    For anime/real items currently being watched (and only when *auth* is
    available), the watched-episode progress is fetched per subject.
    When *wipe* is true each exported item is also removed from the
    collection on the site (this is the "tenkou" behaviour).
    """
    cats = ['anime', 'game', 'music', 'book', 'real']
    types = ['do', 'collect', 'wish', 'on_hold', 'dropped']
    # types = ['do', 'wish', 'on_hold', 'dropped']
    # types = ['do', 'on_hold', 'dropped']
    cats_c = {'anime' : '动画',
              'game' : '游戏',
              'music' : '音乐',
              'book' : '书籍',
              'real' : '电视剧'}
    types_c = {'do' : '在看',
               'collect' : '看过',
               'wish' : '想看',
               'on_hold' : '搁置',
               'dropped' : '抛弃'}
    # Cartesian product: every category crossed with every collection type.
    cats_types = [(c, t) for c in cats for t in types]
    for cat, type in cats_types:
        # if cat == 'anime' and type == 'collect':
        #     continue
        # print(types_c[type], '的', cats_c[cat], '\n')
        puts(types_c[type] + '的' + cats_c[cat] + '\n')
        pg = 1
        idx = 1
        items = ''
        # Walk paginated list pages until a page yields no items.
        while pg != 0:
            url = ''.join( [domain, '/', cat, '/list/', uid, '/',
                            type, '?page=', str(pg)] )
            html = getHtml(url, auth, ua)
            if not html:
                break
            # # test
            # with open("test.html",'w', encoding='utf-8') as ft:
            #     ft.write(html.decode('utf-8'))
            # # test
            #soup = BeautifulSoup(html.decode('utf-8'))
            soup = BeautifulSoup(html.decode('utf-8'), "lxml")
            ul = soup.find(id='browserItemList')
            content = ''
            for li in ul.children:
                # Pull out the per-item fragments: title, original title,
                # URL, star rating, short comment and collect date/tags.
                inner = li.find('div', class_='inner')
                collect_info = inner.find('p', class_='collectInfo')
                comment = inner.find('div', id='comment_box')
                stars = inner.find('span', class_='starsinfo')
                greyname = inner.h3.small
                href = domain + inner.h3.a['href']
                iname = str(idx) + '. ' + inner.h3.a.text.strip() + '\n'
                iurl = '地址:' + href + '\n'
                icollect_info = collect_info.text.strip() + '\n'
                if greyname:
                    igreyname = '原名:' + greyname.text.strip() + '\n'
                else:
                    igreyname = ''
                if stars:
                    # The star count is encoded in the class name, e.g.
                    # 'sstars7' -> '7'.
                    istars = '评分:' + stars['class'][0][6:] + '星\n'
                else:
                    istars = ''
                if comment:
                    icomment = ('简评:'
                                + inner.find('div',
                                             id='comment_box').text.strip()
                                + '\n')
                else:
                    icomment = ''
                if ( (cat == 'anime' or cat == 'real')
                     and type == 'do'
                     and auth ):
                    # Episode progress lives on the subject page itself.
                    iprogress = '进度:' + getProgress(href, auth, ua) + '\n'
                else:
                    iprogress = ''
                # print(iname)
                puts(iname)
                content += (iname + igreyname + iurl + istars + icomment
                            + iprogress + icollect_info + '\n')
                idx += 1
                if wipe:
                    # remove item
                    try:
                        subid, gh = getIDnGh(li)
                        removeItem(domain, subid, auth, ua, gh)
                    except:
                        print('Error: wrong auth string\n')
            if content != '':
                items += content
                pg += 1
            else:
                pg = 0
        if items == '':
            continue
        file_name = path + '/bangumi_' + cat + '_' + type + '.txt'
        with open(file_name, 'w', encoding='utf-8') as f:
            f.write(items)
def getAuth(domain, auth, ua, authfile, uid, password):
    """Resolve credentials and return (uid, auth_cookie, user_agent).

    Resolution order:
    1. explicit auth string + UA passed on the command line;
    2. an auth file whose first line is the UA and second line the auth;
    3. a live login POST using uid/password, parsing the chii_auth cookie
       from the Set-Cookie response header.
    If none of the three is available, returns the (possibly empty)
    values unchanged.
    """
    if auth and ua:
        return uid, auth, ua
    elif authfile:
        with open(authfile, 'r') as af:
            user_agent = af.readline()
            auth = af.readline()
        return uid, auth.strip(), user_agent.strip()
    elif not password:
        # print('Error: No auth string, no auth file, no password\n')
        return uid, auth, ua
    url = domain + '/login'
    # url = domain + '/FollowTheRabbit'
    data = {'cookietime': '2592000',
            'email': uid,
            'password': password,
            'loginsubmit': '登录'}
    user_agent = 'Mozilla/5.0 (Elephant 3) Midori 3.5'
    data = urllib.parse.urlencode(data).encode('utf-8')
    opener = urllib.request.build_opener()
    opener.addheaders = [('User-agent', user_agent)]
    urllib.request.install_opener(opener)
    res = urllib.request.urlopen(url, data)
    # print(res.getheaders())
    # print(res.getheader('Set-Cookie'))
    cookie = res.getheader('Set-Cookie')
    # -- use searchSubStr() --
    # start = re.search('chii_auth=', cookie).end()
    # end = re.search('(;|$)', cookie[start:]).start()
    # # print(cookie[start:end+start])
    # auth = cookie[start:end+start]
    # -- use searchSubStr() --
    auth = searchSubStr(cookie, 'chii_auth=', '(;|$)')
    return uid, auth, user_agent
def post(url, data, auth, ua):
    """POST *data* (form-encoded, UTF-8) to *url* through the auth/UA opener
    and return the HTTP response object."""
    opener = generateOpener(auth, ua)
    urllib.request.install_opener(opener)
    body = urllib.parse.urlencode(data).encode('utf-8')
    return urllib.request.urlopen(url, body)
def getGH(domain, auth, ua):
    """Scrape the logged-in homepage for the 'gh' token embedded in the
    logout link; the add/remove endpoints require it as a CSRF guard."""
    opener = generateOpener(auth, ua)
    html = opener.open(domain).read().decode('utf-8')
    pattern = '<a href="http://(bangumi.tv|bgm.tv|chii.in)/logout/'
    # -- use searchSubStr() --
    # start = re.search(pattern, html).end()
    # end = re.search('"', html[start:]).start()
    # return html[start:end+start]
    # -- use searchSubStr() --
    return searchSubStr(html, pattern, '"')
def addItem(domain, subid, type, rating, tags,
            comment, watchedeps, gh, auth, ua):
    """Write one collection entry back to the site.

    Posts interest/rating/tags/comment to the subject's interest/update
    endpoint, and, when *watchedeps* is non-empty, posts the watched
    episode count to the set/watched endpoint. Returns the response of
    the first POST.
    """
    # print(domain, subid, type, rating, tags,
    #     comment, watchedeps, gh, auth, ua)
    # on == on_hold
    types_table = {
        'wish' : 1,
        'collect' : 2,
        'do' : 3,
        'on' : 4,
        'dropped' : 5
    }
    item_action = ''.join( [domain, '/subject/', subid,
                            '/interest/update?gh=', gh] )
    item_data = {
        'referer' : 'subject',
        'interest' : types_table[type],
        'rating' : rating,
        'tags' : tags,
        'comment' : comment,
        'update' : '保存'
    }
    item_res = post(item_action, item_data, auth, ua)
    if watchedeps:
        # Progress is a separate endpoint, posted only when we have a value.
        eps_action = ''.join( [domain, '/subject/set/watched/', subid] )
        eps_data = {
            'referer' : 'subject',
            'subject' : '更新',
            'watchedeps' : watchedeps
        }
        eps_res = post(eps_action, eps_data, auth, ua)
    return item_res
def restore(domain, auth, ua, path):
    """Re-import previously exported bangumi_<cat>_<type>.txt files.

    Parses each file line by line, reconstructing one record per numbered
    item (title, subject id, rating, tags, comment, progress), then replays
    the records back to the site via addItem() in reverse order so the
    original collect order is preserved.
    """
    # Template for one parsed item; deep-copied per record.
    basic_dict = {
        'title' : '',
        'subid' : '',
        'type' : '',
        'rating' : '',
        'tags' : '',
        'comment' : '',
        'watchedeps' : '',
    }
    # Maps the 2-char Chinese line prefix to the record field it fills.
    m_dict = {
        '简评' : 'comment',
        '进度' : 'watchedeps',
    }
    part_a = '_(anime|game|music|book|real)'
    part_b = '_(do|collect|wish|on_hold|dropped)'
    files_name_pattern = 'bangumi' + part_a + part_b + '.txt$'
    files = filter(lambda x : re.match(files_name_pattern, x), os.listdir(path))
    gh = getGH(domain, auth, ua)
    for file in files:
        print(file, '\n')
        items_dict = {}
        counter = 0
        with open(path + file, 'r', encoding='utf-8') as f:
            items = f.readlines()
        for line in items:
            # print(line)
            line = line.strip()
            if re.match('\d+\. ', line):
                # "N. Title" starts a new record; the collection type is
                # taken from the file name (third underscore field).
                counter += 1
                items_dict[counter] = copy.deepcopy(basic_dict)
                items_dict[counter]['title'] = line
                type = file.split('.')[0].split('_')[2]
                items_dict[counter]['type'] = type
            elif re.match('\d{4}-\d{1,2}-\d{1,2}', line):
                # Collect-info line: "YYYY-M-D ... 标签: tag1 tag2"
                tags = searchSubStr(line, '标签: ', '$', True)
                items_dict[counter]['tags'] = tags
            elif line.startswith('地址'):
                subid = searchSubStr(line, '\.(tv|in)/subject/', '$')
                items_dict[counter]['subid'] = subid
            elif line.startswith('评分'):
                # "评分:7星" -> keep just the digits between prefix and 星.
                items_dict[counter]['rating'] = line[3:-1]
            else:
                # NOTE(review): unknown prefixes make m None and create a
                # None key in the record -- harmless for addItem but worth
                # confirming for malformed input files.
                m = m_dict.get(line[:2])
                items_dict[counter][m] = line[3:]
        n = len( items_dict.keys() )
        # Replay newest-numbered first so the site ends up in original order.
        for i in range(n, 0, -1):
            # print(items_dict[i]['subid'],
            #     items_dict[i]['type'],
            #     items_dict[i]['rating'],
            #     items_dict[i]['tags'],
            #     items_dict[i]['comment'],
            #     items_dict[i]['watchedeps'],
            #     gh,
            #     auth,
            #     ua)
            puts(items_dict[i]['title'] + '\n')
            addItem(domain,
                    items_dict[i]['subid'],
                    items_dict[i]['type'],
                    items_dict[i]['rating'],
                    items_dict[i]['tags'],
                    items_dict[i]['comment'],
                    items_dict[i]['watchedeps'],
                    gh,
                    auth,
                    ua)
def main():
    '''CLI entry point: parse arguments, resolve credentials, then either
    export the user's collections to text files or restore them from the
    files in --path (optionally wiping entries from the site with --wipe).'''
    # parse argv start
    parser = argparse.ArgumentParser(prog="tenkou.py")
    parser.add_argument("-d", "--domain",
                        default="bgm.tv",
                        choices=["chii.in", "bgm.tv", "bangumi.tv"],
                        help="choose domain, default is bgm.tv")
    parser.add_argument("-u", "--uid",
                        help="your id")
    parser.add_argument("--password",
                        help="give me your password")
    parser.add_argument("-p", "--path",
                        default="./",
                        help="change the directory "\
                             "where you save files")
    parser.add_argument("--auth",
                        help="your auth string")
    parser.add_argument("--useragent",
                        help="your user-agent")
    parser.add_argument("--authfile",
                        help="specify the location of "\
                             "your auth file")
    parser.add_argument("-r", "--restore",
                        action="store_true",
                        help="restore your data")
    parser.add_argument("--wipe",
                        action="store_true",
                        help="tenkou")
    parser.add_argument("-v", "--version",
                        action='version',
                        version='v0.0.2')
    args = parser.parse_args()
    # parse argv end
    # Basic validation: need a writable directory and a user id.
    if not os.path.isdir(args.path):
        print("Error: Local path doesn't exist")
        return
    if not args.uid:
        print('Error: Please tell me your id')
        return
    path = args.path + "/"
    domain = 'http://' + args.domain
    wipe = args.wipe
    # print(wipe==True)
    uid, auth, ua = getAuth(domain,
                            args.auth,
                            args.useragent,
                            args.authfile,
                            args.uid,
                            args.password)
    if not args.restore:
        export(domain, auth, ua, uid, path, wipe)
    else:
        restore(domain, auth, ua, path)
    print("Complete")
main()
| [
"aaray21cn@gmail.com"
] | aaray21cn@gmail.com |
8dcc2947e1a739ffad867c6bf674d20d81008c49 | 0abd812a50ba3330734fcbb0088a74c5ad6735a2 | /python/utf8_for_emojis.py | 695f4f879e0986f5202ac4876ea2878fd0bf97aa | [] | no_license | scMarth/Learning | a914af6f6327454234e5f98dfc8cf95d6d4f8077 | ae696461c2c8edc9944879503cce01d525cf4ce0 | refs/heads/master | 2023-08-03T05:13:03.162533 | 2023-07-28T22:58:51 | 2023-07-28T22:58:51 | 120,689,926 | 2 | 0 | null | 2022-12-11T13:14:07 | 2018-02-08T00:33:42 | JavaScript | UTF-8 | Python | false | false | 3,984 | py | # convert json to csv
import arcpy, os, shutil, numpy, json, codecs
fields = {
'request' : [ \
'id', \
'master', \
'addDate', \
'addDateUnix', \
'lastAction', \
'lastActionUnix', \
'dept', \
'displayDate', \
'displayLastAction', \
'status', \
'streetId', \
'streetName', \
'streetNum', \
'crossStreetId', \
'crossStreetName', \
'cityId', \
'cityName', \
'district', \
'comments', \
'privateNotes', \
'submitter', \
'typeId', \
'typeName', \
'priorityValue', \
'latitude', \
'longitude', \
'aggregatorId', \
'aggregatorInfo', \
'origin', \
'priorityToDisplay' \
],
'activity' : [ \
'actDate', \
'actDateUnix', \
'attachments', \
'code', \
'codeDesc', \
'comments', \
'displayDate', \
'id', \
'notify', \
'requestId', \
'routeId', \
'user', \
'files', \
'isEditable' \
],
'attachment' : [ \
'createDateUnix', \
'createDate', \
'fileName', \
'id', \
'parent', \
'parentType', \
'size', \
'user' \
],
'submitter' : [ \
'id', \
'firstName', \
'lastName', \
'middleInitial', \
'address', \
'address2', \
'city', \
'state', \
'zip', \
'email', \
'phone', \
'phoneExt', \
'altPhone', \
'altPhoneExt', \
'password', \
'aggregatorId', \
'verified', \
'banned', \
'twitterId', \
'twitterScreenName', \
'notifyEmail', \
'notifyPhone', \
'notifyAltPhone', \
'notifyMail', \
'notifyPush', \
'notifyPhoneSms', \
'notifyAltPhoneSms' \
]
}
def escaped(inputStr):
    """Return *inputStr* with CR, LF, backslash and double-quote replaced by
    their backslash-escaped two-character forms, so a value can be embedded
    inside a double-quoted CSV field without breaking the record."""
    table = str.maketrans({
        "\r": "\\r",
        "\n": "\\n",
        "\\": "\\\\",
        "\"": "\\\"",
    })
    return inputStr.translate(table)
# Reads a requests/dump JSON file and writes one CSV per top-level key
# (REQUEST.csv, ACTIVITY.csv, ...) into 'workspace', using the column lists
# declared in the module-level 'fields' dict. The 'deleted' key is skipped.
# NOTE(review): the original comment mentioned a fgdb, but this function
# writes CSV files only.
def write_json_file_to_csv(workspace, json_path):
    with open(json_path) as json_file:
        data = json.load(json_file)
    for key in data:
        if key == 'deleted':
            continue
        output_filepath = workspace + r'\\' + key.upper() + '.csv'
        print('Writing' + output_filepath)
        # delete file if it exists
        if os.path.exists(output_filepath):
            os.unlink(output_filepath)
        with codecs.open(output_filepath, 'w', encoding='utf8') as file:
            # write header
            for i in range(len(fields[key]) - 1):
                file.write(escaped(fields[key][i]) + ',')
            file.write(escaped(fields[key][-1]) + '\n')
            # write records
            # NOTE(review): values are backslash-escaped rather than using
            # standard CSV quote-doubling -- consumers must expect that
            # convention; the stdlib csv module would produce standard output.
            for i in range(len(data[key])):
                record = data[key][i]
                # print(record)
                for j in range(len(fields[key]) - 1):
                    # print(j)
                    file.write('"' + escaped(str(record[fields[key][j]])) + '",')
                file.write('"' + escaped(str(record[fields[key][-1]])) + '"\n')
        print('{} records written.\n'.format(len(data[key])))
workspace = os.path.dirname(__file__) + r'\request_data'
write_json_file_to_csv(workspace, workspace + r'\response.json') | [
"vlantaca@gmail.com"
] | vlantaca@gmail.com |
c124ea30bcfbbcba93bb65a323ba6e966cc49570 | 4c56f7e21fead7114e175fe621ea45c053345719 | /main.py | 574da7f0d5de04ac527e1b7cd093c7f607b0a1f2 | [] | no_license | M1keShum/ChatBot | 61e698c75f01b462c1fb9f062c849ddbf91b1ca4 | 2df6d4782f008acccff8a90b25300e304b632f46 | refs/heads/master | 2022-12-24T15:06:38.986797 | 2020-10-05T12:58:00 | 2020-10-05T12:58:00 | 301,353,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,333 | py | import datetime as dt
import requests
DATABASE = {
'Сергей': 'Омск',
'Соня': 'Москва',
'Алексей': 'Калининград',
'Миша': 'Москва',
'Дима': 'Челябинск',
'Алина': 'Красноярск',
'Егор': 'Пермь',
'Коля': 'Красноярск',
'Артём': 'Владивосток',
'Петя': 'Михайловка'
}
UTC_OFFSET = {
'Москва': 3,
'Санкт-Петербург': 3,
'Новосибирск': 7,
'Екатеринбург': 5,
'Нижний Новгород': 3,
'Казань': 3,
'Челябинск': 5,
'Омск': 6,
'Самара': 4,
'Ростов-на-Дону': 3,
'Уфа': 5,
'Красноярск': 7,
'Воронеж': 3,
'Пермь': 5,
'Волгоград': 4,
'Краснодар': 3,
'Калининград': 2,
'Владивосток': 10
}
def format_count_friends(count_friends):
    """Return the count followed by the correct Russian plural of 'друг'.

    Uses the standard Russian plural rules: 'друг' for counts ending in 1
    (but not 11), 'друга' for counts ending in 2-4 (but not 12-14),
    'друзей' otherwise. The old version handled only 1 and 2-4 literally,
    so e.g. 21 produced '21 друзей' instead of '21 друг'.
    """
    n = abs(count_friends)
    if n % 10 == 1 and n % 100 != 11:
        return f'{count_friends} друг'
    elif 2 <= n % 10 <= 4 and not 12 <= n % 100 <= 14:
        return f'{count_friends} друга'
    else:
        return f'{count_friends} друзей'
def what_time(city):
    """Return the current wall-clock time in *city* formatted as 'HH:MM'.

    Looks the city's UTC offset up in the module-level UTC_OFFSET table;
    raises KeyError for unknown cities (callers pre-check membership).
    """
    offset = UTC_OFFSET[city]
    # Use an aware UTC "now" instead of the deprecated datetime.utcnow();
    # adding the offset and formatting %H:%M gives identical output.
    city_time = dt.datetime.now(dt.timezone.utc) + dt.timedelta(hours=offset)
    return city_time.strftime("%H:%M")
def what_weather(city):
    """Return a one-line weather summary for *city* from wttr.in.

    format=2 requests the compact one-line output; 'M' asks for wind speed
    in metres per second. Returns a Russian error marker string on network
    failure or a non-200 response.
    """
    url = f'http://wttr.in/{city}'
    weather_parameters = {
        'format': 2,
        'M': ''
    }
    try:
        response = requests.get(url, params=weather_parameters)
    except requests.ConnectionError:
        return '<сетевая ошибка>'
    if response.status_code == 200:
        return response.text.strip()
    else:
        return '<ошибка на сервере погоды>'
def process_anfisa(query):
    """Answer the queries addressed to Anfisa herself.

    Supports three fixed questions about the friend DATABASE (count, names,
    cities); anything else yields the unknown-query marker.
    """
    if query == 'сколько у меня друзей?':
        count_string = format_count_friends(len(DATABASE))
        return f'У тебя {count_string}'
    elif query == 'кто все мои друзья?':
        friends_string = ', '.join(DATABASE.keys())
        return f'Твои друзья: {friends_string}'
    elif query == 'где все мои друзья?':
        # One city per friend, in DATABASE insertion order -- same output as
        # the old join/split/index dance, without the detour.
        cities_string = ', '.join(DATABASE.values())
        return f'Твои друзья в городах: {cities_string}'
    else:
        return '<неизвестный запрос>'
def process_friend(name, query):
    """Answer a query addressed to a friend by name.

    Supports 'ты где?', 'который час?' and 'как погода?'; unknown names
    and unknown questions produce the corresponding marker strings.
    """
    if name not in DATABASE:
        return f'У тебя нет друга по имени {name}'
    city = DATABASE[name]
    if query == 'ты где?':
        return f'{name} в городе {city}'
    if query == 'который час?':
        if city not in UTC_OFFSET:
            return f'<не могу определить время в городе {city}>'
        time = what_time(city)
        return f'Там сейчас {time}'
    if query == 'как погода?':
        return what_weather(city)
    return '<неизвестный запрос>'
def process_query(query):
    """Split a 'Name, question' string and dispatch it to either Anfisa's
    own handler or the per-friend handler."""
    tokens = query.split(', ')
    addressee, question = tokens[0], tokens[1]
    if addressee == 'Анфиса':
        return process_anfisa(question)
    return process_friend(addressee, question)
def runner():
    """Demo driver: feed a fixed list of queries through the bot and print
    each query together with its answer."""
    queries = [
        'Анфиса, сколько у меня друзей?',
        'Анфиса, кто все мои друзья?',
        'Анфиса, где все мои друзья?',
        'Анфиса, кто виноват?',
        'Коля, ты где?',
        'Соня, что делать?',
        'Антон, ты где?',
        'Алексей, который час?',
        'Артём, который час?',
        'Антон, который час?',
        'Петя, который час?',
        'Коля, как погода?',
        'Соня, как погода?',
        'Антон, как погода?'
    ]
    for query in queries:
        answer = process_query(query)
        print(query, '-', answer)
runner()
| [
"shumaher.mih@yandex.ru"
] | shumaher.mih@yandex.ru |
17e914aac8110ab19e8448f67594dcc2b1be380c | cee96536d5115a20bd271d7ff5626da496197ac6 | /test_coco.py | ce245527e8ec25e646dbf982ae9dda955ca58fb4 | [] | no_license | YaojwDefgun/new-YOLOv1_PyTorch | 0855a8b0dcf8960057ccf82dcf341f480069a789 | f81b1b033fe2ad9a62bd61ad0bab0f47a4463f42 | refs/heads/master | 2023-01-03T21:28:34.243705 | 2020-10-22T12:21:31 | 2020-10-22T12:21:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,138 | py | import os
import argparse
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from data.cocodataset import *
from data import config, BaseTransform, VOCAnnotationTransform, VOCDetection, VOC_ROOT, VOC_CLASSES
import numpy as np
import cv2
import time
from decimal import *
# Command-line interface: model version, dataset choice, checkpoint path and
# visualization options for the YOLO test/visualization script.
parser = argparse.ArgumentParser(description='YOLO Detection')
parser.add_argument('-v', '--version', default='yolo',
                    help='yolo.')
parser.add_argument('-d', '--dataset', default='COCO_val',
                    help='we use VOC, COCO_val, COCO_test-dev, to test.')
parser.add_argument('-bk', '--backbone', type=str, default='r18',
                    help='r18, r50, d19')
parser.add_argument('--trained_model', default='weights/coco/',
                    type=str, help='Trained state_dict file path to open')
parser.add_argument('--visual_threshold', default=0.3, type=float,
                    help='Final confidence threshold')
# NOTE(review): type=bool treats any non-empty string as True, so
# `--cuda False` still enables CUDA; an action='store_true' flag would be safer.
parser.add_argument('--cuda', default=True, type=bool,
                    help='Use cuda to test model')
parser.add_argument('--dataset_root', default='/home/k303/object-detection/dataset/COCO/',
                    help='Location of VOC root directory')
parser.add_argument('-f', default=None, type=str,
                    help="Dummy arg so we can load in Jupyter Notebooks")
parser.add_argument('--debug', action='store_true', default=False,
                    help='debug mode where only one image is trained')
args = parser.parse_args()
# Human-readable names for the COCO category slots, indexed by the official
# (non-contiguous) COCO category id; index 0 is background.
coco_class_labels = ('background',
                        'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck',
                        'boat', 'traffic light', 'fire hydrant', 'street sign', 'stop sign',
                        'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
                        'elephant', 'bear', 'zebra', 'giraffe', 'hat', 'backpack', 'umbrella',
                        'shoe', 'eye glasses', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
                        'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
                        'skateboard', 'surfboard', 'tennis racket', 'bottle', 'plate', 'wine glass',
                        'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
                        'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
                        'couch', 'potted plant', 'bed', 'mirror', 'dining table', 'window', 'desk',
                        'toilet', 'door', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
                        'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'blender', 'book',
                        'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')

# Maps the model's 80 contiguous class indices to the official COCO category
# ids above (used in test_net to look up the class name for a detection).
coco_class_index = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20,
                    21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
                    46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67,
                    70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
def test_net(net, device, testset, transform, thresh, mode='voc'):
    """Run *net* over every image in *testset* and display the detections.

    Args:
        net: detector; called as net(x) and expected to return a tuple
            (bbox_pred, scores, cls_inds) with boxes normalized to [0, 1].
        device: torch device the preprocessed image tensor is moved to.
        testset: dataset object providing pull_image(index).
        transform: preprocessing transform applied to each BGR image.
        thresh: minimum confidence score for a box to be drawn.
        mode: unused; kept for backward compatibility with existing callers.
    """
    # One random BGR color per class for drawing boxes.
    class_color = [(np.random.randint(255), np.random.randint(255), np.random.randint(255)) for _ in range(80)]
    num_images = len(testset)
    for index in range(num_images):
        print('Testing image {:d}/{:d}....'.format(index + 1, num_images))
        if args.dataset == 'COCO_val' or args.dataset == 'COCO-test' or args.dataset == 'COCO_test-dev':
            img, _ = testset.pull_image(index)
        elif args.dataset == 'VOC':
            img = testset.pull_image(index)

        # BGR -> RGB, HWC -> CHW, add batch dimension.
        x = torch.from_numpy(transform(img)[0][:, :, (2, 1, 0)]).permute(2, 0, 1)
        x = x.unsqueeze(0).to(device)

        # time.clock() was removed in Python 3.8; perf_counter() is the
        # recommended replacement for interval timing.
        t0 = time.perf_counter()
        with torch.no_grad():  # inference only: skip autograd bookkeeping
            bbox_pred, scores, cls_inds = net(x)  # forward pass
        print("detection time used ", time.perf_counter() - t0, "s")

        # Scale normalized boxes back up to the original image size.
        scale = np.array([[img.shape[1], img.shape[0],
                           img.shape[1], img.shape[0]]])
        bbox_pred *= scale

        for i, box in enumerate(bbox_pred):
            if scores[i] <= thresh:
                continue
            cls_indx = cls_inds[i]
            xmin, ymin, xmax, ymax = box
            box_w = int(xmax - xmin)
            color = class_color[int(cls_indx)]
            cv2.rectangle(img, (int(xmin), int(ymin)), (int(xmax), int(ymax)), color, 2)
            # Filled background strip above the box for the class label.
            cv2.rectangle(img, (int(xmin), int(abs(ymin) - 15)), (int(xmin + box_w * 0.55), int(ymin)), color, -1)
            cls_id = coco_class_index[int(cls_indx)]  # contiguous index -> COCO category id
            cls_name = coco_class_labels[cls_id]
            mess = '%s: %.3f' % (cls_name, scores[i])
            cv2.putText(img, mess, (int(xmin), int(ymin)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 2)
        cv2.imshow('detection', img)
        cv2.waitKey(0)
        # print('Saving the' + str(index) + '-th image ...')
        # cv2.imwrite('test_images/' + args.dataset+ '3/' + str(index).zfill(6) +'.jpg', img)
def test():
    """Entry point: build device, dataset and model from CLI args, then visualize detections."""
    # get device
    if args.cuda:
        cudnn.benchmark = True
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")

    # build dataset
    num_classes = 80
    if args.dataset == 'COCO_val':
        cfg = config.coco_af
        input_size = cfg['min_dim']
        testset = COCODataset(
                    data_dir=args.dataset_root,
                    json_file='instances_val2017.json',
                    name='val2017',
                    img_size=cfg['min_dim'][0],
                    debug=args.debug)
    elif args.dataset == 'COCO_test-dev':
        cfg = config.coco_af
        input_size = cfg['min_dim']
        testset = COCODataset(
                    data_dir=args.dataset_root,
                    json_file='image_info_test-dev2017.json',
                    name='test2017',
                    img_size=cfg['min_dim'][0],
                    debug=args.debug)
    elif args.dataset == 'VOC':
        cfg = config.voc_af
        input_size = cfg['min_dim']
        # NOTE(review): num_classes stays 80 here even though VOC has 20
        # classes — confirm this matches the training configuration.
        testset = VOCDetection(VOC_ROOT, [('2007', 'test')], None, VOCAnnotationTransform())
    else:
        # Previously an unknown dataset fell through and crashed later with a
        # NameError on `cfg`; fail fast with a clear message instead,
        # mirroring the unknown-version handling below.
        print('Unknown Dataset !!!')
        exit()

    # build model
    if args.version == 'yolo':
        from models.yolo import myYOLO
        net = myYOLO(device, input_size=input_size, num_classes=num_classes, trainable=False)
        print('Let us test YOLO on the %s dataset ......' % (args.dataset))
    else:
        print('Unknown Version !!!')
        exit()

    net.load_state_dict(torch.load(args.trained_model, map_location=device))
    net.to(device).eval()
    print('Finished loading model!')

    # evaluation
    test_net(net, device, testset,
             BaseTransform(net.input_size, mean=(0.406, 0.456, 0.485), std=(0.225, 0.224, 0.229)),
             thresh=args.visual_threshold)


if __name__ == '__main__':
    test()
"1394571815@qq.com"
] | 1394571815@qq.com |
544f012ed613c50b88a731844aa93e3c38e64a57 | 79047f578878605269c454b05a43e7fb085dbe48 | /fairseq/playaround.py | 76821ecd0a59bace39acbd2cc08c31c94be313c2 | [
"MIT"
] | permissive | PANhuihuihuihui/NLP | 463249d7a7e374cf157096785363becd5da850eb | 9b00d54ad3e64355f02feeb4f045cacf7fca0bc9 | refs/heads/main | 2023-06-06T03:55:36.571861 | 2021-06-29T06:12:48 | 2021-06-29T06:12:48 | 330,099,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | import pandas as pd
import numpy as np
df = pd.read_csv('../input/alldata.csv') | [
"phjhk@connect.hku.hk"
] | phjhk@connect.hku.hk |
63f0aa6fed60ac58358fb125d683f0e2d987078c | 706d5ff4707793a225f41c469f19a4f1891078da | /代理ip/dao/save2mysql.py | f163a4e4c6a028cfa5735492a16e9fe3c177e889 | [] | no_license | dcc668/PyDemo1.2 | eb5f13a19343e4d9d82fdd7c54f6f45622c5c00e | f883ca1d9bc04673beb9b40d889da74d2aaa5095 | refs/heads/master | 2020-04-15T02:23:16.000157 | 2019-01-30T23:57:41 | 2019-01-30T23:57:41 | 164,312,703 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,902 | py | # -*- coding: utf-8 -*-
import pymysql.cursors
import time
from sshtunnel import SSHTunnelForwarder
class Data2MySql():
    """Persist scraped proxy IPs into the remote `t_ips` MySQL table.

    On construction this opens an SSH tunnel to the database host, connects
    through the forwarded local port with PyMySQL, and truncates `t_ips`
    so every run starts from a clean slate.
    """

    def __init__(self):
        # SECURITY NOTE(review): SSH and DB credentials are hard-coded here;
        # they should be moved to configuration or environment variables.
        self.server = SSHTunnelForwarder(
            ('39.108.122.83', 22),
            ssh_password="Dcc1234&",
            ssh_username="root",
            remote_bind_address=('172.18.150.104', 3306))
        self.server.start()  # start the SSH tunnel
        local_port = self.server.local_bind_port
        # MySQL connection settings: reach the remote server through the
        # locally forwarded tunnel port.
        config = {
            'host': '127.0.0.1',
            'port': local_port,
            'user': 'cc',
            'password': '1234',
            'db': 'mydb',
            'charset': 'utf8',
            'cursorclass': pymysql.cursors.DictCursor,
        }
        self.connection = pymysql.connect(**config)
        self.cursor = self.connection.cursor()
        # Empty t_ips so stale proxies from a previous run are discarded.
        # (The original comment claimed this creates a table; it only truncates.)
        self.clean_table()

    def clean_table(self):
        """Truncate the t_ips table."""
        clean_sql = "truncate table t_ips;"
        self.cursor.execute(clean_sql)
        self.connection.commit()
        print('t_ips 表已经清空!')

    def process_item(self, ips):
        """Insert each proxy in *ips* as a row of t_ips using parameterized SQL."""
        for ip in ips:
            # (ip, creation timestamp, fixed ordering weight)
            args = (
                ip,
                time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
                1
            )
            newsSqlText = "insert into t_ips(" \
                          "ips,create_time," \
                          "orderBy)values(" \
                          "%s,%s,%s)"
            print(newsSqlText)
            self.cursor.execute(newsSqlText, args)
            print('执行sql 完成!')
        self.connection.commit()

    def __del__(self):
        # __init__ may have failed partway (tunnel or DB connection refused),
        # leaving some attributes unset; guard each close so destruction
        # never raises AttributeError.
        cursor = getattr(self, 'cursor', None)
        if cursor is not None:
            cursor.close()
        connection = getattr(self, 'connection', None)
        if connection is not None:
            connection.close()
        server = getattr(self, 'server', None)
        if server is not None:
            server.stop()
| [
"1187053696@qq.com"
] | 1187053696@qq.com |
1c9e3b879141282edd5569d79e16594bb83d4f29 | f51ac19ce4d1df15eba02c4b3481533087d5ef9e | /day03/xiaohuar/start.py | 06058cbe787a1bb3530230ff1fa09be09169f548 | [] | no_license | disenQF/xpy903_scrapy | c9e0818f4ad08614f933ec800d680439e3f22ea6 | 7fd1f89f2cbf046b59774071c48801dfc3c5b54d | refs/heads/master | 2022-08-09T13:53:10.104037 | 2019-09-27T09:06:15 | 2019-09-27T09:06:15 | 210,261,888 | 1 | 0 | null | 2022-07-29T22:35:50 | 2019-09-23T04:05:10 | Python | UTF-8 | Python | false | false | 156 | py | #!/usr/bin/python3
# coding: utf-8
from scrapy import cmdline
if __name__ == '__main__':
    # Launch the 'hua' spider via Scrapy's CLI and export scraped items to hua.json.
    cmdline.execute(['scrapy', 'crawl', 'hua', '-o', 'hua.json'])
"610039018@qq.com"
] | 610039018@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.