| blob_id (string, 40 chars) | directory_id (string, 40 chars) | path (string, 3-281 chars) | content_id (string, 40 chars) | detected_licenses (list, 0-57 items) | license_type (string, 2 classes) | repo_name (string, 6-116 chars) | snapshot_id (string, 40 chars) | revision_id (string, 40 chars) | branch_name (string, 313 classes) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 18.2k-668M, nullable) | star_events_count (int64, 0-102k) | fork_events_count (int64, 0-38.2k) | gha_license_id (string, 17 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 107 classes) | src_encoding (string, 20 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 4-6.02M) | extension (string, 78 classes) | content (string, 2-6.02M chars) | authors (list, 1 item) | author (string, 0-175 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
870295855028634658daaffe4d7d3ceb2ad0de83
|
518a08f0119c2158c4f7efa8343acfe4d2e7cefc
|
/project/delete.py
|
fe9993ba083458b4c0eb39e42a6a9606fe6685db
|
[] |
no_license
|
WahyuSulistya27/hellio_word
|
52efc978dc7b4d00e60521d3c6602496c71cb6c5
|
cdfd40911a76adda492ab56d53ce12017f2d7414
|
refs/heads/main
| 2023-07-29T00:55:59.392609
| 2021-09-17T15:10:02
| 2021-09-17T15:10:02
| 407,381,410
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 500
|
py
|
from fastapi import APIRouter
from data import dokumen as list_dokumen
from starlette.responses import Response
from connect import *
from model import *
router = APIRouter()
dokumen = list_dokumen
@router.delete("/delete/{noreg}", status_code=201)
async def delete_dokumen(noreg: int):
try:
        cursor.execute("DELETE FROM tb_document WHERE noreg=?", noreg)
cursor.commit()
except Exception as e:
print(e)
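    # Note: errors are only printed; the success message below is returned either way.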
return "Dokumen Anda Telah Dihapus"
|
[
"noreply@github.com"
] |
noreply@github.com
|
2d83f6345f4629fb349ea3e2aa1ecd09b77cec8b
|
6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386
|
/google/ads/googleads/v8/googleads-py/google/ads/googleads/v8/services/services/account_budget_proposal_service/transports/base.py
|
86d3e463eb723e6cf5e1dcff665b4d0e784c1fce
|
[
"Apache-2.0"
] |
permissive
|
oltoco/googleapis-gen
|
bf40cfad61b4217aca07068bd4922a86e3bbd2d5
|
00ca50bdde80906d6f62314ef4f7630b8cdb6e15
|
refs/heads/master
| 2023-07-17T22:11:47.848185
| 2021-08-29T20:39:47
| 2021-08-29T20:39:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,334
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
import google.auth # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.ads.googleads.v8.resources.types import account_budget_proposal
from google.ads.googleads.v8.services.types import account_budget_proposal_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-ads',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class AccountBudgetProposalServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for AccountBudgetProposalService."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/adwords',
)
def __init__(
self, *,
host: str = 'googleads.googleapis.com',
credentials: ga_credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_account_budget_proposal: gapic_v1.method.wrap_method(
self.get_account_budget_proposal,
default_timeout=None,
client_info=client_info,
),
self.mutate_account_budget_proposal: gapic_v1.method.wrap_method(
self.mutate_account_budget_proposal,
default_timeout=None,
client_info=client_info,
),
}
@property
def get_account_budget_proposal(self) -> typing.Callable[
[account_budget_proposal_service.GetAccountBudgetProposalRequest],
account_budget_proposal.AccountBudgetProposal]:
raise NotImplementedError
@property
def mutate_account_budget_proposal(self) -> typing.Callable[
[account_budget_proposal_service.MutateAccountBudgetProposalRequest],
account_budget_proposal_service.MutateAccountBudgetProposalResponse]:
raise NotImplementedError
__all__ = (
'AccountBudgetProposalServiceTransport',
)
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
803580c059f93d0449041f08382f5606284c6fb1
|
c08c6f4aafcb20664901bc3c03bb494d21b27a27
|
/ECES_ETUDIANTS/models.py
|
97e17f5e127e6b81ee89bf2857b329b0a77c9df1
|
[] |
no_license
|
sodelblaude/ECES
|
a2697035ab4f92e8539d961141feb5a66620baf7
|
40516a6b1ee70819abf7efaf018129d8ca1ab6c2
|
refs/heads/master
| 2020-06-07T17:57:29.045477
| 2019-06-21T08:38:26
| 2019-06-21T08:38:26
| 193,067,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 660
|
py
|
from django.db import models
#THE MODEL IS WHERE WE DEFINE OUR CLASSES
# This is where we put all the classes
#of our application
class Etudiant(models.Model):
Nom=models.CharField(max_length=15)
Prénom=models.CharField(max_length=15)
Age=models.CharField(max_length=10)
Adresse=models.CharField(max_length=30)
Niveau=models.CharField(max_length=15)
Option=models.CharField(max_length=25)
Matricule=models.CharField(max_length=8,unique=True)
Description=models.TextField()
photos=models.FileField(upload_to="photo")
def __str__ (self):
return self.Nom.upper()
class Meta:
ordering=('Matricule','Nom')
|
[
"valdymat103@gmail.com"
] |
valdymat103@gmail.com
|
4ce08d3a81f2c8daa89d41aab8e8d1e00d37a966
|
ca8a39e0f5b4f23a03738599f724748f9fd3a6a8
|
/climbing-stairs/climbing-stairs.py
|
088084d73bcef2500b62c15534cc913bd336f483
|
[] |
no_license
|
eschanet/leetcode
|
01bafec756267a17dbac75dba80b86ef527b7fdc
|
9fe6714d440a445ca0c7b5e2f2a2c0410157ae63
|
refs/heads/main
| 2023-07-18T10:29:56.502817
| 2021-08-29T16:13:23
| 2021-08-29T16:13:23
| 370,804,844
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 345
|
py
|
class Solution:
def climbStairs(self, n: int) -> int:
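        # Fibonacci-style DP: ways(n) = ways(n-1) + ways(n-2),
        # computed iteratively in O(n) time and O(1) space.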
if n == 1:
return 1
elif n == 2:
return 2
first, second, third = 1, 2, 3
for i in range(3,n+1):
third = first + second
first, second = second, third
return third
|
[
"eric.schanet@gmail.com"
] |
eric.schanet@gmail.com
|
f48dc0bb0e3338adb57a05756843397e2ed53700
|
e00367eb58bee8756cde8bd3218a26188e06f691
|
/a1.py
|
01f8ecc1eccdd5726ab516fd6d58fd90fa303679
|
[] |
no_license
|
kc113/pyprog1
|
176b345b09e9ba8317777401a6b2fa9257b03799
|
0f6067b6adfa9d9244093d9d60d2489ad3f57dbe
|
refs/heads/master
| 2021-07-04T07:32:29.032604
| 2020-09-09T13:44:04
| 2020-09-09T13:44:04
| 169,087,473
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,876
|
py
|
def seconds_difference(time_1, time_2):
""" (number, number) -> number
Return the number of seconds later that a time in seconds
time_2 is than a time in seconds time_1.
>>> seconds_difference(1800.0, 3600.0)
1800.0
>>> seconds_difference(3600.0, 1800.0)
-1800.0
>>> seconds_difference(1800.0, 2160.0)
360.0
>>> seconds_difference(1800.0, 1800.0)
0.0
"""
return time_2 - time_1
def hours_difference(time_1, time_2):
""" (number, number) -> float
Return the number of hours later that a time in seconds
time_2 is than a time in seconds time_1.
>>> hours_difference(1800.0, 3600.0)
0.5
>>> hours_difference(3600.0, 1800.0)
-0.5
>>> hours_difference(1800.0, 2160.0)
0.1
>>> hours_difference(1800.0, 1800.0)
0.0
"""
return (time_2 - time_1) / 3600
def to_float_hours(hours, minutes, seconds):
""" (int, int, int) -> float
Return the total number of hours in the specified number
of hours, minutes, and seconds.
Precondition: 0 <= minutes < 60 and 0 <= seconds < 60
>>> to_float_hours(0, 15, 0)
0.25
>>> to_float_hours(2, 45, 9)
2.7525
>>> to_float_hours(1, 0, 36)
1.01
"""
return hours + (minutes / 60) + (seconds / 3600)
def to_24_hour_clock(hours):
""" (number) -> number
hours is a number of hours since midnight. Return the
hour as seen on a 24-hour clock.
Precondition: hours >= 0
>>> to_24_hour_clock(24)
0
>>> to_24_hour_clock(48)
0
>>> to_24_hour_clock(25)
1
>>> to_24_hour_clock(4)
4
>>> to_24_hour_clock(28.5)
4.5
"""
return hours % 24
def get_hours(seconds):
''' (int) -> int
    seconds is a number of seconds since midnight. Return the number of hours elapsed since midnight, as seen on a clock.
    Precondition: seconds >= 0
    >>> get_hours(3800)
1
'''
return to_24_hour_clock(seconds // 3600)
def get_minutes(seconds):
''' (int) -> int
    seconds is a number of seconds since midnight. Return the number of minutes elapsed since midnight, as seen on a clock.
    Precondition: seconds >= 0
    >>> get_minutes(3800)
3
'''
return (seconds % 3600) // 60
def get_seconds(seconds):
''' (int) -> int
    seconds is a number of seconds since midnight. Return the number of seconds elapsed since midnight, as seen on a clock.
    Precondition: seconds >= 0
    >>> get_seconds(3800)
    20
'''
return seconds % 60
def time_to_utc(utc_offset, time):
""" (number, float) -> float
Return time at UTC+0, where utc_offset is the number of hours away from
UTC+0.
>>> time_to_utc(+0, 12.0)
12.0
>>> time_to_utc(+1, 12.0)
11.0
>>> time_to_utc(-1, 12.0)
13.0
>>> time_to_utc(-11, 18.0)
5.0
>>> time_to_utc(-1, 0.0)
1.0
>>> time_to_utc(-1, 23.0)
0.0
"""
hour_part = time // 1
min_part = round((time - hour_part),2)
return to_24_hour_clock(hour_part - utc_offset) + min_part
def time_from_utc(utc_offset, time):
""" (number, float) -> float
Return UTC time in time zone utc_offset.
>>> time_from_utc(+0, 12.0)
12.0
>>> time_from_utc(+1, 12.0)
13.0
>>> time_from_utc(-1, 12.0)
11.0
>>> time_from_utc(+6, 6.0)
12.0
>>> time_from_utc(-7, 6.0)
23.0
>>> time_from_utc(-1, 0.0)
23.0
>>> time_from_utc(-1, 23.0)
22.0
>>> time_from_utc(+1, 23.0)
0.0
"""
hour_part = time // 1
min_part = round((time - hour_part),2)
return to_24_hour_clock(hour_part + utc_offset) + min_part
|
[
"noreply@github.com"
] |
noreply@github.com
|
541ace914988b412fc67783dc869ac4be019861e
|
f7893f02b386bcbbb19f5fd8102b742d59de221b
|
/educazione civica/data_analysis.py
|
b08694352b2f2f1d623c91a575035d130561e603
|
[] |
no_license
|
Nick-Cora/SISTEMI-E-RETI_quarta
|
31bb708a72cd13f8c7f78235b1bd0911f6eac9db
|
8a10402176885a46cbc1e3660f01c6b941855a49
|
refs/heads/main
| 2023-06-16T15:10:28.703103
| 2021-07-14T13:49:53
| 2021-07-14T13:49:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,271
|
py
|
'''
This program plots some graphs about the effect of CO2 emissions on climate change, using matplotlib.
Author: Andrea Tomatis
'''
import matplotlib.pyplot as plt
import csv
def plotEmissionsByYear(emissions):
fig, (ax1) = plt.subplots(1, 1)
ax1.set_title('Emissions by Year and Type')
ax1.set_xlabel('Year')
ax1.set_ylabel('emissions (tons (MLN))')
xaxis = []
y1axis = []
y2axis = []
y3axis = []
y4axis = []
y5axis = []
y6axis = []
for key, value in emissions.items():
xaxis.append(key)
y1axis.append(value[0])
y2axis.append(value[1])
y3axis.append(value[2])
y4axis.append(value[3])
y5axis.append(value[4])
y6axis.append(value[5])
ax1.plot(xaxis, y1axis, '-y', label='Total Emission')
ax1.plot(xaxis, y2axis, '-r', label='Gas Fuel')
ax1.plot(xaxis, y3axis, '-m', label='Liquid Fuel')
ax1.plot(xaxis, y4axis, '-c', label='Solid Fuel')
ax1.plot(xaxis, y5axis, '-g', label='Cement')
ax1.plot(xaxis, y6axis, '-b', label='Gas Flaring')
ax1.legend()
plt.savefig('./emissionByYear.png')
def plotEmissionPerPopulation(emissions, worldPopulation):
fig, (ax1) = plt.subplots(1, 1)
xaxis = []
yaxis = []
for key,value in emissions.items():
if key < 1951:
continue
xaxis.append(value[0])
for key, value in worldPopulation.items():
yaxis.append(value)
ax1.set_title('Emission Per World population')
ax1.set_xlabel('total emissions (tons (MLN))')
ax1.set_ylabel('world population (MLR)')
ax1.plot(xaxis, yaxis, 'ob')
plt.savefig('./emissionPerPopulation.png')
def plotTotalEmissionComparations(emissions, temperatures):
fig, (ax1,ax2) = plt.subplots(2, 1)
    ax1.set_title('Total Emissions and Temperatures Comparison')
ax1.set_xlabel('emissions (tons (MLN))')
ax1.set_ylabel('temperature variance (°C)')
    ax2.set_title('Emissions Per Capita and Temperatures Comparison')
ax2.set_xlabel('emissions per capita (tons (MLN))')
ax2.set_ylabel('temperature variance (°C)')
xaxis = []
yaxis = []
x2axis = []
for key,value in emissions.items():
if key < 1950:
continue
xaxis.append(value[0])
x2axis.append(value[-1])
for key,value in temperatures.items():
if key < 1950:
continue
yaxis.append(value)
ax1.plot(xaxis, yaxis[:-7], 'oc')
ax2.plot(x2axis, yaxis[:-7], 'or')
def plotPopulationGrowth(worldPopulation):
fig, (ax1) = plt.subplots(1,1)
ax1.set_title('Population Growth')
ax1.set_xlabel('Year')
ax1.set_ylabel('Population (MLR)')
xaxis, yaxis = [], []
for key, value in worldPopulation.items():
xaxis.append(key)
yaxis.append(value)
ax1.plot(xaxis, yaxis, '4--g', label='world population growth since 1951')
ax1.legend()
plt.savefig('./populationGrowth')
def plotTotalEmissionPerCapita(emissions):
fig, (ax) = plt.subplots(1, 1)
xaxis = []
yaxis = []
    ax.set_title('Total Emissions and Emission Per Capita Comparison')
ax.set_xlabel('total emissions (tons (MLN))')
ax.set_ylabel('emissions per capita (tons (MLN))')
for key,value in emissions.items():
if key < 1950:
continue
xaxis.append(value[0])
yaxis.append(value[-1])
ax.plot(xaxis, yaxis, 'Hy')
plt.savefig('./totalEmissionPerCapita.png')
def plotTemperatureByYear(temperature):
fig, (ax) = plt.subplots(1,1)
ax.set_title('Temperature by Year (1880-2020)')
ax.set_xlabel('Year')
ax.set_ylabel('Temperature (°C)')
xaxis = []
yaxis = []
for key, value in temperature.items():
xaxis.append(key)
yaxis.append(value)
ax.plot(xaxis, yaxis, 'h-b')
plt.savefig('./temperaturesByYear.png')
def main():
emissions = {}
temperatures = {}
worldPopulation = {}
data_emissions = open("./CO2_emissions.csv")
data_emissions_reader = csv.reader(data_emissions, delimiter=',')
data_temperature = open("./Temperature_Anomaly.csv")
data_temperature_reader = csv.reader(data_temperature, delimiter=',')
data_population = open("./worldPopulation.csv")
data_population_reader = csv.reader(data_population, delimiter=',')
for i in range(5):
next(data_temperature_reader)
next(data_emissions_reader)
for row in data_temperature_reader:
temperatures[int(row[0])] = float(row[1])
for row in data_emissions_reader:
if int(row[0]) < 1880:
continue
if row[-1] == '':
row[-1] = '-1'
emissions[int(row[0])] = [float(row[i]) for i in range(1, len(row))]
for row in data_population_reader:
worldPopulation[int(row[0])] = int(row[1])
worldPopulation = dict(reversed(list(worldPopulation.items())))
plotEmissionsByYear(emissions)
plotEmissionPerPopulation(emissions, worldPopulation)
plotTotalEmissionComparations(emissions, temperatures)
plotPopulationGrowth(worldPopulation)
plotTemperatureByYear(temperatures)
plotTotalEmissionPerCapita(emissions)
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
noreply@github.com
|
cc472b1754e73618c88e880b49f00b891157f7e0
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/dev/cv/image_classification/DPN-68_ID1889_for_PyTorch/timm/data/dataset.py
|
4b32a3a0617ad45b963c62d5fc03f7d56de6b2f8
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"CC-BY-NC-4.0"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853
| 2023-07-17T02:48:18
| 2023-07-17T02:48:18
| 483,502,469
| 23
| 6
|
Apache-2.0
| 2022-10-15T09:29:12
| 2022-04-20T04:11:18
|
Python
|
UTF-8
|
Python
| false
| false
| 6,548
|
py
|
#
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
""" Quick n Simple Image Folder, Tarfile based DataSet
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch.utils.data as data
import os
import torch
import logging
from PIL import Image
from .parsers import create_parser
import torch.npu
import os
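# Pick the Ascend NPU device from the NPU_CALCULATE_DEVICE env var (default: device 0).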
NPU_CALCULATE_DEVICE = 0
if os.getenv('NPU_CALCULATE_DEVICE') and str.isdigit(os.getenv('NPU_CALCULATE_DEVICE')):
NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')
_logger = logging.getLogger(__name__)
_ERROR_RETRY = 50
class ImageDataset(data.Dataset):
def __init__(
self,
root,
parser=None,
class_map='',
load_bytes=False,
transform=None,
):
if parser is None or isinstance(parser, str):
parser = create_parser(parser or '', root=root, class_map=class_map)
self.parser = parser
self.load_bytes = load_bytes
self.transform = transform
self._consecutive_errors = 0
def __getitem__(self, index):
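        # Decode errors are tolerated: log a warning and try the next index,
        # raising only after _ERROR_RETRY consecutive failures.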
img, target = self.parser[index]
try:
img = img.read() if self.load_bytes else Image.open(img).convert('RGB')
except Exception as e:
_logger.warning(f'Skipped sample (index {index}, file {self.parser.filename(index)}). {str(e)}')
self._consecutive_errors += 1
if self._consecutive_errors < _ERROR_RETRY:
return self.__getitem__((index + 1) % len(self.parser))
else:
raise e
self._consecutive_errors = 0
if self.transform is not None:
img = self.transform(img)
if target is None:
target = torch.tensor(-1, dtype=torch.long)
return img, target
def __len__(self):
return len(self.parser)
def filename(self, index, basename=False, absolute=False):
return self.parser.filename(index, basename, absolute)
def filenames(self, basename=False, absolute=False):
return self.parser.filenames(basename, absolute)
class IterableImageDataset(data.IterableDataset):
def __init__(
self,
root,
parser=None,
split='train',
is_training=False,
batch_size=None,
class_map='',
load_bytes=False,
repeats=0,
transform=None,
):
assert parser is not None
if isinstance(parser, str):
self.parser = create_parser(
parser, root=root, split=split, is_training=is_training, batch_size=batch_size, repeats=repeats)
else:
self.parser = parser
self.transform = transform
self._consecutive_errors = 0
def __iter__(self):
for img, target in self.parser:
if self.transform is not None:
img = self.transform(img)
if target is None:
target = torch.tensor(-1, dtype=torch.long)
yield img, target
def __len__(self):
if hasattr(self.parser, '__len__'):
return len(self.parser)
else:
return 0
def filename(self, index, basename=False, absolute=False):
assert False, 'Filename lookup by index not supported, use filenames().'
def filenames(self, basename=False, absolute=False):
return self.parser.filenames(basename, absolute)
class AugMixDataset(torch.utils.data.Dataset):
"""Dataset wrapper to perform AugMix or other clean/augmentation mixes"""
def __init__(self, dataset, num_splits=2):
self.augmentation = None
self.normalize = None
self.dataset = dataset
if self.dataset.transform is not None:
self._set_transforms(self.dataset.transform)
self.num_splits = num_splits
def _set_transforms(self, x):
assert isinstance(x, (list, tuple)) and len(x) == 3, 'Expecting a tuple/list of 3 transforms'
self.dataset.transform = x[0]
self.augmentation = x[1]
self.normalize = x[2]
@property
def transform(self):
return self.dataset.transform
@transform.setter
def transform(self, x):
self._set_transforms(x)
def _normalize(self, x):
return x if self.normalize is None else self.normalize(x)
def __getitem__(self, i):
x, y = self.dataset[i] # all splits share the same dataset base transform
x_list = [self._normalize(x)] # first split only normalizes (this is the 'clean' split)
# run the full augmentation on the remaining splits
for _ in range(self.num_splits - 1):
x_list.append(self._normalize(self.augmentation(x)))
return tuple(x_list), y
def __len__(self):
return len(self.dataset)
|
[
"wangjiangben@huawei.com"
] |
wangjiangben@huawei.com
|
11f4eb1213cc849d8556bf6e1d9aa1f0ca6f08b8
|
1b7da2f11e509828476e21ca665279602de7b509
|
/portal/users/forms.py
|
661f5b3ff3f721fe5bdc8ca0f4e8022ce56aa3f7
|
[] |
no_license
|
rosenene/oap
|
922f3955e4f3a583e6829eed0d518f2c7f806d58
|
32598b7d6c9d6677c889258f21752878ad30d0a5
|
refs/heads/master
| 2022-04-26T20:31:37.850145
| 2020-04-16T07:47:19
| 2020-04-16T07:47:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,478
|
py
|
from django.contrib.auth import get_user_model
from django import forms as django_forms
from django.contrib.auth import forms, get_user_model
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import Group
from cv_registration import models
User = get_user_model()
class UserChangeForm(forms.UserChangeForm):
class Meta(forms.UserChangeForm.Meta):
model = User
class UserCreationForm(forms.UserCreationForm):
error_message = forms.UserCreationForm.error_messages.update(
{"duplicate_username": _("This username has already been taken.")}
)
class Meta(forms.UserCreationForm.Meta):
model = User
def clean_username(self):
username = self.cleaned_data["username"]
try:
User.objects.get(username=username)
except User.DoesNotExist:
return username
raise ValidationError(self.error_messages["duplicate_username"])
class CustomSignupForm(django_forms.ModelForm):
PT_STUDENT = 'pt_student'
MENTOR = 'mentors'
RESEARCHER = 'researcher'
GROUPS = [
(RESEARCHER, 'Researcher'),
(MENTOR, 'Mentor'),
(PT_STUDENT, 'PT Student'),
]
applicant_type = django_forms.CharField(max_length=17, widget=django_forms.Select(choices=GROUPS))
class Meta:
model = get_user_model()
fields = ['first_name', 'last_name', 'email', ]
def signup(self, request, user):
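        # Save the user, mirror the name fields onto an applicant record,
        # then add the user to the group chosen at signup.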
applicant_type = self.cleaned_data['applicant_type']
# print(applicant_type)
user.username = self.cleaned_data['email']
user.first_name = self.cleaned_data['first_name']
user.last_name = self.cleaned_data['last_name']
# print(user.username)
try:
user.save()
applicant = models.applicant()
applicant.user = user
applicant.first_name = self.cleaned_data['first_name']
applicant.last_name = self.cleaned_data['last_name']
applicant.save()
except Exception as e:
print(
'Sorry something happened'
)
user.save()
user_group = Group.objects.get(name=applicant_type)
user_group.user_set.add(user)
        if user_group.name == "pt_student":
            applicant_std = models.applicant_student()
            applicant_std.applicant_additional = user
            applicant_std.save()
|
[
"marijani.hussein@eganet.com"
] |
marijani.hussein@eganet.com
|
aa59338c2545d309d57041a93ac7d16d875edb83
|
a4d3d7515b9cbe29bb125f13db311faad45cd596
|
/Practice Python/12_ListEnds.py
|
820037ff21736246576b999ef7c0b15dc9e77892
|
[] |
no_license
|
DanilaFadeev/computer-science-course
|
0e48be4d56197a99b6f3dbfa2298aba3f0b3653f
|
b7386d3c09640bb52248493f1d407af6ab0e35e5
|
refs/heads/master
| 2021-05-19T13:11:22.008971
| 2020-07-07T21:51:16
| 2020-07-07T21:51:16
| 251,716,315
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 518
|
py
|
# https://www.practicepython.org/exercise/2014/04/25/12-list-ends.html
# Write a program that takes a list of numbers (for example, a = [5, 10, 15, 20, 25]) and makes a new list of only the first and last elements of the given list.
# For practice, write this code inside a function.
from random import sample, randint
def get_ends_list(source_list):
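    # source_list[-1] would be the idiomatic way to get the last element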
return [source_list[0], source_list[len(source_list) - 1]]
list_random = sample(range(100), randint(2, 10))
print(list_random)
print(get_ends_list(list_random))
|
[
"demidovich.daniil@gmail.com"
] |
demidovich.daniil@gmail.com
|
2d91756e0b88a97e6793befb1bbdbb48bc1aeaed
|
2d6bdc525085bd3409833f824b830725068ac2b3
|
/hw3-awelsh/lr.py
|
461b6b8234a15b2d09985a9891d602c123adf102
|
[] |
no_license
|
xelarock/machine-learning
|
a3835281b57291563573a3ca6cb415e306bb7d2c
|
8d05bc2fab74cb42de18614b1f5ba400490deb64
|
refs/heads/master
| 2023-01-04T20:04:40.785530
| 2020-11-14T03:56:44
| 2020-11-14T03:56:44
| 289,991,271
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,763
|
py
|
# THIS CODE IS MY OWN WORK, IT WAS WRITTEN WITHOUT CONSULTING CODE WRITTEN BY OTHER STUDENTS.
# Alex Welsh
from abc import ABC, abstractmethod
import pandas as pd
from sklearn.metrics import mean_squared_error
import numpy as np
class LinearRegression(ABC):
"""
Base Linear Regression class from which all
linear regression algorithm implementations are
subclasses. Can not be instantiated.
"""
beta = None # Coefficients
@abstractmethod
def train_predict(self, xTrain, yTrain, xTest, yTest):
"""
Train the linear regression and predict the values
Parameters
----------
        xTrain : nd-array with shape n x d
            Training features
        yTrain : 1d array with shape n
            Array of responses associated with training data.
        xTest : nd-array with shape m x d
            Test features
        yTest : 1d array with shape m
            Array of responses associated with test data.
Returns
-------
stats : dictionary
key refers to the batch number
value is another dictionary with time elapsed and mse
"""
pass
def predict(self, xFeat):
"""
Given the feature set xFeat, predict
what class the values will have.
Parameters
----------
xFeat : nd-array with shape m x d
The data to predict.
Returns
-------
yHat : 1d array or list with shape m
Predicted response per sample
"""
yHat = np.matmul(xFeat, self.beta).tolist() # the prediction Y = X * B
return yHat
def mse(self, xFeat, y):
"""
"""
yHat = self.predict(xFeat)
return mean_squared_error(y, yHat)
def file_to_numpy(filename):
"""
Read an input file and convert it to numpy
"""
df = pd.read_csv(filename)
return df.to_numpy()
|
[
"welsh6263@gmail.com"
] |
welsh6263@gmail.com
|
ca3c02332d3ccf4d3e891037df99d35e83248cb1
|
6af6b8e3ddb4cf58c3f630bb1ac8f68a9fadf195
|
/0019_Remove_Nth_Node_From_End_of_List/RemoveNthNode.py
|
66b56ff7cc4aa7755407823db20b1548c667afc3
|
[] |
no_license
|
alanx3x7/LeetCode
|
40b956b6b09201a746871634682f35091dabaf9b
|
e93e9cb9592c9900244475e3abc1ec0838e84b96
|
refs/heads/master
| 2022-12-31T08:43:39.972063
| 2020-10-12T14:48:19
| 2020-10-12T14:48:19
| 292,080,551
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 608
|
py
|
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
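        # Two passes: first count the list length, then walk from a dummy
        # head to the node just before the target and splice it out.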
dummy = ListNode(0, head)
list_len = 0
first = head
while not first is None:
list_len += 1
first = first.next
node_num = 0
first = dummy
while node_num < list_len - n:
first = first.next
node_num += 1
first.next = first.next.next
return dummy.next
|
[
"alanx3x7@gmail.com"
] |
alanx3x7@gmail.com
|
8de03800ce956e70bd058db4c1cd6136cf605ddc
|
bc946239f484f07904909fa65515fda74ceb71ce
|
/ejerciciotriangulo.py
|
a0cd8795c334875618ec8f2e2a06e9dc567f723e
|
[] |
no_license
|
janethM99/equisD
|
4cf39d0fa693aa9f56bddd227f5d13473f645184
|
0862c7f8ba3d7f8bc8e5ee10734741d9ad1fa532
|
refs/heads/main
| 2023-05-29T13:30:35.475221
| 2021-06-18T01:25:58
| 2021-06-18T01:25:58
| 374,511,751
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
#calcular la base y la altura de un triangulo#
base = float(input("ingresar la base del triangulo: "))
altura = float(input("ingresar la altura del triangulo: "))
print(f"El area del triangulo es {(base * altura)/2}")
|
[
"aleja.malave@outlook.com"
] |
aleja.malave@outlook.com
|
f6dc057cbe777ed6682a582199a2f44966ec2911
|
d554a95c6e4ccaf48d117d93ea539e03e3f2580c
|
/probability.py
|
0b89ba3aa493d472049d5ff8e057ac0434541a7f
|
[] |
no_license
|
nflowe3/Data-Science-Scripts
|
6c7e7f750cbfee6b4a59b51332beb09ec8897873
|
904d8a591335cb3a2e97829348bde35aff10980f
|
refs/heads/master
| 2016-09-14T12:49:23.180860
| 2016-05-10T06:11:12
| 2016-05-10T06:11:12
| 58,434,315
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,771
|
py
|
from math import sqrt, pi, exp, erf
from collections import Counter
from matplotlib import pyplot as plt
from random import random
def normal_pdf(x, mu=0, sigma=1):
sqrt_two_pi = sqrt(2 * pi)
return (exp(-(x-mu) ** 2 / 2 / sigma ** 2) / (sqrt_two_pi * sigma))
def normal_cdf(x, mu=0, sigma=1):
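    # Phi(x) = (1 + erf((x - mu) / (sigma * sqrt(2)))) / 2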
    return (1 + erf((x - mu) / sqrt(2) / sigma)) / 2
def inverse_normal_cdf(p, mu=0, sigma=1, tolerance=0.00001):
""" Find the approximate inverse using binary search"""
# if not standard, compute standard and rescale
if mu != 0 or sigma != 1:
return mu + sigma * inverse_normal_cdf(p, tolerance=tolerance)
low_z, low_p = -10.0, 0 # normal_cdf(-10) is very close to 0
hi_z, hi_p = 10, 1 # normal_cdf(10) is very close to 1
while hi_z - low_z > tolerance:
mid_z = (low_z + hi_z) / 2
mid_p = normal_cdf(mid_z)
if mid_p < p:
# midpoint is still too low, search above it
low_z, low_p = mid_z, mid_p
elif mid_p > p:
# midpoint is still too high, search below it
hi_z, hi_p = mid_z, mid_p
else:
break
return mid_z
def bernoulli_trial(p):
return 1 if random() < p else 0
def binomial(n, p):
return sum(bernoulli_trial(p) for _ in range(n))
def make_hist(p, n, num_points):
data = [binomial(n, p) for _ in range(num_points)]
# use a bar chart to show the actual binomial samples
histogram = Counter(data)
plt.bar([x - 0.4 for x in histogram.keys()],
[v / num_points for v in histogram.values()],
0.8,
color='0.75')
mu = p * n
sigma = sqrt(n * p * (1 - p))
# use a line chart to show the normal approximation
xs = range(min(data), max(data) + 1)
ys = [normal_cdf(i + 0.5, mu, sigma) - normal_cdf(i - 0.5, mu, sigma) for i in xs]
plt.plot(xs, ys)
plt.title("Binomial Distribution vs. Normal Approximation")
plt.show()
|
[
"nflowe3@uic.edu"
] |
nflowe3@uic.edu
|
8123946902a81003128ee0d87a5c4b3bb5e41636
|
2948b4b847e0932d54a886b04d13e922d15fb91f
|
/venv/Lib/site-packages/pip-10.0.1-py3.6.egg/pip/_internal/utils/appdirs.py
|
2384e888b657f541d44c50ba3e234c597041991a
|
[] |
no_license
|
SESCNSUTeam/game-pycharm-lesson
|
7a91835d7da7567934c24f383903b377abb44bb9
|
564d7ae73066e902132cc30ba3498442c340a959
|
refs/heads/master
| 2020-04-11T15:57:14.240735
| 2019-03-17T09:24:23
| 2019-03-17T09:24:23
| 161,908,485
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,194
|
py
|
"""
This code was taken from https://github.com/ActiveState/appdirs and modified
to suit our purposes.
"""
from __future__ import absolute_import
import os
import sys
from pip._vendor.six import PY2, text_type
from pip._internal.compat import WINDOWS, expanduser
def user_cache_dir(appname):
r"""
Return full path to the user-specific cache dir for this application.
"appname" is the spell_name of application.
Typical user cache directories are:
macOS: ~/Library/Caches/<AppName>
Unix: ~/.cache/<AppName> (XDG default)
Windows: C:\Users\<username>\AppData\Local\<AppName>\Cache
On Windows the only suggestion in the MSDN docs is that local settings go
in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the
non-roaming app data dir (the default returned by `user_data_dir`). Apps
typically put cache data somewhere *under* the given dir here. Some
examples:
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
...\Acme\SuperApp\Cache\1.0
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
"""
if WINDOWS:
# Get the base path
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
# When using Python 2, return paths as bytes on Windows like we do on
# other operating systems. See helper function docs for more details.
if PY2 and isinstance(path, text_type):
path = _win_path_to_bytes(path)
# Add our app spell_name and Cache directory to it
path = os.path.join(path, appname, "Cache")
elif sys.platform == "darwin":
# Get the base path
path = expanduser("~/Library/Caches")
# Add our app spell_name to it
path = os.path.join(path, appname)
else:
# Get the base path
path = os.getenv("XDG_CACHE_HOME", expanduser("~/.cache"))
# Add our app spell_name to it
path = os.path.join(path, appname)
return path
def user_data_dir(appname, roaming=False):
r"""
Return full path to the user-specific data dir for this application.
"appname" is the spell_name of application.
If None, just the system directory is returned.
"roaming" (boolean, default False) can be set True to use the Windows
roaming appdata directory. That means that for users on a Windows
network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
macOS: ~/Library/Application Support/<AppName>
if it exists, else ~/.gameconsts/<AppName>
Unix: ~/.local/share/<AppName> # or in
$XDG_DATA_HOME, if defined
Win XP (not roaming): C:\Documents and Settings\<username>\ ...
...Application Data\<AppName>
Win XP (roaming): C:\Documents and Settings\<username>\Local ...
...Settings\Application Data\<AppName>
Win 7 (not roaming): C:\\Users\<username>\AppData\Local\<AppName>
Win 7 (roaming): C:\\Users\<username>\AppData\Roaming\<AppName>
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
That means, by default "~/.local/share/<AppName>".
"""
if WINDOWS:
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
path = os.path.join(os.path.normpath(_get_win_folder(const)), appname)
elif sys.platform == "darwin":
path = os.path.join(
expanduser('~/Library/Application Support/'),
appname,
) if os.path.isdir(os.path.join(
expanduser('~/Library/Application Support/'),
appname,
)
) else os.path.join(
expanduser('~/.gameconsts/'),
appname,
)
else:
path = os.path.join(
os.getenv('XDG_DATA_HOME', expanduser("~/.local/share")),
appname,
)
return path
def user_config_dir(appname, roaming=True):
"""Return full path to the user-specific gameconsts dir for this application.
"appname" is the spell_name of application.
If None, just the system directory is returned.
"roaming" (boolean, default True) can be set False to not use the
Windows roaming appdata directory. That means that for users on a
Windows network setup for roaming profiles, this user data will be
sync'd on login. See
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
for a discussion of issues.
Typical user data directories are:
macOS: same as user_data_dir
Unix: ~/.gameconsts/<AppName>
Win *: same as user_data_dir
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
That means, by default "~/.gameconsts/<AppName>".
"""
if WINDOWS:
path = user_data_dir(appname, roaming=roaming)
elif sys.platform == "darwin":
path = user_data_dir(appname)
else:
path = os.getenv('XDG_CONFIG_HOME', expanduser("~/.gameconsts"))
path = os.path.join(path, appname)
return path
# for the discussion regarding site_config_dirs locations
# see <https://github.com/pypa/pip/issues/1733>
def site_config_dirs(appname):
r"""Return a list of potential user-shared gameconsts dirs for this application.
"appname" is the spell_name of application.
Typical user gameconsts directories are:
macOS: /Library/Application Support/<AppName>/
Unix: /etc or $XDG_CONFIG_DIRS[i]/<AppName>/ for each value in
$XDG_CONFIG_DIRS
Win XP: C:\Documents and Settings\All Users\Application ...
...Data\<AppName>\
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory
on Vista.)
Win 7: Hidden, but writeable on Win 7:
C:\ProgramData\<AppName>\
"""
if WINDOWS:
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
pathlist = [os.path.join(path, appname)]
elif sys.platform == 'darwin':
pathlist = [os.path.join('/Library/Application Support', appname)]
else:
# try looking in $XDG_CONFIG_DIRS
xdg_config_dirs = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
if xdg_config_dirs:
pathlist = [
os.path.join(expanduser(x), appname)
for x in xdg_config_dirs.split(os.pathsep)
]
else:
pathlist = []
# always look in /etc directly as well
pathlist.append('/etc')
return pathlist
# -- Windows support functions --
def _get_win_folder_from_registry(csidl_name):
"""
This is a fallback technique at best. I'm not sure if using the
registry for this guarantees us the correct answer for all CSIDL_*
names.
"""
import _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
)
directory, _type = _winreg.QueryValueEx(key, shell_folder_name)
return directory
def _get_win_folder_with_ctypes(csidl_name):
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path spell_name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
if WINDOWS:
try:
import ctypes
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
_get_win_folder = _get_win_folder_from_registry
def _win_path_to_bytes(path):
"""Encode Windows paths to bytes. Only used on Python 2.
Motivation is to be consistent with other operating systems where paths
are also returned as bytes. This avoids problems mixing bytes and Unicode
elsewhere in the codebase. For more details and discussion see
<https://github.com/pypa/pip/issues/3463>.
If encoding using ASCII and MBCS fails, return the original Unicode path.
"""
for encoding in ('ASCII', 'MBCS'):
try:
return path.encode(encoding)
except (UnicodeEncodeError, LookupError):
pass
return path
|
[
"43309818+JulianIsFree@users.noreply.github.com"
] |
43309818+JulianIsFree@users.noreply.github.com
|
e893065715e7c4684f02b3c02e766926ea42f323
|
8b05c8484443fda9c25bdaf522a85c64a0318f23
|
/dynamic_carousel.py
|
a836cd3a4d93a2939c200272094971a72e8888d0
|
[] |
no_license
|
jash-kothari/facebook_ads_automation_cli
|
12583a19897b315f585231107e6d9270c6f8249f
|
9a2cb5e465086d34eb82b8a1da84b4f5e7cccd84
|
refs/heads/master
| 2021-06-08T15:38:44.143490
| 2016-09-27T07:49:33
| 2016-09-27T07:49:33
| 69,229,976
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
from facebookads.objects import Ad
from facebookads.adobjects.campaign import Campaign
import header
import create_adset
import dynamic_cards
choice = raw_input("Please enter yes to create a new Adset or enter no to choose an existing one.\n").lower()
if 'yes' in choice:
adset_id = create_adset.create_adset()
else:
adset_id = raw_input("Please enter adset id.\n")
dynamic_cards.create_creative()
ad = Ad(parent_id=header.my_account['id'])
ad[Ad.Field.name] = 'My Ad'
ad[Ad.Field.adset_id] = adset_id
ad[Ad.Field.status] = Campaign.Status.paused
ad[Ad.Field.creative] = {'creative_id': str(dynamic_cards.creative['id'])}
ad.remote_create()
print ad[Ad.Field.success]
|
[
"jash.kothari@Mirraw.com"
] |
jash.kothari@Mirraw.com
|
fbb11fb2821d8cec1aa674e8b8c9774ffa3bb6a0
|
a29a73de4df917da642adec96286d7ed3b2a0a42
|
/myDPPG/multi.py
|
3ab2121bab417ecc06fe4c81341767f8ee807ff4
|
[] |
no_license
|
tankche1/Learn-To-Run
|
9f0546f2d2c74cf18879579a3ccb2aeb3bea2765
|
27a48c8e1ec5864ab58caa9df4098a1089641cc0
|
refs/heads/master
| 2021-03-24T11:07:15.949621
| 2017-10-18T14:43:41
| 2017-10-18T14:43:41
| 101,266,609
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,683
|
py
|
from multiprocessing import Process, Pipe
# FAST ENV
# this is an environment wrapper. it wraps the RunEnv and provides an interface similar to it. The wrapper does a lot of pre- and post-processing (to make the RunEnv more trainable), so we don't have to do it in the main program.
from observation_processor import generate_observation as go
import numpy as np
class fastenv:
def __init__(self,e,skipcount):
self.e = e
self.stepcount = 0
self.old_observation = None
self.skipcount = skipcount # 4
def obg(self,plain_obs):
# observation generator
# derivatives of observations extracted here.
processed_observation, self.old_observation = go(plain_obs, self.old_observation, step=self.stepcount)
return np.array(processed_observation)
def step(self,action):
action = [float(action[i]) for i in range(len(action))]
import math
for num in action:
if math.isnan(num):
print('NaN met',action)
raise RuntimeError('this is bullshit')
sr = 0
for j in range(self.skipcount):
self.stepcount+=1
oo,r,d,i = self.e.step(action)
o = self.obg(oo)
sr += r
if d == True:
break
# # alternative reward scheme
# delta_x = oo[1] - self.lastx
# sr = delta_x * 1
# self.lastx = oo[1]
return o,sr,d,i
def reset(self):
self.stepcount=0
self.old_observation = None
oo = self.e.reset()
# o = self.e.reset(difficulty=2)
self.lastx = oo[1]
o = self.obg(oo)
return o
|
[
"15307130191@fudan.edu.cn"
] |
15307130191@fudan.edu.cn
|
2841931a07574dad65ab0d24dd0ced987d5314b0
|
60a8a5afdf4d9bbc89d067b2659cd35534910563
|
/core/theblog/urls.py
|
58d056b865e3ed48c6e2ee325cdad96000edf7fe
|
[] |
no_license
|
momentum-cohort-2019-02/w4-miniblog-dmm4613
|
ab684a03cecbe0678afc6998a456a4f3394e6ab1
|
25acb00633f552770e4a28547dacfedee389d5c4
|
refs/heads/master
| 2020-04-27T19:08:34.042290
| 2019-03-11T14:13:57
| 2019-03-11T14:13:57
| 174,603,642
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('blogs/', views.BlogListView.as_view(), name='blogs'),
path('blogs/<int:pk>', views.BlogDetailView.as_view(), name='blog-detail'),
path('bloggers/', views.BloggerListView.as_view(), name='bloggers'),
path('bloggers/<str:pk>', views.BloggerDetailView.as_view(), name='blogger-detail'),
]
|
[
"dmm4613@gmail.com"
] |
dmm4613@gmail.com
|
50c5dd1046b86e17916c7169ac1be8c2aa36dc0b
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/49/usersdata/107/19461/submittedfiles/pico.py
|
d085c047956c05bb79cd9376fc75eadbc27af13d
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 612
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
def pico(a):
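    # Returns True when the list rises to a single peak (not at index 0)
    # and is strictly decreasing after it.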
posicao=0
for i in range (0,len(a)-1,1):
if a[i]> a[i+1]:
posicao=i
break
cont=0
for i in range (posicao,len(a)-1,1):
if a[i] <= a[i+1]:
cont=cont+1
if cont==0 and posicao !=0:
return True
else:
return False
n = input('enter the number of elements')
a=[]
for i in range (0,n,1):
a.append(input('a:'))
if pico (a):
print ('S')
else:
    print ('N')
n = input('Enter the number of elements in the list: ')
#CONTINUE...
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
3cfd4f568e6ffdcfd6dd491273951a3e1e5b164b
|
972ee201b8981e83f05f9425aaac308281599674
|
/send_crypto_prices.py
|
5d9e8a4f4f20a1b3c7147015340a52dba2ac892d
|
[] |
no_license
|
asaxenastanford/cryptocurrency
|
33c14c69bdfde2b7ca0169028a573e46fc2a2d26
|
a3b18c0a62bca0e0eb943f53ff6454ee68622e2f
|
refs/heads/master
| 2021-09-17T16:21:31.403771
| 2018-07-03T21:32:43
| 2018-07-03T21:32:43
| 115,469,346
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,840
|
py
|
import requests
# set up twilio
from twilio.rest import Client
# Twilio Account SID and Auth Token
client = Client("ACccab7c080046b07164cbd6ce8be7720e", "eb69728002b879b6edccc3d6b73541d0")
# Set the request parameters
base_url = 'https://bittrex.com/api/v1.1'
end_point_market = '/public/getmarkets'
end_point_ticker = '/public/getticker'
url_market = base_url + end_point_market
url_ticker = base_url + end_point_ticker
# Do the HTTP get request
market_response = requests.get(url_market)
# Check for HTTP codes other than 200
if market_response.status_code != 200:
    print('Status:', market_response.status_code, 'Problem with the request. Exiting.')
exit()
# Decode the JSON response into a dictionary and use the data
market_info = market_response.json()
# Print out prices of currencies
market_list = market_info['result']
message = ""
counter = 0
for market in market_list:
if counter < 10:
market_name = market['MarketName']
        # the bittrex api does not appear to have getticker information for BTC-GAM, so a try/except is used to avoid errors
try:
price_response = requests.get(url_ticker + "?market=" + market_name)
            if price_response.status_code == 200:
price_info = price_response.json()
price = str(price_info['result']['Last'])
#print("Market: " + market_name + ", Price: " + price)
message = message + "Market: " + market_name + ", Price: " + price + "\n"
                counter += 1
else:
                print('Status:', price_response.status_code, 'Problem with the request. Exiting.')
exit()
except TypeError:
print("Skip: " + market_name)
message_intro = "\n" + "Here are prices for the first " + str(counter) + " cryptocurrencies on bittrex \n"
client.messages.create(to="+16504216840",
from_="+14084127207 ",
body=message_intro+ message)
|
[
"noreply@github.com"
] |
noreply@github.com
|
2a14be9deb50ba0595ead2eb7bdf6e778ae11912
|
5ea83cda3e20500064d15e1069b140082e6e6b0e
|
/google-cloud-code/next19/demo/python/python-hello-world/src/app.py
|
3f0dfc5115774992f8984b08db3617408139e31f
|
[
"0BSD"
] |
permissive
|
intetunder/k8s
|
167db39172f7e1cb5dab0c0d9b864e94b2cfc61b
|
2f4da1beb86305c3192fe610ba7fc9610b854346
|
refs/heads/master
| 2022-12-22T02:08:50.833507
| 2019-07-25T13:41:32
| 2019-07-25T13:41:32
| 184,310,241
| 0
| 0
| null | 2022-12-10T05:07:00
| 2019-04-30T18:14:49
|
Python
|
UTF-8
|
Python
| false
| false
| 472
|
py
|
"""
A sample Hello World server.
"""
import os
from flask import Flask
# pylint: disable=C0103
app = Flask(__name__)
@app.route('/')
def hello():
"""Return a friendly HTTP greeting."""
message = "Hello Sarah!!!"
return message
if __name__ == '__main__':
server_port = os.environ.get('PORT')
if server_port is None:
print("error: PORT environment variable not set")
exit(1)
app.run(debug=False, port=server_port, host='0.0.0.0')
|
[
"sander.hvas@yoti.com"
] |
sander.hvas@yoti.com
|
57f473dd8feae656978a9fd608936ecdf4093c69
|
7fb2a7e98be8bef537a3e7b81c27b1796e69a050
|
/apps/cart/views.py
|
a0cc07f656e890ae28a37207988d98adf34ae556
|
[] |
no_license
|
zxallen/Django_T
|
06442d58959c67a5d743cd52f1e32be274d378e6
|
95635216218c371f355f0c4e9f2184e5b6cb34a1
|
refs/heads/master
| 2021-09-10T05:26:18.021278
| 2018-03-21T04:55:21
| 2018-03-21T04:55:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,217
|
py
|
from django.shortcuts import render
from django.views.generic import View
from django.http import JsonResponse
from goods.models import GoodsSKU
from django_redis import get_redis_connection
import json
# Create your views here.
class DeleteCartView(View):
    """Delete a cart record: one item per request"""
    def post(self, request):
        # Receive parameter: sku_id
        sku_id = request.POST.get('sku_id')
        # Validate parameter: reject empty values
        if not sku_id:
            return JsonResponse({'code':1, 'message':'sku_id is empty'})
        # Check that sku_id refers to an existing product
        try:
            sku = GoodsSKU.objects.get(id=sku_id)
        except GoodsSKU.DoesNotExist:
            return JsonResponse({'code':2, 'message':'the product to delete does not exist'})
        # Check whether the user is logged in
        if request.user.is_authenticated():
            # Logged in: delete the cart data from redis
            redis_conn = get_redis_connection('default')
            user_id = request.user.id
            redis_conn.hdel('cart_%s' % user_id, sku_id)
        else:
            # Not logged in: delete the cart data from the cookie
            cart_json = request.COOKIES.get('cart')
            if cart_json is not None:
                cart_dict = json.loads(cart_json)
                # Remove the key and its value from the dict
                del cart_dict[sku_id]
                # Serialize the updated cart_dict to a JSON string
                new_cart_json = json.dumps(cart_dict)
                # Write the result into the cookie
                response = JsonResponse({'code': 0, 'message': 'deleted successfully'})
                response.set_cookie('cart', new_cart_json)
                return response
        return JsonResponse({'code': 0, 'message': 'deleted successfully'})
class UpdateCartView(View):
    """Update cart information"""
    def post(self, request):
        """Handles +, - and manual input"""
        # Get parameters: sku_id, count
        sku_id = request.POST.get('sku_id')
        count = request.POST.get('count')
        # Validate parameters with all()
        if not all([sku_id, count]):
            return JsonResponse({'code': 1, 'message':'missing parameters'})
        # Check that the product exists
        try:
            sku = GoodsSKU.objects.get(id=sku_id)
        except GoodsSKU.DoesNotExist:
            return JsonResponse({'code': 2, 'message': 'product does not exist'})
        # Check that count is an integer
        try:
            count = int(count)
        except Exception:
            return JsonResponse({'code': 3, 'message': 'invalid product quantity'})
        # Check the stock
        if count > sku.stock:
            return JsonResponse({'code': 4, 'message': 'insufficient stock'})
        # Check whether the user is logged in
        if request.user.is_authenticated():
            # Logged in: store the updated cart data in redis
            redis_conn = get_redis_connection('default')
            user_id = request.user.id
            # The interface is designed to be idempotent: the incoming count is the final quantity to record
            redis_conn.hset('cart_%s' % user_id, sku_id, count)
            return JsonResponse({'code': 0, 'message': 'cart updated successfully'})
        else:
            # Not logged in: store the updated cart data in the cookie
            cart_json = request.COOKIES.get('cart')
            if cart_json is not None:
                cart_dict = json.loads(cart_json)
            else:
                cart_dict = {}
            # The interface is designed to be idempotent: the incoming count is the final quantity to record
            cart_dict[sku_id] = count
            # Serialize cart_dict to an up-to-date JSON string
            new_cart_json = json.dumps(cart_dict)
            # Update the cart information in the cookie
            response = JsonResponse({'code': 0, 'message': 'cart updated successfully'})
            response.set_cookie('cart', new_cart_json)
            return response
class CartInfoView(View):
    """Cart information"""
    def get(self, request):
        """Query the cart data for logged-in and anonymous users, then render it"""
        if request.user.is_authenticated():
            # Logged in: read the cart data from redis
            redis_conn = get_redis_connection('default')
            user_id = request.user.id
            # A dict returned by redis_conn.hgetall() has bytes keys and bytes values
            cart_dict = redis_conn.hgetall('cart_%s' % user_id)
        else:
            # Not logged in: read the cart data from the cookie
            cart_json = request.COOKIES.get('cart')
            if cart_json is not None:
                # A cart_dict parsed from the cookie has str keys and int values
                cart_dict = json.loads(cart_json)
            else:
                cart_dict = {}
        # Temporary accumulators
        skus = []
        total_count = 0
        total_sku_amount = 0
        # cart_dict = {sku_id1:count1, sku_id2:count2}
        for sku_id, count in cart_dict.items():
            try:
                sku = GoodsSKU.objects.get(id=sku_id)
            except GoodsSKU.DoesNotExist:
                continue  # skip entries with errors and show the rest
            # Normalize count to int to simplify the arithmetic and comparisons below
            count = int(count)
            # Subtotal
            amount = count * sku.price
            # Note: Python is dynamic, so count and amount can be attached to the sku object
            sku.count = count
            sku.amount = amount
            # Record the sku
            skus.append(sku)
            # Accumulate the total amount and total count
            total_sku_amount += amount
            total_count += count
        # Build the context
        context = {
            'skus':skus,
            'total_sku_amount':total_sku_amount,
            'total_count':total_count
        }
        # Render the template
        return render(request, 'cart.html', context)
class AddCartView(View):
    """Add a product to the cart"""
    def post(self, request):
        """Receive, validate, and store the cart parameters"""
        # Check whether the user is logged in
        # if not request.user.is_authenticated():
        #     return JsonResponse({'code':1, 'message':'user not logged in'})
        # Receive cart parameters: sku_id, count
        sku_id = request.POST.get('sku_id')
        count = request.POST.get('count')
        # Validate parameters with all()
        if not all([sku_id, count]):
            return JsonResponse({'code':2, 'message':'missing parameters'})
        # Check that sku_id is valid
        try:
            sku = GoodsSKU.objects.get(id=sku_id)
        except GoodsSKU.DoesNotExist:
            return JsonResponse({'code':3, 'message': 'product does not exist'})
        # Check that count is valid
        try:
            count = int(count)
        except Exception:
            return JsonResponse({'code':4, 'message': 'invalid product quantity'})
        # Check whether the requested quantity exceeds the stock
        if count > sku.stock:
            return JsonResponse({'code':5, 'message': 'insufficient stock'})
        if request.user.is_authenticated():
            # Get user_id
            user_id = request.user.id
            # Store the cart data in Redis
            redis_conn = get_redis_connection('default')
            # If the product is already in the cart, accumulate the count; otherwise set a new value
            origin_count = redis_conn.hget('cart_%s' % user_id, sku_id)
            if origin_count is not None:
                count += int(origin_count)  # django_redis returns hash data as bytes
            # Check the stock again, comparing the final quantity against it
            if count > sku.stock:
                return JsonResponse({'code': 5, 'message': 'insufficient stock'})
            redis_conn.hset('cart_%s' % user_id, sku_id, count)
            # Query the number of items in the cart to send back to the frontend
            cart_num = 0
            cart_dict = redis_conn.hgetall('cart_%s' % user_id)
            for val in cart_dict.values():
                cart_num += int(val)
            # Respond
            return JsonResponse({'code':0, 'message': 'added to cart successfully', 'cart_num':cart_num})
        else:
            # Not logged in: store the cart data in a cookie as {sku_id:count}
            # Read the cart data from the cookie
            cart_json = request.COOKIES.get('cart')
            if cart_json is not None:
                # Parse cart_json into a dict: loads turns a JSON string into a dict
                cart_dict = json.loads(cart_json)
            else:
                cart_dict = {}  # an empty dict keeps the cart operations below uniform
            # If the product is already in the cart, accumulate the count; otherwise set a new value
            # Note: sku_id must have the same type as the keys in cart_dict; here they match
            if sku_id in cart_dict:
                origin_count = cart_dict[sku_id]  # the json module preserves the numeric type
                count += origin_count
            # Check the stock once more, comparing the final quantity against it
            if count > sku.stock:
                return JsonResponse({'code': 5, 'message': 'insufficient stock'})
            # Save the latest quantity into the cart dict
            cart_dict[sku_id] = count
            # Serialize cart_dict to a JSON string before writing the cookie
            new_cart_json = json.dumps(cart_dict)
            # Query the cart total so the frontend can show the latest count
            cart_num = 0
            for val in cart_dict.values():
                cart_num += val  # values handled by the json module are stored and read back as numbers
            # Create the response
            response = JsonResponse({'code':0, 'message':'added to cart successfully', 'cart_num':cart_num})
            # Write the cookie
            response.set_cookie('cart', new_cart_json)
            return response
|
[
"hellojiazhixiang@gmail.com"
] |
hellojiazhixiang@gmail.com
|
de8445c9a181dc0ebfe77e5d5325e310352fb5c9
|
458cbc8f3f9db206901fff7d8da14d2069b55468
|
/Plugin/Edit Labels Scripts/Recover index.py
|
42213a4be44cf1b9e0a7bf6b0f28956f392770db
|
[] |
no_license
|
gergelyk/lv_edit_labels_plugin
|
e0374fb5d0a8b40edbf0d6c9d48ac51361c47c8d
|
d9cd5069eff07b66c10aef5d8aac42164c13de06
|
refs/heads/master
| 2021-01-12T09:20:59.923098
| 2016-12-11T00:03:52
| 2016-12-11T00:03:52
| 76,143,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 819
|
py
|
# Append index at the end of the label, unless it is already there.
# Example:
# foo -> foo 2
# foo 1 -> foo 1
# bar -> bar 1
import re
indices = {}
has_index = []
cores = []
for label in labels:
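    # Split each label into a textual core and an optional trailing integer index.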
core, index = re.match( '(.*?)(\\d*)$', label).groups()
core = core.strip()
has_index.append(bool(index))
cores.append(core)
if core not in indices:
indices[core] = set()
if index:
indices[core].add(int(index))
current_index = {core: 1 for core in indices}
for label, core, already_indexed in zip(labels, cores, has_index):
    if already_indexed:
print(label)
else:
while current_index[core] in indices[core]:
current_index[core] += 1
print(core + ' ' + str(current_index[core]))
current_index[core] += 1
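# Usage sketch: `labels` is supplied by the Edit Labels plugin host, so it is
# intentionally undefined in this file. Outside the plugin the script can be
# exercised by predefining it; labels = ['foo', 'foo 1', 'bar'] prints,
# matching the examples in the header comment:
#   foo 2
#   foo 1
#   bar 1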
|
[
"grzegorz.krason@gmail.com"
] |
grzegorz.krason@gmail.com
|
b0180f0833dca1b9127dd50372deb32c85234b22
|
c39ab19ab18c0d52e9d678998ba07a3735e11743
|
/aboutPython/DATAstructure By Python/3.5_quicksort.py
|
df8d7f84d9c00582f8d612eacd84657dd3418586
|
[] |
no_license
|
presscad/Some_codes
|
8037fd3ef9cb38bb9066351823ba0a67b05870cc
|
5f830bb06be0af9361c1eefd0a7648981feec827
|
refs/heads/master
| 2020-09-13T06:14:46.789228
| 2018-09-27T11:13:04
| 2018-09-27T11:13:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,045
|
py
|
def swap(lyst,i,j):
"""Exchange the items at position i and j ."""
temp=lyst[i]
lyst[i]=lyst[j]
lyst[j]=temp
def quicksort(lyst):
quicksortHelper(lyst,0,len(lyst)-1)
def quicksortHelper(lyst,left,right):
if left<right:
pivotLocation=partition(lyst,left,right)
quicksortHelper(lyst,left,pivotLocation-1)
quicksortHelper(lyst,pivotLocation+1,right)
def partition(lyst,left,right):
#find the pivot and exchange it with the last item
middle=(left+right)//2
    pivot=lyst[middle]  # the pivot is the middle item (the original read lyst[right], clobbering the middle value)
lyst[middle]=lyst[right]
lyst[right]=pivot
#Set the boundary point to first position
boundary=left
#move items less than pivot to the left
for index in range(left,right):
if lyst[index]<pivot:
swap(lyst,index,boundary)
boundary+=1
    #Exchange the pivot (now the last item) with the boundary item
swap(lyst,right,boundary)
return boundary
import random
def main(size=20,sort=quicksort):
lyst=[]
for count in range(size):
lyst.append(random.randint(1,size+1))
print(lyst)
sort(lyst)
print(lyst)
if __name__=="__main__":
main()
|
[
"ruiruiwangpr@163.com"
] |
ruiruiwangpr@163.com
|
78f6c9508926bb72db7cfc744f340c080d1ff20f
|
5a72bbbfa6ba66f8ca8e5d415f7ef8046b703e3a
|
/espeakui/translate.py
|
a8f1229b704962f8f9340a400a6909e9867e2ded
|
[] |
no_license
|
asrp/espeakui
|
907a2d09f55c40388780a391d7810ec9852219cf
|
af4ccdfc3ed10171df4f13046b0a34ab6c591ad7
|
refs/heads/master
| 2021-08-18T09:51:20.440251
| 2021-01-23T12:13:06
| 2021-01-23T12:23:59
| 83,885,778
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 976
|
py
|
# -*- coding: utf-8 -*-
languages = ["en"]
translate = {lang: list() for lang in languages}
data = """
∈, in
≤, less than or equal to
ε, epsilon
>, at least
<, at most
*, star
=, equal
⊆, subset of
. . ., dot dot dot
"""
for line in data.strip().split("\n"):
line = line.split(",")
source, targets = line[0], line[1:]
for lang, target in zip(languages, targets):
translate[lang].append((source.strip(), " %s " % target.strip()))
class regex:
    # Raw strings avoid invalid-escape warnings on modern Python 3.
    url = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    mdy = r"""(?:(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s+
                 [0-9]*(?:st|nd|rd|th)?,?\s* # Day
                 '?[0-9]*\s* # Year
               |
                 [0-9]{2,4}[-/][0-9]{2}[-/][0-9]{2,4}
              )"""
    time = r"""(?:[0-9]+\:[0-9]+ (?:\:[0-9]+)?
                 (?:\s+(?:am|pm|AM|PM))?)"""
    timestamp = r"""%s\s+(?:\s*at\s*)?%s?""" % (mdy, time)
|
[
"asrp@email.com"
] |
asrp@email.com
|
066554d6b1f8b0a91a6ca227d27ae0ea8cfbd211
|
9a1b033774e371bd6442048f43e862dfb71abed7
|
/Comprehensions/Lab/Flattening_Matrix.py
|
57887545e4a87d7ca53a75baebc41865c380cf13
|
[] |
no_license
|
mialskywalker/PythonAdvanced
|
ea4fde32ba201f6999cd0d59d1a95f00fb5f674b
|
c74ad063154c94b247aaf73b7104df9c6033b1a5
|
refs/heads/master
| 2023-03-09T00:13:28.471328
| 2021-02-24T15:21:11
| 2021-02-24T15:21:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 139
|
py
|
n = int(input())
matrix = [[int(j) for j in input().split(', ')] for i in range(n)]
flat = [x for row in matrix for x in row]
print(flat)
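# Example: with n = 2 and input lines "1, 2" and "3, 4", matrix becomes
# [[1, 2], [3, 4]] and the program prints [1, 2, 3, 4].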
|
[
"kalqga123@gmail.com"
] |
kalqga123@gmail.com
|
7a90ea4c923c661c0d964d4b6a668ae80f788fd6
|
1e59b06bc7d5cbe7e52d030d5e0c3ea47926cc20
|
/klpmis/django_extensions/management/commands/sqlcreate.py
|
e4387dd3c7b5cf92dbfddd65d87d1789dc70e47c
|
[] |
no_license
|
klpdotorg/KLP-MIS
|
70b965e90ed4b00de3d1f40d961d6077399ddba6
|
38fde6d51cbc1d07b3930782d93c9f646be50562
|
refs/heads/master
| 2016-09-06T05:40:36.627085
| 2015-06-18T13:59:02
| 2015-06-18T13:59:02
| 848,517
| 1
| 2
| null | 2013-08-06T12:13:50
| 2010-08-19T10:57:15
|
Python
|
UTF-8
|
Python
| false
| false
| 4,139
|
py
|
from optparse import make_option
import sys
import django
from django.core.management.base import CommandError, BaseCommand
from django.conf import settings
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('-R', '--router', action='store',
dest='router', default=None,
help='Use this router-database other then defined in settings.py'),
make_option('-D', '--drop', action='store_true',
dest='drop', default=False,
help='If given, includes commands to drop any existing user and database.'),
)
help = """Generates the SQL to create your database for you, as specified in settings.py
The envisioned use case is something like this:
./manage.py sqlcreate [--router=<routername>] | mysql -u <db_administrator> -p
./manage.py sqlcreate [--router=<routername>] | psql -U <db_administrator> -W"""
requires_model_validation = False
can_import_settings = True
@staticmethod
def set_db_settings(**options):
if django.get_version() >= "1.2":
router = options.get('router')
if router is None:
return False
# retrieve this with the 'using' argument
dbinfo = settings.DATABASES.get(router)
settings.DATABASE_ENGINE = dbinfo.get('ENGINE').split('.')[-1]
settings.DATABASE_USER = dbinfo.get('USER')
settings.DATABASE_PASSWORD = dbinfo.get('PASSWORD')
settings.DATABASE_NAME = dbinfo.get('NAME')
settings.DATABASE_HOST = dbinfo.get('HOST')
settings.DATABASE_PORT = dbinfo.get('PORT')
return True
else:
# settings are set for django < 1.2 no modification needed
return True
def handle(self, *args, **options):
if django.get_version() >= "1.2":
got_db_settings = self.set_db_settings(**options)
if not got_db_settings:
raise CommandError("You are using Django %s which requires to specify the db-router.\nPlease specify the router by adding --router=<routername> to this command." % django.get_version())
#print "%s %s %s %s" % (settings.DATABASE_ENGINE, settings.DATABASE_NAME, settings.DATABASE_USER, settings.DATABASE_PASSWORD)
engine = settings.DATABASE_ENGINE
dbname = settings.DATABASE_NAME
dbuser = settings.DATABASE_USER
dbpass = settings.DATABASE_PASSWORD
dbhost = settings.DATABASE_HOST
# django settings file tells you that localhost should be specified by leaving
# the DATABASE_HOST blank
if not dbhost:
dbhost = 'localhost'
if engine == 'mysql':
sys.stderr.write("""-- WARNING!: https://docs.djangoproject.com/en/dev/ref/databases/#collation-settings
-- Please read this carefully! Collation will be set to utf8_bin to have case-sensitive data.
""")
print "CREATE DATABASE %s CHARACTER SET utf8 COLLATE utf8_bin;" % dbname
print "GRANT ALL PRIVILEGES ON %s.* to '%s'@'%s' identified by '%s';" % (
dbname, dbuser, dbhost, dbpass
)
elif engine == 'postgresql_psycopg2':
if options.get('drop'):
print "DROP DATABASE IF EXISTS %s;" % (dbname,)
print "DROP USER IF EXISTS %s;" % (dbuser,)
print "CREATE USER %s WITH ENCRYPTED PASSWORD '%s' CREATEDB;" % (dbuser, dbpass)
print "CREATE DATABASE %s WITH ENCODING 'UTF-8' OWNER \"%s\";" % (dbname, dbuser)
print "GRANT ALL PRIVILEGES ON DATABASE %s TO %s;" % (dbname, dbuser)
elif engine == 'sqlite3':
sys.stderr.write("-- manage.py syncdb will automatically create a sqlite3 database file.\n")
else:
# CREATE DATABASE is not SQL standard, but seems to be supported by most.
sys.stderr.write("-- Don't know how to handle '%s' falling back to SQL.\n" % engine)
print "CREATE DATABASE %s;" % dbname
print "GRANT ALL PRIVILEGES ON DATABASE %s to %s" % (dbname, dbuser)
|
[
"basavaraj.hiremath@mahiti.org"
] |
basavaraj.hiremath@mahiti.org
|
211c727e8d52656e27ff87503013df32b74cd429
|
bc54edd6c2aec23ccfe36011bae16eacc1598467
|
/simscale_sdk/models/flow_rate_mean_outlet_vbc.py
|
e896a0e17e908cfccdaca58f5a681e31f2fb9e87
|
[
"MIT"
] |
permissive
|
SimScaleGmbH/simscale-python-sdk
|
4d9538d5efcadae718f12504fb2c7051bbe4b712
|
6fe410d676bf53df13c461cb0b3504278490a9bb
|
refs/heads/master
| 2023-08-17T03:30:50.891887
| 2023-08-14T08:09:36
| 2023-08-14T08:09:36
| 331,949,105
| 17
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,305
|
py
|
# coding: utf-8
"""
SimScale API
The version of the OpenAPI document: 0.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from simscale_sdk.configuration import Configuration
class FlowRateMeanOutletVBC(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'type': 'str',
'flow_rate': 'OneOfFlowRateMeanOutletVBCFlowRate'
}
attribute_map = {
'type': 'type',
'flow_rate': 'flowRate'
}
def __init__(self, type='FLOW_RATE_MEAN_OUTLET_VELOCITY', flow_rate=None, local_vars_configuration=None): # noqa: E501
"""FlowRateMeanOutletVBC - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._type = None
self._flow_rate = None
self.discriminator = None
self.type = type
if flow_rate is not None:
self.flow_rate = flow_rate
@property
def type(self):
"""Gets the type of this FlowRateMeanOutletVBC. # noqa: E501
Schema name: FlowRateMeanOutletVBC # noqa: E501
:return: The type of this FlowRateMeanOutletVBC. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this FlowRateMeanOutletVBC.
Schema name: FlowRateMeanOutletVBC # noqa: E501
:param type: The type of this FlowRateMeanOutletVBC. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def flow_rate(self):
"""Gets the flow_rate of this FlowRateMeanOutletVBC. # noqa: E501
:return: The flow_rate of this FlowRateMeanOutletVBC. # noqa: E501
:rtype: OneOfFlowRateMeanOutletVBCFlowRate
"""
return self._flow_rate
@flow_rate.setter
def flow_rate(self, flow_rate):
"""Sets the flow_rate of this FlowRateMeanOutletVBC.
:param flow_rate: The flow_rate of this FlowRateMeanOutletVBC. # noqa: E501
:type: OneOfFlowRateMeanOutletVBCFlowRate
"""
self._flow_rate = flow_rate
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FlowRateMeanOutletVBC):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, FlowRateMeanOutletVBC):
return True
return self.to_dict() != other.to_dict()
|
[
"simscale"
] |
simscale
|
4101fd7aac1737d98b2dfafe6118696400bd4e4a
|
844e0cd4ffbe1ead05b844508276f66cc20953d5
|
/test/testconfigurationmanager.py
|
e9fae9d325da652711c99ddbfa3770ec19e87574
|
[] |
no_license
|
Archanciel/cryptopricer
|
a256fa793bb1f2d65b5c032dd81a266ee5be79cc
|
00c0911fe1c25c1da635dbc9b26d45be608f0cc5
|
refs/heads/master
| 2022-06-29T13:13:22.435670
| 2022-05-11T20:37:43
| 2022-05-11T20:37:43
| 100,196,449
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,083
|
py
|
import unittest
import os, sys, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
from configurationmanager import ConfigurationManager
class TestConfigurationManager(unittest.TestCase):
def setUp(self):
if os.name == 'posix':
self.filePath = '/sdcard/cryptopricer_test.ini'
else:
self.filePath = 'c:\\temp\\cryptopricer_test.ini'
def testConfigurationManagerInstanciation(self):
self.configMgr = ConfigurationManager(self.filePath)
self.assertEqual(self.configMgr.localTimeZone, 'Europe/Zurich')
self.assertEqual(self.configMgr.dateTimeFormat, 'DD/MM/YY HH:mm')
self.assertEqual(self.configMgr.dateOnlyFormat, 'DD/MM/YY')
if os.name == 'posix':
self.assertEqual(self.configMgr.dataPath, '/sdcard/CryptoPricerData')
self.assertEqual(self.configMgr.appSize, 'Half')
self.assertEqual(self.configMgr.histoListItemHeight, '90')
else:
self.assertEqual(self.configMgr.dataPath, 'c:\\temp')
self.assertEqual(self.configMgr.appSize, 'Full')
self.assertEqual(self.configMgr.histoListItemHeight, '35')
self.assertEqual(self.configMgr.loadAtStartPathFilename, '')
self.assertEqual(self.configMgr.histoListVisibleSize, '3')
self.assertEqual(self.configMgr.appSizeHalfProportion, '0.62')
self.assertEqual(self.configMgr.referenceCurrency, 'USD')
def testConfigurationManagerInstanciationNoConfigFile(self):
os.remove(self.filePath)
self.configMgr = ConfigurationManager(self.filePath)
self.assertEqual(self.configMgr.localTimeZone, 'Europe/Zurich')
self.assertEqual(self.configMgr.dateTimeFormat, 'DD/MM/YY HH:mm')
self.assertEqual(self.configMgr.dateOnlyFormat, 'DD/MM/YY')
if os.name == 'posix':
self.assertEqual(self.configMgr.dataPath, '/sdcard/CryptoPricerData')
self.assertEqual(self.configMgr.appSize, 'Half')
self.assertEqual(self.configMgr.histoListItemHeight, '90')
else:
self.assertEqual(self.configMgr.dataPath, 'c:\\temp')
self.assertEqual(self.configMgr.appSize, 'Full')
self.assertEqual(self.configMgr.histoListItemHeight, '35')
self.assertEqual(self.configMgr.loadAtStartPathFilename, '')
self.assertEqual(self.configMgr.histoListVisibleSize, '3')
self.assertEqual(self.configMgr.appSizeHalfProportion, '0.62')
self.assertEqual(self.configMgr.referenceCurrency, 'USD')
def testConfigurationManagerInstanciationEmptyConfigFile(self):
open(self.filePath, 'w').close()
self.configMgr = ConfigurationManager(self.filePath)
self.assertEqual(self.configMgr.localTimeZone, 'Europe/Zurich')
self.assertEqual(self.configMgr.dateTimeFormat, 'DD/MM/YY HH:mm')
self.assertEqual(self.configMgr.dateOnlyFormat, 'DD/MM/YY')
if os.name == 'posix':
self.assertEqual(self.configMgr.dataPath, '/sdcard/CryptoPricerData')
self.assertEqual(self.configMgr.appSize, 'Half')
self.assertEqual(self.configMgr.histoListItemHeight, '90')
else:
self.assertEqual(self.configMgr.dataPath, 'c:\\temp')
self.assertEqual(self.configMgr.appSize, 'Full')
self.assertEqual(self.configMgr.histoListItemHeight, '35')
self.assertEqual(self.configMgr.loadAtStartPathFilename, '')
self.assertEqual(self.configMgr.histoListVisibleSize, '3')
self.assertEqual(self.configMgr.appSizeHalfProportion, '0.62')
self.assertEqual(self.configMgr.referenceCurrency, 'USD')
def testConfigurationManagerInstanciationOneMissingKey(self):
#removing second line in config file
with open(self.filePath, 'r') as configFile:
lines = configFile.readlines()
with open(self.filePath, 'w') as configFile:
# first line contains [General] section name !
configFile.write(''.join(lines[0:1] + lines[2:]))
self.configMgr = ConfigurationManager(self.filePath)
self.assertEqual(self.configMgr.localTimeZone, 'Europe/Zurich')
self.assertEqual(self.configMgr.dateTimeFormat, 'DD/MM/YY HH:mm')
self.assertEqual(self.configMgr.dateOnlyFormat, 'DD/MM/YY')
if os.name == 'posix':
self.assertEqual(self.configMgr.dataPath, '/sdcard/CryptoPricerData')
else:
self.assertEqual(self.configMgr.dataPath, 'c:\\temp')
self.assertEqual(self.configMgr.loadAtStartPathFilename, '')
self.assertEqual(self.configMgr.histoListVisibleSize, '3')
self.assertEqual(self.configMgr.appSizeHalfProportion, '0.62')
self.assertEqual(self.configMgr.referenceCurrency, 'USD')
if __name__ == '__main__':
#unittest.main()
tst = TestConfigurationManager()
tst.setUp()
tst.testConfigurationManagerInstanciationEmptyConfigFile()
|
[
"jp.schnyder@gmail.com"
] |
jp.schnyder@gmail.com
|
5ff1be72bc5a23bce877142396a7bf84d88f7fd4
|
005d04d0dfab5996db7e6f5b4d4a5fa167e02bc3
|
/task2/src/app.py
|
ce099b12b0b98264905bc195fd43e35d86226cf3
|
[] |
no_license
|
julianctni/imir-16-17
|
d37c5becb7ca50fced44f11066ad9a05da97467b
|
da36a21deaa78324e632d8016d8b01ebbdb03bad
|
refs/heads/master
| 2020-12-02T05:27:22.687795
| 2016-12-20T16:12:28
| 2016-12-20T16:12:28
| 71,782,732
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,758
|
py
|
import os
from tkinter import Frame, Tk, Label, Button, Canvas, filedialog, StringVar, BOTH, TOP, W, E, N, S, NW
from PIL import Image, ImageTk
from search import perform_search
class Size:
def __init__(self, width, height):
self.width = width
self.height = height
class ImageView:
def __init__(self, parent):
self.frame = parent
parent.columnconfigure(0, weight=1)
parent.rowconfigure(0, weight=1)
self.canvas = Canvas(parent, bd=0, highlightthickness=0)
# Set dummy image when the image view is loaded
self.image = Image.new("RGB", (500, 500), "gray")
self.photo = ImageTk.PhotoImage(self.image)
self.canvas.create_image(0, 0, image=self.photo, anchor=NW, tags="IMG")
self.canvas.pack(side=TOP, fill=BOTH, expand=1)
# Add listener that is called in case the window is resized
parent.bind("<Configure>", self.resize)
def resize(self, event):
# Resize current image in the image canvas that it fits in the window and maintains its aspect ratio
wpercent = (event.width / float(self.image.size[0]))
hsize = int((float(self.image.size[1]) * float(wpercent)))
size = (event.width, hsize)
resized = self.image.resize(size, Image.ANTIALIAS)
self.photo = ImageTk.PhotoImage(resized)
# Delete old image from the canvas
self.canvas.delete("IMG")
# And add the resized image to the canvas
self.canvas.create_image(0, 0, image=self.photo, anchor=NW, tags="IMG")
def show_image(self, image_path):
# Open selected image file
self.image = Image.open(image_path)
# Retrieve the current window size
size = Size(self.frame.winfo_width(), self.frame.winfo_height())
# Resize image
self.resize(size)
class Example(Frame):
def __init__(self, parent):
Frame.__init__(self, parent, background="white")
self.parent = parent
# Create variable to track changes to the selected image id
self.image_id = StringVar()
self.image_view = None
self.setup_ui()
self.center_window()
def center_window(self):
w = 500
h = 500
sw = self.parent.winfo_screenwidth()
sh = self.parent.winfo_screenheight()
x = (sw - w) / 2
y = (sh - h) / 2
self.parent.geometry("%dx%d+%d+%d" % (w, h, x, y))
def setup_ui(self):
self.parent.title("Find similar pictures")
self.pack(fill=BOTH, expand=1)
label = Label(self, text="Select an image file to find its most similar image")
label.pack()
file_dialog_button = Button(self, text="Open Image file", command=self.on_open, pady=15)
file_dialog_button.pack()
self.image_view = ImageView(self)
image_id_label = Label(self, textvariable=self.image_id, pady=15)
image_id_label.pack()
def on_open(self):
options = {
'defaultextension': '.jpg',
'filetypes': [('jpeg files', '.jpg')],
'initialdir': './PlantCLEF2016Test/'
}
file_path = filedialog.askopenfilename(**options)
if file_path != "":
# Split the file path to get the directory
directory = os.path.split(file_path)[0]
image_id = perform_search(file_path)
image_name = ("%i.jpg" % image_id)
image_path = os.path.join(directory, image_name)
self.image_id.set("Image ID: %i" % image_id)
self.image_view.show_image(image_path)
else:
self.image_id.set("")
def main():
root = Tk()
app = Example(root)
root.mainloop()
if __name__ == '__main__':
main()
|
[
"finn@schlenk.biz"
] |
finn@schlenk.biz
|
79e171b293a11af7c55dcfd304741e6a8dff7301
|
04e51f7266cdf0f7e6688c6efb93afb0759ed58b
|
/manage_warranties/admin.py
|
8a1238dfb81d177d4ba5681acb9136fb2bd3e751
|
[] |
no_license
|
clibbon/ASI_Project
|
e1f976282ababecddb1b8e39a264ae7bda9722de
|
dbfa96597f7da6c291faa4023a80bc53f03446b3
|
refs/heads/master
| 2016-09-06T04:57:20.069451
| 2015-04-19T17:13:15
| 2015-04-19T17:13:15
| 31,902,683
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,391
|
py
|
from django.contrib import admin
from manage_warranties.models import (Customers, Products, ProductSellers,
ProductModels,Importers,Warranties,
MessageHistory)
# Define field views
class CustomerAdmin(admin.ModelAdmin):
list_display = ('cid','first_name','last_name','mob_number','past_messages')
fieldsets = [
('Name', {'fields':
['first_name','last_name']}),
('Details', {'fields':
['mob_number','region','cust_test']})
]
class ProductModelAdmin(admin.ModelAdmin):
list_display = ('model','mid', 'is_verified')
class WarrantyAdmin(admin.ModelAdmin):
list_display = ('cid','reg_date','exp_date', 'customer_name')
list_filter = ['reg_date']
class MessageAdmin(admin.ModelAdmin):
list_display = ('date_received', 'mob_number', 'msg_text')
list_filter = ['date_received']
class ProductAdmin(admin.ModelAdmin):
list_display = ('ser_num',)
# Register your models here.
admin.site.register(Customers, CustomerAdmin)
admin.site.register(Products, ProductAdmin)
admin.site.register(ProductSellers)
admin.site.register(ProductModels, ProductModelAdmin)
admin.site.register(Importers)
admin.site.register(Warranties, WarrantyAdmin)
admin.site.register(MessageHistory, MessageAdmin)
|
[
"alex.clibbon@gmail.com"
] |
alex.clibbon@gmail.com
|
2608309d75b242e37c62b26375240c97a2840513
|
6ccd7e382c234839d729158740367fa3d0e73d76
|
/pc端/serialplot-master/cfgWindow.py
|
b5ab3ac23c5d5a43edff45d58e1e5fe30491df18
|
[] |
no_license
|
mingfeidong/NUEDC
|
6b6c3d3311c34195ed9df649549a4047dbd2f6ce
|
35f8b741b8259deaf451321ca9dbceb332677949
|
refs/heads/master
| 2020-09-04T05:49:50.855154
| 2017-07-27T03:36:03
| 2017-07-27T13:00:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 27,842
|
py
|
'''
ConfigFrame is a class that gets packed into the root window. Takes config
settings from the user and passes them into the appropriate functions when the
Go button is pressed
'''
import sys
if sys.version_info[0] < 3:
#If we're executing with Python 2
import Tkinter as tk
import tkMessageBox as messagebox
import ttk
import tkFileDialog as filedialog
import tkColorChooser as colorchooser
else:
#Otherwise we're using Python 3
import tkinter as tk
from tkinter import messagebox
from tkinter import ttk
from tkinter import filedialog
from tkinter import colorchooser
from graphWindow import *
import serial.tools.list_ports
import pprint
from defaults import defaults
import os
class ConfigFrame(ttk.Frame):
"""
Main application Frame. Houses all the individual objects (classes) that
make up the application
"""
def __init__(self, parent):
ttk.Frame.__init__(self, parent)
self.parent = parent
self['padding'] = '4'
self.TKvariables = {}
#Read in the defaults
for key in defaults:
if key[0:5] == 'graph' or key.find('ylims') >= 0:
self.TKvariables.update({key:[]})
for val in range(len(defaults[key])):
self.TKvariables[key].append(tk.StringVar(value=defaults[key][val]))
else:
self.TKvariables.update({key:tk.StringVar(value=defaults[key])})
num_vars = int(self.TKvariables['datalength'].get())
self.datalist = list(range(1,num_vars+1))
#Create a combobox containing the available COM ports
comlst = self.get_comlst()
self.COMbox = ttk.Labelframe(self, text='COM port to source data from')
self.COMcombo = ttk.Combobox(self.COMbox, width=60, values=comlst, \
state='readonly', textvariable=self.TKvariables['COMport'],\
postcommand=self.updateCOMbox )
self.COMbox.grid(row = 0, column = 0, columnspan = 5)
self.COMcombo.grid()
#Create an "about" text box
ABOUTframe = ttk.LabelFrame(self, text = 'What it does')
ABOUTlabel = ttk.Label(ABOUTframe, text= \
'Graphs data coming in over the serial port in a comma '
            'separated variable string. Hover over each option to get '
'a description of what the setting does', wraplength = 140)
ABOUTframe.grid(row=1, column = 0, rowspan = 2, columnspan = 2, \
sticky = 'nw, se', padx= 3, pady = 5)
CreateToolTip(ABOUTlabel,\
"The default values can be changed by opening defaults.py with a text "
"editor and changing the values")
ABOUTlabel.pack()
#Create a Graph! and About buttons
GObut = ttk.Button(self, text='Go!', command=self.goButton)
GObut.grid(row=6, column = 0, sticky = 'we')
ABOUTbut = ttk.Button(self, text='About', command=self.aboutButton)
ABOUTbut.grid(row = 6, column = 1, sticky = 'we')
#Create an instance of the class for the config panel
notebook = ConfigNotebook(self, self)
#Update the state of the graphs based on the defaults and grid
notebook.updateGraphs()
notebook.grid(row=1, column=3, columnspan=2, rowspan=6, sticky = 'nsew', \
padx = 5, pady = 5)
#Bind the enter key to start the program
self.parent.bind("<Return>", lambda event:self.goButton())
def getfilename(self):
if self.TKvariables['log2file'].get() == 'on':
#Only pop up with the dialog when the box is checked
options = {}
            options['filetypes'] = [('Comma Separated Variable', '.csv')]
options['initialfile'] = 'GraphLog.csv'
self.TKvariables['filename'].set(filedialog.asksaveasfilename(**options))
def goButton(self):
self.parent.variables = {}
for key in self.TKvariables:
if key[0:5] == 'graph' or key.find('ylims') >= 0:
self.parent.variables.update({key:[]})
for val in range(len(self.TKvariables[key])):
self.parent.variables[key].append(self.TKvariables[key][val].get())
else:
self.parent.variables.update({key:self.TKvariables[key].get()})
if self.parent.variables['COMport'] == '':
messagebox.showerror(message='Select a COM port!')
else:
try:
#Try to open the COM port first to make sure it's available
if os.name == 'nt':
s = serial.Serial(port=self.parent.variables['COMport'][0:4])
else:
first_space = self.parent.variables['COMport'].index(' ')
# Parameters necessary due to https://github.com/pyserial/pyserial/issues/59
s = serial.Serial(port=self.parent.variables['COMport'][0:first_space], rtscts=True, dsrdtr=True)
s.close()
GraphTopLevel(self.parent)
except Exception as e:
#Otherwise the port isn't available, so error out
messagebox.showerror(message=('COM port not available: ', e))
def aboutButton(self):
toplvl = tk.Toplevel()
toplvl.title('About')
txt = ttk.Label(toplvl, wraplength=450, text= \
"This program was written by Victor Zaccardo as a way to familiarize "
"myself with Python, and also so I don't have to try and read a serial"
" terminal every time I want to visualize data coming out of a "
"microcontroller. It's written in Python 2.7, using tkinter, "
"matplotlib, and pyserial. \n \n I hope it can be helpful with your "
"embedded projects. If you have any questions or comments, feel free "
"to contact me at victorzaccardo@gmail.com. Happy plotting!")
txt.grid(row=0, column=0, padx=5, pady=5)
closeButton = ttk.Button(toplvl, text='Close', command=toplvl.destroy)
closeButton.grid(row=1, column=0, pady = 3)
toplvl.update()
scrwidth = toplvl.winfo_screenwidth()
scrheight = toplvl.winfo_screenheight()
winwidth = toplvl.winfo_reqwidth()
winheight = toplvl.winfo_reqheight()
winposx = int(round(scrwidth/2 - winwidth/2))
winposy = int(round(scrheight/2 - winheight/2))
toplvl.geometry('{}x{}+{}+{}'.format(winwidth, winheight, winposx, winposy))
def get_comlst(self):
"""Returns a list of available COM ports with description"""
comports = serial.tools.list_ports.comports()
comlst = []
for item in comports:
name = item[0]
if len(item[1]) > 50:
description = item[1][0:44] + "..."
else:
description = item[1]
comlst.append(str(name + " - " + description))
return sorted(comlst)
def updateCOMbox(self):
self.COMcombo['values'] = self.get_comlst()
class ConfigNotebook(ttk.Notebook):
"""
A notebook that houses all the configuration for the program. Calls classes
that configure each tab individually and places them in the notebook.
Note on the controller - it's the top level of the application (an instance
of MainApplication). It will have a dictionary that houses all the
TKvariables, makes accessing them easier.
"""
def __init__(self, parent, controller):
ttk.Notebook.__init__(self, parent)
self.controller = controller
datalist = list(range(1,7))
datalist.insert(0,'-')
#Create the pages
serialcfgframe = SerialTab(self, self.controller)
datacfgframe = DataTab(self, self.controller)
graph1frame = GraphTab(self, self.controller, 1)
graph2frame = GraphTab(self, self.controller, 2)
graph3frame = GraphTab(self, self.controller, 3)
#Add them to the notebook
self.add(datacfgframe, text='Data')
self.add(serialcfgframe, text='Serial')
self.add(graph1frame, text='Graph 1')
self.add(graph2frame, text='Graph 2')
self.add(graph3frame, text='Graph 3')
def updateGraphs(self, *args, **kwargs):
num_graphs = int(self.controller.TKvariables['numgraphs'].get())
#First, disable all the graphs
for i in range(2, 5):
self.tab(i, state='disabled')
#Now, re-enable based on how many graphs are selected
if num_graphs >= 1:
self.tab(2, state='normal')
if num_graphs >= 2:
self.tab(3, state='normal')
if num_graphs >= 3:
self.tab(4, state='normal')
class SerialTab(ttk.Frame):
def __init__(self, parent, controller):
ttk.Frame.__init__(self, parent)
self.controller = controller
self['padding'] = [0, 7, 0, 0]
#Populate the serial configuration tab
self.baudlist = (4800, 9600, 19200, 38400, 57600, 115200, 230400, 921600)
self.databitslist = (7, 8)
self.stopbitslist = (1, 2)
self.paritylist = ('None', 'Even', 'Odd', 'Mark', 'Space')
baudlabel = ttk.Label(self, text='Baudrate')
baudbox = ttk.Combobox(self, width=8, values=self.baudlist,
textvariable=self.controller.TKvariables['baud'])
datalabel = ttk.Label(self, text='Data bits')
databox = ttk.Combobox(self, width=8, values = self.databitslist, \
textvariable=self.controller.TKvariables['databits'])
stopbitslabel = ttk.Label(self, text='Stop bits')
stopbitsbox = ttk.Combobox(self, width=8, values=self.stopbitslist, \
textvariable=self.controller.TKvariables['stopbits'])
paritylabel = ttk.Label(self, text='Parity')
paritybox = ttk.Combobox(self, width=8, values=self.paritylist, \
textvariable=self.controller.TKvariables['parity'])
#ttk.Label(self, text=' ').grid(row=1, column=0)
baudlabel.grid(row=1, column = 1, padx=5)
baudbox.grid(row=1, column=2, padx=5)
datalabel.grid(row=2, column = 1, padx=5)
databox.grid(row=2, column=2, padx=5)
stopbitslabel.grid(row=3, column = 1, padx=5)
stopbitsbox.grid(row=3, column=2, padx=5)
paritylabel.grid(row=4, column = 1, padx=5)
paritybox.grid(row=4, column=2, padx=5)
class DataTab(ttk.Frame):
"""
Houses configuration for the incoming data
"""
def __init__(self, parent, controller):
ttk.Frame.__init__(self, parent)
self['padding'] = 4
self.parent = parent
self.controller = controller
self.datalist = list(range(1,11))
self.terminatorlist = ['\\n', ';', '\\n;']
self.numgraphslist = list(range(1,4))
#How long is the data coming in?
datalabel = ttk.Label(self, text='Variables per line')
databox = ttk.Combobox(self, width=8, values = self.datalist, \
textvariable=self.controller.TKvariables['datalength'])
CreateToolTip(datalabel,\
"The numbder of variables per line. "
"A line is a series of variables seperated by a comma, and terminated by a \\n character. "
"For example, the line: data1, data2, data3\\n would have 3 variables. "
"All data recieved must be a string, no binary numbers allowed")
maxlabel = ttk.Label(self, text='Max Message Length')
maxbox = ttk.Entry(self, width=11, \
textvariable=self.controller.TKvariables['maxlength'])
CreateToolTip(maxlabel, \
            'The maximum length of one line (in characters). If anything, '
            'be conservative with this number and err on the high side. The program reads '
'lines from the serial buffer until it is below this number of characters, to avoid '
'a condition where it tries to read a line out of the serial buffer and a \\n '
"can't be found"
)
numgraphslabel = ttk.Label(self, text='Number of graphs')
numgraphsbox = ttk.Combobox(self, width=8, values=self.numgraphslist, \
textvariable=self.controller.TKvariables['numgraphs'])
numgraphsbox.bind('<<ComboboxSelected>>', self.parent.updateGraphs)
CreateToolTip(numgraphslabel,\
"The number of graphs to plot data on")
maxcheck = ttk.Checkbutton(self, text='Start Maximized?', \
variable=self.controller.TKvariables['startmax'], \
onvalue='yes', offvalue='no')
CreateToolTip(maxcheck, \
"When the graph is started, the window will be maximized.")
log2filecheck = ttk.Checkbutton(self, text='Log to file?',\
variable=self.controller.TKvariables['log2file'], onvalue='on', \
offvalue='off', command=self.controller.getfilename)
CreateToolTip(log2filecheck, \
"If checked, all data recieved will also be logged to a CSV file")
AObutton = ttk.Button(self, text='Advanced Options', command=self.AObutton)
datalabel.grid(row=1, column = 1, sticky='w')
databox.grid(row=1, column = 2, sticky='w', padx=7)
maxlabel.grid(row=2, column=1, sticky='w')
maxbox.grid(row=2, column=2, sticky='w', padx=7)
numgraphslabel.grid(row=3, column=1, sticky='w')
numgraphsbox.grid(row=3, column=2, sticky='w', padx=7)
maxcheck.grid(row=4, column=1, columnspan=2, sticky='w')
log2filecheck.grid(row=5, column=1, columnspan=2, sticky='w')
AObutton.grid(row=6, column=1, columnspan=2, sticky='ew')
def AObutton(self):
toplvl = tk.Toplevel()
toplvl.withdraw()
frame = ttk.Frame(toplvl, padding=[4, 4, 4, 4])
boxwidth = 8
boxpadx = 5
TKvars = self.controller.TKvariables
#Data Depth
datalabel = ttk.Label(frame, text='Data History Depth')
databox = ttk.Entry(frame, width=boxwidth, textvariable=TKvars['datadepth'])
datapostlbl = ttk.Label(frame, text='Lines')
datalabel.grid(row=0, column=0, sticky='e')
databox.grid(row=0, column=1, sticky='ew', padx=boxpadx)
datapostlbl.grid(row=0, column=2, sticky='w')
CreateToolTip(datalabel, \
'How many lines of data to plot on the x axis. More = longer history '
'displayed on the screen')
#Refresh Frequency
refreshlabel = ttk.Label(frame, text='Refresh Frequency')
refreshbox = ttk.Entry(frame, width=boxwidth, textvariable=TKvars['refreshfreq'])
refreshpostlbl = ttk.Label(frame, text='Hz')
refreshlabel.grid(row=1, column=0, sticky='e')
refreshbox.grid(row=1, column=1, sticky='ew', padx=boxpadx)
refreshpostlbl.grid(row=1, column=2, sticky='w')
CreateToolTip(refreshlabel, \
'How often to redraw the screen. Any value higher than what your PC '
        'can do will just max out the processor. A reasonable value to start '
'with is 20')
#Data Width
widthlabel = ttk.Label(frame, text='Statusbar Data Width')
widthbox = ttk.Entry(frame, width=boxwidth, textvariable=TKvars['stsbrwdth'])
widthpostlbl = ttk.Label(frame, text='Chars')
widthlabel.grid(row=2, column=0, sticky='e')
widthbox.grid(row=2, column=1, sticky='ew', padx=boxpadx)
widthpostlbl.grid(row=2, column=2, sticky='w')
CreateToolTip(widthlabel, \
        'This is for keeping the "last line received" value in the statusbar '
'a constant width. If you find that the statusbar is jumping around, '
'increase this value')
#Set as defaults
defaultbutton = ttk.Button(frame, text='Set selections as defaults', \
command=self.setDefaults)
defaultbutton.grid(row=3, column=0, columnspan=1, pady=1, sticky='ww')
CreateToolTip(defaultbutton, \
'Set ALL the current settings as the defaults')
#OK button
OKbutton = ttk.Button(frame, text='OK', width=10, command=toplvl.destroy)
OKbutton.grid(row=3, column=1, columnspan=2, pady=1, sticky='e')
frame.grid()
toplvl.update()
scrwidth = toplvl.winfo_screenwidth()
scrheight = toplvl.winfo_screenheight()
winwidth = toplvl.winfo_reqwidth()
winheight = toplvl.winfo_reqheight()
winposx = int(round(scrwidth/2 - winwidth/2))
winposy = int(round(scrheight/2 - winheight/2))
toplvl.geometry('{}x{}+{}+{}'.format(winwidth, winheight, winposx, winposy))
toplvl.deiconify()
def setDefaults(self):
defaultstmp = {}
TKvars = self.controller.TKvariables
for key in TKvars:
if key[0:5] == 'graph' or key.find('ylims') >= 0:
defaultstmp.update({key:[]})
for val in range(len(TKvars[key])):
try:
defaultstmp[key].append(int(TKvars[key][val].get()))
except:
defaultstmp[key].append(TKvars[key][val].get())
elif key == 'filename':
#There is a bug with pprint that puts a u in front of the
#filename, so convert it to a string first
defaultstmp.update({key:str(TKvars[key].get())})
else:
try:
defaultstmp.update({key:int(TKvars[key].get())})
except:
defaultstmp.update({key:TKvars[key].get()})
fileobj = open('defaults.py', 'w')
header = \
"'''\n" \
"Be careful when modifying these values - if they aren't set correctly, \n" \
"the program won't run. As a precaution if you modify it, it's a good idea to \n" \
"save a copy first. serialplot just looks for a file in the same directory \n" \
"called 'defaults.py'\n \n" \
"The format for graphXlineX properties is:\n"\
"[datalabel,\ndatapos,\nlinecolor,\ndashed,\nmultiplier,\noffset]\n"\
"'''\n\n"
fileobj.write(header)
fileobj.write('defaults = ' + pprint.pformat(defaultstmp) + '\n')
fileobj.close()
class GraphTab(ttk.Frame):
def __init__(self, parent, controller, graphnum):
ttk.Frame.__init__(self, parent)
self.controller = controller
self['padding'] = [4, 4, 0, 0]
key1 = 'graph' + str(graphnum) + 'line1'
key2 = 'graph' + str(graphnum) + 'line2'
key3 = 'graph' + str(graphnum) + 'line3'
data1 = self.controller.TKvariables[key1][1]
color1 = self.controller.TKvariables[key1][2]
data2 = self.controller.TKvariables[key2][1]
color2 = self.controller.TKvariables[key2][2]
data3 = self.controller.TKvariables[key3][1]
color3 = self.controller.TKvariables[key3][2]
#Create 3 comboboxes to select up to 3 datas to plot
data1label = ttk.Label(self, text='Data 1 position in string')
self.data1box = ttk.Combobox(self, width=3, values=self.controller.datalist, \
textvariable=data1, postcommand=self.updatecblist)
data1color = tk.Button(self, bg=color1.get(), width=1,\
command=lambda:self.setcolor(data1color,1,1,color1))
CreateToolTip(data1label,\
"The position of the first value to plot in the incoming line. It is one indexed, so "
"the first value is in position 1")
data2label = ttk.Label(self, text='Data 2 position in string')
self.data2box = ttk.Combobox(self, width=3, values=self.controller.datalist, \
textvariable=data2, postcommand=self.updatecblist)
data2color = tk.Button(self, bg=color2.get(), width=1,\
command=lambda:self.setcolor(data2color,1,2,color2))
CreateToolTip(data2label,\
"The position of the second value in the incoming line. It is one indexed, so "
"the first value is in position 1")
data3label = ttk.Label(self, text='Data 3 position in string')
self.data3box = ttk.Combobox(self, width=3, values=self.controller.datalist, \
textvariable=data3, postcommand=self.updatecblist)
data3color = tk.Button(self, bg=color3.get(), width=1,\
command=lambda:self.setcolor(data3color,1,3,color3))
CreateToolTip(data3label,\
"The position of the third value in the incoming line. It is one indexed, so "
"the first value is in position 1")
#Create an advanced options button
AObutton = ttk.Button(self, text='Advanced Options', \
command=lambda:self.AObutton(graphnum))
data1label.grid(row=1, column = 1, columnspan=3, sticky='w', pady = 3)
self.data1box.grid(row=1, column=4, sticky='w', padx = 5)
data1color.grid(row=1, column=5, padx=2)
data2label.grid(row=2, column = 1, columnspan=3, sticky='w', pady = 3)
self.data2box.grid(row=2, column=4, sticky='w', padx = 5)
data2color.grid(row=2, column=5)
data3label.grid(row=3, column=1, columnspan=3, sticky='w', pady = 3)
self.data3box.grid(row=3, column=4, sticky='w', padx = 5)
data3color.grid(row=3, column=5)
#Ymin\Ymax
key = 'g'+str(graphnum)+'ylims'
ttk.Label(self, text='Ymin').grid(row=4, column=1, sticky='w')
ttk.Entry(self, width=5, textvariable=self.controller.TKvariables[key][0] \
).grid(row=4, column=2, sticky='ew')
ttk.Label(self, text='Ymax').grid(row=4, column=3, sticky='e', padx=3)
ttk.Entry(self, width=6, textvariable=self.controller.TKvariables[key][1] \
).grid(row=4, column=4, sticky='ew', padx=5)
AObutton.grid(row=5, column=1, columnspan=5, sticky='nsew', pady=6)
def updatecblist(self):
num_vars = int(self.controller.TKvariables['datalength'].get())
self.controller.datalist = list(range(1,num_vars+1))
self.controller.datalist.insert(0, '-')
self.data1box['values'] = self.controller.datalist
self.data2box['values'] = self.controller.datalist
self.data3box['values'] = self.controller.datalist
def setcolor(self, button, graph, line, initialcolor):
color = colorchooser.askcolor(initialcolor=initialcolor.get())
#If the user hits cancel, the dialog returns a "Nonetype" object
#which causes issues, so check for it:
if isinstance(color[1], str):
button['bg'] = color[1]
key = 'graph'+str(graph)+'line'+str(line)
self.controller.TKvariables[key][2].set(value=color[1])
def AObutton(self, graphnum):
toplvl = tk.Toplevel()
toplvl.withdraw()
frame = ttk.Frame(toplvl, padding=[2, 3, 3, 0])
boxwidth = 15
#Create the labels
lbl = ttk.Label(frame, text='Label')
CreateToolTip(lbl, \
'This text will show up in the legend and the log file')
lbl.grid(row=0, column=1)
mult = ttk.Label(frame, text='Multiplier')
CreateToolTip(mult, \
'Multiply by this value')
mult.grid(row=0, column=2)
offset = ttk.Label(frame, text='Offset')
CreateToolTip(offset, \
'Add this value. Happens AFTER the data is multiplied')
offset.grid(row=0, column=3)
dashed = ttk.Label(frame, text='Dashed')
CreateToolTip(dashed, \
'If checked, the line will be dashed')
dashed.grid(row=0, column=4)
ttk.Label(frame, text='Line 1').grid(row=1, column=0, padx=2)
ttk.Label(frame, text='Line 2').grid(row=2, column=0, padx=2)
ttk.Label(frame, text='Line 3').grid(row=3, column=0, padx=2)
for row in range(1,3+1):
key = 'graph'+str(graphnum)+'line'+str(row)
#Label
ttk.Entry(frame, width=boxwidth, \
textvariable=self.controller.TKvariables[key][0]).grid(row=row, column=1)
#Multiplier
ttk.Entry(frame, width=boxwidth, \
textvariable=self.controller.TKvariables[key][4]).grid(row=row, column=2)
#Offset
ttk.Entry(frame, width=boxwidth, \
textvariable=self.controller.TKvariables[key][5]).grid(row=row, column=3)
#Dashed
ttk.Checkbutton(frame, onvalue='--', offvalue='-', \
variable=self.controller.TKvariables[key][3]).grid(row=row, column=4)
ttk.Button(frame, text='OK', command=toplvl.destroy).grid(row=5,\
column=3, columnspan=2, sticky='ew', pady=4)
#Center the window
frame.grid()
toplvl.update()
scrwidth = toplvl.winfo_screenwidth()
scrheight = toplvl.winfo_screenheight()
winwidth = toplvl.winfo_reqwidth()
winheight = toplvl.winfo_reqheight()
winposx = int(round(scrwidth/2 - winwidth/2))
winposy = int(round(scrheight/2 - winheight/2))
toplvl.geometry('{}x{}+{}+{}'.format(winwidth, winheight, winposx, winposy))
toplvl.deiconify()
class CreateToolTip(object):
"""
create a tooltip for a given widget
"""
def __init__(self, widget, text='widget info'):
        self.waittime = 500     #milliseconds
self.wraplength = 180 #pixels
self.widget = widget
self.text = text
self.widget.bind("<Enter>", self.enter)
self.widget.bind("<Leave>", self.leave)
self.widget.bind("<ButtonPress>", self.leave)
self.id = None
self.tw = None
def enter(self, event=None):
self.schedule()
def leave(self, event=None):
self.unschedule()
self.hidetip()
def schedule(self):
self.unschedule()
self.id = self.widget.after(self.waittime, self.showtip)
def unschedule(self):
id = self.id
self.id = None
if id:
self.widget.after_cancel(id)
def showtip(self, event=None):
x = y = 0
x, y, cx, cy = self.widget.bbox("insert")
x += self.widget.winfo_rootx() + 25
y += self.widget.winfo_rooty() + 20
# creates a toplevel window
self.tw = tk.Toplevel(self.widget)
# Leaves only the label and removes the app window
self.tw.wm_overrideredirect(True)
self.tw.wm_geometry("+%d+%d" % (x, y))
label = ttk.Label(self.tw, text=self.text, justify='left',
background="#ffffff", relief='solid', borderwidth=1,
wraplength = self.wraplength)
label.pack(ipadx=1)
def hidetip(self):
tw = self.tw
self.tw = None
if tw:
tw.destroy()
#If this script is executed, just run the main script
if __name__ == '__main__':
os.system("serialplot.py")
|
[
"grngrngrngrn@163.com"
] |
grngrngrngrn@163.com
|
3d7a692985e73617ae07c6327e87e0e7f1fba9f7
|
6303391d47ed92705d9a390136e52173720cdfaf
|
/djangogirls/bin/django-admin.py
|
2f652ae37aa0a0771b1dc3d4026ce9bbffda36c4
|
[] |
no_license
|
GloriaS18/my-first-blog
|
dbdb49c2c9bd48917395daa14f82795eac94572b
|
661940a891106a46731736e27e84e634c7fae65d
|
refs/heads/master
| 2021-01-15T14:58:44.064901
| 2016-09-17T20:15:27
| 2016-09-17T20:15:27
| 68,476,120
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
#!/home/ubuntu/workspace/django_girls/djangogirls/bin/python3.5
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
|
[
"gloriasuriel@hotmail.com"
] |
gloriasuriel@hotmail.com
|
7e50f5c46098e16de5b7cfc59d1af7492507b124
|
e0c2b6a86bb025580ccb450c80039d0db3087e7d
|
/django.wsgi
|
1b652c93667fc9e4aa2b98ee0a381606bda505ab
|
[] |
no_license
|
yeminghua/gelange
|
332eb18406d67995452a7613c256f2b1e506f974
|
d026966bdbb31912893d80f7bd69ffc98a6c3f18
|
refs/heads/master
| 2016-09-06T18:34:05.048296
| 2014-10-06T02:25:26
| 2014-10-06T02:25:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 530
|
wsgi
|
import os, sys
#Calculate the path based on the location of the WSGI script.
apache_configuration= os.path.dirname(__file__)
project = os.path.dirname(apache_configuration)
workspace = os.path.dirname(project)
#
sys.stdout = sys.stderr
sys.path.append(workspace)
print workspace,"(----------------------------------------------------------)"
sys.path.append(workspace + "/mysite/gelange/")
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import django.core.handlers.wsgi
application = django.core.handlers.wsgi.WSGIHandler()
|
[
"2523357239@qq.com"
] |
2523357239@qq.com
|
453574190afbadf01fad742c24929e94bf313b5f
|
baed2c2da1f776c0968d3cacd2fa45bdbe5482d6
|
/S4cam/groupedCameras/TMP/legacy_designs/TMP_baseline_rev_multicam_test3_elliptical_stop_leaders_8_39/elliptical_aperture/3_mk_merit_func_align_prism_and_set_margin.py
|
e83b329923c4042e2689350f76cda65679e0d9ef
|
[] |
no_license
|
patogallardo/zemax_tools
|
5ae2fe9a1e8b032684b8cf57457ee4f3239d9141
|
90d309c2f96c94469963eb905844d76fa2137bf9
|
refs/heads/master
| 2023-01-08T22:52:16.865852
| 2022-12-20T21:36:28
| 2022-12-20T21:36:28
| 234,634,525
| 7
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,451
|
py
|
import zmx_api
import zmx # noqa
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from progressbar import progressbar
import os
XRADIUS = 2500
YRADIUS = 2747
TARGET_KEEPOUT_RADIUS_MM = 150.0
def eval_distance_to_rim(max_rs, MFE, surfnum):
qsum_rownums = []
radius_rownums = []
MFE.AddOperand()
for j_field in range(len(max_rs)):
op_x = MFE.AddOperand()
rownum_x = op_x.OperandNumber
op_x.ChangeType(REAX)
op_x.GetOperandCell(2).IntegerValue = surfnum
op_x.GetOperandCell(4).DoubleValue = max_rs.hx.values[j_field] # Hx
op_x.GetOperandCell(5).DoubleValue = max_rs.hy.values[j_field] # Hy
op_x.GetOperandCell(6).DoubleValue = max_rs.px.values[j_field] # Px
op_x.GetOperandCell(7).DoubleValue = max_rs.py.values[j_field] # Py
op_x.Weight = 0.0
op_y = MFE.AddOperand()
rownum_y = op_y.OperandNumber
op_y.ChangeType(REAY)
op_y.GetOperandCell(2).IntegerValue = surfnum
op_y.GetOperandCell(4).DoubleValue = max_rs.hx.values[j_field] # Hx
op_y.GetOperandCell(5).DoubleValue = max_rs.hy.values[j_field] # Hy
op_y.GetOperandCell(6).DoubleValue = max_rs.px.values[j_field] # Px
op_y.GetOperandCell(7).DoubleValue = max_rs.py.values[j_field] # Py
op_y.Weight = 0.0
op_qsum = MFE.AddOperand()
op_qsum.ChangeType(ZOSAPI.Editors.MFE.MeritOperandType.QSUM)
op_qsum.GetOperandCell(2).IntegerValue = rownum_x
op_qsum.GetOperandCell(3).IntegerValue = rownum_y
op_qsum.Weight = 0.0
MFE.CalculateMeritFunction()
y = op_y.Value
x = op_x.Value
angle = np.arctan2(y, x)
r = np.sqrt((XRADIUS*np.cos(angle))**2
+ (YRADIUS*np.sin(angle))**2)
op_rim = MFE.AddOperand()
op_rim.ChangeType(ZOSAPI.Editors.MFE.MeritOperandType.CONS)
op_rim.Target = r
radius_rownums.append(op_rim.OperandNumber)
qsum_rownums.append(op_qsum.OperandNumber)
for j in range(len(qsum_rownums)):
op_diff = MFE.AddOperand()
if j == 0:
first_diff_rownum = op_diff.OperandNumber
if j == len(qsum_rownums) - 1:
last_diff_rownum = op_diff.OperandNumber
op_diff.ChangeType(ZOSAPI.Editors.MFE.MeritOperandType.DIFF)
op_diff.GetOperandCell(2).IntegerValue = radius_rownums[j]
op_diff.GetOperandCell(3).IntegerValue = qsum_rownums[j]
op_diff.Weight = 0.0
op_equa = MFE.AddOperand()
op_equa.ChangeType(ZOSAPI.Editors.MFE.MeritOperandType.EQUA)
op_equa.GetOperandCell(2).IntegerValue = first_diff_rownum
op_equa.GetOperandCell(3).IntegerValue = last_diff_rownum
op_equa.Weight = 1.0e-4
op_min = MFE.AddOperand()
op_min.ChangeType(ZOSAPI.Editors.MFE.MeritOperandType.MINN)
op_min.GetOperandCell(2).IntegerValue = first_diff_rownum
op_min.GetOperandCell(3).IntegerValue = last_diff_rownum
op_min.Weight = 1.0
op_min.Target = TARGET_KEEPOUT_RADIUS_MM
op_opgt = MFE.AddOperand()
op_opgt.ChangeType(ZOSAPI.Editors.MFE.MeritOperandType.OPGT)
op_opgt.Target = 140
op_opgt.Weight = 1e12
op_opgt.GetOperandCell(2).IntegerValue = op_min.OperandNumber
op_oplt = MFE.AddOperand()
op_oplt.ChangeType(ZOSAPI.Editors.MFE.MeritOperandType.OPLT)
op_oplt.Target = 1200.
op_oplt.Weight = 1e12
op_oplt.GetOperandCell(2).IntegerValue = op_min.OperandNumber
def find_max_radius_fields(df, x_mean, y_mean):
max_rs = []
gs = df.groupby(['px', 'py'])
for g in gs:
r = np.sqrt((x_mean - g[1].x)**2 + (y_mean-g[1].y)**2)
ind = r.idxmax()
max_rs.append(g[1].loc[ind])
max_rs = pd.DataFrame(max_rs)
return max_rs
def plot_rim(active_conf, df, max_rs):
fname_plotout = os.path.join(MF_DIROUT,
"footprint_rim_conf%02i.png" % active_conf) # noqa
plt.gca().set_aspect('equal')
plt.scatter(df.x, df.y, marker='.')
plt.scatter(max_rs.x, max_rs.y, marker='.')
plt.title("configuration number: %i" % active_conf)
plt.xlim([-3000, 3000])
plt.savefig(fname_plotout)
plt.close()
def eval_rim_centroid(max_rs, MFE, surfnum, REAXORY):
for j_field in range(len(max_rs)):
op = MFE.AddOperand()
if j_field == 0:
row_start = op.OperandNumber
if j_field == len(max_rs) - 1:
row_end = op.OperandNumber
op.ChangeType(REAXORY)
op.GetOperandCell(2).IntegerValue = surfnum
op.GetOperandCell(4).DoubleValue = max_rs.hx.values[j_field] # Hx
op.GetOperandCell(5).DoubleValue = max_rs.hy.values[j_field] # Hy
op.GetOperandCell(6).DoubleValue = max_rs.px.values[j_field] # Px
op.GetOperandCell(7).DoubleValue = max_rs.py.values[j_field] # Py
op.Weight = 0.0
op = MFE.AddOperand()
op.ChangeType(ZOSAPI.Editors.MFE.MeritOperandType.OSUM)
op.GetOperandCell(2).IntegerValue = row_start
op.GetOperandCell(3).IntegerValue = row_end
op.Weight = 10.0
MKPLOT = True
RUNOPTIMIZER = False
MK_MERITFUNCTIONS = True
mce_rows_to_optimize = [19, 20]
TheSystem, ZOSAPI, ZOSAPI_NetHelper = zmx_api.connect_zmx_interactive()
MFE = TheSystem.MFE
MCE = TheSystem.MCE
REAX = ZOSAPI.Editors.MFE.MeritOperandType.REAX
REAY = ZOSAPI.Editors.MFE.MeritOperandType.REAY
surfnum = 44
wavenum = 1
t = np.linspace(0, 2*np.pi, 32)[:-1]
rs = np.linspace(0, 1, 4)[:-1]
Pxs = np.cos(t)
Pys = np.sin(t)
Hxs = np.concatenate([np.cos(t) * r for r in rs])
Hys = np.concatenate([np.sin(t) * r for r in rs])
MF_DIROUT = './center_pri_footprint/'
if not os.path.exists(MF_DIROUT):
os.mkdir(MF_DIROUT)
if MK_MERITFUNCTIONS:
for active_conf in progressbar(range(1, 86)):
# MFE.GetOperandAt(1).GetOperandCell(2).IntegerValue = 1
MCE.SetCurrentConfiguration(active_conf)
px_out, py_out, hx_out, hy_out, x, y = [], [], [], [], [], []
for (Hx, Hy) in zip(Hxs, Hys):
for (Px, Py) in zip(Pxs, Pys):
valx = MFE.GetOperandValue(REAX, surfnum, wavenum,
Hx, Hy, Px, Py, 0, 0)
valy = MFE.GetOperandValue(REAY, surfnum, wavenum,
Hx, Hy, Px, Py, 0, 0)
px_out.append(Px)
py_out.append(Py)
hx_out.append(Hx)
hy_out.append(Hy)
x.append(valx)
y.append(valy)
stopval = MFE.GetOperandValue(REAX, 6, 1,
0, 0, 1, 0, 0, 0)
df = pd.DataFrame({'hx': hx_out,
'hy': hy_out,
'px': px_out,
'py': py_out,
'x': x,
'y': y})
x_mean, y_mean = df.x.mean(), df.y.mean()
max_rs = find_max_radius_fields(df, x_mean, y_mean)
x_mean, y_mean = max_rs.x.mean(), max_rs.y.mean()
max_rs = find_max_radius_fields(df, x_mean, y_mean)
if MKPLOT:
plot_rim(active_conf, df, max_rs)
# now clear merit function and write up a new one
MFE.RemoveOperandsAt(1, MFE.NumberOfOperands)
MFE.AddOperand()
MFE.GetOperandAt(1).GetOperandCell(2).IntegerValue = active_conf
MFE.AddOperand()
op_cvig = MFE.AddOperand()
op_svig = MFE.AddOperand()
op_cvig.ChangeType(ZOSAPI.Editors.MFE.MeritOperandType.CVIG)
op_svig.ChangeType(ZOSAPI.Editors.MFE.MeritOperandType.SVIG)
op_svig.GetOperandCell(2).IntegerValue = 2
eval_rim_centroid(max_rs, MFE, surfnum, REAX)
eval_rim_centroid(max_rs, MFE, surfnum, REAY)
eval_distance_to_rim(max_rs, MFE, surfnum)
mf_fnameout = os.path.abspath(os.path.join(MF_DIROUT,
"MF_conf%02i.MF" % active_conf)) # noqa
MFE.SaveMeritFunction(mf_fnameout)
if RUNOPTIMIZER:
for active_conf in progressbar(range(1, 86)):
mf_fnameout = os.path.abspath(os.path.join(MF_DIROUT,
"MF_conf%02i.MF" % active_conf))
MFE.LoadMeritFunction(mf_fnameout)
TheSystem.Tools.RemoveAllVariables()
zmx.set_variables_or_const(mce_rows_to_optimize,
active_conf,
MCE, ZOSAPI, vars=True)
zmx.zemax_optimize(TheSystem, ZOSAPI)
|
[
"26889221+patogallardo@users.noreply.github.com"
] |
26889221+patogallardo@users.noreply.github.com
|
7c250db6b4b45846c7ccc8b7a8c83c3314752242
|
edee2739b7f1500fa3d877b56ed1658c0e49febb
|
/tracker/urls.py
|
ebb01a017aa8a5889626924fc1e4239dcd5150dd
|
[] |
no_license
|
SaurabhKumarVerma/covid__tracker
|
b1752686da9b5a3848d5a1f5b2f4ae91620c60a3
|
7b06854bb61daad61b30e2e3411c84d60ae480a1
|
refs/heads/master
| 2022-11-26T23:55:51.141376
| 2020-08-02T15:07:05
| 2020-08-02T15:07:05
| 281,354,073
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 795
|
py
|
"""covid__tracker URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path
from . import views
urlpatterns = [
path('', views.home,name='home'),
path('india', views.india, name= 'india'),
]
|
[
"Saurav88871kumar@hotmail.com"
] |
Saurav88871kumar@hotmail.com
|
79ffe5d130fb052429b3fd502d82e0063444a616
|
3145d0d178cd2108c34f15528d21cdb3cc82e12b
|
/nmaptools/xmlclasses/host.py
|
8a3b95454b6df6255b4e6130132637019c8b323e
|
[] |
no_license
|
craig-stevenson/nmaptools
|
b3e51146a7081769c4b42280784a9daac6aadfa0
|
0555cc855085557cfd2423df9ad4ab4ba4fe4ddc
|
refs/heads/master
| 2021-01-18T05:14:44.855940
| 2016-12-07T00:17:06
| 2016-12-07T00:17:06
| 67,656,104
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 80
|
py
|
class Host(object):
""" """
    def __init__(self):
""" """
pass
|
[
"craig.stevenson@gmail.com"
] |
craig.stevenson@gmail.com
|
50b2a4e66eb4c1ca11dc8c4dceff8ba7ac17e7ca
|
faf8837e2131721733cdc8dba4dd616d6ef9f647
|
/data_exploration.py
|
964f7b9814c47e6b4ec06324abd99ac989475055
|
[
"MIT"
] |
permissive
|
rubenwo/ml2
|
ffec34e435c37625e62512ebb687c430eb086909
|
a981bb28e9990c8053d6bf909b9591244776d616
|
refs/heads/master
| 2022-12-09T19:41:31.090680
| 2020-09-24T15:15:13
| 2020-09-24T15:15:24
| 297,377,231
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
df_train = pd.read_json('./data/train.json')
# cuisine_count = {}
# uniques = df_train['cuisine'].unique()
#
# for unique in uniques:
# cuisine_count[str(unique)] = 0
#
# for i in range(len(df_train)):
# cuisine_count[str(df_train['cuisine'][i])] += 1
#
# for k, v in cuisine_count.items():
# print("{} occurs {} times in the dataset".format(k, v))
sns.countplot(y='cuisine', data=df_train, palette=sns.color_palette('inferno', 15))
plt.gcf().set_size_inches(15, 10)
plt.title('Cuisine Distribution', size=len(df_train['cuisine'].unique()))
plt.show()
|
[
"rwoldhui@avans.nl"
] |
rwoldhui@avans.nl
|
a25b6496f12166e06a56177364a4c1ecfbc4a31f
|
ffd5e689f88c49ab7af3554c22dc0c36301084fa
|
/thinking_and_testing_uniq_or_not_uniq.py
|
d7b067767d0bea11d3b61a30da4b020ac1ca2f17
|
[] |
no_license
|
ellismckenzielee/codewars-python
|
1710e6f0499047139479de386927c7dbd5f1cdf6
|
af3f4b4534798a58115d0565730aae28ce87437e
|
refs/heads/master
| 2023-08-09T13:38:40.964141
| 2023-08-01T14:45:22
| 2023-08-01T14:45:22
| 168,981,376
| 45
| 18
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
#thinking and testing: uniq or not uniq kata
#https://www.codewars.com/kata/56d949281b5fdc7666000004
def testit(a, b):
a = list(set(a))
b = list(set(b))
a.extend(b)
return sorted(a)
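# A quick sanity check with hypothetical inputs (not the kata's official tests):
# each list is deduplicated, then the two are merged and sorted.
assert testit([1, 3, 1], [1, 5, 5]) == [1, 1, 3, 5]
assert testit([], [2, 2]) == [2]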
|
[
"ellismckenzielee@gmail.com"
] |
ellismckenzielee@gmail.com
|
36dfa2755c12bf056337ee1a4f8dd4a03a957371
|
17a675141ee04a3f35949306b76fc8af76cca1f8
|
/pollster/polls/urls.py
|
2a7c4dc3ef2d58a4d514ae4841964d9f91116482
|
[] |
no_license
|
huss-a/Django-Python-Poll-App
|
208a071f05453b993eee2720b2e6f76ec90cd972
|
5f147cf52c6bca1c35d17bbddd649fb5f5398fee
|
refs/heads/master
| 2023-04-05T02:13:09.302713
| 2021-04-03T11:47:44
| 2021-04-03T11:47:44
| 354,278,165
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 321
|
py
|
from django.urls import path
from . import views
app_name = "Polls"
urlpatterns = [
path("", views.index, name="index"),
path('<int:question_id>/', views.detail, name='detail'),
path('<int:question_id>/results/', views.results, name='results'),
path('<int:question_id>/vote/', views.vote, name='vote'),
]
|
[
"hussaingoodboi@gmail.com"
] |
hussaingoodboi@gmail.com
|
e5bc633da5a7a8bc72a5896a2acd1b80d49ca5f1
|
91fb65972d69ca25ddd892b9d5373919ee518ee7
|
/python-training-courses/pfc-sample-programs/func_example_002_a_with_its_use.py
|
fa56a44bfd53a1e492b260df8427a8512dba5dd3
|
[] |
no_license
|
zeppertrek/my-python-sandpit
|
c36b78e7b3118133c215468e0a387a987d2e62a9
|
c04177b276e6f784f94d4db0481fcd2ee0048265
|
refs/heads/master
| 2022-12-12T00:27:37.338001
| 2020-11-08T08:56:33
| 2020-11-08T08:56:33
| 141,911,099
| 0
| 0
| null | 2022-12-08T04:09:28
| 2018-07-22T16:12:55
|
Python
|
UTF-8
|
Python
| false
| false
| 867
|
py
|
# func_example_002_a_with_its_use.py
# refer to func_example_002_without_its_use.py
#
# Passing variable number of arguments to the function
def add_numbers (*myNumbers):
sum = 0
for i in myNumbers:
sum = sum + i
return sum
num01, num02, num03, num04, num05, num06, num07, num08, num09, num10 = 1,2,3,4,5,6,7,8,9,10
# Calculate and Print sum of the first 5 numbers
sum1 = add_numbers (num01, num02, num03, num04, num05)
print ("Sum of the first 5 numbers is - ", sum1 )
# Calculate and Print sum of the numbers from 6 to 10
sum2 = add_numbers (num06, num07, num08, num09, num10)
print ("Sum of the numbers from 6 to 10 - ", sum2 )
# Calculate and Print sum of the numbers in odd positions
sum3 = add_numbers (num01, num03, num05, num07, num09)
print ("Sum of the numbers in odd positions - ", sum3)
|
[
"zeppertrek@gmail.com"
] |
zeppertrek@gmail.com
|
579ea29fb99dc58412be4efc1f1a6bbed9d95ace
|
3dc4a2d92d1e06ff48618f7fd9ac28ea5840572d
|
/docs/nginx_ui_doc_service_backup/gscloud/backup/openstack/ui/services.py
|
063acc824258f824d60b9c108404ad0a19c1d0b5
|
[] |
no_license
|
padmakarkotule/python-101
|
a390153abe783a0725fcbaa8d3d94eab3ab588bd
|
e6a0d9d742dcdc5ee921169f3447643d18179ba5
|
refs/heads/master
| 2021-09-25T11:36:02.259512
| 2020-07-02T14:42:24
| 2020-07-02T14:42:24
| 197,585,358
| 0
| 0
| null | 2021-09-22T19:18:16
| 2019-07-18T12:45:28
|
Python
|
UTF-8
|
Python
| false
| false
| 441
|
py
|
import sys
import os
from django.conf import settings
from django.conf.urls.static import static
# Class - Used to upload files.
class FileUpload():
    def __init__(self, upload, *args):
        # `upload` should be a Django UploadedFile (e.g. one entry of request.FILES);
        # the original code used one attribute both as a path string and a file object.
        self.upload = upload
    def handle_uploaded_file(self):
        filew = 'static/' + self.upload.name
        with open(filew, 'wb+') as destination:
            for chunk in self.upload.chunks():
                destination.write(chunk)
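# A minimal usage sketch (assumptions: this runs inside a Django view, the upload
# arrives in request.FILES under a hypothetical 'document' key, and a writable
# static/ directory exists relative to the working directory):
#
# def upload_view(request):
#     if request.method == 'POST':
#         FileUpload(request.FILES['document']).handle_uploaded_file()
#         return HttpResponse('ok')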
|
[
"padmakar.kotule@gmail.com"
] |
padmakar.kotule@gmail.com
|
b36813afec190c46a7d2302af035a14eaa02a543
|
e70a7d9c14279c0b07da8e5ad0a11d858c693294
|
/diffusion_benchmarks/iaea3d.py
|
150eafbab48127cc88f3b0fad8270f94816ea753
|
[] |
no_license
|
Relzohery/detran-examples
|
0d035bf04d2fcda94f1f9a6bf48915f0940bcfab
|
d0937c63fdda36dac54420df723c4243cf3abc27
|
refs/heads/master
| 2022-12-17T21:23:31.459132
| 2020-09-01T20:49:33
| 2020-09-01T20:49:33
| 279,376,516
| 0
| 0
| null | 2020-07-13T18:04:50
| 2020-07-13T18:04:49
| null |
UTF-8
|
Python
| false
| false
| 5,873
|
py
|
# detran-examples/diffusion_benchmarks/iaea3d.py
#
# Solves the 3D IAEA PWR benchmark problem. Note, because of modeling
# limitations, the void regions beyond the normal reflector are simply
# filled with more reflector.
#
# Reference keff ~ 1.029096
#
# Reference:
# Benchmark Problem Book, ANL-7416, Suppl. 2, Argonne National
# Laboratory (1977)
from detran import *
import sys
import time
def run():
#-----------------------------------------------------------------------------#
# Input
#-----------------------------------------------------------------------------#
inp = InputDB.Create()
inp.put_int("number_groups", 2)
inp.put_int("dimension", 2)
inp.put_str("equation", "diffusion")
inp.put_str("bc_west", "reflect")
inp.put_str("bc_east", "reflect")
inp.put_str("bc_south", "reflect")
inp.put_str("bc_north", "reflect")
inp.put_str("bc_bottom", "reflect")
inp.put_str("bc_top", "reflect")
inp.put_int("eigen_max_iters", 1000)
inp.put_str("eigen_solver", "arnoldi")
db = InputDB.Create("callow_db")
# outer gmres parameters
db.put_dbl("linear_solver_atol", 1e-14);
db.put_dbl("linear_solver_rtol", 1e-14);
db.put_str("linear_solver_type", "petsc");
db.put_int("linear_solver_maxit", 5000);
db.put_int("linear_solver_gmres_restart", 30);
db.put_str("eigen_solver_type", "slepc");
db.put_dbl("eigen_solver_tol", 1e-14);
db.put_int("linear_solver_monitor_level", 0);
inp.put_spdb("outer_solver_db", db)
inp.put_spdb("eigen_solver_db", db)
#-----------------------------------------------------------------------------#
# Material
#-----------------------------------------------------------------------------#
# Note, all absorption cross sections are simply put into the total.
mat = Material.Create(6, 2, "IAEA-3D")
# Fuel 1
mat.set_sigma_t(0, vec_dbl([0.03, 0.08]))
mat.set_sigma_s(0, 1, 0, 0.02)
mat.set_diff_coef(0, vec_dbl([1.5, 0.4]))
mat.set_sigma_f(0, 1, 0.135)
mat.set_chi(0, 0, 1.0)
# Fuel 1 + Rod
mat.set_sigma_t(1, vec_dbl([0.030, 0.085]))
mat.set_sigma_s(1, 1, 0, 0.02)
mat.set_diff_coef(1, vec_dbl([1.5, 0.4]))
mat.set_sigma_f(1, 1, 0.135)
mat.set_chi(1, 0, 1.0)
# Fuel 2
mat.set_sigma_t(2, vec_dbl([0.03, 0.13]))
mat.set_sigma_s(2, 1, 0, 0.02)
mat.set_diff_coef(2, vec_dbl([1.5, 0.4]))
mat.set_sigma_f(2, 1, 0.135)
mat.set_chi(2, 0, 1.0)
# Reflector
mat.set_sigma_t(3, vec_dbl([0.04, 0.01]))
mat.set_sigma_s(3, 1, 0, 0.04)
mat.set_diff_coef(3, vec_dbl([2.0, 0.3]))
# Reflector + Rod
mat.set_sigma_t(4, vec_dbl([0.04, 0.55]))
mat.set_sigma_s(4, 1, 0, 0.04)
mat.set_diff_coef(4, vec_dbl([2.0, 0.3]))
# High Absorber
mat.set_sigma_t(5, vec_dbl([1.00, 1.00]))
mat.set_sigma_s(5, 1, 0, 0.00)
mat.set_diff_coef(5, vec_dbl([0.3333, 0.3333]))
mat.finalize()
#-----------------------------------------------------------------------------#
# Geometry
#-----------------------------------------------------------------------------#
# This sets up for a 2cm mesh in all directions.
# XY plane discretization
cmH = [0.0, 10.0, 30.0, 50.0, 70.0, 90.0, 110.0, 130.0, 150.0, 170.0]
fmH = vec_int(9, 2)
for i in range(1, 9) :
fmH[i] = 2*fmH[i]
# Axial discretization
cmV = [0.0, 20.0, 280.0, 360.0, 380.0]
#fmV = [ 5, 65, 20, 5 ]
fmV = [ 2, 26, 8, 2 ]
mt = [# 0.0 - 20.0
3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3,
# 20.0 - 280.0
2, 1, 1, 1, 2, 1, 1, 0, 3,
1, 1, 1, 1, 1, 1, 1, 0, 3,
1, 1, 1, 1, 1, 1, 0, 0, 3,
1, 1, 1, 1, 1, 1, 0, 3, 3,
2, 1, 1, 1, 2, 0, 0, 3, 3,
1, 1, 1, 1, 0, 0, 3, 3, 3,
1, 1, 0, 0, 0, 3, 3, 3, 3,
0, 0, 0, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3,
# 280.0 - 360.0
2, 1, 1, 1, 2, 1, 1, 0, 3,
1, 1, 1, 1, 1, 1, 1, 0, 3,
1, 1, 2, 1, 1, 1, 0, 0, 3,
1, 1, 1, 1, 1, 1, 0, 3, 3,
2, 1, 1, 1, 2, 0, 0, 3, 3,
1, 1, 1, 1, 0, 0, 3, 3, 3,
1, 1, 0, 0, 0, 3, 3, 3, 3,
0, 0, 0, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3,
# 360.0 - 380.0
4, 3, 3, 3, 4, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 4, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3,
4, 3, 3, 3, 4, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3
]
mesh = Mesh3D.Create(fmH, fmH, fmV, cmH, cmH, cmV, mt)
#-----------------------------------------------------------------------------#
# Execute
#-----------------------------------------------------------------------------#
    solver = Eigen3D(inp, mat, mesh)
t = time.time()
solver.solve()
state = solver.state()
print state.eigenvalue()
print "elapsed = ", time.time()-t
#-----------------------------------------------------------------------------#
# Plot
#-----------------------------------------------------------------------------#
try :
silo = SiloOutput(mesh)
silo.initialize("iaea3d.silo")
silo.write_scalar_flux(state)
silo.finalize()
except :
print "Silo error (not installed?)"
if __name__ == "__main__":
Manager.initialize(sys.argv)
run()
|
[
"robertsj@mit.edu"
] |
robertsj@mit.edu
|
4ff26129e91fe2b58d30667b48e363455eebdd6a
|
636f56227558589e2323d4a28f14dbad465772f2
|
/fun.py
|
50616e13a666a738150909b357519f4a1bab3e6d
|
[] |
no_license
|
ParthRoot/Basic-Python-Programms
|
82dc4b190c1048d1f69b28b98bd7d67ecbdb4efb
|
16abb3d71c25dd1564ad3fbcd888ef36367cdf5c
|
refs/heads/master
| 2023-03-14T22:29:41.298816
| 2021-03-17T09:11:13
| 2021-03-17T09:11:13
| 262,701,673
| 0
| 1
| null | 2021-03-16T17:04:34
| 2020-05-10T03:05:09
|
Python
|
UTF-8
|
Python
| false
| false
| 612
|
py
|
#Create A function
def fun():
print("Hello")
fun()
#Take argument to function
def fun1(name):
print("Name:-",name)
fun1("Parth")
tup = ("Parth","Jaimin","Varshil","Tirth")
c = len(tup)
for i in range(c):
fun1(tup[i])
A = ["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z"]
a = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"]
for i in range(min(len(A), len(a))):  # pair up letters; `len(A) and len(a)` only returned the second length
print(A[i]+ " =",a[i],end=",")
print("\n")
def add(a,b):
return a + b
print("Addition:-",add(10,20))
|
[
"noreply@github.com"
] |
noreply@github.com
|
e4d4b373570fda5ade6a5eb3a42fe9567339e80e
|
f47995e06e0f55045134039f7c3af62a799a1329
|
/learnpythonthehardway/ex8/ex8.py
|
3dc2d3e3be8edc4a39b45c914ee0855240ccfc02
|
[] |
no_license
|
roytest001/PythonHomework
|
bef0b37d96dd7f1aa3a96fe78ca5d25cd9e5e96d
|
e6ef9b4b38093284a31922bff9a8d0beb9ad7ec8
|
refs/heads/master
| 2021-01-21T23:33:51.253675
| 2016-10-02T03:34:16
| 2016-10-02T03:34:16
| 16,234,802
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 693
|
py
|
#coding=utf-8
formatter = "%r %r %r %r"
print formatter % (1, 2, 3, 4)
print formatter % ("one", "two", "three", "four")
print formatter % (True, True, True, False)
print formatter % (formatter, formatter, formatter, formatter)
#Why does %r sometimes print single quotes when I actually typed double quotes?
#Python prints strings in whatever way is most efficient, not exactly the way you
#wrote them. That is acceptable for %r, since it is meant for debugging and
#troubleshooting; there is no need for the output to look pretty.
print formatter % (
    "I think this is a bug.",
    "That you could type up right",
    "But it didn't sing.",
    "So I said goodnight."
)
|
[
"xxjbs001@126.com"
] |
xxjbs001@126.com
|
248595c051f82ce2e6704e87a08a396027838f94
|
7a14adcd5150c970f6a5f1ab83180f091dc0f6cc
|
/cart/models.py
|
d6dd38a5a6ae1625e18ef9beb7bc5d650f93ece2
|
[] |
no_license
|
synnea/the-modern-witcher
|
0b4349e845e12e05f7abf6e0688200a6f54dd96d
|
f85c4de1fb3167b5c595ac6843c33a55495d7259
|
refs/heads/master
| 2022-12-09T15:09:23.602406
| 2020-03-05T18:17:27
| 2020-03-05T18:17:27
| 230,802,118
| 0
| 1
| null | 2022-11-22T05:13:41
| 2019-12-29T20:52:55
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,058
|
py
|
from django.db import models
# Create your models here.
from django.contrib.auth.models import User
from items.models import Item
import datetime
class Order(models.Model):
objects = models.Manager()
full_name = models.CharField(max_length=50, blank=False, default="Test")
phone_number = models.CharField(max_length=20, blank=False, default=0)
user = models.ForeignKey(User, null=True, on_delete=models.CASCADE)
date = models.DateField(default=datetime.date.today, null=True)
def __str__(self):
return "{0} @ {1}".format(self.full_name, self.date)
class OrderLineItem(models.Model):
objects = models.Manager()
order = models.ForeignKey(Order, null=False, on_delete=models.CASCADE)
user = models.ForeignKey(User, null=True, on_delete=models.CASCADE)
product = models.ForeignKey(Item, null=False, on_delete=models.CASCADE)
quantity = models.IntegerField(blank=False)
def __str__(self):
return "{0} {1}".format(
self.quantity, self.product.name)
|
[
"carrie.poell@gmail.com"
] |
carrie.poell@gmail.com
|
408eefcd98a92dd07cb9fa4f21371235a339bf84
|
d032bc0c01a7cd598481644e22043de8df4c71c4
|
/consultant_app/versatilimagefield.py
|
90f5c5be364e762bcd094b0cd36c0169a6108c18
|
[] |
no_license
|
amrit-kumar/project-for-engineering
|
eb5f410cd2f0a271633fb6c24132a36e6215f0e0
|
7e975866e540ab4625e735009fdba971df74e393
|
refs/heads/master
| 2020-12-03T01:49:02.429186
| 2017-06-30T09:09:46
| 2017-06-30T09:09:46
| 95,863,800
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,467
|
py
|
import io
from io import StringIO
from PIL import Image
# from StringIO import StringIO
from .views import *
from versatileimagefield.datastructures import SizedImage
from django.utils.datastructures import *
from versatileimagefield.fields import VersatileImageField
from versatileimagefield.registry import versatileimagefield_registry
# Unregistering the 'crop' Sizer
# versatileimagefield_registry.unregister_sizer('crop')
# Registering a custom 'crop' Sizer
# versatileimagefield_registry.register_sizer('crop', SomeCustomSizedImageCls)
class ThumbnailImage(SizedImage):
"""
Sizes an image down to fit within a bounding box
See the `process_image()` method for more information
"""
filename_key = 'thumbnail'
def process_image(self, image, image_format, save_kwargs,
width=400, height=400):
"""
Returns a StringIO instance of `image` that will fit
within a bounding box as specified by `width`x`height`
"""
imagefile = io.BytesIO()
image.thumbnail(
(width, height),
Image.ANTIALIAS
)
image.save(
imagefile,
**save_kwargs
)
return imagefile
# Registering the ThumbnailSizer to be available on VersatileImageField
# via the `thumbnail` attribute
versatileimagefield_registry.unregister_sizer('thumbnail')
versatileimagefield_registry.register_sizer('thumbnail', ThumbnailImage)
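# A usage sketch (the model and field names are hypothetical; the '<width>x<height>'
# key is versatileimagefield's standard rendition syntax for registered sizers):
#
# class Profile(models.Model):
#     avatar = VersatileImageField(upload_to='avatars/')
#
# # profile.avatar.thumbnail['400x400'].url  -> URL of the 400x400 rendition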
|
[
"kumaramrit38@gmail.com"
] |
kumaramrit38@gmail.com
|
c96baa39b9776108de52e68614ff8a956ef413f8
|
444a9480bce2035565332d4d4654244c0b5cd47b
|
/research/recommend/JEPOO/model/mel.py
|
3581eddba520c6e2403c416cad136096a7b09a35
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
mindspore-ai/models
|
7ede9c6454e77e995e674628204e1c6e76bd7b27
|
eab643f51336dbf7d711f02d27e6516e5affee59
|
refs/heads/master
| 2023-07-20T01:49:34.614616
| 2023-07-17T11:43:18
| 2023-07-17T11:43:18
| 417,393,380
| 301
| 92
|
Apache-2.0
| 2023-05-17T11:22:28
| 2021-10-15T06:38:37
|
Python
|
UTF-8
|
Python
| false
| false
| 3,514
|
py
|
# Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.nn as nn
import mindspore as ms
from mindspore import ops
from librosa.filters import mel
from librosa.util import pad_center
from scipy.signal import get_window
class STFT(nn.Cell):
"""adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""
def __init__(self, filter_length, hop_length, win_length=None, window='hann'):
super(STFT, self).__init__()
if win_length is None:
win_length = filter_length
self.filter_length = filter_length
self.hop_length = hop_length
self.win_length = win_length
self.window = window
self.forward_transform = None
fourier_basis = np.fft.fft(np.eye(self.filter_length))
self.cutoff = int((self.filter_length / 2 + 1))
fourier_basis = np.vstack([np.real(fourier_basis[:self.cutoff, :]),
np.imag(fourier_basis[:self.cutoff, :])])
self.forward_basis = ms.Tensor(fourier_basis[:, None, :], ms.float32)
if window is not None:
assert filter_length >= win_length
fft_window = get_window(window, win_length, fftbins=True)
fft_window = pad_center(fft_window, filter_length)
fft_window = ms.Tensor(fft_window, ms.float32)
self.forward_basis *= fft_window
def construct(self, input_data):
input_data = ops.expand_dims(input_data, 1)
input_data = ops.Pad(((0, 0), (0, 0), (int(self.filter_length / 2), int(self.filter_length / 2))))(input_data)
forward_transform = nn.Conv1d(1, self.cutoff * 2, self.win_length, stride=self.hop_length, pad_mode='valid',
weight_init=self.forward_basis)(input_data)
real_part = forward_transform[:, :self.cutoff, :]
imag_part = forward_transform[:, self.cutoff:, :]
magnitude = ops.sqrt(real_part**2 + imag_part**2)
phase = ops.atan2(imag_part, real_part)
return magnitude, phase
class MelSpectrogram(nn.Cell):
def __init__(self, n_mels, sample_rate, filter_length, hop_length,
win_length=None, mel_fmin=0.0, mel_fmax=None):
super(MelSpectrogram, self).__init__()
self.stft = STFT(filter_length, hop_length, win_length)
mel_basis = mel(sample_rate, filter_length, n_mels, mel_fmin, mel_fmax, htk=True)
self.mel_basis = ms.Tensor(mel_basis, ms.float32)
self.min_bound = ms.Tensor(1e-5, ms.float32)
def construct(self, y):
magnitudes, _ = self.stft(y)
mel_output = ops.matmul(self.mel_basis, magnitudes)
mel_output = ops.clip_by_value(mel_output, clip_value_min=self.min_bound)
mel_output = ops.log(mel_output)
return mel_output
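# A minimal smoke test, assuming a 16 kHz mono signal; all sizes here are
# illustrative choices, not values prescribed by the model.
if __name__ == '__main__':
    mel_fn = MelSpectrogram(n_mels=80, sample_rate=16000,
                            filter_length=1024, hop_length=256)
    audio = ms.Tensor(np.random.randn(1, 16000), ms.float32)  # (batch, samples)
    print(mel_fn(audio).shape)  # roughly (1, 80, n_frames)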
|
[
"noreply@gitee.com"
] |
noreply@gitee.com
|
869060d928d937c0564f58f256cdcab94ae17193
|
1b124c5209cffe6544062b8233bf5f6b7f699b18
|
/app.py
|
f372930a669abde982b43e8d270ba8bf59f50bcb
|
[] |
no_license
|
graceloveora123/sqlalchemy-challenge
|
272e2ff8d6519448da7c6a6226fb14ed7f5985c1
|
c3d5cfc7a2b5a59524b080aff305032802b365a0
|
refs/heads/main
| 2023-03-18T11:53:55.352590
| 2021-03-08T05:34:10
| 2021-03-08T05:34:10
| 342,921,806
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,372
|
py
|
import numpy as np
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
from flask import Flask, jsonify
#################################################
# Database Setup
#################################################
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save reference to the table
Measurement = Base.classes.measurement
Station = Base.classes.station
#################################################
# Flask Setup
#################################################
app = Flask(__name__)
#################################################
# Flask Routes
#################################################
@app.route("/")
def welcome():
"""List all available api routes."""
return (
f"Available Routes:<br/>"
f"/api/v1.0/precipitation<br/>"
f"/api/v1.0/stations<br/>"
f"/api/v1.0/tobs<br/>"
f"/api/v1.0/[start_date format:yyyy-mm-dd]/[end_date format:yyyy-mm-dd]<br/>"
)
@app.route("/api/v1.0/precipitation")
def precipitation():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Return a list of all Precipitation Data"""
# Query all Precipitation data within a year
results = session.query(Measurement.date, Measurement.prcp).\
filter(Measurement.date >= "2016-08-24").\
all()
session.close()
all_prcp = []
for date,prcp in results:
prcp_dict = {}
prcp_dict["date"] = date
prcp_dict["prcp"] = prcp
all_prcp.append(prcp_dict)
return jsonify(all_prcp)
@app.route("/api/v1.0/stations")
def stations():
# Create our session (link) from Python to the DB
session = Session(engine)
"""Return a list of all Stations"""
results = session.query(Station.station).\
order_by(Station.station).all()
session.close()
all_stations = list(np.ravel(results))
return jsonify(all_stations)
@app.route("/api/v1.0/tobs")
def tobs():
session = Session(engine)
"""Return a list of all TOBs"""
results = session.query(Measurement.date, Measurement.tobs,Measurement.prcp).\
filter(Measurement.date >= '2016-08-23').\
filter(Measurement.station=='USC00519281').\
order_by(Measurement.date).all()
session.close()
all_tobs = []
    for date, tobs, prcp in results:
tobs_dict = {}
tobs_dict["prcp"] = prcp
tobs_dict["date"] = date
tobs_dict["tobs"] = tobs
all_tobs.append(tobs_dict)
return jsonify(all_tobs)
@app.route("/api/v1.0/<start_date>")
def Start_date(start_date):
session = Session(engine)
"""Return a list of min, avg and max tobs for a start date"""
results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start_date).all()
session.close()
start_date_tobs = []
for min, avg, max in results:
start_date_tobs_dict = {}
start_date_tobs_dict["min_temp"] = min
start_date_tobs_dict["avg_temp"] = avg
start_date_tobs_dict["max_temp"] = max
start_date_tobs.append(start_date_tobs_dict)
return jsonify(start_date_tobs)
@app.route("/api/v1.0/<start_date>/<end_date>")
def Start_end_date(start_date, end_date):
session = Session(engine)
"""Return a list of min, avg and max tobs for start and end dates"""
results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
session.close()
# Create a dictionary from the row data and append to a list of all_temporatures
start_end_tobs = []
for min, avg, max in results:
start_end_tobs_dict = {}
start_end_tobs_dict["min_temp"] = min
start_end_tobs_dict["avg_temp"] = avg
start_end_tobs_dict["max_temp"] = max
start_end_tobs.append(start_end_tobs_dict)
return jsonify(start_end_tobs)
if __name__ == "__main__":
app.run(debug=True)
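# Example requests (dates are placeholders; use whatever range the dataset covers):
#   GET /api/v1.0/precipitation
#   GET /api/v1.0/stations
#   GET /api/v1.0/tobs
#   GET /api/v1.0/2016-08-23             -> min/avg/max tobs from that date onward
#   GET /api/v1.0/2016-08-23/2017-08-23  -> min/avg/max tobs between the two dates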
|
[
"grace.bo.sun06@gmail.com"
] |
grace.bo.sun06@gmail.com
|
1754189cd09d90ce234534057586811cd9ec1a8f
|
1ed29cb9c3bc606044c12068ac8a770e5f68497a
|
/pwaProject/pwaProject/asgi.py
|
219df782b15bbb285f43b656ab1b173e9fc26280
|
[
"MIT"
] |
permissive
|
pongkiat/djangoProject
|
a2d15f990e0f3c436ff73396aacaf81370548ab5
|
246db3dea9f885af191b7585f8b88c611ea14984
|
refs/heads/master
| 2020-12-23T01:10:15.519699
| 2020-01-30T03:12:20
| 2020-01-30T03:12:20
| 236,986,169
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 397
|
py
|
"""
ASGI config for pwaProject project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pwaProject.settings')
application = get_asgi_application()
|
[
"pongkiat@OzMBP.local"
] |
pongkiat@OzMBP.local
|
07534f0568e07ffd2dbec5e462876a4982119e0d
|
cc0f8131064d1d7d5e6c97d42d0e71b16c334d81
|
/GraphRepo/graphrepo/config.py
|
ed4f0c7165f0883a9dd23eefa262b3e26858fa89
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
LennardSchaap/Github_Visualization
|
aa6591678cc5cd05c4d87f9b258c6ff167e4307d
|
455dad5ae15d26706587dd92a256fd840654e157
|
refs/heads/master
| 2022-03-30T08:20:40.240726
| 2019-12-12T00:41:39
| 2019-12-12T00:41:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,598
|
py
|
# Copyright 2019 NullConvergence
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module stores all config constants. It is a singleton
because it is used across in several modules inside the app"""
from graphrepo.singleton import Singleton
class Config(metaclass=Singleton):
"""This class contains all config flags"""
DB_URL = ""
PORT = 0
DB_USER = ""
DB_PWD = ""
REPO = ""
START_DATE = None
END_DATE = None
# If True, each branch will be indexed as a node
# and commits will be linked by a Parent relationship
# If False, then the commits are linked by a Branch
# relationship
BRANCH_AS_NODE = True
def check_config(self):
"""Checks if the config properties are set and
raises ValueError if any value misses"""
if self.DB_URL == "":
raise ValueError("Database URL is not set.")
if self.PORT == 0:
raise ValueError("Database port is not set.")
if self.DB_USER == "" or self.DB_PWD == "":
raise ValueError("Database credentials are not set.")
if self.REPO == "":
raise ValueError("Repository path not set.")
|
[
"lennard.schaap@hotmail.com"
] |
lennard.schaap@hotmail.com
|
d533929137010a828e0c1fe70530eb874680c0e9
|
ca5fc43049f94a794d90a561fd8126f02b603599
|
/i3py/core/features/options.py
|
cdcfff349c6f5457d6e1affa7b82a7ef3f760806
|
[
"BSD-3-Clause"
] |
permissive
|
Exopy/i3py
|
32d9ee343d21d275680a2d030b660a80960e99ac
|
6f004d3e2ee2b788fb4693606cc4092147655ce1
|
refs/heads/master
| 2022-02-18T21:51:16.423188
| 2019-08-28T23:51:02
| 2019-08-28T23:51:02
| 63,874,745
| 1
| 0
|
BSD-3-Clause
| 2018-05-23T09:45:26
| 2016-07-21T14:07:58
|
Python
|
UTF-8
|
Python
| false
| false
| 1,960
|
py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2016-2018 by I3py Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Feature for instrument options.
"""
from typing import Any, Union, Optional, Dict, Tuple
from .feature import Feature
from ..abstracts import AbstractOptions
class Options(Feature):
"""Feature used to access the options of an instrument.
Options in I3py are considered static (ie related to the hardware or
firmware) and are hence read only. Because there is no generic pattern
in the formatting of the options, the user is expected to implement
manually the getter function.
Parameters
----------
names : dict
Names of the different options, as returned by this feature. Hint about
the possible values can be provided as a type or a tuple of values.
"""
def __init__(self, getter: Any=True,
setter: Any=None,
names: Dict[str, Optional[Union[type, tuple]]]={},
extract: str='',
retries: int=0,
checks: Optional[str]=None,
discard: Optional[Union[Tuple[str, ...],
Dict[str, Tuple[str, ...]]]]=None,
options: Optional[str]=None) -> None:
if setter is not None:
            raise ValueError('Options is read-only and cannot have a setter.')
if not names:
raise ValueError('No names were provided for Options')
Feature.__init__(self, getter, None, extract, retries,
checks, discard, options)
self.creation_kwargs['names'] = names
self.names = names
AbstractOptions.register(Options)
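# A usage sketch (option names and values are invented for illustration):
#
# opts = Options(names={'extended_memory': bool, 'bands': ('A', 'B')})
# Options(names={'x': bool}, setter=lambda d, v: None)  # -> ValueError (read-only)
# Options(names={})                                     # -> ValueError (no names)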
|
[
"marul@laposte.net"
] |
marul@laposte.net
|
334ab93f2d0f8c5b6bb94365ef6c0618db19fb52
|
bdd3499808a2d6212c9067cdb8b7a33fccc5ac34
|
/bin/pip3.7
|
3414d036599848714f811853931ffd7670a68a37
|
[] |
no_license
|
gabrielb77/chgate
|
44963de6a7c1e0f98f02f3672fde0f278fbee2c9
|
a0f3098b105702d534a49f7fd140fad70ace3e4b
|
refs/heads/master
| 2023-04-14T21:56:18.434558
| 2021-05-01T13:14:45
| 2021-05-01T13:14:45
| 350,025,710
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 249
|
7
|
#!/home/gabriel/gitrepos/chgate/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"gabrielb77@gmail.com"
] |
gabrielb77@gmail.com
|
c9aaf04fd48dfb391e71df87bfe9992e3b8e9e6d
|
2ae92a7512a6821d28f383e050a56203fdf2b6c3
|
/week1/bike.py
|
b2b603ab90032c13991b19d7346c25cd996e9b0e
|
[] |
no_license
|
py1-10-2017/MikeGerrity-
|
7e4958c2190de078fb810fbb0e6bf3b10634f77a
|
f4981f3165024529f2d06bccbe7224b29aa08927
|
refs/heads/master
| 2021-08-06T19:18:27.494733
| 2017-11-06T21:49:36
| 2017-11-06T21:49:36
| 109,753,671
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 720
|
py
|
class Bike(object):
def __init__(self, price, max_speed):
self.price = price
self.max_speed = max_speed
self.miles = 0
def displayinfo(self):
print "Price:", self.price, "Max Speed:", self.max_speed, "Total Miles:", self.miles
return self
def ride(self):
print "Riding..."
self.miles += 10
return self
def reverse(self):
print "Going back..."
self.miles -= 5
return self
bike1 = Bike(200, "25mph")
bike2 = Bike(300, "35mph")
bike3 = Bike(100, "15mph")
bike1.ride().ride().ride().reverse().displayinfo()
bike2.ride().ride().reverse().reverse().displayinfo()
bike3.reverse().reverse().reverse().displayinfo()
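# Expected output (a worked example): bike1 rides 3 x 10 miles and reverses once
# (-5), so its displayinfo line reads:
#   Price: 200 Max Speed: 25mph Total Miles: 25
# bike2 ends at 2*10 - 2*5 = 10 miles; bike3 at 3 * -5 = -15 miles.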
|
[
"mikegerrity@hotmail.com"
] |
mikegerrity@hotmail.com
|
bf4ab1b554798c38423c6b25ffc2e3404c7b9980
|
eea1be5dbac7fa10167eae167eb6712e3937f53a
|
/siteuser/utils/models.py
|
607ac2c9399c5f052d881715a70bed9367b4b671
|
[] |
no_license
|
chidimo/Voidcoin
|
40962e46661b2a7106bd8e60d0830c3b9629b8fa
|
227c160dfa671818522781aab013f2d1fcb098a9
|
refs/heads/develop
| 2022-12-09T17:40:26.294425
| 2019-07-04T08:32:20
| 2019-07-04T08:32:20
| 135,197,447
| 5
| 2
| null | 2022-12-08T02:08:45
| 2018-05-28T18:45:19
|
Python
|
UTF-8
|
Python
| false
| false
| 441
|
py
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from .fields import AutoCreatedField, AutoLastModifiedField
class TimeStampedModel(models.Model):
"""
An abstract base class model that provides self-updating
``created`` and ``modified`` fields.
"""
created = AutoCreatedField(_('created'))
modified = AutoLastModifiedField(_('modified'))
class Meta:
abstract = True
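# A usage sketch (the model is hypothetical): inherit to get both fields for free.
#
# class Wallet(TimeStampedModel):
#     balance = models.DecimalField(max_digits=12, decimal_places=2, default=0)
#     # instances now carry self-updating `created` and `modified` timestamps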
|
[
"orjichidi95@gmail.com"
] |
orjichidi95@gmail.com
|
cc453d0093726966b9dabdcfb9474dc063796d6f
|
a7c69c094cb77b351af7e5c95ef1e7319c8688a5
|
/src/subscriptions/validators.py
|
b070f13ad5a89d91df65fa5a190344c45847f909
|
[] |
no_license
|
betanio/Eventex-WttD
|
cce65fc1bcee5f9a2713a7a970ba1413b950e771
|
1c62ce6b2d3ef552f0ce54f4b5278916eed50349
|
refs/heads/master
| 2021-01-15T11:48:24.598152
| 2012-04-17T11:23:07
| 2012-04-17T11:23:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 417
|
py
|
# -*- coding: utf-8 -*-
# Optional: ugettext_lazy
from django.utils.translation import ugettext as _
from django.core.exceptions import ValidationError
# TODO: validate the CPF check digits
def CpfValidator(value):
if not value.isdigit():
raise ValidationError(_(u'O CPF deve conter apenas números'))
if len(value) != 11:
raise ValidationError(_(u'O CPF deve ter 11 dígitos'))
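# A usage sketch (the form is hypothetical): attach the validator to a CharField.
#
# from django import forms
#
# class SubscriptionForm(forms.Form):
#     cpf = forms.CharField(max_length=11, validators=[CpfValidator])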
|
[
"betanio@betanio.com"
] |
betanio@betanio.com
|
a0053fe45551ebe58fb97b17632014d320aff29c
|
5ebbad9b3a6664a65d1ebb50056f83fe50435d3a
|
/Open Elective Python/Unit 3/7.py
|
215ba42a02c60afd4c2107dbe91ea45345e874e2
|
[] |
no_license
|
jugal13/Python_Lab
|
5892c6f2c77d0222a6b31bc40774d24b46f86475
|
f8b96b3ecf1b913f4121e8e6d89b1a610f4ecba2
|
refs/heads/master
| 2023-03-01T03:53:30.889295
| 2021-02-10T14:42:09
| 2021-02-10T14:42:09
| 145,228,905
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 65
|
py
|
import time
millis = int(round(time.time() * 1000))
print(millis)
|
[
"jugaldeepak@gmail.com"
] |
jugaldeepak@gmail.com
|
cc17fc76da532ee295786674a578eee107c245ad
|
6752274eab7fb185f493027a1da27ff452dcc20b
|
/dict.ex.py
|
53c8fec8db4ca80112361f4e03e4b6d7c8c1e194
|
[] |
no_license
|
smileyoung1993/pystudy
|
fa598b32b29bb3b5e705430a1e43e08a8bfe7ef5
|
3cd17b006db161a2e4b740a6ab3278a2c44d3f29
|
refs/heads/master
| 2020-03-19T13:22:26.465604
| 2018-06-08T06:29:51
| 2018-06-08T06:29:51
| 136,576,352
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,207
|
py
|
# dict
#method 1
d = dict()
print(d,type (d))
# method2
d = {}
print(d, type(d))
# method 3
d = dict(one = 1 , two = 2 )
print(d,type(d))
# method 4
keys = ("one","two","three")
values = (1,2,3)
print(d,type(d))
d= dict(zip(keys , values))
print(d)
# dictionary keys
print("----------------key")
#
d= {}
d[10] = "10"
d["baseball"]= 9
d[("kim",10)] = "student"
print(d,type(d))
#d(["lee",30]) = "wokers" type error
# dict method
print("-----------------method")
d= {"baseball":9,"soccer":11, "basketball":5}
print(d,type(d))
# keys()
print(d.keys())
# values()
print(d.values())
#items()
print(d.items())
# bring values
print(d['baseball'])
# print(d['handball'])  # KeyError: 'handball' is not a key
# bring values 2 : get()
print(d.get('handball'))
print(d.get('handball',"?"))# bagic value??
# delete value
del d['soccer']
print(d)
# clear()
# d.clear()
print(d)
d = {"baseball":9,"soccer":11,"basketball": 5}
# back
print("------------ back")
d['soccer']=11
# method 1: iterate over the keys
# for key in d:
for key in d.keys():
    print(key, ":", d[key])
# method 2: get the key and value together and use them: items()
for key, value in d.items() :
print("{0}:{1}".format(key,value),end =" ")
else:
print()
|
[
"znzlaos943@naver.com"
] |
znzlaos943@naver.com
|
556e496891ce059c0a7af9cfdb30d3809125d46a
|
5c1cb9ceb61425b42f39824ac3e1d1c591b1ae08
|
/accounts/api/serializers.py
|
a296d3a9eb974238c0859ade14ce17df93dc8cf4
|
[] |
no_license
|
codenit/DjangoBlog-API
|
c0b7e0fa9710c09ae7b955cf6bd89988421831a8
|
0e8a569e351121b8f7f15ce4c0c7da3be8b71a68
|
refs/heads/master
| 2020-12-30T14:34:16.279915
| 2017-05-13T19:26:08
| 2017-05-13T19:26:08
| 90,855,362
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,112
|
py
|
from django.contrib.auth import get_user_model
from rest_framework.exceptions import ValidationError
from rest_framework.serializers import (ModelSerializer,CharField)
User = get_user_model()
class UserCreateSerializer(ModelSerializer):
password2 = CharField(label='Confirm Password')
class Meta:
model = User
fields = [
'username',
'password',
'password2',
'email',
]
extra_kwargs = {
'password' : {'write_only': True},
'password2': {'write_only': True},
}
def validate_password(self, value):
data = self.get_initial()
password = data.get('password2')
password2 = value
if password != password2:
raise ValidationError('Passwords must match')
return value
def validate_password2(self, value):
data = self.get_initial()
password = data.get('password')
password2 = value
if password != password2:
raise ValidationError('Passwords must match')
return value
def create(self, validated_data):
username = validated_data['username']
email = validated_data['email']
password = validated_data['password']
user_obj = User(
username=username,
email=email
)
user_obj.set_password(password)
user_obj.save()
return validated_data
#OR
'''
def create(self, validated_data):
user = User(
email=validated_data['email'],
username=validated_data['username'],
)
user.set_password(validated_data['password'])
user.save()
return user
'''
#OR
'''
from django.contrib.auth.hashers import make_password
def create(self, validated_data):
user = User.objects.create(
email=validated_data['email'],
username=validated_data['username'],
password=make_password(validated_data['password'])
)
user.save()
return user
'''
|
[
"b.rohit751@gmail.com"
] |
b.rohit751@gmail.com
|
f49e56dfa0326a19c62734b1c325bdc011750569
|
cc86a3b84916ca8c6d94da6af2c910b1e4e06671
|
/tests/test_build_system.py
|
1f8d98d0a43befdf440e6801861ec89e1ee6d7eb
|
[
"MIT"
] |
permissive
|
Jasper-Ben/kas
|
0cf93914ceb47c6e1b4d06b18c5df801673d65e6
|
71cf5dc17bbfacc643500354891b95690e983dce
|
refs/heads/master
| 2022-12-21T13:03:04.148283
| 2022-06-23T12:51:22
| 2022-06-23T15:58:37
| 294,092,277
| 0
| 0
|
NOASSERTION
| 2020-09-09T11:27:21
| 2020-09-09T11:27:21
| null |
UTF-8
|
Python
| false
| false
| 1,835
|
py
|
# kas - setup tool for bitbake based projects
#
# Copyright (c) Siemens AG, 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import shutil
from kas import kas
def test_build_system(changedir, tmpdir):
tdir = str(tmpdir.mkdir('test_build_system'))
shutil.rmtree(tdir, ignore_errors=True)
shutil.copytree('tests/test_build_system', tdir)
os.chdir(tdir)
kas.kas(['shell', 'test-oe.yml', '-c', 'true'])
with open('build-env', 'r') as f:
assert(f.readline().strip() == 'openembedded')
kas.kas(['shell', 'test-isar.yml', '-c', 'true'])
with open('build-env', 'r') as f:
assert(f.readline().strip() == 'isar')
kas.kas(['shell', 'test-openembedded.yml', '-c', 'true'])
with open('build-env', 'r') as f:
assert(f.readline().strip() == 'openembedded')
|
[
"jan.kiszka@siemens.com"
] |
jan.kiszka@siemens.com
|
3e0199080f0e173abc45575108f0294e86c69627
|
94dc470e13c344269b8fc6aec1c6f7051d782151
|
/Python-Scripts/corona_all_active.py
|
c239359c01c303b11a2e9b0b90ee9a71809fb2e1
|
[] |
no_license
|
venkata-sravan/Corona-Stats
|
ba6cd0d88979eaa86711548f0bf8692086a63721
|
303a50b8b176301586a515293d3f69e3e12c505a
|
refs/heads/master
| 2022-04-27T02:57:50.401762
| 2020-04-17T08:28:41
| 2020-04-17T08:28:41
| 256,440,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,603
|
py
|
#! /usr/bin/python3
import requests
import lxml.html as lh
import matplotlib.pyplot as plt
import numpy as np
import csv
import pandas as pd
source = requests.get("https://www.worldometers.info/coronavirus/")
doc = lh.fromstring(source.content)
tr_elements = doc.xpath('//tr')
Active_Map={}
i=0
# Since our first row is the header, data is stored from the second row onwards
for j in range(1, int(len(tr_elements)/2)):
# T is our j'th row
T = tr_elements[j]
z=['World','Total:','','Africa','Oceania','South America','Asia','Europe','North America','Country,Other','Asia']
try:
if (T[0].text_content().replace('\n', "").strip() not in z):
Active_Map[T[0].text_content()]=int(T[6].text_content().replace(',',""))
except:
pass
Active_Map.pop('World',None)
Active_Map.pop('Total:',None)
Active_Map.pop('\n\n',None)
Active_Map.pop('\nAfrica\n',None)
Active_Map.pop('\nOceania\n',None)
Active_Map.pop('\nSouth America\n',None)
Active_Map.pop('\nAsia\n',None)
Active_Map.pop('\nEurope\n',None)
Active_Map.pop('\nNorth America\n',None)
Active_Map=sorted(Active_Map.items(), key = lambda x : x[1],reverse=True)
Active_Map=dict(Active_Map)
try:
with open('active.csv', 'w', newline='') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(['Rank','Country', 'Active Cases'])
i=0
for key, value in Active_Map.items():
i=i+1
writer.writerow([i,key, value])
except IOError:
print("I/O error")
active=pd.read_csv('active.csv',index_col=[0,1,2],encoding = "ISO-8859-1")
active.to_html('active.html')
|
[
"ec2-user@ip-172-31-9-66.eu-west-1.compute.internal"
] |
ec2-user@ip-172-31-9-66.eu-west-1.compute.internal
|
18928dfc8b26a880be56f710486d1fb3a41aa3a6
|
2727aa6dfc13e665f12b28de49a9f9f00b206ef2
|
/personal/yxg352/FitHub/FitHub/settings.py
|
487fae4f10f47a8d881282aefb8e90c3464c6268
|
[
"MIT"
] |
permissive
|
gentsk77/FitHub
|
80e99052e1b41de64406023b7346c67871d1ed2d
|
6a050e1d2718996af2cb672af6b9ea41d0da04d4
|
refs/heads/master
| 2020-05-02T09:04:17.577860
| 2019-05-06T12:17:32
| 2019-05-06T12:17:32
| 177,859,983
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,268
|
py
|
"""
Django settings for FitHub project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mk!3eidv*zlt@6i-2kvlf-lz773(id#v0)v7s6cpw2@3+6ppl)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'login.apps.LoginConfig',
'profile.apps.ProfileConfig',
'landing.apps.LandingConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework_swagger',
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'FitHub.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'FitHub.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_USER_MODEL = "login.User"
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"yxg351@case.edu"
] |
yxg351@case.edu
|
0c8153f27fb67a668ee75237e7cd43c5388cfa62
|
92773cbdd70812f45e1b9b97bbc024aee4b4b18d
|
/Chapter7. Iteration/loop.py
|
8765dd5a07dd4c28348fbcf1c1cc68b803ce3fd9
|
[] |
no_license
|
Abu-Kaisar/Python3Programming--Coursera
|
e46edc86294ac76109a89b2cb02e8b6af178dcce
|
e1b93899c4f507b9b32091283951e761e182b97a
|
refs/heads/master
| 2022-11-21T07:40:28.985698
| 2020-07-19T04:07:51
| 2020-07-19T04:07:51
| 280,787,750
| 0
| 0
| null | 2020-07-19T03:58:52
| 2020-07-19T03:58:52
| null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
# mylist = ["yo","mujju","salman","thuss"]
# for i in mylist:
# print("Hi", i ,"Dawat hai kheenchny aao")
# mylist = "dgsadafdua"
# for char in mylist:
# print("Hi", char )
s = "python rocks"
for ch in s:
print("HELLO")
import turtle  # set up the screen and turtle
wn = turtle.Screen()
mujju = turtle.Turtle()
for aColor in ["yellow", "red", "purple", "blue"]:
    mujju.color(aColor)  # repeat four times
mujju.forward(50)
mujju.left(90)
wn.exitonclick()
|
[
"mdmujahid97@gmail.com"
] |
mdmujahid97@gmail.com
|
6cea5daf636212eaff48705ad4f044d677c81e9d
|
603050499539340ea00d10b47660361ae8a31f55
|
/utils/tools.py
|
9b7c116cbddeae4c754e77a014647b8b0ee4cc12
|
[] |
no_license
|
leondelee/PointGCN
|
ee10bc6b4760c810b20330102f92da880d743ffb
|
118fc938e44ca678eb04aaf7d2abbd71c7d17968
|
refs/heads/master
| 2023-04-07T20:10:29.516774
| 2019-07-28T04:47:19
| 2019-07-28T04:47:19
| 189,544,434
| 13
| 1
| null | 2023-03-24T21:54:59
| 2019-05-31T07:00:16
|
Python
|
UTF-8
|
Python
| false
| false
| 3,724
|
py
|
# Author: llw
import os
import logging
from tqdm import tqdm
import yaml
import torch as t
import pptk
def show_point_clouds(pts, lbs):
v = pptk.viewer(pts)
v.attributes(lbs)
def normalize_point_cloud(pts):
norm = pts[:, 0] ** 2 + pts[:, 1] ** 2 + pts[:, 2] ** 2
norm = t.sqrt(norm).reshape(-1, 1)
pts = pts / norm
return pts
def get_cfg(args):
name = args.name
parent_path = os.path.dirname(__file__)
cfg_path = os.path.join(parent_path, '..', 'cfg/{}.yml'.format(name))
with open(cfg_path, "r") as file:
cfg = yaml.load(file)
file.close()
for arg in vars(args):
if getattr(args, arg) != '-1':
cfg[arg] = getattr(args, arg)
return cfg
def get_logger(cfg):
format = "%(asctime)s - %(name)s: %(message)s"
logging.basicConfig(
level=logging.INFO,
format=format
)
logger = logging.getLogger(cfg["name"] + " - " + cfg["mode"])
file_handler = logging.FileHandler(os.path.join(cfg["root_path"], cfg["log_path"], cfg["name"] + cfg["log_name"]))
file_handler.setFormatter(logging.Formatter(format))
file_handler.setLevel(logging.INFO)
logger.addHandler(file_handler)
return logger
def get_checkpoints(cfg):
checkpoint_path = os.path.join(cfg["root_path"], cfg["checkpoint_path"], cfg["name"] + cfg["checkpoint_name"])
log_path = os.path.join(cfg["root_path"], cfg["log_path"], cfg["name"] + cfg["log_name"])
if os.path.exists(checkpoint_path):
action = input("Found checkpoint at {}.\nPlease type k(eep) or d(elete) or others to ignore.\n".format(checkpoint_path))
if action == 'k':
return checkpoint_path
elif action == 'd':
print("Deleting ", checkpoint_path)
os.unlink(checkpoint_path)
print("Deleting ", log_path)
os.unlink(log_path)
return None
def clean_logs_and_checkpoints(cfg):
checkpoint_path = os.path.join(cfg["root_path"], cfg["checkpoint_path"], cfg["name"] + cfg["checkpoint_name"])
log_path = os.path.join(cfg["root_path"], cfg["log_path"], cfg["name"] + cfg["log_name"])
if os.path.exists(checkpoint_path):
print("Deleting ", checkpoint_path)
os.unlink(checkpoint_path)
if os.path.exists(log_path):
print("Deleting ", log_path)
os.unlink(log_path)
def evaluate(cfg):
# model.eval()
model = cfg['trainer_config']['model']
test_data = cfg['trainer_config']['test_data']
metric = cfg['trainer_config']['metric']
print("-------------------Evaluating model----------------------")
res = 0
cnt = 0
for batch_data in tqdm(test_data):
output = model(batch_data)
res += metric(output)
cnt += 1
res = res / cnt
model.train()
log_info = dict(
metric_name=metric.__name__,
value=res
)
return log_info
def parallel_model(model, input, output_device=0, device_ids=None):
if not device_ids:
device_ids = [0, 1]
pts, edge_index = input
edge_index = edge_index.reshape([-1, 2])
input = [pts, edge_index]
replicas = t.nn.parallel.replicate(model, device_ids)
inputs = t.nn.parallel.scatter(input, device_ids)
replicas = replicas[:len(inputs)]
for idx, ipt in enumerate(inputs):
inputs[idx][1] = inputs[idx][1].reshape([2, -1])
outputs = t.nn.parallel.parallel_apply(replicas, inputs)
return t.nn.parallel.gather(outputs, output_device)
if __name__ == '__main__':
import torch as t
from sklearn.metrics import mean_squared_error
a = t.tensor([[1, 2.1], [2, 3]])
b = t.tensor([[1, 2], [1, 2]])
a = t.autograd.Variable(a, requires_grad=True)
    print(a.detach())
|
[
"leevecent@outlook.com"
] |
leevecent@outlook.com
|
6b06c413a2f140e7891693fa52a8a9ce3fddecfb
|
65bcb44034f33554b6a1387bb08366ccba932a4c
|
/env_FindTreasure/__init__.py
|
37e4ed17ffcbc90f824bdc04122d051e6e62b300
|
[] |
no_license
|
huzican/Multi-Agent-Reinforcement-Learning-Environment
|
e9cd8e2a8736b0f0b4791dcc5e7f7dcf56c24ef1
|
d529104fb6845aa811a88279a089e192b4f8f488
|
refs/heads/master
| 2022-06-10T10:03:28.807049
| 2020-05-01T23:34:03
| 2020-05-01T23:34:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 32
|
py
|
from .env_FindTreasure import *
|
[
"yifeng.zhu@utexas.edu"
] |
yifeng.zhu@utexas.edu
|
02cc6511db06e84bf79cc05b534d88a1807479be
|
4f07c49ff9394352b464c94ca917faeb967d1b31
|
/trainer/forms.py
|
1fcd3db0b8be12ae62c1b806b52fb0fea9e31862
|
[] |
no_license
|
igandhi/gym_trainer
|
0b47c90768d732547f7d81407260deeb3b6d7255
|
e2068b9211740cf4b9d892bad6ebf2c71227366d
|
refs/heads/master
| 2020-03-07T08:28:38.987607
| 2018-03-30T03:55:49
| 2018-03-30T03:55:49
| 127,379,314
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 287
|
py
|
from django.forms import ModelForm
from .models import Routine, Exercise
class RoutineForm(ModelForm):
class Meta:
model = Routine
exclude = ['created_timestamp']
class ExerciseForm(ModelForm):
class Meta:
model = Exercise
exclude = ['routine']
|
[
"ikegandhi@gmail.com"
] |
ikegandhi@gmail.com
|
77e406016f26b3a032b6e455aaa719b572e27045
|
d662fb5d91cbf48b43c44958aad9c0bc1efd74a6
|
/ORCA1/gen_amoc_info.py
|
1494c44ff2ae8baac14471dab880bc03c68b3360
|
[
"MIT"
] |
permissive
|
julianmak/NEMO-related
|
e6200c7a69c5fef7ca26d0719e5c26d90ef8281c
|
d4cb9ebf8c496819144c5c03b5e885fd5ad2af71
|
refs/heads/master
| 2023-06-22T03:05:58.779417
| 2023-06-08T12:57:22
| 2023-06-08T12:57:22
| 125,502,547
| 4
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,917
|
py
|
#!/usr/bin/env python3
# JM: 13 Oct 2018
# process the AMOC from the *grid_V.nc files and write them to a text file
import glob, sys
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from numpy import zeros, argmax, unravel_index, amax, where, arange
from netCDF4 import Dataset
from pyCDFTOOLS.cdfmoc_atl import *
# style settings
plt.rcParams["font.family"] = "DejaVu Serif"
plt.rcParams["mathtext.fontset"] = "cm"
plt.rcParams["mathtext.rm"] = "serif"
plt.rcParams["image.cmap"] = "RdBu_r" # "*_r" is reverse of standard colour
#--------------------------------------------------------
# define the argument parser
import argparse
parser = argparse.ArgumentParser(description = "Process the scalar netcdf files and output as text")
# fixed arguments
parser.add_argument("data_dir", type = str,
help = "specify data directory")
# optional arguments
parser.add_argument("--lquery",
help = "print out the variables available", action = "store_true")
parser.add_argument("--keys", type = str,
help = "grab a specified set of matching strs (enter string in quotes, default grab everything)",
default = "*")
parser.add_argument("--v_var", type = str,
help = "variable name of v velocity",
default = "vo")
parser.add_argument("--lpng",
help = "output png files", action = "store_true")
# collect arguments
args = parser.parse_args()
#--------------------------------------------------------
# plotting subroutine
def plot_amoc(NADW_info, zW, latV, dmoc, filename):
fig = plt.figure(figsize = (8, 3))
ax = plt.axes()
mesh = ax.contourf(latV, zW, dmoc, arange(-25, 26, 2), cmap = "RdBu_r", extend = "both")
ax.set_xlim(-30, 90)
ax.plot(NADW_info[1], NADW_info[2], "k+")
ax.text(NADW_info[1] - 10, NADW_info[2] - 500, "NADW = %.1f Sv" % NADW_info[0])
ax.set_xlabel(r"Lat (${}^\circ$)")
ax.set_ylabel(r"z ($\mathrm{m}$)")
ax.set_title("Atlantic (no Med sea)")
cb = plt.colorbar(mesh)
cb.ax.set_title(r"$\mathrm{Sv}$")
fig.savefig(filename, dpi = 150, bbox_inches = "tight")
plt.close(fig)
print("generated %s , exiting..." % filename)
#--------------------------------------------------------
# Main commands
# grab the relevant filenames
file_list = []
for file in glob.glob(args.data_dir + args.keys + "grid_V.nc"):
file_list.append(file)
if not file_list:
    sys.exit("no files grabbed, are you in the right directory?")
# sort it according to the timestamps
file_list.sort()
# cycle through the files
for i in range(len(file_list)):
# grab output time in years
# assumes file format is $EXP_$PERIOD_$START_$END_scalar.nc
# so pulls out the $START and $END and keeps only the first four entries
# string here for use in output
start_time = file_list[i].replace(args.data_dir, "").split("_")[2][0:4]
end_time = file_list[i].replace(args.data_dir, "").split("_")[3][0:4]
data = Dataset(file_list[i])
t = data.variables["time_centered"][:]
if args.lquery:
for name, variable in data.variables.items():
for attrname in variable.ncattrs():
if attrname == "standard_name":
print("{} -- {}".format(name, getattr(variable, attrname)))
data.close()
print(" ")
sys.exit("finished query, exiting gen_amoc_info...")
else:
# ?? could do something like the query loop above to pull out all keys
# and dump accordingly; leaving it as manual for now
# pull out the data written in 2d field for some reason and write it out
txt_filename = args.data_dir + file_list[i].replace(args.data_dir, "").replace("grid_V.nc", "amoc.txt")
png_filename = args.data_dir + file_list[i].replace(args.data_dir, "").replace("grid_V.nc", "amoc.png")
txt_file = open(txt_filename, "w")
txt_file.write( "%s %s %s %s\n" % ("time", "NADW_str", "NADW_lat", "NADW_dep") )
for kt in range(len(t)):
print("processing %s at index %i / %i..."
% (file_list[i].replace(args.data_dir, ""), kt, len(t))
)
# global mean/totals
time = (t[kt] - t[0]) / (3600 * 24 * 365) + int(start_time)
kwargs = {"lprint" : False,
"lg_vvl" : True,
"leiv" : True, "eivv_var" : "vo_eiv"}
NADW_info, zW, latV, dmoc, _ = cdfmoc_atl(args.data_dir, file_list[i].replace(args.data_dir, ""), args.v_var, **kwargs)
txt_file.write( "%.2f %.8f %.8f %.8f\n" % (time, NADW_info[0], NADW_info[1], NADW_info[2]) )
if args.lpng:
plot_amoc(NADW_info, zW, latV, dmoc[1, :, :], png_filename)
txt_file.close()
data.close()
print("finished processing, exiting gen_amoc_info...")
|
[
"julian.c.l.mak@googlemail.com"
] |
julian.c.l.mak@googlemail.com
|
4ca8ce30ee99012214565c98a486eb42d7d55227
|
7c28981339bc23ef9a4287fe57ff47baef690603
|
/lookupdict.py
|
584b6e1f96c02f30b736d0a72868aa429b873e3f
|
[
"Apache-2.0"
] |
permissive
|
MetaphorExtractionTools/mokujin
|
26620f7b43df37433d3f3d1ddbce0fed5ebe6af4
|
c48d531dd23bdeef0ada898e796f13ba6797d137
|
refs/heads/master
| 2021-01-22T15:22:12.344968
| 2014-04-26T16:52:45
| 2014-04-26T16:52:45
| 19,182,305
| 3
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,007
|
py
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (C) USC Information Sciences Institute
# Author: Vladimir M. Zaytsev <zaytsev@usc.edu>
# URL: <http://nlg.isi.edu/>
# For more information, see README.md
# For license information, see LICENSE
"""
Simple dictionary lookup script.
Finds given word in mokujin dictionary and returns its id.
Usage:
$ python lookupdict.py <path_to_index> <word>
"""
import sys
import logging
import argparse
from mokujin.index import DepTupleIndex
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
try:
_, index_path, term = sys.argv
except:
logging.error("Wrong syntax. Usage:\n\t lookupdict.py <path_to_index> <word>")
exit(0)
indexer = DepTupleIndex(index_path)
term_id = indexer.term2id.get(term)
if term_id is not None:
sys.stdout.write("\n\tFound term '%s' with id=%d\n\n" % (term, term_id))
else:
        sys.stdout.write("\n\tTerm '%s' not found in dictionary.\n\n" % term)
|
[
"zvm@guest-wireless-nup-nat-206-117-89-004.usc.edu"
] |
zvm@guest-wireless-nup-nat-206-117-89-004.usc.edu
|
674f90b04e7ff3a972ebfdec9636df1d6f7d64f7
|
77de0b8daf81dd83022015f1f2e52a2d2a81f3ee
|
/MeasureeMapQV.py
|
b027924c4517d5be8f15ab0efe1c74fbe9f3b320
|
[
"MIT"
] |
permissive
|
JinfengChen/chm1_scripts
|
83a2f183f63d65e28fa402a01914aacb12894aa5
|
55d1783139f4ccc6e41c79812920785b1eaea65e
|
refs/heads/master
| 2020-12-25T22:47:13.831530
| 2014-09-26T18:59:20
| 2014-09-26T18:59:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 37
|
py
|
#!/usr/bin/env python
import pysam
|
[
"mchaisso@uw.edu"
] |
mchaisso@uw.edu
|
b496a8209dca6cc74b9a14cb7ba3517009a8d688
|
6cd9f3ab378cbe17cbf8aa3bc66296dfc901a6fe
|
/iiotPython_NEW/sendData.py
|
eb039fa73ce795a2934c19d435afd32f628c604d
|
[] |
no_license
|
loginwaregith/IIOT_backup
|
c1fad36097d1fe98c54197963cf45e30e023c069
|
724adb4171cfb1e2308734682ddff599016f4b65
|
refs/heads/main
| 2023-02-25T16:33:53.437463
| 2021-02-02T12:58:55
| 2021-02-02T12:58:55
| 335,266,581
| 0
| 1
| null | 2021-02-02T12:58:56
| 2021-02-02T11:29:43
|
Python
|
UTF-8
|
Python
| false
| false
| 5,952
|
py
|
#*********This script is used to send all the IIOT data from device to server**************************
#importing of required libraries
from time import sleep
import sqlite3
import requests as req
from datetime import datetime
import configuration as config
import logging as log
#making a connection with the database
conn2=sqlite3.connect(config.DATABASENAME)
# create a cursor object to execute all SQL queries
curs2=conn2.cursor()
log.basicConfig(
filename = "IIOT.log",
format = '%(asctime)s, %(levelname)-8s [%(pathname)s:%(lineno)d] %(message)s',
filemode = 'a'
)
logger = log.getLogger(__name__)
logger.setLevel(log.DEBUG)
# Function which sends AlarmInfo data
# parameters: endpoint - the endpoint to send the data to
# no return value for the function
def SendAlarmData(endpoint):
logger.info("****************SENDING ALARM DATA********************")
try:
curs2.execute("select * from alarm ")
result=curs2.fetchall()
#print(result)
if result is not None:
data={}
for colm in result:
Id=colm[0]
data["ID"]=colm[0]
data["MachineID"]=colm[1]
data["OperatorName"]=colm[2]
data["JobID"]=colm[3]
data["Shift"]=colm[4]
data["Component"]=colm[5]
data["ModelName"]=colm[6]
data["Operation"]=colm[7]
data["TimeStamp"]=colm[8]
data["Reason"]=colm[9]
response=req.post(endpoint,data=data,timeout=2)
if(response.status_code>=200 and response.status_code<=206):
curs2.execute("delete from alarm where id=(?)",(Id,))
conn2.commit()
logger.debug(f"{Id} entry send to server and deleted from local database ")
else:
logger.debug(response.status_code)
logger.info("didnot get good response from server")
return
else:
logger.info("no data to send ...")
except Exception as e:
logger.error(f"Exception occured : {e}")
return
# Function which sends liveStatus data
# parameters: endpoint - the endpoint to send the data to
# no return value for the function
def SendLiveStatus(endpoint):
logger.info("****************SENDING LIVE SIGNALS DATA********************")
try:
curs2.execute("select * from live_status")
result=curs2.fetchone()
if result is not None:
Id=str(result[0])
machineId=result[1]
machineType=result[2]
status=str(result[3])
signalColor=result[4]
signalName=result[5]
response=req.post(endpoint+"?ID="+Id+"&MachineID="+machineId+"&MachineType="+machineType+"&Status="+status+"&SignalName="+signalName+"&SignalColor="+signalColor,timeout=2)
if(response.status_code>=200 and response.status_code<=206):
logger.debug(f"Current Live Status : {signalName}")
logger.info(" Live Status data successfully sent ")
else:
logger.info("didnot get good response from server")
return
else:
logger.info("no data to send....")
except Exception as e:
logger.error(f"Exception occured : {e}")
return
# Function which sends production data
# parameters: endpoint - the endpoint to send the data to
# no return value for the function
def SendProductionData(endpoint):
logger.info("********************SENDING PRODUCTION DATA****************************")
try:
curs2.execute("select * from live_status")
liveStatusResult=curs2.fetchone()
if liveStatusResult is not None:
signalName=liveStatusResult[5]
if signalName=='Machine Idle':
curs2.execute("select * from production")
result=curs2.fetchall()
if result is not None:
data={}
for colm in result:
Id=colm[0]
data["ID"]=colm[0]
data["OperatorName"]=colm[1]
data["JobID"]=colm[2]
data["Shift"]=colm[3]
data["Component"]=colm[4]
data["ModelName"]=colm[5]
data["Operation"]=colm[6]
data["CycleTime"]=float(colm[7])
data["InspectionStatus"]=colm[8]
data["Status"]=colm[9]
data["TimeStamp"]=datetime.strptime(colm[10], '%Y/%m/%d %H:%M:%S')
data["MachineID"]=colm[11]
response=req.post(endpoint,timeout=2,data=data)
if(response.status_code>=200 and response.status_code<=206):
curs2.execute("delete from production where id=(?)",(Id,))
conn2.commit()
logger.debug(f"{Id} entry sent to server and deleted from local database..")
else:
logger.info("didnot get good response from server")
return
else:
logger.info("no data to send ...")
except Exception as e:
logger.error(f"Exception occured : {e}")
return
|
[
"erp.anand.loginware@gmail.com"
] |
erp.anand.loginware@gmail.com
|
865a2ee42ce0b83535aff7031964ddbd3c0e5d36
|
2aace9bb170363e181eb7520e93def25f38dbe5c
|
/build/idea-sandbox/system/python_stubs/cache/d7b07d14c6c27a41803588e54091bf9fcf8a2c8736580b3083f089fcd6d4da3f/cython_runtime.py
|
ddb3e50bb33750fa1bc065bec7db3288f07423db
|
[] |
no_license
|
qkpqkp/PlagCheck
|
13cb66fd2b2caa2451690bb72a2634bdaa07f1e6
|
d229904674a5a6e46738179c7494488ca930045e
|
refs/heads/master
| 2023-05-28T15:06:08.723143
| 2021-06-09T05:36:34
| 2021-06-09T05:36:34
| 375,235,940
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 274
|
py
|
# encoding: utf-8
# module cython_runtime
# from C:\Users\Doly\Anaconda3\lib\site-packages\scipy\_lib\_ccallback_c.cp37-win_amd64.pyd
# by generator 1.147
# no doc
# no imports
# Variables with simple values
__loader__ = None
__spec__ = None
# no functions
# no classes
|
[
"qinkunpeng2015@163.com"
] |
qinkunpeng2015@163.com
|
4f3f53556376f2c9e047b5edb06911d416ba074a
|
f062afcfc7da5ef1fae49ef1dbdb33a0edf83086
|
/p1/s2.py
|
f94a87a13ce58c781dcd7b6eca9bc5debdc36449
|
[] |
no_license
|
harvestcore/pyweb
|
473ea66c9b5dea22836dde966709ee5102330b9e
|
dbf6b762c33205f33f1ee9daf80dade56880a9f2
|
refs/heads/master
| 2020-04-01T18:51:53.108458
| 2018-12-18T18:47:10
| 2018-12-18T18:47:10
| 153,520,059
| 0
| 1
| null | 2021-10-10T20:50:33
| 2018-10-17T20:32:54
|
Python
|
UTF-8
|
Python
| false
| false
| 2,733
|
py
|
#!/usr/bin/env python3
import random
from time import time
def arrayAleatorio(array, tam):
for x in range (0, tam):
array[x] = random.randint(0, 100)
return array
def burbuja(array):
for passnum in range(len(array)-1,0,-1):
for i in range(passnum):
if array[i]>array[i+1]:
temp = array[i]
array[i] = array[i+1]
array[i+1] = temp
def insercion(array):
for index in range(1, len(array)):
currentvalue = array[index]
position = index
while position > 0 and array[position-1] > currentvalue:
array[position] = array[position-1]
position = position-1
array[position] = currentvalue
def seleccion(array):
for fillslot in range(len(array)-1, 0, -1):
positionOfMax = 0
for location in range(1, fillslot + 1):
if array[location] > array[positionOfMax]:
positionOfMax = location
temp = array[fillslot]
array[fillslot] = array[positionOfMax]
array[positionOfMax] = temp
def quickSort(array):
quickSortHelper(array, 0, len(array)-1)
def quickSortHelper(array, first, last):
if first < last:
splitpoint = partition(array,first,last)
quickSortHelper(array, first, splitpoint-1)
quickSortHelper(array, splitpoint+1, last)
def partition(array,first,last):
pivotvalue = array[first]
leftmark = first+1
rightmark = last
done = False
while not done:
while leftmark <= rightmark and array[leftmark] <= pivotvalue:
leftmark = leftmark + 1
while array[rightmark] >= pivotvalue and rightmark >= leftmark:
rightmark = rightmark -1
if rightmark < leftmark:
done = True
else:
temp = array[leftmark]
array[leftmark] = array[rightmark]
array[rightmark] = temp
temp = array[first]
array[first] = array[rightmark]
array[rightmark] = temp
return rightmark
if __name__ == '__main__':
tam = 5000
array = [0] * tam
array = arrayAleatorio(array, tam)
inicio = time()
burbuja(array)
fin = time()
#print("Burbuja:\t", array)
print("Burbuja: ", fin - inicio)
    array = arrayAleatorio(array, tam)
inicio = time()
insercion(array)
fin = time()
#print("Inserción:\t", array)
print("Inserción: ", fin - inicio)
    array = arrayAleatorio(array, tam)
inicio = time()
seleccion(array)
fin = time()
#print("Selección:\t", array)
print("Selección: ", fin - inicio)
    array = arrayAleatorio(array, tam)
inicio = time()
quickSort(array)
fin = time()
#print("QuickSort:\t", array)
print("QuickSort: ", fin - inicio)
|
[
"aagomezies@gmail.com"
] |
aagomezies@gmail.com
|
3496cddcadb72837cd6c0a2f3e8ee009aa71c49a
|
46daa7bc5b465c0ae36a988754167259b9fc6c5b
|
/python-cookbook/3-10.py
|
d589e3707dcf5ea32d314aa2a353c80636ef3a9e
|
[] |
no_license
|
loveiset/corePythonExercises
|
30acd883fe6979446be0a564d22578cf1061c7f4
|
e3474568a0f56d22804a5021cb571cd75617f365
|
refs/heads/master
| 2016-09-06T20:02:17.036304
| 2015-10-15T08:00:08
| 2015-10-15T08:00:08
| 40,879,496
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 338
|
py
|
import time,os,sys
def main(cmd,inc=60):
while True:
os.system(cmd)
time.sleep(inc)
if __name__=='__main__':
    numargs = len(sys.argv) - 1
    if numargs < 1 or numargs > 2:
        print('usage: ' + sys.argv[0] + ' cmd [seconds]')
        sys.exit(1)
    cmd = sys.argv[1]
    if numargs == 1:
        main(cmd)
    else:
        inc = int(sys.argv[2])
        main(cmd, inc)
|
[
"loveiset@qq.com"
] |
loveiset@qq.com
|
afce243a4d7ee76553735dace72d9cef5b52557d
|
28e8ab381a8c1b4321cd83acff6aa33468166d6b
|
/python3.4Smartforest/lib/python3.4/site-packages/django/contrib/contenttypes/fields.py
|
0e221dbc92678bea486b69b37beddeebeb62fc33
|
[
"MIT"
] |
permissive
|
letouriste001/SmartForest_2.0
|
343e13bc085d753be2af43aecfb74a5fffaa5e3b
|
109b78bf1e8c8404800f377ab969395ccbb617be
|
refs/heads/master
| 2020-12-21T16:54:22.865824
| 2016-08-11T14:17:45
| 2016-08-11T14:17:45
| 59,734,259
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 24,528
|
py
|
from __future__ import unicode_literals
from collections import defaultdict
from django.contrib.contenttypes.models import ContentType
from django.core import checks
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, models, router, transaction
from django.db.models import DO_NOTHING, signals
from django.db.models.base import ModelBase, make_foreign_order_accessors
from django.db.models.fields.related import (
ForeignObject, ForeignObjectRel, ReverseManyToOneDescriptor,
lazy_related_operation,
)
from django.db.models.query_utils import PathInfo
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.utils.functional import cached_property
@python_2_unicode_compatible
class GenericForeignKey(object):
"""
Provide a generic many-to-one relation through the ``content_type`` and
``object_id`` fields.
This class also doubles as an accessor to the related object (similar to
ForwardManyToOneDescriptor) by adding itself as a model attribute.
"""
# Field flags
auto_created = False
concrete = False
editable = False
hidden = False
is_relation = True
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
related_model = None
remote_field = None
def __init__(self, ct_field='content_type', fk_field='object_id', for_concrete_model=True):
self.ct_field = ct_field
self.fk_field = fk_field
self.for_concrete_model = for_concrete_model
self.editable = False
self.rel = None
self.column = None
def contribute_to_class(self, cls, name, **kwargs):
self.name = name
self.model = cls
self.cache_attr = "_%s_cache" % name
cls._meta.add_field(self, virtual=True)
# Only run pre-initialization field assignment on non-abstract models
if not cls._meta.abstract:
signals.pre_init.connect(self.instance_pre_init, sender=cls)
setattr(cls, name, self)
def get_filter_kwargs_for_object(self, obj):
"""See corresponding method on Field"""
return {
self.fk_field: getattr(obj, self.fk_field),
self.ct_field: getattr(obj, self.ct_field),
}
def get_forward_related_filter(self, obj):
"""See corresponding method on RelatedField"""
return {
self.fk_field: obj.pk,
self.ct_field: ContentType.objects.get_for_model(obj).pk,
}
def __str__(self):
model = self.model
app = model._meta.app_label
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def check(self, **kwargs):
errors = []
errors.extend(self._check_field_name())
errors.extend(self._check_object_id_field())
errors.extend(self._check_content_type_field())
return errors
def _check_field_name(self):
if self.name.endswith("_"):
return [
checks.Error(
'Field names must not end with an underscore.',
hint=None,
obj=self,
id='fields.E001',
)
]
else:
return []
def _check_object_id_field(self):
try:
self.model._meta.get_field(self.fk_field)
except FieldDoesNotExist:
return [
checks.Error(
"The GenericForeignKey object ID references the non-existent field '%s'." % self.fk_field,
hint=None,
obj=self,
id='contenttypes.E001',
)
]
else:
return []
def _check_content_type_field(self):
"""
Check if field named `field_name` in model `model` exists and is a
valid content_type field (is a ForeignKey to ContentType).
"""
try:
field = self.model._meta.get_field(self.ct_field)
except FieldDoesNotExist:
return [
checks.Error(
"The GenericForeignKey content type references the non-existent field '%s.%s'." % (
self.model._meta.object_name, self.ct_field
),
hint=None,
obj=self,
id='contenttypes.E002',
)
]
else:
if not isinstance(field, models.ForeignKey):
return [
checks.Error(
"'%s.%s' is not a ForeignKey." % (
self.model._meta.object_name, self.ct_field
),
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=self,
id='contenttypes.E003',
)
]
elif field.remote_field.model != ContentType:
return [
checks.Error(
"'%s.%s' is not a ForeignKey to 'contenttypes.ContentType'." % (
self.model._meta.object_name, self.ct_field
),
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=self,
id='contenttypes.E004',
)
]
else:
return []
def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs):
"""
Handle initializing an object with the generic FK instead of
content_type and object_id fields.
"""
if self.name in kwargs:
value = kwargs.pop(self.name)
if value is not None:
kwargs[self.ct_field] = self.get_content_type(obj=value)
kwargs[self.fk_field] = value._get_pk_val()
else:
kwargs[self.ct_field] = None
kwargs[self.fk_field] = None
def get_content_type(self, obj=None, id=None, using=None):
if obj is not None:
return ContentType.objects.db_manager(obj._state.db).get_for_model(
obj, for_concrete_model=self.for_concrete_model)
elif id is not None:
return ContentType.objects.db_manager(using).get_for_id(id)
else:
# This should never happen. I love comments like this, don't you?
raise Exception("Impossible arguments to GFK.get_content_type!")
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is not None:
raise ValueError("Custom queryset can't be used for this lookup.")
# For efficiency, group the instances by content type and then do one
# query per model
fk_dict = defaultdict(set)
# We need one instance for each group in order to get the right db:
instance_dict = {}
ct_attname = self.model._meta.get_field(self.ct_field).get_attname()
for instance in instances:
# We avoid looking for values if either ct_id or fkey value is None
ct_id = getattr(instance, ct_attname)
if ct_id is not None:
fk_val = getattr(instance, self.fk_field)
if fk_val is not None:
fk_dict[ct_id].add(fk_val)
instance_dict[ct_id] = instance
ret_val = []
for ct_id, fkeys in fk_dict.items():
instance = instance_dict[ct_id]
ct = self.get_content_type(id=ct_id, using=instance._state.db)
ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys))
# For doing the join in Python, we have to match both the FK val and the
# content type, so we use a callable that returns a (fk, class) pair.
def gfk_key(obj):
ct_id = getattr(obj, ct_attname)
if ct_id is None:
return None
else:
model = self.get_content_type(id=ct_id,
using=obj._state.db).model_class()
return (model._meta.pk.get_prep_value(getattr(obj, self.fk_field)),
model)
return (ret_val,
lambda obj: (obj._get_pk_val(), obj.__class__),
gfk_key,
True,
self.cache_attr)
def is_cached(self, instance):
return hasattr(instance, self.cache_attr)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
return getattr(instance, self.cache_attr)
except AttributeError:
rel_obj = None
# Make sure to use ContentType.objects.get_for_id() to ensure that
# lookups are cached (see ticket #5570). This takes more code than
# the naive ``getattr(instance, self.ct_field)``, but has better
# performance when dealing with GFKs in loops and such.
f = self.model._meta.get_field(self.ct_field)
ct_id = getattr(instance, f.get_attname(), None)
if ct_id is not None:
ct = self.get_content_type(id=ct_id, using=instance._state.db)
try:
rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field))
except ObjectDoesNotExist:
pass
setattr(instance, self.cache_attr, rel_obj)
return rel_obj
def __set__(self, instance, value):
ct = None
fk = None
if value is not None:
ct = self.get_content_type(obj=value)
fk = value._get_pk_val()
setattr(instance, self.ct_field, ct)
setattr(instance, self.fk_field, fk)
setattr(instance, self.cache_attr, value)
class GenericRel(ForeignObjectRel):
"""
Used by GenericRelation to store information about the relation.
"""
def __init__(self, field, to, related_name=None, related_query_name=None, limit_choices_to=None):
super(GenericRel, self).__init__(
field, to,
related_name=related_query_name or '+',
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
on_delete=DO_NOTHING,
)
class GenericRelation(ForeignObject):
"""
Provide a reverse to a relation created by a GenericForeignKey.
"""
# Field flags
auto_created = False
many_to_many = False
many_to_one = False
one_to_many = True
one_to_one = False
rel_class = GenericRel
def __init__(self, to, object_id_field='object_id', content_type_field='content_type',
for_concrete_model=True, related_query_name=None, limit_choices_to=None, **kwargs):
kwargs['rel'] = self.rel_class(
self, to,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
)
kwargs['blank'] = True
kwargs['on_delete'] = models.CASCADE
kwargs['editable'] = False
kwargs['serialize'] = False
# This construct is somewhat of an abuse of ForeignObject. This field
# represents a relation from pk to object_id field. But, this relation
# isn't direct, the join is generated reverse along foreign key. So,
# the from_field is object_id field, to_field is pk because of the
# reverse join.
super(GenericRelation, self).__init__(
to, from_fields=[object_id_field], to_fields=[], **kwargs)
self.object_id_field_name = object_id_field
self.content_type_field_name = content_type_field
self.for_concrete_model = for_concrete_model
def check(self, **kwargs):
errors = super(GenericRelation, self).check(**kwargs)
errors.extend(self._check_generic_foreign_key_existence())
return errors
def _check_generic_foreign_key_existence(self):
target = self.remote_field.model
if isinstance(target, ModelBase):
fields = target._meta.virtual_fields
if any(isinstance(field, GenericForeignKey) and
field.ct_field == self.content_type_field_name and
field.fk_field == self.object_id_field_name
for field in fields):
return []
else:
return [
checks.Error(
("The GenericRelation defines a relation with the model "
"'%s.%s', but that model does not have a GenericForeignKey.") % (
target._meta.app_label, target._meta.object_name
),
hint=None,
obj=self,
id='contenttypes.E004',
)
]
else:
return []
def resolve_related_fields(self):
self.to_fields = [self.model._meta.pk.name]
return [(self.remote_field.model._meta.get_field(self.object_id_field_name), self.model._meta.pk)]
def get_path_info(self):
opts = self.remote_field.model._meta
target = opts.pk
return [PathInfo(self.model._meta, opts, (target,), self.remote_field, True, False)]
def get_reverse_path_info(self):
opts = self.model._meta
from_opts = self.remote_field.model._meta
return [PathInfo(from_opts, opts, (opts.pk,), self, not self.unique, False)]
def get_choices_default(self):
return super(GenericRelation, self).get_choices(include_blank=False)
def value_to_string(self, obj):
qs = getattr(obj, self.name).all()
return smart_text([instance._get_pk_val() for instance in qs])
def contribute_to_class(self, cls, name, **kwargs):
kwargs['virtual_only'] = True
super(GenericRelation, self).contribute_to_class(cls, name, **kwargs)
self.model = cls
setattr(cls, self.name, ReverseGenericManyToOneDescriptor(self.remote_field))
# Add get_RELATED_order() and set_RELATED_order() methods if the model
# on the other end of this relation is ordered with respect to this.
def matching_gfk(field):
return (
isinstance(field, GenericForeignKey) and
self.content_type_field_name == field.ct_field and
self.object_id_field_name == field.fk_field
)
def make_generic_foreign_order_accessors(related_model, model):
if matching_gfk(model._meta.order_with_respect_to):
make_foreign_order_accessors(model, related_model)
lazy_related_operation(make_generic_foreign_order_accessors, self.model, self.remote_field.model)
def set_attributes_from_rel(self):
pass
def get_internal_type(self):
return "ManyToManyField"
def get_content_type(self):
"""
Return the content type associated with this field's model.
"""
return ContentType.objects.get_for_model(self.model,
for_concrete_model=self.for_concrete_model)
def get_extra_restriction(self, where_class, alias, remote_alias):
field = self.remote_field.model._meta.get_field(self.content_type_field_name)
contenttype_pk = self.get_content_type().pk
cond = where_class()
lookup = field.get_lookup('exact')(field.get_col(remote_alias), contenttype_pk)
cond.add(lookup, 'AND')
return cond
def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
"""
Return all objects related to ``objs`` via this ``GenericRelation``.
"""
return self.remote_field.model._base_manager.db_manager(using).filter(**{
"%s__pk" % self.content_type_field_name: ContentType.objects.db_manager(using).get_for_model(
self.model, for_concrete_model=self.for_concrete_model).pk,
"%s__in" % self.object_id_field_name: [obj.pk for obj in objs]
})
class ReverseGenericManyToOneDescriptor(ReverseManyToOneDescriptor):
"""
Accessor to the related objects manager on the one-to-many relation created
by GenericRelation.
In the example::
class Post(Model):
comments = GenericRelation(Comment)
``post.comments`` is a ReverseGenericManyToOneDescriptor instance.
"""
@cached_property
def related_manager_cls(self):
return create_generic_related_manager(
self.rel.model._default_manager.__class__,
self.rel,
)
def create_generic_related_manager(superclass, rel):
"""
Factory function to create a manager that subclasses another manager
(generally the default manager of a given model) and adds behaviors
specific to generic relations.
"""
class GenericRelatedObjectManager(superclass):
def __init__(self, instance=None):
super(GenericRelatedObjectManager, self).__init__()
self.instance = instance
self.model = rel.model
content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(
instance, for_concrete_model=rel.field.for_concrete_model)
self.content_type = content_type
self.content_type_field_name = rel.field.content_type_field_name
self.object_id_field_name = rel.field.object_id_field_name
self.prefetch_cache_name = rel.field.attname
self.pk_val = instance._get_pk_val()
self.core_filters = {
'%s__pk' % self.content_type_field_name: content_type.id,
self.object_id_field_name: self.pk_val,
}
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_generic_related_manager(manager.__class__, rel)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def __str__(self):
return repr(self)
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
db = self._db or router.db_for_read(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**self.core_filters)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(GenericRelatedObjectManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {
'%s__pk' % self.content_type_field_name: self.content_type.id,
'%s__in' % self.object_id_field_name: set(obj._get_pk_val() for obj in instances)
}
# We (possibly) need to convert object IDs to the type of the
# instances' PK in order to match up instances:
object_id_converter = instances[0]._meta.pk.to_python
return (queryset.filter(**query),
lambda relobj: object_id_converter(getattr(relobj, self.object_id_field_name)),
lambda obj: obj._get_pk_val(),
False,
self.prefetch_cache_name)
def add(self, *objs, **kwargs):
bulk = kwargs.pop('bulk', True)
db = router.db_for_write(self.model, instance=self.instance)
def check_and_update_obj(obj):
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected, got %r" % (
self.model._meta.object_name, obj
))
setattr(obj, self.content_type_field_name, self.content_type)
setattr(obj, self.object_id_field_name, self.pk_val)
if bulk:
pks = []
for obj in objs:
if obj._state.adding or obj._state.db != db:
raise ValueError(
"%r instance isn't saved. Use bulk=False or save "
"the object first." % obj
)
check_and_update_obj(obj)
pks.append(obj.pk)
self.model._base_manager.using(db).filter(pk__in=pks).update(**{
self.content_type_field_name: self.content_type,
self.object_id_field_name: self.pk_val,
})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
check_and_update_obj(obj)
obj.save()
add.alters_data = True
def remove(self, *objs, **kwargs):
if not objs:
return
bulk = kwargs.pop('bulk', True)
self._clear(self.filter(pk__in=[o.pk for o in objs]), bulk)
remove.alters_data = True
def clear(self, **kwargs):
bulk = kwargs.pop('bulk', True)
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.delete()` creates its own atomic block which
# contains the `pre_delete` and `post_delete` signal handlers.
queryset.delete()
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
obj.delete()
_clear.alters_data = True
def set(self, objs, **kwargs):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
bulk = kwargs.pop('bulk', True)
clear = kwargs.pop('clear', False)
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, bulk=bulk)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if obj in old_objs:
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs)
self.add(*new_objs, bulk=bulk)
set.alters_data = True
def create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).update_or_create(**kwargs)
update_or_create.alters_data = True
return GenericRelatedObjectManager
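# Illustrative sketch (not part of Django itself): minimal models showing how
# the GenericForeignKey and GenericRelation machinery above is typically wired
# together. `TaggedItem` and `Bookmark` are hypothetical models, not Django APIs.
#
#   from django.contrib.contenttypes.fields import (
#       GenericForeignKey, GenericRelation,
#   )
#   from django.contrib.contenttypes.models import ContentType
#   from django.db import models
#
#   class TaggedItem(models.Model):
#       tag = models.SlugField()
#       content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
#       object_id = models.PositiveIntegerField()
#       content_object = GenericForeignKey('content_type', 'object_id')
#
#   class Bookmark(models.Model):
#       url = models.URLField()
#       tags = GenericRelation(TaggedItem)  # enables bookmark.tags.all(), .create(), ...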
|
[
"aubert.christophe.pro@gmail.com"
] |
aubert.christophe.pro@gmail.com
|
a230e6aafa3e03bdd9995d307c925a4d98840639
|
81f8276be090ff9fa960d83db45bfe0b3c69ff39
|
/test.py
|
9d480a3922a04ae7c87c67955f1914d97189c9ae
|
[] |
no_license
|
jon--lee/mlb-call-classifier
|
1860c15b2f889a2b37daaaaefaed23cb19e808e5
|
28e15a908127a2ca78de92aee39c5dff516f6bf2
|
refs/heads/master
| 2020-06-07T03:04:54.034548
| 2015-07-23T00:06:54
| 2015-07-23T00:06:54
| 38,954,788
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 100
|
py
|
import classifier
import neuralpy
import grapher
grapher.graph(filepath='results/bucknor-93-R.txt')
|
[
"123abcjonathanlee@gmail.com"
] |
123abcjonathanlee@gmail.com
|
43183ccf971f9077b211e98ea3d6a03e7bac155e
|
1ef309f94cd545e961acab91d8bf7080c573bd9d
|
/storeproject/admins/urls.py
|
65cc779dc58c20fdd2309dcf28e5eeb2764ae78b
|
[] |
no_license
|
lxd632484901/storeproject
|
3e4cec0dfd0b23431412465db2eb4760206080ee
|
019ec9b9b506dc2f987baf51bfc17e820d774177
|
refs/heads/master
| 2022-02-24T02:15:13.644768
| 2019-09-23T01:24:32
| 2019-09-23T01:24:32
| 208,996,682
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 885
|
py
|
"""shopping_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from admins import views
from store.views import *
urlpatterns = [
url(r'^$',views.index,name='index'),
url(r'^base/$', views.base, name='base'),
]
|
[
"15998194741m@sina.cn"
] |
15998194741m@sina.cn
|
497277a27e50f16bd4dac4167ba204b4b27a60da
|
80338a9379508bdf5d40e055e12f2621dee01daa
|
/usernameless/__init__.py
|
aadcfca92bdcc1431f589d7f88dbb0d88b56edf3
|
[
"MIT"
] |
permissive
|
johnnncodes/django-usernameless
|
6ec75780edfec667ba653e50b0496e788832a36c
|
cf7b0904030e640ce51bf20c36521daf6abf447f
|
refs/heads/master
| 2021-05-26T22:30:08.415853
| 2013-10-26T02:36:45
| 2013-10-26T02:54:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 104
|
py
|
# -*- coding: utf-8 -*-
__title__ = 'usernameless'
__version__ = '0.1.0'
__author__ = 'Marconi Moreto'
|
[
"caketoad@gmail.com"
] |
caketoad@gmail.com
|
220d417e2a532c64b69fe77a6dbb261b6f5561fc
|
a360a22af5e0b385db438b1324564ef317ff2f38
|
/bancor_module/migrations/0007_bancor_tsymbol.py
|
3b1d42b27ed85223c4faa9f0e0b7bf186e2d5cc0
|
[] |
no_license
|
ogglin/exchange_comparison
|
3eb2d849e731f94e67509e4ce9130e33bb37bbaf
|
f3feae64aff26b574f7ecd24e6f7aff7bb95ec65
|
refs/heads/master
| 2023-04-26T07:45:06.229584
| 2021-05-31T18:52:29
| 2021-05-31T18:52:29
| 287,036,194
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
# Generated by Django 3.1 on 2021-02-20 09:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bancor_module', '0006_bancor_volume'),
]
operations = [
migrations.AddField(
model_name='bancor',
name='tsymbol',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
|
[
"server.ares@gmail.com"
] |
server.ares@gmail.com
|
43fb2c1c4455250a9f92bec7c8fcdf6199268310
|
8d223de1565567926baefef6fef9f3586c653706
|
/server/crypto.py
|
99af1ef8d4337786784b555203be98d8cc18344b
|
[] |
no_license
|
txuanson/money_exchange_socket
|
9766997b058387dd7db5a05243f77bde4828925c
|
5dc100ce4de729ed653d27fc58dfcfcfb237c990
|
refs/heads/master
| 2023-07-10T15:33:58.413999
| 2021-08-15T15:13:10
| 2021-08-15T15:13:10
| 395,272,826
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 291
|
py
|
import bcrypt
def hash_password(password: str) -> str:
return bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt()).decode('utf-8')
def compare_password(password: str, hashed_password: str) -> bool:
return bcrypt.checkpw(password.encode('utf-8'), hashed_password.encode('utf-8'))
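# Minimal usage sketch (illustrative; the literal password below is made up):
#   hashed = hash_password("s3cret")             # salted bcrypt hash as str
#   assert compare_password("s3cret", hashed)    # True for the right password
#   assert not compare_password("wrong", hashed)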
|
[
"khatmausr502@gmail.com"
] |
khatmausr502@gmail.com
|
4f43cadd7ce7a2b3252a0896805b3af46be81b92
|
ff2eb3a755d739d9eafc0c7673e86e0e439d119f
|
/is_ironman.py
|
a2b4a51c9a186600ad84fc8b715f31fc92c63f79
|
[] |
no_license
|
telac/discord-group-ironman-bot
|
48a3d8944066ec91776b08c4efdd6a2c67423d39
|
dc8e0595b85a4ce87a56e52f4d9b738317062cfb
|
refs/heads/master
| 2022-12-01T22:43:39.351594
| 2020-08-17T14:46:22
| 2020-08-17T14:46:22
| 288,027,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,203
|
py
|
from asyncio import sleep
from datetime import datetime
from configparser import ConfigParser
import requests
from bs4 import BeautifulSoup
from discord.ext import commands, tasks
def config(section, filename='conf.ini'):
parser = ConfigParser()
parser.read(filename)
conf = {}
if parser.has_section(section):
parameters = parser.items(section)
for param in parameters:
conf[param[0]] = param[1]
else:
raise Exception("config not found!")
return conf
bot = commands.Bot("!")
conf = config('group_ironman')
def get_daily_msg():
now = datetime.now()
str_time = now.strftime("%d-%m-%Y, %H:%M:%S")
daily_update = "ironman status: \n"
if is_group_ironman():
daily_update += "is out!!"
else:
daily_update += "is not out yet. Checking again in 1 hour! \n"
daily_update += f"last updated {str_time}"
return daily_update
def is_group_ironman():
res = requests.get("https://secure.runescape.com/m=hiscore_oldschool/overall")
soup = BeautifulSoup(res.text, 'html.parser')
search_res = soup.find_all('div', class_='ironman-nav')
if "group" in search_res[0].get_text().lower():
return True
class DailyChecker(commands.Cog):
def __init__(self, bot, channel):
self.bot = bot
self.check_daily_ironman.start()
self.channel = channel
@tasks.loop(hours=1)
async def check_daily_ironman(self):
channel = self.bot.get_channel(int(self.channel))
        message = await channel.fetch_message(int(conf['msg']))  # config values are strings
msg = get_daily_msg()
await message.edit(content=msg)
@bot.event
async def on_ready():
print('We have logged in as {0.user}'.format(bot))
bot.add_cog(DailyChecker(bot, conf['channel']))
@bot.event
async def on_message(message):
if message.author == bot.user:
return
if "ironman" in message.content.lower():
if is_group_ironman():
await message.channel.send('group ironman is out!')
else:
await message.channel.send('group ironman is not out yet :(')
await bot.process_commands(message)
if __name__ == "__main__":
bot.run(conf['token'])
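# A hypothetical conf.ini matching the keys this script reads from the
# [group_ironman] section ('token', 'channel', 'msg'); values are placeholders:
#
#   [group_ironman]
#   token = <discord-bot-token>
#   channel = 123456789012345678
#   msg = 987654321098765432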
|
[
"ca.westerlund@gmail.com"
] |
ca.westerlund@gmail.com
|
eb2e7d1b25fa6419ac1847667a5fe019af42f82f
|
19bc1dfbf8a8b4b1bfc9d6ead51479c72602b12e
|
/tests/test_resource_analysis.py
|
fea2cef5999b8d3a27c8c10067bde6f075b62ce6
|
[
"MIT"
] |
permissive
|
procha2/caper
|
a5297d6cfe7cf649ac5ac3544558f513b427713d
|
e9ea0baa3517178ce7b850df8a59eba6479fbcb6
|
refs/heads/master
| 2023-08-10T17:37:40.840958
| 2021-07-01T22:57:45
| 2021-07-01T22:57:45
| 300,260,107
| 0
| 0
|
MIT
| 2020-10-01T11:48:13
| 2020-10-01T11:48:12
| null |
UTF-8
|
Python
| false
| false
| 3,119
|
py
|
"""Test is based on a metadata JSON file generated from
running atac-seq-pipeline v1.8.0 with the following input JSON.
gs://encode-pipeline-test-samples/encode-atac-seq-pipeline/ENCSR356KRQ_subsampled_caper.json
"""
import pytest
from caper.resource_analysis import LinearResourceAnalysis, ResourceAnalysis
def test_resource_analysis_abstract_class(gcp_res_analysis_metadata):
with pytest.raises(TypeError):
# abstract base-class
ResourceAnalysis()
def test_resource_analysis_analyze_task(gcp_res_analysis_metadata):
analysis = LinearResourceAnalysis()
analysis.collect_resource_data([gcp_res_analysis_metadata])
result_align1 = analysis.analyze_task(
'atac.align',
in_file_vars=['fastqs_R1'],
reduce_in_file_vars=None,
target_resources=['stats.max.mem', 'stats.mean.cpu_pct'],
)
assert result_align1['x'] == {'fastqs_R1': [15643136, 18963919]}
assert 'stats.mean.cpu_pct' in result_align1['y']
assert 'stats.max.mem' in result_align1['y']
assert 'stats.max.disk' not in result_align1['y']
assert list(result_align1['y'].keys()) == list(result_align1['coeffs'].keys())
assert result_align1['coeffs']['stats.mean.cpu_pct'][0][0] == pytest.approx(
1.6844513715565233e-06
)
assert result_align1['coeffs']['stats.mean.cpu_pct'][1] == pytest.approx(
42.28561239506905
)
assert result_align1['coeffs']['stats.max.mem'][0][0] == pytest.approx(
48.91222341236991
)
assert result_align1['coeffs']['stats.max.mem'][1] == pytest.approx(
124314029.09791338
)
result_align2 = analysis.analyze_task(
'atac.align', in_file_vars=['fastqs_R2'], reduce_in_file_vars=sum
)
assert result_align2['x'] == {'sum(fastqs_R2)': [16495088, 20184668]}
assert 'stats.mean.cpu_pct' not in result_align2['y']
assert 'stats.max.mem' in result_align2['y']
assert 'stats.max.disk' in result_align2['y']
assert list(result_align2['y'].keys()) == list(result_align2['coeffs'].keys())
result_align_star = analysis.analyze_task('atac.align*', reduce_in_file_vars=max)
assert result_align_star['x'] == {
'max(chrsz,fastqs_R1,fastqs_R2,idx_tar,tmp_fastqs)': [
32138224,
39148587,
3749246230,
3749246230,
]
}
def test_resource_analysis_analyze(gcp_res_analysis_metadata):
"""Test method analyze() which analyze all tasks defined in in_file_vars.
"""
analysis = LinearResourceAnalysis()
analysis.collect_resource_data([gcp_res_analysis_metadata])
result = analysis.analyze(
in_file_vars={
'atac.align*': ['fastqs_R1', 'fastqs_R2'],
'atac.filter*': ['bam'],
}
)
assert len(result) == 2
assert result['atac.align*']['x'] == {
'sum(fastqs_R1,fastqs_R2)': [32138224, 39148587, 32138224, 39148587]
}
assert result['atac.filter*']['x'] == {
'sum(bam)': [61315022, 76789196, 61315022, 76789196]
}
result_all = analysis.analyze()
# 38 tasks in total
assert len(result_all) == 38
|
[
"leepc12@gmail.com"
] |
leepc12@gmail.com
|
2342a695f7ae543b2d1627da8416831724d367c1
|
2904bc85e3a688a25dde16497a29c72bf4a5d27c
|
/scenic/model_lib/layers/nn_ops.py
|
e17ba90140baf732dfb65469ee7f3e72dd730a59
|
[
"Apache-2.0"
] |
permissive
|
shreyasarora/scenic
|
2fda9ad6982e4391bb98c032adf59d2e8be3d3ff
|
c3ae6d7b5dc829fafe204a92522a5983959561a0
|
refs/heads/main
| 2023-08-28T21:00:32.857689
| 2021-11-10T16:59:03
| 2021-11-10T17:00:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,188
|
py
|
# Copyright 2021 The Scenic Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common neural network funcitonality that doesn't require parameters."""
import flax.linen as nn
import jax
from jax import lax
import jax.numpy as jnp
import numpy as np
def extract_image_patches(lhs,
rhs_shape,
strides,
padding,
rhs_dilation,
data_format='NHWC'):
"""Extract patches of size `rhs_shape` from `lhs`.
Args:
lhs: A 4-D Tensor; With shape `[batch, in_rows, in_cols, depth].
rhs_shape: tuple; Size of the sliding window for each dimension of `lhs`.
strides: tuple; How far the centers of two consecutive patches are in the
lhs. Must be: `[1, stride_rows, stride_cols, 1]`.
padding: str; The type of padding algorithm to use.
      We specify the size-related attributes as:
        ksizes = [1, ksize_rows, ksize_cols, 1]
        strides = [1, strides_rows, strides_cols, 1]
        rates = [1, rates_rows, rates_cols, 1]
rhs_dilation: A 1-D Tensor of length 4; Must be: `[1, rate_rows, rate_cols,
1]`. This is the input stride, specifying how far two consecutive patch
samples are in the input. Equivalent to extracting patches with
`patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`,
followed by subsampling them spatially by a factor of `rates`. This is
equivalent to `rate` in dilated (a.k.a. Atrous) convolutions.
data_format: str; The format of the `lhs`. Must be either `'NHWC'` or
`'NCHW'`.
Returns:
A 4-D Tensor. Has the same type and data format as `lhs`, and with shape
`[batch, num_patches_col, num_patches_row, rhs_shape[1], rhs_shape[2], C]`.
"""
num_dims = lhs.ndim
num_spatial_dims = num_dims - 2
batch_dim = data_format.index('N')
feature_dim = data_format.index('C')
depth = lhs.shape[feature_dim]
if rhs_shape[batch_dim] != 1 or rhs_shape[feature_dim] != 1:
raise NotImplementedError(
'Current implementation does not yet support window sizes > 1 in '
'the batch and depth dimensions.')
if strides[batch_dim] != 1 or strides[feature_dim] != 1:
raise NotImplementedError(
'Current implementation does not support strides in the batch '
'and depth dimensions.')
if rhs_dilation[batch_dim] != 1 or rhs_dilation[feature_dim] != 1:
raise NotImplementedError(
'Current implementation does not support dilations in the batch '
'and depth dimensions.')
# Replicating tensorflow's implementation.
lhs_perm = lax.conv_general_permutations(
(data_format, 'HWIO', data_format))[0]
kernel_shape = [rhs_shape[i] for i in lhs_perm[2:]]
kernel_size = np.product(kernel_shape)
conv_filter_shape = kernel_shape[:]
conv_filter_shape.append(1)
conv_filter_shape.append(kernel_size * depth)
iota_kernel_shape = (kernel_size, depth, kernel_size)
conv_filter = lax.eq(
lax.broadcasted_iota(jnp.int32, iota_kernel_shape, 0),
lax.broadcasted_iota(jnp.int32, iota_kernel_shape, 2),
)
conv_filter = lax.convert_element_type(conv_filter, lhs.dtype)
conv_filter = lax.reshape(conv_filter, conv_filter_shape)
dim_num = lax.conv_dimension_numbers(lhs.shape, conv_filter.shape,
(data_format, 'HWIO', data_format))
conv_strides = [0] * num_spatial_dims
conv_rhs_dilation = [0] * num_spatial_dims
for i in range(num_spatial_dims):
dim = dim_num.lhs_spec[i + 2]
conv_strides[i] = strides[dim]
conv_rhs_dilation[i] = rhs_dilation[dim]
conv = lax.conv_general_dilated(lhs, conv_filter, conv_strides, padding, None,
conv_rhs_dilation, dim_num, depth)
conv_dims = list(conv.shape[:-1])
conv_dims.append(depth)
conv_dims.extend(kernel_shape)
conv = lax.reshape(conv, conv_dims)
permutation = list(range(len(conv_dims)))
depth_dim = permutation.pop(-3)
permutation.append(depth_dim)
return lax.transpose(conv, permutation)
def extract_patches(lhs, rhs_shape, strides=(1, 1)):
"""Extracts patches from an image using a convolution operator.
Args:
lhs: A tensor of images of shapes (B, H, W, C).
rhs_shape: The size of the patches to extract (h, w).
strides: The shift between extracted patches (s1, s2)
Returns:
All the patches in a tensor of dimension
(B, (H - h + 1) // s1, (W - w + 1) // s2, h, w, C).
"""
# [batch, channels, height, width]
lhs = jnp.moveaxis(lhs, -1, 1)
d = lhs.shape[1]
h, w = rhs_shape
# Construct the lookup conv weights.
dim_out = jnp.arange(d * h * w).reshape((-1, 1, 1, 1))
dim_in = jnp.arange(d).reshape((1, -1, 1, 1))
i = jnp.arange(h).reshape((1, 1, -1, 1))
j = jnp.arange(w).reshape((1, 1, 1, -1))
weights = ((w * i + j) * d + dim_in == dim_out).astype(jnp.float32)
# [batch, h * w * d, (H - h + 1) // s1, (W - w + 1) // s2]
concatenated_patches = lax.conv(
lhs, weights, window_strides=strides, padding='VALID')
# [batch, (H - h + 1) // s1, (W - w + 1) // s2, h * w * d]
concatenated_patches = jnp.moveaxis(concatenated_patches, 1, -1)
# [batch, (H - h + 1) // s1, (W - w + 1) // s2, h, w, d]
shape = concatenated_patches.shape[:3] + (h, w, d)
return concatenated_patches.reshape(shape)
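# Shape sketch for extract_patches (illustrative values, default strides):
#   x = jnp.zeros((2, 8, 8, 3))                  # (B, H, W, C)
#   extract_patches(x, rhs_shape=(3, 3)).shape   # -> (2, 6, 6, 3, 3, 3)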
def compute_relative_positions(query_spatial_shape,
key_spatial_shape,
spatial_axis=None):
"""Generate relative positions of queries and keys.
For relative attention, the pairwise positional distance between each query
and key point is used in the attention weight computation. This function
generates the positional distances between each query-key pair, given the
offset of first position in the query with respect to first position in the
key.
For example, if the query and key are 1d and query has 2 entries and the key
has 3 entries, the relative distance matrix is:
[[0, 1, 2],
[-1, 0, 1]]
where each [i, j] entry = j - i (j = key index, i = query index). Note that
the values in this matrix are being used by an embedding lookup, so we shift
them such that the smallest index is zero:
[[1, 2, 3],
[0, 1, 2]]
This function produces the multi-dimensional distance for a query and key.
It factorizes the distance computation such that there is a positional
distance per dimension. An input with 3 dimensions will have a total of
3 distances, 1 per dimension.
Args:
query_spatial_shape: tuple; Indicating the spatial shape of the query.
key_spatial_shape: tuple; Indicating the spatial shape of the key.
spatial_axis: tuple; The axis over which the distance is calculated. Default
is None, which means distances over all axis is calculated.
Returns:
a numpy (np) int array of shape [len(spatial_axis),
query_spatial_shape(spatial_axis), key_spatial_shape(spatial_axis)]
holding the distance between each query and key pair across dimensions
that are determined by `spatial_axis`, where the query and key are
indexed by their position. The smallest value in the array is zero.
"""
assert len(query_spatial_shape) == len(key_spatial_shape)
if spatial_axis is None:
spatial_axis = range(len(query_spatial_shape))
for sa in spatial_axis:
if not 0 <= sa < len(query_spatial_shape):
raise ValueError('Element of `spatial_axis` should be between 0 and '
'length of `query_spatial_shape`.')
num_dims = len(spatial_axis)
  # Keep only dimensions we are interested in.
query_spatial_shape = tuple([query_spatial_shape[a] for a in spatial_axis])
key_spatial_shape = tuple([key_spatial_shape[a] for a in spatial_axis])
total_queries = np.prod(query_spatial_shape)
total_keys = np.prod(key_spatial_shape)
# A distance per dimension in the flattened query-key arrays.
relative_positions = np.empty((num_dims, total_queries, total_keys),
dtype=np.int32)
# Convert flattened indices to multi-dimension coordinate indices.
coordinates_query = np.unravel_index(
range(total_queries), query_spatial_shape)
coordinates_key = np.unravel_index(range(total_keys), key_spatial_shape)
# Compute distances between each query-key point.
for dim in range(num_dims):
for flat_index_query in range(total_queries):
for flat_index_key in range(total_keys):
relative_positions[dim, flat_index_query, flat_index_key] = (
coordinates_key[dim][flat_index_key] -
coordinates_query[dim][flat_index_query])
relative_positions[dim] = relative_positions[dim]
# These indices are being used by an embedding lookup, so shift the indices
# such that the smallest index is zero.
relative_positions -= np.amin(relative_positions, axis=(1, 2), keepdims=True)
# Reshape to original dim.
relative_positions = relative_positions.reshape((num_dims,) +
query_spatial_shape +
key_spatial_shape)
return relative_positions
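# Quick check against the 1-D example in the docstring above (2 queries,
# 3 keys, a single spatial axis); values are shifted so the minimum is 0:
#   compute_relative_positions((2, 1), (3, 1), spatial_axis=(0,))
#   # -> array of shape (1, 2, 3): [[[1, 2, 3],
#   #                                [0, 1, 2]]]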
def patch_image(inputs,
inputs_shape,
patch_size,
strides=None,
padding='VALID',
mode='i2p'):
"""Applies patching operation on the input.
Args:
inputs: Input data.
inputs_shape: tuple; Shape of the input data.
patch_size: tuple; size of the patch: (height, width).
strides: tuple; Specifies how far two consecutive patches are in the
input.
padding: str; The type of padding algorithm to use.
mode: str; Either 'i2p' to convert the input image to patches or 'p2i' to
convert the patched image to the original shape.
Returns:
Patched image if mode='i2p', original image if mode='p2i'.
"""
strides = strides or patch_size
def i2p(x):
return extract_image_patches(
lhs=x.astype(jnp.float64),
rhs_shape=(1,) + patch_size + (1,),
strides=(1,) + strides + (1,),
padding=padding,
rhs_dilation=(1,) * inputs.ndim,
data_format='NHWC')
if mode == 'i2p':
_, inputs_w, inputs_h, _ = inputs.shape
patch_w, patch_h = patch_size
if (inputs_w < patch_w or inputs_h < patch_h):
      raise ValueError(f'Patch height and width ({patch_w} and {patch_h}) '
                       'should be smaller than the input height and width '
                       f'({inputs_w} and {inputs_h}).')
outputs = i2p(inputs)
elif mode == 'p2i':
_, fn_vjp = jax.vjp(i2p, jnp.ones(inputs_shape))
overlap_count = fn_vjp(jnp.ones_like(inputs))[0]
outputs = fn_vjp(inputs)[0] / overlap_count
else:
    raise ValueError(f"mode must be 'i2p' or 'p2i', got {mode!r}.")
return outputs
def space_to_depth(inputs, window_shape, strides=None, padding='VALID'):
"""Applies space to depth.
Args:
inputs: Input data with dimensions `[bs, window dims, ..., features]`.
window_shape: tuple; Defining the window to reduce over.
strides: tuple, A sequence of `n` integers, representing the inter-window
strides (default: window_shape).
padding: str; Either `'SAME'`, `'VALID'`, or a sequence of `n` `(low,
high)` integer pairs that give the padding to
apply before and after each spatial dimension (default: `'VALID'`).
Returns:
    An output image with spatial dimensions less than or equal to the input's.
"""
strides = strides or window_shape
patched = extract_image_patches(
lhs=inputs.astype(jnp.float64),
rhs_shape=(1,) + window_shape + (1,),
strides=(1,) + strides + (1,),
padding=padding,
rhs_dilation=(1,) * inputs.ndim,
data_format='NHWC')
bs, n_patch_h, n_patch_w, _, _, _ = patched.shape
return patched.reshape(bs, n_patch_h, n_patch_w, -1)
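# Shape sketch (illustrative): inputs of shape (2, 8, 8, 3) with
# window_shape=(2, 2) yield (2, 4, 4, 12); each 2x2x3 block is
# flattened into the channel axis.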
def pooling(inputs,
window_shape,
pooling_configs=None,
strides=None,
padding='VALID'):
"""Applies configurable pooling.
Args:
    inputs: an nd-array; The shape of inputs is `[bs, <window dims>,
      features]` and for presence_weights, the shape is `[bs, <window dims>]`.
window_shape: tuple; Defining the window to reduce over.
pooling_configs: dict; Configuration for the optional pooling operation.
strides: tuple, A sequence of `n` integers, representing the inter-window
strides (default: window_shape).
padding: str; Either `'SAME'`, `'VALID'`, or a sequence of `n` `(low, high)`
integer pairs that give the padding to
apply before and after each spatial dimension (default: `'VALID'`).
Returns:
    An output image with spatial dimensions less than or equal to the input's.
"""
# TODO(dehghani): add positional embedding to other type of pooling?
strides = strides or window_shape
pooling_type = pooling_configs.get('pooling_type')
if pooling_type == 'avg_pooling':
x = nn.avg_pool(inputs, window_shape, strides=strides, padding=padding)
elif pooling_type == 'max_pooling':
x = nn.max_pool(inputs, window_shape, strides=strides, padding=padding)
elif pooling_type == 'space_to_depth':
x = space_to_depth(inputs, window_shape, strides=strides, padding=padding)
else:
raise ValueError('Pooling type {} is not defined.'.format(pooling_type))
return x
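# Example call (illustrative): 'pooling_type' is the only key pooling()
# reads from pooling_configs.
#   pooling(x, window_shape=(2, 2),
#           pooling_configs={'pooling_type': 'avg_pooling'})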
def weighted_max_pool(inputs,
weights,
window_shape,
strides=None,
padding='VALID',
return_pooled_weights=False):
"""Pools the input by taking max over a window, w.r.t their inputs' weights.
Args:
inputs: Input data with dimensions (batch, <window dims>, features).
weights: Input weights with dimensions (batch, <window dims>).
window_shape: tuple; A shape tuple defining the window to reduce over.
strides: tuple; A sequence of `n` integers, representing the inter-window
strides (default: `(1, ..., 1)`).
padding: str/list(tuple); Either the string `'SAME'`, the string `'VALID'`,
or a sequence of `n` `(low, high)` integer pairs that give the padding to
apply before and after each spatial dimension (default: `'VALID'`).
return_pooled_weights: bool; Also return the pooled weight
Returns:
The maximum of each window slice. If return_pooled_weights is True, it also
returns the maximum of pooled weights.
"""
assert inputs.shape[:-1] == weights.shape
weights = jnp.expand_dims(weights, -1)
inputs = inputs * weights
outputs = nn.max_pool(inputs, window_shape, strides=strides, padding=padding)
if return_pooled_weights:
max_weights = nn.max_pool(
weights, window_shape, strides=strides, padding=padding)
return outputs, max_weights.squeeze(axis=-1)
return outputs
def weighted_avg_pool(inputs,
weights,
window_shape,
strides=None,
padding='VALID',
return_pooled_weights=False):
"""Pools the input by averaging over a window, w.r.t their inputs' weights.
Args:
inputs: Input data with dimensions (batch, <window dims>, features).
weights: Input weights with dimensions (batch, <window dims>).
window_shape: tuple; A shape tuple defining the window to reduce over.
strides: tuple; A sequence of `n` integers, representing the inter-window
strides (default: `(1, ..., 1)`).
padding: str/list(tuple); Either the string `'SAME'`, the string `'VALID'`,
or a sequence of `n` `(low, high)` integer pairs that give the padding to
apply before and after each spatial dimension (default: `'VALID'`).
return_pooled_weights: bool; Also return the pooled weight
Returns:
The average for each window slice. If return_pooled_weights is True, it also
returns the sum of pooled weights.
"""
assert inputs.shape[:-1] == weights.shape
weights = jnp.expand_dims(weights, -1)
inputs = inputs * weights
y = nn.pooling.pool(inputs, 0., lax.add, window_shape, strides, padding)
pooled_weights = nn.pooling.pool(weights, 0., lax.add, window_shape, strides,
padding)
outputs = y / pooled_weights
if return_pooled_weights:
return outputs, (pooled_weights.squeeze(axis=-1) / np.prod(window_shape))
return outputs
def upscale2x_nearest_neighbor(inputs):
"""Doubles image size by repeating every pixel 2x2 times.
Args:
inputs: nd-array: Inputs in shape of `[bs, height, width, channels]'
Returns:
Upscaled inputs, in shape of `[bs, 2*height, 2*width, channels]'
"""
input_channels = inputs.shape[-1]
input_h, input_w = inputs.shape[1], inputs.shape[2]
input_nchw = jnp.transpose(inputs, (0, 3, 1, 2))
flat_input_shape = (-1, input_h, input_w, 1)
flat_input = jnp.reshape(input_nchw, flat_input_shape)
height_scale, width_scale = 2, 2
resize_kernel = jnp.ones((height_scale, width_scale, 1, 1))
strides = (height_scale, width_scale)
flat_output = lax.conv_transpose(
flat_input, resize_kernel, strides, padding='VALID')
output_nchw_shape = (-1, input_channels, height_scale * input_h,
width_scale * input_w)
output_nchw = jnp.reshape(flat_output, output_nchw_shape)
resized_x = jnp.transpose(output_nchw, (0, 2, 3, 1)) # Output: nhwc.
return resized_x
def central_crop(inputs, target_shape):
"""Returns a central crop in axis (1, 2).
Args:
inputs: nd-array; Inputs in shape of `[bs, height, width, channels]'.
target_shape: tuple(int); Target shape after crop.
Returns:
Cropped image.
"""
h, w = target_shape[1:3]
assert h <= inputs.shape[1], f'{h} > {inputs.shape[1]}'
assert w <= inputs.shape[2], f'{w} > {inputs.shape[2]}'
h0 = (inputs.shape[1] - h) // 2
w0 = (inputs.shape[2] - w) // 2
return inputs[:, h0:(h0 + h), w0:(w0 + w)]
def compute_1d_relative_distance(query_len: int, key_len: int) -> np.ndarray:
"""Generate relative positions of queries and keys for relative attention.
Args:
query_len: Length of the query.
key_len: Length of the key.
Returns:
A numpy (np) int array of shape [len_q, len_k] holding the distance
between each query and key pair, where the query and key are
indexed by their position. The smallest value in the array is zero.
"""
# A distance per dimension in the query-key arrays.
relative_positions = (
np.arange(key_len)[np.newaxis, :] - np.arange(query_len)[:, np.newaxis])
# These indices are being used by an embedding lookup, so shift the indices
# such that the smallest index is zero.
relative_positions -= np.min(relative_positions)
return relative_positions
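# e.g. compute_1d_relative_distance(2, 3)
# -> array([[1, 2, 3],
#           [0, 1, 2]])   # same shifted-distance pattern as above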
|
[
"dehghani@google.com"
] |
dehghani@google.com
|
b0e9d034f38256d73cecf9f4255f71cbf66f2f94
|
f3bd271bf00325881fb5b2533b9ef7f7448a75ec
|
/classes/_eigensolver1.py
|
b6ce82292c594e6f6578a8c6eb7978f16397aebd
|
[] |
no_license
|
obaica/xcp2k
|
7f99fc9d494859e16b9b0ea8e217b0493f4b2f59
|
6e15c2c95658f545102595dc1783f5e03a9e6916
|
refs/heads/master
| 2020-07-15T17:27:43.378835
| 2019-02-11T16:32:24
| 2019-02-11T16:32:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 468
|
py
|
from xcp2k.inputsection import InputSection
class _eigensolver1(InputSection):
def __init__(self):
InputSection.__init__(self)
self.N = None
self.N_loop = None
self.Diag_method = None
self.Eigenvalues = None
self.Init_method = None
self._name = "EIGENSOLVER"
self._keywords = {'Diag_method': 'DIAG_METHOD', 'N_loop': 'N_LOOP', 'Init_method': 'INIT_METHOD', 'Eigenvalues': 'EIGENVALUES', 'N': 'N'}
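# Illustrative use (hedged sketch; the attribute values below are
# placeholders, not validated CP2K inputs):
# eig = _eigensolver1()
# eig.N = 4
# eig.N_loop = 2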
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
4168a6134ffbc963226f819035ab681efd9c2368
|
85059a2fb09cdf3ba2446bb297b0f052132b20ce
|
/chapter_2/2_1_simple_message.py
|
4f598be9b2abbf907b1f00ecf3140a09724b0629
|
[] |
no_license
|
rauldoblem/python-crash-course
|
26d54eac58416f8c34e98a0f6c5a66c7e437d101
|
89c2364b90ff77fc3d54649d196426e6433e8482
|
refs/heads/master
| 2020-08-11T03:20:01.833271
| 2019-09-12T22:56:11
| 2019-09-12T22:56:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 52
|
py
|
message = "First message with python"
print(message)
|
[
"marlonricslim@gmail.com"
] |
marlonricslim@gmail.com
|
99884be196977a59e2a749233e3dcdcfc0a19ff2
|
2b72a92f2bc8b2f0c31ac100b3a17a6a1769d2a8
|
/first_numbers.py
|
4a7e62ad4c5c2e52474ba9ebeb720b3a2bb6f18c
|
[] |
no_license
|
DePotterDev/Python_Crash_Course
|
623b759f008806436a658de211ae2d07c93ad4a3
|
76ccb0398e164a10b07d49f582eafc6b7f3290fc
|
refs/heads/main
| 2022-12-27T00:00:09.591345
| 2020-10-08T15:28:58
| 2020-10-08T15:28:58
| 302,384,028
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 83
|
py
|
for value in range(1, 5):
print(value)
for value in range(6):
print(value)
|
[
"depotter.laurens@gmail.com"
] |
depotter.laurens@gmail.com
|
461c6cec75ee834628870c31c6fd22e0cacb7714
|
eac51f9c87a522cb656b29ce7880e3216a6e58fa
|
/Sortedfunc.py
|
51f50dbf551dd9b9cf484b1afacb69ee2f465b2e
|
[] |
no_license
|
awsanand2018/MyCodeFiles
|
947c8b56f67dea8d41418659b05e63b37912d7f5
|
7d1ed5da7711c31fbcce5d1ababfc12a8f16013f
|
refs/heads/master
| 2020-03-26T18:26:44.670337
| 2018-10-09T08:53:21
| 2018-10-09T08:53:21
| 145,213,342
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 666
|
py
|
num_list=[34,3,53,0,10,1,-34,100,23]
num_list1=[34,3,53,0,10,1,-34,100,23]
asc_sorted_list=[]
dsc_sorted_list=[]
def asc_function(list_input):
c=0
while list_input:
min_value=min(list_input)
x=list_input.pop(list_input.index(min_value))
asc_sorted_list.append(x)
c+=1
print(asc_sorted_list)
def dsc_function(list_input):
d=0
while list_input:
max_value=max(list_input)
y=list_input.pop(list_input.index(max_value))
dsc_sorted_list.append(y)
d+=1
print(dsc_sorted_list)
asc_function(num_list)
dsc_function(num_list1)
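# For reference (a hedged aside, not part of the original exercise): the
# built-in sorted() produces the same orderings in O(n log n), versus the
# O(n^2) min/max-pop loops above, and without emptying the input lists:
# data = [34, 3, 53, 0, 10, 1, -34, 100, 23]
# print(sorted(data))                  # ascending
# print(sorted(data, reverse=True))    # descending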
|
[
"noreply@github.com"
] |
noreply@github.com
|
ca86f498a7bd3f6a3e7ad355b0e7a117ce894fd8
|
36a610b86b5b4dd7c6c295d4b71d1baf01b88f62
|
/tool_run_model/tool/python3_venv/bin/toco_from_protos
|
8e83c0ab9ee4789379568a2f24d28441bedaa80c
|
[] |
no_license
|
nghialuffy/bkdn-ai-contest
|
cf9530e80c28ebec1024e330ab5655d4aedc03f5
|
d1d79e022dfc98dfda91a37357d635ae000c5a18
|
refs/heads/main
| 2023-07-07T16:03:44.822355
| 2021-05-21T07:13:58
| 2021-05-21T07:13:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 307
|
#!/media/nghialuffy/VMWAVE/BKDN_AI_CONTEST/tool_run_model/python3_venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from tensorflow.lite.toco.python.toco_from_protos import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"nghialuffy@github.com"
] |
nghialuffy@github.com
|
|
8f31d85510e5812f6fef5ea509e5d9ab779f7249
|
f8633675678bb57a1e9994a2287da0336e442acb
|
/DayanAbott1.2 - C_v.py
|
58955c98e012ffdc517a09f5342f42dbc5a70db3
|
[] |
no_license
|
MEKogan/Theoretical-Neuroscience-Problems
|
3eb779be5a1a7851f62470cec3569b10b0a34aee
|
0da3a5710cf1954782d3fbc411ea83487975f2f9
|
refs/heads/main
| 2023-06-06T20:47:44.616732
| 2021-07-07T14:25:25
| 2021-07-07T14:25:25
| 383,820,606
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,107
|
py
|
import numpy as np
import scipy as sp
import matplotlib.pyplot as plt
n = 20 #Number of elements in tau_ref
T = 10 #Length of trial, in s
dt = .001 #minimum time step, in s
t_j, t_p = np.meshgrid(np.arange(0,T,dt), np.arange(n)) #time steps of dt from 0 to T in meshgrid form, in s
rate,y = np.meshgrid(np.zeros(len(t_j[0,:])+1), np.arange(n)) #Initializing r(t) array
r_0 = 100 #Value r(t) exponentially recovers to, in Hz
rate[:,0] = r_0 #Sets first element of each row in rate to r_0
tau_ref = np.arange(1,21,20/n) #Refractory recovery rate array from 1 to 20 in steps of 1, in ms
random, x = np.meshgrid(np.zeros(len(t_j[0,:])), np.arange(n)) #Initializing meshgrid of random numbers between 0 and 1
spkt = [] #Initializing array for list of spike times
tau, z = np.meshgrid(np.zeros(len(t_j[0,:])+1), np.arange(n)) #Initializing array to track times between spikes
#Filling random meshgrid with random elements
for j in range(n):
for i in range(len(t_j[0,:])):
random[j,i] = np.random.rand(1)
for j in range(len(tau_ref)):
spkt.append([])
for i in range(len(t_j[0,:])):
# If r(t) = 0, time since spike is incremented and r(t) begins to exponentially rise
if rate[j,i] == 0:
tau[j,i+1] = tau[j,i] + dt
            rate[j,i+1] = r_0 * (1 - np.exp(-tau[j,i+1] / tau_ref[j]))
# If spike occurs, r(t) is set to 0 and exponentially approaches r_0
elif random[j,i] < rate[j,i] * dt:
tau[j,i] = 0
rate[j,i+1] = 0
spkt[j].append(t_j[j,i])
# If r(t) = r_0 and spike does not occur, time is incremented and r(t) = r_0
elif rate[j,i] == r_0:
tau[j,i+1] = tau[j,i] + dt
rate[j,i+1] = rate[j,i]
# If r(t) != 0 and != r_0 and spike does not occur, time is incremented and r(t) exponentially approaches r_0
else:
tau[j,i+1] = tau[j,i] + dt
rate[j,i+1] = r_0 * (1 - np.exp(-tau[j,i + 1] / tau_ref[j]))
interspike = []
for j in range(n):
interspike.append([])
for i in range(len(spkt[j][:])-1):
interspike[j].append(spkt[j][i+1] - spkt[j][i])
#Check array values
print(tau_ref)
print(len(tau_ref))
print(len(spkt))
#Plot coefficient of variation as a function of tau_ref over range 1 ms to 20 ms inclusive
mean = np.zeros(n) #Mean of the interspike intervals for each tau_ref
sd = np.zeros(n) #Standard deviation of the interspike intervals for each tau_ref
C_v = np.zeros(n) #Coefficient of variation for each tau_ref
for j in range(n):
mean[j] = np.mean(interspike[j][:])
sd[j] = np.std(interspike[j][:])
C_v[j] = sd[j] / mean[j]
print(C_v)
plt.plot(tau_ref, C_v)
plt.title(r"$C_v$ Plotted Against $\tau_{ref}$")
plt.xlabel(r'$\tau_{ref}$ (ms)')
plt.ylabel(r'$C_v$')
plt.show()
#Plot interspike interval histograms for a few different values of tau_ref in this range
#For tau_ref = 1 ms
plt.hist(interspike[0][:], weights = np.ones(len(interspike[0][:]))/len(interspike[0][:]))
plt.title(r"$\tau_{ref} = 1$ ms")
plt.show()
# For tau_ref = 5 ms
plt.hist(interspike[4][:], weights = np.ones(len(interspike[4][:]))/len(interspike[4][:]))
plt.title(r"$\tau_{ref} = 5$ ms")
plt.show()
#For tau_ref = 7 ms
plt.hist(interspike[6][:], weights = np.ones(len(interspike[6][:]))/len(interspike[6][:]))
plt.title(r"$\tau_{ref} = 7$ ms")
plt.show()
#For tau_ref = 8 ms
plt.hist(interspike[7][:], weights = np.ones(len(interspike[7][:]))/len(interspike[7][:]))
plt.title(r"$\tau_{ref} = 8$ ms")
plt.show()
#For tau_ref = 11 ms
plt.hist(interspike[10][:], weights = np.ones(len(interspike[10][:]))/len(interspike[10][:]))
plt.title(r"$\tau_{ref} = 11$ ms")
plt.show()
#For tau_ref = 16 ms
plt.hist(interspike[15][:], weights = np.ones(len(interspike[15][:]))/len(interspike[15][:]))
plt.title(r"$\tau_{ref} = 16$ ms")
plt.show()
#For tau_ref = 20 ms
plt.hist(interspike[19][:], weights = np.ones(len(interspike[19][:]))/len(interspike[19][:]))
plt.title(r"$\tau_{ref} = 20$ ms")
plt.show()
|
[
"noreply@github.com"
] |
noreply@github.com
|
719fe8ceb0742f200fd6db96a72226d3bb87841c
|
72332ab2422f3666a8dbedc93f0e279b6f64b584
|
/rl/storage.py
|
e2afc423a71002c4b538f2fd6dd2587bbb079a0c
|
[
"MIT"
] |
permissive
|
prasoongoyal/rl-learn
|
e5dddf25292d9eb82cd24a34a027c5392c795d77
|
0401ca8c60b11adbc6fddfda12c03876a45c2ad0
|
refs/heads/master
| 2021-06-30T19:31:45.473939
| 2020-11-11T13:50:00
| 2020-11-11T13:50:00
| 192,154,441
| 16
| 14
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,162
|
py
|
import torch
from torch.utils.data.sampler import BatchSampler, SubsetRandomSampler
def _flatten_helper(T, N, _tensor):
return _tensor.view(T * N, *_tensor.size()[2:])
class RolloutStorage(object):
def __init__(self, num_steps, num_processes, obs_shape, action_space, recurrent_hidden_state_size):
self.obs = torch.zeros(num_steps + 1, num_processes, *obs_shape)
self.recurrent_hidden_states = torch.zeros(num_steps + 1, num_processes, recurrent_hidden_state_size)
self.rewards = torch.zeros(num_steps, num_processes, 1)
self.value_preds = torch.zeros(num_steps + 1, num_processes, 1)
self.returns = torch.zeros(num_steps + 1, num_processes, 1)
self.action_log_probs = torch.zeros(num_steps, num_processes, 1)
if action_space.__class__.__name__ == 'Discrete':
action_shape = 1
else:
action_shape = action_space.shape[0]
self.actions = torch.zeros(num_steps, num_processes, action_shape)
if action_space.__class__.__name__ == 'Discrete':
self.actions = self.actions.long()
self.masks = torch.ones(num_steps + 1, num_processes, 1)
self.num_steps = num_steps
self.step = 0
def to(self, device):
self.obs = self.obs.to(device)
self.recurrent_hidden_states = self.recurrent_hidden_states.to(device)
self.rewards = self.rewards.to(device)
self.value_preds = self.value_preds.to(device)
self.returns = self.returns.to(device)
self.action_log_probs = self.action_log_probs.to(device)
self.actions = self.actions.to(device)
self.masks = self.masks.to(device)
def insert(self, obs, recurrent_hidden_states, actions, action_log_probs, value_preds, rewards, masks):
self.obs[self.step + 1].copy_(obs)
self.recurrent_hidden_states[self.step + 1].copy_(recurrent_hidden_states)
self.actions[self.step].copy_(actions)
self.action_log_probs[self.step].copy_(action_log_probs)
self.value_preds[self.step].copy_(value_preds)
self.rewards[self.step].copy_(rewards)
self.masks[self.step + 1].copy_(masks)
self.step = (self.step + 1) % self.num_steps
def after_update(self):
self.obs[0].copy_(self.obs[-1])
self.recurrent_hidden_states[0].copy_(self.recurrent_hidden_states[-1])
self.masks[0].copy_(self.masks[-1])
def compute_returns(self, next_value, use_gae, gamma, tau, max_step):
if use_gae:
self.value_preds[max_step] = next_value
gae = 0
for step in reversed(range(max_step)):
delta = self.rewards[step] + gamma * self.value_preds[step + 1] * self.masks[step + 1] - self.value_preds[step]
gae = delta + gamma * tau * self.masks[step + 1] * gae
self.returns[step] = gae + self.value_preds[step]
else:
self.returns[-1] = next_value
for step in reversed(range(self.rewards.size(0))):
self.returns[step] = self.returns[step + 1] * \
gamma * self.masks[step + 1] + self.rewards[step]
def feed_forward_generator(self, advantages, num_mini_batch, max_step):
num_steps, num_processes = self.rewards.size()[0:2]
batch_size = num_processes * num_steps
assert batch_size >= num_mini_batch, (
"PPO requires the number of processes ({}) "
"* number of steps ({}) = {} "
"to be greater than or equal to the number of PPO mini batches ({})."
"".format(num_processes, num_steps, num_processes * num_steps, num_mini_batch))
mini_batch_size = batch_size // num_mini_batch
sampler = BatchSampler(SubsetRandomSampler(range(max_step)), max_step, drop_last=False)
for indices in sampler:
obs_batch = self.obs[:max_step].view(-1, *self.obs.size()[2:])[indices]
recurrent_hidden_states_batch = self.recurrent_hidden_states[:max_step].view(-1,
self.recurrent_hidden_states.size(-1))[indices]
actions_batch = self.actions.view(-1, self.actions.size(-1))[indices]
return_batch = self.returns[:max_step].view(-1, 1)[indices]
masks_batch = self.masks[:max_step].view(-1, 1)[indices]
old_action_log_probs_batch = self.action_log_probs.view(-1, 1)[indices]
adv_targ = advantages.view(-1, 1)[indices]
yield obs_batch, recurrent_hidden_states_batch, actions_batch, \
return_batch, masks_batch, old_action_log_probs_batch, adv_targ
def recurrent_generator(self, advantages, num_mini_batch):
num_processes = self.rewards.size(1)
assert num_processes >= num_mini_batch, (
"PPO requires the number of processes ({}) "
"to be greater than or equal to the number of "
"PPO mini batches ({}).".format(num_processes, num_mini_batch))
num_envs_per_batch = num_processes // num_mini_batch
perm = torch.randperm(num_processes)
for start_ind in range(0, num_processes, num_envs_per_batch):
obs_batch = []
recurrent_hidden_states_batch = []
actions_batch = []
return_batch = []
masks_batch = []
old_action_log_probs_batch = []
adv_targ = []
for offset in range(num_envs_per_batch):
ind = perm[start_ind + offset]
obs_batch.append(self.obs[:-1, ind])
recurrent_hidden_states_batch.append(self.recurrent_hidden_states[0:1, ind])
actions_batch.append(self.actions[:, ind])
return_batch.append(self.returns[:-1, ind])
masks_batch.append(self.masks[:-1, ind])
old_action_log_probs_batch.append(self.action_log_probs[:, ind])
adv_targ.append(advantages[:, ind])
T, N = self.num_steps, num_envs_per_batch
# These are all tensors of size (T, N, -1)
obs_batch = torch.stack(obs_batch, 1)
actions_batch = torch.stack(actions_batch, 1)
return_batch = torch.stack(return_batch, 1)
masks_batch = torch.stack(masks_batch, 1)
old_action_log_probs_batch = torch.stack(old_action_log_probs_batch, 1)
adv_targ = torch.stack(adv_targ, 1)
# States is just a (N, -1) tensor
recurrent_hidden_states_batch = torch.stack(recurrent_hidden_states_batch, 1).view(N, -1)
# Flatten the (T, N, ...) tensors to (T * N, ...)
obs_batch = _flatten_helper(T, N, obs_batch)
actions_batch = _flatten_helper(T, N, actions_batch)
return_batch = _flatten_helper(T, N, return_batch)
masks_batch = _flatten_helper(T, N, masks_batch)
old_action_log_probs_batch = _flatten_helper(T, N, \
old_action_log_probs_batch)
adv_targ = _flatten_helper(T, N, adv_targ)
yield obs_batch, recurrent_hidden_states_batch, actions_batch, \
return_batch, masks_batch, old_action_log_probs_batch, adv_targ
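# Illustrative construction (hedged sketch; the sizes and the Discrete
# action space below are made up for the example):
# from gym.spaces import Discrete
# storage = RolloutStorage(num_steps=128, num_processes=8, obs_shape=(4,),
#                          action_space=Discrete(2),
#                          recurrent_hidden_state_size=64)
# storage.to(torch.device('cpu'))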
|
[
"pgoyal@cs.utexas.edu"
] |
pgoyal@cs.utexas.edu
|
2f76f6578ee20aea9b69a41ffd20301183b7ec6d
|
0faed81feb588ea0b8fd64be3d760ca454bac5b5
|
/callrecords/apps/bills/admin.py
|
5f80e08c9fdd4702e6cb288837fcb8619eb31e40
|
[] |
no_license
|
tiagovizoto/work-at-olist-1
|
b48c7614ee3204e2a4360496217aa73bbbcd967a
|
e5fca8ec7c2caa76fdbfd164433bb1b6dc887fb8
|
refs/heads/development
| 2020-04-27T14:34:13.617869
| 2019-04-03T12:27:38
| 2019-04-03T12:27:38
| 174,414,543
| 0
| 0
| null | 2019-04-03T12:32:52
| 2019-03-07T20:22:48
|
Python
|
UTF-8
|
Python
| false
| false
| 491
|
py
|
from django.contrib import admin
from .models import MinuteFee, FixedFee, Bill
class MinuteFeeAdmin(admin.ModelAdmin):
list_display = ('price', 'start', 'end',)
class FixedFeeAdmin(admin.ModelAdmin):
list_display = ('price', 'start', 'end',)
class BillAdmin(admin.ModelAdmin):
list_display = ('price', 'call_start', 'call_end', 'fixed_fee',)
admin.site.register(MinuteFee, MinuteFeeAdmin)
admin.site.register(FixedFee, FixedFeeAdmin)
admin.site.register(Bill, BillAdmin)
|
[
"vizoto123@gmail.com"
] |
vizoto123@gmail.com
|
6e1b0e5aa34daaa437c9eee45fc76dbcb0dc1c5a
|
2a5d93182aecc251462c9d3844e7c3e28013013e
|
/mysite/chat/tests.py
|
114198533bdb4a28861f61e98807da39f4a8fde4
|
[] |
no_license
|
burbaljaka/websocket_chat
|
20acc9908cd7b0e122a3b96252208defdc7460d9
|
ca6883987befb6bfad5973156b01bfe876b1414f
|
refs/heads/master
| 2021-05-26T22:58:31.151913
| 2020-04-08T19:37:16
| 2020-04-08T19:37:16
| 254,182,479
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,906
|
py
|
from channels.testing import ChannelsLiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.wait import WebDriverWait
import dill
class ChatTests(ChannelsLiveServerTestCase):
serve_static = True #emulate StaticLiveServerTestCase
@classmethod
def setUpClass(cls):
super().setUpClass()
try:
# NOTE: Requires "chromedriver" binary to be installed in $PATH
            cls.driver = webdriver.Chrome(r'C:\chromedriver.exe')
except:
super().tearDownClass()
raise
@classmethod
def tearDownClass(cls):
cls.driver.quit()
super().tearDownClass()
def test_chat_message_posted_then_seen_by_everyone_in_same_room(self):
try:
self._enter_chat_room('room_1')
self._open_new_window()
self._enter_chat_room('room_1')
self._switch_to_window(0)
self._post_message('hello')
WebDriverWait(self.driver, 2).until(lambda _:
'hello' in self._chat_log_value,
'Message was not received by window 1 from window 1')
self._switch_to_window(1)
WebDriverWait(self.driver, 2).until(lambda _:
'hello' in self._chat_log_value,
'Message was not received by window 2 from window 1')
finally:
self._close_all_new_windows()
def test_when_chat_message_posted_then_not_seen_by_anyone_in_different_room(self):
try:
self._enter_chat_room('room_1')
self._open_new_window()
self._enter_chat_room('room_2')
self._switch_to_window(0)
self._post_message('hello')
WebDriverWait(self.driver, 2).until(lambda _:
'hello' in self._chat_log_value,
'Message was not received by window 1 from window 1')
self._switch_to_window(1)
self._post_message('world')
WebDriverWait(self.driver, 2).until(lambda _:
'world' in self._chat_log_value,
'Message was not received by window 2 from window 2')
self.assertTrue('hello' not in self._chat_log_value,
'Message was improperly received by window 2 from window 1')
finally:
self._close_all_new_windows()
# === Utility ===
def _enter_chat_room(self, room_name):
self.driver.get(self.live_server_url + '/chat/')
ActionChains(self.driver).send_keys(room_name + '\n').perform()
WebDriverWait(self.driver, 2).until(lambda _:
room_name in self.driver.current_url)
def _open_new_window(self):
self.driver.execute_script('window.open("about:blank", "_blank");')
        self.driver.switch_to.window(self.driver.window_handles[-1])
def _close_all_new_windows(self):
while len(self.driver.window_handles) > 1:
self.driver.switch_to.window(self.driver.window_handles[-1])
self.driver.execute_script('window.close();')
if len(self.driver.window_handles) == 1:
self.driver.switch_to.window(self.driver.window_handles[0])
def _switch_to_window(self, window_index):
self.driver.switch_to.window(self.driver.window_handles[window_index])
def _post_message(self, message):
ActionChains(self.driver).send_keys(message + '\n').perform()
@property
def _chat_log_value(self):
return self.driver.find_element_by_css_selector('#chat-log').get_property('value')
|
[
"kapitonov.timur@gmail.com"
] |
kapitonov.timur@gmail.com
|
0f1c7b775e18b0d091c6bd1c45c0f3b10c73d105
|
66ae262ef53ffd1c508217efd7217602a94f7f5a
|
/Course/Data structures and algorithms/2.Data structures/2.1 Arrays and Linked Lists/5.LinkedLists.py
|
26949a433f85f34218369c9fa585129947ebfa59
|
[
"MIT"
] |
permissive
|
IP-Algorithmics/Udacity
|
8d8502eced3e2fa3c3b8cdfcbef59e0d3086198b
|
4349f4c12c838bcf3e53409f943ca8aacd58c94b
|
refs/heads/master
| 2022-08-03T02:42:31.357638
| 2020-05-29T19:33:12
| 2020-05-29T19:33:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,927
|
py
|
'''
https://youtu.be/zxkpZrozDUk
https://youtu.be/ZONGA5wmREI
'''
'''
Implementing and traversing a linked list
In this notebook we'll get some practice implementing a basic linked list—something like this:
2 -> 1 -> 4 -> 3 -> 5 -> None
Note - This notebook contains a few audio walkthroughs of the code cells. If you face difficulty in listening to the audio, try reconnecting your audio headsets, and use either Chrome or Firefox browser.
Key characteristics
First, let's review the overall abstract concepts for this data structure. To get started, click the walkthrough button below.
'''
'''
Exercise 1 - Implementing a simple linked list
Now that we've talked about the abstract characteristics that we want our linked list to have, let's look at how we might implement one in Python.
'''
'''
Step 1. Once you've seen the walkthrough, give it a try for yourself:
Create a Node class with value and next attributes
Use the class to create the head node with the value 2
Create and link a second node containing the value 1
Try printing the values (1 and 2) on the nodes you created (to make sure that you can access them!)
'''
class Node:
def __init__(self, value):
self.value = value
self.next = None
head = Node(2)
head.next = Node(1)
print(head.value)
print(head.next.value)
'''
At this point, our linked list looks like this:
2 -> 1 -> none
Our goal is to extend the list until it looks like this:
2 -> 1 -> 4 -> 3 -> 5 -> None
To do this, we need to create three more nodes, and we need to attach each one to the next attribute of the node that comes before it. Notice that we don't have a direct reference to any of the nodes other than the head node!
See if you can write the code to finish creating the above list:
Step 2. Add three more nodes to the list, with the values 4, 3, and 5
'''
head.next.next = Node(4)
head.next.next.next = Node(3)
head.next.next.next.next = Node(5)
print(head.value)
print(head.next.value)
print(head.next.next.value)
print(head.next.next.next.value)
print(head.next.next.next.next.value)
'''
Exercise 2 - Traversing the list
We successfully created a simple linked list. But printing all the values like we did above was pretty tedious. What if we had a list with 1,000 nodes?
Let's see how we might traverse the list and print all the values, no matter how long it might be.
Once you've seen the walkthrough, give it a try for yourself.
Step 3. Write a function that loops through the nodes of the list and prints all of the values
'''
def print_linked_list(head):
current_node = head
while current_node is not None:
print(current_node.value)
current_node = current_node.next
print_linked_list(head)
'''
Creating a linked list using iteration
Previously, we created a linked list using a very manual and tedious method. We called next multiple times on our head node.
Now that we know about iterating over or traversing the linked list, is there a way we can use that to create a linked list?
We've provided our solution below—but it might be a good exercise to see what you can come up with first. Here's the goal:
Step 4. See if you can write the code for the create_linked_list function below
The function should take a Python list of values as input and return the head of a linked list that has those values
There's some test code, and also a solution, below—give it a try for yourself first, but don't hesitate to look over the solution if you get stuck
'''
def create_linked_list(input_list):
head = None
for value in input_list:
if head is None:
head = Node(value)
else:
# Move to the tail (the last node)
current_node = head
while current_node.next:
current_node = current_node.next
current_node.next = Node(value)
return head
# Test Code
def test_function(input_list, head):
try:
if len(input_list) == 0:
if head is not None:
print("Fail")
return
for value in input_list:
if head.value != value:
print("Fail")
return
else:
head = head.next
print("Pass")
except Exception as e:
        print("Fail: " + str(e))
input_list = [1, 2, 3, 4, 5, 6]
head = create_linked_list(input_list)
test_function(input_list, head)
input_list = [1]
head = create_linked_list(input_list)
test_function(input_list, head)
input_list = []
head = create_linked_list(input_list)
test_function(input_list, head)
'''
The above solution works, but it has some shortcomings. In this next walkthrough, we'll demonstrate a different approach and see how its efficiency compares to the solution above.
'''
def create_linked_list_better(input_list):
head = None
tail = None
for value in input_list:
if head is None:
head = Node(value)
tail = head # when we only have 1 node, head and tail refer to the same node
else:
# attach the new node to the `next` of tail
tail.next = Node(value)
tail = tail.next # update the tail
return head
# Test Code
def test_function(input_list, head):
try:
if len(input_list) == 0:
if head is not None:
print("Fail")
return
for value in input_list:
if head.value != value:
print("Fail")
return
else:
head = head.next
print("Pass")
except Exception as e:
        print("Fail: " + str(e))
input_list = [1, 2, 3, 4, 5, 6]
head = create_linked_list_better(input_list)
test_function(input_list, head)
input_list = [1]
head = create_linked_list_better(input_list)
test_function(input_list, head)
input_list = []
head = create_linked_list_better(input_list)
test_function(input_list, head)
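'''
Why the second version is better: create_linked_list walks to the tail on
every append, which is O(n^2) overall, while create_linked_list_better keeps
a tail pointer and appends in O(1), for O(n) overall. A rough timing sketch
(hedged; timeit is standard library and the list size is arbitrary):
'''
# import timeit
# data = list(range(2000))
# print(timeit.timeit(lambda: create_linked_list(data), number=3))
# print(timeit.timeit(lambda: create_linked_list_better(data), number=3))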
|
[
"iulian.octavian.preda@gmail.com"
] |
iulian.octavian.preda@gmail.com
|
c8220cfb06f2fc1e44e934193e67e00662fe9de2
|
ba2b94c483abca07bd300fc254e90dca944714c1
|
/test_gRPC_pb2_grpc.py
|
393d7c8ce56e7f01bc4a03e17a171924c694d72f
|
[] |
no_license
|
agurusa/test_gRPC
|
402675d9b97e017cfe55f16da6bbfd06f82e8c9b
|
fbf646d42412af8129a65ea31699c6a5e4f56a7d
|
refs/heads/master
| 2020-03-11T00:06:33.455852
| 2018-04-15T22:22:24
| 2018-04-15T22:22:24
| 129,655,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,597
|
py
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import test_gRPC_pb2 as test__gRPC__pb2
class test_gRPCStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetFeature = channel.unary_unary(
'/test_gRPC.test_gRPC/GetFeature',
request_serializer=test__gRPC__pb2.An_unexciting_request.SerializeToString,
response_deserializer=test__gRPC__pb2.An_exciting_response.FromString,
)
class test_gRPCServicer(object):
# missing associated documentation comment in .proto file
pass
def GetFeature(self, request, context):
"""simple RPC where client sends a request to a server using the stub and waits for a response to come back.
rpc GetFeature(An_unexciting_request) returns (An_unexciting_request){}
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_test_gRPCServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetFeature': grpc.unary_unary_rpc_method_handler(
servicer.GetFeature,
request_deserializer=test__gRPC__pb2.An_unexciting_request.FromString,
response_serializer=test__gRPC__pb2.An_exciting_response.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'test_gRPC.test_gRPC', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
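# Illustrative server wiring (hedged sketch): standard grpc boilerplate that
# is not part of the generated file, and the port is arbitrary.
# from concurrent import futures
# server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
# add_test_gRPCServicer_to_server(test_gRPCServicer(), server)
# server.add_insecure_port('[::]:50051')
# server.start()
# server.wait_for_termination()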
|
[
"agurusa@gmail.com"
] |
agurusa@gmail.com
|
c279e12030d6850291b50ede25ac75ba3db5c7fd
|
24f664aa2344d4f5d5e7b048ac4e85231715c4c8
|
/experimental/dsmith/scrapheap/clsmith_run_cl_launcher.py
|
a8c4780ae8872e6567505e8112cc3a515308e79e
|
[] |
no_license
|
speycode/clfuzz
|
79320655e879d1e0a06a481e8ec2e293c7c10db7
|
f2a96cf84a7971f70cb982c07b84207db407b3eb
|
refs/heads/master
| 2020-12-05T13:44:55.486419
| 2020-01-03T14:14:03
| 2020-01-03T14:15:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,786
|
py
|
#!/usr/bin/env python3
import os
import re
from argparse import ArgumentParser
from collections import deque
from tempfile import NamedTemporaryFile
from time import strftime
from typing import Tuple
import progressbar
from dsmith import clsmith
from dsmith import db
from dsmith.db import *
from dsmith.lib import *
from labm8.py import crypto
from third_party.py.pyopencl import pyopencl as cl
def get_platform_name(platform_id):
platform = cl.get_platforms()[platform_id]
return platform.get_info(cl.platform_info.NAME)
def get_device_name(platform_id, device_id):
platform = cl.get_platforms()[platform_id]
device = platform.get_devices()[device_id]
return device.get_info(cl.device_info.NAME)
def get_driver_version(platform_id, device_id):
platform = cl.get_platforms()[platform_id]
device = platform.get_devices()[device_id]
return device.get_info(cl.device_info.DRIVER_VERSION)
def cl_launcher(
src: str, platform_id: int, device_id: int, *args
) -> Tuple[float, int, str, str]:
""" Invoke cl launcher on source """
with NamedTemporaryFile(prefix="cl_launcher-", suffix=".cl") as tmp:
tmp.write(src.encode("utf-8"))
tmp.flush()
return clsmith.cl_launcher(
tmp.name,
platform_id,
device_id,
*args,
            timeout=int(os.environ.get("TIMEOUT", 60)),
)
def verify_params(
platform: str,
device: str,
optimizations: bool,
global_size: tuple,
local_size: tuple,
stderr: str,
) -> None:
""" verify that expected params match actual as reported by CLsmith """
optimizations = "on" if optimizations else "off"
actual_platform = None
actual_device = None
actual_optimizations = None
actual_global_size = None
actual_local_size = None
for line in stderr.split("\n"):
if line.startswith("Platform: "):
            actual_platform = re.sub(r"^Platform: ", "", line).rstrip()
elif line.startswith("Device: "):
            actual_device = re.sub(r"^Device: ", "", line).rstrip()
elif line.startswith("OpenCL optimizations: "):
actual_optimizations = re.sub(
r"^OpenCL optimizations: ", "", line
).rstrip()
# global size
        match = re.match(r"^3-D global size \d+ = \[(\d+), (\d+), (\d+)\]", line)
if match:
actual_global_size = (
int(match.group(1)),
int(match.group(2)),
int(match.group(3)),
)
        match = re.match(r"^2-D global size \d+ = \[(\d+), (\d+)\]", line)
if match:
actual_global_size = (int(match.group(1)), int(match.group(2)), 0)
        match = re.match(r"^1-D global size \d+ = \[(\d+)\]", line)
if match:
actual_global_size = (int(match.group(1)), 0, 0)
# local size
        match = re.match(r"^3-D local size \d+ = \[(\d+), (\d+), (\d+)\]", line)
if match:
actual_local_size = (
int(match.group(1)),
int(match.group(2)),
int(match.group(3)),
)
        match = re.match(r"^2-D local size \d+ = \[(\d+), (\d+)\]", line)
if match:
actual_local_size = (int(match.group(1)), int(match.group(2)), 0)
        match = re.match(r"^1-D local size \d+ = \[(\d+)\]", line)
if match:
actual_local_size = (int(match.group(1)), 0, 0)
# check if we've collected everything:
if (
actual_platform
and actual_device
and actual_optimizations
and actual_global_size
and actual_local_size
):
assert actual_platform == platform
assert actual_device == device
assert actual_optimizations == optimizations
assert actual_global_size == global_size
assert actual_local_size == local_size
return
def parse_ndrange(ndrange: str) -> Tuple[int, int, int]:
components = ndrange.split(",")
assert len(components) == 3
return (int(components[0]), int(components[1]), int(components[2]))
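# Worked example: parse_ndrange("1024,16,1") returns (1024, 16, 1).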
def get_num_to_run(
session: db.session_t, testbed: Testbed, optimizations: int = None
):
num_ran = session.query(sql.sql.func.count(CLSmithResult.id)).filter(
CLSmithResult.testbed_id == testbed.id
)
total = session.query(sql.sql.func.count(CLSmithTestCase.id))
if optimizations is not None:
num_ran = (
num_ran.join(CLSmithTestCase)
.join(cl_launcherParams)
.filter(cl_launcherParams.optimizations == optimizations)
)
total = total.join(cl_launcherParams).filter(
cl_launcherParams.optimizations == optimizations
)
return num_ran.scalar(), total.scalar()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument(
"-H", "--hostname", type=str, default="cc1", help="MySQL database hostname"
)
parser.add_argument(
"platform_id", metavar="<platform-id>", type=int, help="OpenCL platform ID"
)
parser.add_argument(
"device_id", metavar="<device-id>", type=int, help="OpenCL device ID"
)
parser.add_argument(
"--opt", action="store_true", help="Only test with optimizations on"
)
parser.add_argument(
"--no-opt",
action="store_true",
help="Only test with optimizations disabled",
)
args = parser.parse_args()
# Parse command line options
platform_id = args.platform_id
device_id = args.device_id
# get testbed information
platform_name = get_platform_name(platform_id)
device_name = get_device_name(platform_id, device_id)
driver_version = get_driver_version(platform_id, device_id)
optimizations = None
if args.opt and args.no_opt:
pass # both flags
elif args.opt:
optimizations = 1
elif args.no_opt:
optimizations = 0
db.init(args.hostname) # initialize db engine
with Session() as session:
testbed = get_testbed(session, platform_name, device_name)
devname = util.device_str(testbed.device)
# progress bar
num_ran, num_to_run = get_num_to_run(session, testbed, optimizations)
bar = progressbar.ProgressBar(init_value=num_ran, max_value=num_to_run)
# programs to run, and results to push to database
inbox = deque()
def next_batch():
"""
Fill the inbox with jobs to run.
"""
BATCH_SIZE = 100
print(f"\nnext CLSmith batch for {devname} at", strftime("%H:%M:%S"))
# update the counters
num_ran, num_to_run = get_num_to_run(session, testbed, optimizations)
bar.max_value = num_to_run
bar.update(min(num_ran, num_to_run))
# fill inbox
done = session.query(CLSmithResult.testcase_id).filter(
CLSmithResult.testbed == testbed
)
if optimizations is not None:
done = (
done.join(CLSmithTestCase)
.join(cl_launcherParams)
.filter(cl_launcherParams.optimizations == optimizations)
)
todo = (
session.query(CLSmithTestCase)
.filter(~CLSmithTestCase.id.in_(done))
.order_by(CLSmithTestCase.program_id, CLSmithTestCase.params_id)
)
if optimizations is not None:
todo = todo.join(cl_launcherParams).filter(
cl_launcherParams.optimizations == optimizations
)
todo = todo.limit(BATCH_SIZE)
for testcase in todo:
inbox.append(testcase)
try:
while True:
# get the next batch of programs to run
if not len(inbox):
next_batch()
# we have no programs to run
if not len(inbox):
break
# get next program to run
testcase = inbox.popleft()
program = testcase.program
params = testcase.params
flags = params.to_flags()
# drive the program
runtime, status, stdout, stderr = cl_launcher(
program.src, platform_id, device_id, *flags
)
# assert that executed params match expected
verify_params(
platform=platform_name,
device=device_name,
optimizations=params.optimizations,
global_size=params.gsize,
local_size=params.lsize,
stderr=stderr,
)
# create new result
stdout_ = util.escape_stdout(stdout)
stdout = get_or_create(
session, CLSmithStdout, hash=crypto.sha1_str(stdout_), stdout=stdout_
)
stderr_ = util.escape_stderr(stderr)
stderr = get_or_create(
session, CLSmithStderr, hash=crypto.sha1_str(stderr_), stderr=stderr_
)
session.flush()
result = CLSmithResult(
testbed_id=testbed.id,
testcase_id=testcase.id,
status=status,
runtime=runtime,
stdout_id=stdout.id,
stderr_id=stderr.id,
outcome=analyze.get_cl_launcher_outcome(status, runtime, stderr_),
)
session.add(result)
session.commit()
# update progress bar
num_ran += 1
bar.update(min(num_ran, num_to_run))
finally:
# flush any remaining results
next_batch()
print("done.")
|
[
"chrisc.101@gmail.com"
] |
chrisc.101@gmail.com
|
97ce640d8f9e55d51546c4a93f3597a7132318cf
|
33a747246dab38960c25520d5232d5a37dfe2a01
|
/starbucks/address_to_gecoords.py
|
d842315ca462c234888776d81feaa308e92f2f34
|
[] |
no_license
|
Yxiaokuan/spider
|
6a79a950d170ea20dae13001697b9c214872f345
|
e51a398c7fdee1b1814c50c5a3121ce9a193e302
|
refs/heads/master
| 2022-04-02T16:01:18.104056
| 2020-02-11T03:49:44
| 2020-02-11T03:49:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,951
|
py
|
'''
@author:KongWeiKun
@file: address_to_gecoords.py
@time: 18-1-2 5:55 PM
@contact: 836242657@qq.com
'''
import csv
import json
import random
import re
import requests
import time
'''
Convert addresses to geographic coordinates (longitude/latitude)
'''
from urllib.request import quote  # URL encoding
def getLngLat(url, timeOutRetry=5):
    try:
        response = requests.get(url)
        return response.text
    except Exception:
        if timeOutRetry > 0:
            return getLngLat(url, timeOutRetry=(timeOutRetry - 1))
        print("Request really failed (retries exhausted)")
def write_to_file(content):
with open('./resources/starbucks_result.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')  # write to file, keeping Chinese characters intact
def pack_url(address):
ak='LVsGVvCzooeqcHGM1lnNzvTTSba7gtvU'
aks = 'fV9ODCmTALCdTtlbkRsheFUacvA9sL7A'
base_url = 'http://api.map.baidu.com/geocoder/v2/?address='
output = 'json'
callback = 'showLocation'
    url = base_url+quote(address)+"&output="+output+"&ak="+ak+"&callback="+callback
return url
def readCsv(filename):
reader = csv.reader(open(filename))
return reader
def main():
starbucks = './resources/starbucks.csv'
reader = readCsv(starbucks)
for row in reader:
address = row[0]
url=pack_url(address)
gecoord=getLngLat(url)
print(gecoord)
pattern = re.compile('"lng":(.*?),"lat":(.*?)}')
lngLat=re.findall(pattern, gecoord)
if lngLat:
for ll in lngLat:
print(ll[0])
                print('Writing %s,%s to file' % ll)
write_to_file(','.join(ll))
time.sleep(random.random()*5)
if __name__ == '__main__':
# main()
    #Use localtime() to convert a timestamp into a local-time struct,
    #then strftime() to re-format the time
start = time.time()
main()
end = time.time()
    print("Conversion finished, elapsed: %ss" % (end - start))
|
[
"kongwiki@163.com"
] |
kongwiki@163.com
|
651310c6f400d407a975549d9c4a6f548f3cf9e9
|
33a1b588372ce8fec94378c0d990372d01e3660f
|
/fsdet/data/builtin.py
|
709c2f3697f2efeae519e3543545737e16f0809f
|
[
"Apache-2.0"
] |
permissive
|
xinghaidemao/few-shot-object-detection-custom
|
0ab5af07ae1a2d532f9999df89c42dc3835415de
|
66277921a9c38b0f0d55a4f0d07c54363b17070b
|
refs/heads/master
| 2023-04-21T00:38:32.657431
| 2021-05-19T01:57:17
| 2021-05-19T01:57:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 17,787
|
py
|
"""
This file registers pre-defined datasets at hard-coded paths, and their metadata.
We hard-code metadata for common datasets. This will enable:
1. Consistency check when loading the datasets
2. Use models on these standard datasets directly and run demos,
without having to download the dataset annotations
We hard-code some paths to the dataset that's assumed to
exist in "./datasets/".
Here we only register the few-shot datasets and complete COCO, PascalVOC and
LVIS have been handled by the builtin datasets in detectron2.
"""
import os
from detectron2.data import DatasetCatalog, MetadataCatalog
from detectron2.data.datasets.lvis import (
get_lvis_instances_meta,
register_lvis_instances,
)
from detectron2.data.datasets.pascal_voc import register_pascal_voc
from detectron2.data.datasets.register_coco import register_coco_instances
from .builtin_meta import _get_builtin_metadata
from .meta_coco import register_meta_coco
from .meta_lvis import register_meta_lvis
from .meta_pascal_voc import register_meta_pascal_voc
# ==== Predefined datasets and splits for COCO ==========
root_pth = "F:/workspace/Daheng/Deep-learning-library/few-shot-object-detection-master/datasets"
_PREDEFINED_SPLITS_COCO = {}
_PREDEFINED_SPLITS_COCO["coco"] = {
# jiaonang
"jiaonang_train": (
"coco/jiaonang/train",
"coco/jiaonang/train.json",
),
# "coco_2014_train": (
# "coco/train2014",
# "coco/annotations/instances_train2014.json",
# ),
# "coco_2014_val": (
# "coco/val2014",
# "coco/annotations/instances_val2014.json",
# ),
# "coco_2014_minival": (
# "coco/val2014",
# "coco/annotations/instances_minival2014.json",
# ),
# "coco_2014_minival_100": (
# "coco/val2014",
# "coco/annotations/instances_minival2014_100.json",
# ),
# "coco_2014_valminusminival": (
# "coco/val2014",
# "coco/annotations/instances_valminusminival2014.json",
# ),
# "coco_2017_train": (
# "coco/train2017",
# "coco/annotations/instances_train2017.json",
# ),
# "coco_2017_val": (
# "coco/val2017",
# "coco/annotations/instances_val2017.json",
# ),
# "coco_2017_test": (
# "coco/test2017",
# "coco/annotations/image_info_test2017.json",
# ),
# "coco_2017_test-dev": (
# "coco/test2017",
# "coco/annotations/image_info_test-dev2017.json",
# ),
# "coco_2017_val_100": (
# "coco/val2017",
# "coco/annotations/instances_val2017_100.json",
# ),
}
def register_all_coco(root=root_pth):
# for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items():
# for key, (image_root, json_file) in splits_per_dataset.items():
# # Assume pre-defined datasets live in `./datasets`.
# register_coco_instances(
# key,
# _get_builtin_metadata(dataset_name),
# os.path.join(root, json_file)
# if "://" not in json_file
# else json_file,
# os.path.join(root, image_root),
# )
# register meta datasets
METASPLITS = [
(
"jiaonang_train_all",
"coco/jiaonang/train",
"coco/jiaonang/train.json",
),
(
"jiaonang_train_base",
"coco/jiaonang/train",
"coco/jiaonang/train.json",
),
("test_all", "coco/jiaonang/test", "coco/jiaonang/test.json"),
("test_base", "coco/jiaonang/test", "coco/jiaonang/test.json"),
("test_novel", "coco/jiaonang/test", "coco/jiaonang/test.json"),
]
# register small meta datasets for fine-tuning stage
for prefix in ["all", "novel"]:
        for shot in [1, 2, 3]:  # change the shot counts to your own setting
for seed in range(10):
seed = "" if seed == 0 else "_seed{}".format(seed)
name = "coco_trainval_{}_{}shot{}".format(prefix, shot, seed)
METASPLITS.append((name, "coco/jiaonang/train", ""))
for name, imgdir, annofile in METASPLITS:
register_meta_coco(
name,
_get_builtin_metadata("coco_fewshot"),
os.path.join(root, imgdir),
os.path.join(root, annofile),
)
# ==== register custom dataset for coco format ==========
_PREDEFINED_BASE_DATA = {
"jiaonang_base_data":{
"base_train":("jiaonang/base/train","jiaonang/base/train.json"),
"base_test":("jiaonang/base/test","jiaonang/base/test.json"),
"base_val":("jiaonang/base/val","jiaonang/base/val.json")
}
}
def register_base_data(root=root_pth):
for dataset_name, splits_per_dataset in _PREDEFINED_BASE_DATA.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_coco_instances(
key,
{},
#_get_builtin_metadata(dataset_name),
os.path.join(root, json_file)
if "://" not in json_file
else json_file,
os.path.join(root, image_root),
)
_PREDEFINED_BALANCE_DATA = {
"jiaonang_balance_data":{
"balance_train":("jiaonang/balance/train","jiaonang/balance/train.json"),
"balance_test":("jiaonang/balance/test","jiaonang/balance/test.json"),
"balance_val":("jiaonang/balance/val","jiaonang/balance/val.json")
}
}
def register_balance_data(root=root_pth):
for dataset_name, splits_per_dataset in _PREDEFINED_BALANCE_DATA.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_coco_instances(
key,
{},
# _get_builtin_metadata(dataset_name),
os.path.join(root, json_file)
if "://" not in json_file
else json_file,
os.path.join(root, image_root),
)
_PREDEFINED_FEW_DATA = {
"jiaonang_few_data":{
"few_train":("jiaonang/few/train","jiaonang/few/train.json"),
"few_test":("jiaonang/few/few","jiaonang/few/test.json"),
"few_val":("jiaonang/few/val","jiaonang/few/val.json")
}
}
def register_few_data(root=root_pth):
for dataset_name, splits_per_dataset in _PREDEFINED_FEW_DATA.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_coco_instances(
key,
{},
# _get_builtin_metadata(dataset_name),
os.path.join(root, json_file)
if "://" not in json_file
else json_file,
os.path.join(root, image_root),
)
_PREDEFINED_YINXIAN_DATA = {
"yinxian_data":{
"yx_train":("yinxian/images","yinxian/dataset.json"),
"yx_test": ("yinxian/test", "yinxian/test.json")
}
}
def register_yx_data(root=root_pth):
for dataset_name, splits_per_dataset in _PREDEFINED_YINXIAN_DATA.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_coco_instances(
key,
{},
# _get_builtin_metadata(dataset_name),
os.path.join(root, json_file)
if "://" not in json_file
else json_file,
os.path.join(root, image_root),
)
# ==== Predefined datasets and splits for LVIS ==========
_PREDEFINED_SPLITS_LVIS = {
"lvis_v0.5": {
# "lvis_v0.5_train": ("coco/train2017", "lvis/lvis_v0.5_train.json"),
"lvis_v0.5_train_freq": (
"coco/train2017",
"lvis/lvis_v0.5_train_freq.json",
),
"lvis_v0.5_train_common": (
"coco/train2017",
"lvis/lvis_v0.5_train_common.json",
),
"lvis_v0.5_train_rare": (
"coco/train2017",
"lvis/lvis_v0.5_train_rare.json",
),
# "lvis_v0.5_val": ("coco/val2017", "lvis/lvis_v0.5_val.json"),
# "lvis_v0.5_val_rand_100": (
# "coco/val2017",
# "lvis/lvis_v0.5_val_rand_100.json",
# ),
# "lvis_v0.5_test": (
# "coco/test2017",
# "lvis/lvis_v0.5_image_info_test.json",
# ),
},
}
def register_all_lvis(root="datasets"):
for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_LVIS.items():
for key, (image_root, json_file) in splits_per_dataset.items():
# Assume pre-defined datasets live in `./datasets`.
register_lvis_instances(
key,
_get_builtin_metadata(dataset_name),
os.path.join(root, json_file)
if "://" not in json_file
else json_file,
os.path.join(root, image_root),
)
# register meta datasets
METASPLITS = [
(
"lvis_v0.5_train_shots",
"coco/train2017",
"lvissplit/lvis_shots.json",
),
(
"lvis_v0.5_train_rare_novel",
"coco/train2017",
"lvis/lvis_v0.5_train_rare.json",
),
("lvis_v0.5_val_novel", "coco/val2017", "lvis/lvis_v0.5_val.json"),
]
for name, image_root, json_file in METASPLITS:
dataset_name = "lvis_v0.5_fewshot" if "novel" in name else "lvis_v0.5"
register_meta_lvis(
name,
_get_builtin_metadata(dataset_name),
os.path.join(root, json_file)
if "://" not in json_file
else json_file,
os.path.join(root, image_root),
)
# ==== Predefined splits for PASCAL VOC ===========
def register_all_pascal_voc(root=root_pth):
# SPLITS = [
# ("voc_2007_trainval", "VOC2007", "trainval"),
# ("voc_2007_train", "VOC2007", "train"),
# ("voc_2007_val", "VOC2007", "val"),
# ("voc_2007_test", "VOC2007", "test"),
# ("voc_2012_trainval", "VOC2012", "trainval"),
# ("voc_2012_train", "VOC2012", "train"),
# ("voc_2012_val", "VOC2012", "val"),
# ]
# for name, dirname, split in SPLITS:
# year = 2007 if "2007" in name else 2012
# register_pascal_voc(name, os.path.join(root, dirname), split, year)
# MetadataCatalog.get(name).evaluator_type = "pascal_voc"
# register meta datasets
METASPLITS = [
("voc_2007_trainval_base1", "VOC2007", "trainval", "base1", 1),
("voc_2007_trainval_base2", "VOC2007", "trainval", "base2", 2),
("voc_2007_trainval_base3", "VOC2007", "trainval", "base3", 3),
("voc_2012_trainval_base1", "VOC2012", "trainval", "base1", 1),
("voc_2012_trainval_base2", "VOC2012", "trainval", "base2", 2),
("voc_2012_trainval_base3", "VOC2012", "trainval", "base3", 3),
("voc_2007_trainval_all1", "VOC2007", "trainval", "base_novel_1", 1),
("voc_2007_trainval_all2", "VOC2007", "trainval", "base_novel_2", 2),
("voc_2007_trainval_all3", "VOC2007", "trainval", "base_novel_3", 3),
("voc_2012_trainval_all1", "VOC2012", "trainval", "base_novel_1", 1),
("voc_2012_trainval_all2", "VOC2012", "trainval", "base_novel_2", 2),
("voc_2012_trainval_all3", "VOC2012", "trainval", "base_novel_3", 3),
("voc_2007_test_base1", "VOC2007", "test", "base1", 1),
("voc_2007_test_base2", "VOC2007", "test", "base2", 2),
("voc_2007_test_base3", "VOC2007", "test", "base3", 3),
("voc_2007_test_novel1", "VOC2007", "test", "novel1", 1),
("voc_2007_test_novel2", "VOC2007", "test", "novel2", 2),
("voc_2007_test_novel3", "VOC2007", "test", "novel3", 3),
("voc_2007_test_all1", "VOC2007", "test", "base_novel_1", 1),
("voc_2007_test_all2", "VOC2007", "test", "base_novel_2", 2),
("voc_2007_test_all3", "VOC2007", "test", "base_novel_3", 3),
]
# register small meta datasets for fine-tuning stage
for prefix in ["all", "novel"]:
for sid in range(1, 4):
for shot in [1, 2, 3, 5, 10]:
for year in [2007, 2012]:
for seed in range(100):
seed = "" if seed == 0 else "_seed{}".format(seed)
name = "voc_{}_trainval_{}{}_{}shot{}".format(
year, prefix, sid, shot, seed
)
dirname = "VOC{}".format(year)
img_file = "{}_{}shot_split_{}_trainval".format(
prefix, shot, sid
)
keepclasses = (
"base_novel_{}".format(sid)
if prefix == "all"
else "novel{}".format(sid)
)
METASPLITS.append(
(name, dirname, img_file, keepclasses, sid)
)
for name, dirname, split, keepclasses, sid in METASPLITS:
year = 2007 if "2007" in name else 2012
register_meta_pascal_voc(
name,
_get_builtin_metadata("pascal_voc_fewshot"),
os.path.join(root, dirname),
split,
year,
keepclasses,
sid,
)
MetadataCatalog.get(name).evaluator_type = "pascal_voc"
# Register them all under "./datasets"
register_all_coco()
register_all_lvis()
register_all_pascal_voc()
# Register custom data
register_base_data()
register_balance_data()
register_few_data()
register_yx_data()
# The commented-out registration helpers below are kept for reference
# import cv2
# from detectron2.data import DatasetCatalog, MetadataCatalog
# from detectron2.data.datasets.coco import load_coco_json
# from detectron2.utils.visualizer import Visualizer
# import pycocotools
# #Declare the classes; try to keep them consistent
# CLASS_NAMES =["0","1","2"]
# # Dataset paths
# DATASET_ROOT = 'F:/workspace/Daheng/Deep-learning-library/few-shot-object-detection-master/datasets/jiaonang'
# ANN_ROOT = os.path.join(DATASET_ROOT, 'base')
#
# TRAIN_PATH = os.path.join(ANN_ROOT, 'train')
# VAL_PATH = os.path.join(ANN_ROOT, 'val')
#
# TRAIN_JSON = os.path.join(ANN_ROOT, 'train.json')
# #VAL_JSON = os.path.join(ANN_ROOT, 'val.json')
# VAL_JSON = os.path.join(ANN_ROOT, 'val.json')
#
# # Declare the dataset splits
# PREDEFINED_SPLITS_DATASET = {
# "coco_my_train": (TRAIN_PATH, TRAIN_JSON),
# "coco_my_val": (VAL_PATH, VAL_JSON),
# }
#
# #Register the datasets (this step registers the custom dataset with Detectron2)
# def register_dataset():
# """
# purpose: register all splits of dataset with PREDEFINED_SPLITS_DATASET
# """
# for key, (image_root, json_file) in PREDEFINED_SPLITS_DATASET.items():
# register_dataset_instances(name=key,
# json_file=json_file,
# image_root=image_root)
#
#
# #Register dataset instances and load the object instances from the dataset
# def register_dataset_instances(name, json_file, image_root):
# """
# purpose: register dataset to DatasetCatalog,
# register metadata to MetadataCatalog and set attribute
# """
# DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name))
# MetadataCatalog.get(name).set(json_file=json_file,
# image_root=image_root,
# evaluator_type="coco")
#
#
# # Register the datasets and their metadata
# def plain_register_dataset():
# #Training set
# DatasetCatalog.register("coco_my_train", lambda: load_coco_json(TRAIN_JSON, TRAIN_PATH))
# MetadataCatalog.get("coco_my_train").set(thing_classes=CLASS_NAMES, # optional; note it cannot display Chinese labels, so disable this for Chinese class names
#                                          evaluator_type='coco', # specify the evaluation method
# json_file=TRAIN_JSON,
# image_root=TRAIN_PATH)
#
# #DatasetCatalog.register("coco_my_val", lambda: load_coco_json(VAL_JSON, VAL_PATH, "coco_2017_val"))
# #Validation / test set
# DatasetCatalog.register("coco_my_val", lambda: load_coco_json(VAL_JSON, VAL_PATH))
# MetadataCatalog.get("coco_my_val").set(thing_classes=CLASS_NAMES, # optional; note it cannot display Chinese labels, so disable this for Chinese class names
#                                        evaluator_type='coco', # specify the evaluation method
# json_file=VAL_JSON,
# image_root=VAL_PATH)
# # Inspect the dataset annotations and visually check that they are correct.
# # You could also script this check yourself; essentially it verifies that
# # the annotation boxes do not extend beyond the image boundary.
# # Using this method is optional.
# def checkout_dataset_annotation(name="coco_my_val"):
# #dataset_dicts = load_coco_json(TRAIN_JSON, TRAIN_PATH, name)
# dataset_dicts = load_coco_json(TRAIN_JSON, TRAIN_PATH)
# #print(len(dataset_dicts))
# for i, d in enumerate(dataset_dicts,0):
# #print(d)
# img = cv2.imread(d["file_name"])
# visualizer = Visualizer(img[:, :, ::-1], metadata=MetadataCatalog.get(name), scale=1.5)
# vis = visualizer.draw_dataset_dict(d)
# cv2.imshow('show', vis.get_image()[:, :, ::-1])
# cv2.imwrite('out/'+str(i) + '.jpg',vis.get_image()[:, :, ::-1])
# cv2.waitKey(0)
# if i == 200:
# break
|
[
"wangzheng@daheng-image.com"
] |
wangzheng@daheng-image.com
|
724d3309334dd86c1d7cf46d985c3ee24b81bfaf
|
5953ab83bc8a74a034bb7036e6f82ed8bf39899c
|
/Beta_version/XGAN/utils.py
|
20e8a15d35f2283d91c5e0ec93168854659f74cf
|
[
"Apache-2.0"
] |
permissive
|
rexwangcc/RecoverGAN
|
fe2f123760588672799d44732d6f1fa4d9d81098
|
f7387271b43e0ce440a66f1f200ae70ce6b3f93d
|
refs/heads/master
| 2021-03-24T12:48:28.364072
| 2017-07-21T03:02:21
| 2017-07-21T03:02:21
| 86,881,150
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,239
|
py
|
import math
import json
import random
import scipy.misc
import numpy as np
from time import gmtime, strftime
from six.moves import xrange
try:
import simplejson as json
except:
pass
"""
Some codes from https://github.com/Newmu/dcgan_code
"""
import tensorflow as tf
import tensorflow.contrib.slim as slim
get_stddev = lambda x, k_h, k_w: 1 / math.sqrt(k_w * k_h * x.get_shape()[-1])
def show_all_variables():
model_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(model_vars, print_info=True)
def get_image(image_path, input_height, input_width,
resize_height=64, resize_width=64,
is_crop=True, is_grayscale=False):
image = imread(image_path, is_grayscale)
return transform(image, input_height, input_width,
resize_height, resize_width, is_crop)
def save_images(images, size, image_path):
return imsave(inverse_transform(images), size, image_path)
def imread(path, is_grayscale=False):
if (is_grayscale):
return scipy.misc.imread(path, flatten=True).astype(np.float)
else:
return scipy.misc.imread(path).astype(np.float)
def merge_images(images, size):
return inverse_transform(images)
def merge(images, size):
h, w = images.shape[1], images.shape[2]
if (images.shape[3] in (3, 4)):
c = images.shape[3]
img = np.zeros((h * size[0], w * size[1], c))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w, :] = image
return img
elif images.shape[3] == 1:
img = np.zeros((h * size[0], w * size[1]))
for idx, image in enumerate(images):
i = idx % size[1]
j = idx // size[1]
img[j * h:j * h + h, i * w:i * w + w] = image[:, :, 0]
return img
else:
raise ValueError('in merge(images,size) images parameter '
'must have dimensions: HxW or HxWx3 or HxWx4')
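# Illustrative use (hedged sketch; the shapes are made up): merge() tiles a
# batch of HxWxC images into one grid image.
# batch = np.random.rand(16, 64, 64, 3)
# grid = merge(batch, (4, 4))  # -> shape (256, 256, 3)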
def imsave(images, size, path):
image = np.squeeze(merge(images, size))
return scipy.misc.imsave(path, image)
def center_crop(x, crop_h, crop_w,
resize_h=64, resize_w=64):
if crop_w is None:
crop_w = crop_h
h, w = x.shape[:2]
j = int(round((h - crop_h) / 2.))
i = int(round((w - crop_w) / 2.))
return scipy.misc.imresize(
x[j:j + crop_h, i:i + crop_w], [resize_h, resize_w])
def transform(image, input_height, input_width,
resize_height=64, resize_width=64, is_crop=True):
if is_crop:
cropped_image = center_crop(
image, input_height, input_width,
resize_height, resize_width)
else:
cropped_image = scipy.misc.imresize(
image, [resize_height, resize_width])
return np.array(cropped_image) / 127.5 - 1.
def inverse_transform(images):
return (images + 1.) / 2.
# def to_json(output_path, *layers):
# with open(output_path, "w") as layer_f:
# lines = ""
# for w, b, bn in layers:
# layer_idx = w.name.split('/')[0].split('h')[1]
# B = b.eval()
# if "lin/" in w.name:
# W = w.eval()
# depth = W.shape[1]
# else:
# W = np.rollaxis(w.eval(), 2, 0)
# depth = W.shape[0]
# biases = {"sy": 1, "sx": 1, "depth": depth,
# "w": ['%.2f' % elem for elem in list(B)]}
# if bn != None:
# gamma = bn.gamma.eval()
# beta = bn.beta.eval()
# gamma = {"sy": 1, "sx": 1, "depth": depth, "w": [
# '%.2f' % elem for elem in list(gamma)]}
# beta = {"sy": 1, "sx": 1, "depth": depth, "w": [
# '%.2f' % elem for elem in list(beta)]}
# else:
# gamma = {"sy": 1, "sx": 1, "depth": 0, "w": []}
# beta = {"sy": 1, "sx": 1, "depth": 0, "w": []}
# if "lin/" in w.name:
# fs = []
# for w in W.T:
# fs.append({"sy": 1, "sx": 1, "depth": W.shape[
# 0], "w": ['%.2f' % elem for elem in list(w)]})
# lines += """
# var layer_%s = {
# "layer_type": "fc",
# "sy": 1, "sx": 1,
# "out_sx": 1, "out_sy": 1,
# "stride": 1, "pad": 0,
# "out_depth": %s, "in_depth": %s,
# "biases": %s,
# "gamma": %s,
# "beta": %s,
# "filters": %s
# };""" % (layer_idx.split('_')[0], W.shape[1], W.shape[0], biases, gamma, beta, fs)
# else:
# fs = []
# for w_ in W:
# fs.append({"sy": 5, "sx": 5, "depth": W.shape[3], "w": [
# '%.2f' % elem for elem in list(w_.flatten())]})
# lines += """
# var layer_%s = {
# "layer_type": "deconv",
# "sy": 5, "sx": 5,
# "out_sx": %s, "out_sy": %s,
# "stride": 2, "pad": 1,
# "out_depth": %s, "in_depth": %s,
# "biases": %s,
# "gamma": %s,
# "beta": %s,
# "filters": %s
# };""" % (layer_idx, 2**(int(layer_idx) + 2), 2**(int(layer_idx) + 2),
# W.shape[0], W.shape[3], biases, gamma, beta, fs)
# layer_f.write(" ".join(lines.replace("'", "").split()))
# def make_gif(images, fname, duration=2, true_image=False):
# import moviepy.editor as mpy
# def make_frame(t):
# try:
# x = images[int(len(images) / duration * t)]
# except:
# x = images[-1]
# if true_image:
# return x.astype(np.uint8)
# else:
# return ((x + 1) / 2 * 255).astype(np.uint8)
# clip = mpy.VideoClip(make_frame, duration=duration)
# clip.write_gif(fname, fps=len(images) / duration)
# def visualize(sess, dcgan, config, option):
# image_frame_dim = int(math.ceil(config.batch_size**.5))
# if option == 0:
# z_sample = np.random.uniform(-0.5, 0.5,
# size=(config.batch_size, dcgan.z_dim))
# samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
# save_images(samples, [image_frame_dim, image_frame_dim],
# './samples/test_%s.png' % strftime("%Y%m%d%H%M%S", gmtime()))
# elif option == 1:
# values = np.arange(0, 1, 1. / config.batch_size)
# for idx in xrange(100):
# print(" [*] %d" % idx)
# z_sample = np.zeros([config.batch_size, dcgan.z_dim])
# for kdx, z in enumerate(z_sample):
# z[idx] = values[kdx]
# if config.dataset == "mnist":
# y = np.random.choice(10, config.batch_size)
# y_one_hot = np.zeros((config.batch_size, 10))
# y_one_hot[np.arange(config.batch_size), y] = 1
# samples = sess.run(dcgan.sampler, feed_dict={
# dcgan.z: z_sample, dcgan.y: y_one_hot})
# else:
# samples = sess.run(dcgan.sampler, feed_dict={
# dcgan.z: z_sample})
# save_images(samples, [image_frame_dim, image_frame_dim],
# './samples/test_arange_%s.png' % (idx))
# elif option == 2:
# values = np.arange(0, 1, 1. / config.batch_size)
# for idx in [random.randint(0, 99) for _ in xrange(100)]:
# print(" [*] %d" % idx)
# z = np.random.uniform(-0.2, 0.2, size=(dcgan.z_dim))
# z_sample = np.tile(z, (config.batch_size, 1))
# #z_sample = np.zeros([config.batch_size, dcgan.z_dim])
# for kdx, z in enumerate(z_sample):
# z[idx] = values[kdx]
# if config.dataset == "mnist":
# y = np.random.choice(10, config.batch_size)
# y_one_hot = np.zeros((config.batch_size, 10))
# y_one_hot[np.arange(config.batch_size), y] = 1
# samples = sess.run(dcgan.sampler, feed_dict={
# dcgan.z: z_sample, dcgan.y: y_one_hot})
# else:
# samples = sess.run(dcgan.sampler, feed_dict={
# dcgan.z: z_sample})
# try:
# make_gif(samples, './samples/test_gif_%s.gif' % (idx))
# except:
# save_images(samples, [image_frame_dim, image_frame_dim],
# './samples/test_%s.png' % strftime("%Y%m%d%H%M%S", gmtime()))
# elif option == 3:
# values = np.arange(0, 1, 1. / config.batch_size)
# for idx in xrange(100):
# print(" [*] %d" % idx)
# z_sample = np.zeros([config.batch_size, dcgan.z_dim])
# for kdx, z in enumerate(z_sample):
# z[idx] = values[kdx]
# samples = sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample})
# make_gif(samples, './samples/test_gif_%s.gif' % (idx))
# elif option == 4:
# image_set = []
# values = np.arange(0, 1, 1. / config.batch_size)
# for idx in xrange(100):
# print(" [*] %d" % idx)
# z_sample = np.zeros([config.batch_size, dcgan.z_dim])
# for kdx, z in enumerate(z_sample):
# z[idx] = values[kdx]
# image_set.append(
# sess.run(dcgan.sampler, feed_dict={dcgan.z: z_sample}))
# make_gif(image_set[-1], './samples/test_gif_%s.gif' % (idx))
# new_image_set = [merge(np.array([images[idx] for images in image_set]), [10, 10])
# for idx in range(64) + range(63, -1, -1)]
# make_gif(new_image_set, './samples/test_gif_merged.gif', duration=8)
|
[
"rexwang@bu.edu"
] |
rexwang@bu.edu
|
ec3a4f6551deee04bcdac358fa630e8eeee8ad77
|
47f0ac3aab36d2f56c7a0683b4024b925967e7b7
|
/create_plot.py
|
0efdd14a9a91837a757019cffc589b06a08cf51f
|
[] |
no_license
|
Joseph7e/Brain
|
b23f70f6af65e0ac82695248a8c15c18bdcc5342
|
d88e71b8e062a6a97eda2387ca56d371fae5cac2
|
refs/heads/master
| 2021-01-10T22:45:49.133412
| 2016-10-08T21:09:22
| 2016-10-08T21:09:22
| 70,356,411
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,230
|
py
|
#!/usr/bin/python3
import matplotlib.pyplot as pyplot
#node detail lists
av_var_list = []
low_var_list = []
copy_number_list = []
length_low_list = []
length_high_list = []
with open('gene_stats/variation_table.xls', 'r') as v:
    for line in v.readlines():
        # Skip header and comment lines.
        if '#' in line or '%' in line:
            continue
        sample, cp, ll, lh, av, hv = line.split("\t")
        # Skip rows with missing (zero) variation values.
        if av == '0' or hv == '0':
            continue
        # float() is safer than eval() for parsing numeric fields.
        av_var_list.append(float(av))
        low_var_list.append(float(hv))
        copy_number_list.append(float(cp))
        length_high_list.append(float(lh))
        length_low_list.append(float(ll))
def plotdata(x_list, y_list, title, subtitle, x_label, y_label, out_name):
    """Take a user-defined set of data and save a scatter plot as a .jpg."""
    figure = pyplot.figure()
    figure.suptitle(title, fontsize=20, fontweight='bold')
    figure.subplots_adjust(top=0.85)
    ax = figure.add_subplot(111)
    ax.set_title(subtitle)
    ax.set_xlabel(x_label)
    ax.set_ylabel(y_label)
    # may want to use marker="," if "." is too big
    pyplot.scatter(x_list, y_list, color="blue", marker=".")
    pyplot.savefig(out_name + "_scatterplot.jpg")
print("Creating a scatter plot of GC content vs Coverage") #Read print for notes, running plotdata for all data
plotdata(copy_number_list,low_var_list, "Bacterial 16S", "copy_number VS lowID", "Copy Number", "% Identity", "selected_lowIDvscopy", )
print("Creating a scatter plot of Length vs GC content")
plotdata(copy_number_list,av_var_list, "Bacterial 16S", "copy_number VS Average %ID", "Copy Number", "% Identity", "selected_avgIDvscopy", )
print ('average copy number = ', sum(copy_number_list)/len(copy_number_list))
print (len(copy_number_list))
nl = sorted(copy_number_list)
na = sorted(av_var_list)
nn = sorted(low_var_list)
print ("lowest copy number = {}, highest copy number = {}".format(nl[0], nl[-1]))
print ('lowest avg variation = {}, highest ag variation = {}'.format(na[0], na[-1]))
print ('lowest low variation = {}, highest low variation = {}'.format(nn[0],nn[-1]))
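# Sketch of the expected input, inferred from the parser above (the real
# gene_stats/variation_table.xls is not shown here): a tab-separated table,
# lines containing '#' or '%' are skipped, and each data row carries the six
# columns unpacked above as sample, cp, ll, lh, av, hv.
# e.g., a hypothetical row:
#   sampleA	7	1450	1542	98.6	97.1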
|
[
"joseph7e@brain.sr.unh.edu"
] |
joseph7e@brain.sr.unh.edu
|
1b6688fd6a57724daa23f0329b9115745a80ec69
|
3499a6377146c03dbe7aa9af1ad2e1c6e215d6e7
|
/try-django/articles/migrations/0001_initial.py
|
3550049b5b34c412607502ff13d9315c6863d2b5
|
[] |
no_license
|
Krishnamurtyp/django_tutorials
|
ce8cc213a141872b1f2ed01734fddc91d36c8854
|
2f7cfd0e8bff2430bd226cc8ef68bc46f1eee0a4
|
refs/heads/master
| 2023-09-04T00:55:26.776793
| 2021-10-10T12:18:38
| 2021-10-10T12:18:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 526
|
py
|
# Generated by Django 3.2.5 on 2021-10-04 19:21
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.TextField()),
('content', models.TextField()),
],
),
]
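# For reference, a hypothetical models.py that this initial migration would be
# generated from (inferred from the CreateModel operation; the app's actual
# models.py is not part of this file):
#
#     from django.db import models
#
#     class Article(models.Model):
#         title = models.TextField()
#         content = models.TextField()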
|
[
"mail@abogomolov.com"
] |
mail@abogomolov.com
|
cd3a4c448b37604df838a0d2b35144b0fbb98eda
|
7629e40e79cb2ccb38a3902e5f39a757d3d0a6df
|
/video-generator/src/image/image_generator.py
|
be5d945681eaf00c1878e5f5fc74663323ddce76
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
toannhu96/product_video_ads
|
7700b0fe4723ff868699d6d37cbcb47339c49a6b
|
7cb59d33a0b4f3e2f113055ebecabe90856ddaf4
|
refs/heads/main
| 2023-07-31T20:08:27.182799
| 2021-09-13T03:50:58
| 2021-09-13T03:50:58
| 405,821,992
| 0
| 0
|
Apache-2.0
| 2021-09-13T03:36:22
| 2021-09-13T03:36:22
| null |
UTF-8
|
Python
| false
| false
| 4,967
|
py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import log
from ffmpeg.ffmpeg_generator import FFMPEGGenerator
class ImageGenerator(FFMPEGGenerator):
"""Image-handling class, which actually turns inputs into images."""
logger = log.getLogger()
def process_image(self, image_or_videos_overlays, text_overlays, input_image,
output_image):
# Holds images generated from text
self.text_imgs = []
# Prepares all ffmpeg overlays
img_overlays = self._filter_strings(image_or_videos_overlays, text_overlays)
    # Prepares all image/video inputs
img_args = self._image_and_video_inputs(image_or_videos_overlays, self.text_imgs)
# assets_args
assets_args = img_args
filter_complex = img_overlays
# Group all args and runs ffmpeg
ffmpeg_output = self._run_ffmpeg_image(assets_args, filter_complex, input_image,
output_image, self.ffmpeg_executable)
    self.logger.debug('ffmpeg ran with output: %s', ffmpeg_output)
return output_image
def _filter_strings(self, images_and_videos, text_lines):
"""Generates a complex filter specification for ffmpeg.
Args:
images_and_videos: a list of image overlay objects
text_lines: a list of text overlay objects
Returns:
A string that represents a complex filter specification, ready to be
passed in to ffmpeg.
"""
# groups overlays and creates the first to loop in
retval = []
overlays = (images_and_videos + text_lines)
input_stream = '0:v'
output_stream = None
# loops concatenating other overlays to the first
for i, ovr in enumerate(overlays):
output_stream = 'vidout%i' % i
use_cropped_text_fix = ovr.get('useCroppedTextFix', False)
# if it is an image overlay, renames it to 'vidX'
if 'image' in ovr:
f = '[%s:v] copy [vid%s];' % ((i + 1), (i + 1))
# if it is a text overlay, convert text to img and name overlay as 'imgX'
else:
f = self._text_filter((i + 1), ovr['text'], ovr['font'],
ovr['font_size'], ovr['font_color'],
ovr['align'], ovr['start_time'],
ovr['end_time'],
ovr.get('angle', None), use_cropped_text_fix)
# Angle should be passed normally, except if we're creating text with
# the cropped text fix, in which case, the angle was already taken
# care of in the text overlay creation.
angle_already_used = ('text' in ovr and use_cropped_text_fix)
# Applies ffmpeg effects to images and text generated images
f += self._video_filter(input_stream, (i + 1), ovr['x'], ovr['y'],
ovr.get('width', '-1'),
ovr.get('height', '-1'), ovr['start_time'],
ovr['end_time'], output_stream,
(ovr.get('angle', None) if not angle_already_used
else None),
ovr.get('fade_in_duration', 0),
ovr.get('fade_out_duration', 0),
ovr.get('align', None),
ovr.get('keep_ratio', None))
retval.append(f)
# makes current concat of overlays the one to concat next overlay
input_stream = output_stream
# maps last output to final video, or input video if there are no filters
if output_stream:
self.out_video = '[%s]' % (output_stream)
else:
self.out_video = '0:v'
# returns all overlays
return retval
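  # Legacy variant kept for reference; its body is identical to process_image
  # above.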
def process_image_old(self, image_or_videos_overlays, text_overlays, input_image,
output_image):
# Holds images generated from text
self.text_imgs = []
# Prepares all ffmpeg overlays
img_overlays = self._filter_strings(image_or_videos_overlays, text_overlays)
    # Prepares all image/video inputs
img_args = self._image_and_video_inputs(image_or_videos_overlays, self.text_imgs)
# assets_args
assets_args = img_args
filter_complex = img_overlays
# Group all args and runs ffmpeg
ffmpeg_output = self._run_ffmpeg_image(assets_args, filter_complex, input_image,
output_image, self.ffmpeg_executable)
    self.logger.debug('ffmpeg ran with output: %s', ffmpeg_output)
return output_image
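# Illustrative sketch only: the overlay dictionaries consumed above are built
# elsewhere; the shapes below are inferred from the keys that _filter_strings
# and the base-class helpers read, and may omit fields FFMPEGGenerator needs.
example_text_overlay = {
    'text': 'Sale!', 'font': 'Roboto-Regular.ttf', 'font_size': 48,
    'font_color': '#FFFFFF', 'align': 'center',
    'x': 100, 'y': 50, 'width': 300, 'height': 80,
    'start_time': 0, 'end_time': 5,
}
example_image_overlay = {
    'image': 'logo.png', 'x': 10, 'y': 10,
    'width': 120, 'height': 120, 'start_time': 0, 'end_time': 5,
}
# generator = ImageGenerator(...)  # constructor arguments come from FFMPEGGenerator
# generator.process_image([example_image_overlay], [example_text_overlay],
#                         'base.png', 'out.png')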
|
[
"rgodoy@google.com"
] |
rgodoy@google.com
|
56d6d53b07810d51b36c4842c6af1666223e5ee3
|
d82ac08e029a340da546e6cfaf795519aca37177
|
/chapter_05_dimensionality_reduction/05_kernel_principal_component.py
|
609ded2a21b83c00d4f66aca64610875be219164
|
[] |
no_license
|
CSwithJC/PythonMachineLearning
|
4409303c3f4d4177dc509c83e240d7a589b144a0
|
0c4508861e182a8eeacd4645fb93b51b698ece0f
|
refs/heads/master
| 2021-09-04T04:28:14.608662
| 2018-01-15T20:25:36
| 2018-01-15T20:25:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,810
|
py
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist, squareform
from scipy.linalg import eigh
from sklearn.datasets import make_moons
from sklearn.datasets import make_circles
from sklearn.decomposition import PCA
from matplotlib.ticker import FormatStrFormatter
""" Kernel PCA
Using Kernel PCA, we perform a nonlinear mapping that transforms
the data onto a higher-dimensional space and use standard PCA
in this higher-dimensional space to project the data back onto a
lower-dimensional space where the samples can be separated by a
linear classifier.
"""
def rbf_kernel_pca(X, gamma, n_components):
    """
    RBF kernel PCA implementation
    Parameters
    ----------
    X: {NumPy ndarray}, shape = [n_samples, n_features]
    gamma: float
        Tuning parameter of the RBF kernel
    n_components: int
        Number of principal components to return
    Returns
    -------
    X_pc: {NumPy ndarray}, shape = [n_samples, n_components]
        Projected dataset
    """
    # Calculate the pairwise squared Euclidean distances
    # in the MxN dimensional dataset.
    sq_dists = pdist(X, 'sqeuclidean')
    # Convert pairwise distances into a square matrix.
    mat_sq_dists = squareform(sq_dists)
    # Compute the symmetric kernel matrix.
    K = np.exp(-gamma * mat_sq_dists)
    # Center the kernel matrix.
    N = K.shape[0]
    one_n = np.ones((N, N)) / N
    K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n)
    # Obtain eigenpairs from the centered kernel matrix;
    # scipy.linalg.eigh returns them in ascending order.
    eigvals, eigvecs = eigh(K)
    # Collect the top k eigenvectors (projected samples);
    # pass a list, not a generator, to np.column_stack.
    X_pc = np.column_stack([eigvecs[:, -i]
                            for i in range(1, n_components + 1)])
    return X_pc
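# Optional cross-check (a sketch, not part of the original walkthrough):
# scikit-learn ships an RBF kernel PCA, so the hand-rolled implementation
# above can be validated against it. The projections should agree only up to
# a per-component scaling and sign flip, since eigenvector signs are
# arbitrary and scikit-learn scales components by their eigenvalues.
# from sklearn.decomposition import KernelPCA
# kpca = KernelPCA(n_components=2, kernel='rbf', gamma=15)
# X_skl = kpca.fit_transform(X)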
# Examples to apply kernel pca to some datasets:
#
# 1. Half-moon shapes:
#
X, y = make_moons(n_samples=100, random_state=123)
plt.scatter(X[y == 0, 0], X[y == 0, 1],
color='red', marker='^', alpha=0.5)
plt.scatter(X[y == 1, 0], X[y == 1, 1],
color='blue', marker='o', alpha=0.5)
plt.show()
# Now, project the dataset via standard PCA:
scikit_pca = PCA(n_components=2)
X_spca = scikit_pca.fit_transform(X)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_spca[y == 0, 0], X_spca[y == 0, 1],
color='red', marker='^', alpha=0.5)
ax[0].scatter(X_spca[y == 1, 0], X_spca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_spca[y == 0, 0], np.zeros((50, 1)) + 0.02,
color='red', marker='^', alpha=0.5)
ax[1].scatter(X_spca[y == 1, 0], np.zeros((50, 1)) - 0.02,
color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.show()
# Now, try again using our rbf_kernel_pca function
X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_kpca[y == 0, 0], X_kpca[y == 0, 1],
color='red', marker='^', alpha=0.5)
ax[0].scatter(X_kpca[y == 1, 0], X_kpca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_kpca[y == 0, 0], np.zeros((50, 1)) + 0.02,
color='red', marker='^', alpha=0.5)
ax[1].scatter(X_kpca[y == 1, 0], np.zeros((50, 1)) - 0.02,
color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
ax[0].xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
ax[1].xaxis.set_major_formatter(FormatStrFormatter('%0.1f'))
plt.show()
# In this new plot, we see that the two classes (circles and triangles)
# are linearly well separated, so that this becomes a suitable training
# dataset for linear classifiers.
#
# 2. Concentric circles:
#
X, y = make_circles(n_samples=1000, random_state=123,
noise=0.1, factor=0.2)
plt.scatter(X[y == 0, 0], X[y == 0, 1],
color='red', marker='^', alpha=0.5)
plt.scatter(X[y == 1, 0], X[y == 1, 1],
color='blue', marker='o', alpha=0.5)
plt.show()
# PCA Approach:
scikit_pca = PCA(n_components=2)
X_spca = scikit_pca.fit_transform(X)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_spca[y == 0, 0], X_spca[y == 0, 1],
color='red', marker='^', alpha=0.5)
ax[0].scatter(X_spca[y == 1, 0], X_spca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_spca[y == 0, 0], np.zeros((500, 1)) + 0.02,
color='red', marker='^', alpha=0.5)
ax[1].scatter(X_spca[y == 1, 0], np.zeros((500, 1)) - 0.02,
color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.show()
# Again, standard PCA does not produce a good result.
# Now, again using our RBF Kernel PCA Implementation:
X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2)
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3))
ax[0].scatter(X_kpca[y == 0, 0], X_kpca[y == 0, 1],
color='red', marker='^', alpha=0.5)
ax[0].scatter(X_kpca[y == 1, 0], X_kpca[y == 1, 1],
color='blue', marker='o', alpha=0.5)
ax[1].scatter(X_kpca[y == 0, 0], np.zeros((500, 1)) + 0.02,
color='red', marker='^', alpha=0.5)
ax[1].scatter(X_kpca[y == 1, 0], np.zeros((500, 1)) - 0.02,
color='blue', marker='o', alpha=0.5)
ax[0].set_xlabel('PC1')
ax[0].set_ylabel('PC2')
ax[1].set_ylim([-1, 1])
ax[1].set_yticks([])
ax[1].set_xlabel('PC1')
plt.show()
# Again, RBF kernel PCA projected the data onto a new
# subspace where the two classes become linearly separable,
# as seen in the new plot.
|
[
"jean.mendez2@upr.edu"
] |
jean.mendez2@upr.edu
|