| column | dtype | values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2-616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0-69 |
| license_type | string | 2 classes |
| repo_name | string | length 5-118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4-63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k-686M (nullable) |
| star_events_count | int64 | 0-209k |
| fork_events_count | int64 | 0-110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 classes |
| src_encoding | string | 30 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2-10.3M |
| extension | string | 246 classes |
| content | string | length 2-10.3M |
| authors | list | length 1 |
| author_id | string | length 0-212 |
10bd440af25fcfc5967e75f66f53e53c0bd41ade
|
4ea0c695055d4707e7875d9379b2364537a40bb8
|
/run.py
|
1d4baee84bee6c5a43d73f7ba025fe67e0079784
|
[] |
no_license
|
richardangell/bank-statement-getter
|
ddb31dfdc6bb22489d12f7cc413636bdb9cabd70
|
7538eaa8863e1c6a1bfe9a73cd267fc1679d67d3
|
refs/heads/master
| 2021-01-24T01:34:43.959296
| 2020-11-10T20:29:05
| 2020-11-10T20:29:05
| 122,814,261
| 0
| 0
| null | 2020-11-10T20:29:06
| 2018-02-25T07:20:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,424
|
py
|
from bankstatementgetter.export import BankStatementExporter
from bankstatementgetter.google_sheets import GoogleSheetsManager

class ExporterAndUploader(BankStatementExporter, GoogleSheetsManager):
    """Class combining the exporting and google sheets functionality to run the entire process."""
    def __init__(self):
        BankStatementExporter.__init__(self)
        GoogleSheetsManager.__init__(self)
    def run(self):
        """Method to go through the whole process of exporting the statement and uploading it to
        google sheets.
        """
        # download the current google sheet
        google_sheets_statement = self.download_google_sheet()
        # get the max date in the current statement
        current_max_date = self.get_max_date_from_statement(google_sheets_statement)
        # export bank statement with start_date = current maximum in the statement
        self.export(start_date=current_max_date)
        # read in exported statement
        downloaded_statement = self.load_statement(self.downloaded_file)
        # append the new downloaded statement to the current one from google drive
        appended_statements = self.update_statement(google_sheets_statement, downloaded_statement)
        # upload to google sheets
        self.upload_to_google_sheet(appended_statements)

if __name__ == '__main__':
    exporter = ExporterAndUploader()
    exporter.run()
|
[
"noreply@github.com"
] |
richardangell.noreply@github.com
|
a9a74d10d303248d0045e537304581b2556c5942
|
47191dcffda07d0fc1c91a4a5e28fb5f0f264b42
|
/python/afghan_constants.py
|
8a99ce9194586fb1f2082e195e6662110eaeb271
|
[] |
no_license
|
sharonyang/afghan_election_analysis
|
b2950c61dddccaca80afa4819da235f0c3edad1c
|
afe2f2c5f48af711ba30c2a37f6819145906613b
|
refs/heads/master
| 2021-01-19T01:42:22.040827
| 2016-07-07T07:04:59
| 2016-07-07T07:04:59
| 31,369,333
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 903
|
py
|
# By: Laksh Bhasin
# Description: Useful constants for this election. Import this into other
# scripts.
# The total voting age population of Afghanistan, as well as the total
# population of Afghanistan. Both of these figures come from
# http://www.idea.int/en/vt/countryview.cfm?CountryCode=AF
VOTING_AGE_POPULATION = 16208255.0
AFGHAN_POPULATION = 31822848.0
# We will assume that the fraction of people who are eligible to vote in
# each district/province is just the ratio of the above two quantities.
# This homogeneity assumption might not always hold, but it's the best we
# can do. Also, this is really a *maximum* voting-eligible fraction, based
# on how many people are of voting age (I'd imagine that prisoners can't
# vote).
VOTING_FRACTION = VOTING_AGE_POPULATION/float(AFGHAN_POPULATION)
# Colors to use for the two candidates' plots.
ABDULLAH_COLOR = "#FFAE19"
GHANI_COLOR = "#72AFE4"
|
[
"lbhasin@caltech.edu"
] |
lbhasin@caltech.edu
|
a2a811d98170b636628d0408cd93c6f7296bad6e
|
1b0b7dc0e5ca5e944ca557ccc4bee87f1ae4fe68
|
/catalog/migrations/0004_bookinstance_borrower.py
|
a6e7efc7d50f0c8aca24c5ec4d5756788d9ad57f
|
[] |
no_license
|
ssotirop/locallibrary-py-django
|
af6ccb3bfc84f6a16530c732136b40001bbb1bfd
|
c10805def41fdf2aabd67b633fdf5918e119991e
|
refs/heads/master
| 2023-04-28T12:06:23.108221
| 2019-05-24T11:01:52
| 2019-05-24T11:01:52
| 188,205,171
| 0
| 0
| null | 2023-04-21T20:31:57
| 2019-05-23T09:40:13
|
Python
|
UTF-8
|
Python
| false
| false
| 604
|
py
|
# Generated by Django 2.2.1 on 2019-05-19 11:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion

class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('catalog', '0003_book_language'),
    ]

    operations = [
        migrations.AddField(
            model_name='bookinstance',
            name='borrower',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
[
"sotiris.sotiropoulos@gmail.com"
] |
sotiris.sotiropoulos@gmail.com
|
d16c5c146b7726a1b886ecb41cf281755815d699
|
2265b7f2169d03b2bc6cc33643342cfbee3e97dc
|
/04_Loop/0412-ZigZag1.py
|
b68e543dad5a8c9dc21112e0ce25c823ac31c661
|
[] |
no_license
|
natTP/2110101-comp-prog
|
812b3d789705d03b3f16c4adb71f04cb88312604
|
222eabd4e548bd5c64eebd8213e8550f6115f7ab
|
refs/heads/master
| 2022-12-15T15:43:39.259673
| 2020-09-04T17:23:54
| 2020-09-04T17:23:54
| 292,893,944
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
n = int(input())
left = []  # x1y2
right = []  # y1x2
for i in range(n):
    inp = [int(e) for e in input().split()]
    if i % 2 == 0:
        left.append(inp[0])
        right.append(inp[1])
    else:
        left.append(inp[1])
        right.append(inp[0])
instruction = input()
mini = float('inf')
maxi = float('-inf')
if instruction == "Zig-Zag":
    # find min of left, max of right
    for i in range(n):
        mini = min(mini, left[i])
        maxi = max(maxi, right[i])
elif instruction == "Zag-Zig":
    # find max of left, min of right
    for i in range(n):
        maxi = max(maxi, left[i])
        mini = min(mini, right[i])
print(mini, maxi)
|
[
"noreply@github.com"
] |
natTP.noreply@github.com
|
cc23c1978efa6079896499041f49373ee4f47218
|
a8452675352388feb1e8909a067d8523f8eceea5
|
/aux_files/twentyfour.py
|
eb15571cf75f54ec6f839133348b547a31976ddf
|
[] |
no_license
|
julioaamaral/ml
|
07ac0fa1fd79d52edd654ee19e0cb2a8767d2d75
|
bbe43fa747e1d23e9f4fa5bc7440ca3df313a213
|
refs/heads/master
| 2020-04-05T14:13:12.478943
| 2018-03-16T22:57:02
| 2018-03-16T22:57:02
| 94,831,824
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 68
|
py
|
...
...
#for a given input x
predictions = model.predict(X)
...
...
|
[
"noreply@github.com"
] |
julioaamaral.noreply@github.com
|
953365f16426725c60701d09491b63b7293624d2
|
fbab057c2a98242def74f4e75cd90a1706b76ec3
|
/code/imdbScraping/review/migrations/0004_review_is_positive.py
|
fef5b567dca9f6e627e53e60beb30deb7bb09503
|
[] |
no_license
|
javatarx/imdb-reviews
|
271ea3c45d0d1d7a1e3d18ea69b4ad3cdd40139b
|
679be55fe63d69c4250cd2ecaf7948df90fce213
|
refs/heads/master
| 2022-12-12T12:56:19.897786
| 2018-12-18T21:30:59
| 2018-12-18T21:30:59
| 161,176,622
| 0
| 0
| null | 2022-12-08T01:28:16
| 2018-12-10T13:08:05
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 394
|
py
|
# Generated by Django 2.1.4 on 2018-12-08 21:43
from django.db import migrations, models

class Migration(migrations.Migration):

    dependencies = [
        ('review', '0003_review_rating'),
    ]

    operations = [
        migrations.AddField(
            model_name='review',
            name='is_positive',
            field=models.BooleanField(blank=True, null=True),
        ),
    ]
|
[
"lambdev@upeu.edu.pe"
] |
lambdev@upeu.edu.pe
|
39c2edd65a6079cb4857923e9a7fcf48e65aa50e
|
d16a23c513ec6155b25c692eed28ae1a6aac8f72
|
/python1.4.py
|
9ee4435ba6187d664efced34fe9b1366dc295a1f
|
[] |
no_license
|
stewartjonte/codecademy
|
cc66359b89a6bb9b872a699bbe93c6b73d48cd5f
|
ccf58bd9018d79aab6a38e617b3b6c3bb3fa707c
|
refs/heads/master
| 2020-07-09T01:23:26.782858
| 2019-08-22T17:17:05
| 2019-08-22T17:17:05
| 203,835,072
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 48
|
py
|
print"hello " + "Jack Dylan Earl Stewart-Jonte"
|
[
"54412122+stewartjonte@users.noreply.github.com"
] |
54412122+stewartjonte@users.noreply.github.com
|
786ab5906e080853b8f60967a91a017764f24ad3
|
2171068726a5c9769697bb0292edc4370efa6454
|
/provision-service/scripts/install.py
|
fae06c249c10121efd867b18b5bc83f920423015
|
[] |
no_license
|
vincentchivas/nautilus
|
d896e9a6f775fa639fbbbadbdc2a362040afe1f5
|
fdbec9bba17d4d63092d35b708d82df82d68208f
|
refs/heads/master
| 2021-01-22T11:36:53.234945
| 2014-08-21T14:53:47
| 2014-08-21T14:53:47
| 23,190,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
'''
Created on Nov 1, 2011
@author: kunli
'''
import sys
import os

PYH_FILE = 'provision.pth'

if __name__ == '__main__':
    paths = filter(lambda p: p.endswith('dist-packages'), sys.path)
    if len(paths) > 0:
        dist_path = paths[0]
        pth_path = os.path.join(dist_path, PYH_FILE)
        if os.path.exists(pth_path):
            os.remove(pth_path)
        if not os.path.exists(PYH_FILE):
            with open(PYH_FILE, "w+") as fp:
                fp.write(os.getcwd())
        print 'Modifying python path...'
        os.link(PYH_FILE, pth_path)
|
[
"vincent@ubuntu.ubuntu-domain"
] |
vincent@ubuntu.ubuntu-domain
|
b601eaa6a429e27a4543725d7905f4d287da71a3
|
dceed3b97b902a6b526106e37754fba3b0e17774
|
/面向对象/call方法.py
|
18b38bc7a4ccfa1b50f24a336a6f3bd387114ff8
|
[] |
no_license
|
awuxina/www
|
2ef8e92eb4d290b2eafa271b63b7bacbc43a32be
|
76746bbff013295348041cf9b95cb81d3a0c1fcf
|
refs/heads/master
| 2020-03-22T01:37:13.076909
| 2018-07-01T09:05:44
| 2018-07-01T09:05:44
| 139,317,866
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 236
|
py
|
class People:
    def __init__(self, name):
        self.name = name

    def __call__(self, *args, **kwargs):
        print('call')

p = People('ecall')
p()
print(callable(p))
print(type(p))
print(type(People))
# type ----> class ----> object
|
[
"97550080@qq.com"
] |
97550080@qq.com
|
42c0e130fbd9c011da6ab77279953d0c461958f5
|
09060df69b191c048339ba449ee558ed9187dc3e
|
/comprehension_sets.py
|
b35abc371a1ca6fb0b52505eef0c7373ab3fa362
|
[] |
no_license
|
asad632/Python
|
79c4201949d6fec64ba7cde45401724b790e8fdd
|
4dde2c2330e6b480049b7d27aab86d10d4c9fed9
|
refs/heads/main
| 2023-01-31T13:30:43.967450
| 2020-12-09T16:19:01
| 2020-12-09T16:19:01
| 317,615,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
# # set in comprehension
# s={i**2 for i in range(1,9)}
# print(s)
#
#
# names ={'asad', 'khan', 'ullah' }
# asad={name[0] for name in names}
# print(asad)
|
[
"noreply@github.com"
] |
asad632.noreply@github.com
|
abf2b403e29ca06c83b56f0fe6e6d2f6ad9a9656
|
14a1312dfb7c4d5e2b76f49b0837cc024f5a1295
|
/python/servicegate/logic/messagehandler/handlers.py
|
a75d7c114ddd185887797896f754c7050ed18445
|
[] |
no_license
|
bropony/gamit
|
b3a493c55407efa83ae20286b1e624b280b46494
|
47811e2cfe67c3c0de4c4be7394dd30e48732799
|
refs/heads/master
| 2020-05-17T01:51:38.887194
| 2015-11-05T12:57:13
| 2015-11-05T12:57:13
| 36,106,737
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 799
|
py
|
"""
@author: mahanzhou
@date: 8/6/15
@file:
@desc:
"""
from gamit.log.logger import Logger
from gamit.message.commandhandler import CommandHandlerBase
from message.db.mongodb.posttables import TSysTopic
from resources.systopicmanager import SysTopicManager
class SysTopicMessageHandler(CommandHandlerBase):
def onMessage(self, command, toIdList, data):
"""
:type command: int
:type toIdList: list[int]
:type data: TSysTopic
"""
if not isinstance(data, TSysTopic):
Logger.log("SysTopicMessageHandler.onMessage. Unexpected dataType:", data.__class__.__name__)
return
Logger.log("SysTopicMessageHandler.onMessage: new topic incomming:", data.title)
SysTopicManager.addSysTopics([data], pushBack=False)
|
[
"ahda@qq.com"
] |
ahda@qq.com
|
ab10bfb135f46d9dc1a3686506b2c7659799e7ba
|
82e7a4dbe6b78ffa988b363a514d5fd6e34b0ad5
|
/__init__.py
|
11f9a68c2bc6d5d9c6c01f1b388e925898bb9219
|
[] |
no_license
|
scidam/cmsplugin-mathjax
|
6b7fdf089cc48e025b800f26c15bcf6cd0bd5ff4
|
44126b74be3a0dea72289e4461b9915918a8d856
|
refs/heads/master
| 2021-01-10T04:08:23.521096
| 2015-12-03T06:02:43
| 2015-12-03T06:02:43
| 47,104,334
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 664
|
py
|
def load_defaults(application):
    """Load cmsplugin-mathjax default settings"""
    try:
        __import__('%s.settings' % application)
        import sys
        _app_settings = sys.modules['%s.settings' % application]
        _def_settings = sys.modules['django.conf.global_settings']
        _settings = sys.modules['django.conf'].settings
        for _k in dir(_app_settings):
            if _k.isupper():
                setattr(_def_settings, _k, getattr(_app_settings, _k))
                if not hasattr(_settings, _k):
                    setattr(_settings, _k, getattr(_app_settings, _k))
    except ImportError:
        pass

load_defaults(__name__)
|
[
"kislov@easydan.com"
] |
kislov@easydan.com
|
5e985e773a00c8bcfe5a71a986f6c77253f7b11d
|
800628018a3ca4aac5339e30662826a11d2fbb6e
|
/python/ray/serve/controller.py
|
e531e2f19bccc972b3f7a4d8679f4895cc93a49c
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
liuyuqi123/ray
|
4d62add89aa9087f41eb19a11880770750a37aac
|
2e2b8cefbf6f51046dd79b90bf751a31fc66ed74
|
refs/heads/master
| 2023-02-06T05:15:16.642007
| 2020-12-29T10:12:26
| 2020-12-29T10:12:26
| 294,570,275
| 0
| 0
|
Apache-2.0
| 2020-12-29T10:12:27
| 2020-09-11T02:11:04
|
Python
|
UTF-8
|
Python
| false
| false
| 46,460
|
py
|
import asyncio
from asyncio.futures import Future
from collections import defaultdict
from itertools import chain
import os
import random
import time
from dataclasses import dataclass, field
from typing import Dict, Any, List, Optional, Set, Tuple
from uuid import uuid4, UUID
from pydantic import BaseModel
import ray
import ray.cloudpickle as pickle
from ray.serve.autoscaling_policy import BasicAutoscalingPolicy
from ray.serve.backend_worker import create_backend_replica
from ray.serve.constants import (
ASYNC_CONCURRENCY,
SERVE_PROXY_NAME,
LongPollKey,
)
from ray.serve.http_proxy import HTTPProxyActor
from ray.serve.kv_store import RayInternalKVStore
from ray.serve.exceptions import RayServeException
from ray.serve.utils import (format_actor_name, get_random_letters, logger,
try_schedule_resources_on_nodes, get_all_node_ids)
from ray.serve.config import BackendConfig, ReplicaConfig, HTTPConfig
from ray.serve.long_poll import LongPollHost
from ray.actor import ActorHandle
import numpy as np
# Used for testing purposes only. If this is set, the controller will crash
# after writing each checkpoint with the specified probability.
_CRASH_AFTER_CHECKPOINT_PROBABILITY = 0
CHECKPOINT_KEY = "serve-controller-checkpoint"
# Feature flag for controller resource checking. If true, controller will
# error if the desired replicas exceed current resource availability.
_RESOURCE_CHECK_ENABLED = True
# How often to call the control loop on the controller.
CONTROL_LOOP_PERIOD_S = 1.0
REPLICA_STARTUP_TIME_WARNING_S = 5
# TypeDefs
BackendTag = str
EndpointTag = str
ReplicaTag = str
NodeId = str
GoalId = int
Duration = float
class TrafficPolicy:
def __init__(self, traffic_dict: Dict[str, float]) -> None:
self.traffic_dict: Dict[str, float] = dict()
self.shadow_dict: Dict[str, float] = dict()
self.set_traffic_dict(traffic_dict)
def set_traffic_dict(self, traffic_dict: Dict[str, float]) -> None:
prob = 0
for backend, weight in traffic_dict.items():
if weight < 0:
raise ValueError(
"Attempted to assign a weight of {} to backend '{}'. "
"Weights cannot be negative.".format(weight, backend))
prob += weight
# These weights will later be plugged into np.random.choice, which
# uses a tolerance of 1e-8.
if not np.isclose(prob, 1, atol=1e-8):
raise ValueError("Traffic dictionary weights must sum to 1, "
"currently they sum to {}".format(prob))
self.traffic_dict = traffic_dict
def set_shadow(self, backend: str, proportion: float):
if proportion == 0 and backend in self.shadow_dict:
del self.shadow_dict[backend]
else:
self.shadow_dict[backend] = proportion
def __repr__(self) -> str:
return f"<Traffic {self.traffic_dict}; Shadow {self.shadow_dict}>"
class HTTPState:
def __init__(self, controller_name: str, detached: bool,
config: HTTPConfig):
self._controller_name = controller_name
self._detached = detached
self._config = config
self._proxy_actors: Dict[NodeId, ActorHandle] = dict()
# Will populate self.proxy_actors with existing actors.
self._start_proxies_if_needed()
def get_config(self):
return self._config
def get_http_proxy_handles(self) -> Dict[NodeId, ActorHandle]:
return self._proxy_actors
def update(self):
self._start_proxies_if_needed()
self._stop_proxies_if_needed()
def _start_proxies_if_needed(self) -> None:
"""Start a proxy on every node if it doesn't already exist."""
if self._config.host is None:
return
for node_id, node_resource in get_all_node_ids():
if node_id in self._proxy_actors:
continue
name = format_actor_name(SERVE_PROXY_NAME, self._controller_name,
node_id)
try:
proxy = ray.get_actor(name)
except ValueError:
logger.info("Starting HTTP proxy with name '{}' on node '{}' "
"listening on '{}:{}'".format(
name, node_id, self._config.host,
self._config.port))
proxy = HTTPProxyActor.options(
name=name,
lifetime="detached" if self._detached else None,
max_concurrency=ASYNC_CONCURRENCY,
max_restarts=-1,
max_task_retries=-1,
resources={
node_resource: 0.01
},
).remote(
self._config.host,
self._config.port,
controller_name=self._controller_name,
http_middlewares=self._config.middlewares)
self._proxy_actors[node_id] = proxy
def _stop_proxies_if_needed(self) -> bool:
"""Removes proxy actors from any nodes that no longer exist."""
all_node_ids = {node_id for node_id, _ in get_all_node_ids()}
to_stop = []
for node_id in self._proxy_actors:
if node_id not in all_node_ids:
logger.info("Removing HTTP proxy on removed node '{}'.".format(
node_id))
to_stop.append(node_id)
for node_id in to_stop:
proxy = self._proxy_actors.pop(node_id)
ray.kill(proxy, no_restart=True)
class BackendInfo(BaseModel):
# TODO(architkulkarni): Add type hint for worker_class after upgrading
# cloudpickle and adding types to RayServeWrappedReplica
worker_class: Any
backend_config: BackendConfig
replica_config: ReplicaConfig
class Config:
# TODO(architkulkarni): Remove once ReplicaConfig is a pydantic
# model
arbitrary_types_allowed = True
class BackendState:
def __init__(self, checkpoint: bytes = None):
self.backends: Dict[BackendTag, BackendInfo] = dict()
if checkpoint is not None:
self.backends = pickle.loads(checkpoint)
def checkpoint(self):
return pickle.dumps(self.backends)
def get_backend_configs(self) -> Dict[BackendTag, BackendConfig]:
return {
tag: info.backend_config
for tag, info in self.backends.items()
}
def get_backend(self, backend_tag: BackendTag) -> Optional[BackendInfo]:
return self.backends.get(backend_tag)
def add_backend(self,
backend_tag: BackendTag,
backend_info: BackendInfo,
goal_id: GoalId = 0) -> None:
self.backends[backend_tag] = backend_info
class EndpointState:
def __init__(self, checkpoint: bytes = None):
self.routes: Dict[BackendTag, Tuple[EndpointTag, Any]] = dict()
self.traffic_policies: Dict[EndpointTag, TrafficPolicy] = dict()
if checkpoint is not None:
self.routes, self.traffic_policies = pickle.loads(checkpoint)
def checkpoint(self):
return pickle.dumps((self.routes, self.traffic_policies))
def get_endpoints(self) -> Dict[EndpointTag, Dict[str, Any]]:
endpoints = {}
for route, (endpoint, methods) in self.routes.items():
if endpoint in self.traffic_policies:
traffic_policy = self.traffic_policies[endpoint]
traffic_dict = traffic_policy.traffic_dict
shadow_dict = traffic_policy.shadow_dict
else:
traffic_dict = {}
shadow_dict = {}
endpoints[endpoint] = {
"route": route if route.startswith("/") else None,
"methods": methods,
"traffic": traffic_dict,
"shadows": shadow_dict,
}
return endpoints
@dataclass
class ActorStateReconciler:
controller_name: str = field(init=True)
detached: bool = field(init=True)
backend_replicas: Dict[BackendTag, Dict[ReplicaTag, ActorHandle]] = field(
default_factory=lambda: defaultdict(dict))
backend_replicas_to_start: Dict[BackendTag, List[ReplicaTag]] = field(
default_factory=lambda: defaultdict(list))
backend_replicas_to_stop: Dict[BackendTag, List[Tuple[
ReplicaTag, Duration]]] = field(
default_factory=lambda: defaultdict(list))
backends_to_remove: List[BackendTag] = field(default_factory=list)
# NOTE(ilr): These are not checkpointed, but will be recreated by
# `_enqueue_pending_scale_changes_loop`.
currently_starting_replicas: Dict[asyncio.Future, Tuple[
BackendTag, ReplicaTag, ActorHandle]] = field(default_factory=dict)
currently_stopping_replicas: Dict[asyncio.Future, Tuple[
BackendTag, ReplicaTag]] = field(default_factory=dict)
def __getstate__(self):
state = self.__dict__.copy()
del state["currently_stopping_replicas"]
del state["currently_starting_replicas"]
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.currently_stopping_replicas = {}
self.currently_starting_replicas = {}
# TODO(edoakes): consider removing this and just using the names.
def get_replica_handles(self) -> List[ActorHandle]:
return list(
chain.from_iterable([
replica_dict.values()
for replica_dict in self.backend_replicas.values()
]))
def get_replica_tags(self) -> List[ReplicaTag]:
return list(
chain.from_iterable([
replica_dict.keys()
for replica_dict in self.backend_replicas.values()
]))
async def _start_backend_replica(self, backend_state: BackendState,
backend_tag: BackendTag,
replica_tag: ReplicaTag) -> ActorHandle:
"""Start a replica and return its actor handle.
Checks if the named actor already exists before starting a new one.
Assumes that the backend configuration is already in the Goal State.
"""
# NOTE(edoakes): the replicas may already be created if we
# failed after creating them but before writing a
# checkpoint.
replica_name = format_actor_name(replica_tag, self.controller_name)
try:
replica_handle = ray.get_actor(replica_name)
except ValueError:
logger.debug("Starting replica '{}' for backend '{}'.".format(
replica_tag, backend_tag))
backend_info = backend_state.get_backend(backend_tag)
replica_handle = ray.remote(backend_info.worker_class).options(
name=replica_name,
lifetime="detached" if self.detached else None,
max_restarts=-1,
max_task_retries=-1,
**backend_info.replica_config.ray_actor_options).remote(
backend_tag, replica_tag,
backend_info.replica_config.actor_init_args,
backend_info.backend_config, self.controller_name)
return replica_handle
def _scale_backend_replicas(
self,
backends: Dict[BackendTag, BackendInfo],
backend_tag: BackendTag,
num_replicas: int,
force_kill: bool = False,
) -> None:
"""Scale the given backend to the number of replicas.
NOTE: this does not actually start or stop the replicas, but instead
adds the intention to start/stop them to self.backend_replicas_to_start
and self.backend_replicas_to_stop. The caller is responsible for then
first writing a checkpoint and then actually starting/stopping the
intended replicas. This avoids inconsistencies with starting/stopping a
replica and then crashing before writing a checkpoint.
"""
logger.debug("Scaling backend '{}' to {} replicas".format(
backend_tag, num_replicas))
assert (backend_tag in backends
), "Backend {} is not registered.".format(backend_tag)
assert num_replicas >= 0, ("Number of replicas must be"
" greater than or equal to 0.")
current_num_replicas = len(self.backend_replicas[backend_tag])
delta_num_replicas = num_replicas - current_num_replicas
backend_info: BackendInfo = backends[backend_tag]
if delta_num_replicas > 0:
can_schedule = try_schedule_resources_on_nodes(requirements=[
backend_info.replica_config.resource_dict
for _ in range(delta_num_replicas)
])
if _RESOURCE_CHECK_ENABLED and not all(can_schedule):
num_possible = sum(can_schedule)
raise RayServeException(
"Cannot scale backend {} to {} replicas. Ray Serve tried "
"to add {} replicas but the resources only allows {} "
"to be added. To fix this, consider scaling to replica to "
"{} or add more resources to the cluster. You can check "
"avaiable resources with ray.nodes().".format(
backend_tag, num_replicas, delta_num_replicas,
num_possible, current_num_replicas + num_possible))
logger.debug("Adding {} replicas to backend {}".format(
delta_num_replicas, backend_tag))
for _ in range(delta_num_replicas):
replica_tag = "{}#{}".format(backend_tag, get_random_letters())
self.backend_replicas_to_start[backend_tag].append(replica_tag)
elif delta_num_replicas < 0:
logger.debug("Removing {} replicas from backend '{}'".format(
-delta_num_replicas, backend_tag))
assert len(
self.backend_replicas[backend_tag]) >= delta_num_replicas
for _ in range(-delta_num_replicas):
replica_tag, _ = self.backend_replicas[backend_tag].popitem()
if len(self.backend_replicas[backend_tag]) == 0:
del self.backend_replicas[backend_tag]
graceful_timeout_s = (backend_info.backend_config.
experimental_graceful_shutdown_timeout_s)
if force_kill:
graceful_timeout_s = 0
self.backend_replicas_to_stop[backend_tag].append((
replica_tag,
graceful_timeout_s,
))
async def _enqueue_pending_scale_changes_loop(self,
backend_state: BackendState):
for backend_tag, replicas_to_create in self.backend_replicas_to_start.\
items():
for replica_tag in replicas_to_create:
replica_handle = await self._start_backend_replica(
backend_state, backend_tag, replica_tag)
ready_future = replica_handle.ready.remote().as_future()
self.currently_starting_replicas[ready_future] = (
backend_tag, replica_tag, replica_handle)
for backend_tag, replicas_to_stop in (
self.backend_replicas_to_stop.items()):
for replica_tag, shutdown_timeout in replicas_to_stop:
replica_name = format_actor_name(replica_tag,
self.controller_name)
async def kill_actor(replica_name_to_use):
# NOTE: the replicas may already be stopped if we failed
# after stopping them but before writing a checkpoint.
try:
replica = ray.get_actor(replica_name_to_use)
except ValueError:
return
try:
await asyncio.wait_for(
replica.drain_pending_queries.remote(),
timeout=shutdown_timeout)
except asyncio.TimeoutError:
# Graceful period passed, kill it forcefully.
logger.debug(
f"{replica_name_to_use} did not shutdown after "
f"{shutdown_timeout}s, killing.")
finally:
ray.kill(replica, no_restart=True)
self.currently_stopping_replicas[asyncio.ensure_future(
kill_actor(replica_name))] = (backend_tag, replica_tag)
async def _check_currently_starting_replicas(self) -> int:
"""Returns the number of pending replicas waiting to start"""
in_flight: Set[Future[Any]] = set()
if self.currently_starting_replicas:
done, in_flight = await asyncio.wait(
list(self.currently_starting_replicas.keys()), timeout=0)
for fut in done:
(backend_tag, replica_tag,
replica_handle) = self.currently_starting_replicas.pop(fut)
self.backend_replicas[backend_tag][
replica_tag] = replica_handle
backend = self.backend_replicas_to_start.get(backend_tag)
if backend:
try:
backend.remove(replica_tag)
except ValueError:
pass
if len(backend) == 0:
del self.backend_replicas_to_start[backend_tag]
return len(in_flight)
async def _check_currently_stopping_replicas(self) -> int:
"""Returns the number of replicas waiting to stop"""
in_flight: Set[Future[Any]] = set()
if self.currently_stopping_replicas:
done_stoppping, in_flight = await asyncio.wait(
list(self.currently_stopping_replicas.keys()), timeout=0)
for fut in done_stoppping:
(backend_tag,
replica_tag) = self.currently_stopping_replicas.pop(fut)
backend = self.backend_replicas_to_stop.get(backend_tag)
if backend:
try:
backend.remove(replica_tag)
except ValueError:
pass
if len(backend) == 0:
del self.backend_replicas_to_stop[backend_tag]
return len(in_flight)
async def backend_control_loop(self):
start = time.time()
prev_warning = start
need_to_continue = True
num_pending_starts, num_pending_stops = 0, 0
while need_to_continue:
if time.time() - prev_warning > REPLICA_STARTUP_TIME_WARNING_S:
prev_warning = time.time()
delta = time.time() - start
logger.warning(
f"Waited {delta:.2f}s for {num_pending_starts} replicas "
f"to start up or {num_pending_stops} replicas to shutdown."
" Make sure there are enough resources to create the "
"replicas.")
num_pending_starts = await self._check_currently_starting_replicas(
)
num_pending_stops = await self._check_currently_stopping_replicas()
need_to_continue = num_pending_starts or num_pending_stops
await asyncio.sleep(1)
def _recover_actor_handles(self) -> None:
# Fetch actor handles for all of the backend replicas in the system.
# All of these backend_replicas are guaranteed to already exist because
# they would not be written to a checkpoint in self.backend_replicas
# until they were created.
for backend_tag, replica_dict in self.backend_replicas.items():
for replica_tag in replica_dict.keys():
replica_name = format_actor_name(replica_tag,
self.controller_name)
self.backend_replicas[backend_tag][
replica_tag] = ray.get_actor(replica_name)
async def _recover_from_checkpoint(
self, backend_state: BackendState, controller: "ServeController"
) -> Dict[BackendTag, BasicAutoscalingPolicy]:
self._recover_actor_handles()
autoscaling_policies = dict()
for backend, info in backend_state.backends.items():
metadata = info.backend_config.internal_metadata
if metadata.autoscaling_config is not None:
autoscaling_policies[backend] = BasicAutoscalingPolicy(
backend, metadata.autoscaling_config)
# Start/stop any pending backend replicas.
await self._enqueue_pending_scale_changes_loop(backend_state)
await self.backend_control_loop()
return autoscaling_policies
@dataclass
class FutureResult:
# Goal requested when this future was created
requested_goal: Dict[str, Any]
@dataclass
class Checkpoint:
endpoint_state_checkpoint: bytes
backend_state_checkpoint: bytes
reconciler: ActorStateReconciler
# TODO(ilr) Rename reconciler to PendingState
inflight_reqs: Dict[uuid4, FutureResult]
@ray.remote
class ServeController:
"""Responsible for managing the state of the serving system.
The controller implements fault tolerance by persisting its state in
a new checkpoint each time a state change is made. If the actor crashes,
the latest checkpoint is loaded and the state is recovered. Checkpoints
are written/read using a provided KV-store interface.
All hard state in the system is maintained by this actor and persisted via
these checkpoints. Soft state required by other components is fetched by
those actors from this actor on startup and updates are pushed out from
this actor.
All other actors started by the controller are named, detached actors
so they will not fate share with the controller if it crashes.
The following guarantees are provided for state-changing calls to the
controller:
- If the call succeeds, the change was made and will be reflected in
the system even if the controller or other actors die unexpectedly.
- If the call fails, the change may have been made but isn't guaranteed
to have been. The client should retry in this case. Note that this
requires all implementations here to be idempotent.
"""
async def __init__(self,
controller_name: str,
http_config: HTTPConfig,
detached: bool = False):
# Used to read/write checkpoints.
self.kv_store = RayInternalKVStore(namespace=controller_name)
self.actor_reconciler = ActorStateReconciler(controller_name, detached)
# backend -> AutoscalingPolicy
self.autoscaling_policies = dict()
# Dictionary of backend_tag -> proxy_name -> most recent queue length.
self.backend_stats = defaultdict(lambda: defaultdict(dict))
# Used to ensure that only a single state-changing operation happens
# at any given time.
self.write_lock = asyncio.Lock()
# Map of awaiting results
# TODO(ilr): Checkpoint this once this becomes asynchronous
self.inflight_results: Dict[UUID, asyncio.Event] = dict()
self._serializable_inflight_results: Dict[UUID, FutureResult] = dict()
# HTTP state doesn't currently require a checkpoint.
self.http_state = HTTPState(controller_name, detached, http_config)
checkpoint_bytes = self.kv_store.get(CHECKPOINT_KEY)
if checkpoint_bytes is None:
logger.debug("No checkpoint found")
self.backend_state = BackendState()
self.endpoint_state = EndpointState()
else:
checkpoint: Checkpoint = pickle.loads(checkpoint_bytes)
self.backend_state = BackendState(
checkpoint=checkpoint.backend_state_checkpoint)
self.endpoint_state = EndpointState(
checkpoint=checkpoint.endpoint_state_checkpoint)
await self._recover_from_checkpoint(checkpoint)
# NOTE(simon): Currently we do all-to-all broadcast. This means
# any listeners will receive notification for all changes. This
# can be problem at scale, e.g. updating a single backend config
# will send over the entire configs. In the future, we should
# optimize the logic to support subscription by key.
self.long_poll_host = LongPollHost()
# The configs pushed out here get updated by
# self._recover_from_checkpoint in the failure scenario, so that must
# be run before we notify the changes.
self.notify_backend_configs_changed()
self.notify_replica_handles_changed()
self.notify_traffic_policies_changed()
self.notify_route_table_changed()
asyncio.get_event_loop().create_task(self.run_control_loop())
async def wait_for_event(self, uuid: UUID) -> bool:
if uuid not in self.inflight_results:
return True
event = self.inflight_results[uuid]
await event.wait()
self.inflight_results.pop(uuid)
self._serializable_inflight_results.pop(uuid)
async with self.write_lock:
self._checkpoint()
return True
def _create_event_with_result(
self,
goal_state: Dict[str, any],
recreation_uuid: Optional[UUID] = None) -> UUID:
# NOTE(ilr) Must be called before checkpointing!
event = asyncio.Event()
event.result = FutureResult(goal_state)
event.set()
uuid_val = recreation_uuid or uuid4()
self.inflight_results[uuid_val] = event
self._serializable_inflight_results[uuid_val] = event.result
return uuid_val
async def _num_inflight_results(self) -> int:
return len(self.inflight_results)
def notify_replica_handles_changed(self):
self.long_poll_host.notify_changed(
LongPollKey.REPLICA_HANDLES, {
backend_tag: list(replica_dict.values())
for backend_tag, replica_dict in
self.actor_reconciler.backend_replicas.items()
})
def notify_traffic_policies_changed(self):
self.long_poll_host.notify_changed(
LongPollKey.TRAFFIC_POLICIES,
self.endpoint_state.traffic_policies,
)
def notify_backend_configs_changed(self):
self.long_poll_host.notify_changed(
LongPollKey.BACKEND_CONFIGS,
self.backend_state.get_backend_configs())
def notify_route_table_changed(self):
self.long_poll_host.notify_changed(LongPollKey.ROUTE_TABLE,
self.endpoint_state.routes)
async def listen_for_change(self, keys_to_snapshot_ids: Dict[str, int]):
"""Proxy long pull client's listen request.
Args:
keys_to_snapshot_ids (Dict[str, int]): Snapshot IDs are used to
determine whether or not the host should immediately return the
data or wait for the value to be changed.
"""
return await (
self.long_poll_host.listen_for_change(keys_to_snapshot_ids))
def get_http_proxies(self) -> Dict[NodeId, ActorHandle]:
"""Returns a dictionary of node ID to http_proxy actor handles."""
return self.http_state.get_http_proxy_handles()
def _checkpoint(self) -> None:
"""Checkpoint internal state and write it to the KV store."""
assert self.write_lock.locked()
logger.debug("Writing checkpoint")
start = time.time()
checkpoint = pickle.dumps(
Checkpoint(self.endpoint_state.checkpoint(),
self.backend_state.checkpoint(), self.actor_reconciler,
self._serializable_inflight_results))
self.kv_store.put(CHECKPOINT_KEY, checkpoint)
logger.debug("Wrote checkpoint in {:.3f}s".format(time.time() - start))
if random.random(
) < _CRASH_AFTER_CHECKPOINT_PROBABILITY and self.detached:
logger.warning("Intentionally crashing after checkpoint")
os._exit(0)
async def _recover_from_checkpoint(self, checkpoint: Checkpoint) -> None:
"""Recover the instance state from the provided checkpoint.
This should be called in the constructor to ensure that the internal
state is updated before any other operations run. After running this,
internal state will be updated and long-poll clients may be notified.
Performs the following operations:
1) Deserializes the internal state from the checkpoint.
2) Starts/stops any replicas that are pending creation or
deletion.
"""
start = time.time()
logger.info("Recovering from checkpoint")
self.actor_reconciler = checkpoint.reconciler
self._serializable_inflight_results = checkpoint.inflight_reqs
for uuid, fut_result in self._serializable_inflight_results.items():
self._create_event_with_result(fut_result.requested_goal, uuid)
# NOTE(edoakes): unfortunately, we can't completely recover from a
# checkpoint in the constructor because we block while waiting for
# other actors to start up, and those actors fetch soft state from
# this actor. Because no other tasks will start executing until after
# the constructor finishes, if we were to run this logic in the
# constructor it could lead to deadlock between this actor and a child.
# However, we do need to guarantee that we have fully recovered from a
# checkpoint before any other state-changing calls run. We address this
# by acquiring the write_lock and then posting the task to recover from
# a checkpoint to the event loop. Other state-changing calls acquire
# this lock and will be blocked until recovering from the checkpoint
# finishes. This can be removed once we move to the async control loop.
async def finish_recover_from_checkpoint():
assert self.write_lock.locked()
self.autoscaling_policies = await self.actor_reconciler.\
_recover_from_checkpoint(self.backend_state, self)
self.write_lock.release()
logger.info(
"Recovered from checkpoint in {:.3f}s".format(time.time() -
start))
await self.write_lock.acquire()
asyncio.get_event_loop().create_task(finish_recover_from_checkpoint())
async def do_autoscale(self) -> None:
for backend, info in self.backend_state.backends.items():
if backend not in self.autoscaling_policies:
continue
new_num_replicas = self.autoscaling_policies[backend].scale(
self.backend_stats[backend], info.backend_config.num_replicas)
if new_num_replicas > 0:
await self.update_backend_config(
backend, BackendConfig(num_replicas=new_num_replicas))
async def reconcile_current_and_goal_backends(self):
pass
async def run_control_loop(self) -> None:
while True:
await self.do_autoscale()
async with self.write_lock:
self.http_state.update()
await asyncio.sleep(CONTROL_LOOP_PERIOD_S)
def _all_replica_handles(
self) -> Dict[BackendTag, Dict[ReplicaTag, ActorHandle]]:
"""Used for testing."""
return self.actor_reconciler.backend_replicas
def get_all_backends(self) -> Dict[BackendTag, BackendConfig]:
"""Returns a dictionary of backend tag to backend config."""
return self.backend_state.get_backend_configs()
def get_all_endpoints(self) -> Dict[EndpointTag, Dict[BackendTag, Any]]:
"""Returns a dictionary of backend tag to backend config."""
return self.endpoint_state.get_endpoints()
async def _set_traffic(self, endpoint_name: str,
traffic_dict: Dict[str, float]) -> UUID:
if endpoint_name not in self.endpoint_state.get_endpoints():
raise ValueError("Attempted to assign traffic for an endpoint '{}'"
" that is not registered.".format(endpoint_name))
assert isinstance(traffic_dict,
dict), "Traffic policy must be a dictionary."
for backend in traffic_dict:
if self.backend_state.get_backend(backend) is None:
raise ValueError(
"Attempted to assign traffic to a backend '{}' that "
"is not registered.".format(backend))
traffic_policy = TrafficPolicy(traffic_dict)
self.endpoint_state.traffic_policies[endpoint_name] = traffic_policy
return_uuid = self._create_event_with_result({
endpoint_name: traffic_policy
})
# NOTE(edoakes): we must write a checkpoint before pushing the
# update to avoid inconsistent state if we crash after pushing the
# update.
self._checkpoint()
self.notify_traffic_policies_changed()
return return_uuid
async def set_traffic(self, endpoint_name: str,
traffic_dict: Dict[str, float]) -> UUID:
"""Sets the traffic policy for the specified endpoint."""
async with self.write_lock:
return_uuid = await self._set_traffic(endpoint_name, traffic_dict)
return return_uuid
async def shadow_traffic(self, endpoint_name: str, backend_tag: BackendTag,
proportion: float) -> UUID:
"""Shadow traffic from the endpoint to the backend."""
async with self.write_lock:
if endpoint_name not in self.endpoint_state.get_endpoints():
raise ValueError("Attempted to shadow traffic from an "
"endpoint '{}' that is not registered."
.format(endpoint_name))
if self.backend_state.get_backend(backend_tag) is None:
raise ValueError(
"Attempted to shadow traffic to a backend '{}' that "
"is not registered.".format(backend_tag))
self.endpoint_state.traffic_policies[endpoint_name].set_shadow(
backend_tag, proportion)
traffic_policy = self.endpoint_state.traffic_policies[
endpoint_name]
return_uuid = self._create_event_with_result({
endpoint_name: traffic_policy
})
# NOTE(edoakes): we must write a checkpoint before pushing the
# update to avoid inconsistent state if we crash after pushing the
# update.
self._checkpoint()
self.notify_traffic_policies_changed()
return return_uuid
# TODO(architkulkarni): add Optional for route after cloudpickle upgrade
async def create_endpoint(self, endpoint: str,
traffic_dict: Dict[str, float], route,
methods) -> UUID:
"""Create a new endpoint with the specified route and methods.
If the route is None, this is a "headless" endpoint that will not
be exposed over HTTP and can only be accessed via a handle.
"""
async with self.write_lock:
# If this is a headless endpoint with no route, key the endpoint
# based on its name.
# TODO(edoakes): we should probably just store routes and endpoints
# separately.
if route is None:
route = endpoint
# TODO(edoakes): move this to client side.
err_prefix = "Cannot create endpoint."
if route in self.endpoint_state.routes:
# Ensures this method is idempotent
if self.endpoint_state.routes[route] == (endpoint, methods):
return
else:
raise ValueError(
"{} Route '{}' is already registered.".format(
err_prefix, route))
if endpoint in self.endpoint_state.get_endpoints():
raise ValueError(
"{} Endpoint '{}' is already registered.".format(
err_prefix, endpoint))
logger.info(
"Registering route '{}' to endpoint '{}' with methods '{}'.".
format(route, endpoint, methods))
self.endpoint_state.routes[route] = (endpoint, methods)
# NOTE(edoakes): checkpoint is written in self._set_traffic.
return_uuid = await self._set_traffic(endpoint, traffic_dict)
self.notify_route_table_changed()
return return_uuid
async def delete_endpoint(self, endpoint: str) -> UUID:
"""Delete the specified endpoint.
Does not modify any corresponding backends.
"""
logger.info("Deleting endpoint '{}'".format(endpoint))
async with self.write_lock:
# This method must be idempotent. We should validate that the
# specified endpoint exists on the client.
for route, (route_endpoint,
_) in self.endpoint_state.routes.items():
if route_endpoint == endpoint:
route_to_delete = route
break
else:
logger.info("Endpoint '{}' doesn't exist".format(endpoint))
return
# Remove the routing entry.
del self.endpoint_state.routes[route_to_delete]
# Remove the traffic policy entry if it exists.
if endpoint in self.endpoint_state.traffic_policies:
del self.endpoint_state.traffic_policies[endpoint]
return_uuid = self._create_event_with_result({
route_to_delete: None,
endpoint: None
})
# NOTE(edoakes): we must write a checkpoint before pushing the
# updates to the proxies to avoid inconsistent state if we crash
# after pushing the update.
self._checkpoint()
self.notify_route_table_changed()
return return_uuid
async def create_backend(self, backend_tag: BackendTag,
backend_config: BackendConfig,
replica_config: ReplicaConfig) -> UUID:
"""Register a new backend under the specified tag."""
async with self.write_lock:
# Ensures this method is idempotent.
backend_info = self.backend_state.get_backend(backend_tag)
if backend_info is not None:
if (backend_info.backend_config == backend_config
and backend_info.replica_config == replica_config):
return
backend_replica = create_backend_replica(
replica_config.func_or_class)
# Save creator that starts replicas, the arguments to be passed in,
# and the configuration for the backends.
backend_info = BackendInfo(
worker_class=backend_replica,
backend_config=backend_config,
replica_config=replica_config)
self.backend_state.add_backend(backend_tag, backend_info)
metadata = backend_config.internal_metadata
if metadata.autoscaling_config is not None:
self.autoscaling_policies[
backend_tag] = BasicAutoscalingPolicy(
backend_tag, metadata.autoscaling_config)
try:
# This call should be to run control loop
self.actor_reconciler._scale_backend_replicas(
self.backend_state.backends, backend_tag,
backend_config.num_replicas)
except RayServeException as e:
del self.backend_state.backends[backend_tag]
raise e
return_uuid = self._create_event_with_result({
backend_tag: backend_info
})
# NOTE(edoakes): we must write a checkpoint before starting new
# or pushing the updated config to avoid inconsistent state if we
# crash while making the change.
self._checkpoint()
await self.actor_reconciler._enqueue_pending_scale_changes_loop(
self.backend_state)
await self.actor_reconciler.backend_control_loop()
self.notify_replica_handles_changed()
# Set the backend config inside routers
# (particularly for max_concurrent_queries).
self.notify_backend_configs_changed()
return return_uuid
async def delete_backend(self,
backend_tag: BackendTag,
force_kill: bool = False) -> UUID:
async with self.write_lock:
# This method must be idempotent. We should validate that the
# specified backend exists on the client.
if self.backend_state.get_backend(backend_tag) is None:
return
# Check that the specified backend isn't used by any endpoints.
for endpoint, traffic_policy in self.endpoint_state.\
traffic_policies.items():
if (backend_tag in traffic_policy.traffic_dict
or backend_tag in traffic_policy.shadow_dict):
raise ValueError("Backend '{}' is used by endpoint '{}' "
"and cannot be deleted. Please remove "
"the backend from all endpoints and try "
"again.".format(backend_tag, endpoint))
# Scale its replicas down to 0. This will also remove the backend
# from self.backend_state.backends and
# self.actor_reconciler.backend_replicas.
# This should be a call to the control loop
self.actor_reconciler._scale_backend_replicas(
self.backend_state.backends, backend_tag, 0, force_kill)
# Remove the backend's metadata.
del self.backend_state.backends[backend_tag]
if backend_tag in self.autoscaling_policies:
del self.autoscaling_policies[backend_tag]
# Add the intention to remove the backend from the routers.
self.actor_reconciler.backends_to_remove.append(backend_tag)
return_uuid = self._create_event_with_result({backend_tag: None})
# NOTE(edoakes): we must write a checkpoint before removing the
# backend from the routers to avoid inconsistent state if we crash
# after pushing the update.
self._checkpoint()
await self.actor_reconciler._enqueue_pending_scale_changes_loop(
self.backend_state)
await self.actor_reconciler.backend_control_loop()
self.notify_replica_handles_changed()
return return_uuid
async def update_backend_config(self, backend_tag: BackendTag,
config_options: BackendConfig) -> UUID:
"""Set the config for the specified backend."""
async with self.write_lock:
assert (self.backend_state.get_backend(backend_tag)
), "Backend {} is not registered.".format(backend_tag)
assert isinstance(config_options, BackendConfig)
stored_backend_config = self.backend_state.get_backend(
backend_tag).backend_config
backend_config = stored_backend_config.copy(
update=config_options.dict(exclude_unset=True))
backend_config._validate_complete()
self.backend_state.get_backend(
backend_tag).backend_config = backend_config
backend_info = self.backend_state.get_backend(backend_tag)
# Scale the replicas with the new configuration.
# This should be to run the control loop
self.actor_reconciler._scale_backend_replicas(
self.backend_state.backends, backend_tag,
backend_config.num_replicas)
return_uuid = self._create_event_with_result({
backend_tag: backend_info
})
# NOTE(edoakes): we must write a checkpoint before pushing the
# update to avoid inconsistent state if we crash after pushing the
# update.
self._checkpoint()
# Inform the routers about change in configuration
# (particularly for setting max_batch_size).
await self.actor_reconciler._enqueue_pending_scale_changes_loop(
self.backend_state)
await self.actor_reconciler.backend_control_loop()
self.notify_replica_handles_changed()
self.notify_backend_configs_changed()
return return_uuid
def get_backend_config(self, backend_tag: BackendTag) -> BackendConfig:
"""Get the current config for the specified backend."""
assert (self.backend_state.get_backend(backend_tag)
), "Backend {} is not registered.".format(backend_tag)
return self.backend_state.get_backend(backend_tag).backend_config
def get_http_config(self):
"""Return the HTTP proxy configuration."""
return self.http_state.get_config()
async def shutdown(self) -> None:
"""Shuts down the serve instance completely."""
async with self.write_lock:
for proxy in self.http_state.get_http_proxy_handles().values():
ray.kill(proxy, no_restart=True)
for replica in self.actor_reconciler.get_replica_handles():
ray.kill(replica, no_restart=True)
self.kv_store.delete(CHECKPOINT_KEY)
|
[
"noreply@github.com"
] |
liuyuqi123.noreply@github.com
|
9be85af534c4d86583912784bba1926e6783576b
|
617f1b605be66e00d6a8c006db96e8131ad9d4ff
|
/xtreme_vision/Detection/keras_resnet/blocks/_3d.py
|
4b8793932bb2df449ed33cc780b15840e16f6fe0
|
[
"MIT"
] |
permissive
|
AsadRasheed-AR/Xtreme-Vision
|
8d7b5966c0c2c267538c27c0858ec0f4cf2807c3
|
2e09e6972c6b2752bc37f8356fafda151acacd0d
|
refs/heads/master
| 2023-03-26T11:34:04.459514
| 2021-03-27T03:08:40
| 2021-03-27T03:08:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,271
|
py
|
# -*- coding: utf-8 -*-
"""
keras_resnet.blocks._3d
~~~~~~~~~~~~~~~~~~~~~~~
This module implements a number of popular three-dimensional residual blocks.
"""
import tensorflow as tf
from xtreme_vision.Detection.keras_resnet import layers
parameters = {
"kernel_initializer": "he_normal"
}
def basic_3d(
filters,
stage=0,
block=0,
kernel_size=3,
numerical_name=False,
stride=None,
freeze_bn=False
):
"""
A three-dimensional basic block.
:param filters: the output’s feature space
:param stage: int representing the stage of this block (starting from 0)
:param block: int representing this block (starting from 0)
:param kernel_size: size of the kernel
:param numerical_name: if true, uses numbers to represent blocks instead of chars (ResNet{101, 152, 200})
:param stride: int representing the stride used in the shortcut and the first conv layer, default derives stride from block id
:param freeze_bn: if true, freezes BatchNormalization layers (ie. no updates are done in these layers)
Usage:
>>> import keras_resnet.blocks
>>> keras_resnet.blocks.basic_3d(64)
"""
if stride is None:
if block != 0 or stage == 0:
stride = 1
else:
stride = 2
if tf.keras.backend.image_data_format() == "channels_last":
axis = 3
else:
axis = 1
if block > 0 and numerical_name:
block_char = "b{}".format(block)
else:
block_char = chr(ord('a') + block)
stage_char = str(stage + 2)
def f(x):
y = tf.keras.layers.ZeroPadding3D(padding=1, name="padding{}{}_branch2a".format(stage_char, block_char))(x)
y = tf.keras.layers.Conv3D(filters, kernel_size, strides=stride, use_bias=False, name="res{}{}_branch2a".format(stage_char, block_char), **parameters)(y)
y = layers.BatchNormalization(axis=axis, epsilon=1e-5, freeze=freeze_bn, name="bn{}{}_branch2a".format(stage_char, block_char))(y)
y = tf.keras.layers.Activation("relu", name="res{}{}_branch2a_relu".format(stage_char, block_char))(y)
y = tf.keras.layers.ZeroPadding3D(padding=1, name="padding{}{}_branch2b".format(stage_char, block_char))(y)
y = tf.keras.layers.Conv3D(filters, kernel_size, use_bias=False, name="res{}{}_branch2b".format(stage_char, block_char), **parameters)(y)
y = layers.BatchNormalization(axis=axis, epsilon=1e-5, freeze=freeze_bn, name="bn{}{}_branch2b".format(stage_char, block_char))(y)
if block == 0:
shortcut = tf.keras.layers.Conv3D(filters, (1, 1), strides=stride, use_bias=False, name="res{}{}_branch1".format(stage_char, block_char), **parameters)(x)
shortcut = layers.BatchNormalization(axis=axis, epsilon=1e-5, freeze=freeze_bn, name="bn{}{}_branch1".format(stage_char, block_char))(shortcut)
else:
shortcut = x
y = tf.keras.layers.Add(name="res{}{}".format(stage_char, block_char))([y, shortcut])
y = tf.keras.layers.Activation("relu", name="res{}{}_relu".format(stage_char, block_char))(y)
return y
return f
def bottleneck_3d(
filters,
stage=0,
block=0,
kernel_size=3,
numerical_name=False,
stride=None,
freeze_bn=False
):
"""
A three-dimensional bottleneck block.
:param filters: the output’s feature space
:param stage: int representing the stage of this block (starting from 0)
:param block: int representing this block (starting from 0)
:param kernel_size: size of the kernel
:param numerical_name: if true, uses numbers to represent blocks instead of chars (ResNet{101, 152, 200})
:param stride: int representing the stride used in the shortcut and the first conv layer, default derives stride from block id
:param freeze_bn: if true, freezes BatchNormalization layers (ie. no updates are done in these layers)
Usage:
>>> import keras_resnet.blocks
>>> keras_resnet.blocks.bottleneck_3d(64)
"""
if stride is None:
if block != 0 or stage == 0:
stride = 1
else:
stride = 2
if tf.keras.backend.image_data_format() == "channels_last":
axis = 3
else:
axis = 1
if block > 0 and numerical_name:
block_char = "b{}".format(block)
else:
block_char = chr(ord('a') + block)
stage_char = str(stage + 2)
def f(x):
y = tf.keras.layers.Conv3D(filters, (1, 1), strides=stride, use_bias=False, name="res{}{}_branch2a".format(stage_char, block_char), **parameters)(x)
y = layers.BatchNormalization(axis=axis, epsilon=1e-5, freeze=freeze_bn, name="bn{}{}_branch2a".format(stage_char, block_char))(y)
y = tf.keras.layers.Activation("relu", name="res{}{}_branch2a_relu".format(stage_char, block_char))(y)
y = tf.keras.layers.ZeroPadding3D(padding=1, name="padding{}{}_branch2b".format(stage_char, block_char))(y)
y = tf.keras.layers.Conv3D(filters, kernel_size, use_bias=False, name="res{}{}_branch2b".format(stage_char, block_char), **parameters)(y)
y = layers.BatchNormalization(axis=axis, epsilon=1e-5, freeze=freeze_bn, name="bn{}{}_branch2b".format(stage_char, block_char))(y)
y = tf.keras.layers.Activation("relu", name="res{}{}_branch2b_relu".format(stage_char, block_char))(y)
y = tf.keras.layers.Conv3D(filters * 4, (1, 1), use_bias=False, name="res{}{}_branch2c".format(stage_char, block_char), **parameters)(y)
y = layers.BatchNormalization(axis=axis, epsilon=1e-5, freeze=freeze_bn, name="bn{}{}_branch2c".format(stage_char, block_char))(y)
if block == 0:
shortcut = tf.keras.layers.Conv3D(filters * 4, (1, 1), strides=stride, use_bias=False, name="res{}{}_branch1".format(stage_char, block_char), **parameters)(x)
shortcut = layers.BatchNormalization(axis=axis, epsilon=1e-5, freeze=freeze_bn, name="bn{}{}_branch1".format(stage_char, block_char))(shortcut)
else:
shortcut = x
y = tf.keras.layers.Add(name="res{}{}".format(stage_char, block_char))([y, shortcut])
y = tf.keras.layers.Activation("relu", name="res{}{}_relu".format(stage_char, block_char))(y)
return y
return f
|
[
"kingadeel2017@outlook.com"
] |
kingadeel2017@outlook.com
|
5a5e02326b83bc2f2e09ea36e04523b0b8b5f97b
|
587840ef968ad41a8e80bd2404b72d349b21816d
|
/Practice_py/FirstChallenge/practice-10.py
|
af0720f92249770aeac0c50db23f6fa591d3c9b0
|
[] |
no_license
|
DamonReyes/Python_Classes-
|
b4b0ff2a6e11549406b7b9f852fffb4b70c6d888
|
2e15eb2af0cd79a76a371bcf8f8d9f281185c7d0
|
refs/heads/master
| 2023-05-04T05:05:33.457765
| 2021-05-12T03:44:13
| 2021-05-12T03:44:13
| 305,586,509
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
def run():
    cantidad = int(input('cuanta cantidad de millas quieres?: '))
    km = 1.609344
    total = cantidad * km
    total = str(total)
    print(total + ' ' + 'kilometros')

if __name__ == '__main__':
    run()
|
[
"steve069888@gmail.com"
] |
steve069888@gmail.com
|
6e5d4fe9f1ffb878699205e022649cde08a33907
|
355ddf1f4b1b60a8c83c1880d01d0b7071521c73
|
/setup.py
|
be40e74a4d92b317fd06948eeb0f23727855ecbc
|
[
"BSD-3-Clause"
] |
permissive
|
vstadnytskyi/dataq-di-usb
|
bcba792feb6859d3c03f0875af1088371cf0846e
|
720bc5026a54336799894c906bcf5d537cb73cd6
|
refs/heads/master
| 2022-11-22T07:02:01.941923
| 2020-07-07T18:54:02
| 2020-07-07T18:54:02
| 277,897,367
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,138
|
py
|
from os import path
from setuptools import setup, find_packages
import sys
import versioneer
# NOTE: This file must remain Python 2 compatible for the foreseeable future,
# to ensure that we error out properly for people with outdated setuptools
# and/or pip.
min_version = (3, 6)
if sys.version_info < min_version:
error = """
dataq-di-usb does not support Python {0}.{1}.
Python {2}.{3} and above is required. Check your Python version like so:
python3 --version
This may be due to an out-of-date pip. Make sure you have pip >= 9.0.1.
Upgrade pip like so:
pip install --upgrade pip
""".format(*(sys.version_info[:2] + min_version))
sys.exit(error)
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding='utf-8') as readme_file:
readme = readme_file.read()
with open(path.join(here, 'requirements.txt')) as requirements_file:
# Parse requirements.txt, ignoring any commented-out lines.
requirements = [line for line in requirements_file.read().splitlines()
if not line.startswith('#')]
setup(
name='dataq-di-usb',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="dataq-di-usb",
long_description=readme,
author="Valentyn Stadnytskyi",
author_email='v.stadnytskyi@gmail.com',
url='https://github.com/vstadnytskyi/dataq-di-usb',
python_requires='>={}'.format('.'.join(str(n) for n in min_version)),
packages=find_packages(exclude=['docs', 'tests']),
entry_points={
'console_scripts': [
# 'command = some.module:some_function',
],
},
include_package_data=True,
package_data={
'dataq_di_usb': [
# When adding files here, remember to update MANIFEST.in as well,
# or else they will not be included in the distribution on PyPI!
# 'path/to/data_file',
]
},
install_requires=requirements,
license="BSD (3-clause)",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
)
|
[
"v.stadnytskyi@gmail.com"
] |
v.stadnytskyi@gmail.com
|
d7ee1eda6b12e0fb1263e0d13d95339759f08aed
|
90a2cd620fff4dd8f63e0ac268f459f85776b3e8
|
/backend/pandora/pandora/core/header.py
|
3ec4b50a13689b855d572008bfda8fc44b2359d3
|
[] |
no_license
|
daleeg/idaas
|
aafe4065608f8db8acf8b697dd5a257ec468ebee
|
3314edefac2fba2ededb4f803ff49cb3bbb90372
|
refs/heads/master
| 2023-06-28T15:48:59.949445
| 2021-08-05T03:46:48
| 2021-08-05T03:46:48
| 243,650,933
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 557
|
py
|
from __future__ import unicode_literals
from rest_framework import HTTP_HEADER_ENCODING
from django.conf import settings
def get_company_header(request):
company = request.META.get("HTTP_{}".format(settings.COMPANY_HEADER), b"")
if isinstance(company, str):
company = company.encode(HTTP_HEADER_ENCODING)
return company
def get_project_label_header(request):
label = request.META.get("HTTP_{}".format(settings.APP_HEADER), b"")
if isinstance(label, str):
label = label.encode(HTTP_HEADER_ENCODING)
return label
|
[
"wangli1319@jd.com"
] |
wangli1319@jd.com
|
3d1bb13e433521880f53e86aa1c6cced6acc58c7
|
0af4dacb4eadbfefe3e2c00274b811b470b4cb30
|
/musicapp/libs/db_sqlite.py
|
2585c0a5e4b53436f882434cc566c8242788764a
|
[] |
no_license
|
bb17pugc/dashboard
|
0b0473b0e161aa6d3f3e18db97008cc991d21f68
|
1166182c4fefa809c017b1707a6e833dce8b7f84
|
refs/heads/master
| 2022-12-25T22:23:35.695339
| 2020-10-04T20:23:22
| 2020-10-04T20:23:22
| 301,219,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,694
|
py
|
# from db import Database
# from config import get_config
# import sqlite3
# import sys
# from itertools import izip_longest
# from termcolor import colored
# class SqliteDatabase(Database):
# TABLE_SONGS = 'songs'
# TABLE_FINGERPRINTS = 'fingerprints'
# def __init__(self):
# self.connect()
# def connect(self):
# config = get_config()
# self.conn = sqlite3.connect(config['db.file'])
# self.conn.text_factory = str
# self.cur = self.conn.cursor()
# print(colored('sqlite - connection opened','white',attrs=['dark']))
# def __del__(self):
# self.conn.commit()
# self.conn.close()
# print(colored('sqlite - connection has been closed','white',attrs=['dark']))
# def query(self, query, values = []):
# self.cur.execute(query, values)
# def executeOne(self, query, values = []):
# self.cur.execute(query, values)
# return self.cur.fetchone()
# def executeAll(self, query, values = []):
# self.cur.execute(query, values)
# return self.cur.fetchall()
# def buildSelectQuery(self, table, params):
# conditions = []
# values = []
# for k, v in enumerate(params):
# key = v
# value = params[v]
# conditions.append("%s = ?" % key)
# values.append(value)
# conditions = ' AND '.join(conditions)
# query = "SELECT * FROM %s WHERE %s" % (table, conditions)
# return {
# "query": query,
# "values": values
# }
# def findOne(self, table, params):
# select = self.buildSelectQuery(table, params)
# return self.executeOne(select['query'], select['values'])
# def findAll(self, table, params):
# select = self.buildSelectQuery(table, params)
# return self.executeAll(select['query'], select['values'])
# def insert(self, table, params):
# keys = ', '.join(params.keys())
# values = params.values()
# query = "INSERT INTO songs (%s) VALUES (?, ?)" % (keys);
# self.cur.execute(query, values)
# self.conn.commit()
# return self.cur.lastrowid
# def insertMany(self, table, columns, values):
# def grouper(iterable, n, fillvalue=None):
# args = [iter(iterable)] * n
# return (filter(None, values) for values
# in izip_longest(fillvalue=fillvalue, *args))
# for split_values in grouper(values, 1000):
# query = "INSERT OR IGNORE INTO %s (%s) VALUES (?, ?, ?)" % (table, ", ".join(columns))
# self.cur.executemany(query, split_values)
# self.conn.commit()
# def get_song_hashes_count(self, song_id):
# query = 'SELECT count(*) FROM %s WHERE song_fk = %d' % (self.TABLE_FINGERPRINTS, song_id)
# rows = self.executeOne(query)
# return int(rows[0])
|
[
"ali@gmail.com"
] |
ali@gmail.com
|
dbfa75f02f7ee806b6c55b5e7d0257c8a92ad672
|
2c7c03faa533557862bff95530e4814187a0cdff
|
/题目十六/16.py
|
2a4dc799ec73925b8a491f1de2705cc5f118c038
|
[] |
no_license
|
lifengshuo/leetcode
|
c56f90543db68b30ec667f885fc8b41760dcf64b
|
8ad40480b12aba2cbadb1ff8f27e8cd517e1289a
|
refs/heads/master
| 2023-02-11T16:03:41.885352
| 2021-01-03T13:12:32
| 2021-01-03T13:12:32
| 298,794,543
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,258
|
py
|
class Solution(object):
def threeSumClosest(self, nums, target):
        M = float("inf")  # best three-number sum so far, initially set to infinity
lens = len(nums)
        nums.sort()  # sort the array first
for i in range(lens):
            if i>0 and nums[i]==nums[i-1]: # skip when nums[i] equals the previous value
continue
left = i + 1
right = lens -1
while left < right:
ans = nums[i] + nums[left] + nums[right]
if target == ans:
return target
                # if this sum is closer to the target, keep it as the best answer
if abs(ans - target) < abs(M-target):
M = ans
                if target > ans: # target is larger, so move the left pointer right to get closer
while left<right and nums[left]==nums[left+1]:
left += 1
left += 1
                elif target < ans: # target is smaller, so move the right pointer left to get closer
while left<right and nums[right]==nums[right-1]:
right -= 1
right -= 1
return M
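# Added example (not part of the original solution): the classic LeetCode 16 case, where the
# closest achievable sum to target 1 is 2 (-1 + 2 + 1).
if __name__ == "__main__":
    print(Solution().threeSumClosest([-1, 2, 1, -4], 1))  # expected output: 2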
|
[
"‘791320686@qq.com’"
] |
‘791320686@qq.com’
|
3264ddcf2e2f2fd497deb3ef202175e0c68c6af5
|
46073779c036c64fc8835663b26c3cbc937fc58f
|
/node_modules/socket.io/node_modules/engine.io/node_modules/ws/build/config.gypi
|
c43eaf541c42d42f54cfcd279892b7f3879de219
|
[
"MIT"
] |
permissive
|
aalteirac/nodeChat
|
32c9b8dd978a2cf7b82e654d211b820b00893f49
|
9755cd926c7a84e4ef5943cbf1aa406aff5acf43
|
refs/heads/master
| 2016-09-11T08:13:33.492901
| 2014-09-23T17:09:07
| 2014-09-23T17:09:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,344
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"host_arch": "ia32",
"node_install_npm": "true",
"node_prefix": "",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "true",
"node_use_openssl": "true",
"node_use_perfctr": "true",
"node_use_systemtap": "false",
"python": "c:\\python27\\python.exe",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "true",
"visibility": "",
"want_separate_host_toolset": 1,
"nodedir": "C:\\Users\\aai\\.node-gyp\\0.10.29",
"copy_dev_lib": "true",
"standalone_static_library": 1,
"user_agent": "npm/1.4.14 node/v0.10.29 win32 x64",
"registry": "https://registry.npmjs.org/",
"email": "aai@qlik.com",
"username": "alteirac",
"prefix": "C:\\Users\\aai\\AppData\\Roaming\\npm",
"always_auth": "",
"bin_links": "true",
"browser": "",
"ca": "",
"cache": "C:\\Users\\aai\\AppData\\Roaming\\npm-cache",
"cache_lock_stale": "60000",
"cache_lock_retries": "10",
"cache_lock_wait": "10000",
"cache_max": "Infinity",
"cache_min": "10",
"cert": "",
"color": "true",
"depth": "Infinity",
"description": "true",
"dev": "",
"editor": "notepad.exe",
"engine_strict": "",
"force": "",
"fetch_retries": "2",
"fetch_retry_factor": "10",
"fetch_retry_mintimeout": "10000",
"fetch_retry_maxtimeout": "60000",
"git": "git",
"git_tag_version": "true",
"global": "",
"globalconfig": "C:\\Users\\aai\\AppData\\Roaming\\npm\\etc\\npmrc",
"group": "",
"heading": "npm",
"ignore_scripts": "",
"init_module": "C:\\Users\\aai\\.npm-init.js",
"init_author_name": "",
"init_author_email": "",
"init_author_url": "",
"init_license": "ISC",
"json": "",
"key": "",
"link": "",
"local_address": "",
"long": "",
"message": "%s",
"node_version": "v0.10.29",
"npat": "",
"onload_script": "",
"optional": "true",
"parseable": "",
"production": "",
"proprietary_attribs": "true",
"https_proxy": "",
"rebuild_bundle": "true",
"rollback": "true",
"save": "",
"save_bundle": "",
"save_dev": "",
"save_exact": "",
"save_optional": "",
"save_prefix": "^",
"searchopts": "",
"searchexclude": "",
"searchsort": "name",
"shell": "C:\\Windows\\system32\\cmd.exe",
"shrinkwrap": "true",
"sign_git_tag": "",
"spin": "true",
"strict_ssl": "true",
"tag": "latest",
"tmp": "C:\\Users\\aai\\AppData\\Local\\Temp",
"unicode": "true",
"unsafe_perm": "true",
"usage": "",
"user": "",
"userconfig": "C:\\Users\\aai\\.npmrc",
"umask": "18",
"version": "",
"versions": "",
"viewer": "browser",
"globalignorefile": "C:\\Users\\aai\\AppData\\Roaming\\npm\\etc\\npmignore"
}
}
|
[
"anthony@alteirac.com"
] |
anthony@alteirac.com
|
bc2abba030ded9f65f92d6af8e69b7e0987aa6b5
|
f19b3885fe8b08607bc1d740ec46f9a0215f423b
|
/tests/sparsezoo/models/classification/test_mobilenet.py
|
1b14968a1a6dc60df3db5d4b0d8b2e06c34995f9
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
chenjun2hao/sparsezoo
|
62c9c0639727303dc8dd9ba319c6b5d7ed6bc1ad
|
1588d22910c820eb9f0ec56a8448fa2f231d3111
|
refs/heads/main
| 2023-03-09T10:32:30.828138
| 2021-03-01T17:02:35
| 2021-03-01T17:02:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,101
|
py
|
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from sparsezoo.models.classification import mobilenet_v1, mobilenet_v2
from tests.sparsezoo.utils import model_constructor
@pytest.mark.parametrize(
"download,framework,repo,dataset,training_scheme,"
"optim_name,optim_category,optim_target",
[
(True, "pytorch", "sparseml", "imagenet", None, "base", "none", None),
(True, "pytorch", "sparseml", "imagenet", None, "pruned", "conservative", None),
(True, "pytorch", "sparseml", "imagenet", None, "pruned", "moderate", None),
],
)
def test_mobilenet_v1(
download,
framework,
repo,
dataset,
training_scheme,
optim_name,
optim_category,
optim_target,
):
model_constructor(
mobilenet_v1,
download,
framework,
repo,
dataset,
training_scheme,
optim_name,
optim_category,
optim_target,
)
@pytest.mark.parametrize(
"download,framework,repo,dataset,training_scheme,"
"optim_name,optim_category,optim_target",
[
(True, "pytorch", "sparseml", "imagenet", None, "base", "none", None),
],
)
def test_mobilenet_v2(
download,
framework,
repo,
dataset,
training_scheme,
optim_name,
optim_category,
optim_target,
):
model_constructor(
mobilenet_v2,
download,
framework,
repo,
dataset,
training_scheme,
optim_name,
optim_category,
optim_target,
)
|
[
"noreply@github.com"
] |
chenjun2hao.noreply@github.com
|
1cd4b52dca6eb61cddde320b65934f00f9bd78f8
|
4c602d7878b3e87557e85925937c0b5daaa9a51c
|
/seguros/users/models.py
|
dbeca59817d0b43b36a753755c52f847d2d99e19
|
[
"MIT"
] |
permissive
|
joaquinquintas/seguros2020
|
490315ed13014d37b36053aa456fb991922d8e2f
|
2d26a6c6a82d740d5c3671ee3fbc6165a731bfab
|
refs/heads/master
| 2021-04-05T23:40:42.453564
| 2018-06-01T14:28:57
| 2018-06-01T14:28:57
| 124,537,212
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 567
|
py
|
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
class User(AbstractUser):
# First Name and Last Name do not cover name patterns
# around the globe.
name = models.CharField(_('Name of User'), blank=True, max_length=255)
is_broker = models.BooleanField(default=False)
def __str__(self):
return self.username
def get_absolute_url(self):
return reverse('users:detail', kwargs={'username': self.username})
|
[
"brunomartintenaglia@gmail.com"
] |
brunomartintenaglia@gmail.com
|
27fe564b6a341cffb6fb0d78d201e3796486cd71
|
26e195c5ffc34dc5f747f2605e35c741e87a5f56
|
/neo_hikitsugi/pages/forms.py
|
8be1280e862ba23f06012ced1b483ce982e3d779
|
[] |
no_license
|
socomplicated808/django-projects
|
eaefb5e687b2f71ed393c458daf71acc8f9ea3f9
|
4cf1053f00a6cd8721c838657a60eb5433b55f76
|
refs/heads/master
| 2023-08-22T15:18:36.124197
| 2021-10-16T04:00:51
| 2021-10-16T04:00:51
| 417,695,600
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 482
|
py
|
from django.forms import ModelForm
from django import forms
from django.forms.widgets import ClearableFileInput
from .models import Cluster, Post, CR
class PostForm(ModelForm):
class Meta:
model = Post
fields = ['name','time','cluster_log','files']
widgets = {'files':ClearableFileInput(attrs={'multiple':True})}
class CrForm(ModelForm):
class Meta:
model = CR
fields = '__all__'
class ClusterForm(ModelForm):
class Meta:
model = Cluster
fields = '__all__'
|
[
"socomplicated808@hotmail.com"
] |
socomplicated808@hotmail.com
|
631b16c7d490e62ddea47a65eba19530c522bb89
|
a8ff08dccbd864bb4c1f165e63e875b8e89c113f
|
/threadssubclass.py
|
e6e09d654dee3c7a5db6e8a7b2bd35fb481705ad
|
[] |
no_license
|
kokot300/python-core-and-advanced
|
d3084d6e90db8661a306c1028eec326817dc41fc
|
e9a75f7b8a9b149b074f6a259d96df7b5e20e0ec
|
refs/heads/master
| 2021-05-17T13:13:21.519377
| 2020-03-30T11:09:46
| 2020-03-30T11:09:46
| 250,787,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 629
|
py
|
import threading
from time import sleep
class MyThread(threading.Thread):
def run(self):
threading.Thread.run(self)
self.displaynumbers()
def displaynumbers(self):
sleep(5)
for i in range(0, 11):
print(i)
print(threading.currentThread().getName())
class MyOwnThread:
def displaynumbers(self):
for i in range(0, 11):
print(i)
print(threading.currentThread().getName())
t=MyThread()
obj=MyOwnThread()
t2=threading.Thread(target=obj.displaynumbers)
t3=threading.Thread(target=obj.displaynumbers)
t.start()
t2.start()
t3.start()
|
[
"kokot300@gmail.com"
] |
kokot300@gmail.com
|
698c86a1789e5e1d7105a013fff5ead5649454cc
|
2cb120360192dfdf9afa233c8585232cb3df6e8c
|
/samples/tf_study/GPU/multi.py
|
d7a26ed4b542ad4459a1b61e81a93dfef25d64c6
|
[] |
no_license
|
CosmosShadow/MLPythonLib
|
6323583bca8c6ff5757465fb1a0d5c4f23deb56c
|
3a2da2601330a032b737ff0addf71f679eeee94b
|
refs/heads/master
| 2020-05-21T15:08:58.082575
| 2017-04-30T17:03:15
| 2017-04-30T17:03:15
| 44,087,820
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 490
|
py
|
# coding: utf-8
import tensorflow as tf
import prettytensor as pt
import numpy as np
import cmtf.data.data_mnist as data_mnist
c = []
for d in ['/cpu:0', '/gpu:0']:
with tf.device(d):
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3])
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2])
c.append(tf.matmul(a, b))
with tf.device('/cpu:0'):
sum = tf.add_n(c)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
	print(sess.run([sum, c]))
|
[
"lichenarthurml@gmail.com"
] |
lichenarthurml@gmail.com
|
b667077a04eedda09c0606a6d1ae33ff916c5468
|
b4f28a0c699e54348177c8db2b1d0f7d80af0c29
|
/python/log_printing.py
|
adbbbeffd896458119e38df79005a8a7c1ad8f6e
|
[] |
no_license
|
homerobse/snippets
|
8d60dfb8402aaf142b11256f6aa0cf48d8995bfb
|
8e4440cbd34e023bffa5735da630e6a91949cf7f
|
refs/heads/master
| 2023-01-14T04:42:38.438780
| 2023-01-06T04:27:46
| 2023-01-06T04:41:45
| 166,845,555
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 705
|
py
|
#!/home/homero/miniconda3/bin/python
# https://docs.python.org/3/howto/logging.html#logging-advanced-tutorial
import logging
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.WARNING)
logging.warning('Watch out!') # will print a message to the console
logging.info('I told you so') # will not print anything
logging.basicConfig(level=logging.DEBUG) # THIS DOES NOT WORK. IT CAN ONLY BE SET ONCE.
logging.warning('Watch out! 2') # will print a message to the console
logging.info('I told you so 2') # will NOT print either
x = 100
logging.warning('Run %d times!', x) # will print a message to the console
a= 'two'
b= 'variables'
logging.warning(f'you can also log {a} {b}')
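# Added sketch (not in the original script): basicConfig() is ignored once handlers exist,
# but the root logger's level can still be lowered afterwards to let INFO records through.
logging.getLogger().setLevel(logging.DEBUG)
logging.info('this INFO message is printed after lowering the root logger level')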
|
[
"homero.esmeraldo@cncb.ox.ac.uk"
] |
homero.esmeraldo@cncb.ox.ac.uk
|
182b5b5d5624603c6b49c9ce628094fc7722e98f
|
f3891312d9bb380543e2ab92e765e242a1937d5d
|
/env/lib/python3.6/struct.py
|
86bf5ed25332b0bf04397ea4b555253b0bd71fd0
|
[] |
no_license
|
akando42/psychology-therapist-backend
|
72d6b99445e72f0fb475ddfac6e18cc15ea99354
|
88463da75b5f1f70052716135019a276cc034aba
|
refs/heads/master
| 2021-10-08T22:54:31.383049
| 2018-12-18T18:15:21
| 2018-12-18T18:15:21
| 159,715,742
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 49
|
py
|
/Users/troydo42/anaconda3/lib/python3.6/struct.py
|
[
"hoangdov@gmail.com"
] |
hoangdov@gmail.com
|
85c94893954d6111bbdb5db2a378b045c734d47e
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2413/49687/317163.py
|
f2ba3b2827d0d5f59c37f8ff5bd9bb1d061e5e24
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 688
|
py
|
n = int(input())
start = int(input())
if(n==2 and start == 3):
print("[3, 2, 0, 1]")
elif(n==3 and start == 2):
print("[2, 6, 7, 5, 4, 0, 1, 3]")
elif(n==3 and start == 4):
print("[4, 0, 1, 3, 2, 6, 7, 5]")
elif(n==5 and start ==3):
print("[3, 2, 6, 7, 5, 4, 12, 13, 15, 14, 10, 11, 9, 8, 24, 25, 27, 26, 30, 31, 29, 28, 20, 21, 23, 22, 18, 19, 17, 16, 0, 1]")
elif(n==6 and start ==2):
print("[2, 6, 7, 5, 4, 12, 13, 15, 14, 10, 11, 9, 8, 24, 25, 27, 26, 30, 31, 29, 28, 20, 21, 23, 22, 18, 19, 17, 16, 48, 49, 51, 50, 54, 55, 53, 52, 60, 61, 63, 62, 58, 59, 57, 56, 40, 41, 43, 42, 46, 47, 45, 44, 36, 37, 39, 38, 34, 35, 33, 32, 0, 1, 3]")
else:
print(n,start)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
5d5e5ece9ff5ae5c4d05800115cf180e0ad68909
|
d09ca11452ef54d936736c682eb1cc38575bf6e1
|
/10.py
|
018693f0a131d5d09d8a773e545e34caa267a2d0
|
[] |
no_license
|
anishghimire603/Insight-Workshop-Academy
|
f8c424edc76af802c7371643d572de8ead9688a3
|
e5958d827b0c7ff64a3043ba2d2de305a650c3ea
|
refs/heads/main
| 2023-01-22T02:14:08.553931
| 2020-11-30T15:33:03
| 2020-11-30T15:33:03
| 304,255,812
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
def odd_values_string(str):
result = ""
for i in range(len(str)):
if i % 2 == 0:
result = result + str[i]
return result
print(odd_values_string('python'))
|
[
"aghimire603@gmail.com"
] |
aghimire603@gmail.com
|
ec0a4630ff15af894ced23f4850435fee9b7810c
|
33528048ed219a3567777901249ec48641e00ec8
|
/NN_code.py
|
19a841b5aaa34ceb1a22f6dbf27ffe5da4f0b740
|
[] |
no_license
|
priyadiwakar/Face-Image-Classification
|
b4f91c2f510b95795b06ccc6cd7b095ae6030865
|
705f18dc6d2cee80ce998500095fed1e6bb2b0c4
|
refs/heads/master
| 2021-07-09T14:56:01.426185
| 2020-09-30T18:33:46
| 2020-09-30T18:33:46
| 196,488,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,212
|
py
|
# -*- coding: utf-8 -*-
"""
@author: priya
"""
import tensorflow as tf
import numpy as np
import cv2
import glob
from pathlib import Path
from sklearn.preprocessing import StandardScaler
import random
import math
import matplotlib.pyplot as plt
"""
Uncomment the commented-out section below to rebuild the dataset from image files on disk
instead of loading the pre-saved .npy files.
"""
#
#def data_collection(path_nface,path_face,N,D):
# """
# Prepare data from images
# path_nface = path where non face images are located
#
# path_face = path where face images are located
#
# N = number of images in your dataset for each class , in this case 12000
# D=number of features, in this case 60*60*3 =10800
#
# """
#
#
# t = np.zeros([N,D],dtype="float32")
# u = np.zeros([N,D],dtype="float32")
#
# b=0
# for img in glob.glob(str(path_nface/"*.jpg")):
#
# face= cv2.imread(img)
#
# t[b,:] = np.reshape(face, (1,D))
# b+=1
#
#
# a=0
# for img in glob.glob(str(path_face/"*.jpg")):
# nonface=cv2.imread(img)
# u[a,:] = np.reshape(nonface, (1,D))
# a+=1
#
# return t,u
#
#path=Path("C:/Users/priya/Desktop/computervision/Project 2")
#
#path_nface=path/"nonface"
#path_face = path/"face"
#N = 12000 #number of images in your dataset for each class , in this case 12000
#D=10800 #number of features, in this case 60*60*3 =10800
#T1=10000#number of training images for each class , in this case 10000
#T2=1000#number of testing images for each class , in this case 1000
#T3=1000#number of validation images for each class , in this case 1000
#indices = list(range(N))
#random.seed(4)
#random.shuffle(indices)
#"""
#Get the data using the function data_collection
#Also ,
#T1=number of training images for each class , in this case 10000
#T2=number of testing images for each class , in this case 1000
#T3=number of validation images for each class , in this case 1000
#
#"""
#total_nonfacedata,total_facedata =data_collection(path_nface,path_face,N,D)
#a=indices[:T1]
#b=indices[T1:T1+T2]
#c=indices[T1+T2:T1+T2+T3]
#
#y1=np.concatenate((np.zeros([T1,1]),np.ones([T1,1])),axis=0)
#y2=np.concatenate((np.ones([T1,1]),np.zeros([T1,1])),axis=0)
#y3=np.concatenate((y2,y1),axis=1)
#
#y4=np.concatenate((np.zeros([T2,1]),np.ones([T2,1])),axis=0)
#y5=np.concatenate((np.ones([T2,1]),np.zeros([T2,1])),axis=0)
#y6=np.concatenate((y5,y4),axis=1)
#
#y7=np.concatenate((np.zeros([T3,1]),np.ones([T3,1])),axis=0)
#y8=np.concatenate((np.ones([T3,1]),np.zeros([T3,1])),axis=0)
#y9=np.concatenate((y8,y7),axis=1)
#
##training data and labels, labels defined using one hot method
#train_data=np.concatenate((total_facedata[a],total_nonfacedata[a]))
#train_label=y3
#
##testing data and labels, labels defined using one hot method
#test_data=np.concatenate((total_facedata[b], total_nonfacedata[b]))
#test_label=y6
#
##validation data and labels, labels defined using one hot method
#valid_data=np.concatenate((total_facedata[c], total_nonfacedata[c]))
#valid_label=y9
#
#"""
#Save the data in .npy format for later use
#
#"""
#np.save('train_data.npy',train_data)
#np.save('train_label.npy',train_label)
#np.save('test_data.npy',test_data)
#np.save('test_label.npy',test_label)
#np.save('valid_data.npy',valid_data)
#np.save('valid_label.npy',valid_label)
"""Load already saved data"""
train_data=np.load('train_data.npy')
train_label=np.load('train_label.npy')
test_data=np.load('test_data.npy')
test_label=np.load('test_label.npy')
valid_data=np.load('valid_data.npy')
valid_label=np.load('valid_label.npy')
"""
Preprocess data to get zero centered and normalized data
"""
scaler = StandardScaler()
scaler.fit(train_data)
StandardScaler(copy=True, with_mean=True, with_std=True)
train_data = scaler.transform(train_data)
test_data = scaler.transform(test_data)
valid_data = scaler.transform(valid_data)
num_nodes= 1000
batch_size = 100
beta = 0.01
num_labels=2
size=10800
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder('float', shape=(batch_size, size))
tf_train_labels = tf.placeholder('float', shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_data)
tf_test_dataset = tf.constant(test_data)
# Variables.
weights_1 = tf.Variable(tf.truncated_normal([size, num_nodes]))
biases_1 = tf.Variable(tf.zeros([num_nodes]))
weights_2 = tf.Variable(tf.truncated_normal([num_nodes, num_labels]))
biases_2 = tf.Variable(tf.zeros([num_labels]))
# Training computation.
logits_1 = tf.matmul(tf_train_dataset, weights_1) + biases_1
relu_layer= tf.nn.relu(logits_1)
logits_2 = tf.matmul(relu_layer, weights_2) + biases_2
# Normal loss function
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits_2, labels=tf_train_labels))
# Loss function with L2 Regularization with beta=0.01
regularizers = tf.nn.l2_loss(weights_1) + tf.nn.l2_loss(weights_2)
loss = tf.reduce_mean(loss + beta * regularizers)
# global_step = tf.Variable(0) # count the number of steps taken.
# start_learning_rate = 0.5
# learning_rate = tf.train.exponential_decay(start_learning_rate, global_step, 500, 0.5, staircase=True)
# Optimizer.
optimizer = tf.train.AdamOptimizer().minimize(loss)
# Predictions for the training
train_prediction = tf.nn.softmax(logits_2)
# Predictions for validation
logits_1 = tf.matmul(tf_valid_dataset, weights_1) + biases_1
relu_layer= tf.nn.relu(logits_1)
logits_2 = tf.matmul(relu_layer, weights_2) + biases_2
valid_prediction = tf.nn.softmax(logits_2)
# Predictions for test
logits_1 = tf.matmul(tf_test_dataset, weights_1) + biases_1
relu_layer= tf.nn.relu(logits_1)
logits_2 = tf.matmul(relu_layer, weights_2) + biases_2
test_prediction = tf.nn.softmax(logits_2)
num_steps = 3001
losslist=[]
trainbatchacc=[]
validacc=[]
steps=[]
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_label.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_data[offset:(offset + batch_size), :]
batch_labels = train_label[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
#lr = learning_rate.eval()
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run([optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 3000 == 0):
steps.append(step)
losslist.append(l)
ta=accuracy(predictions, batch_labels)
trainbatchacc.append(ta)
va=accuracy(valid_prediction.eval(), valid_label)
validacc.append(va)
print("Minibatch loss at step {}: {}".format(step, l))
print("Minibatch accuracy: {:.1f}".format(ta))
print("Validation accuracy: {:.1f}".format(va))
testacc=accuracy(test_prediction.eval(), test_label)
print("Test accuracy: {:.1f}".format(testacc))
plt.figure(1)
plt.plot(steps,losslist)
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.title("Loss curve")
plt.show()
plt.figure(2)
p1,=plt.plot(steps,trainbatchacc,label="Training Accuracy")
p2,=plt.plot(steps,validacc,label="Validation Accuracy", linestyle='--')
plt.xlabel("Epoch")
plt.legend([p1, p2],["Training Accuracy","Validation Accuracy"])
plt.show()
|
[
"noreply@github.com"
] |
priyadiwakar.noreply@github.com
|
ec7639ddadc6ad338a0ad28c87af1740f8afa6e5
|
ebcf04a2a7755b65dad8fe758222e87e69bd005f
|
/professionals/views.py
|
6dacf4ff6fcc66faf868d0785ad1fd47a4106031
|
[] |
no_license
|
ezekieltech/theanalyticshub
|
845bce5bc2a106013496705c4ac1d1b3916dc7b5
|
3887585353596404d9e45c427a481df3d9a22c79
|
refs/heads/main
| 2023-04-25T01:31:30.417644
| 2021-05-17T21:28:22
| 2021-05-17T21:28:22
| 325,266,230
| 0
| 0
| null | 2021-05-17T21:28:22
| 2020-12-29T11:17:46
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,034
|
py
|
from django.shortcuts import render
from django.views import generic
from django.conf import settings
from django_hosts.resolvers import reverse_host
from account.models import CustomUser
from mainWebsite.models import Post, Service
from utilities.getUsernameFromSubdomain import getUsernameFromSubdomain
host = getUsernameFromSubdomain()
class UserDetail(generic.ListView):
model = CustomUser
template_name = 'professionals/profile.html'
def get_context_data(self, **kwargs):
context = super(UserDetail, self).get_context_data(**kwargs)
account = CustomUser.objects.get(username=host)
context['account'] = account
return context
class UserPosts(generic.ListView):
model = Post
template_name = 'professionals/blog.html'
def get_context_data(self, **kwargs):
context = super(UserPosts, self).get_context_data(**kwargs)
account = CustomUser.objects.get(username=host)
context['account'] = account
user_posts = Post.objects.filter(author__username=host)
context['user_posts'] = user_posts
all_projects = Post.objects.filter(author__username=host, post_type='project')
context['all_projects'] = all_projects
return context
class UserPostDetail(generic.DetailView):
model = Post
template_name = 'professionals/blog_detail.html'
def get_context_data(self, **kwargs):
context = super(UserPostDetail, self).get_context_data(**kwargs)
account = CustomUser.objects.get(username=host)
context['account'] = account
related_blog_post_by_service = Post.objects.filter(status=1, post_type= self.object.post_type, service=self.object.service).order_by('-created_on').exclude(title=self.object.title)
context['related_blog_post_by_service'] = related_blog_post_by_service[:5]
list_of_services = Service.objects.all # used for main and footer menu
context['list_of_services'] = list_of_services # used for main and footer menu
return context
|
[
"ezekielobhafuoso@gmail.com"
] |
ezekielobhafuoso@gmail.com
|
85a083b5cbd917e6f8a1b1ff4dd171598c7fad8a
|
ac59aa52ddb3aafa47508907247cc0ccac847b50
|
/config_app/migrations/0026_snmpconfigparameters_traps_activated.py
|
e71a7f5f6406a02c69375c7cab1a656ebbeff5f1
|
[
"MIT"
] |
permissive
|
radekska/django-network-controller
|
f07efa26bf068d2cc7ac832c08929436ba0d365a
|
6bcb847cbe1efa7dee118974de5e49b4f411e5da
|
refs/heads/master
| 2023-08-04T15:11:22.138718
| 2021-09-18T15:54:24
| 2021-09-18T15:54:24
| 298,096,472
| 0
| 0
|
MIT
| 2021-09-18T15:54:25
| 2020-09-23T21:16:47
|
CSS
|
UTF-8
|
Python
| false
| false
| 436
|
py
|
# Generated by Django 3.1.1 on 2020-11-01 10:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('config_app', '0025_auto_20201026_1845'),
]
operations = [
migrations.AddField(
model_name='snmpconfigparameters',
name='traps_activated',
field=models.BooleanField(blank=True, default=False, null=True),
),
]
|
[
"bp4fradek@gmail.com"
] |
bp4fradek@gmail.com
|
f75eeb964f8b417bd2871a3d445441e8686f8ee4
|
d6f10cb259d01313afabe96c35c71ac5e72e8263
|
/benben/__init__.py
|
b3194c411c92271d5c6086542c65f766824884e1
|
[] |
no_license
|
alemarpiz/benben
|
00840af6e0c057d06775fe53968eddc6834ea02a
|
0ea6aebc9ad07c50af0095a8c0321a00b83c2145
|
refs/heads/master
| 2020-12-02T21:05:34.001425
| 2013-04-15T21:54:19
| 2013-04-15T21:54:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 716
|
py
|
from benben.models import get_root
from pyramid.config import Configurator
from pyramid.threadlocal import get_current_registry
from sqlalchemy import engine_from_config
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
from benben.models import initialize_sql
engine = engine_from_config(settings, 'sqlalchemy.')
initialize_sql(engine, drop_all=True)
config = Configurator(settings=settings, root_factory=get_root)
config.add_static_view('static', 'static', cache_max_age=3600)
config.add_route('api', 'api*traverse')
config.scan()
return config.make_wsgi_app()
def get_settings():
return get_current_registry().settings
|
[
"david@glicksoftware.com"
] |
david@glicksoftware.com
|
d873671b3a3cfe8525278b3c30928ab16b59aadf
|
21e64f9410323a11d4550b889fd0bb0d68543fab
|
/apps/rss_feeds/migrations/0037_remove_story_author.py
|
6bddb79fe330e3ec0439cfb490873060cd36b240
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
quanganhdo/NewsBlur
|
a7eaa3c5bdb2e57998651d736db861f88fcd1e75
|
cef29f01658c845564a5044b48b4cf19efcaa4d6
|
refs/heads/master
| 2021-03-05T23:56:27.976498
| 2020-02-27T15:23:23
| 2020-02-27T15:23:23
| 246,164,347
| 1
| 0
|
MIT
| 2020-03-09T23:34:18
| 2020-03-09T23:34:17
| null |
UTF-8
|
Python
| false
| false
| 7,240
|
py
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Tag'
db.delete_table('rss_feeds_tag')
# Deleting model 'StoryAuthor'
db.delete_table('rss_feeds_storyauthor')
def backwards(self, orm):
# Adding model 'Tag'
db.create_table('rss_feeds_tag', (
('feed', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['rss_feeds.Feed'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
))
db.send_create_signal('rss_feeds', ['Tag'])
# Adding model 'StoryAuthor'
db.create_table('rss_feeds_storyauthor', (
('feed', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['rss_feeds.Feed'])),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('author_name', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
))
db.send_create_signal('rss_feeds', ['StoryAuthor'])
models = {
'rss_feeds.duplicatefeed': {
'Meta': {'object_name': 'DuplicateFeed'},
'duplicate_address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'duplicate_feed_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'duplicate_addresses'", 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'rss_feeds.feed': {
'Meta': {'ordering': "['feed_title']", 'object_name': 'Feed', 'db_table': "'feeds'"},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'active_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1', 'db_index': 'True'}),
'average_stories_per_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'creation': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'days_to_trim': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
'etag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'exception_code': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'feed_address': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'feed_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'feed_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fetched_once': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_feed_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'has_page_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_load_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'min_to_decay': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'next_scheduled_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'num_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'premium_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'queued_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'stories_last_month': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'rss_feeds.feeddata': {
'Meta': {'object_name': 'FeedData'},
'feed': ('utils.fields.AutoOneToOneField', [], {'related_name': "'data'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
'feed_tagline': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'popular_authors': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'popular_tags': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'story_count_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedloadtime': {
'Meta': {'object_name': 'FeedLoadtime'},
'date_accessed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'loadtime': ('django.db.models.fields.FloatField', [], {})
},
'rss_feeds.feedpage': {
'Meta': {'object_name': 'FeedPage'},
'feed': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'feed_page'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page_data': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedupdatehistory': {
'Meta': {'object_name': 'FeedUpdateHistory'},
'average_per_feed': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '1'}),
'fetch_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number_of_feeds': ('django.db.models.fields.IntegerField', [], {}),
'seconds_taken': ('django.db.models.fields.IntegerField', [], {})
},
'rss_feeds.feedxml': {
'Meta': {'object_name': 'FeedXML'},
'feed': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'feed_xml'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rss_xml': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['rss_feeds']
|
[
"samuel@ofbrooklyn.com"
] |
samuel@ofbrooklyn.com
|
58cb122a9f766c59712b222fb978b8441746c9ae
|
1aaca8a5a7dc142ca7c112ea27b17e24ccfca781
|
/Day01/__init__.py
|
15c996c80f973beb81630afab50056b5ac1fad75
|
[] |
no_license
|
TWI1/AdventOfCode2016
|
90e9caa3f4f37233e609f0123457b3d97a0044cb
|
b6f9839bc4a51128c631efc575644ab6f7c88154
|
refs/heads/master
| 2021-01-11T22:49:56.972092
| 2017-08-19T19:49:09
| 2017-08-19T19:49:09
| 78,510,240
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,247
|
py
|
#get the input
#create a method Solver - input is a list of doubles - direction and length, output is a number of blocks
#global vars - x, y, direction
#if direction is 0 (north), I'm adding to x
#if direction is 1 (east), I'm adding to y
#if direction is 2 (south), I'm subtracting from x
#if direction is 3 (west), I'm subtracting from y
#for R -> direction=direction+1
#for L -> direction=direction-1
#I may use a division reminder of direction / 4
#Done:
#*put it into an array or List
#*add code to calculate taxiCabDistance
input="R1, L2, R3"
input2="R5, L2, L1, R1, R3, R3, L3, R3, R4, L2, R4, L4, R4, R3, L2, L1, L1, R2, R4, R4, L4, R3, L2, R1, L4, R1, R3, L5, L4, L5, R3, L3, L1, L1, R4, R2, R2, L1, L4, R191, R5, L2, R46, R3, L1, R74, L2, R2, R187, R3, R4, R1, L4, L4, L2, R4, L5, R4, R3, L2, L1, R3, R3, R3, R1, R1, L4, R4, R1, R5, R2, R1, R3, L4, L2, L2, R1, L3, R1, R3, L5, L3, R5, R3, R4, L1, R3, R2, R1, R2, L4, L1, L1, R3, L3, R4, L2, L4, L5, L5, L4, R2, R5, L4, R4, L2, R3, L4, L3, L5, R5, L4, L2, R3, R5, R5, L1, L4, R3, L1, R2, L5, L1, R4, L1, R5, R1, L4, L4, L4, R4, R3, L5, R1, L3, R4, R3, L2, L1, R1, R2, R2, R2, L1, L1, L2, L5, L3, L1"
def parse_input(input):
list=[x.strip() for x in input.split(',')]
doubles=[]
for i in list:
doubles.append((i[0],i[1:]))
return doubles
def day0_solver(input):
x=0
y=0
direction=0
path=[]
crossSections=[]
path.append([x,y])
for i in input:
if i[0]=='R':
direction=direction+1
else: #=='L'
direction=direction-1
for j in range(int(i[1])):
if direction%4==0:
x=x+1
elif direction%4==1:
y=y+1
elif direction%4==2:
x=x-1
else:
y=y-1
if ([x,y] in path):
crossSections.append([x,y])
path.append([x,y])
crossSectionsDistances=[]
for i in crossSections:
crossSectionsDistances.append(abs(i[0])+abs(i[1]))
return [abs(x)+abs(y), crossSectionsDistances]
l=parse_input(input2)
print(day0_solver(l))
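# Worked example (added for illustration): for the small test string "R1, L2, R3" defined above,
# the walk ends at (2, 4) and never revisits a point, so day0_solver(parse_input(input))
# returns [6, []] -- a taxicab distance of 6 and an empty list of crossing distances.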
|
[
"twinsen@mainbox"
] |
twinsen@mainbox
|
bfbca156f14639004cba3ff38eb6bb1430924d4d
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02898/s358750792.py
|
b820a14fcc2dce85d984a436348f64beb8ef6863
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 127
|
py
|
n=input().split()
h=input().split()
count=0
for i in range(len(h)):
if int(h[i])>=int(n[1]):
count+=1
print(count)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
783203ad1f6026a89b93743dcae8d305dbb86752
|
f523332c62c0d82b5483864ad09b0a4a5e4afca1
|
/training/versuch1/ranking/trainModelWithHyperParameterTuningRanking.py
|
584f47812985e0c3bcc62cdc6f804adf8e6cb464
|
[] |
no_license
|
Martinay/masterthesis_code
|
081d838500f583517f428d5a1cae4ef2e423b220
|
d15cce5880a3630d149f84cccf69a57713b80daa
|
refs/heads/master
| 2020-12-11T13:53:01.423396
| 2020-01-14T15:01:41
| 2020-01-14T15:01:41
| 233,864,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,794
|
py
|
import ptvsd
ptvsd.enable_attach(address=('0.0.0.0', 5678))
# ptvsd.wait_for_attach()
from tensorflow import keras
from tensorflow.keras import layers, metrics
from tensorflow.keras.callbacks import EarlyStopping
from kerastuner.tuners import Hyperband
from kerastuner.engine.hypermodel import HyperModel
from kerastuner.engine.hyperparameters import HyperParameters
import sys
import numpy as np
import pandas as pd
from keras.preprocessing.text import Tokenizer
from keras_preprocessing.text import tokenizer_from_json
data_directory = sys.argv[1]
tokenizer_directory = sys.argv[2]
model_directory = sys.argv[3]
tokenizer_character_count = int(sys.argv[4])
tokenizer_word_count = int(sys.argv[5])
training_data_path = data_directory + "trainingRanking.csv"
test_data_path = data_directory + "testRanking.csv"
validation_data_path = data_directory + "validationRanking.csv"
model_onnx_file_path = model_directory + "model.onnx"
model_keras_file_path = model_directory + "model.h5"
model_history_file_path = model_directory + "history.json"
training_csv_file_path = model_directory + "training_history.csv"
model_evaluation_file_path = model_directory + "evaluation.json"
model_tuning_directory = model_directory + "model_tuning"
tokenizer_names = ["structure01",
"structure02",
"structure03",
"structure04",
"structure05",
"structure06",
"structure07"]
def read_text(path):
with open(path, "r", encoding="utf-8") as text_file:
text = text_file.read()
return text
def write_text(path, text):
with open(path, "w", encoding="utf-8") as text_file:
print(text, file=text_file)
def load_tokenizers():
jsons_characters = [(read_text("{0}tokenizer_{1}.json".format(tokenizer_directory, name)), name) for name in tokenizer_names]
jsons_words = [(read_text("{0}tokenizer_{1}_words.json".format(tokenizer_directory, name)), "{0}_words".format(name)) for name in tokenizer_names]
jsons = jsons_characters + jsons_words
tokenizers = {name: tokenizer_from_json(json) for json, name in jsons}
for key, tokenizer in tokenizers.items(): # fix string interpretation after loading tokenizer
tokenizer.num_words = int(tokenizer.num_words)
return tokenizers
print("load data")
training_set_features = pd.read_csv(training_data_path, delimiter='\t', converters={i: str for i in range(0, 100)})
validation_set_features = pd.read_csv(validation_data_path, delimiter='\t', converters={i: str for i in range(0, 100)})
test_set_features = pd.read_csv(test_data_path, delimiter='\t', converters={i: str for i in range(0, 100)})
header_index_with_template_categories = [i for i, x in enumerate(training_set_features.columns.values) if x.startswith("Template_category")]
print("load tokenizer")
tokenizers = load_tokenizers()
print("encode features")
def encode_features(dataset):
features = {}
for name in tokenizer_names:
encoded_template = tokenizers[name].texts_to_matrix(dataset['Template_{0}'.format(name)], mode="tfidf")
features['Template_{0}'.format(name)] = encoded_template
encoded_basic = tokenizers[name].texts_to_matrix(dataset['Basic_{0}'.format(name)], mode="tfidf")
features['Basic_{0}'.format(name)] = encoded_basic
for name in tokenizer_names:
encoded_template = tokenizers['{0}_words'.format(name)].texts_to_matrix(dataset['Template_{0}_words'.format(name)], mode="tfidf")
features['Template_{0}_words'.format(name)] = encoded_template
encoded_basic = tokenizers['{0}_words'.format(name)].texts_to_matrix(dataset['Basic_{0}_words'.format(name)], mode="tfidf")
features['Basic_{0}_words'.format(name)] = encoded_basic
return features
encoded_features_training = encode_features(training_set_features)
encoded_features_validation = encode_features(validation_set_features)
encoded_features_test = encode_features(test_set_features)
print("load x any y")
def get_x_y(dataset, encoded_features):
matching = np.array(dataset["ranking"])
y = matching.astype(np.int)
x = encoded_features
x["Template_Count"] = np.array([[int(x)] for x in dataset["Template_count"].values])
x["Template_Categories"] = np.array(dataset.iloc[ : , header_index_with_template_categories ].values)
x["Template_Categories"] = x["Template_Categories"].astype(np.int)
return (x, y)
x_train, y_train = get_x_y(training_set_features, encoded_features_training)
x_validation, y_validation = get_x_y(validation_set_features, encoded_features_validation)
x_test, y_test = get_x_y(test_set_features, encoded_features_test)
class MyHyperModel(HyperModel):
def __init__(self, len_template_categories, tokenizer_word_count, tokenizer_character_count):
self.len_template_categories = len_template_categories
self.tokenizer_word_count = tokenizer_word_count
self.tokenizer_character_count = tokenizer_character_count
def build(self, hp):
dropout_rate = hp.Choice('dropout_rate', [0.5, 0.2])
input_template_categories = layers.Input(shape=(self.len_template_categories,), name='Template_Categories')
dense_template_categories = layers.Dense(hp.Int('dense_category_units', 2, 10, step=2), activation='relu')(input_template_categories)
input_placement_count = layers.Input(shape=(1,), name='Template_Count')
input_words = []
convolutional_basic_words = []
convolutional_template_words = []
template_words_convolutional_kernel_size = hp.Int('template_words_convolutional_kernel_size', 2, 10, step=2)
basic_words_convolutional_kernel_size = hp.Int('basic_words_convolutional_kernel_size', 2, 10, step=2)
template_words_convolutional_count = hp.Int('template_words_convolutional_count', 16, 64, step=16)
basic_words_convolutional_count = hp.Int('basic_words_convolutional_count', 16, 64, step=16)
template_words_second_convolutional = hp.Choice('template_words_second_convolutional', [True, False])
basic_words_second_convolutional = hp.Choice('basic_words_second_convolutional', [True, False])
with hp.conditional_scope('template_words_second_convolutional', [True]):
template_words_second_convolutional_kernel_size = hp.Int('template_words_second_convolutional_kernel_size', 2, 10, step=2)
template_words_second_convolutional_count = hp.Int('template_words_second_convolutional_count', 8, 32, step=8)
with hp.conditional_scope('basic_words_second_convolutional', [True]):
basic_words_second_convolutional_kernel_size = hp.Int('basic_words_second_convolutional_kernel_size', 2, 10, step=2)
basic_words_second_convolutional_count = hp.Int('basic_words_second_convolutional_count', 8, 32, step=8)
for name in tokenizer_names:
input_layer = layers.Input(shape=(self.tokenizer_word_count,), name='Template_{0}_words'.format(name))
embedding_layer = layers.Embedding(self.tokenizer_word_count, 4)(input_layer)
convolutional_layer = layers.Conv1D(template_words_convolutional_count, kernel_size=template_words_convolutional_kernel_size, activation='relu')(embedding_layer)
if template_words_second_convolutional:
convolutional_layer = layers.Conv1D(template_words_second_convolutional_count, kernel_size=template_words_second_convolutional_kernel_size, activation='relu')(convolutional_layer)
input_words.append(input_layer)
            convolutional_template_words.append(convolutional_layer)
#
input_layer = layers.Input(shape=(self.tokenizer_word_count,), name='Basic_{0}_words'.format(name))
embedding_layer = layers.Embedding(self.tokenizer_word_count, 4)(input_layer)
convolutional_layer = layers.Conv1D(basic_words_convolutional_count, kernel_size=basic_words_convolutional_kernel_size, activation='relu')(embedding_layer)
if basic_words_second_convolutional:
convolutional_layer = layers.Conv1D(basic_words_second_convolutional_count, kernel_size=basic_words_second_convolutional_kernel_size, activation='relu')(convolutional_layer)
input_words.append(input_layer)
            convolutional_basic_words.append(convolutional_layer)
#
input_chars = []
convolutional_template_char = []
convolutional_basic_char = []
template_char_second_convolutional = hp.Choice('template_char_second_convolutional', [True, False])
basic_char_second_convolutional = hp.Choice('basic_char_second_convolutional', [True, False])
template_char_convolutional_kernel_size = hp.Int('template_char_convolutional_kernel_size', 2, 10, step=2)
basic_char_convolutional_kernel_size = hp.Int('basic_char_convolutional_kernel_size', 2, 10, step=2)
template_char_convolutional_count = hp.Int('template_char_convolutional_count', 16, 64, step=16)
basic_char_convolutional_count = hp.Int('basic_char_convolutional_count', 16, 64, step=16)
with hp.conditional_scope('template_char_second_convolutional', [True]):
template_char_second_convolutional_kernel_size = hp.Int('template_char_second_convolutional_kernel_size', 2, 10, step=2)
template_char_second_convolutional_count = hp.Int('template_char_second_convolutional_count', 8, 32, step=8)
with hp.conditional_scope('basic_char_second_convolutional', [True]):
basic_char_second_convolutional_kernel_size = hp.Int('basic_char_second_convolutional_kernel_size', 2, 10, step=2)
basic_char_second_convolutional_count = hp.Int('basic_char_second_convolutional_count', 8, 32, step=8)
for name in tokenizer_names:
input_layer = layers.Input(shape=(self.tokenizer_character_count,), name='Template_{0}'.format(name))
embedding_layer = layers.Embedding(self.tokenizer_character_count, 4)(input_layer)
convolutional_layer = layers.Conv1D(template_char_convolutional_count, kernel_size=template_char_convolutional_kernel_size, activation='relu')(embedding_layer)
if template_char_second_convolutional:
convolutional_layer = layers.Conv1D(template_char_second_convolutional_count, kernel_size=template_char_second_convolutional_kernel_size, activation='relu')(convolutional_layer)
input_chars.append(input_layer)
            convolutional_template_char.append(convolutional_layer)
#
input_layer = layers.Input(shape=(self.tokenizer_character_count,), name='Basic_{0}'.format(name))
embedding_layer = layers.Embedding(self.tokenizer_character_count, 4)(input_layer)
convolutional_layer = layers.Conv1D(basic_char_convolutional_count, kernel_size=basic_char_convolutional_kernel_size, activation='relu')(embedding_layer)
if basic_char_second_convolutional:
convolutional_layer = layers.Conv1D(basic_char_second_convolutional_count, kernel_size=basic_char_second_convolutional_kernel_size, activation='relu')(convolutional_layer)
input_chars.append(input_layer)
            convolutional_basic_char.append(convolutional_layer)
#
layer_template_words = layers.concatenate(convolutional_template_words)
layer_template_words = layers.Flatten()(layer_template_words)
layer_template_words = layers.Dense(hp.Int('layer_template_words_units', 64, 512, step=64), activation='relu')(layer_template_words)
if hp.Choice('layer_template_words_dropout', [True, False]):
layer_template_words = layers.Dropout(dropout_rate)(layer_template_words)
layer_basic_words = layers.concatenate(convolutional_basic_words)
layer_basic_words = layers.Flatten()(layer_basic_words)
layer_basic_words = layers.Dense(hp.Int('layer_basic_words_units', 64, 256, step=64), activation='relu')(layer_basic_words)
if hp.Choice('layer_basic_words_dropout', [True, False]):
layer_basic_words = layers.Dropout(dropout_rate)(layer_basic_words)
layer_words = layers.concatenate([layer_template_words, layer_basic_words])
layer_words = layers.Dense(hp.Int('layer_basic_words', 32, 128, step=32), activation='relu')(layer_words)
#
layer_template_char = layers.concatenate(convolutional_template_char)
layer_template_char = layers.Flatten()(layer_template_char)
layer_template_char = layers.Dense(hp.Int('layer_template_char_nits', 64, 512, step=64), activation='relu')(layer_template_char)
if hp.Choice('layer_template_char_dropout', [True, False]):
layer_template_char = layers.Dropout(dropout_rate)(layer_template_char)
layer_basic_char = layers.concatenate(convolutional_basic_char)
layer_basic_char = layers.Flatten()(layer_basic_char)
layer_basic_char = layers.Dense(hp.Int('layer_basic_char_units', 64, 512, step=64), activation='relu')(layer_basic_char)
if hp.Choice('layer_basic_char_dropout', [True, False]):
layer_basic_char = layers.Dropout(dropout_rate)(layer_basic_char)
layer_chars = layers.concatenate([layer_template_char, layer_basic_char])
layer_chars = layers.Dense(hp.Int('layer_basic_words', 32, 128, step=32), activation='relu')(layer_chars)
#
layer = layers.concatenate([layer_words, layer_chars])
layer = layers.Dense(hp.Int('layer_basic_words', 16, 64, step=16), activation='relu')(layer)
layer = layers.Dropout(dropout_rate)(layer)
layer = layers.concatenate([dense_template_categories, input_placement_count, layer])
layer = layers.Dense(hp.Int('layer_basic_words', 16, 32, step=16), activation='relu')(layer)
layer = layers.Dropout(dropout_rate)(layer)
layer = layers.Dense(1)(layer)
inputs = [input_template_categories, input_placement_count] + input_words + input_chars
model = keras.Model(inputs=inputs, outputs=[layer])
model.compile(
optimizer=keras.optimizers.Adagrad(hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
loss='huber_loss',
metrics=[metrics.MeanSquaredError(), metrics.MeanAbsoluteError()])
return model
hyperModel = MyHyperModel(
len(header_index_with_template_categories),
tokenizer_word_count,
tokenizer_character_count)
tuner = Hyperband(
hyperModel,
objective='mean_squared_error',
max_epochs=40,
directory=model_tuning_directory,
project_name='masterthesis')
tuner.search(x_train,
y=y_train,
epochs=100,
validation_data=(x_validation, y_validation),
callbacks=[EarlyStopping('val_loss', patience=3)])
model = tuner.get_best_models(num_models=1)[0]
tuner.results_summary()
print("done")
|
[
"4411119+Martinay@users.noreply.github.com"
] |
4411119+Martinay@users.noreply.github.com
|
f90d78e9b315ef2b087db8e4b081d2ea77b0be6d
|
4ab08b3a9c0239fbcb774c81b1bcec6cd82744c7
|
/aws/aws/urls.py
|
822332ea08a397dab504d2c96bf9ff7727360a72
|
[] |
no_license
|
yongdol/awstest
|
7e0cbaa1cb9ac84a678cf9f41b92ec30ed9eeca1
|
7fc0804bb2f458fb12aa50e4a07bbf23cc26bea4
|
refs/heads/master
| 2020-12-24T12:20:51.285992
| 2016-11-08T07:38:17
| 2016-11-08T07:38:17
| 73,050,397
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 760
|
py
|
"""aws URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
|
[
"kungbuk77@gmail.com"
] |
kungbuk77@gmail.com
|
fe4212bddcc8ceeddcff2a7c7216ecd8a3f98102
|
dfe984dbe420f86c4af22355432951179dc7eeea
|
/src/tests/test_album_actions.py
|
137c5ef23a9ff112dd05d0722a4f9122a4188763
|
[
"Apache-2.0"
] |
permissive
|
svnovikov/jsonplaceholder-tests
|
dd8c8f746002218957bc02c89ebc3d712a2efe28
|
583c3248c8cbd167e3a8d6efffc90fd7074e6559
|
refs/heads/master
| 2023-01-12T02:14:24.582395
| 2018-10-15T09:28:45
| 2018-10-15T09:28:45
| 152,973,677
| 1
| 0
|
Apache-2.0
| 2022-12-27T15:44:30
| 2018-10-14T12:55:23
|
Python
|
UTF-8
|
Python
| false
| false
| 5,153
|
py
|
import pytest
from src.utils import check_fields
@pytest.mark.albums
@pytest.mark.parametrize(
'endpoint, expected_code',
[
('/albums/1', 200),
('/albums/0', 404),
('/albums/100', 200),
('/albums/101', 404),
('/users/1/albums', 200),
('/users/11/albums', 404)
])
def test_get_album(client, endpoint, expected_code):
code, body = client.get(endpoint)
assert code == expected_code, \
'Actual response code does not equal expected code: {} != {}' \
.format(code, expected_code)
@pytest.mark.albums
@pytest.mark.parametrize(
'endpoint, data, expected_code',
[
('/albums', {'title': 'foo', 'userId': '1'}, 201),
('/users/1/albums', {'title': 'foo'}, 201),
('/albums', {'title': 'foo'}, 400),
('/albums', {'title': 'foo', 'userId': 11}, 400),
('/albums', {}, 400),
('/albums', {'id': 101}, 400),
('/albums', {'id': 1}, 400),
('/albums/1', {'id': 1}, 400)
])
def test_create_album(client, endpoint, data, expected_code):
code, body = client.post(
endpoint,
data=data)
assert code == expected_code, \
'Actual response code does not equal expected code: {} != {}'\
.format(code, expected_code)
if code == 201:
assert body.get('id'), 'The resource does not have ID!'
# NOTE: Assume we send data with correct fields and
# received data should contain all fields of sent data
assert check_fields(data, body),\
'Received data {} should contain' \
'all fields of sent data {}!'.format(body, data)
@pytest.mark.albums
@pytest.mark.parametrize(
'endpoint, data, expected_code, user_id',
[
('/users/1/albums', {'title': 'foo'}, 201, '1'),
('/users/2/albums', {'title': 'foo'}, 201, '2'),
('/users/11/albums', {'title': 'foo'}, 404, None),
('/users/1/albums', {'id': 101}, 400, None),
('/users/1/albums', {'id': 1}, 400, None)
])
def test_create_user_album(client, endpoint, data, expected_code, user_id):
code, body = client.post(
endpoint,
data=data)
assert code == expected_code, \
'Actual response code does not equal expected code: {} != {}'\
.format(code, expected_code)
if code == 201:
assert body.get('id'), 'The resource does not have ID!'
# NOTE: Assume we send data with correct fields and
# received data should contain all fields of sent data
assert user_id == body.get('userId'), \
'UserId is not correct!'
@pytest.mark.albums
@pytest.mark.parametrize(
'endpoint, data, expected_code',
[
('/albums/1', {'title': 'foo', 'userId': '1'}, 200),
('/albums/1', {'title': 'foo'}, 200),
('/albums/1', {}, 400),
('/albums/1', {'id': 101}, 400),
('/albums/1', {'id': 10}, 400),
('/albums/101', {'title': 'foo', 'userId': '1'}, 404)
])
def test_update_album(client, endpoint, data, expected_code):
code, body = client.put(
endpoint,
data=data)
assert code == expected_code, \
'Actual response code does not equal expected code: {} != {}'\
.format(code, expected_code)
if code == 200:
assert body.get('id'), 'The resource does not have ID!'
# NOTE: Assume we send data with correct fields and
# received data should contain all fields of sent data
assert check_fields(data, body),\
'Received data {} should contain' \
'all fields of sent data {}!'.format(body, data)
@pytest.mark.albums
@pytest.mark.parametrize(
'endpoint, expected_code',
[
('/albums/1', 200),
('/albums/0', 404),
('/albums/100', 200),
('/albums/101', 404)
])
def test_delete_album(client, endpoint, expected_code):
code, body = client.delete(endpoint)
assert code == expected_code, \
'Actual response code does not equal expected code: {} != {}' \
.format(code, expected_code)
@pytest.mark.albums
@pytest.mark.parametrize(
'filter_endpoint, check_endpoints',
[
('/albums?id=1', ['/albums/1']),
('/albums?id=0', []),
('/albums?id=100', ['/albums/100']),
('/albums?id=101', []),
('/albums?id=101&id=1', ['/albums/1']),
('/albums?id=100&id=1', ['/albums/1', '/albums/100']),
('/albums?userId=1', ['/users/1/albums']),
('/albums?userId=1&userId=2', ['/users/1/albums', '/users/2/albums'])
])
def test_filter_albums(client, filter_endpoint, check_endpoints):
code, body = client.get(filter_endpoint)
assert code == 200, \
'Actual response code does not equal expected code: {} != 200' \
.format(code)
if not check_endpoints:
return
check_bodies = []
for check_endpoint in check_endpoints:
_, check_body = client.get(check_endpoint)
if isinstance(check_body, list):
check_bodies.extend(check_body)
else:
check_bodies.append(check_body)
assert body == check_bodies, 'Filter does not work!'
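# Illustration only (added, not part of the original test module): the imported
# `check_fields` helper is not shown in this file. Based on how the assertions
# above describe it ("received data should contain all fields of sent data"),
# a minimal sketch could look like the function below; the real src.utils
# implementation may differ.
def _check_fields_sketch(sent, received):
    return all(received.get(key) == value for key, value in sent.items())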
|
[
"snovikov@mirantis.com"
] |
snovikov@mirantis.com
|
cf8eff9dea17234409bec5e7ad16ea03ff90a305
|
984959f729615658dc4e2947db33e92d3588ca9d
|
/crawler/config.py
|
7ff9019c36b050adb5c0d7f3f7ee83f605b5d5c2
|
[
"MIT"
] |
permissive
|
plrthink/musicians
|
153c5a8d127a2963c310f81c3226270cd90bf1e1
|
dfd914a384f9609d44948cb21514e09ab6fb19cd
|
refs/heads/master
| 2020-12-11T05:51:55.944597
| 2014-12-21T08:35:13
| 2014-12-21T08:35:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
from firebase import firebase
MUSICIANS_PAGE_PICKLE_PATH = 'musicians.pickle'
FIREBASE_BASE_PATH = "https://musicians.firebaseio.com/"
MUSICIAN_STORE = 'musicians'
firebase = firebase.FirebaseApplication(
FIREBASE_BASE_PATH,
None
)
|
[
"iseansay@gmail.com"
] |
iseansay@gmail.com
|
64d7ab6ab96aaa73f4156389bc168f83c3e2dac1
|
29084eefb0dcf231b94f341b7cec00c221b52a6c
|
/lib/reast.py
|
df7a9cb31d31ecaef80f0a118cf5e5560384b8f1
|
[] |
no_license
|
akechi/todobot
|
156615546b8d97cde7db2ff0ed4b14a6acf103d4
|
060340e205e389c6e395e3c53a4c764045060d0b
|
refs/heads/master
| 2020-06-03T21:18:06.086923
| 2013-08-09T13:44:37
| 2013-08-09T13:44:37
| 10,821,776
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,993
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re
import inspect
from inspect import Parameter
from functools import partial
from itertools import filterfalse
class Capture(object):
def __init__(self, astnode, parent=None):
assert isinstance(astnode, ASTNode)
self.children = {}
self.parent = parent
self.named_matches = []
self.astnode = astnode
@property
def name(self):
return self.astnode.name
def make_child(self, astnode):
c = Capture(astnode, self)
name = astnode.name
if name in self.children:
xs = self.children[name]
xs.append(c)
self.children[name] = xs
else:
self.children[name] = [c]
return c
def __getitem__(self, name):
return self.children[name]
def __contains__(self, name):
return name in self.children
def __iter__(self):
for xs in self.children.values():
for x in xs:
yield x
def pprint(self, indent=None):
if indent is None:
indent = 0
print(' '*indent + self.name)
for c in self:
c.pprint(indent+4)
@property
def regexp_name(self):
return self.astnode.regexp_name
def associate(self, d):
'''
associate regular expression match object groupdict() and ast.
'''
seen = dict()
for c in self:
p = c.regexp_name
if p in d and d[p] is not None:
seen[p] = c
c.named_matches.append(p)
seen.update(c.associate(d))#{k: v for k, v in d.items() if k != p}))
return seen
@property
def n_lets(self):
if self.parent is None:
return []
return self.parent[self.name]
@property
def multimatch(self):
return len(self.n_lets) > 1
class ASTNode(object):
c = ''
d = ''
def __init__(self, parent=None):
self.parent = parent
self.children = []
@property
def regexp_name(self):
if self.parent is None:
return ''
return self.parent.regexp_name
@property
def path(self):
if self.parent:
x = getattr(self, 'name', None)
if x:
return self.parent.path + (x, )
return self.parent.path
else:
return ()
def find(self, spec, kls=None):
if kls is None:
kls = _named
found = set()
def enter(node):
if node.path == spec and isinstance(node, kls):
found.add(node)
def leave(node):
pass
self.visit(enter, leave)
return found
def visit(self, enter, leave):
enter(self)
for c in self.children:
c.visit(enter, leave)
leave(self)
def make_pat(self):
return "(?:{}){}".format(
self.c.join([c.make_pat() for c in self.children]),
self.d)
def compile(self):
return re.compile(self.make_pat())
def make_capture(self):
root = Capture(self)
stack = [root]
def enter(rnode):
if isinstance(rnode, _named):
top = stack[-1]
c = top.make_child(rnode)
stack.append(c)
def leave(rnode):
if isinstance(rnode, _named):
assert stack[-1].name == rnode.name
stack.pop(-1)
self.visit(enter, leave)
return root
class _Or(ASTNode):
c = '|'
d = ''
class _Cat(ASTNode):
c = ''
d = ''
class _Option(ASTNode):
c = ''
d = '?'
class _OneOrMore(ASTNode):
c = ''
d = '+'
class _ZeroOrMore(ASTNode):
c = ''
d = '*'
class _unnamed(ASTNode):
def __init__(self, parent, pat):
ASTNode.__init__(self, parent)
self.pat = pat
def make_pat(self):
return "(?:{0}{1}{2})".format(self.pat,
self.c.join([c.make_pat() for c in self.children]),
self.d)
class _named(_unnamed):
SEP = '_'
def __init__(self, parent, name, pat):
_unnamed.__init__(self, parent, pat)
self.name = name
@property
def regexp_name(self):
if self.parent is None:
return self.SEP + self.name
assert isinstance(self.parent, ASTNode)
return self.parent.regexp_name+ self.SEP + self.name
def make_pat(self):
return r"(?P<{0}>{1}{2})".format(self.regexp_name, self.pat,
self.c.join([c.make_pat() for c in self.children]),
self.d)
class _counted(_named):
def __init__(self, parent, name, pat):
_named.__init__(self, parent, name, pat)
@property
def regexp_name(self):
if self.parent is None:
return self.SEP + self.name
assert isinstance(self.parent, ASTNode)
return self.parent.regexp_name+ self.SEP + self.name + "{}".format(id(self))
class Builder(object):
ast_class = ASTNode
def __init__(self, *xs):
self.xs = xs
def build(self, parent=None):
assert isinstance(parent, ASTNode) or parent is None
param = [x for x in self.xs if not isinstance(x, Builder)]
#print(self.ast_class, param)
node = self.ast_class(parent, *param)
node.children = [x.build(node) for x in self.xs if isinstance(x, Builder)]
return node
class Or(Builder):
ast_class = _Or
class Cat(Builder):
ast_class = _Cat
class Option(Builder):
ast_class = _Option
class OneOrMore(Builder):
ast_class = _OneOrMore
class ZeroOrMore(Builder):
ast_class = _ZeroOrMore
class unnamed(Builder):
ast_class = _unnamed
class named(Builder):
ast_class = _named
class counted(Builder):
ast_class = _counted
def bindable(assoc, d, nots):
result = {}
for k, v in filterfalse(lambda x : x[1].name in nots , assoc.items()):
if v.multimatch:
x = result.get(v.name, None)
if x is None:
x = set({})
x.add(d[k])
else:
x = d[k]
result[v.name] = x
return result
def findbind(f, d):
'''
success: able to call f with **d
fail:
missing <name>: first arg named <name> is needed to call f
TooManyFound <name>: key <name> in d is not used in f.
limitations:
user MUST supply name.
cannot use positional only parameters
'''
sig = inspect.signature(f)
missing = set([])
toomany = set(d.keys())
for p in sig.parameters.values():
assert p.kind is not Parameter.POSITIONAL_ONLY
k = p.name
if k in d:
toomany.remove(k)
if k not in d and p.default is Parameter.empty:
''' if f has default,
we donot need to supply'''
missing.add(k)
return missing, toomany,
if __name__ == '__main__':
ws = unnamed(" ")
def may_be(*xs):
return Option(OneOrMore(ws), Option(*xs))
description = named("description", ".+")
nicknames = counted("nicknames", "[a-zA-Z@][a-zA-Z0-9_]*")
nickname = named("nickname", "[a-zA-Z@][a-zA-Z0-9_]*")
comma = unnamed(",")
x = Cat(Or(named("add", "add", may_be(description)),
named("addto", "addto",
may_be(
Option(nicknames, comma),
Option(nicknames, comma),
Option(nicknames, comma),
Option(nicknames, comma),
ZeroOrMore(named("too_many", "", nickname, comma)),
Option(nickname, unnamed("(?!,)"))),
may_be(description))
), unnamed("$"))
x = x
t = x.build()
cap = t.make_capture()
r = t.compile()
m = r.match("addto raa0121,deris0126,thinca hogehoge")
d = m.groupdict()
print(d)
assoc = cap.associate(d)
print(assoc)
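# Hedged usage sketch (added for illustration, not in the original file):
# findbind() reports which keyword arguments are missing and which are unused
# when attempting to call a function with a given dict.
def _handler(nickname, description=''):
    return nickname, description
missing, toomany = findbind(_handler, {'nickname': 'foo', 'extra': 1})
print(missing, toomany)  # expected: set() {'extra'}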
|
[
"bgnori@gmail.com"
] |
bgnori@gmail.com
|
fd575ac1cde7dd951bbf9862a7ee743a2b207ee6
|
b975bcfe4c15a22e3ab5a14d95890a2339e8f132
|
/python/167.py
|
0c9ad42b89238fbefe33d5bb9c0a8e3cf295a0f9
|
[
"Apache-2.0"
] |
permissive
|
HymEric/LeetCode
|
54987f6b8396beb084e8a41db3e10edbb1c7eb16
|
e32439a76968d67f99881b6d07fb16e21c979c9e
|
refs/heads/master
| 2020-08-01T19:39:48.229668
| 2020-02-13T14:15:33
| 2020-02-13T14:15:33
| 211,094,924
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,087
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2019/10/6 0006 15:31
# @Author : Erichym
# @Email : 951523291@qq.com
# @File : 167.py
# @Software: PyCharm
class Solution:
def twoSum(self, numbers: list, target: int) -> list:
low,high=-1,-1
for i in range(len(numbers)):
try:
j=numbers.index(target-numbers[i],i+1)
except:
# print(i)
if i == len(numbers)-1:
return None
else:
low=i
high=j
break
return [low+1,high+1]
def twoSum2(self, numbers: list, target: int) -> list:
low,high=-1,-1
nums_dict={}
for i in range(len(numbers)):
nums_dict[numbers[i]]=i
for i in range(len(numbers)):
j=nums_dict.get(target-numbers[i])
if j!=None and i!=j:
low=i
high=j
break
return [low+1,high+1]
if __name__=="__main__":
nums=[0,0,11,15]
so=Solution()
a=so.twoSum2(nums,11)
print(a)
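# Hedged addition (not part of the original submission): because LeetCode 167
# guarantees a sorted input, the usual O(n)-time / O(1)-space alternative is a
# two-pointer scan from both ends.
def two_sum_two_pointers(numbers: list, target: int) -> list:
    lo, hi = 0, len(numbers) - 1
    while lo < hi:
        s = numbers[lo] + numbers[hi]
        if s == target:
            return [lo + 1, hi + 1]  # 1-indexed positions, as in the methods above
        if s < target:
            lo += 1
        else:
            hi -= 1
    return None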
|
[
"noreply@github.com"
] |
HymEric.noreply@github.com
|
1dcb11ed3b38bd5a76bf4a8703e465770acd8297
|
6eee5e023c026dcbcb7101189389905785601282
|
/vrep_pypot/matplotlib_Dialog.py
|
ad2e0b9e97277cc69d98c02e35668ed285e998ec
|
[] |
no_license
|
kmolLin/Pyquino_plugin
|
26e7d9e8f75235f455ff550631ebb391ae1a81ee
|
7c4aff7b6e6e84ced41e3ae124f87600a1c56533
|
refs/heads/master
| 2021-01-18T17:25:49.294387
| 2017-04-09T13:04:07
| 2017-04-09T13:04:07
| 86,797,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,125
|
py
|
# -*- coding: utf-8 -*-
"""
Module implementing matplotlib_show.
"""
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from Ui_matplotlib_Dialog import Ui_Dialog
import sys
import os
import random
import matplotlib
# Make sure that we are using QT5
matplotlib.use('Qt5Agg')
from numpy import arange, sin, pi
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
progname = os.path.basename(sys.argv[0])
progversion = "0.1"
class MyMplCanvas(FigureCanvas):
"""Ultimately, this is a QWidget (as well as a FigureCanvasAgg, etc.)."""
def __init__(self, parent=None, width=5, height=4, dpi=100):
fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = fig.add_subplot(111)
FigureCanvas.__init__(self, fig)
self.setParent(parent)
FigureCanvas.setSizePolicy(self, QSizePolicy.Expanding, QSizePolicy.Fixed)
FigureCanvas.updateGeometry(self)
@pyqtSlot(float)
def update_figure(self,t0,x):
# Build a list of 4 random integers between 0 and 10 (both inclusive)
#l = [random.randint(0, 10) for i in range(4)]
self.axes.cla()
self.axes.plot(t0,x)
self.draw()
'''
class MyStaticMplCanvas(MyMplCanvas):
"""Simple canvas with a sine plot."""
def compute_initial_figure(self):
t = arange(0.0, 3.0, 0.01)
s = sin(2*pi*t)
self.axes.plot(t, s)
'''
'''
class MyDynamicMplCanvas(MyMplCanvas):
"""A canvas that updates itself every second with a new plot."""
def __init__(self, *args, **kwargs):
MyMplCanvas.__init__(self, *args, **kwargs)
timer = QTimer(self)
timer.timeout.connect(self.update_figure)
timer.start(1000)
def compute_initial_figure(self):
self.axes.plot([0, 1, 2, 3], [1, 2, 0, 4], 'r')
def update_figure(self):
# Build a list of 4 random integers between 0 and 10 (both inclusive)
l = [random.randint(0, 10) for i in range(4)]
self.axes.cla()
self.axes.plot([0, 1, 2, 3], l, 'r')
self.draw()
'''
|
[
"smpss91341@gmail.com"
] |
smpss91341@gmail.com
|
98b72acc87e1366fe2c9308189b0e28b738d95a6
|
710ac369ce06c8648ab7b8de905ca68b6fcb30da
|
/Python/Finished/Problem151_StringReversal.py
|
c76bda25a5e5e8868f16642b2d1471898bde9af3
|
[] |
no_license
|
XuanShawnLi/LeetCode
|
d268b1623ab311ae75d0896b71176a484e1f99f7
|
a7ff2c37de71ef1f82f78f31d862738820630730
|
refs/heads/master
| 2021-01-01T06:38:36.274615
| 2015-04-27T21:52:19
| 2015-04-27T21:52:19
| 31,913,092
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
"""
Given an input string, reverse the string word by word.
For example,
Given s = "the sky is blue",
return "blue is sky the".
Update (2015-02-12):
For C programmers: Try to solve it in-place in O(1) space.
"""
class Solution:
# @param s, a string
# @return a string
def reverseWords(self, s):
newword=s.strip()
wordlist=newword.split()
wordlist.reverse()
return " ".join(wordlist)
s=Solution()
word=" the sky is blue "
print word
print s.reverseWords(word)
|
[
"xuanli1981@gmail.com"
] |
xuanli1981@gmail.com
|
def4b651e82986350783a83c780c9be71ee185fb
|
47ebf27cd965269321b5d07beea10aec6da494d9
|
/Analysis/scripts/old_scripts/radial_profile_KerrSF_phi_sphere_vs_slice.py
|
c700d75b81f86839dd2bd74119751d979809b534
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
JamieBamber/GRChombo
|
9220fa67eeaa97eee17bc3c0a8ad17bfd3d02d0e
|
4399e51f71618754282049d6f2946b69ad2c12ee
|
refs/heads/master
| 2022-03-21T18:49:41.668222
| 2020-11-24T23:21:14
| 2020-11-24T23:21:14
| 201,951,780
| 0
| 0
|
BSD-3-Clause
| 2020-03-11T10:19:26
| 2019-08-12T14:55:47
|
C++
|
UTF-8
|
Python
| false
| false
| 2,992
|
py
|
import yt
import numpy as np
from yt import derived_field
import time
# import sys
import matplotlib.pyplot as plt
import math
from yt.units import cm
start_time = time.time()
# load dataset
data_root_path = "/rds/user/dc-bamb1/rds-dirac-dp131/dc-bamb1/GRChombo_data/KerrSF"
data_sub_dir = "run2.2_KNL_l0_m0_a0.99_Al0"
number = "000600"
dataset_path = data_root_path + "/" + data_sub_dir + "/KerrSFp_" + number + ".3d.hdf5"
ds = yt.load(dataset_path)
print("loaded data from ", dataset_path)
print("time = ", time.time() - start_time)
# set centre
center = [512.0, 512.0, 0]
L = max(ds.domain_width.v)
# set up parameters
z_position = 0.001 # z position of slice
r_outer_horizon = 0.25 # R_outer = r_+ / 4 ~= 1 / 4 for an extremal Kerr BH
r_min = r_outer_horizon
r_max = 500
N_bins = 264
a = 0.99
### derived fields
"""@derived_field(name = "rho_E_eff", units = "")
def _rho_E_eff(field, data):
return data["rho"]*pow(data["chi"],-3)
@derived_field(name = "rho_J_eff", units = "")
def _rho_J_eff(field, data):
return data["S_azimuth"]*pow(data["chi"],-3)
@derived_field(name = "rho_J_prime_eff", units = "")
def _rho_J_prime_eff(field, data):
return data["S_azimuth_prime"]*pow(data["chi"],-3)"""
# weighting field = (cell_volume)^(2/3) / (2*pi * r * dr)
@derived_field(name = "sphere_weighting_field", units = "")
def _sphere_weighting_field(field, data):
return data["cell_volume"].in_base("cgs") * N_bins / (2*math.pi* (data["spherical_radius"]**2)*(r_max - r_min)*cm)
@derived_field(name = "slice_weighting_field", units = "")
def _slice_weighting_field(field, data):
return pow(data["cell_volume"].in_base("cgs"),2.0/3) * N_bins / (2*math.pi* (data["cylindrical_radius"])*(r_max - r_min)*cm)
sphere = ds.sphere(center, r_max)
slice = ds.r[:,:,z_position]
slice.set_field_parameter("center", center)
# make profile
rp_sphere = yt.create_profile(sphere, "spherical_radius", fields=["phi"], n_bins=N_bins, weight_field="sphere_weighting_field", extrema={"spherical_radius" : (r_min, r_max)})
rp_slice = yt.create_profile(slice, "spherical_radius", fields=["phi"], n_bins=N_bins, weight_field="slice_weighting_field", extrema={"spherical_radius" : (r_min, r_max)})
### plot profile
r_plus = 1 + math.sqrt(1 - a**2)
R_1 = rp_sphere.x.value
phi_1 = rp_sphere["phi"].value
r_BL_1 = R_1*(1 + r_plus/(4*R_1))**2
x_1 = np.log(r_BL_1 - 1)
R_2 = rp_slice.x.value
phi_2 = rp_slice["phi"].value
r_BL_2 = R_2*(1 + r_plus/(4*R_2))**2
x_2 = np.log(r_BL_2 - 1)
plt.plot(x_1, phi_1, 'r-', label="from sphere")
plt.plot(x_2, phi_2, 'b--', label="from slice")
plt.xlabel("$\\ln(r_{BL}-1)$")
plt.ylabel("$\\phi$")
plt.grid(axis='both')
#plt.ylim((-0.5, 0.5))
dt = 0.25
title = data_sub_dir + " time = {:.1f}".format(int(number)*dt)
plt.legend()
plt.title(title)
plt.tight_layout()
save_name = "plots/" + data_sub_dir + "_" + number + "_phi_profile_sphere_vs_slice.png"
print("saved " + save_name)
plt.savefig(save_name, transparent=False)
plt.clf()
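# Hedged sanity check (added for illustration, not in the original script):
# with the radius map r_BL = R*(1 + r_plus/(4*R))**2 used above, the choice
# R = r_plus/4 maps onto r_BL = r_plus, i.e. the outer horizon, which is what
# the r_outer_horizon comment near the top of the script relies on.
R_h = r_plus / 4.0
print("r_BL at R = r_plus/4:", R_h * (1 + r_plus / (4 * R_h))**2, "(should equal r_plus =", r_plus, ")")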
|
[
"dc-bamb1@login-e-14.data.cluster"
] |
dc-bamb1@login-e-14.data.cluster
|
788fe06647b09768ae78371dc177c5c3795a772c
|
bb08eb6609c9dbf721ffdf613135571c662e6756
|
/MNIST Clusters.py
|
1e56622d6a72ae42fe867cf83664662e21264d13
|
[] |
no_license
|
rumiio/ServerlessAI-1
|
d269bb9fad2ff7dd3be72f8b09ccdb20c288c99e
|
d1441d5f087c45267994d3082f95f3a872f64dc3
|
refs/heads/master
| 2020-04-01T02:14:54.624303
| 2018-10-11T00:36:08
| 2018-10-11T00:36:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,095
|
py
|
# coding: utf-8
# # MNIST Clusters
# In[5]:
from sagemaker import get_execution_role
role = get_execution_role()
# In[6]:
get_ipython().run_cell_magic('time', '', 'import pickle, gzip, numpy, urllib.request, json\n\n# Load the dataset\nurllib.request.urlretrieve("http://deeplearning.net/data/mnist/mnist.pkl.gz", "mnist.pkl.gz")\nwith gzip.open(\'mnist.pkl.gz\', \'rb\') as f:\n train_set, valid_set, test_set = pickle.load(f, encoding=\'latin1\')')
# In[7]:
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (2,10)
def show_digit(img, caption='', subplot=None):
if subplot == None:
_, (subplot) = plt.subplots(1,1)
imgr = img.reshape((28,28))
subplot.axis('off')
subplot.imshow(imgr, cmap='gray')
plt.title(caption)
show_digit(train_set[0][30], 'This is a {}'.format(train_set[1][30]))
# In[4]:
get_ipython().run_cell_magic('time', '', "import io\nimport boto3\nfrom sagemaker.amazon.common import write_numpy_to_dense_tensor\n\nbucket = '2018-10-08-batch-test' # Use the name of your s3 bucket here\ntrain_folder = 'KMtest'\ntest_folder = 'KMtest'\nmodel_location = 'KMtest'\n\ntrainURL = 's3://{}/{}'.format(bucket, train_folder)\ntestURL = 's3://{}/{}'.format(bucket, test_folder)\nmodelFolder = 's3://{}/{}'.format(bucket, model_location)\nprint('training data will be uploaded to: {}'.format(trainURL))\nprint('training artifacts will be created in: {}'.format(modelFolder))\n\n# Convert the training data into the format required by the SageMaker KMeans algorithm\nbuf = io.BytesIO()\nwrite_numpy_to_dense_tensor(buf, train_set[0], train_set[1])\nbuf.seek(0)\n\nboto3.resource('s3').Bucket(bucket).Object(train_folder).upload_fileobj(buf)")
# In[ ]:
# from time import gmtime, strftime
# job_name = 'KMeans-' + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
# print("Training job", job_name)
# In[ ]:
from sagemaker import KMeans
kmeans = KMeans(role=role,
train_instance_count=2,
train_instance_type='ml.c4.8xlarge',
output_path="s3://2018-10-08-batch-test",
k=10,
data_location=trainURL)
# Use the high-level SDK
# In[ ]:
get_ipython().run_cell_magic('time', '', '\nkmeans.fit(kmeans.record_set(train_set[0]))')
# In[ ]:
kmeans.latest_training_job.job_name
modelURL = 's3://{}/{}/output/model.tar.gz'.format(bucket, kmeans.latest_training_job.job_name)
modelURL
# In[ ]:
sagemaker = boto3.client('sagemaker')
from sagemaker.amazon.amazon_estimator import get_image_uri
image = get_image_uri(boto3.Session().region_name, 'kmeans')
kmeans_hosting_container = {
'Image': image,
'ModelDataUrl': modelURL
}
# In[ ]:
kmeans_hosting_container
# In[ ]:
create_model_response = sagemaker.create_model(
ModelName="MNIST-high-level",
ExecutionRoleArn=role,
PrimaryContainer=kmeans_hosting_container)
# In[ ]:
import sagemaker
val_key = 'kmeans_highlevel_example/data/val.data'
prediction_key = 'kmeans_highlevel_example/prediction/valid-data.csv.out'
### Convert the validation set numpy array to a csv file and upload to s3
numpy.savetxt('valid-data.csv', valid_set[0], delimiter=',', fmt='%g')
s3_client = boto3.client('s3')
result = s3_client.upload_file('valid-data.csv', bucket, val_key)
result
inputURL = 's3://{}/{}'.format(bucket, val_key)
outputURL = 's3://{}/{}'.format(bucket, prediction_key)
# Initialize the transformer object
transformer =sagemaker.transformer.Transformer(
base_transform_job_name='Batch-Transform',
model_name="MNIST-high-level",
instance_count=1,
instance_type='ml.c4.xlarge',
output_path=outputURL
)
# To start a transform job:
transformer.transform(inputURL, content_type='text/csv', split_type='Line')
# Then wait until transform job is completed
transformer.wait()
# To fetch validation result
s3_client.download_file(bucket, prediction_key, 'valid-result')
with open('valid-result') as f:
results = f.readlines()
print("Sample transform result: {}".format(results[0]))
|
[
"noreply@github.com"
] |
rumiio.noreply@github.com
|
b1503a00135afba514e7590e99b4df2ec8d63a57
|
a8053bd5e459e1496112787049182b59540e29fb
|
/python/getpop.py
|
5f64a8097369c9bb8d48323f916905123b3ee4d2
|
[] |
no_license
|
larsbutler/ged
|
d0c81409423727a1cbf552eed379c38c90570d5d
|
bac70d66aab6172778e25dee4ac8dc92f370cfc7
|
refs/heads/master
| 2020-12-25T00:06:00.919684
| 2013-02-18T14:27:38
| 2013-02-18T14:27:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,804
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (c) 2010-2012, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Read GRUMP population and urban-extent data into text file
for import into DB
"""
from osgeo import gdal
from osgeo.gdalconst import GA_ReadOnly
import sys
#
# Raster file paths
#
# TODO convert to command line arguments
#
_URBAN_FILE = '/data/ged/rebuild-pop/urban-rural/prj.adf'
_POP_FILE = '/data/ged/rebuild-pop/pop-input/prj.adf'
def _open_raster_file(filepath):
"""
Open the given raster file for reading, raise an IOError on failure.
Note does not actually read the real data yet, see load_data
"""
#
# Force use of float64 for population data
# Note that this requires use of GDAL 1.9 (see env.sh)
# See http://lists.osgeo.org/pipermail/gdal-dev/2006-July/009570.html
#
gdal.SetConfigOption('AAIGRID_DATATYPE', 'Float64')
_handle = gdal.Open(filepath, GA_ReadOnly)
# Clear flag after open
gdal.SetConfigOption('AAIGRID_DATATYPE', None)
if _handle is None:
raise IOError("Failed to open file {0}\n".format(filepath))
return _handle
class RasterFile(object):
"""
Representation of a raster file
"""
def __init__(self, filepath):
self.data = None
self._filepath = filepath
self._handle = _open_raster_file(filepath)
self.width = self._handle.RasterXSize
self.height = self._handle.RasterYSize
_transform = self._handle.GetGeoTransform()
self.x_origin = _transform[0]
self.y_origin = _transform[3]
self.pixel_width = _transform[1]
self.pixel_height = _transform[5]
def load_data(self):
"""
Load raster files into a (huge) array.
For smaller machines a different approach should be used, for example
by iterating over blocks of cells
"""
_band = self._handle.GetRasterBand(1)
sys.stderr.write('Loading data from {0}...\n'.format(self._filepath))
self.data = _band.ReadAsArray(0, 0, self.width, self.height)
if self.data is None:
raise IOError('Failed to load data from {0}'
.format(self._filepath))
sys.stderr.write('DONE Loading data from {0}...\n'
.format(self._filepath))
def lon(self, x_pixel):
"""
The longitude corresponding to the given x (pixel) value
"""
# Note + 0.5 to place point in centre of pixel
return ((x_pixel + 0.5) * self.pixel_width) + self.x_origin
def lat(self, y_line):
"""
The latitude corresponding to the given y (line) value
"""
# Note + 0.5 to place point in centre of pixel
return ((y_line + 0.5) * self.pixel_height) + self.y_origin
def get_x(self, lon):
"""
The x (pixel) value for the given longitude
"""
return int((lon - self.x_origin) / self.pixel_width)
def get_y(self, lat):
"""
The y (line) value for the given latitude
"""
return int((lat - self.y_origin) / self.pixel_height)
class _CellCountValidator(object):
"""
Helper class to check that cell counts are self-consistent
"""
def __init__(self, raster):
self.raster = raster
#
# Counters for different types of cell
# Used for validation later
self.water = 0
self.land = 0
self.skipped = 0
self.urban = 0
self.rural = 0
self.null = 0
self.total_read = 0
def validate(self, startx=0):
"""
Validation checks - do our numbers add up?
"""
total = (self.raster.width - startx) * self.raster.height
expected = total - self.skipped
water_and_land = self.water + self.land
urban_rural_num_sum = self.urban + self.rural + self.null
sys.stderr.write('DONE total_read cells=' +
'{0} x{1} = {2} skipped={3} read={4} expected={5}\n'
.format(self.raster.width - startx,
self.raster.height,
total, self.skipped,
self.total_read,
expected))
sys.stderr.write(' water={0}, land={1} sum={2}\n'
.format(self.water, self.land,
water_and_land))
sys.stderr.write(' raster={0} rural={1} null={2} sum={3}\n'
.format(self.urban, self.rural,
self.null,
urban_rural_num_sum))
if self.total_read != expected:
sys.stderr.write('WARNING cells read=' +
'{0} != total_read-skipped={1}\n'
.format(self.total_read, expected))
if total != water_and_land + self.skipped:
sys.stderr.write('WARNING total_read=' +
'{0} != land+water+skipped={1}\n'
.format(total,
water_and_land +
self.skipped))
if self.land != urban_rural_num_sum:
sys.stderr.write('WARNING total_read land cells={0} != u+r+n={1}\n'
.format(self.land, urban_rural_num_sum))
def _extract_data(urban, pop, validator, startx=0, starty=0):
"""
Loop over rasters, print out values for land-mass cells, update counters
"""
for pop_x in range(startx, pop.width):
lon = pop.lon(pop_x)
urban_x = urban.get_x(lon)
for pop_y in range(starty, pop.height):
pop_value = pop.data[pop_y, pop_x]
validator.total_read += 1
lat = pop.lat(pop_y)
urban_y = urban.get_y(lat)
if urban_y >= urban.height or urban_x >= urban.width:
ur_value = 255
else:
ur_value = urban.data[urban_y, urban_x]
is_urban = None
if ur_value == 1:
is_urban = 'f'
validator.rural += 1
elif ur_value == 2:
is_urban = 't'
validator.urban += 1
elif ur_value == 255:
# no land mass
if pop_value == 0:
validator.water += 1
continue # do NOT write output or update land
else:
#
# GRUMP 1995 Urban/Rural mapping has null values for
# the Maldives; we cannot simply assume null implies water
#
sys.stderr.write(
'WARNING NULL U/R values for ' +
'x={0},get_y={1}, lat={2},lon={3}, pop={4}\n'.format(
urban_x, urban_y, lat, lon, pop_value))
is_urban = '\\N' # NULL SQL code
validator.null += 1
else:
sys.stderr.write(
('ERROR Unexpected U/R value ' +
'{4} found at get_x={0},get_y={1}, lat={2},lon={3}\n' +
' Check file format and GDAL version\n').format(
urban_x, urban_y, lat, lon, ur_value))
sys.exit(1)
sys.stdout.write('{0}\t{1}\t{2}\t{3}\n'.format(
lat, lon, pop_value, is_urban))
validator.land += 1
def main():
"""
Read GRUMP population and urban-extent data into text file
for import into DB
"""
urban = RasterFile(_URBAN_FILE)
pop = RasterFile(_POP_FILE)
# Use for end-game testing
#_startx=urban.width-2#width-10 #30322 #22360
# TODO make this a command line argument
_startx = 0
validator = _CellCountValidator(pop)
urban.load_data()
pop.load_data()
_extract_data(urban, pop, validator, _startx)
# After loading data, check the validate cell counts
validator.validate(_startx)
#
# Main driver
#
if __name__ == "__main__":
main()
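# Hedged illustration (added, not part of the original script): get_x()/lon()
# and get_y()/lat() are intended to be approximate inverses -- converting a
# longitude to a pixel column and back returns the centre of the containing
# pixel. For a hypothetical RasterFile instance `r`:
#   px = r.get_x(12.34)
#   lon_centre = r.lon(px)   # within half a pixel width of 12.34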
|
[
"paul@openquake.org"
] |
paul@openquake.org
|
89a5954ff3954c83f37c579a6f9b43cbe7fd9c34
|
904b0d81152649ccd3349f94f88e7b89a7b5c76a
|
/scripts/plotting/plot_wedge.py
|
cf9b2395645d90bc12e00db3f26c19924497385e
|
[
"BSD-3-Clause"
] |
permissive
|
desihub/LSS
|
ec33538a0e7280ad1c6b257368cc009ed4b39cbb
|
5645461929172d327ed30389d76e7e887043c9bf
|
refs/heads/main
| 2023-08-18T23:17:13.123605
| 2023-08-18T20:08:22
| 2023-08-18T20:08:22
| 36,753,969
| 14
| 28
|
BSD-3-Clause
| 2023-09-13T18:37:35
| 2015-06-02T18:42:51
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,022
|
py
|
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import fitsio
from astropy.table import join,Table
import healpy as hp
from LSS.tabulated_cosmo import TabulatedDESI
cosmo = TabulatedDESI()
dis_dc = cosmo.comoving_radial_distance
outdir = '/global/cfs/cdirs/desi/survey/catalogs/main/LSS/daily/LSScats/plots/'
zcol = 'Z_not4clus'
#ram = 130
#rax = 220
ram = 0
rax = 360
ra0 = (ram+rax)/2.
decm = -0.25
decx = .25
zmin = 0
zmax = 3.5
#plt.figure()
fig, ax = plt.subplots(dpi=300)
ax.set_aspect('equal')
ax.patch.set_facecolor('black')
#ax.patch.set_alpha(1)
tps = ['QSO','LRG','BGS_ANY','ELG']
cl = ['y','r','lime','b']
zordl = [10,5,2,1]
for tp,c,zo in zip(tps,cl,zordl):
cols = ['RA','DEC',zcol,'ZWARN','DELTACHI2','LOCATION_ASSIGNED']
if tp == 'ELG':
cols.append('o2c')
zmin = 0.6
dt = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/main/LSS/daily/LSScats/test/'+tp+'_full.dat.fits',columns=cols)
sel = dt['RA'] > ram
sel &= dt['RA'] < rax
sel &= dt['DEC'] > decm
sel &= dt['DEC'] < decx
#sel &= dt[zcol] < zmax
#sel &= dt[zcol] > zmin
dt = dt[sel]
wz = dt['ZWARN']*0 == 0
wz &= dt['ZWARN'] != 1.e20
wz &= dt['ZWARN'] != 999999
wz &= dt['LOCATION_ASSIGNED'] == 1
if tp == 'QSO':
#good redshifts are currently just the ones that should have been defined in the QSO file when merged in full
wg = dt[zcol]*0 == 0
wg &= dt[zcol] != 999999
wg &= dt[zcol] != 1.e20
if tp[:3] == 'ELG':
wg = dt['o2c'] > 0.9
if tp == 'LRG':
# Custom DELTACHI2 vs z cut from Rongpu
#wg = dt['ZWARN'] == 0
#drz = (10**(3 - 3.5*dt[zcol]))
#mask_bad = (drz>30) & (dt['DELTACHI2']<30)
#mask_bad |= (drz<30) & (dt['DELTACHI2']<drz)
#mask_bad |= (dt['DELTACHI2']<10)
#wg &= dt[zcol]<1.4
#wg &= (~mask_bad)
wg = dt['DELTACHI2'] > 15
wg &= dt['ZWARN'] == 0
wg &= dt[zcol]<1.5
if tp[:3] == 'BGS':
wg = dt['DELTACHI2'] > 40
print(tp+':')
print('# of good obs: '+str(len(dt[wz])))
print('# of good z: '+str(len(dt[wz&wg])))
print('completeness: '+str(round(len(dt[wz])/len(dt),3)))
dt = dt[wg&wz]
sel = dt[zcol] < zmax
sel &= dt[zcol] > zmin
dt = dt[sel]
r = dis_dc(dt[zcol])
th = (90-dt['DEC'])*np.pi/180.
phi = (dt['RA']-ra0)*np.pi/180
x = r*np.cos(phi)*np.sin(th)
y = r*np.sin(phi)*np.sin(th)
z = r*np.cos(th)
ax.plot(x,y,',',color=c,zorder=zo,lw=.1)
if tp == 'QSO':
sel = dt[zcol] > 2.1
ax.plot(x[sel],y[sel],',',color='white',zorder=zo,lw=1)
#plt.show()
del dt
print(tp+' done')
#plt.axis('off')
for spine in ax.spines.values():
spine.set_visible(False)
ax.tick_params(bottom=False, labelbottom=False,
left=False, labelleft=False)
plt.savefig('/global/cfs/cdirs/desi/survey/catalogs/main/LSS/daily/LSScats/plots/wedge_all.png')
plt.show()
|
[
"ashley.jacob.ross@gmail.com"
] |
ashley.jacob.ross@gmail.com
|
0d0c91379fb0afa3b4f47cb1b6414956158c67f4
|
ed9c1fc1f34e7546d896cd3d64b4ce983b30282c
|
/assignment-10.py
|
37c2c618122f11793adefa2e0769010553df310b
|
[] |
no_license
|
rahulsingh9878/Daily_Assignments
|
6d21fccd12e7e979a2704e879640240df536d9d5
|
7b1a3029674efb4d81f37ab7775c0f3ce48c9f3d
|
refs/heads/master
| 2020-03-26T14:05:04.900444
| 2018-10-07T16:54:01
| 2018-10-07T16:54:01
| 144,971,143
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,380
|
py
|
f=open("Rahul.txt",'w')
f.write("hi")
f.write("\n hello")
f.write("\n up")
f.write("\n hi")
f.write("\n below")
f.write("\n hello")
f.write("\n werty")
f.write("\n hi")
f.write("\n hello")
f.close()
#Q.1- Write a Python program to read n lines of a file
f=open("Rahul.txt",'r')
lines = f.readlines()
n=int(input('No. of lines you want to read:- '))
print(lines[:n])
#Q.2- Write a Python program to count the frequency of words in a file.
from collections import Counter
with open('Rahul.txt') as t:
word=Counter(t.read().split())
print(word)
#Q.3- Write a Python program to copy the contents of a file to another file
f2 = open('output.txt', 'w')
for l in lines:
f2.write(l)
f2.close()
#Q.4- Write a Python program to combine each line from first file with the corresponding line in second file.
f2 = open('output.txt', 'a')
f3 = open('Rahul.txt', 'r')
for l in lines:
li = f3.readline()
f2.write(l + li)
f2.close()
f3.close()
#Q.5- Write a Python program to write 10 random numbers into a file. Read the file and then sort the numbers and then store it to another file.
import random
import random as r
f=open("number.txt",'w')
for i in range(0,10):
a=r.randint(1,9)
f.write(str(a)+"\n")
f.close()
f=open("number.txt",'r')
a=f.readlines()
f.close()
f=open("sort_file.txt",'w')
a.sort()
for i in range(len(a)):
f.write(a[i])
f.close()
|
[
"rahulsimgh9878@gmail.com"
] |
rahulsimgh9878@gmail.com
|
d84b359fb10c180b482718d87c4d2b0b17cfdc49
|
cca186e9e8d5d29807e93a7c34cf8b975c420621
|
/geops/built_in/compound_surface.py
|
b1c0e1500effe7630b0ad126cd38ec5c59a156ae
|
[
"MIT"
] |
permissive
|
tokarevart/geops
|
972ec17b75a67b5e10a3995571998e09a9997bd0
|
c358d79e7e4bfd11da604b181f992494f7ed39ea
|
refs/heads/master
| 2020-07-03T23:27:17.610821
| 2019-08-13T15:37:26
| 2019-08-13T15:37:26
| 202,085,527
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,047
|
py
|
from .surface_base import SurfaceBase
class CompoundSurface(SurfaceBase):
"""
Generates the Compound Surface Gmsh function.
Creates a compound surface from several elementary surfaces.
When meshed, a compound surface will be reparametrized as
a single surface, whose mesh can thus cross internal boundaries.
Compound surfaces are mostly useful for remeshing discrete models.
Parameters
----------
surfaces : array-like[N]
Contains the identification number of the elementary surfaces
that should be reparametrized as a single surface.
"""
def __init__(self, surfaces):
super(CompoundSurface, self).__init__()
self.num_edges = sum(s.num_edges for s in surfaces)
self.surfaces = surfaces
self.code = "\n".join(
[
"{} = news;".format(self.id),
"Compound Surface({}) = {{{}}};".format(
self.id, ", ".join([s.id for s in surfaces])
),
]
)
return
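# Hedged usage sketch (added for illustration, not part of the original module):
# CompoundSurface only needs objects exposing `id` and `num_edges`, typically
# other SurfaceBase-derived surfaces from this package. `PlaneSurface` below is
# hypothetical and stands in for whatever elementary surfaces were built earlier.
#   s1 = PlaneSurface(...)          # hypothetical elementary surface
#   s2 = PlaneSurface(...)          # hypothetical elementary surface
#   cs = CompoundSurface([s1, s2])
#   print(cs.code)                  # "<id> = news;" + "Compound Surface(<id>) = {...};"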
|
[
"tokarev28.art@gmail.com"
] |
tokarev28.art@gmail.com
|
bfb894afda59ae12d5f2976185153477c045b24e
|
c6949a72cdadab6558c0abad85046b8373b13ff0
|
/netprovider/bin/sqlformat
|
bc24e8261734f870928d6bea8e5f5e0ed655485c
|
[] |
no_license
|
priyambajpai22/netprovider
|
fb6c5974fcd3af79c25e5f2c04b6d903c808f6df
|
b5274ee7b763710dcd4a76a3cb5b85c014c046ee
|
refs/heads/master
| 2022-08-25T04:21:01.817502
| 2019-08-20T08:53:34
| 2019-08-20T08:53:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 241
|
#!/home/golu/env/netprovider/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"golubajpai302@gmail.com"
] |
golubajpai302@gmail.com
|
|
765c2d34cad0ed3db835fc0f216d0c88986ca9f8
|
1fc35e54ee4723cfa3d13de713895eac30616847
|
/baekjun/stage solve/20.stack/10828.py
|
935d7ec8096e90a8ee88591746be2958546e03fb
|
[] |
no_license
|
yhs3434/Algorithms
|
02f55a5dc21085c0a17d9eaec5e3ba0cbd6d651d
|
24d234a301077aac1bc4efbb269b41a963cedccf
|
refs/heads/master
| 2021-07-12T19:21:00.446399
| 2021-01-20T01:44:27
| 2021-01-20T01:44:27
| 226,431,877
| 6
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 764
|
py
|
# Stack
# https://www.acmicpc.net/problem/10828
import sys
rl = sys.stdin.readline
st = []
def solution(cOrd):
if len(cOrd)>1:
st.append(int(cOrd[1]))
else:
cur = cOrd[0]
if cur == 'pop':
if not st:
return -1
else:
return st.pop()
elif cur == 'size':
return len(st)
elif cur == 'empty':
if st:
return 0
else:
return 1
elif cur == 'top':
if not st:
return -1
else:
return st[-1]
n = int(rl().rstrip())
for xxx in range(n):
cOrder = rl().rstrip().split(' ')
ret = solution(cOrder)
if ret!=None:
print(ret)
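# Illustrative run (added, not part of the original submission), following the
# problem statement at acmicpc.net/problem/10828:
#   input : 7, then "push 1", "push 2", "top", "size", "pop", "pop", "pop"
#   output: 2, 2, 2, 1, -1   (one value per line; pushes print nothing)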
|
[
"yoonhs3434@naver.com"
] |
yoonhs3434@naver.com
|
566dfb97cc4da20f81cf6becff3ec01cfb7e91f8
|
f8f4cc4c94b304def8efbd593f8a44c55897f7f0
|
/杭州交付/web_bert_classify.py
|
ad5687deaa154aed14e2ef824fb1cf9d383cc3f2
|
[] |
no_license
|
AdamCuiYH/website-classification
|
e986d6397cae114c013a7072ef9b4acefd9a6a91
|
c617c123b5637077c17b452e31c35c5f4fa05939
|
refs/heads/main
| 2023-03-18T09:38:44.920107
| 2021-03-12T03:31:14
| 2021-03-12T03:31:14
| 304,557,347
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,113
|
py
|
import pandas as pd
import time
import multiprocessing
from utils import chrome_spyder
from simpletransformers.classification import ClassificationModel
global html_content
if __name__ == '__main__':
print(' ---------- 开始爬虫,请耐心等待 ----------')
# Call the crawler module to obtain the crawled text
start1 = time.time()
pool = multiprocessing.Pool(multiprocessing.cpu_count())
with open('url.txt', "r", encoding='utf-8') as f:
C = f.read()
urls =C.split()
#urlss = urls[30:]
print('一共爬虫网站数量:',len(urls))
pool.map(chrome_spyder, urls)
pool.close()
pool.join()
end1 = time.time()
print(' ------------ 爬虫结束 -------------')
print(end1-start1)
# Read the crawled text and convert it into a DataFrame
data_df = pd.read_csv('爬虫文本.csv', header=None, usecols=[0, 1, 2], names=['status', 'url', 'content'], dtype='str')
df1 = data_df.dropna(subset=['content'])
df2 = df1.loc[df1['status'] == 'True']
data_df1 = df2[['url','content']]
data_df2 = data_df1.copy()
df4 = list(data_df2['content'])
print(' ------------- 开始识别网站 -------------')
# Model prediction
model = ClassificationModel('bert', 'model_weight',use_cuda=False,args={"thread_count":1})
start = time.time()
predictions, raw_outputs = model.predict(df4)
end = time.time()
print(end-start,'s')
dict_prediction = {0: '贷款-P2P', 1: '贷款-抵押', 2: '贷款-小额', 3: '贷款-咨讯', 4: '贷款-综合', 5: '贷款-租赁', 6: '赌-彩票预测',
7: '赌-赌场系', 8: '赌-购彩系', 9: '赌-电子游戏', 10: '赌-球', 11: '黄-视频', 12: '黄-成人用品药', 13: '签名网站',
14: '黄-小说漫画', 15: '黄-性感图', 16: '黄-直播', 17: '宗教-场所', 18: '宗教-机构', 19: '宗教-文化', 20: '宗教-用品',
21: 'vpn-非法', 22: 'vpn-商务', 23: '打码', 24: 'VPS', 25: '短链接', 26: '配资', 27: '其他', 28: '四方支付',
29: '云发卡', 30: '流量刷单', 31: '微交易', 32: '云呼'}
predictions_label = [dict_prediction[i] if i in dict_prediction else i for i in predictions]
data_df2['BERT预测标签'] = predictions_label
df5 = data_df.loc[(data_df['status'] == 'False') | (data_df['content'].isnull())]
df6 = df5['url']
# Export the prediction results
writer = pd.ExcelWriter('网站识别结果.xlsx', engine='xlsxwriter',
options={'strings_to_urls': False})
data_df2.to_excel(writer, index=None,sheet_name='网站类别')
df6.to_excel(writer,sheet_name='爬虫失败和内容为空的网址')
writer.close()
# Report the elapsed time
end2 = time.time()
print(' ---------------- 程序运行结束,已退出 ---------------')
print('网页爬虫用时:', (end1 - start1), 's')
print('模型预测用时', (end2 - end1), 's')
print('一共用时', (end2 - start1), 's')
|
[
"noreply@github.com"
] |
AdamCuiYH.noreply@github.com
|
2e5a372d00383fe3ea8f959e7e3ad7991bc68056
|
18d4189c0cfd054890fcc70c6f223ff030f08c9c
|
/assignments/a1/html-compressor-separated.py
|
4ab6c38095d62f7a5d937f81dda69ce6cc5071b7
|
[] |
no_license
|
erikaris/cs834-f16
|
572309e431835b97b143b94222bd24347625d4d7
|
895d671823f94ddd77d52349f24dcd839e29e8a0
|
refs/heads/master
| 2021-01-22T01:06:05.098411
| 2016-12-17T16:52:54
| 2016-12-17T16:52:54
| 67,218,768
| 0
| 0
| null | 2016-09-02T11:56:38
| 2016-09-02T11:56:38
| null |
UTF-8
|
Python
| false
| false
| 3,154
|
py
|
import json
import string
from HTMLParser import HTMLParser
import htmlmin as htmlmin
import requests
# HTMLEncoder is extent of HTMLParser
class HTMLEncoder(HTMLParser):
_tags = [] # convention for private variable
def handle_starttag(self, tag, attrs):
# Append to array, with type 'starttag'
self._tags.append(((tag, attrs), 'starttag'))
def handle_endtag(self, tag):
# Append to array, with type 'endtag'
self._tags.append((tag, 'endtag'))
def handle_data(self, data):
# Append to array, with type 'data'
self._tags.append((data, 'data'))
def make_chars(self, number):
num_char = int(number/len(string.lowercase))
rem = number - (num_char * len(string.lowercase))
chars = ''.join([string.lowercase[len(string.lowercase)-1] for i in range(0,num_char)]) + \
string.lowercase[rem]
return chars
def encode(self, html):
# Process normal html with HTMLParser
self.feed(html)
self.close()
# After parsing is done, process array _tags
tag_list = []
minified_html = ''
for data, type in self._tags:
if type == 'starttag':
(tag, attrs) = data
if tag not in tag_list: # if the tag is not yet in the tag list, append it.
tag_list.append(tag)
# Process attrs of tag, e.g:
# [('href', 'http://...'), ('title', 'Some link')] become
# <a href='http://...' title='Some link'>
str_attrs = ' '.join(['{}="{}"'.format(name, val) for name, val in attrs])
# Append encoded tag and it's attrs into var html
encoded_tag = self.make_chars(tag_list.index(tag)) # convert the index from int to chars.
minified_html += '<{}{}>'.format(
encoded_tag, (' ' if str_attrs else '') + str_attrs
)
elif type == 'endtag':
# Append encoded end-tag into var html
encoded_tag = self.make_chars(tag_list.index(tag))
minified_html += '</{}>'.format(encoded_tag)
elif type == 'data':
# Append data into var html
minified_html += data
# Process json of definition as a comment
definitions = '<!--{}-->'.format( ## serialize the tag list.
json.dumps({ self.make_chars(key): val for key, val in enumerate(tag_list) })
)
# Return definition and minified html
return definitions + minified_html ## concatenate the definitions with the minified html.
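# Hedged illustration (added, not in the original file) of how make_chars()
# maps tag-list indices to short names: 0 -> 'a', 1 -> 'b', ..., 25 -> 'z',
# 26 -> 'za', 27 -> 'zb', and so on -- so each distinct tag gets a one- or
# two-character stand-in in the minified output.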
# Capture input from keyboard
url = raw_input("Enter a URL: ")
url = url.strip()
# Crawl URL with requests, methods GET
resp = requests.get(url)
# Process only if status code 200
if resp.status_code == 200:
# Instantiate the our html encoder
encoder = HTMLEncoder()
# Encode text
html = resp.text.encode('utf-8').strip()
enc_html = encoder.encode(html)
print enc_html
else:
print('Debug : Cannot open URL {}, Status code: {}'.format(url, resp.status_code))
|
[
"noreply@github.com"
] |
erikaris.noreply@github.com
|
adbbd7044769fce98db6a4c2fe373101e6b31812
|
ef9aab4d53caf9126ddcb83a268a1702b2b4dac7
|
/contmon/content/models.py
|
413a1e1c51b0814467e90a8e0a0818521d6eb301
|
[
"BSD-3-Clause"
] |
permissive
|
adandan01/contmon
|
c347365f59beac6dcfe0f243457f477dacae90c4
|
95cb6ff432ce5e02cdaa823430626a8abb03d01e
|
refs/heads/master
| 2021-01-06T20:42:45.363275
| 2015-07-09T17:47:28
| 2015-07-09T17:47:28
| 37,098,228
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,698
|
py
|
from django.db import models
# Create your models here.
# class ContMonWebsite(models.Model):
# pass
from jsonfield import JSONField
from model_utils.models import TimeStampedModel
import reversion
class CrawlUrl(TimeStampedModel):
url = models.CharField(max_length=500, db_index=True)
domain = models.CharField(max_length=400, db_index=True)
path = models.CharField(max_length=400)
def __unicode__(self):
return self.url
class CrawledPage(TimeStampedModel):
crawl_url = models.ForeignKey(CrawlUrl)
page_number = models.IntegerField()
image = models.ImageField(upload_to='crawled_page')
text = models.TextField(blank=True)
content_hash = models.CharField(max_length=500, db_index=True)
def __unicode__(self):
return "crawled page: %s page number:" % (self.crawl_url, self.page_number)
class AbstractExtractedContent(TimeStampedModel):
REVIEW_STATES_NEVER_REVIEWED = 0
REVIEW_STATES_COMPLIANT = 1
REVIEW_STATES_NOT_COMPLIANT = 2
REVIEW_STATES_IRRELEVANT = 3
REVIEW_STATES_NEVER_REVIEWED_LABEL = 'Never Reviewed'
REVIEW_STATES_COMPLIANT_LABEL = 'Compliant'
REVIEW_STATES_NOT_COMPLIANT_LABEL = 'Not Compliant'
REVIEW_STATES_IRRELEVANT_LABEL = 'Irrelevant: Ignore'
REVIEW_STATES = (
(REVIEW_STATES_NEVER_REVIEWED, REVIEW_STATES_NEVER_REVIEWED_LABEL),
(REVIEW_STATES_COMPLIANT, REVIEW_STATES_COMPLIANT_LABEL),
(REVIEW_STATES_NOT_COMPLIANT, REVIEW_STATES_NOT_COMPLIANT_LABEL),
(REVIEW_STATES_IRRELEVANT, REVIEW_STATES_IRRELEVANT_LABEL),
)
REVIEW_STATES_DICT = dict(REVIEW_STATES)
crawl_urls = models.ManyToManyField(CrawlUrl)
domain = models.CharField(max_length=400, db_index=True, default='')
image = models.ImageField(upload_to='extracted_content')
html = models.FileField(upload_to='html')
extracted_fields = JSONField()
location_x = models.FloatField()
location_y = models.FloatField()
size_width = models.FloatField()
size_height = models.FloatField()
content_hash = models.CharField(max_length=500, db_index=True)
text = models.TextField(blank=True)
review_state = models.SmallIntegerField(choices=REVIEW_STATES, db_index=True, default=REVIEW_STATES_NEVER_REVIEWED)
@property
def review_state_change_history(self):
available_versions = list(reversion.get_for_object(self)[:20])
history_log = []
field_name = 'review_state'
for i, version in enumerate(available_versions):
if i < (len(available_versions)-1):
old_version = available_versions[i+1]
old_text = old_version.field_dict.get(field_name, "")
else:
old_text = 0 #never_reviewed
new_version = available_versions[i]
new_text = new_version.field_dict.get(field_name, "")
if new_text != old_text:
message = "<del><span class='bg-warning'>%s</span></del> <ins><span class='bg-info'>%s</span></ins>" % (self.REVIEW_STATES_DICT[old_text], self.REVIEW_STATES_DICT[new_text])
history_log.append({'user':version.revision.user.username if version.revision.user else '','date': version.revision.date_created.strftime('%B %d., %Y, %I:%M%p:%S'), 'patch_html':message })
return history_log
#TODO should reference the extractor
class Meta:
abstract = True
unique_together = ("domain", "content_hash")
class CreditCardOffer(AbstractExtractedContent):
@property
def name(self):
return self.extracted_fields.get('name', '')
# class ComplianceViolation(models.Model):
# pass
#
# class Extractor(models.Model):
# pass
|
[
"danio.chen@gmail.com"
] |
danio.chen@gmail.com
|
5a2fff6f3995b075b57cb0927c81e5fa76fb7714
|
d305e9667f18127e4a1d4d65e5370cf60df30102
|
/tests/st/ops/gpu/test_softplus_op.py
|
1ee9d3c9c4594b3b54afe4a11dabd5056013e300
|
[
"Apache-2.0",
"MIT",
"Libpng",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"AGPL-3.0-only",
"MPL-2.0-no-copyleft-exception",
"IJG",
"Zlib",
"MPL-1.1",
"BSD-3-Clause",
"BSD-3-Clause-Open-MPI",
"MPL-1.0",
"GPL-2.0-only",
"MPL-2.0",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
imyzx2017/mindspore_pcl
|
d8e5bd1f80458538d07ef0a8fc447b552bd87420
|
f548c9dae106879d1a83377dd06b10d96427fd2d
|
refs/heads/master
| 2023-01-13T22:28:42.064535
| 2020-11-18T11:15:41
| 2020-11-18T11:15:41
| 313,906,414
| 6
| 1
|
Apache-2.0
| 2020-11-18T11:25:08
| 2020-11-18T10:57:26
| null |
UTF-8
|
Python
| false
| false
| 2,782
|
py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
class SoftplusNet(nn.Cell):
def __init__(self):
super(SoftplusNet, self).__init__()
self.softplus = P.Softplus()
def construct(self, x):
return self.softplus(x)
def SoftplusCompute(x):
return np.log(1 + np.exp(x))
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_softplus_1d():
x_np = np.random.random((50,)).astype(np.float32)
y_np = SoftplusCompute(x_np)
x_ms = Tensor(x_np)
net = SoftplusNet()
y_ms = net(x_ms)
assert np.allclose(y_np, y_ms.asnumpy())
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_softplus_2d():
x_np = np.random.random((50, 40)).astype(np.float32)
y_np = SoftplusCompute(x_np)
x_ms = Tensor(x_np)
net = SoftplusNet()
y_ms = net(x_ms)
assert np.allclose(y_np, y_ms.asnumpy())
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_softplus_4d():
x_np = np.random.random((32, 3, 224, 224)).astype(np.float32)
y_np = SoftplusCompute(x_np)
x_ms = Tensor(x_np)
net = SoftplusNet()
y_ms = net(x_ms)
assert np.allclose(y_np, y_ms.asnumpy())
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_softplus_neg():
x_np = np.random.random((32, 3, 224, 224)).astype(np.float32) * -1
y_np = SoftplusCompute(x_np)
x_ms = Tensor(x_np)
net = SoftplusNet()
y_ms = net(x_ms)
assert np.allclose(y_np, y_ms.asnumpy())
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_softplus_4d_fp16():
x_np = np.random.random((32, 3, 224, 224)).astype(np.float16)
y_np = SoftplusCompute(x_np)
x_ms = Tensor(x_np)
net = SoftplusNet()
y_ms = net(x_ms)
assert np.allclose(y_np, y_ms.asnumpy(), rtol=5e-3)
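# Hedged note (added for illustration, not part of the original test file):
# the SoftplusCompute reference above overflows in np.exp() for large positive
# inputs; an equivalent, numerically stable NumPy formulation would be:
def softplus_stable_reference(x):
    return np.logaddexp(0, x)  # log(exp(0) + exp(x)) == log(1 + exp(x))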
|
[
"513344092@qq.com"
] |
513344092@qq.com
|
3bfefc21757436c7a416f2e49b7aa84b341eb947
|
f1119c7d9d2ae98cee7611ecfd0c9c20b2adf7a0
|
/removeCSVHeader.py
|
82a069cecd84b0beed274ce201f2ccc59b21251e
|
[] |
no_license
|
Starviper/PythonAutomation
|
56d2d89109597451bcb1fb11ba3af5f11cd6f13b
|
6cb44683804215e4a9d633a4c5cad78eb2c3d633
|
refs/heads/master
| 2022-11-17T21:34:07.040641
| 2020-07-21T06:24:22
| 2020-07-21T06:24:22
| 277,970,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 911
|
py
|
#! python3
# removeCsvHeader.py - Removes the header from all CSV files in the current working directory.
import csv, os
os.makedirs('headerRemoved', exist_ok=True)
# Loop through every file in the cwd
for csvFilename in os.listdir('.'):
if not csvFilename.endswith('.csv'):
continue # skip non-csv files
print('Removing header from ' + csvFilename + '...')
# Read the CSV file in (skipping first row).
csvRows = []
csvFileObj = open(csvFilename)
readerObj = csv.reader(csvFileObj)
for row in readerObj:
if readerObj.line_num == 1:
continue
csvRows.append(row)
csvFileObj.close()
# Write out the CSV file
csvFileObj = open(os.path.join('headerRemoved', csvFilename), 'w', newline='')
csvWriter = csv.writer(csvFileObj)
for row in csvRows:
csvWriter.writerow(row)
csvFileObj.close()
|
[
"noreply@github.com"
] |
Starviper.noreply@github.com
|
4676e4095a7a2d94af717ea6d54b5d378ea60db3
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_asperities.py
|
1ebc25dc9f32377f9af0114b9c0e7203225008b1
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 254
|
py
|
from xai.brain.wordbase.nouns._asperity import _ASPERITY
# class header
class _ASPERITIES(_ASPERITY):
def __init__(self,):
_ASPERITY.__init__(self)
self.name = "ASPERITIES"
self.specie = 'nouns'
self.basic = "asperity"
self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
76b491c83b4d9a471381859aa1a9f826f2c54f53
|
027c1ef71a4e55cf13f8acb9b24f3f93f1f8ec54
|
/DeeplingAI_course1/week3_simple_net/week3_work.py
|
b0971565fba353045d99189d8ee022de25c8fac8
|
[] |
no_license
|
JiangChenrui/DeeplingAI
|
7c7ade8db4221f105225bb4073b34c68205af6bc
|
d336665be3767a93c894a13514d4957a59c6cacb
|
refs/heads/master
| 2020-04-09T11:33:52.907097
| 2019-07-13T05:19:42
| 2019-07-13T05:19:42
| 160,314,873
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,929
|
py
|
#%% Change working directory from the workspace root to the ipynb file location. Turn this addition off with the DataSciece.changeDirOnImportExport setting
import os
try:
    os.chdir(
        os.path.join(os.getcwd(), 'DeeplingAI',
                     'DeeplingAI_course1', 'week3_simple_net'))
print(os.getcwd())
except:
pass
#%%
import numpy as np
import matplotlib.pyplot as plt
from testCases import *
import sklearn
import sklearn.datasets
import sklearn.linear_model
from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets
get_ipython().magic('matplotlib inline')
np.random.seed(1) # 设置一个固定的随机种子
#%%
# 加载数据集
X, Y = load_planar_dataset()
print("X.shape=" + str(X.shape))
print("Y.shape=" + str(Y.shape))
# 对数据进行可视化
plt.scatter(
X[0, :], X[1, :], c=np.squeeze(Y), s=4, cmap=plt.cm.Spectral) #绘制散点图
#%%
# 使用逻辑回归观察表现
clf = sklearn.linear_model.LogisticRegressionCV()
clf.fit(X.T, Y.T)
#%%
plot_decision_boundary(lambda x: clf.predict(x), X, Y) # 绘制决策边界
plt.title("Logistic Regression") # 图标题
LR_predictions = clf.predict(X.T) # 预测结果
print("逻辑回归的准确性: %d " % float(
(np.dot(Y, LR_predictions) + np.dot(1 - Y, 1 - LR_predictions)) /
float(Y.size) * 100) + "% " + "(正确标记的数据点所占的百分比)")
#%%
# 构建神经网络结构
def layer_sizes(X, Y):
"""
参数:
X :输入数据集,维度为(输入的数量,训练/测试的数量)
Y :标签,维度为(输出的数量,训练/测试数量)
返回:
n_x :输入层数量
n_y : 输出层数量
n_h : 隐藏层数量
"""
n_x = X.shape[0] # 输入层
n_h = 4 # 隐藏层
n_y = Y.shape[0] #输出层
return (n_x, n_h, n_y)
#%%
# 测试layer_sizes
print("=========================测试layer_sizes=========================")
X_asses, Y_asses = layer_sizes_test_case()
(n_x, n_h, n_y) = layer_sizes(X_asses, Y_asses)
print("输入层的节点数量为: n_x = " + str(n_x))
print("隐藏层的节点数量为: n_h = " + str(n_h))
print("输出层的节点数量为: n_y = " + str(n_y))
#%%
# 初始化模型参数
def initialize_parameters(n_x, n_h, n_y):
"""
参数:
n_x 输入节点的数量
n_h 隐藏节点的数量
n_y 输出节点的数量
返回:
parameters 包含参数的字典
W1 权重矩阵,维度为(n_h, n_x)
b1 偏向量,维度为(n_h, 1)
W2 权重矩阵,维度为(n_y, n_h)
b1 偏向量,维度为(n_y, 1)
"""
np.random.seed(2) # 指定一个随机种子
W1 = np.random.randn(n_h, n_x) * 0.01 # 随机初始化一个维度为(n_h, n_x)的矩阵
b1 = np.zeros(shape=(n_h, 1))
W2 = np.random.randn(n_y, n_h) * 0.01
b2 = np.zeros(shape=(n_y, 1))
# 使用断言判断格式是否正确
assert (W1.shape == (n_h, n_x))
assert (b1.shape == (n_h, 1))
assert (W2.shape == (n_y, n_h))
assert (b2.shape == (n_y, 1))
parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2}
return parameters
#%%
# 测试初始化模型
print(
"=======================测试initialize_parameters模型========================")
n_x, n_h, n_y = initialize_parameters_test_case()
parameters = initialize_parameters(n_x, n_h, n_y)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
#%%
def forward_propagation(X, parameters):
"""
参数:
X 维度为(n_x, m)的输入数据
parameters 初始化函数(initialize_parameters)的输出
返回:
A2 使用sigmide()函数计算的第二次激活后的数值
cache 包含“Z1”,“Z2”,“A1”和“A2”的字典类型变量
"""
W1 = parameters["W1"]
W2 = parameters["W2"]
b1 = parameters["b1"]
b2 = parameters["b2"]
# 前向传播计算A2
Z1 = np.dot(W1, X) + b1
A1 = np.tanh(Z1)
Z2 = np.dot(W2, A1) + b2
A2 = sigmoid(Z2)
# 使用断言判断格式
assert (A2.shape == (1, X.shape[1]))
cache = {"Z1": Z1, "Z2": Z2, "A1": A1, "A2": A2}
return (A2, cache)
#%%
# 测试forward_proagation()
print("=================测试forward_proagation===================")
X_asses, parameters = forward_propagation_test_case()
A2, cache = forward_propagation(X_asses, parameters)
print(
np.mean(cache["Z1"]), np.mean(cache["A1"]), np.mean(cache["Z2"]),
np.mean(cache["A2"]))
#%%
# 计算交叉熵损失
def compute_cost(A2, Y, parameters):
"""
参数:
A2 使用sigmoid()函数计算的第二次激活后的值
Y 标签向量,维度为(1, 数量)
parameters 包含W1,b1,W2和b2的字典类型的变量
返回:
成本 交叉熵损失
"""
m = Y.shape[1]
W1 = parameters["W1"]
W2 = parameters["W2"]
# 计算成本
logprobs = np.multiply(np.log(A2), Y) + np.multiply(np.log(1 - A2), (1 - Y))
cost = -np.sum(logprobs) / m
cost = float(np.squeeze(cost))
assert (isinstance(cost, float))
return cost
#%%
# 测试compute_cost()
print("===================测试compute_cost=====================")
A2, Y_asses, parameters = compute_cost_test_case()
print("cost=" + str(compute_cost(A2, Y_asses, parameters)))
#%%
# 反向传播
def backward_propagation(parameters, cache, X, Y):
"""
参数:
parameters 包含参数的一个字典型变量
cache 包含“Z1”,“A1”,“Z2”和“A2”的字典类型的变量
X 输入数据,维度为(2, 数量)
Y 标签,维度为(1,数量)
返回
grads 包含W和b的一个字典型变量
"""
m = X.shape[1]
W1 = parameters["W1"]
# b1 = parameters["b1"]
W2 = parameters["W2"]
# b2 = parameters["b2"]
A1 = cache["A1"]
A2 = cache["A2"]
dZ2 = A2 - Y
dW2 = (1 / m) * np.dot(dZ2, A1.T)
db2 = (1 / m) * np.sum(dZ2, axis=1, keepdims=True)
dZ1 = np.multiply(np.dot(W2.T, dZ2), 1 - np.power(A1, 2))
dW1 = (1 / m) * np.dot(dZ1, X.T)
db1 = (1 / m) * np.sum(dZ1, axis=1, keepdims=True)
grads = {"dW1": dW1, "db1": db1, "dW2": dW2, "db2": db2}
return grads
#%%
# 测试backward_propagation
print("====================测试backward_propagation=======================")
parameters, cache, X_asses, Y_asses = backward_propagation_test_case()
grads = backward_propagation(parameters, cache, X_asses, Y_asses)
print("dW1 = " + str(grads["dW1"]))
print("db1 = " + str(grads["db1"]))
print("dW2 = " + str(grads["dW2"]))
print("db2 = " + str(grads["db2"]))
#%%
# 更新参数
def update_parameters(parameters, grads, learning_rate=1.2):
"""
参数:
parameters 包含参数的字典类型变量
grads 包含导数值的字典类型变量
learning_rate 学习率
返回:
parameters 包含更新参数后的字典类型变量
"""
W1, W2 = parameters["W1"], parameters["W2"]
b1, b2 = parameters["b1"], parameters["b2"]
dW1, dW2 = grads["dW1"], grads["dW2"]
db1, db2 = grads["db1"], grads["db2"]
W1 = W1 - learning_rate * dW1
W2 = W2 - learning_rate * dW2
b1 = b1 - learning_rate * db1
b2 = b2 - learning_rate * db2
parameters = {"W1": W1, "b1": b1, "W2": W2, "b2": b2}
return parameters
#%%
# 测试update_parameters
parameters, grads = update_parameters_test_case()
parameters = update_parameters(parameters, grads)
print("W1=" + str(parameters["W1"]))
print("b1=" + str(parameters["b1"]))
print("W2=" + str(parameters["W2"]))
print("b2=" + str(parameters["b2"]))
#%%
def nn_model(X, Y, n_h, num_iterations, print_cost=False):
"""
参数:
X 数据集,维度为(2, 示例数)
Y 标签,维度为(1, 示例数)
n_h 隐藏层的数量
num_iterations 梯度下降循环中的循环次数
print_cost 如果为True,则每1000次迭代打印一次成本数值
返回:
parameters 学习模型的参数,用它们来进行预测
"""
np.random.seed(3) # 指定随机种子
n_x = layer_sizes(X, Y)[0]
n_y = layer_sizes(X, Y)[2]
parameters = initialize_parameters(n_x, n_h, n_y)
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
for i in range(num_iterations):
A2, cache = forward_propagation(X, parameters)
cost = compute_cost(A2, Y, parameters)
grads = backward_propagation(parameters, cache, X, Y)
parameters = update_parameters(parameters, grads, learning_rate=0.05)
if print_cost:
if i % 1000 == 0:
print("第 ", i, "次循环,成本为" + str(cost))
return parameters
#%%
# 测试nn_model
print("===============测试nn_model=================")
X_asses, Y_asses = nn_model_test_case()
parameters = nn_model(
X_asses, Y_asses, 4, num_iterations=10000, print_cost=False)
print("W1=" + str(parameters["W1"]))
print("b1=" + str(parameters["b1"]))
print("W2=" + str(parameters["W2"]))
print("b2=" + str(parameters["b2"]))
#%%
# 预测
def predict(parameters, X):
"""
参数:
parameters 包含参数的字典类型的变量
X 输入数据(n_x, m)
返回:
prediction 模型预测的向量(红色:0/蓝色:1)
"""
A2, cache = forward_propagation(X, parameters)
prediction = np.round(A2)
return prediction
#%%
#测试predict
print("=========================测试predict=========================")
parameters, X_assess = predict_test_case()
predictions = predict(parameters, X_assess)
print("预测的平均值 = " + str(np.mean(predictions)))
#%%
# 正式运行
parameters = nn_model(X, Y, n_h=4, num_iterations=100000, print_cost=True)
# 绘制边界
plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
plt.title("Decision Boundary for hidden layer size" + str(4))
predictions = predict(parameters, X)
print("准确率:%d" % float(
(np.dot(Y, predictions.T) + np.dot(1 - Y, 1 - predictions.T)) /
float(Y.size) * 100) + '%')
#%%
plt.figure(figsize=(16, 32))
hidden_layer_sizes = [1, 2, 3, 4, 5, 20, 50] #隐藏层数量
for i, n_h in enumerate(hidden_layer_sizes):
plt.subplot(5, 2, i + 1)
plt.title('Hidden Layer of size %d' % n_h)
parameters = nn_model(X, Y, n_h, num_iterations=5000)
plot_decision_boundary(lambda x: predict(parameters, x.T), X, Y)
predictions = predict(parameters, X)
accuracy = float(
(np.dot(Y, predictions.T) + np.dot(1 - Y, 1 - predictions.T)) / float(
Y.size) * 100)
print("隐藏层的节点数量: {} ,准确率: {} %".format(n_h, accuracy))
#%%
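# Follow-up sketch using the helpers defined above (the sample point is made up
# for illustration; the input shape must be (n_x, m) = (2, 1) for one 2-D point):
# sample = np.array([[0.5], [-1.2]])
# print(predict(parameters, sample))  # -> array([[0.]]) or array([[1.]])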
|
[
"1102592323@qq.com"
] |
1102592323@qq.com
|
ac809c7c237ab4e8610c52747662d4478919dabb
|
fc7813a3d01a4c4a02d50f75020a021533846e8f
|
/data.py
|
4dc1614cd357a959fdb606a9b25539d6d031f394
|
[] |
no_license
|
danielspeixoto/MovieGenreClassifier
|
64bb0588f2b86705ef1ce8e8e137dccc48882083
|
e1fefd7202dc89bbc24c38d33c5068b2299eda9e
|
refs/heads/master
| 2021-08-27T20:48:06.279924
| 2021-08-01T22:43:24
| 2021-08-01T22:43:24
| 158,778,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,182
|
py
|
import pickle
import re
import nltk
import pandas as pd
from nltk import TweetTokenizer
from nltk.tokenize import word_tokenize
from nltk.stem.snowball import SnowballStemmer
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
import nltk.corpus.reader.wordnet as wordnet
def get(filename, genres):
data = pd.read_csv(filename, encoding='utf8')
data = data[['Plot', 'Genre']]
data = data[data['Genre'].isin(genres)]
df = data['Plot']
y = data['Genre'].apply(lambda x: 0 if x == 'comedy' else 1)
return df, y
stemmer = SnowballStemmer(language='english')
stop_words = set(stopwords.words('english'))
lemmatizer = WordNetLemmatizer()
ADJECTIVE = 'JJ'
ADVERB = 'RB'
VERB = 'VB'
NOUN = 'NN'
NOUN_PROPER = 'NNP'
TAG_INDEX = 1
WORD_INDEX = 0
lemma_tag = {
ADJECTIVE: wordnet.ADJ,
ADVERB: wordnet.ADV,
VERB: wordnet.VERB,
NOUN: wordnet.NOUN
}
tag_list = [NOUN, VERB, ADVERB, ADJECTIVE]
def pos_tag_filter(words_tag):
filtered = []
for word_tag in words_tag:
if word_tag[TAG_INDEX] == NOUN_PROPER:
continue
for tag in tag_list:
if tag in word_tag[TAG_INDEX] and word_tag[WORD_INDEX] not in stop_words:
filtered.append(
(
word_tag[WORD_INDEX].lower(),
lemma_tag[tag]
)
)
return filtered
def analyze(sentence: str):
tokenizer = TweetTokenizer().tokenize
tokens = tokenizer(sentence)
words_tag = nltk.pos_tag(tokens)
filtered_words_tag = pos_tag_filter(words_tag)
lemmas = [lemmatizer.lemmatize(word[WORD_INDEX], pos=word[TAG_INDEX])
for word in filtered_words_tag]
return lemmas
def analyze_simple(sentence: str):
tokenizer = TweetTokenizer().tokenize
tokens = tokenizer(sentence)
return tokens
def pre_process(doc):
return ' '.join(analyze(doc))
def unpickle(filename):
    with open(filename, 'rb') as pkl:
df, y = pickle.load(pkl)
return df, y
def read(filename):
data = pd.read_csv(filename, encoding='utf8')
df = data['Plot']
y = data['Genre']
return df, y
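# Minimal usage sketch (assumes the NLTK resources used above are downloaded,
# e.g. nltk.download('punkt'), nltk.download('stopwords'),
# nltk.download('averaged_perceptron_tagger'), nltk.download('wordnet')):
# print(pre_process("The clowns were running happily through the old town"))
# # roughly expected output: "clown run happily old town"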
|
[
"danielspeixoto@outlook.com"
] |
danielspeixoto@outlook.com
|
120e96c7d21f0726bc44ad6697697cb916a49650
|
a8c9d9547e38abbd34d318061ec15d3fe357c8ae
|
/main.py
|
de0cda64812b1f26a096561ebdefd21011c9c466
|
[] |
no_license
|
momotaro98/HouseholdsRecommendationFormGenerators
|
69476340b0a4c7caff7309cc6050bf1376c75b2c
|
69ac8d8a839764f7ab7438ced192225402a15c52
|
refs/heads/master
| 2021-01-12T13:26:57.571897
| 2016-12-21T04:10:57
| 2016-12-21T04:10:57
| 69,164,404
| 0
| 0
| null | 2016-12-21T04:10:58
| 2016-09-25T12:58:42
|
Python
|
UTF-8
|
Python
| false
| false
| 14,055
|
py
|
import csv
from datetime import datetime
from datetime import timedelta
import time
import random
from household_recommendation_form_generators import *
class PredictionEvaluation:
"""
>>> pe = PredictionEvaluation([0, 1, 0, 0], [0, 1, 1, 0])
>>> precision = pe.ret_Precision()
>>> recall = pe.ret_Recall()
>>> fvalue = pe.ret_Fvalue()
    >>> precision, recall, fvalue = pe.ret_Precision_Recall_Fvalue()
"""
'''
精度(Precision)、再現率(Recall)、F値(F-measure)を求めるためのクラス
'''
def __init__(self, y_true, y_pred):
if len(y_true) != len(y_pred):
raise Exception("True, Predの2つのリストの長さがちゃうで")
self.TP = 0
self.FP = 0
self.TN = 0
self.FN = 0
for true, pred in zip(y_true, y_pred):
if true and pred:
self.TP += 1
elif true and not pred:
self.FN += 1
elif not true and pred:
self.FP += 1
elif not true and not pred:
self.TN += 1
def ret_Precision(self):
return self.TP / (self.TP + self.FP)
def ret_Recall(self):
return self.TP / (self.TP + self.FN)
def ret_Fvalue(self):
try:
precision = self.ret_Precision()
recall = self.ret_Recall()
fvalue = (2 * recall * precision) / (recall + precision)
except ZeroDivisionError:
fvalue = 0.0
return fvalue
    def ret_Precision_Recall_Fvalue(self):
return self.ret_Precision(), self.ret_Recall(), self.ret_Fvalue()
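    # Worked example matching the doctest above: for y_true=[0, 1, 0, 0] and
    # y_pred=[0, 1, 1, 0] we get TP=1, FP=1, FN=0, TN=2, so
    # Precision = 1/(1+1) = 0.5, Recall = 1/(1+0) = 1.0 and
    # F-value = 2*0.5*1.0/(0.5+1.0) ≈ 0.667.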
class MyPredictionEvaluation:
"""
修論研究における評価量である精度(Precision)を求める
>>> pe = MyPredictionEvaluation([0, 1, 0, 0], [0, 1, 1, 0])
>>> precision = pe.ret_Precision()
"""
def __init__(self, y_true, y_pred):
if len(y_true) != len(y_pred):
raise Exception("True, Predの2つのリストの長さがちゃうで")
self.TP = 0
self.FP = 0
self.TN = 0
self.FN = 0
for true, pred in zip(y_true, y_pred):
if true and pred:
self.TP += 1
elif true and not pred:
self.FN += 1
elif not true and pred:
self.FP += 1
elif not true and not pred:
self.TN += 1
def ret_Precision(self):
try:
ret = self.TP / (self.TP + self.FP)
except ZeroDivisionError:
ret = 0.0
return ret
def _ret_act_list(eyh_instance):
act_y_list = [ay[1] for ay in eyh_instance.ret_act_Y_list()]
return act_y_list
def _ret_pred_list(isreco_instance, start_dt, end_dt):
pred_y_list = []
s_dt = start_dt
while s_dt <= end_dt:
pred_Y = isreco_instance.ret_pred_Y(s_dt.date())
pred_y_list.append(pred_Y)
s_dt += timedelta(days=1)
return pred_y_list
def ret_act_and_pred_y_list(house_group, target_home_id):
# 家庭クラスタの決定木モデルから得られる2016年冬時期のY予測値
start_dt = datetime(2016, 12, 1, 0, 0, 0)
end_dt = datetime(2016, 12, 14, 23, 59, 59)
'''
contents_dict = {
'tu': {'act': None, 'pred': None},
'cu': {'act': None, 'pred': None},
}
'''
content_list = ['tu', 'cu']
# content_list = ['tu']
# content_list = ['cu']
contents_dict = {}
for content in content_list:
contents_dict[content] = {}
for act_or_pred in ('act', 'pred'):
if content == 'tu' and act_or_pred == 'act':
contents_dict[content][act_or_pred] = ExperimentHomesYactTotalUsage(
home_id=target_home_id, # ikexp実験宅
start_train_dt=start_dt,
end_train_dt=end_dt,
)
elif content == 'cu' and act_or_pred == 'act':
contents_dict[content][act_or_pred] = ExperimentHomesYactChangeUsage(
home_id=target_home_id, # ikexp実験宅
start_train_dt=start_dt,
end_train_dt=end_dt,
)
elif content == 'tu' and act_or_pred == 'pred':
contents_dict[content][act_or_pred] = IsTotalUsage(house_group)
elif content == 'cu' and act_or_pred == 'pred':
contents_dict[content][act_or_pred] = IsChangeUsage(house_group)
act_y_list = []
pred_y_list = []
for content, ac_dict in contents_dict.items():
act_y_list += _ret_act_list(ac_dict['act'])
pred_y_list += _ret_pred_list(ac_dict['pred'], start_dt, end_dt)
return act_y_list, pred_y_list
def ret_learning_time(house_group, n_clusters):
any_date = datetime(2015, 12, 24).date()
start = time.time()
cu_pred_Y = IsChangeUsage(house_group).ret_pred_Y(any_date)
tu_pred_Y = IsTotalUsage(house_group).ret_pred_Y(any_date)
end = time.time()
elapsed_time = end - start
learning_time = elapsed_time * n_clusters
return learning_time
def run_standard_output(n_clusters, learning_time, precision):
n_clusters_str = str(n_clusters).rjust(5)
learning_time_str = str(learning_time).rjust(20)
precision_str = str(precision).rjust(20)
print("{0}|{1}|{2}".format(n_clusters_str, learning_time_str, precision_str))
def run_csv_output(n_clusters, learning_time, precision):
with open('eval_cluter.csv', 'a') as f:
writer = csv.writer(f)
writer.writerow([n_clusters, learning_time, precision])
def run_eval():
# make homes_id_list
homes_id_list = ret_homes_id_list()
# make all house group
all_house_group = HouseholdGroup(homes_id_list) # All ModulesUseFlags are True
# Instanciate HomesClusteringWithKMeans
hcwk = HomesClusteringWithKMeans(all_house_group)
# set n_clusters_list
n_clusters_list = [
10,
9,
8,
7,
6,
5,
4,
3,
2,
1,
]
print('n_clusters | Learning_Time[sec] | Precision[rate]')
for n_clusters in n_clusters_list:
print('-' * 79)
# print current n_clusters for evaluations
# print('n_clusters: ', n_clusters)
# Evaluation 01. Eval Learning Computing Time
# 学習計算コスト評価
learning_time = 0
# Evaluation 02. Eval Quality of Recommendations
# レコメンド品質を検証する評価
# set empty act_y_list and pred_y_list
act_y_list = []
pred_y_list = []
# set target_home_id
# target_home_id_list = [1, 8, 9, 11]
target_home_id_list = [1, 9, 11] # 採用!!!
# target_home_id_list = [8, 9, 11]
# target_home_id_list = [9, 11]
# print('target_homes_id', target_home_id_list)
for target_home_id in target_home_id_list: # ikexp
# make target home's cluter's home_id list
target_home_cluster_homes_id_list = hcwk.run(
target_home_id=target_home_id,
n_clusters=n_clusters
)
# print('homes num: ', len(target_home_cluster_homes_id_list))
clustered_house_group = HouseholdGroup(target_home_cluster_homes_id_list)
# Evaluation 01. Eval Learning Computing Time
# 学習計算コスト評価
learning_time += ret_learning_time(clustered_house_group, n_clusters)
# Evaluation 02. Eval Quality of Recommendations
# レコメンド品質を検証する評価
new_act_y_list, new_pred_y_list = ret_act_and_pred_y_list(
clustered_house_group,
target_home_id,
)
act_y_list += new_act_y_list
pred_y_list += new_pred_y_list
# Evaluation 01. Eval Learning Computing Time
# 学習計算コスト評価
learning_time = learning_time / len(target_home_id_list)
# print('learning_time: ', learning_time, ' [sec]')
# Evaluation 02. Eval Quality of Recommendations
# レコメンド品質を検証する評価
# print('act_y_list: ', act_y_list)
print('pre_y_list: ', pred_y_list)
precision = MyPredictionEvaluation(act_y_list, pred_y_list).ret_Precision()
# print('Precision', precision)
# 標準出力
run_standard_output(n_clusters, learning_time, precision)
# CSV出力
run_csv_output(n_clusters, learning_time, precision)
def ret_homes_id_list(hems='all'):
ikexp_homes_id_list = [1, 8, 9, 11]
kosugi_homes_id_list = [
2004, 2010, 2011, 2012, 2014, 2017, 2018, 2019, 2020, 2021,
2023, 2025, 2027, 2030, 2047, 2048, 2053, 2054, 2059, 2070,
2071, 2073, 2079, 2082, 2087, 2088, 2096, 2099, 2104, 2105,
2106, 2112, 2113, 2114, 2115, 2116, 2117, 2118, 2121, 2122,
2123, 2124, 2126, 2129, 2130, 2131, 2137, 2150, 2151, 2152,
] # 50件
if hems == 'all':
homes_id_list = ikexp_homes_id_list + kosugi_homes_id_list
elif hems == 'ikexp':
homes_id_list = ikexp_homes_id_list
elif hems == 'kosugi':
homes_id_list = kosugi_homes_id_list
return homes_id_list
if __name__ == "__main__":
"""
# アプリケーション側(FormGenerator側)が利用する家庭群を
# 用意する処理を始めに行う必要がある
# 前処理段階ではデータを格納しない
# アプリケーションが必要なときにデータを得られるようにする
# 家庭番号(user_id)とデータ期間(timestamp)のみが必要(のはず)
# 家庭番号はHouseholdインスタンスが持つ各DataRows型のインスタンスが保持する
# 期間はアプリケーション側が指定する
# 研究における家庭番号割り当てについて
池田実験協力家庭
0000番台を割り当てる
実際宅 [
0001: Shiraki,
0008: Ikeda,
0009: Matsuoka,
0010: Shiobara,
0011: Nakamura
]
栗原 家庭
1000番台を割り当てる
実際数 1001〜1016
武蔵小杉 家庭
2000番台を割り当てる
実際数 2001〜2160
"""
# *** 家庭グループ準備処理 Start ***
# homes_id_list = ret_homes_id_list()
# ひとまずランダムにクラスタ作成
# TODO: metaデータを用いてk-meansクラスタリング処理
# random.shuffle(homes_id_list) # homes_id_listに対する破壊的処理
# @@@@ HomeMeta情報に基づきK-Meansクラスタリングでグループ化 Start @@@
# list to make
'''
[
# [home_id, family_type, kind_type, area_type],
[1, 1, 2, 3],
[8, 2, 2, 3],
[9, 3, 1, 3],
[10, 3, 1, 3],
[11, 2, 2, 3],
[2004, 1, 2, 3],
[2005, 2, 3, 2],
[2152, 1, 2, 3],
]
'''
# house = Household(home_id=2006)
# meta_row = house.get_home_meta()
# print('meta_row', meta_row)
# print('meta_row.family_num', meta_row.family_num)
'''
# Test HomesClusteringWithKMeans
house_group = HouseholdGroup() # All ModulesUseFlags are True
for home_id in homes_id_list:
# 各家庭が自家庭のHouseholdインスタンスを持つ
house = Household(home_id)
house_group.append(house)
# print(HomesClusteringWithKMeans(house_group).ret_homes_id_pred_dict(n_clusters=4))
# print(HomesClusteringWithKMeans(house_group).ret_target_home_cluster_homes_id_list(target_home_id=9))
# print(HomesClusteringWithKMeans(house_group).run(target_home_id=9, n_clusters=4))
# @@@@ HomeMeta情報に基づきK-Meansクラスタリングでグループ化 End @@@
'''
# *** 家庭グループ準備処理 End ***
run_eval()
"""
###=== FormGeneratorアプリケーション 実行フェーズ Start ===###
elapsed_time_dict = {}
###+++ Non Switching Case Start +++###
# generate form phase
start = time.time()
for house in house_group.get_iter():
# 以下の処理は各家庭のコンピュータが行う
start_time = datetime(2015, 8, 1)
end_time = datetime(2015, 9, 7)
form_generator = FormGenerator(
house, start_time=start_time, end_time=end_time
)
print("home_id", house.id)
form_generator.run()
end = time.time()
elapsed_time_dict['Non Switching'] = end - start
###+++ Non Switching Case End +++###
###+++ Switching Case Start +++###
# Switch flags phase
# この処理はサーバ側で実行される
for house in house_group.get_iter():
sw_fs = UseFlagSwitcher(house)
sw_fs.run() # Switching
# generate form phase
start = time.time()
for house in house_group.get_iter():
start_time = datetime(2015, 8, 1)
end_time = datetime(2015, 9, 7)
form_generator = FormGenerator(
house, start_time=start_time, end_time=end_time
)
print("home_id", house.id)
form_generator.run()
end = time.time()
elapsed_time_dict['Switching'] = end - start
# reset flags phase
for house in house_group.get_iter():
fs = UseFlagSwitcher(house)
fs.reset()
###+++ Non Switching Case End +++###
###=== FormGeneratorアプリケーション 実行フェーズ End ===###
# 各場合の計算時間を表示
print(elapsed_time_dict)
"""
"""
# non report
# start_dt = datetime(2016, 11, 28)
# end_dt = datetime(2016, 12, 4)
# all contents
# start_dt = datetime(2016, 12, 5)
# end_dt = datetime(2016, 12, 11)
# proposed method
start_dt = datetime(2016, 12, 12)
end_dt = datetime(2016, 12, 19)
house = Household(home_id=8)
etu = EvalTotalUsage(house, start_dt, end_dt)
total_usage_hour_per_day = etu.ret_total_usage_hour_per_day()
print('total_usage_hour_per_day', total_usage_hour_per_day)
"""
|
[
"ikenshirogivenup98@gmail.com"
] |
ikenshirogivenup98@gmail.com
|
5f35e60b6dc952576092adbd362d9e286493add4
|
64fc74529b7d26d79de4093f27dceba082e5a298
|
/mnist-regression/mnist-regression.py
|
6fe7fc5af9d90ba9c4f05bfe1ff6794c70ce755d
|
[] |
no_license
|
ttskch/tensorflow-tutorial
|
90c9b3285f7a0b941616f1ec31e09766c32dcfbc
|
398a79b11bb753dfac19acbfe18ad57a395def46
|
refs/heads/master
| 2021-01-23T05:29:24.169759
| 2017-09-05T11:57:35
| 2017-09-05T11:57:35
| 102,469,676
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,630
|
py
|
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
import tensorflow as tf
# Placeholder for a 2-D array of floats, 784 x variable length.
# It holds the per-pixel intensities (784 px) of each training image, for as many training images as are fed in.
x = tf.placeholder(tf.float32, [None, 784])
# Weights applied to the 784 float inputs for each of the 10 output neurons.
# These are learned, so the initial values do not really matter; just initialize them with zeros.
W = tf.Variable(tf.zeros([784, 10]))
# Bias term for each of the 10 output neurons.
# These are learned, so the initial values do not really matter; just initialize them with zeros.
b = tf.Variable(tf.zeros([10]))
# NN model
# matmul is matrix multiplication
y = tf.nn.softmax(tf.matmul(x, W) + b)
# The tutorial shows the line above, but when the cross entropy is later computed with tf.nn.softmax_cross_entropy_with_logits,
# the softmax is applied there, so the line below is fine here.
# https://www.tensorflow.org/api_docs/python/tf/nn/softmax_cross_entropy_with_logits
y = tf.matmul(x, W) + b
# Placeholder that holds the correct labels of the training data.
y_ = tf.placeholder(tf.float32, [None, 10])
# Cross entropy.
# reduce_mean computes the mean of the elements of the given tensor.
# If only one reduction_indices (axis) is given the result is a vector, otherwise a scalar?
# https://www.tensorflow.org/api_docs/python/tf/reduce_mean
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), 1)) # equivalent to the line above
# The formula above is numerically unstable, so use TensorFlow's built-in softmax_cross_entropy_with_logits instead.
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
# Minimize the cross entropy by gradient descent with a learning rate of 0.5.
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# Launch the model in an InteractiveSession?
sess = tf.InteractiveSession()
tf.global_variables_initializer().run() # boilerplate that initializes all the Variables created so far
for _ in range(1000):
    # Randomly fetch 100 image/label pairs from the training data (stochastic gradient descent).
    batch_xs, batch_ys = mnist.train.next_batch(100)
    # Run train_step with the batch fed into the placeholders.
    # https://www.tensorflow.org/api_docs/python/tf/Session#run
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
# Check whether the argmax over dimension 1 (digits 0-9) of y (the prediction) equals that of y_ (the label), returning an array of booleans.
# https://www.tensorflow.org/api_docs/python/tf/argmax
# https://www.tensorflow.org/api_docs/python/tf/equal
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
# Cast the boolean array to float32 (false=0, true=1) and take the mean.
# https://www.tensorflow.org/versions/r1.2/api_docs/python/tf/cast
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
sess.close() # probably good practice to close the session explicitly
|
[
"kanemoto.takashi@gmail.com"
] |
kanemoto.takashi@gmail.com
|
292a15f37cd349657a16a0ba08546b23f2d030d6
|
7ad1ac9c5631676b92c9271d54edd70fe147fca4
|
/ros_ws/build/learning_ros_external_pkgs_noetic/gazebo_ros_pkgs/gazebo_ros/catkin_generated/installspace/spawn_model
|
8951e64f58c55e66278a87348222917d73e139ea
|
[] |
no_license
|
Iris-ye233/final_project_1155150731
|
e92432603592a69593aec2fd556f3f2eec6a5b64
|
b68cf6f7581e545cc765d3fecb4acae09c2a9787
|
refs/heads/master
| 2023-05-04T05:09:26.441975
| 2021-05-26T09:21:55
| 2021-05-26T09:21:55
| 370,749,667
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,692
|
#!/usr/bin/env python3
#
# Copyright 2018 Open Source Robotics Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Desc: helper script for spawning models in gazebo
# Author: John Hsu, Dave Coleman
#
import rospy
import sys
import os
import argparse
import xml.etree.ElementTree
try: # Python 3.x
from urllib.parse import urlsplit, SplitResult
except ImportError: # Python 2.x
from urlparse import urlsplit, SplitResult
from gazebo_ros import gazebo_interface
from gazebo_msgs.msg import ModelStates
from gazebo_msgs.srv import DeleteModel
from std_srvs.srv import Empty
from geometry_msgs.msg import Pose, Quaternion
from tf.transformations import quaternion_from_euler
class SpawnModelNode():
'''
Node to spawn a model in Gazebo using the ROS API
'''
MODEL_DATABASE_TEMPLATE = """\
<sdf version="1.4">
<world name="default">
<include>
<uri>model://{}</uri>
</include>
</world>
</sdf>"""
def __init__(self):
parser = argparse.ArgumentParser(description='Spawn a model in gazebo using the ROS API')
xmlformat = parser.add_mutually_exclusive_group(required=True)
xmlformat.add_argument('-urdf', action='store_true', help='Incoming xml is in urdf format')
xmlformat.add_argument('-sdf', action='store_true', help='Incoming xml is in sdf format')
source = parser.add_mutually_exclusive_group(required=True)
source.add_argument('-file', type=str, metavar='FILE_NAME', help='Load model xml from file')
source.add_argument('-param', type=str, metavar='PARAM_NAME', help='Load model xml from ROS parameter')
source.add_argument('-database', type=str, metavar='MODEL_NAME',
help='Load model XML from specified model in Gazebo Model Database')
source.add_argument('-stdin', action='store_true', help='Load model from stdin')
parser.add_argument('-model', required=True, type=str, metavar='MODEL_NAME', help='Name of model to spawn')
parser.add_argument('-reference_frame', type=str, default='',
help='Name of the model/body where initial pose is defined.\
If left empty or specified as "world", gazebo world frame is used')
parser.add_argument('-gazebo_namespace', type=str, default='/gazebo',
help='ROS namespace of gazebo offered ROS interfaces. Defaults to /gazebo/')
parser.add_argument('-robot_namespace', type=str, default=rospy.get_namespace(),
help='change ROS namespace of gazebo-plugins')
parser.add_argument('-unpause', action='store_true',
help='!!!Experimental!!! unpause physics after spawning model')
parser.add_argument('-wait', type=str, metavar='MODEL_NAME', help='!!!Experimental!!! wait for model to exist')
parser.add_argument('-x', type=float, default=0, help='x component of initial position, meters')
parser.add_argument('-y', type=float, default=0, help='y component of initial position, meters')
parser.add_argument('-z', type=float, default=0, help='z component of initial position, meters')
parser.add_argument('-R', type=float, default=0, help='roll angle of initial orientation, radians')
parser.add_argument('-P', type=float, default=0, help='pitch angle of initial orientation, radians')
parser.add_argument('-Y', type=float, default=0, help='yaw angle of initial orientation, radians')
parser.add_argument('-J', dest='joints', default=[], action='append', metavar=('JOINT_NAME', 'JOINT_POSITION'),
type=str, nargs=2, help='initialize the specified joint at the specified position')
parser.add_argument('-package_to_model', action='store_true',
help='convert urdf <mesh filename="package://..." to <mesh filename="model://..."')
parser.add_argument('-b', dest='bond', action='store_true',
help='bond to gazebo and delete the model when this program is interrupted')
args = rospy.myargv()
self.args = parser.parse_args(args[1:])
# Convert position of joints to floats
for i in range(len(self.args.joints)):
self.args.joints[i][1] = float(self.args.joints[i][1])
def run(self):
'''
Run node, spawning model and doing other actions as configured in program arguments.
Returns exit code, 1 for failure, 0 for success
'''
# Wait for model to exist if wait flag is enabled
if self.args.wait:
self.model_exists = False
def models_cb(models):
self.model_exists = self.args.wait in models.name
rospy.Subscriber("%s/model_states" % self.args.gazebo_namespace, ModelStates, models_cb)
r = rospy.Rate(10)
rospy.loginfo('Waiting for model {} before proceeding.'.format(self.args.wait))
while not rospy.is_shutdown() and not self.model_exists:
r.sleep()
if rospy.is_shutdown():
return 0
# Load model XML from file
if self.args.file:
rospy.loginfo("Loading model XML from file %s" % self.args.file)
if not os.path.exists(self.args.file):
rospy.logfatal("Error: specified file %s does not exist", self.args.file)
return 1
if not os.path.isfile(self.args.file):
rospy.logfatal("Error: specified file %s is not a file", self.args.file)
return 1
# load file
try:
f = open(self.args.file, 'r')
model_xml = f.read()
except IOError as e:
rospy.logerr("Error reading file {}: {}".format(self.args.file, e))
return 1
if model_xml == "":
rospy.logerr("Error: file %s is empty", self.args.file)
return 1
# Load model XML from ROS param
elif self.args.param:
rospy.loginfo("Loading model XML from ros parameter %s" % self.args.param)
model_xml = rospy.get_param(self.args.param)
if model_xml == "":
rospy.logerr("Error: param does not exist or is empty")
return 1
# Generate model XML by putting requested model name into request template
elif self.args.database:
rospy.loginfo("Loading model XML from Gazebo Model Database")
model_xml = self.MODEL_DATABASE_TEMPLATE.format(self.args.database)
elif self.args.stdin:
rospy.loginfo("Loading model XML from stdin")
model_xml = sys.stdin.read()
if model_xml == "":
rospy.logerr("Error: stdin buffer was empty")
return 1
# Parse xml to detect invalid xml before sending to gazebo
try:
xml_parsed = xml.etree.ElementTree.fromstring(model_xml)
except xml.etree.ElementTree.ParseError as e:
rospy.logerr('Invalid XML: {}'.format(e))
return 1
# Replace package:// with model:// for mesh tags if flag is set
if self.args.package_to_model:
for element in xml_parsed.iterfind('.//mesh'):
filename_tag = element.get('filename')
if filename_tag is None:
continue
url = urlsplit(filename_tag)
if url.scheme == 'package':
url = SplitResult('model', *url[1:])
element.set('filename', url.geturl())
# Encode xml object back into string for service call
model_xml = xml.etree.ElementTree.tostring(xml_parsed)
# For Python 3
if not isinstance(model_xml, str):
model_xml = model_xml.decode(encoding='ascii')
# Form requested Pose from arguments
initial_pose = Pose()
initial_pose.position.x = self.args.x
initial_pose.position.y = self.args.y
initial_pose.position.z = self.args.z
q = quaternion_from_euler(self.args.R, self.args.P, self.args.Y)
initial_pose.orientation = Quaternion(*q)
# Spawn model using urdf or sdf service based on arguments
success = False
if self.args.urdf:
success = gazebo_interface.spawn_urdf_model_client(self.args.model, model_xml, self.args.robot_namespace,
initial_pose, self.args.reference_frame,
self.args.gazebo_namespace)
elif self.args.sdf:
success = gazebo_interface.spawn_sdf_model_client(self.args.model, model_xml, self.args.robot_namespace,
initial_pose, self.args.reference_frame,
self.args.gazebo_namespace)
if not success:
rospy.logerr('Spawn service failed. Exiting.')
return 1
# Apply joint positions if any specified
if len(self.args.joints) != 0:
joint_names = [joint[0] for joint in self.args.joints]
joint_positions = [joint[1] for joint in self.args.joints]
success = gazebo_interface.set_model_configuration_client(self.args.model, "",
joint_names, joint_positions,
self.args.gazebo_namespace)
if not success:
rospy.logerr('SetModelConfiguration service failed. Exiting.')
return 1
# Unpause physics if user requested
if self.args.unpause:
rospy.loginfo('Unpausing physics')
rospy.wait_for_service('%s/unpause_physics' % self.args.gazebo_namespace)
try:
unpause_physics = rospy.ServiceProxy('%s/unpause_physics' % self.args.gazebo_namespace, Empty)
unpause_physics()
except rospy.ServiceException as e:
rospy.logerr("Unpause physics service call failed: {}".format(e))
return 1
# If bond enabled, setup shutdown callback and wait for shutdown
if self.args.bond:
rospy.on_shutdown(self._delete_model)
rospy.loginfo('Waiting for shutdown to delete model {}'.format(self.args.model))
rospy.spin()
return 0
def _delete_model(self):
'''
Delete model from gazebo on shutdown if bond flag enabled
'''
rospy.loginfo('Deleting model {}'.format(self.args.model))
try:
delete_model = rospy.ServiceProxy('%s/delete_model' % self.args.gazebo_namespace, DeleteModel)
delete_model(model_name=self.args.model)
except rospy.ServiceException as e:
rospy.logerr("Delete model service call failed: %s", e)
if __name__ == "__main__":
sm = SpawnModelNode()
rospy.init_node('spawn_model', anonymous=True)
exit_code = sm.run()
sys.exit(exit_code)
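# Typical invocations (a sketch; model names and file paths are placeholders):
#   rosrun gazebo_ros spawn_model -file /path/to/robot.urdf -urdf -model my_robot -x 0 -y 0 -z 1
#   rosrun gazebo_ros spawn_model -database coke_can -sdf -model coke_can_1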
|
[
"1155150731@link.cuhk.edu.hk"
] |
1155150731@link.cuhk.edu.hk
|
|
4eeecd18a80bf773f8d4642633b66f15f11bf0f8
|
d138deda43e36f6c79c5e3a9ef1cc62c6a92e881
|
/python/paddle/vision/__init__.py
|
aeb07bf281fb0a0289640e0591af4d864ca10b39
|
[
"Apache-2.0"
] |
permissive
|
seiriosPlus/Paddle
|
51afd6f5c85c3ce41dd72953ee659d1539c19f90
|
9602a182b2a4979247c09df1ec283fc39cb4a981
|
refs/heads/develop
| 2021-08-16T16:05:10.848535
| 2020-12-27T15:15:19
| 2020-12-27T15:15:19
| 123,257,829
| 2
| 0
|
Apache-2.0
| 2019-12-10T08:22:01
| 2018-02-28T08:57:42
|
C++
|
UTF-8
|
Python
| false
| false
| 929
|
py
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import models
from .models import *
from . import transforms
from .transforms import *
from . import datasets
from .datasets import *
from . import image
from .image import *
from . import ops
__all__ = models.__all__ \
+ transforms.__all__ \
+ datasets.__all__ \
+ image.__all__
|
[
"noreply@github.com"
] |
seiriosPlus.noreply@github.com
|
b6a66dcb3d0936e7e0833050be4330e71692e6d8
|
8a101318023566a379096c46d94816dc3237b58a
|
/RaspberryPi/nafiqad1/python/hware.py
|
b70d879b40ddbafc56330cdcdce0060aff7dc859
|
[] |
no_license
|
binhpham1909/MyProject
|
8aef1971073af88874adf027e5072c1df26bc1fe
|
11c2ebf99bae3ea9180421324e8e886d8ab87e21
|
refs/heads/master
| 2021-01-20T20:15:24.575508
| 2017-05-12T09:25:11
| 2017-05-12T09:25:11
| 60,657,275
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,071
|
py
|
import os
import RPi.GPIO as GPIO
import database
class HARDWARE:
def __init__(self,alertbtn,alertled,smsbtn,smsled,ringbtn,ringsms,bellbtn,bellout):
self.alert_btn=alertbtn
self.alert_led=alertled
self.sms_btn=smsbtn
self.sms_led=smsled
self.ring_btn=ringbtn
self.ring_led=ringsms
self.bell_btn=bellbtn
self.bell_out=bellout
self.RELAY_ON=GPIO.LOW
self.RELAY_OFF=GPIO.HIGH
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self.alert_btn, GPIO.IN)
GPIO.setup(self.sms_btn, GPIO.IN)
GPIO.setup(self.ring_btn, GPIO.IN)
GPIO.setup(self.bell_btn, GPIO.IN)
GPIO.setup(self.alert_led, GPIO.OUT)
GPIO.setup(self.sms_led, GPIO.OUT)
GPIO.setup(self.ring_led, GPIO.OUT)
GPIO.setup(self.bell_out, GPIO.OUT)
GPIO.output(self.bell_out, self.RELAY_OFF)
GPIO.add_event_detect(self.alert_btn, GPIO.FALLING, callback=self.UpdateAlert)
GPIO.add_event_detect(self.ring_btn, GPIO.FALLING, callback=self.UpdateRing)
GPIO.add_event_detect(self.bell_btn, GPIO.FALLING, callback=self.DisableBell)
GPIO.add_event_detect(self.sms_btn, GPIO.FALLING, callback=self.UpdateSms)
def UpdateAlert(self,chanel):
os.system('shutdown now -h')
# db=database.DB("localhost","root","!()(!(*(","hbiot")
# if(db.GetSetting('alert')):
# db.SetSetting('alert',0)
# db.SetSetting('ring',0)
# db.SetSetting('sms',0)
## db.SetSetting('email',0)
# self.SetLed(self.alert_led,db.GetSetting('alert'))
# self.SetLed(self.sms_led,db.GetSetting('sms'))
# self.SetLed(self.ring_led,db.GetSetting('ring'))
# else:
# db.SetSetting('alert',db.GetSetting('alert'))
# db.SetSetting('ring',1)
# db.SetSetting('sms',1)
# db.SetSetting('email',1)
# self.SetLed(self.alert_led,db.GetSetting('alert'))
# self.SetLed(self.sms_led,db.GetSetting('sms'))
# self.SetLed(self.ring_led,db.GetSetting('ring'))
# db.CloseDb()
def UpdateRing(self,chanel):
os.system('reboot')
# db=database.DB("localhost","root","!()(!(*(","hbiot")
# if(db.GetSetting('ring')):
# db.SetSetting('ring',0)
# self.SetLed(self.ring_led,db.GetSetting('ring'))
# else:
# db.SetSetting('ring',1)
# self.SetLed(self.ring_led,db.GetSetting('ring'))
# db.CloseDb()
def UpdateSms(self,chanel):
db=database.DB("localhost","root","!()(!(*(","hbiot")
if(db.GetSetting('sms')):
db.SetSetting('sms',0)
self.SetLed(self.sms_led,db.GetSetting('sms'))
else:
db.SetSetting('sms',1)
self.SetLed(self.sms_led,db.GetSetting('sms'))
db.CloseDb()
def DisableBell(self,chanel):
db=database.DB("localhost","root","!()(!(*(","hbiot")
db.SetSetting('bell',0)
self.RingBell(0)
db.CloseDb()
def RingBell(self,val):
if(val):
GPIO.output(self.bell_out, self.RELAY_ON)
else:
GPIO.output(self.bell_out, self.RELAY_OFF)
def SetLed(self,pin,val):
GPIO.output(pin,val)
def UpdateIndicator(self):
db=database.DB("localhost","root","!()(!(*(","hbiot")
al=db.GetSetting('alert')
sm=db.GetSetting('sms')
ri=db.GetSetting('ring')
self.SetLed(self.alert_led,al)
self.SetLed(self.sms_led,sm)
self.SetLed(self.ring_led,ri)
db.CloseDb()
|
[
"binhpham1909@gmail.com"
] |
binhpham1909@gmail.com
|
949f0533477c0e3fb2249c565b4b279fd4869472
|
562d297e429949d92d7081016d18bbe1c357f1a9
|
/todo/models.py
|
2e63260f8931a69f26187e6af74ba5deb4394903
|
[] |
no_license
|
dan0712/django_todo
|
7411e23f34305ea72a06eaf0b6a84f0a994264e1
|
7997df678bfb1063b2d5077e13d251faab03ab11
|
refs/heads/master
| 2021-01-12T11:08:47.298372
| 2016-12-01T11:52:41
| 2016-12-01T11:52:41
| 72,846,378
| 0
| 0
| null | 2016-12-01T11:52:42
| 2016-11-04T12:36:53
|
Python
|
UTF-8
|
Python
| false
| false
| 3,522
|
py
|
from __future__ import unicode_literals
import datetime
from django.db import models
from django.contrib.auth.models import User, Group
from django.template.defaultfilters import slugify
from django.core.urlresolvers import reverse
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class List(models.Model):
name = models.CharField(max_length=60)
slug = models.SlugField(max_length=60, editable=False)
group = models.ForeignKey(Group)
def save(self, *args, **kwargs):
if not self.id:
self.slug = slugify(self.name)
super(List, self).save(*args, **kwargs)
def __str__(self):
return self.name
def incomplete_tasks(self):
# Count all incomplete tasks on the current list instance
return Item.objects.filter(list=self, completed=0)
class Meta:
ordering = ["name"]
verbose_name_plural = "Lists"
# Prevents (at the database level) creation of two lists with the same name in the same group
unique_together = ("group", "slug")
@python_2_unicode_compatible
class Item(models.Model):
title = models.CharField(max_length=140)
list = models.ForeignKey(List)
created_date = models.DateField(auto_now=True)
due_date = models.DateField(blank=True, null=True, )
completed = models.BooleanField(default=None)
completed_date = models.DateField(blank=True, null=True)
created_by = models.ForeignKey(User, related_name='todo_created_by')
assigned_to = models.ForeignKey(User, blank=True, null=True, related_name='todo_assigned_to')
note = models.TextField(blank=True, null=True)
priority = models.PositiveIntegerField()
# Has due date for an instance of this object passed?
def overdue_status(self):
"Returns whether the item's due date has passed or not."
if self.due_date and datetime.date.today() > self.due_date:
return 1
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('todo-task_detail', kwargs={'task_id': self.id, })
# Auto-set the item creation / completed date
    def save(self, *args, **kwargs):
        # If Item is being marked complete, set the completed_date
        if self.completed:
            self.completed_date = datetime.datetime.now()
        super(Item, self).save(*args, **kwargs)
class Meta:
ordering = ["priority"]
@python_2_unicode_compatible
class Comment(models.Model):
"""
Not using Django's built-in comments because we want to be able to save
a comment and change task details at the same time. Rolling our own since it's easy.
"""
author = models.ForeignKey(User)
task = models.ForeignKey(Item)
date = models.DateTimeField(default=datetime.datetime.now)
body = models.TextField(blank=True)
def snippet(self):
# Define here rather than in __str__ so we can use it in the admin list_display
return "{author} - {snippet}...".format(author=self.author, snippet=self.body[:35])
def __str__(self):
        return self.snippet()
@python_2_unicode_compatible
class Inventory(models.Model):
date = models.DateTimeField(default=datetime.datetime.now)
name = models.CharField(max_length=140)
def __str__(self):
return self.name
@python_2_unicode_compatible
class InventoryItem(models.Model):
date = models.DateTimeField(default=datetime.datetime.now)
item = models.CharField(max_length=140)
def __str__(self):
return self.item
|
[
"dan.balan0712@gmail.com"
] |
dan.balan0712@gmail.com
|
1ffc2c5a06d51f5b06d730f8f40258749e0ef1b1
|
9b3c1f5c65b72a70fe4821fb86b550fada74fe0e
|
/app/controllers/usuario.py
|
71f99258dcb9341920953fe1ee66df5b26aa1429
|
[] |
no_license
|
rodrigondec/pds_Home-Control
|
e2c27a1c5fbf0ee139625f2614bcf3556d29a236
|
d4767bba06921d6eb5182e632c212bac8f8333a7
|
refs/heads/master
| 2021-06-18T05:29:03.531780
| 2017-06-20T13:09:39
| 2017-06-20T13:09:39
| 91,518,250
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,184
|
py
|
from flask import render_template, Blueprint, session, abort, flash, redirect, url_for
from app import db
from app.models import Administrador, Usuario, Component
from app.forms import UsuarioForm, AdicionarUsuariosForm
mod_usuario = Blueprint('usuario', __name__, url_prefix='/usuario', template_folder='templates')
@mod_usuario.route('/')
def index():
if 'logged_in' in session:
print(session['logged_in'])
return render_template('usuario/index.html')
else:
flash('Entre primeiro')
return redirect('/')
@mod_usuario.route('/cadastrar', methods=['GET', 'POST'])
def cadastrar_usuario():
form = UsuarioForm()
if form.validate_on_submit():
if form.is_admin.data:
usuario = Administrador(form.nome.data, form.email.data, form.senha.data)
else:
usuario = Usuario(form.nome.data, form.email.data, form.senha.data)
db.session.add(usuario)
db.session.commit()
flash('Usuário criado com sucesso')
return redirect('/')
return render_template('usuario/cadastrar.html', title='Sign In', form=form)
@mod_usuario.route('/adicionar/<id_modulo>', methods=['GET', 'POST'])
def adicionar_usuario(id_modulo):
if 'logged_in' in session:
usuario = Usuario.query.filter_by(id_usuario=session['id_usuario']).first()
modulo = Component.query.filter_by(id_component=id_modulo).first()
if not modulo.alteravel_por(usuario):
flash('Você não tem permissão para alterar esse modulo')
return redirect('/dashboard/')
usuarios = Usuario.query.all()
for user in modulo.usuarios:
usuarios.remove(user)
form = AdicionarUsuariosForm(usuarios)
if form.validate_on_submit():
for id_usuario in form.usuarios.data:
user = Usuario.query.filter_by(id_usuario=id_usuario).first()
modulo.add_usuario(user)
db.session.commit()
return redirect('/dashboard/modulo/'+id_modulo)
return render_template('usuario/adicionar.html', form=form, modulo=modulo)
else:
flash('Entre primeiro')
return redirect('/')
|
[
"rodrigondec@gmail.com"
] |
rodrigondec@gmail.com
|
100e0f5d8059c567277afc8783402dc3af76b53c
|
66d02af8c7aa5b0d27a3ba4adeab6b218f456fd9
|
/main.py
|
4950bf754005e01b99df7578f263376a7a834178
|
[] |
no_license
|
ruodingt/Distillation-of-Faster-rcnn
|
ad433ae57c09e241659831115bec3f1e4bee1ff4
|
43c8ec0b7f77296092b8e620ebb9cf8e14fec8d6
|
refs/heads/master
| 2020-09-01T08:49:03.739412
| 2019-07-16T07:13:54
| 2019-07-16T07:13:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,634
|
py
|
# Distillation step inside the training loop; `model`, `model_teacher`,
# `model_adap`, `to_device`, `input`, `cfg_distillation`, `epoch`, `max_epoch`
# and the compute_loss_* helpers are assumed to be defined elsewhere.
import torch
output = model(to_device(input))
output_teacher = model_teacher(to_device(input))
# rois_label_t=output['cls_target']
'''
Classification and regression distillation:
'''
if cfg_distillation.get('cls_distillation',None):
cfg_cls_distillation = cfg_distillation.get('cls_distillation')
rcn_cls_score_t = output_teacher['cls_pred']
rcn_cls_score_s = output['cls_pred']
RCNN_loss_cls_s = output['BboxNet.cls_loss']
start_mu=cfg_cls_distillation.get('start_mu')
end_mu=cfg_cls_distillation.get('end_mu')
mu=start_mu+(end_mu-start_mu)*(float(epoch)/max_epoch)
loss_rcn_cls, loss_rcn_cls_soft = compute_loss_classification(rcn_cls_score_t, rcn_cls_score_s, mu,
RCNN_loss_cls_s, T=1, weighted=True)
# loss_rcn_cls, loss_rcn_cls_soft = compute_loss_classification(rcn_cls_score_t, rcn_cls_score_s, mu,
#
output['BboxNet.cls_loss']=loss_rcn_cls
if cfg_distillation.get('loc_distillation',None):
cfg_loc_distillation=cfg_distillation.get('loc_distillation')
RCNN_loss_bbox_s=output['BboxNet.loc_loss']
bbox_pred_s=output['loc_pred']
bbox_pred_t=output_teacher['loc_pred']
rois_target_s=output['loc_target']
rois_target_t=output_teacher['loc_target']
start_ni=cfg_loc_distillation.get('start_ni')
end_ni=cfg_loc_distillation.get('end_ni')
ni=start_ni+(end_ni-start_ni)*(float(epoch)/max_epoch)
loss_rcn_reg, loss_rcn_reg_soft,_,_ = \
compute_loss_regression(RCNN_loss_bbox_s, bbox_pred_s, bbox_pred_t,rois_target_s, rois_target_t, m=0.01, ni=ni)
# loss_rcn_cls, loss_rcn_cls_soft = compute_loss_classification(rcn_cls_score_t, rcn_cls_score_s, mu,
# RCNN_loss_cls_s, T=1, weighted=True)
output['BboxNet.loc_loss'] = loss_rcn_reg
'''
Feature level distillation:
'''
# sup_loss = (torch.pow(sup_feature - stu_feature_adap, 2) * mask_batch).sum() / norms
# sup_loss = sup_loss * args.imitation_loss_weigth
if cfg_distillation.get('feature_distillation', None):
cfg_feature_distillation=cfg_distillation.get('feature_distillation')
sup_feature=output_teacher['features'][0]
stu_feature=output['features'][0]
stu_feature_adap=model_adap(stu_feature)
start_weigth=cfg_feature_distillation.get('start_weigth')
end_weigth=cfg_feature_distillation.get('end_weigth')
imitation_loss_weigth = start_weigth + (end_weigth - start_weigth) * (float(epoch) / max_epoch)
if cfg_feature_distillation.get('start_weigth', None):
mask_batch = output_teacher['RoINet.mask_batch']
mask_list = []
for mask in mask_batch:
mask = (mask > 0).float().unsqueeze(0)
mask_list.append(mask)
mask_batch = torch.stack(mask_list, dim=0)
norms = mask_batch.sum() * 2
sup_loss = (torch.pow(sup_feature - stu_feature_adap, 2) * mask_batch).sum() / norms
else:
sup_loss = (torch.pow(sup_feature - stu_feature_adap, 2)).sum()
# imitation_loss_weigth=0.0001
sup_loss = sup_loss * imitation_loss_weigth
output['sup.loss']=sup_loss
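# Sketch of what `model_adap` used above might look like (an assumption, not
# taken from this repository): a small adaptation head mapping the student's
# feature map to the teacher's channel count before the imitation loss.
# import torch.nn as nn
# class FeatureAdaption(nn.Module):
#     def __init__(self, student_channels, teacher_channels):
#         super().__init__()
#         self.conv = nn.Conv2d(student_channels, teacher_channels, kernel_size=3, padding=1)
#         self.relu = nn.ReLU(inplace=True)
#     def forward(self, x):
#         return self.relu(self.conv(x))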
|
[
"noreply@github.com"
] |
ruodingt.noreply@github.com
|
e3f0c0455ddf71a846c572c5a51b3c57cf28f2d0
|
eadf2f0440d70963e00fa053082ed42cbb930e24
|
/MutilProcess.py
|
b51b5b9d8875198cdd5c8a146a48a046d02b6b85
|
[] |
no_license
|
chenyunzhan/StudyPython
|
a3277d933b5ce2ae63783ed1313c37c1af22048a
|
c1c4e58210bb50352a9b13bf3a17e7692d7cd1ed
|
refs/heads/master
| 2021-08-07T21:23:05.301980
| 2017-11-09T01:30:31
| 2017-11-09T01:30:31
| 107,389,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 450
|
py
|
from multiprocessing import Process
import time
class MyProcess(Process):
    def __init__(self, seconds):
        super().__init__()
        self.seconds = seconds
    def run(self):
        start = time.time()
        time.sleep(self.seconds)
        stop = time.time()
        print("child spend %d" % (stop - start))
if __name__ == '__main__':
    # The guard keeps child processes from re-running this block when the
    # 'spawn' start method is used (Windows/macOS).
    start = time.time()
    for item in range(5):
        p = MyProcess(item + 1)
        p.start()
    stop = time.time()
    # The children are not joined, so this only measures how long it took to
    # start them, not how long they ran.
    print("parent spend %d" % (stop - start))
|
[
"chenyunzhan08@126.com"
] |
chenyunzhan08@126.com
|
dc26e457355547f57dfa09f0d3e429952a1b3679
|
220d1675123669aae6250b1f51e430731af0ddee
|
/mundo-01/ex019.py
|
9673e0e0938cc4f02594c9d73d8a28f3a6028c43
|
[
"MIT"
] |
permissive
|
bryan-lima/python-cursoemvideo
|
de6ce440a6ee29435b8021a9dcaa62e5f4c884ee
|
6b8b621fb5958d8b5314bc788e8ef3bb4bfe5217
|
refs/heads/master
| 2023-02-20T08:31:59.103951
| 2021-01-23T23:42:16
| 2021-01-23T23:42:16
| 321,140,944
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 509
|
py
|
# A teacher wants to randomly pick one of his four students to erase the board
# Write a program that helps him by reading their names and printing the name of the chosen one
from random import choice
student1 = input('Nome do primeiro aluno: ')
student2 = input('Nome do segundo aluno: ')
student3 = input('Nome do terceiro aluno: ')
student4 = input('Nome do quarto aluno: ')
students = [student1, student2, student3, student4]
chosenStudent = choice(students)
print('O aluno escolhido foi {}' .format(chosenStudent))
|
[
"bryanlima200@outlook.com"
] |
bryanlima200@outlook.com
|
2229eeaf95cea95f93d89096dce48baac03028f0
|
1578ce8cc9d5d2af4500fadd3d6cabf42e3389fc
|
/bits.py
|
31fdd469ae3cbfeb90ffceb47a6551652ffe9e44
|
[] |
no_license
|
Man0j-kumar/python
|
28bb0291a39c90bcaeab73fc46c6973ac91f6255
|
43e9e8ce2b90c2b8f9318ea4724d23786ac46629
|
refs/heads/master
| 2020-06-28T09:38:07.618541
| 2019-08-16T10:26:06
| 2019-08-16T10:26:06
| 200,201,198
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 46
|
py
|
import struct
# "P" is the void-pointer format, so this prints the pointer size in bits
# (32 on a 32-bit interpreter, 64 on a 64-bit one).
print(struct.calcsize("P")*8)
|
[
"noreply@github.com"
] |
Man0j-kumar.noreply@github.com
|
60933e4c69b13bbfd89cf6070d6829a1b44ecea6
|
040303210bb29385d68d1333423c843cf0ed3ab6
|
/preprocessing/simplepreprocessor.py
|
d3fcf11c4298369deb47d91c0de61f91f239b2c2
|
[] |
no_license
|
TrungKhoaLe/ComputerVisionProject
|
d83dece743b48559ecd40be28eccd1489372536c
|
7d21af25c8899330598f454c7fbf95217e5a6bce
|
refs/heads/master
| 2020-06-13T21:15:51.885643
| 2019-12-15T11:36:51
| 2019-12-15T11:36:51
| 194,790,122
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 491
|
py
|
import cv2
class SimplePreprocessor:
def __init__(self, width, height, inter=cv2.INTER_AREA):
# store the target image width, height, and interpolation
# method used when resizing
self.width = width
self.height = height
self.inter = inter
def preprocess(self, image):
        # resize the image to a fixed size, ignoring the aspect
# ratio
return cv2.resize(image, (self.width, self.height),
interpolation=self.inter)
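# Minimal usage sketch (the image path is a placeholder):
# sp = SimplePreprocessor(32, 32)
# image = cv2.imread("example.jpg")
# resized = sp.preprocess(image)  # shape -> (32, 32, 3) for a colour image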
|
[
"KhoaLeTrung@Les-MacBook-Pro.local"
] |
KhoaLeTrung@Les-MacBook-Pro.local
|
5fd5332acc29cd3209b53c9e2b7dac33c0cc5b89
|
e92c110f34f54c239cf51f47b9b78336f73e0982
|
/exercises/mar4/birthdays1.py
|
dcfbfd14a54e9eae6b16d1725ece2c4b38d7ec44
|
[] |
no_license
|
averycordle/csce204
|
953149b9255a66f6da61fd1f9772a3489abba7b9
|
9c864aa100927aa689a80c1cf3cd8caf0249b3a4
|
refs/heads/main
| 2023-04-15T08:38:08.687747
| 2021-05-03T17:01:46
| 2021-05-03T17:01:46
| 328,207,946
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 678
|
py
|
#Author: Avery Cordle
from datetime import date
birthdays = [
date(2021, 7, 23), date(2021, 2, 25), date(2021, 10, 9), date(2021, 4, 29), date(2021, 9, 20), date(2021, 10, 20), date(2021, 11, 29)
]
closestBirthday = date(2021, 12, 31)
for birthday in birthdays:
daysTilClosest = (closestBirthday - date.today()).days
daysTillBirthday = (birthday - date.today()).days
#birthday already passed
if daysTillBirthday<0:
continue #go to the next item in the list
#what we want
if daysTillBirthday<daysTilClosest:
closestBirthday = birthday
print ("Closest birthday is: " + closestBirthday.strftime("%m/%d/%Y"))
|
[
"noreply@github.com"
] |
averycordle.noreply@github.com
|
24199eef493f792f7fa42254f02857e7fbe96929
|
ce664ee0abe3487316059c9d6379582544696eea
|
/smallest_positive.py
|
8bb9e2b2c729dc61e202f123f30b3e664bc45aa0
|
[
"MIT"
] |
permissive
|
OuedraogoAbdoul/python-review
|
eddcc16015d01f98f92f039423d8f6cd0df90abc
|
644cbfdd6f77d11241d0ca30d4717c3d9c8b7cc2
|
refs/heads/master
| 2021-01-13T18:26:45.760483
| 2020-02-23T04:57:51
| 2020-02-23T04:57:51
| 242,456,449
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 379
|
py
|
def smallest_positive(in_list):
# TODO: Define a control structure that finds the smallest positive
# number in in_list and returns the correct smallest number.
return sorted([i for i in in_list if i > 0])[0]
# Test cases
print(smallest_positive([4, -6, 7, 2, -4, 10]))
# Correct output: 2
print(smallest_positive([.2, 5, 3, -.1, 7, 7, 6]))
# Correct output: 0.2
|
[
"ouabdoul@hotmail.com"
] |
ouabdoul@hotmail.com
|
61455cbafe0dcc297640de81ea173c2d3ff67f9f
|
6f3389c93cf1057bca5398940932561c19dbec1d
|
/백준/기초/2577.py
|
7c4f1c1e4c1bdb889d09109f1aa9f249c9ca0c60
|
[] |
no_license
|
Jeonseoghyeon/APR
|
0af9ac1b4ba666a97d78b92e3e599c5a8bc87acc
|
7a3822913b84ae6ecf80c8e35c7c8e400981d1fe
|
refs/heads/master
| 2020-12-23T13:55:24.194463
| 2020-06-30T07:00:23
| 2020-06-30T07:00:23
| 237,172,582
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 220
|
py
|
x, y, z = [int(input()) for _ in range(3)]
result = str(x * y * z)
# Count how many times each digit 0-9 appears in the product.
for i in range(10):
    print(result.count(str(i)))
|
[
"jsh28592@gmail.com"
] |
jsh28592@gmail.com
|
dcd99135a01a9bb542329a85735346226d9bf41f
|
6bf879172a44c5c07d4595c44a6634c225aa56fc
|
/snippets/admin.py
|
a72a807674d8936806fb2dd7472556bb2fd2209f
|
[] |
no_license
|
shubham1560/Google-sign-in
|
0f6ce102ae4994867ff2b6ad216009b8467e851e
|
f7fd1cbf6f5934cb8ea17381c78c5b45234be47b
|
refs/heads/master
| 2022-08-30T01:39:45.764622
| 2020-05-27T12:51:46
| 2020-05-27T12:51:46
| 267,318,698
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
from django.contrib import admin
from .models import SystemProperties, Snippet
class SytemPropertiesAdmin(admin.ModelAdmin):
list_display = ('key', 'value', 'description')
class SnippetAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'sys_created_on', 'sys_updated_on')
admin.site.register(SystemProperties, SytemPropertiesAdmin)
admin.site.register(Snippet, SnippetAdmin)
# Register your models here.
|
[
"shubhamsinha2050@gmail.com"
] |
shubhamsinha2050@gmail.com
|
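An equivalent sketch of the registrations above using Django's @admin.register decorator, a standard alternative to admin.site.register; the model and column names are taken from the file above, and the misspelled SytemPropertiesAdmin class name is normalised here.

from django.contrib import admin
from .models import SystemProperties, Snippet

@admin.register(SystemProperties)
class SystemPropertiesAdmin(admin.ModelAdmin):
    # columns shown in the admin change list
    list_display = ('key', 'value', 'description')

@admin.register(Snippet)
class SnippetAdmin(admin.ModelAdmin):
    list_display = ('id', 'title', 'sys_created_on', 'sys_updated_on')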
2bd370ce7abc4118cb4016644984971b14cbd8cd
|
677c06029ec9026ca4cee4f10b790b613142ce9e
|
/Animation Paint Board/Project Source and Support Source/demo.py
|
d793d3f6478b32f8deb850d0af3003ff39eed85d
|
[] |
no_license
|
zhuzihope/WenzhuLiu
|
ba40711073ab969e5b8cad4f8fd40d0440b8c02e
|
8cdca9264a9aa05b377888d1cb11b33979e3b203
|
refs/heads/master
| 2021-04-09T16:39:49.377805
| 2014-06-26T19:58:07
| 2014-06-26T19:58:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,252
|
py
|
# Wenzhu Liu + wenzhul + section L
# This is the main class that makes the paint board work.
# It supports drawing with a pencil, a brush pen, and a dropper, and it also
# provides an eraser, stamp images, and a way to draw straight lines.
# In addition, two images can be placed on the canvas and moved in different
# directions and at different speeds. A limitation is that only these two
# images can be animated, and only one instance of each image can move
# at a time.
import pygame
from pygame.locals import *
from sys import exit
from loader import Loader
from PaintBrush import PaintBrush
class demo(object):
def __init__(self):
self.screen = pygame.display.set_mode((1000, 600),1)
# load image and font on the canvas
self.loader = Loader()
# load the palette
self.palette = self.loader.load_image("palette.png",True)
        # the knob which changes the transparency of the color
self.knob = self.loader.load_image("knob.png",True)
self.knob_rect = self.knob.get_rect()
self.knob_rect.topleft = (14,215)
        # the original paint board on the canvas
self.back = self.loader.load_image("paintboard.png", True)
# the 2 backgrounds to choose
self.back1_back = self.loader.load_image("back1.jpg",False)
self.back2_back = self.loader.load_image("back2.jpg",False)
# load the animation pictures
self.sprite1 = self.loader.load_image("sprite1.png", True)
self.sprite2 = self.loader.load_image("sprite2.png", True)
# load the different brushes
self.b1 = self.loader.load_image("brush_1.png", True)
self.b2 = self.loader.load_image("brush_2.png", True)
self.b3 = self.loader.load_image("brush_3.png", True)
self.b5 = self.loader.load_image("brush_5.png", True)
self.b6 = self.loader.load_image("brush_6.png", True)
self.b7 = self.loader.load_image("brush_7.png", True)
self.b8 = self.loader.load_image("brush_8.png", True)
self.b9 = self.loader.load_image("brush_9.png", True)
# draw the icon
self.icon = self.loader.load_image("Paint.png", True)
# the current color is black
self.cur_color = pygame.Color(0,0,0)
        # the canvas area that things can be drawn on
self.paper_rect = pygame.Rect(127,12,659,574)
self.paper = self.loader.load_image("paper.png",True)
        # flag for drawing straight (regular) lines; initially it is false
self.draw_lines = False
        # before a brush is chosen, the user cannot draw anything
        # on the canvas
self.painting = False
# load the palette
self.pal_rect = pygame.Rect(12,12,101,200)
# different brushes on different positions on the board
self.brush_rect = pygame.Rect(12,231,101,355)
self.brush_rects = []
self.brush_rects.append(pygame.Rect(12,231,101,200))
self.brush_rects.append(pygame.Rect(12,332,50,50))
self.brush_rects.append(pygame.Rect(63,332,50,50))
self.brush_rects.append(pygame.Rect(12,332+51*1,50,50))
self.brush_rects.append(pygame.Rect(63,332+51*1,50,50))
self.brush_rects.append(pygame.Rect(12,332+51*2,50,50))
self.brush_rects.append(pygame.Rect(63,332+51*2,50,50))
self.brush_rects.append(pygame.Rect(12,332+51*3,50,50))
self.brush_rects.append(pygame.Rect(63,332+51*3,50,50))
self.brush_rects.append(pygame.Rect(63,332+51*4,50,50))
self.lines_rect = pygame.Rect(12,332+51*4,50,50)
# input the class PaintBrush
self.brush = PaintBrush(self.paper)
# the positions about animation part
self.sprite1_rect = pygame.Rect(826,122,72,72)
self.sprite2_rect = pygame.Rect(905,122,72,72)
self.left_rect = pygame.Rect(818,306,46,43)
self.right_rect = pygame.Rect(938,305,42,45)
self.down_rect = pygame.Rect(876,305,48,48)
self.slow_rect = pygame.Rect(835,427,69,27)
self.quick_rect = pygame.Rect(910,427,67,30)
        # the data and parameters of the animation
self.slow_speed = 40
self.quick_speed = 140
self.move_left = False
self.move_right = False
self.move_down = False
self.keep_move_left1 = False
self.keep_move_right1 = False
self.keep_move_down1 = False
self.keep_move_left2 = False
self.keep_move_right2 = False
self.keep_move_down2 = False
self.sprite1_move = False
self.sprite2_move = False
self.keep_sprite1_move = False
self.keep_sprite2_move = False
self.keep_slow_speed1 = False
self.keep_slow_speed2 = False
self.keep_quick_speed1 = False
self.keep_quick_speed2 = False
        # the positions of the 2 sprites; initially they are None
self.x1 = None
self.y1 = None
self.x2 = None
self.y2 = None
    # give the chosen color to the brush
def set_color(self,c):
self.cur_color = c
self.brush.set_color(c)
# the transparency of the color
def set_alpha(self,a):
if a <= 0.0:
a = 0.005
x = 20
elif a >= 1.0:
a = 1.0
x = 100
else:
x = int(round(20.0+100.0*a))
self.brush.set_alpha(a)
self.knob_rect.left = x
    # when a different brush is chosen, configure how it draws on the canvas
def set_brush(self,idx):
# draw the pencil
if idx == 1:
self.brush.set_brush(self.b1)
self.brush.set_space(0.5)
self.brush.set_color(self.cur_color)
self.set_alpha(1.0)
# draw the brush pen
elif idx == 2:
self.brush.set_brush(self.b2)
self.brush.set_space(1.0)
self.brush.set_color(self.cur_color)
self.set_alpha(0.1)
# draw like dropper
elif idx == 3:
self.brush.set_brush(self.b3)
self.brush.set_space(1.0)
self.brush.set_color(self.cur_color)
self.set_alpha(1.0)
        # draw the dashed line
elif idx == 4:
self.brush.set_brush(self.b1)
self.brush.set_space(1.0)
self.brush.set_color(self.cur_color)
self.brush.set_pattern([2,20])
self.set_alpha(1.0)
# it is the eraser
elif idx == 5:
self.cur_color = pygame.Color(255,255,255)
self.brush.set_brush(self.b5)
self.brush.set_space(1.0)
self.brush.set_color(self.cur_color)
self.set_alpha(0.2)
# draw the picture which is snow
elif idx == 6:
self.brush.set_brush(self.b6,True)
self.brush.set_space(65.0)
self.set_alpha(1.0)
# draw the picture which is heart
elif idx == 7:
self.brush.set_brush(self.b7,True)
self.brush.set_space(65.0)
self.set_alpha(1.0)
# draw the picture which is star
elif idx == 8:
self.brush.set_brush(self.b8,True)
self.brush.set_space(65.0)
self.set_alpha(1.0)
# draw the moon
elif idx == 9:
self.brush.set_brush(self.b9,True)
self.brush.set_space(80.0)
self.set_alpha(1.0)
# choose which image on the right column of the board should move
def set_image1(self):
self.brush.set_brush(self.sprite1)
self.brush.set_space(65.0)
self.set_alpha(1.0)
def set_image2(self):
self.brush.set_brush(self.sprite2)
self.brush.set_space(65.0)
self.set_alpha(1.0)
# start drawing
def paint_start(self):
self.painting = True
###self.save_paper()
# stop drawing
def paint_stop(self):
self.painting = False
# the sprite 1 moves left
def moveLeft1(self,speed):
self.screen.blit(self.sprite1, (self.x1-240, self.y1-150))
self.clock = pygame.time.Clock()
self.time_passed = self.clock.tick(30)
self.time_passed_seconds = self.time_passed / 1000.0
self.distance_moved = self.time_passed_seconds * speed
self.x1 -= self.distance_moved
# If the image goes off the end of the screen, move it back
if self.x1 < 300.:
self.x1 += 290.
    # the sprite 2 moves left
def moveLeft2(self,speed):
self.screen.blit(self.sprite2, (self.x2-240, self.y2-150))
self.clock = pygame.time.Clock()
self.time_passed = self.clock.tick(30)
self.time_passed_seconds = self.time_passed / 1000.0
self.distance_moved = self.time_passed_seconds * speed
self.x2 -= self.distance_moved
# If the image goes off the end of the screen, move it back
if self.x2 < 350.:
self.x2 += 300.
    # the sprite 1 moves right
def moveRight1(self,speed):
self.screen.blit(self.sprite1, (self.x1-240, self.y1-150))
self.clock = pygame.time.Clock()
self.time_passed = self.clock.tick(30)
self.time_passed_seconds = self.time_passed / 1000.0
self.distance_moved = self.time_passed_seconds * speed
self.x1 += self.distance_moved
# If the image goes off the end of the screen, move it back
if self.x1 > 610.:
self.x1 -= 300.
    # the sprite 2 moves right
def moveRight2(self,speed):
self.screen.blit(self.sprite2, (self.x2-240, self.y2-150))
self.clock = pygame.time.Clock()
self.time_passed = self.clock.tick(30)
self.time_passed_seconds = self.time_passed / 1000.0
self.distance_moved = self.time_passed_seconds * speed
self.x2 += self.distance_moved
# If the image goes off the end of the screen, move it back
if self.x2 > 650.:
self.x2 -= 300.
    # the sprite 1 moves down
def moveDown1(self,speed):
self.screen.blit(self.sprite1, (self.x1-240, self.y1-150))
self.clock = pygame.time.Clock()
self.time_passed = self.clock.tick(30)
self.time_passed_seconds = self.time_passed / 1000.0
self.distance_moved = self.time_passed_seconds * speed
self.y1 += self.distance_moved
# If the image goes off the end of the screen, move it back
if self.y1 > 500.:
self.y1 -= 350.
    # the sprite 2 moves down
def moveDown2(self,speed):
self.screen.blit(self.sprite2, (self.x2-240, self.y2-150))
self.clock = pygame.time.Clock()
self.time_passed = self.clock.tick(30)
self.time_passed_seconds = self.time_passed / 1000.0
self.distance_moved = self.time_passed_seconds * speed
self.y2 += self.distance_moved
# If the image goes off the end of the screen, move it back
if self.y2 > 450.:
self.y2 -= 375.
    # this main loop refers to some code from the web
def main_loop(self):
# set the time
clock = pygame.time.Clock()
line_from = None
line_to = None
circle_from = None
circle_to = None
cur_color = pygame.Color(0,0,0)
next_update = pygame.time.get_ticks()
drag_knob = False
# make the brushes and sprites work
while 1:
# draw the title of the window and the icon
pygame.display.set_caption("Paint Board")
pygame.display.set_icon(self.icon)
            # respond to keyboard and mouse events
for event in pygame.event.get():
# when press one key
if event.type == KEYDOWN:
# change backgrounds
if event.key == K_F1:
self.paper = self.back1_back
elif event.key == K_F2:
self.paper = self.back2_back
elif event.key == K_F3:
# this is a white drawing board
self.paper = (pygame.Surface(self.paper_rect.size,1)).convert()
self.paper.fill((255,255,255))
# the brush could be used on these backgrounds
self.brush = PaintBrush(self.paper)
# mouse pressed
if event.type == MOUSEBUTTONDOWN:
# check if the user press the direction button on the board
# move left
if self.left_rect.collidepoint(event.pos):
self.move_left = True
self.move_right = False
self.move_down = False
self.draw_lines = False
# if press the sprite 1 before, then the sprite 1 moves
if self.sprite1_move:
self.keep_move_left1 = True
self.keep_move_right1 = False
self.keep_move_down1 = False
else:
self.keep_move_left2 = True
self.keep_move_right2 = False
self.keep_move_down2 = False
# move right
if self.right_rect.collidepoint(event.pos):
self.move_right = True
self.move_left = False
self.move_down = False
self.draw_lines = False
if self.sprite1_move:
self.keep_move_right1 = True
self.keep_move_left1 = False
self.keep_move_down1 = False
else:
self.keep_move_right2 = True
self.keep_move_left2 = False
self.keep_move_down2 = False
# move down
if self.down_rect.collidepoint(event.pos):
self.move_down = True
self.move_left = False
self.move_right = False
self.draw_lines = False
if self.sprite1_move:
self.keep_move_down1 = True
self.keep_move_left1 = False
self.keep_move_right1 = False
if self.sprite2_move:
self.keep_move_down2 = True
self.keep_move_left2 = False
self.keep_move_right2 = False
# set the speed
# slow
if self.slow_rect.collidepoint(event.pos):
if self.sprite1_move:
self.keep_slow_speed1 = True
self.keep_quick_speed1 = False
if self.sprite2_move:
self.keep_slow_speed2 = True
self.keep_quick_speed2 = False
# quickly
elif self.quick_rect.collidepoint(event.pos):
if self.sprite1_move:
self.keep_quick_speed1 = True
self.keep_slow_speed1 = False
if self.sprite2_move:
self.keep_quick_speed2 = True
self.keep_slow_speed2 = False
# if click on the palette
if self.pal_rect.collidepoint(event.pos):
self.move_left = False
self.move_right = False
self.move_down = False
self.draw_lines = False
# choose the painting color
c = self.back.get_at(event.pos)
self.set_color(c)
# if click on the brush
elif self.brush_rect.collidepoint(event.pos):
self.move_left = False
self.move_right = False
self.move_down = False
self.draw_lines = False
# the mouse press on the brush rectangle,
# except the last two, because they draw regular graphics
if self.lines_rect.collidepoint(event.pos):
self.draw_lines = True
else:
i = 0
for r in self.brush_rects:
if r.collidepoint(event.pos):
self.set_brush(i)
i+=1
# when mouse press on the sprite which should move
# sprite 1 moves
elif self.sprite1_rect.collidepoint(event.pos):
self.sprite1_move = True
self.sprite2_move = False
self.keep_sprite1_move = True
self.draw_lines = False
# sprite 2 moves
elif self.sprite2_rect.collidepoint(event.pos):
self.sprite2_move = True
self.sprite1_move = False
self.keep_sprite2_move = True
self.draw_lines = False
# click the knob
elif self.knob_rect.collidepoint(event.pos):
self.draw_lines = False
# change the transparency of the color
drag_knob = True
# draw things on the canvas
elif self.paper_rect.collidepoint(event.pos):
# the mouse press on the canvas,
# set the start point of drawing
if self.draw_lines:
# draw regular lines
line_from = event.pos
# give the start position to the sprite
elif self.move_left or self.move_right or self.move_down:
# sprite 1
if self.sprite1_move:
self.x1 = event.pos[0]
self.y1 = event.pos[1]
self.sprite1_move = False
# sprite 2
elif self.sprite2_move:
self.x2 = event.pos[0]
self.y2 = event.pos[1]
self.sprite2_move = False
else:
# use brush draw everything you want
self.paint_start()
x = event.pos[0]-self.paper_rect.x
y = event.pos[1]-self.paper_rect.y
self.brush.paint_from((x,y))
elif event.type == MOUSEMOTION:
                    # when the mouse moves
if event.buttons[0]:
if drag_knob:
self.knob_rect.left+=event.rel[0]
                            # the knob can only move within the range (15,100)
# change the transparency of the color
if self.knob_rect.left < 15:
self.knob_rect.left = 15
if self.knob_rect.left > 100:
self.knob_rect.left = 100
elif self.draw_lines == True:
# draw regular lines
line_to = event.pos
painting = False
elif self.paper_rect.collidepoint(event.pos):
# draw whatever the user wants
if self.painting:
x = event.pos[0]-self.paper_rect.x
y = event.pos[1]-self.paper_rect.y
self.brush.paint_to((x,y))
elif event.type == MOUSEBUTTONUP:
# when the mouse button is up
if drag_knob:
                        # stop changing transparency
drag_knob = False
a = float(self.knob_rect.left-14)/83.0
self.set_alpha(a)
if event.button == 1 and self.painting:
# stop painting
self.paint_stop()
elif line_from:
# load the regular line on the canvas
self.paint_start()
fx = line_from[0]-self.paper_rect.x
fy = line_from[1]-self.paper_rect.y
tx = event.pos[0]-self.paper_rect.x
ty = event.pos[1]-self.paper_rect.y
self.brush.paint_line((fx,fy),(tx,ty))
self.paint_stop()
line_from = None
line_to = None
if pygame.time.get_ticks() >= next_update:
# update the board all the time
next_update+=33
# the interval is 33
self.screen.blit(self.back,(0,0))
# load the whole paint board
self.screen.blit(self.paper,self.paper_rect.topleft)
# load the canvas
if line_from and line_to:
                    # while drawing a straight line, show the guide line
pygame.draw.line(self.screen, (0,0,0), line_from, line_to)
self.screen.blit(self.knob,self.knob_rect.topleft)
# show the knob
pygame.draw.circle(self.screen,self.cur_color,(62,281),25,0)
                # show the color which the user chose
# make the sprite 1 move
if self.x1 != None and self.y1 != None:
if self.keep_move_left1 ==True:
if self.keep_sprite1_move == True:
# sprite 1 moves left in different speeds
if self.keep_slow_speed1:
self.moveLeft1(self.slow_speed)
if self.keep_quick_speed1:
self.moveLeft1(self.quick_speed)
elif self.keep_move_right1 == True:
if self.keep_sprite1_move == True:
# sprite 1 moves right in different speeds
if self.keep_slow_speed1:
self.moveRight1(self.slow_speed)
if self.keep_quick_speed1:
self.moveRight1(self.quick_speed)
elif self.keep_move_down1 == True:
if self.keep_sprite1_move == True:
if self.keep_slow_speed1:
self.moveDown1(self.slow_speed)
if self.keep_quick_speed1:
self.moveDown1(self.quick_speed)
                # make the sprite 2 move
if self.x2 != None and self.y2 != None:
if self.keep_move_left2 ==True:
if self.keep_sprite2_move == True:
if self.keep_slow_speed2:
self.moveLeft2(self.slow_speed)
if self.keep_quick_speed2:
self.moveLeft2(self.quick_speed)
elif self.keep_move_right2 == True:
if self.keep_sprite2_move == True:
if self.keep_slow_speed2:
self.moveRight2(self.slow_speed)
if self.keep_quick_speed2:
self.moveRight2(self.quick_speed)
elif self.keep_move_down2 == True:
if self.keep_sprite2_move == True:
if self.keep_slow_speed2:
self.moveDown2(self.slow_speed)
if self.keep_quick_speed2:
self.moveDown2(self.quick_speed)
#flip the display
pygame.display.flip()
def main():
try:
import psyco
psyco.full()
except ImportError:
pass
pygame.init()
g = demo()
g.main_loop()
if __name__ == '__main__':
main()
|
[
"liuwenzhu2012@gmail.com"
] |
liuwenzhu2012@gmail.com
|
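A condensed sketch of the time-based movement pattern used by the moveLeft/moveRight/moveDown methods above: pygame.time.Clock().tick() returns the elapsed milliseconds, which are converted to seconds and multiplied by a speed in pixels per second, so movement is independent of the frame rate. The window size, speed, and placeholder sprite are illustrative values, not taken from the file.

import pygame

pygame.init()
screen = pygame.display.set_mode((640, 480))
sprite = pygame.Surface((32, 32))
sprite.fill((255, 0, 0))

clock = pygame.time.Clock()
x, speed = 0.0, 140.0              # speed in pixels per second

running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False

    dt = clock.tick(30) / 1000.0   # seconds elapsed since the last frame
    x += speed * dt                # distance moved scales with elapsed time
    if x > 640:                    # wrap around when the sprite leaves the screen
        x -= 640

    screen.fill((0, 0, 0))
    screen.blit(sprite, (int(x), 200))
    pygame.display.flip()

pygame.quit()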
2710e4f3d65358a568d7e312ed2f939553f89fbb
|
f8beb6e0eefdca3a38014615e79b2da96f9ba1ce
|
/chsr501.py
|
c129cb391aa24277d994df42bfa1d53003ce411d
|
[] |
no_license
|
YuheiTakagawa/python-firebase
|
9faafe8a05cc4a2216c0ce2f0bce4b0934c6bf4f
|
a2ce97ac05be765fe077350f6e48222aff5a7f84
|
refs/heads/master
| 2020-05-14T14:35:03.340784
| 2019-06-30T17:20:24
| 2019-06-30T17:20:24
| 181,836,867
| 0
| 0
| null | 2019-05-05T04:39:51
| 2019-04-17T07:11:24
|
Python
|
UTF-8
|
Python
| false
| false
| 691
|
py
|
from datetime import datetime
import time
import RPi.GPIO as GPIO
INTERVAL = 3    # seconds between polls while no motion is detected
SLEEPTIME = 5   # seconds to pause after motion is detected
GPIO_PIN = 18   # BCM pin number wired to the sensor output
GPIO.setmode(GPIO.BCM)
GPIO.setup(GPIO_PIN, GPIO.IN)
if __name__ == '__main__':
try:
cnt = 1
while True:
if(GPIO.input(GPIO_PIN) == GPIO.HIGH):
print(datetime.now().strftime('%Y/%m/%d %H:%M:%S') +
":" + str("{0:05d}".format(cnt)))
cnt = cnt + 1
time.sleep(SLEEPTIME)
else:
print(GPIO.input(GPIO_PIN))
time.sleep(INTERVAL)
except KeyboardInterrupt:
print("finishing")
finally:
GPIO.cleanup()
print("finished")
|
[
"roto11fon@gmail.com"
] |
roto11fon@gmail.com
|
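An alternative sketch of the same PIR reading loop using RPi.GPIO's edge-detection API instead of polling; GPIO.add_event_detect and GPIO.RISING are standard RPi.GPIO calls, while the pin number is carried over from the script above and the bouncetime value is an assumption.

from datetime import datetime
import time
import RPi.GPIO as GPIO

GPIO_PIN = 18  # same BCM pin as in the script above

GPIO.setmode(GPIO.BCM)
GPIO.setup(GPIO_PIN, GPIO.IN)

def motion_detected(channel):
    # called from RPi.GPIO's callback thread on every rising edge
    print(datetime.now().strftime('%Y/%m/%d %H:%M:%S') + ": motion on pin " + str(channel))

GPIO.add_event_detect(GPIO_PIN, GPIO.RISING, callback=motion_detected, bouncetime=200)

try:
    while True:
        time.sleep(1)   # the main thread only needs to stay alive
except KeyboardInterrupt:
    pass
finally:
    GPIO.cleanup()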
ae8ea68cb62921a729e08c5b2abc5e7dee0f88ea
|
28445de3b12cee662dc31ab1f0f011bdfa1e3602
|
/tests/test_alignment.py
|
d456579231702ae030cc34757c42e4aeeaf25d0f
|
[] |
no_license
|
hitochan777/nmt_processor
|
2504ae225c7e660dabd257db10327301f20b41d1
|
c7c67f507a455f9087443f5b7fa687398f607e05
|
refs/heads/master
| 2021-01-18T22:29:29.618288
| 2016-11-09T05:16:36
| 2016-11-09T05:16:36
| 72,538,019
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 564
|
py
|
from unittest import TestCase
from alignment import Alignment
class TestAlignment(TestCase):
def test_convert_string_to_alignment_dictionary1(self):
line = "1-2 2-1 3-2 4-1 1-3"
alignment = Alignment.convert_string_to_alignment_dictionary(line)
self.assertDictEqual(alignment, {1: [2, 3], 2: [1], 3: [2], 4: [1]})
def test_convert_string_to_alignment_dictionary2(self):
# Empty line
line = ""
alignment = Alignment.convert_string_to_alignment_dictionary(line)
self.assertDictEqual(alignment, {})
|
[
"hitochan777@gmail.com"
] |
hitochan777@gmail.com
|
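The Alignment class itself is not part of this record; the following is a sketch of a convert_string_to_alignment_dictionary implementation that is consistent with the two test cases above (each "src-tgt" pair maps the source index to a list of target indices, and an empty line yields an empty dict).

def convert_string_to_alignment_dictionary(line):
    # parse pairs like "1-2 2-1" into {source_index: [target_index, ...]}
    alignment = {}
    for pair in line.split():
        src, tgt = pair.split('-')
        alignment.setdefault(int(src), []).append(int(tgt))
    return alignment

print(convert_string_to_alignment_dictionary("1-2 2-1 3-2 4-1 1-3"))
# {1: [2, 3], 2: [1], 3: [2], 4: [1]}
print(convert_string_to_alignment_dictionary(""))
# {}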
b83ca65d4f219d8cd22c9bdee20821c364e01fa6
|
08f5ea96a3a3fdce0b3192e7c3ed214cd3cabc59
|
/src/pandaframe/fast_df.py
|
e87b4c6622d3806c51f06567d75a0834ec713ed5
|
[] |
no_license
|
SivaanandM/TickAlgoAgent
|
55739f8a18cbe64b4a8928da820e9922f0366b16
|
749878656f02599260ad21c3645b733d56a018d4
|
refs/heads/master
| 2022-10-05T15:46:57.035861
| 2020-06-03T15:48:23
| 2020-06-03T15:48:23
| 208,987,355
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,397
|
py
|
import os
import sys
sys.path.append(os.getcwd()[:os.getcwd().find("TickAlgoAgent")+len("TickAlgoAgent")])
from src.main.algo_agent_object import AlgoAgentObjects as abObj
from src.pandaframe import fast_indicators as indi_obj
import traceback
# from src.loghandler import log
import time
# os.environ['TZ'] = 'Asia/Kolkata'
# time.tzset()
import pandas as pd
# logger = abObj.log
class FastDF(object):
def __init__(self):
pass
@staticmethod
def generate_fast_min_df(ticks):
def get_ohlc():
try:
data = pd.DataFrame(abObj.fast_min_ticks, columns=['time', 'price'])
data['time'] = pd.to_datetime(data['time'], unit='s', utc=True)
data = data.set_index('time')
data = data.tz_convert(tz='Asia/Kolkata')
ti = data.loc[:, ['price']]
fast_min_bars = ti.price.resample(str(abObj.fast_min)+'min').ohlc()
for index, row in fast_min_bars.iterrows():
abObj.fast_min_pd_DF = abObj.fast_min_pd_DF.append(row, sort=False)
break
indi_obj.load_indicators()
except:
# print(traceback.format_exc())
abObj.log.error(traceback.format_exc())
tick_time = ticks.get('Timestamp')
tick_price = ticks.get('Price')
try:
if len(abObj.fast_min_ticks) > 0:
if int(str(time.strftime("%M", time.localtime(int(tick_time))))) > abObj.cur_fast_min - 1:
get_ohlc()
abObj.fast_min_ticks.clear()
abObj.fast_min_ticks.append([tick_time, tick_price])
abObj.cur_fast_min = int(str(time.strftime("%M", time.localtime(int(tick_time))))) + abObj.fast_min
else:
abObj.fast_min_ticks.append([tick_time, tick_price])
if (int(str(time.strftime("%M", time.localtime(int(tick_time))))) == 0) and abObj.cur_fast_min >= 59:
abObj.cur_fast_min = abObj.cur_fast_min - 60
else:
abObj.cur_fast_min = int(str(time.strftime("%M", time.localtime(int(tick_time))))) + abObj.fast_min
abObj.fast_min_ticks.append([tick_time, tick_price])
except:
# print(traceback.format_exc())
abObj.log.error(traceback.format_exc())
|
[
"sivaanand.m@gmail.com"
] |
sivaanand.m@gmail.com
|
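A self-contained sketch of the core pandas operation inside generate_fast_min_df above: tick timestamps and prices are indexed by time and resampled into OHLC bars. The tick values are made up for illustration; to_datetime, tz_convert, and resample(...).ohlc() are standard pandas APIs.

import pandas as pd

# made-up ticks: (unix timestamp in seconds, price)
ticks = [
    (1591166400, 101.0), (1591166410, 101.5), (1591166425, 100.8),
    (1591166465, 102.0), (1591166500, 101.7),
]

data = pd.DataFrame(ticks, columns=['time', 'price'])
data['time'] = pd.to_datetime(data['time'], unit='s', utc=True)
data = data.set_index('time').tz_convert('Asia/Kolkata')

# one-minute OHLC bars built from the tick prices
bars = data['price'].resample('1min').ohlc()
print(bars)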
bb49ad91b47ee7061f94169c8fb31d3728856b71
|
17b2254410e1dbf5af3ec0e73f86b5cfb577180c
|
/mainapp/migrations/0006_auto_20190328_1944.py
|
168364636102e05a7f9d34b515924b039f59771f
|
[] |
no_license
|
nicolasbolt/VideoSharingSite
|
aca8f551dfd6b99b59df7f38b31fffb71804280a
|
0fb89f5d9d0e440a90bf5df38618f1cc18f38283
|
refs/heads/master
| 2020-05-04T12:58:49.104727
| 2019-06-24T15:49:12
| 2019-06-24T15:49:12
| 179,142,279
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 429
|
py
|
# Generated by Django 2.1.4 on 2019-03-28 23:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mainapp', '0005_video_thumbnail'),
]
operations = [
migrations.AlterField(
model_name='video',
name='thumbnail',
field=models.ImageField(default='default-thumbnail.jpg', upload_to='thumbnails/'),
),
]
|
[
"nicolas.bolt@gmail.com"
] |
nicolas.bolt@gmail.com
|
e10dfbcf3fe3feda140d976234b316d2afb6a44e
|
e5dbae874442fbe162e5fff58641fb042a7f56e7
|
/pbsnodesParser.py
|
81b5539f42aac97dfabd29b79ca46cc1e40bab3b
|
[] |
no_license
|
IceCube-PSU/clustertools
|
37cbc8011bfbef01e02186ab6075b73d1debb5a2
|
9e6332652bb789cda9113a0c0464b9582238b3d8
|
refs/heads/master
| 2020-12-24T16:31:44.766795
| 2020-05-14T18:07:08
| 2020-05-14T18:07:08
| 23,080,559
| 0
| 0
| null | 2018-02-19T19:33:01
| 2014-08-18T17:45:53
|
Python
|
UTF-8
|
Python
| false
| false
| 2,311
|
py
|
import re
import sys
sys.path.append('./lib')
from vsc.pbs.pbsnodes import PbsnodesParser
#from vsc.pbs.node import NodeStatus
parser = PbsnodesParser()
#
# Next incarnation of this script will do the following:
# - loop over all accumulated pbsnodes files
# - extract time from filename
# - keep track of various params as a function of node type
# - node type: cyberlamp hi-mem, aci standard,...
# - params: number of free cpus, number of sessions, other things
# - plot everything vs. time
# - add average of each quantity to plot title
pbs_file = open("./pbsnodes.dat",'r')
nodes = parser.parse_file(pbs_file)
total_cpu = 0
total_sessions = 0
for node in nodes:
if re.match(r'comp-clgc-0001',node.hostname):
# if re.match(r'comp-cl',node.hostname):
this_node_mem = 0
print node.hostname,node.np,node.memory
total_cpu += int(node.status['ncpus'])
total_sessions += int(node.status['nsessions'])
pmem = int(re.split(r'\D+',node.status['physmem'])[0])
totmem = int(re.split(r'\D+',node.status['totmem'])[0])
availmem = int(re.split(r'\D+',node.status['availmem'])[0])
# print node.memory
# print node.memload
number_of_jobs = len(node.jobs.keys())
for jobnum in node.jobs.keys():
full_jobnum = node.jobs[jobnum]
# Possible keys: ['energy_used', 'mem', 'cput', 'session_id', 'vmem', 'walltime']
print node.status['job_info'][full_jobnum]['mem']
job_mem = int(re.split(r'\D+',node.status['job_info'][full_jobnum]['mem'])[0])
this_node_mem += job_mem
# print node.jobs.keys()
# print node.job_ids
# print node.status['job_info']
# print node.status.keys()
#
# Naively expected that total memory minus available memory would
# equal memory used, i.e., "this_node_mem" but it does not...
#
print "totmem-availmem, this_node_mem: ",totmem-availmem,this_node_mem
print "pmem-availmem, this_node_mem: ",pmem-availmem,this_node_mem
print number_of_jobs
print total_sessions
print total_cpu
# print node.status['job_info'][full_jobnum]['session_id']
# print node.properties
# node's 1 min bsd load average (number of processes wanted to run in last min)
# print node.status['loadave']
|
[
"noreply@github.com"
] |
IceCube-PSU.noreply@github.com
|
42beec1d7a484e6b3f943490519107cdf81b1853
|
202c605d4ff02f93625c46b91cf15a5a46ce33c1
|
/login/migrations/0001_initial.py
|
a5198f1d926a3aa8829afbf74ea553b476cdf535
|
[] |
no_license
|
zhuerl/mysite
|
bd91e9435ae45ca4cbf5b5d496a1e1aa865a7552
|
4e281d846a1b641638679c7e5953e4bcc050a9e9
|
refs/heads/master
| 2023-05-03T14:31:55.318804
| 2019-11-09T19:18:19
| 2019-11-09T19:18:19
| 220,687,312
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,736
|
py
|
# Generated by Django 2.2 on 2019-11-10 01:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=128, unique=True)),
('password', models.CharField(max_length=256)),
('email', models.EmailField(max_length=254, unique=True)),
('sex', models.CharField(choices=[('male', '男'), ('female', '女')], default='男', max_length=32)),
('c_time', models.DateTimeField(auto_now_add=True)),
('has_confirmed', models.BooleanField(default=False)),
],
options={
'verbose_name': '用户',
'verbose_name_plural': '用户',
'ordering': ['-c_time'],
},
),
migrations.CreateModel(
name='ConfirmString',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(max_length=256)),
('c_time', models.DateTimeField(auto_now_add=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='login.User')),
],
options={
'verbose_name': '验证码',
'verbose_name_plural': '验证码',
'ordering': ['c_time'],
},
),
]
|
[
"190381929@qq.com"
] |
190381929@qq.com
|
09c1e07399852eea39b1f91ebc3088c04b873bf4
|
0410073eb08bb92d44ef70333a065e80700227c0
|
/BaseTools/Source/Python/Ecc/Check.py
|
d563a2f961b2487a3091a98fe46540f6ab339468
|
[
"BSD-2-Clause"
] |
permissive
|
Perry31/mu_basecore
|
f69bc463835ec19a80005bdf7b7fc45df47ed84c
|
0d16728c1b41e25e55476358f6dcf4d8fed45f8d
|
refs/heads/release/201808
| 2023-08-16T21:03:50.066318
| 2018-11-21T23:51:35
| 2018-11-21T23:51:35
| 159,019,460
| 1
| 0
|
BSD-2-Clause
| 2023-06-21T14:19:01
| 2018-11-25T10:17:46
|
C
|
UTF-8
|
Python
| false
| false
| 98,519
|
py
|
## @file
# This file is used to define checkpoints used by ECC tool
#
# Copyright (c) 2008 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
from __future__ import absolute_import
import Common.LongFilePathOs as os
import re
from CommonDataClass.DataClass import *
import Common.DataType as DT
from Ecc.EccToolError import *
from Ecc.MetaDataParser import ParseHeaderCommentSection
from Ecc import EccGlobalData
from Ecc import c
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
## Check
#
# This class is to define checkpoints used by ECC tool
#
# @param object: Inherited from object class
#
class Check(object):
def __init__(self):
pass
# Check all required checkpoints
def Check(self):
self.GeneralCheck()
self.MetaDataFileCheck()
self.DoxygenCheck()
self.IncludeFileCheck()
self.PredicateExpressionCheck()
self.DeclAndDataTypeCheck()
self.FunctionLayoutCheck()
self.NamingConventionCheck()
self.SmmCommParaCheck()
def SmmCommParaCheck(self):
self.SmmCommParaCheckBufferType()
# Check if SMM communication function has correct parameter type
# 1. Get function calling with instance./->Communicate() interface
# and make sure the protocol instance is of type EFI_SMM_COMMUNICATION_PROTOCOL.
# 2. Find the origin of the 2nd parameter of Communicate() interface, if -
# a. it is a local buffer on stack
# report error.
# b. it is a global buffer, check the driver that holds the global buffer is of type DXE_RUNTIME_DRIVER
# report success.
# c. it is a buffer by AllocatePage/AllocatePool (may be wrapped by nested function calls),
# check the EFI_MEMORY_TYPE to be EfiRuntimeServicesCode,EfiRuntimeServicesData,
# EfiACPIMemoryNVS or EfiReservedMemoryType
# report success.
# d. it is a buffer located via EFI_SYSTEM_TABLE.ConfigurationTable (may be wrapped by nested function calls)
# report warning to indicate human code review.
# e. it is a buffer from other kind of pointers (may need to trace into nested function calls to locate),
# repeat checks in a.b.c and d.
def SmmCommParaCheckBufferType(self):
if EccGlobalData.gConfig.SmmCommParaCheckBufferType == '1' or EccGlobalData.gConfig.SmmCommParaCheckAll == '1':
EdkLogger.quiet("Checking SMM communication parameter type ...")
# Get all EFI_SMM_COMMUNICATION_PROTOCOL interface
CommApiList = []
for IdentifierTable in EccGlobalData.gIdentifierTableList:
SqlCommand = """select ID, Name, BelongsToFile from %s
where Modifier = 'EFI_SMM_COMMUNICATION_PROTOCOL*' """ % (IdentifierTable)
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
if RecordSet:
for Record in RecordSet:
if Record[1] not in CommApiList:
CommApiList.append(Record[1])
# For each interface, check the second parameter
for CommApi in CommApiList:
for IdentifierTable in EccGlobalData.gIdentifierTableList:
SqlCommand = """select ID, Name, Value, BelongsToFile, StartLine from %s
where Name = '%s->Communicate' and Model = %s""" \
% (IdentifierTable, CommApi, MODEL_IDENTIFIER_FUNCTION_CALLING)
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
if RecordSet:
# print IdentifierTable
for Record in RecordSet:
# Get the second parameter for Communicate function
SecondPara = Record[2].split(',')[1].strip()
SecondParaIndex = None
if SecondPara.startswith('&'):
SecondPara = SecondPara[1:]
if SecondPara.endswith(']'):
SecondParaIndex = SecondPara[SecondPara.find('[') + 1:-1]
SecondPara = SecondPara[:SecondPara.find('[')]
# Get the ID
Id = Record[0]
# Get the BelongsToFile
BelongsToFile = Record[3]
# Get the source file path
SqlCommand = """select FullPath from File where ID = %s""" % BelongsToFile
NewRecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
FullPath = NewRecordSet[0][0]
# Get the line no of function calling
StartLine = Record[4]
# Get the module type
SqlCommand = """select Value3 from INF where BelongsToFile = (select ID from File
where Path = (select Path from File where ID = %s) and Model = 1011)
and Value2 = 'MODULE_TYPE'""" % BelongsToFile
NewRecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
ModuleType = NewRecordSet[0][0] if NewRecordSet else None
# print BelongsToFile, FullPath, StartLine, ModuleType, SecondPara
Value = FindPara(FullPath, SecondPara, StartLine)
# Find the value of the parameter
if Value:
if 'AllocatePage' in Value \
or 'AllocatePool' in Value \
or 'AllocateRuntimePool' in Value \
or 'AllocateZeroPool' in Value:
pass
else:
if '->' in Value:
if not EccGlobalData.gException.IsException(
ERROR_SMM_COMM_PARA_CHECK_BUFFER_TYPE, Value):
EccGlobalData.gDb.TblReport.Insert(ERROR_SMM_COMM_PARA_CHECK_BUFFER_TYPE,
OtherMsg="Please review the buffer type"
+ "is correct or not. If it is correct" +
" please add [%s] to exception list"
% Value,
BelongsToTable=IdentifierTable,
BelongsToItem=Id)
else:
if not EccGlobalData.gException.IsException(
ERROR_SMM_COMM_PARA_CHECK_BUFFER_TYPE, Value):
EccGlobalData.gDb.TblReport.Insert(ERROR_SMM_COMM_PARA_CHECK_BUFFER_TYPE,
OtherMsg="Please review the buffer type"
+ "is correct or not. If it is correct" +
" please add [%s] to exception list"
% Value,
BelongsToTable=IdentifierTable,
BelongsToItem=Id)
# Not find the value of the parameter
else:
SqlCommand = """select ID, Modifier, Name, Value, Model, BelongsToFunction from %s
where Name = '%s' and StartLine < %s order by StartLine DESC""" \
% (IdentifierTable, SecondPara, StartLine)
NewRecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
if NewRecordSet:
Value = NewRecordSet[0][1]
if 'AllocatePage' in Value \
or 'AllocatePool' in Value \
or 'AllocateRuntimePool' in Value \
or 'AllocateZeroPool' in Value:
pass
else:
if not EccGlobalData.gException.IsException(
ERROR_SMM_COMM_PARA_CHECK_BUFFER_TYPE, Value):
EccGlobalData.gDb.TblReport.Insert(ERROR_SMM_COMM_PARA_CHECK_BUFFER_TYPE,
OtherMsg="Please review the buffer type"
+ "is correct or not. If it is correct" +
" please add [%s] to exception list"
% Value,
BelongsToTable=IdentifierTable,
BelongsToItem=Id)
else:
pass
# Check UNI files
def UniCheck(self):
if EccGlobalData.gConfig.GeneralCheckUni == '1' or EccGlobalData.gConfig.GeneralCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking whether UNI file is UTF-16 ...")
SqlCommand = """select ID, FullPath, ExtName from File where ExtName like 'uni'"""
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
File = Record[1]
FileIn = open(File, 'rb').read(2)
if FileIn != '\xff\xfe':
OtherMsg = "File %s is not a valid UTF-16 UNI file" % Record[1]
EccGlobalData.gDb.TblReport.Insert(ERROR_GENERAL_CHECK_UNI, OtherMsg=OtherMsg, BelongsToTable='File', BelongsToItem=Record[0])
# General Checking
def GeneralCheck(self):
self.GeneralCheckNonAcsii()
self.UniCheck()
self.GeneralCheckNoTab()
self.GeneralCheckLineEnding()
self.GeneralCheckTrailingWhiteSpaceLine()
# Check whether NO Tab is used, replaced with spaces
def GeneralCheckNoTab(self):
if EccGlobalData.gConfig.GeneralCheckNoTab == '1' or EccGlobalData.gConfig.GeneralCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking No TAB used in file ...")
SqlCommand = """select ID, FullPath, ExtName from File where ExtName in ('.dec', '.inf', '.dsc', 'c', 'h')"""
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
if Record[2].upper() not in EccGlobalData.gConfig.BinaryExtList:
op = open(Record[1]).readlines()
IndexOfLine = 0
for Line in op:
IndexOfLine += 1
IndexOfChar = 0
for Char in Line:
IndexOfChar += 1
if Char == '\t':
OtherMsg = "File %s has TAB char at line %s column %s" % (Record[1], IndexOfLine, IndexOfChar)
EccGlobalData.gDb.TblReport.Insert(ERROR_GENERAL_CHECK_NO_TAB, OtherMsg=OtherMsg, BelongsToTable='File', BelongsToItem=Record[0])
# Check Only use CRLF (Carriage Return Line Feed) line endings.
def GeneralCheckLineEnding(self):
if EccGlobalData.gConfig.GeneralCheckLineEnding == '1' or EccGlobalData.gConfig.GeneralCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking line ending in file ...")
SqlCommand = """select ID, FullPath, ExtName from File where ExtName in ('.dec', '.inf', '.dsc', 'c', 'h')"""
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
if Record[2].upper() not in EccGlobalData.gConfig.BinaryExtList:
op = open(Record[1], 'rb').readlines()
IndexOfLine = 0
for Line in op:
IndexOfLine += 1
if not bytes.decode(Line).endswith('\r\n'):
OtherMsg = "File %s has invalid line ending at line %s" % (Record[1], IndexOfLine)
EccGlobalData.gDb.TblReport.Insert(ERROR_GENERAL_CHECK_INVALID_LINE_ENDING, OtherMsg=OtherMsg, BelongsToTable='File', BelongsToItem=Record[0])
# Check if there is no trailing white space in one line.
def GeneralCheckTrailingWhiteSpaceLine(self):
if EccGlobalData.gConfig.GeneralCheckTrailingWhiteSpaceLine == '1' or EccGlobalData.gConfig.GeneralCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking trailing white space line in file ...")
SqlCommand = """select ID, FullPath, ExtName from File where ExtName in ('.dec', '.inf', '.dsc', 'c', 'h')"""
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
if Record[2].upper() not in EccGlobalData.gConfig.BinaryExtList:
op = open(Record[1], 'r').readlines()
IndexOfLine = 0
for Line in op:
IndexOfLine += 1
if Line.replace('\r', '').replace('\n', '').endswith(' '):
OtherMsg = "File %s has trailing white spaces at line %s" % (Record[1], IndexOfLine)
EccGlobalData.gDb.TblReport.Insert(ERROR_GENERAL_CHECK_TRAILING_WHITE_SPACE_LINE, OtherMsg=OtherMsg, BelongsToTable='File', BelongsToItem=Record[0])
    # Check whether file has non-ASCII char
def GeneralCheckNonAcsii(self):
if EccGlobalData.gConfig.GeneralCheckNonAcsii == '1' or EccGlobalData.gConfig.GeneralCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking Non-ACSII char in file ...")
SqlCommand = """select ID, FullPath, ExtName from File where ExtName in ('.dec', '.inf', '.dsc', 'c', 'h')"""
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
if Record[2].upper() not in EccGlobalData.gConfig.BinaryExtList:
op = open(Record[1]).readlines()
IndexOfLine = 0
for Line in op:
IndexOfLine += 1
IndexOfChar = 0
for Char in Line:
IndexOfChar += 1
if ord(Char) > 126:
OtherMsg = "File %s has Non-ASCII char at line %s column %s" % (Record[1], IndexOfLine, IndexOfChar)
EccGlobalData.gDb.TblReport.Insert(ERROR_GENERAL_CHECK_NON_ACSII, OtherMsg=OtherMsg, BelongsToTable='File', BelongsToItem=Record[0])
# C Function Layout Checking
def FunctionLayoutCheck(self):
self.FunctionLayoutCheckReturnType()
self.FunctionLayoutCheckModifier()
self.FunctionLayoutCheckName()
self.FunctionLayoutCheckPrototype()
self.FunctionLayoutCheckBody()
self.FunctionLayoutCheckLocalVariable()
def WalkTree(self):
IgnoredPattern = c.GetIgnoredDirListPattern()
for Dirpath, Dirnames, Filenames in os.walk(EccGlobalData.gTarget):
for Dir in Dirnames:
Dirname = os.path.join(Dirpath, Dir)
if os.path.islink(Dirname):
Dirname = os.path.realpath(Dirname)
if os.path.isdir(Dirname):
# symlinks to directories are treated as directories
Dirnames.remove(Dir)
Dirnames.append(Dirname)
if IgnoredPattern.match(Dirpath.upper()):
continue
for f in Filenames[:]:
if f.lower() in EccGlobalData.gConfig.SkipFileList:
Filenames.remove(f)
yield (Dirpath, Dirnames, Filenames)
# Check whether return type exists and in the first line
def FunctionLayoutCheckReturnType(self):
if EccGlobalData.gConfig.CFunctionLayoutCheckReturnType == '1' or EccGlobalData.gConfig.CFunctionLayoutCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking function layout return type ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.c', '.h'):
# FullName = os.path.join(Dirpath, F)
# c.CheckFuncLayoutReturnType(FullName)
for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:
c.CheckFuncLayoutReturnType(FullName)
# Check whether any optional functional modifiers exist and next to the return type
def FunctionLayoutCheckModifier(self):
if EccGlobalData.gConfig.CFunctionLayoutCheckOptionalFunctionalModifier == '1' or EccGlobalData.gConfig.CFunctionLayoutCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking function layout modifier ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.c', '.h'):
# FullName = os.path.join(Dirpath, F)
# c.CheckFuncLayoutModifier(FullName)
for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:
c.CheckFuncLayoutModifier(FullName)
# Check whether the next line contains the function name, left justified, followed by the beginning of the parameter list
# Check whether the closing parenthesis is on its own line and also indented two spaces
def FunctionLayoutCheckName(self):
if EccGlobalData.gConfig.CFunctionLayoutCheckFunctionName == '1' or EccGlobalData.gConfig.CFunctionLayoutCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking function layout function name ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.c', '.h'):
# FullName = os.path.join(Dirpath, F)
# c.CheckFuncLayoutName(FullName)
for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:
c.CheckFuncLayoutName(FullName)
# Check whether the function prototypes in include files have the same form as function definitions
def FunctionLayoutCheckPrototype(self):
if EccGlobalData.gConfig.CFunctionLayoutCheckFunctionPrototype == '1' or EccGlobalData.gConfig.CFunctionLayoutCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking function layout function prototype ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.c'):
# FullName = os.path.join(Dirpath, F)
# EdkLogger.quiet("[PROTOTYPE]" + FullName)
# c.CheckFuncLayoutPrototype(FullName)
for FullName in EccGlobalData.gCFileList:
EdkLogger.quiet("[PROTOTYPE]" + FullName)
c.CheckFuncLayoutPrototype(FullName)
# Check whether the body of a function is contained by open and close braces that must be in the first column
def FunctionLayoutCheckBody(self):
if EccGlobalData.gConfig.CFunctionLayoutCheckFunctionBody == '1' or EccGlobalData.gConfig.CFunctionLayoutCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking function layout function body ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.c'):
# FullName = os.path.join(Dirpath, F)
# c.CheckFuncLayoutBody(FullName)
for FullName in EccGlobalData.gCFileList:
c.CheckFuncLayoutBody(FullName)
# Check whether the data declarations is the first code in a module.
# self.CFunctionLayoutCheckDataDeclaration = 1
# Check whether no initialization of a variable as part of its declaration
def FunctionLayoutCheckLocalVariable(self):
if EccGlobalData.gConfig.CFunctionLayoutCheckNoInitOfVariable == '1' or EccGlobalData.gConfig.CFunctionLayoutCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking function layout local variables ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.c'):
# FullName = os.path.join(Dirpath, F)
# c.CheckFuncLayoutLocalVariable(FullName)
for FullName in EccGlobalData.gCFileList:
c.CheckFuncLayoutLocalVariable(FullName)
# Check whether no use of STATIC for functions
# self.CFunctionLayoutCheckNoStatic = 1
# Declarations and Data Types Checking
def DeclAndDataTypeCheck(self):
self.DeclCheckNoUseCType()
self.DeclCheckInOutModifier()
self.DeclCheckEFIAPIModifier()
self.DeclCheckEnumeratedType()
self.DeclCheckStructureDeclaration()
self.DeclCheckSameStructure()
self.DeclCheckUnionType()
# Check whether no use of int, unsigned, char, void, static, long in any .c, .h or .asl files.
def DeclCheckNoUseCType(self):
if EccGlobalData.gConfig.DeclarationDataTypeCheckNoUseCType == '1' or EccGlobalData.gConfig.DeclarationDataTypeCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking Declaration No use C type ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.h', '.c'):
# FullName = os.path.join(Dirpath, F)
# c.CheckDeclNoUseCType(FullName)
for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:
c.CheckDeclNoUseCType(FullName)
# Check whether the modifiers IN, OUT, OPTIONAL, and UNALIGNED are used only to qualify arguments to a function and should not appear in a data type declaration
def DeclCheckInOutModifier(self):
if EccGlobalData.gConfig.DeclarationDataTypeCheckInOutModifier == '1' or EccGlobalData.gConfig.DeclarationDataTypeCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking Declaration argument modifier ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.h', '.c'):
# FullName = os.path.join(Dirpath, F)
# c.CheckDeclArgModifier(FullName)
for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:
c.CheckDeclArgModifier(FullName)
# Check whether the EFIAPI modifier should be used at the entry of drivers, events, and member functions of protocols
def DeclCheckEFIAPIModifier(self):
if EccGlobalData.gConfig.DeclarationDataTypeCheckEFIAPIModifier == '1' or EccGlobalData.gConfig.DeclarationDataTypeCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
pass
# Check whether Enumerated Type has a 'typedef' and the name is capital
def DeclCheckEnumeratedType(self):
if EccGlobalData.gConfig.DeclarationDataTypeCheckEnumeratedType == '1' or EccGlobalData.gConfig.DeclarationDataTypeCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking Declaration enum typedef ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.h', '.c'):
# FullName = os.path.join(Dirpath, F)
# EdkLogger.quiet("[ENUM]" + FullName)
# c.CheckDeclEnumTypedef(FullName)
for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:
EdkLogger.quiet("[ENUM]" + FullName)
c.CheckDeclEnumTypedef(FullName)
# Check whether Structure Type has a 'typedef' and the name is capital
def DeclCheckStructureDeclaration(self):
if EccGlobalData.gConfig.DeclarationDataTypeCheckStructureDeclaration == '1' or EccGlobalData.gConfig.DeclarationDataTypeCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking Declaration struct typedef ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.h', '.c'):
# FullName = os.path.join(Dirpath, F)
# EdkLogger.quiet("[STRUCT]" + FullName)
# c.CheckDeclStructTypedef(FullName)
for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:
EdkLogger.quiet("[STRUCT]" + FullName)
c.CheckDeclStructTypedef(FullName)
# Check whether having same Structure
def DeclCheckSameStructure(self):
if EccGlobalData.gConfig.DeclarationDataTypeCheckSameStructure == '1' or EccGlobalData.gConfig.DeclarationDataTypeCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking same struct ...")
AllStructure = {}
for IdentifierTable in EccGlobalData.gIdentifierTableList:
SqlCommand = """select ID, Name, BelongsToFile from %s where Model = %s""" % (IdentifierTable, MODEL_IDENTIFIER_STRUCTURE)
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
if Record[1] != '':
if Record[1] not in AllStructure.keys():
AllStructure[Record[1]] = Record[2]
else:
ID = AllStructure[Record[1]]
SqlCommand = """select FullPath from File where ID = %s """ % ID
NewRecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
OtherMsg = "The structure name '%s' is duplicate" % Record[1]
if NewRecordSet != []:
OtherMsg = "The structure name [%s] is duplicate with the one defined in %s, maybe struct NOT typedefed or the typedef new type NOT used to qualify variables" % (Record[1], NewRecordSet[0][0])
if not EccGlobalData.gException.IsException(ERROR_DECLARATION_DATA_TYPE_CHECK_SAME_STRUCTURE, Record[1]):
EccGlobalData.gDb.TblReport.Insert(ERROR_DECLARATION_DATA_TYPE_CHECK_SAME_STRUCTURE, OtherMsg=OtherMsg, BelongsToTable=IdentifierTable, BelongsToItem=Record[0])
# Check whether Union Type has a 'typedef' and the name is capital
def DeclCheckUnionType(self):
if EccGlobalData.gConfig.DeclarationDataTypeCheckUnionType == '1' or EccGlobalData.gConfig.DeclarationDataTypeCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking Declaration union typedef ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.h', '.c'):
# FullName = os.path.join(Dirpath, F)
# EdkLogger.quiet("[UNION]" + FullName)
# c.CheckDeclUnionTypedef(FullName)
for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:
EdkLogger.quiet("[UNION]" + FullName)
c.CheckDeclUnionTypedef(FullName)
# Predicate Expression Checking
def PredicateExpressionCheck(self):
self.PredicateExpressionCheckBooleanValue()
self.PredicateExpressionCheckNonBooleanOperator()
self.PredicateExpressionCheckComparisonNullType()
# Check whether Boolean values, variable type BOOLEAN not use explicit comparisons to TRUE or FALSE
def PredicateExpressionCheckBooleanValue(self):
if EccGlobalData.gConfig.PredicateExpressionCheckBooleanValue == '1' or EccGlobalData.gConfig.PredicateExpressionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking predicate expression Boolean value ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.c'):
# FullName = os.path.join(Dirpath, F)
# EdkLogger.quiet("[BOOLEAN]" + FullName)
# c.CheckBooleanValueComparison(FullName)
for FullName in EccGlobalData.gCFileList:
EdkLogger.quiet("[BOOLEAN]" + FullName)
c.CheckBooleanValueComparison(FullName)
    # Check whether Non-Boolean comparisons use a compare operator (==, !=, >, <, >=, <=).
def PredicateExpressionCheckNonBooleanOperator(self):
if EccGlobalData.gConfig.PredicateExpressionCheckNonBooleanOperator == '1' or EccGlobalData.gConfig.PredicateExpressionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking predicate expression Non-Boolean variable...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.c'):
# FullName = os.path.join(Dirpath, F)
# EdkLogger.quiet("[NON-BOOLEAN]" + FullName)
# c.CheckNonBooleanValueComparison(FullName)
for FullName in EccGlobalData.gCFileList:
EdkLogger.quiet("[NON-BOOLEAN]" + FullName)
c.CheckNonBooleanValueComparison(FullName)
# Check whether a comparison of any pointer to zero must be done via the NULL type
def PredicateExpressionCheckComparisonNullType(self):
if EccGlobalData.gConfig.PredicateExpressionCheckComparisonNullType == '1' or EccGlobalData.gConfig.PredicateExpressionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking predicate expression NULL pointer ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.c'):
# FullName = os.path.join(Dirpath, F)
# EdkLogger.quiet("[POINTER]" + FullName)
# c.CheckPointerNullComparison(FullName)
for FullName in EccGlobalData.gCFileList:
EdkLogger.quiet("[POINTER]" + FullName)
c.CheckPointerNullComparison(FullName)
# Include file checking
def IncludeFileCheck(self):
self.IncludeFileCheckIfndef()
self.IncludeFileCheckData()
self.IncludeFileCheckSameName()
# Check whether having include files with same name
def IncludeFileCheckSameName(self):
if EccGlobalData.gConfig.IncludeFileCheckSameName == '1' or EccGlobalData.gConfig.IncludeFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking same header file name ...")
SqlCommand = """select ID, FullPath from File
where Model = 1002 order by Name """
RecordDict = {}
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
List = Record[1].replace('/', '\\').split('\\')
if len(List) >= 2:
Key = List[-2] + '\\' + List[-1]
else:
Key = List[0]
if Key not in RecordDict:
RecordDict[Key] = [Record]
else:
RecordDict[Key].append(Record)
for Key in RecordDict:
if len(RecordDict[Key]) > 1:
for Item in RecordDict[Key]:
Path = mws.relpath(Item[1], EccGlobalData.gWorkspace)
if not EccGlobalData.gException.IsException(ERROR_INCLUDE_FILE_CHECK_NAME, Path):
EccGlobalData.gDb.TblReport.Insert(ERROR_INCLUDE_FILE_CHECK_NAME, OtherMsg="The file name for [%s] is duplicate" % Path, BelongsToTable='File', BelongsToItem=Item[0])
# Check whether all include file contents is guarded by a #ifndef statement.
def IncludeFileCheckIfndef(self):
if EccGlobalData.gConfig.IncludeFileCheckIfndefStatement == '1' or EccGlobalData.gConfig.IncludeFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking header file ifndef ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.h'):
# FullName = os.path.join(Dirpath, F)
# MsgList = c.CheckHeaderFileIfndef(FullName)
for FullName in EccGlobalData.gHFileList:
MsgList = c.CheckHeaderFileIfndef(FullName)
# Check whether include files NOT contain code or define data variables
def IncludeFileCheckData(self):
if EccGlobalData.gConfig.IncludeFileCheckData == '1' or EccGlobalData.gConfig.IncludeFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking header file data ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.h'):
# FullName = os.path.join(Dirpath, F)
# MsgList = c.CheckHeaderFileData(FullName)
for FullName in EccGlobalData.gHFileList:
MsgList = c.CheckHeaderFileData(FullName)
# Doxygen document checking
def DoxygenCheck(self):
self.DoxygenCheckFileHeader()
self.DoxygenCheckFunctionHeader()
self.DoxygenCheckCommentDescription()
self.DoxygenCheckCommentFormat()
self.DoxygenCheckCommand()
# Check whether the file headers are followed Doxygen special documentation blocks in section 2.3.5
def DoxygenCheckFileHeader(self):
if EccGlobalData.gConfig.DoxygenCheckFileHeader == '1' or EccGlobalData.gConfig.DoxygenCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking Doxygen file header ...")
for Dirpath, Dirnames, Filenames in self.WalkTree():
for F in Filenames:
Ext = os.path.splitext(F)[1]
if Ext in ('.h', '.c'):
FullName = os.path.join(Dirpath, F)
MsgList = c.CheckFileHeaderDoxygenComments(FullName)
elif Ext in ('.inf', '.dec', '.dsc', '.fdf'):
FullName = os.path.join(Dirpath, F)
op = open(FullName).readlines()
FileLinesList = op
LineNo = 0
CurrentSection = MODEL_UNKNOWN
HeaderSectionLines = []
HeaderCommentStart = False
HeaderCommentEnd = False
for Line in FileLinesList:
LineNo = LineNo + 1
Line = Line.strip()
if (LineNo < len(FileLinesList) - 1):
NextLine = FileLinesList[LineNo].strip()
#
# blank line
#
if (Line == '' or not Line) and LineNo == len(FileLinesList):
LastSectionFalg = True
#
# check whether file header comment section started
#
if Line.startswith('#') and \
(Line.find('@file') > -1) and \
not HeaderCommentStart:
if CurrentSection != MODEL_UNKNOWN:
SqlStatement = """ select ID from File where FullPath like '%s'""" % FullName
ResultSet = EccGlobalData.gDb.TblFile.Exec(SqlStatement)
for Result in ResultSet:
Msg = 'INF/DEC/DSC/FDF file header comment should begin with ""## @file"" or ""# @file""at the very top file'
EccGlobalData.gDb.TblReport.Insert(ERROR_DOXYGEN_CHECK_FILE_HEADER, Msg, "File", Result[0])
else:
CurrentSection = MODEL_IDENTIFIER_FILE_HEADER
#
# Append the first line to section lines.
#
HeaderSectionLines.append((Line, LineNo))
HeaderCommentStart = True
continue
#
# Collect Header content.
#
if (Line.startswith('#') and CurrentSection == MODEL_IDENTIFIER_FILE_HEADER) and\
HeaderCommentStart and not Line.startswith('##') and not\
HeaderCommentEnd and NextLine != '':
HeaderSectionLines.append((Line, LineNo))
continue
#
# Header content end
#
if (Line.startswith('##') or not Line.strip().startswith("#")) and HeaderCommentStart \
and not HeaderCommentEnd:
if Line.startswith('##'):
HeaderCommentEnd = True
HeaderSectionLines.append((Line, LineNo))
ParseHeaderCommentSection(HeaderSectionLines, FullName)
break
if HeaderCommentStart == False:
SqlStatement = """ select ID from File where FullPath like '%s'""" % FullName
ResultSet = EccGlobalData.gDb.TblFile.Exec(SqlStatement)
for Result in ResultSet:
                                Msg = 'INF/DEC/DSC/FDF file header comment should begin with ""## @file"" or ""# @file"" at the very top of the file'
EccGlobalData.gDb.TblReport.Insert(ERROR_DOXYGEN_CHECK_FILE_HEADER, Msg, "File", Result[0])
if HeaderCommentEnd == False:
SqlStatement = """ select ID from File where FullPath like '%s'""" % FullName
ResultSet = EccGlobalData.gDb.TblFile.Exec(SqlStatement)
for Result in ResultSet:
                                Msg = 'INF/DEC/DSC/FDF file header comment should end with ""##"" at the end of the file header comment block'
# Check whether File header Comment End with '##'
if EccGlobalData.gConfig.HeaderCheckFileCommentEnd == '1' or EccGlobalData.gConfig.HeaderCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EccGlobalData.gDb.TblReport.Insert(ERROR_DOXYGEN_CHECK_FILE_HEADER, Msg, "File", Result[0])
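    # Illustrative sketch of a file header the check above accepts (an assumption
    # inferred from the error messages, not an excerpt from a real module):
    #   ## @file
    #   #  Brief description of this INF/DEC/DSC/FDF file.
    #   ##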
    # Check whether the function headers follow the Doxygen special documentation blocks in section 2.3.5
def DoxygenCheckFunctionHeader(self):
if EccGlobalData.gConfig.DoxygenCheckFunctionHeader == '1' or EccGlobalData.gConfig.DoxygenCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking Doxygen function header ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.h', '.c'):
# FullName = os.path.join(Dirpath, F)
# MsgList = c.CheckFuncHeaderDoxygenComments(FullName)
for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:
MsgList = c.CheckFuncHeaderDoxygenComments(FullName)
# Check whether the first line of text in a comment block is a brief description of the element being documented.
# The brief description must end with a period.
def DoxygenCheckCommentDescription(self):
if EccGlobalData.gConfig.DoxygenCheckCommentDescription == '1' or EccGlobalData.gConfig.DoxygenCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
pass
    # Check comment lines in the '///< ... text ...' format; when used, they should appear after the code section they describe.
def DoxygenCheckCommentFormat(self):
if EccGlobalData.gConfig.DoxygenCheckCommentFormat == '1' or EccGlobalData.gConfig.DoxygenCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking Doxygen comment ///< ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.h', '.c'):
# FullName = os.path.join(Dirpath, F)
# MsgList = c.CheckDoxygenTripleForwardSlash(FullName)
for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:
MsgList = c.CheckDoxygenTripleForwardSlash(FullName)
    # Check that the only Doxygen commands used to mark the code are @bug and @todo.
def DoxygenCheckCommand(self):
if EccGlobalData.gConfig.DoxygenCheckCommand == '1' or EccGlobalData.gConfig.DoxygenCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking Doxygen command ...")
# for Dirpath, Dirnames, Filenames in self.WalkTree():
# for F in Filenames:
# if os.path.splitext(F)[1] in ('.h', '.c'):
# FullName = os.path.join(Dirpath, F)
# MsgList = c.CheckDoxygenCommand(FullName)
for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:
MsgList = c.CheckDoxygenCommand(FullName)
# Meta-Data File Processing Checking
def MetaDataFileCheck(self):
self.MetaDataFileCheckPathName()
self.MetaDataFileCheckGenerateFileList()
self.MetaDataFileCheckLibraryInstance()
self.MetaDataFileCheckLibraryInstanceDependent()
self.MetaDataFileCheckLibraryInstanceOrder()
self.MetaDataFileCheckLibraryNoUse()
self.MetaDataFileCheckLibraryDefinedInDec()
self.MetaDataFileCheckBinaryInfInFdf()
self.MetaDataFileCheckPcdDuplicate()
self.MetaDataFileCheckPcdFlash()
self.MetaDataFileCheckPcdNoUse()
self.MetaDataFileCheckGuidDuplicate()
self.MetaDataFileCheckModuleFileNoUse()
self.MetaDataFileCheckPcdType()
self.MetaDataFileCheckModuleFileGuidDuplication()
self.MetaDataFileCheckModuleFileGuidFormat()
self.MetaDataFileCheckModuleFileProtocolFormat()
self.MetaDataFileCheckModuleFilePpiFormat()
self.MetaDataFileCheckModuleFilePcdFormat()
# Check whether each file defined in meta-data exists
def MetaDataFileCheckPathName(self):
if EccGlobalData.gConfig.MetaDataFileCheckPathName == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
# This item is covered when parsing Inf/Dec/Dsc files
pass
# Generate a list for all files defined in meta-data files
def MetaDataFileCheckGenerateFileList(self):
if EccGlobalData.gConfig.MetaDataFileCheckGenerateFileList == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
# This item is covered when parsing Inf/Dec/Dsc files
pass
# Check whether all Library Instances defined for a given module (or dependent library instance) match the module's type.
# Each Library Instance must specify the Supported Module Types in its Inf file,
# and any module specifying the library instance must be one of the supported types.
def MetaDataFileCheckLibraryInstance(self):
if EccGlobalData.gConfig.MetaDataFileCheckLibraryInstance == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking for library instance type issue ...")
SqlCommand = """select A.ID, A.Value3, B.Value3 from Inf as A left join Inf as B
where A.Value2 = 'LIBRARY_CLASS' and A.Model = %s
and B.Value2 = 'MODULE_TYPE' and B.Model = %s and A.BelongsToFile = B.BelongsToFile
group by A.BelongsToFile""" % (MODEL_META_DATA_HEADER, MODEL_META_DATA_HEADER)
RecordSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)
LibraryClasses = {}
for Record in RecordSet:
List = Record[1].split('|', 1)
SupModType = []
if len(List) == 1:
SupModType = DT.SUP_MODULE_LIST_STRING.split(DT.TAB_VALUE_SPLIT)
elif len(List) == 2:
SupModType = List[1].split()
if List[0] not in LibraryClasses:
LibraryClasses[List[0]] = SupModType
else:
for Item in SupModType:
if Item not in LibraryClasses[List[0]]:
LibraryClasses[List[0]].append(Item)
if Record[2] != DT.SUP_MODULE_BASE and Record[2] not in SupModType:
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_LIBRARY_INSTANCE_2, OtherMsg="The Library Class '%s' does not specify its supported module types" % (List[0]), BelongsToTable='Inf', BelongsToItem=Record[0])
SqlCommand = """select A.ID, A.Value1, B.Value3 from Inf as A left join Inf as B
where A.Model = %s and B.Value2 = '%s' and B.Model = %s
and B.BelongsToFile = A.BelongsToFile""" \
% (MODEL_EFI_LIBRARY_CLASS, 'MODULE_TYPE', MODEL_META_DATA_HEADER)
RecordSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)
# Merge all LibraryClasses' supmodlist
RecordDict = {}
for Record in RecordSet:
if Record[1] not in RecordDict:
RecordDict[Record[1]] = [str(Record[2])]
else:
if Record[2] not in RecordDict[Record[1]]:
RecordDict[Record[1]].append(Record[2])
for Record in RecordSet:
if Record[1] in LibraryClasses:
if Record[2] not in LibraryClasses[Record[1]] and DT.SUP_MODULE_BASE not in RecordDict[Record[1]]:
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_LIBRARY_INSTANCE_1, Record[1]):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_LIBRARY_INSTANCE_1, OtherMsg="The type of Library Class [%s] defined in Inf file does not match the type of the module" % (Record[1]), BelongsToTable='Inf', BelongsToItem=Record[0])
else:
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_LIBRARY_INSTANCE_1, Record[1]):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_LIBRARY_INSTANCE_1, OtherMsg="The type of Library Class [%s] defined in Inf file does not match the type of the module" % (Record[1]), BelongsToTable='Inf', BelongsToItem=Record[0])
# Check whether a Library Instance has been defined for all dependent library classes
def MetaDataFileCheckLibraryInstanceDependent(self):
if EccGlobalData.gConfig.MetaDataFileCheckLibraryInstanceDependent == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking for library instance dependent issue ...")
SqlCommand = """select ID, Value1, Value2 from Dsc where Model = %s""" % MODEL_EFI_LIBRARY_CLASS
LibraryClasses = EccGlobalData.gDb.TblDsc.Exec(SqlCommand)
for LibraryClass in LibraryClasses:
if LibraryClass[1].upper() == 'NULL' or LibraryClass[1].startswith('!ifdef') or LibraryClass[1].startswith('!ifndef') or LibraryClass[1].endswith('!endif'):
continue
else:
LibraryIns = os.path.normpath(mws.join(EccGlobalData.gWorkspace, LibraryClass[2]))
SkipDirString = '|'.join(EccGlobalData.gConfig.SkipDirList)
p = re.compile(r'.*[\\/](?:%s^\S)[\\/]?.*' % SkipDirString)
if p.match(os.path.split(LibraryIns)[0].upper()):
continue
SqlCommand = """select Value3 from Inf where BelongsToFile =
(select ID from File where lower(FullPath) = lower('%s'))
and Value2 = '%s'""" % (LibraryIns, DT.PLATFORM_COMPONENT_TYPE_LIBRARY_CLASS)
RecordSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)
IsFound = False
for Record in RecordSet:
LibName = Record[0].split('|', 1)[0]
if LibraryClass[1] == LibName:
IsFound = True
if not IsFound:
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_LIBRARY_INSTANCE_DEPENDENT, LibraryClass[1]):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_LIBRARY_INSTANCE_DEPENDENT, OtherMsg="The Library Class [%s] is not specified in '%s'" % (LibraryClass[1], LibraryClass[2]), BelongsToTable='Dsc', BelongsToItem=LibraryClass[0])
# Check whether the Library Instances specified by the LibraryClasses sections are listed in order of dependencies
def MetaDataFileCheckLibraryInstanceOrder(self):
if EccGlobalData.gConfig.MetaDataFileCheckLibraryInstanceOrder == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
# This checkpoint is not necessary for Ecc check
pass
    # Check for unnecessary inclusion of library classes in the INF file
    # Check for unnecessary duplication of library class names in the DSC file
def MetaDataFileCheckLibraryNoUse(self):
if EccGlobalData.gConfig.MetaDataFileCheckLibraryNoUse == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking for library instance not used ...")
SqlCommand = """select ID, Value1 from Inf as A where A.Model = %s and A.Value1 not in (select B.Value1 from Dsc as B where Model = %s)""" % (MODEL_EFI_LIBRARY_CLASS, MODEL_EFI_LIBRARY_CLASS)
RecordSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)
for Record in RecordSet:
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_LIBRARY_NO_USE, Record[1]):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_LIBRARY_NO_USE, OtherMsg="The Library Class [%s] is not used in any platform" % (Record[1]), BelongsToTable='Inf', BelongsToItem=Record[0])
SqlCommand = """
select A.ID, A.Value1, A.BelongsToFile, A.StartLine, B.StartLine from Dsc as A left join Dsc as B
where A.Model = %s and B.Model = %s and A.Scope1 = B.Scope1 and A.Scope2 = B.Scope2 and A.ID != B.ID
and A.Value1 = B.Value1 and A.Value2 != B.Value2 and A.BelongsToItem = -1 and B.BelongsToItem = -1 and A.StartLine != B.StartLine and B.BelongsToFile = A.BelongsToFile""" \
% (MODEL_EFI_LIBRARY_CLASS, MODEL_EFI_LIBRARY_CLASS)
RecordSet = EccGlobalData.gDb.TblDsc.Exec(SqlCommand)
for Record in RecordSet:
if Record[3] and Record[4] and Record[3] != Record[4] and Record[1] != 'NULL':
SqlCommand = """select FullPath from File where ID = %s""" % (Record[2])
FilePathList = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for FilePath in FilePathList:
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_LIBRARY_NAME_DUPLICATE, Record[1]):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_LIBRARY_NAME_DUPLICATE, OtherMsg="The Library Class [%s] is duplicated in '%s' line %s and line %s." % (Record[1], FilePath, Record[3], Record[4]), BelongsToTable='Dsc', BelongsToItem=Record[0])
    # Check whether the header files in the Include\Library directory are defined in the package DEC file.
def MetaDataFileCheckLibraryDefinedInDec(self):
if EccGlobalData.gConfig.MetaDataFileCheckLibraryDefinedInDec == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking for library instance whether be defined in the package dec file ...")
SqlCommand = """
select A.Value1, A.StartLine, A.ID, B.Value1 from Inf as A left join Dec as B
on A.Model = B.Model and A.Value1 = B.Value1 where A.Model=%s
""" % MODEL_EFI_LIBRARY_CLASS
RecordSet = EccGlobalData.gDb.TblDsc.Exec(SqlCommand)
for Record in RecordSet:
LibraryInInf, Line, ID, LibraryDec = Record
if not LibraryDec:
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_LIBRARY_NOT_DEFINED, LibraryInInf):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_LIBRARY_NOT_DEFINED, \
OtherMsg="The Library Class [%s] in %s line is not defined in the associated package file." % (LibraryInInf, Line),
BelongsToTable='Inf', BelongsToItem=ID)
    # Check that an INF file specified in the FDF file but not in the DSC file is for a binary module only
def MetaDataFileCheckBinaryInfInFdf(self):
if EccGlobalData.gConfig.MetaDataFileCheckBinaryInfInFdf == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking for non-binary modules defined in FDF files ...")
SqlCommand = """select A.ID, A.Value1 from Fdf as A
where A.Model = %s
and A.Enabled > -1
and A.Value1 not in
(select B.Value1 from Dsc as B
where B.Model = %s
and B.Enabled > -1)""" % (MODEL_META_DATA_COMPONENT, MODEL_META_DATA_COMPONENT)
RecordSet = EccGlobalData.gDb.TblFdf.Exec(SqlCommand)
for Record in RecordSet:
FdfID = Record[0]
FilePath = Record[1]
FilePath = os.path.normpath(mws.join(EccGlobalData.gWorkspace, FilePath))
SqlCommand = """select ID from Inf where Model = %s and BelongsToFile = (select ID from File where FullPath like '%s')
""" % (MODEL_EFI_SOURCE_FILE, FilePath)
NewRecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
if NewRecordSet != []:
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_BINARY_INF_IN_FDF, FilePath):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_BINARY_INF_IN_FDF, OtherMsg="File [%s] defined in FDF file and not in DSC file must be a binary module" % (FilePath), BelongsToTable='Fdf', BelongsToItem=FdfID)
# Check whether a PCD is set in a Dsc file or the FDF file, but not in both.
def MetaDataFileCheckPcdDuplicate(self):
if EccGlobalData.gConfig.MetaDataFileCheckPcdDuplicate == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking for duplicate PCDs defined in both DSC and FDF files ...")
SqlCommand = """
select A.ID, A.Value1, A.Value2, A.BelongsToFile, B.ID, B.Value1, B.Value2, B.BelongsToFile from Dsc as A, Fdf as B
where A.Model >= %s and A.Model < %s
and B.Model >= %s and B.Model < %s
and A.Value1 = B.Value1
and A.Value2 = B.Value2
and A.Enabled > -1
and B.Enabled > -1
group by A.ID
""" % (MODEL_PCD, MODEL_META_DATA_HEADER, MODEL_PCD, MODEL_META_DATA_HEADER)
RecordSet = EccGlobalData.gDb.TblDsc.Exec(SqlCommand)
for Record in RecordSet:
SqlCommand1 = """select Name from File where ID = %s""" % Record[3]
SqlCommand2 = """select Name from File where ID = %s""" % Record[7]
DscFileName = os.path.splitext(EccGlobalData.gDb.TblDsc.Exec(SqlCommand1)[0][0])[0]
FdfFileName = os.path.splitext(EccGlobalData.gDb.TblDsc.Exec(SqlCommand2)[0][0])[0]
if DscFileName != FdfFileName:
continue
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_PCD_DUPLICATE, Record[1] + '.' + Record[2]):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_DUPLICATE, OtherMsg="The PCD [%s] is defined in both FDF file and DSC file" % (Record[1] + '.' + Record[2]), BelongsToTable='Dsc', BelongsToItem=Record[0])
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_PCD_DUPLICATE, Record[5] + '.' + Record[6]):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_DUPLICATE, OtherMsg="The PCD [%s] is defined in both FDF file and DSC file" % (Record[5] + '.' + Record[6]), BelongsToTable='Fdf', BelongsToItem=Record[4])
EdkLogger.quiet("Checking for duplicate PCDs defined in DEC files ...")
SqlCommand = """
select A.ID, A.Value1, A.Value2, A.Model, B.Model from Dec as A left join Dec as B
where A.Model >= %s and A.Model < %s
and B.Model >= %s and B.Model < %s
and A.Value1 = B.Value1
and A.Value2 = B.Value2
and A.Scope1 = B.Scope1
and A.ID != B.ID
and A.Model = B.Model
and A.Enabled > -1
and B.Enabled > -1
and A.BelongsToFile = B.BelongsToFile
group by A.ID
""" % (MODEL_PCD, MODEL_META_DATA_HEADER, MODEL_PCD, MODEL_META_DATA_HEADER)
RecordSet = EccGlobalData.gDb.TblDec.Exec(SqlCommand)
for Record in RecordSet:
RecordCat = Record[1] + '.' + Record[2]
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_PCD_DUPLICATE, RecordCat):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_DUPLICATE, OtherMsg="The PCD [%s] is defined duplicated in DEC file" % RecordCat, BelongsToTable='Dec', BelongsToItem=Record[0])
    # Check that PCD settings in the FDF file are related only to flash.
def MetaDataFileCheckPcdFlash(self):
if EccGlobalData.gConfig.MetaDataFileCheckPcdFlash == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking only Flash related PCDs are used in FDF ...")
SqlCommand = """
select ID, Value1, Value2, BelongsToFile from Fdf as A
where A.Model >= %s and Model < %s
and A.Enabled > -1
and A.Value2 not like '%%Flash%%'
""" % (MODEL_PCD, MODEL_META_DATA_HEADER)
RecordSet = EccGlobalData.gDb.TblFdf.Exec(SqlCommand)
for Record in RecordSet:
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_PCD_FLASH, Record[1] + '.' + Record[2]):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_FLASH, OtherMsg="The PCD [%s] defined in FDF file is not related to Flash" % (Record[1] + '.' + Record[2]), BelongsToTable='Fdf', BelongsToItem=Record[0])
    # Check for PCDs that are used in INF files but not specified in DSC or FDF files
def MetaDataFileCheckPcdNoUse(self):
if EccGlobalData.gConfig.MetaDataFileCheckPcdNoUse == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking for non-specified PCDs ...")
SqlCommand = """
select ID, Value1, Value2, BelongsToFile from Inf as A
where A.Model >= %s and Model < %s
and A.Enabled > -1
and (A.Value1, A.Value2) not in
(select Value1, Value2 from Dsc as B
where B.Model >= %s and B.Model < %s
and B.Enabled > -1)
and (A.Value1, A.Value2) not in
(select Value1, Value2 from Fdf as C
where C.Model >= %s and C.Model < %s
and C.Enabled > -1)
""" % (MODEL_PCD, MODEL_META_DATA_HEADER, MODEL_PCD, MODEL_META_DATA_HEADER, MODEL_PCD, MODEL_META_DATA_HEADER)
RecordSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)
for Record in RecordSet:
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_PCD_NO_USE, Record[1] + '.' + Record[2]):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_NO_USE, OtherMsg="The PCD [%s] defined in INF file is not specified in either DSC or FDF files" % (Record[1] + '.' + Record[2]), BelongsToTable='Inf', BelongsToItem=Record[0])
    # Check for duplicate GUIDs defined for Guid/Protocol/Ppi
def MetaDataFileCheckGuidDuplicate(self):
if EccGlobalData.gConfig.MetaDataFileCheckGuidDuplicate == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking for duplicate GUID/PPI/PROTOCOL ...")
# Check Guid
self.CheckGuidProtocolPpi(ERROR_META_DATA_FILE_CHECK_DUPLICATE_GUID, MODEL_EFI_GUID, EccGlobalData.gDb.TblDec)
self.CheckGuidProtocolPpi(ERROR_META_DATA_FILE_CHECK_DUPLICATE_GUID, MODEL_EFI_GUID, EccGlobalData.gDb.TblDsc)
self.CheckGuidProtocolPpiValue(ERROR_META_DATA_FILE_CHECK_DUPLICATE_GUID, MODEL_EFI_GUID)
# Check protocol
self.CheckGuidProtocolPpi(ERROR_META_DATA_FILE_CHECK_DUPLICATE_PROTOCOL, MODEL_EFI_PROTOCOL, EccGlobalData.gDb.TblDec)
self.CheckGuidProtocolPpi(ERROR_META_DATA_FILE_CHECK_DUPLICATE_PROTOCOL, MODEL_EFI_PROTOCOL, EccGlobalData.gDb.TblDsc)
self.CheckGuidProtocolPpiValue(ERROR_META_DATA_FILE_CHECK_DUPLICATE_PROTOCOL, MODEL_EFI_PROTOCOL)
# Check ppi
self.CheckGuidProtocolPpi(ERROR_META_DATA_FILE_CHECK_DUPLICATE_PPI, MODEL_EFI_PPI, EccGlobalData.gDb.TblDec)
self.CheckGuidProtocolPpi(ERROR_META_DATA_FILE_CHECK_DUPLICATE_PPI, MODEL_EFI_PPI, EccGlobalData.gDb.TblDsc)
self.CheckGuidProtocolPpiValue(ERROR_META_DATA_FILE_CHECK_DUPLICATE_PPI, MODEL_EFI_PPI)
# Check whether all files under module directory are described in INF files
def MetaDataFileCheckModuleFileNoUse(self):
if EccGlobalData.gConfig.MetaDataFileCheckModuleFileNoUse == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking for no used module files ...")
SqlCommand = """
select upper(Path) from File where ID in (select BelongsToFile from Inf where BelongsToFile != -1)
"""
InfPathSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)
InfPathList = []
for Item in InfPathSet:
if Item[0] not in InfPathList:
InfPathList.append(Item[0])
SqlCommand = """
select ID, Path, FullPath from File where upper(FullPath) not in
(select upper(A.Path) || '\\' || upper(B.Value1) from File as A, INF as B
where A.ID in (select BelongsToFile from INF where Model = %s group by BelongsToFile) and
B.BelongsToFile = A.ID and B.Model = %s)
and (Model = %s or Model = %s)
""" % (MODEL_EFI_SOURCE_FILE, MODEL_EFI_SOURCE_FILE, MODEL_FILE_C, MODEL_FILE_H)
RecordSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)
for Record in RecordSet:
Path = Record[1]
                Path = Path.upper().replace('\\X64', '').replace('\\IA32', '').replace('\\EBC', '').replace('\\IPF', '').replace('\\ARM', '')  # double the backslashes so they are literal path separators, not escape sequences
if Path in InfPathList:
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_MODULE_FILE_NO_USE, Record[2]):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_MODULE_FILE_NO_USE, OtherMsg="The source file [%s] is existing in module directory but it is not described in INF file." % (Record[2]), BelongsToTable='File', BelongsToItem=Record[0])
# Check whether the PCD is correctly used in C function via its type
def MetaDataFileCheckPcdType(self):
if EccGlobalData.gConfig.MetaDataFileCheckPcdType == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking for pcd type in c code function usage ...")
SqlCommand = """
select ID, Model, Value1, Value2, BelongsToFile from INF where Model > %s and Model < %s
""" % (MODEL_PCD, MODEL_META_DATA_HEADER)
PcdSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)
for Pcd in PcdSet:
Model = Pcd[1]
PcdName = Pcd[2]
if Pcd[3]:
PcdName = Pcd[3]
BelongsToFile = Pcd[4]
SqlCommand = """
select ID from File where FullPath in
(select B.Path || '\\' || A.Value1 from INF as A, File as B where A.Model = %s and A.BelongsToFile = %s
and B.ID = %s and (B.Model = %s or B.Model = %s))
""" % (MODEL_EFI_SOURCE_FILE, BelongsToFile, BelongsToFile, MODEL_FILE_C, MODEL_FILE_H)
TableSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Tbl in TableSet:
TblName = 'Identifier' + str(Tbl[0])
SqlCommand = """
select Name, ID from %s where value like '%s' and Model = %s
""" % (TblName, PcdName, MODEL_IDENTIFIER_FUNCTION_CALLING)
RecordSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)
TblNumber = TblName.replace('Identifier', '')
for Record in RecordSet:
FunName = Record[0]
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_PCD_TYPE, FunName):
if Model in [MODEL_PCD_FIXED_AT_BUILD] and not FunName.startswith('FixedPcdGet'):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_TYPE, OtherMsg="The pcd '%s' is defined as a FixPcd but now it is called by c function [%s]" % (PcdName, FunName), BelongsToTable=TblName, BelongsToItem=Record[1])
if Model in [MODEL_PCD_FEATURE_FLAG] and (not FunName.startswith('FeaturePcdGet') and not FunName.startswith('FeaturePcdSet')):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_TYPE, OtherMsg="The pcd '%s' is defined as a FeaturePcd but now it is called by c function [%s]" % (PcdName, FunName), BelongsToTable=TblName, BelongsToItem=Record[1])
if Model in [MODEL_PCD_PATCHABLE_IN_MODULE] and (not FunName.startswith('PatchablePcdGet') and not FunName.startswith('PatchablePcdSet')):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_TYPE, OtherMsg="The pcd '%s' is defined as a PatchablePcd but now it is called by c function [%s]" % (PcdName, FunName), BelongsToTable=TblName, BelongsToItem=Record[1])
#ERROR_META_DATA_FILE_CHECK_PCD_TYPE
pass
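    # The accessor-prefix rules enforced above, restated for quick reference
    # (derived from the code itself, not from additional documentation):
    #   FixedAtBuild PCDs      -> FixedPcdGet*
    #   FeatureFlag PCDs       -> FeaturePcdGet* / FeaturePcdSet*
    #   PatchableInModule PCDs -> PatchablePcdGet* / PatchablePcdSet*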
# Internal worker function to get the INF workspace relative path from FileID
def GetInfFilePathFromID(self, FileID):
Table = EccGlobalData.gDb.TblFile
SqlCommand = """select A.FullPath from %s as A where A.ID = %s""" % (Table.Table, FileID)
RecordSet = Table.Exec(SqlCommand)
Path = ""
for Record in RecordSet:
Path = mws.relpath(Record[0], EccGlobalData.gWorkspace)
return Path
    # Check whether two module INFs under one workspace have the same FILE_GUID value
def MetaDataFileCheckModuleFileGuidDuplication(self):
if EccGlobalData.gConfig.MetaDataFileCheckModuleFileGuidDuplication == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking for pcd type in c code function usage ...")
Table = EccGlobalData.gDb.TblInf
SqlCommand = """
select A.ID, A.Value3, A.BelongsToFile, B.BelongsToFile from %s as A, %s as B
where A.Value2 = 'FILE_GUID' and B.Value2 = 'FILE_GUID' and
A.Value3 = B.Value3 and A.ID != B.ID group by A.ID
""" % (Table.Table, Table.Table)
RecordSet = Table.Exec(SqlCommand)
for Record in RecordSet:
InfPath1 = self.GetInfFilePathFromID(Record[2])
InfPath2 = self.GetInfFilePathFromID(Record[3])
if InfPath1 and InfPath2:
if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_MODULE_FILE_GUID_DUPLICATION, InfPath1):
Msg = "The FILE_GUID of INF file [%s] is duplicated with that of %s" % (InfPath1, InfPath2)
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_MODULE_FILE_GUID_DUPLICATION, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])
# Check Guid Format in module INF
def MetaDataFileCheckModuleFileGuidFormat(self):
if EccGlobalData.gConfig.MetaDataFileCheckModuleFileGuidFormat == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Check Guid Format in module INF ...")
Table = EccGlobalData.gDb.TblInf
SqlCommand = """
select ID, Value1, Usage, BelongsToFile from %s where Model = %s group by ID
""" % (Table.Table, MODEL_EFI_GUID)
RecordSet = Table.Exec(SqlCommand)
for Record in RecordSet:
Value1 = Record[1]
Value2 = Record[2]
GuidCommentList = []
InfPath = self.GetInfFilePathFromID(Record[3])
Msg = "The GUID format of %s in INF file [%s] does not follow rules" % (Value1, InfPath)
if Value2.startswith(DT.TAB_SPECIAL_COMMENT):
GuidCommentList = Value2[2:].split(DT.TAB_SPECIAL_COMMENT)
if GuidCommentList[0].strip().startswith(DT.TAB_INF_USAGE_UNDEFINED):
continue
elif len(GuidCommentList) > 1:
if not GuidCommentList[0].strip().startswith((DT.TAB_INF_USAGE_PRO,
DT.TAB_INF_USAGE_SOME_PRO,
DT.TAB_INF_USAGE_CON,
DT.TAB_INF_USAGE_SOME_CON)):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_FORMAT_GUID, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])
if not (GuidCommentList[1].strip()).startswith(DT.TAB_INF_GUIDTYPE_VAR) and \
not GuidCommentList[1].strip().startswith((DT.TAB_INF_GUIDTYPE_EVENT,
DT.TAB_INF_GUIDTYPE_HII,
DT.TAB_INF_GUIDTYPE_FILE,
DT.TAB_INF_GUIDTYPE_HOB,
DT.TAB_INF_GUIDTYPE_FV,
DT.TAB_INF_GUIDTYPE_ST,
DT.TAB_INF_GUIDTYPE_TSG,
DT.TAB_INF_GUIDTYPE_GUID,
DT.TAB_INF_GUIDTYPE_PROTOCOL,
DT.TAB_INF_GUIDTYPE_PPI,
DT.TAB_INF_USAGE_UNDEFINED)):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_FORMAT_GUID, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])
else:
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_FORMAT_GUID, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])
else:
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_FORMAT_GUID, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])
# Check Protocol Format in module INF
def MetaDataFileCheckModuleFileProtocolFormat(self):
if EccGlobalData.gConfig.MetaDataFileCheckModuleFileProtocolFormat == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Check Protocol Format in module INF ...")
Table = EccGlobalData.gDb.TblInf
SqlCommand = """
select ID, Value1, Usage, BelongsToFile from %s where Model = %s group by ID
""" % (Table.Table, MODEL_EFI_PROTOCOL)
RecordSet = Table.Exec(SqlCommand)
for Record in RecordSet:
Value1 = Record[1]
Value2 = Record[2]
GuidCommentList = []
InfPath = self.GetInfFilePathFromID(Record[3])
Msg = "The Protocol format of %s in INF file [%s] does not follow rules" % (Value1, InfPath)
if Value2.startswith(DT.TAB_SPECIAL_COMMENT):
GuidCommentList = Value2[2:].split(DT.TAB_SPECIAL_COMMENT)
if len(GuidCommentList) >= 1:
if not GuidCommentList[0].strip().startswith((DT.TAB_INF_USAGE_PRO,
DT.TAB_INF_USAGE_SOME_PRO,
DT.TAB_INF_USAGE_CON,
DT.TAB_INF_USAGE_SOME_CON,
DT.TAB_INF_USAGE_NOTIFY,
DT.TAB_INF_USAGE_TO_START,
DT.TAB_INF_USAGE_BY_START,
DT.TAB_INF_USAGE_UNDEFINED)):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_FORMAT_PROTOCOL, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])
else:
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_FORMAT_PROTOCOL, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])
# Check Ppi Format in module INF
def MetaDataFileCheckModuleFilePpiFormat(self):
if EccGlobalData.gConfig.MetaDataFileCheckModuleFilePpiFormat == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Check Ppi Format in module INF ...")
Table = EccGlobalData.gDb.TblInf
SqlCommand = """
select ID, Value1, Usage, BelongsToFile from %s where Model = %s group by ID
""" % (Table.Table, MODEL_EFI_PPI)
RecordSet = Table.Exec(SqlCommand)
for Record in RecordSet:
Value1 = Record[1]
Value2 = Record[2]
GuidCommentList = []
InfPath = self.GetInfFilePathFromID(Record[3])
Msg = "The Ppi format of %s in INF file [%s] does not follow rules" % (Value1, InfPath)
if Value2.startswith(DT.TAB_SPECIAL_COMMENT):
GuidCommentList = Value2[2:].split(DT.TAB_SPECIAL_COMMENT)
if len(GuidCommentList) >= 1:
if not GuidCommentList[0].strip().startswith((DT.TAB_INF_USAGE_PRO,
DT.TAB_INF_USAGE_SOME_PRO,
DT.TAB_INF_USAGE_CON,
DT.TAB_INF_USAGE_SOME_CON,
DT.TAB_INF_USAGE_NOTIFY,
DT.TAB_INF_USAGE_UNDEFINED)):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_FORMAT_PPI, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])
else:
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_FORMAT_PPI, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])
# Check Pcd Format in module INF
def MetaDataFileCheckModuleFilePcdFormat(self):
if EccGlobalData.gConfig.MetaDataFileCheckModuleFilePcdFormat == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Check Pcd Format in module INF ...")
Table = EccGlobalData.gDb.TblInf
SqlCommand = """
select ID, Model, Value1, Value2, Usage, BelongsToFile from %s where Model >= %s and Model < %s group by ID
""" % (Table.Table, MODEL_PCD, MODEL_META_DATA_HEADER)
RecordSet = Table.Exec(SqlCommand)
for Record in RecordSet:
Model = Record[1]
PcdName = Record[2] + '.' + Record[3]
Usage = Record[4]
PcdCommentList = []
InfPath = self.GetInfFilePathFromID(Record[5])
Msg = "The Pcd format of %s in INF file [%s] does not follow rules" % (PcdName, InfPath)
if Usage.startswith(DT.TAB_SPECIAL_COMMENT):
PcdCommentList = Usage[2:].split(DT.TAB_SPECIAL_COMMENT)
if len(PcdCommentList) >= 1:
if Model in [MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_FEATURE_FLAG] \
and not PcdCommentList[0].strip().startswith((DT.TAB_INF_USAGE_SOME_PRO,
DT.TAB_INF_USAGE_CON,
DT.TAB_INF_USAGE_UNDEFINED)):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_FORMAT_PCD, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])
if Model in [MODEL_PCD_PATCHABLE_IN_MODULE, MODEL_PCD_DYNAMIC, MODEL_PCD_DYNAMIC_EX] \
and not PcdCommentList[0].strip().startswith((DT.TAB_INF_USAGE_PRO,
DT.TAB_INF_USAGE_SOME_PRO,
DT.TAB_INF_USAGE_CON,
DT.TAB_INF_USAGE_SOME_CON,
DT.TAB_INF_USAGE_UNDEFINED)):
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_FORMAT_PCD, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])
else:
EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_FORMAT_PCD, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])
    # Check whether there is a duplicate Guid/Ppi/Protocol name
def CheckGuidProtocolPpi(self, ErrorID, Model, Table):
Name = ''
if Model == MODEL_EFI_GUID:
Name = 'guid'
if Model == MODEL_EFI_PROTOCOL:
Name = 'protocol'
if Model == MODEL_EFI_PPI:
Name = 'ppi'
SqlCommand = """
select A.ID, A.Value1 from %s as A, %s as B
where A.Model = %s and B.Model = %s
and A.Value1 like B.Value1 and A.ID != B.ID
and A.Scope1 = B.Scope1
and A.Enabled > -1
and B.Enabled > -1
group by A.ID
""" % (Table.Table, Table.Table, Model, Model)
RecordSet = Table.Exec(SqlCommand)
for Record in RecordSet:
if not EccGlobalData.gException.IsException(ErrorID, Record[1]):
EccGlobalData.gDb.TblReport.Insert(ErrorID, OtherMsg="The %s name [%s] is defined more than one time" % (Name.upper(), Record[1]), BelongsToTable=Table.Table, BelongsToItem=Record[0])
    # Check whether there is a duplicate Guid/Ppi/Protocol value
def CheckGuidProtocolPpiValue(self, ErrorID, Model):
Name = ''
Table = EccGlobalData.gDb.TblDec
if Model == MODEL_EFI_GUID:
Name = 'guid'
if Model == MODEL_EFI_PROTOCOL:
Name = 'protocol'
if Model == MODEL_EFI_PPI:
Name = 'ppi'
SqlCommand = """
select A.ID, A.Value1, A.Value2 from %s as A, %s as B
where A.Model = %s and B.Model = %s
and A.Value2 like B.Value2 and A.ID != B.ID
and A.Scope1 = B.Scope1 and A.Value1 != B.Value1
group by A.ID
""" % (Table.Table, Table.Table, Model, Model)
RecordSet = Table.Exec(SqlCommand)
for Record in RecordSet:
if not EccGlobalData.gException.IsException(ErrorID, Record[2]):
EccGlobalData.gDb.TblReport.Insert(ErrorID, OtherMsg="The %s value [%s] is used more than one time" % (Name.upper(), Record[2]), BelongsToTable=Table.Table, BelongsToItem=Record[0])
# Naming Convention Check
def NamingConventionCheck(self):
if EccGlobalData.gConfig.NamingConventionCheckDefineStatement == '1' \
or EccGlobalData.gConfig.NamingConventionCheckTypedefStatement == '1' \
or EccGlobalData.gConfig.NamingConventionCheckIfndefStatement == '1' \
or EccGlobalData.gConfig.NamingConventionCheckVariableName == '1' \
or EccGlobalData.gConfig.NamingConventionCheckSingleCharacterVariable == '1' \
or EccGlobalData.gConfig.NamingConventionCheckAll == '1'\
or EccGlobalData.gConfig.CheckAll == '1':
for Dirpath, Dirnames, Filenames in self.WalkTree():
for F in Filenames:
if os.path.splitext(F)[1] in ('.h', '.c'):
FullName = os.path.join(Dirpath, F)
Id = c.GetTableID(FullName)
if Id < 0:
continue
FileTable = 'Identifier' + str(Id)
self.NamingConventionCheckDefineStatement(FileTable)
self.NamingConventionCheckTypedefStatement(FileTable)
self.NamingConventionCheckVariableName(FileTable)
self.NamingConventionCheckSingleCharacterVariable(FileTable)
if os.path.splitext(F)[1] in ('.h'):
self.NamingConventionCheckIfndefStatement(FileTable)
self.NamingConventionCheckPathName()
self.NamingConventionCheckFunctionName()
# Check whether only capital letters are used for #define declarations
def NamingConventionCheckDefineStatement(self, FileTable):
if EccGlobalData.gConfig.NamingConventionCheckDefineStatement == '1' or EccGlobalData.gConfig.NamingConventionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking naming covention of #define statement ...")
SqlCommand = """select ID, Value from %s where Model = %s""" % (FileTable, MODEL_IDENTIFIER_MACRO_DEFINE)
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
Name = Record[1].strip().split()[1]
if Name.find('(') != -1:
Name = Name[0:Name.find('(')]
if Name.upper() != Name:
if not EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_DEFINE_STATEMENT, Name):
EccGlobalData.gDb.TblReport.Insert(ERROR_NAMING_CONVENTION_CHECK_DEFINE_STATEMENT, OtherMsg="The #define name [%s] does not follow the rules" % (Name), BelongsToTable=FileTable, BelongsToItem=Record[0])
# Check whether only capital letters are used for typedef declarations
def NamingConventionCheckTypedefStatement(self, FileTable):
if EccGlobalData.gConfig.NamingConventionCheckTypedefStatement == '1' or EccGlobalData.gConfig.NamingConventionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking naming covention of #typedef statement ...")
SqlCommand = """select ID, Name from %s where Model = %s""" % (FileTable, MODEL_IDENTIFIER_TYPEDEF)
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
Name = Record[1].strip()
if Name != '' and Name is not None:
if Name[0] == '(':
Name = Name[1:Name.find(')')]
if Name.find('(') > -1:
Name = Name[Name.find('(') + 1 : Name.find(')')]
Name = Name.replace('WINAPI', '')
Name = Name.replace('*', '').strip()
if Name.upper() != Name:
if not EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_TYPEDEF_STATEMENT, Name):
EccGlobalData.gDb.TblReport.Insert(ERROR_NAMING_CONVENTION_CHECK_TYPEDEF_STATEMENT, OtherMsg="The #typedef name [%s] does not follow the rules" % (Name), BelongsToTable=FileTable, BelongsToItem=Record[0])
# Check whether the #ifndef at the start of an include file uses both prefix and postfix underscore characters, '_'.
def NamingConventionCheckIfndefStatement(self, FileTable):
if EccGlobalData.gConfig.NamingConventionCheckIfndefStatement == '1' or EccGlobalData.gConfig.NamingConventionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking naming covention of #ifndef statement ...")
SqlCommand = """select ID, Value from %s where Model = %s""" % (FileTable, MODEL_IDENTIFIER_MACRO_IFNDEF)
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
Name = Record[1].replace('#ifndef', '').strip()
if Name[0] != '_' or Name[-1] != '_':
if not EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_IFNDEF_STATEMENT, Name):
EccGlobalData.gDb.TblReport.Insert(ERROR_NAMING_CONVENTION_CHECK_IFNDEF_STATEMENT, OtherMsg="The #ifndef name [%s] does not follow the rules" % (Name), BelongsToTable=FileTable, BelongsToItem=Record[0])
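    # Quick illustration of the rule above (hypothetical names, not taken from a real header):
    #   #ifndef _MY_DRIVER_H_    -> accepted (leading and trailing underscore)
    #   #ifndef MY_DRIVER_H      -> reported by this check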
# Rule for path name, variable name and function name
# 1. First character should be upper case
# 2. Existing lower case in a word
# 3. No space existence
# Check whether the path name followed the rule
def NamingConventionCheckPathName(self):
if EccGlobalData.gConfig.NamingConventionCheckPathName == '1' or EccGlobalData.gConfig.NamingConventionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking naming covention of file path name ...")
Pattern = re.compile(r'^[A-Z]+\S*[a-z]\S*$')
SqlCommand = """select ID, Name from File"""
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
if not Pattern.match(Record[1]):
if not EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_PATH_NAME, Record[1]):
EccGlobalData.gDb.TblReport.Insert(ERROR_NAMING_CONVENTION_CHECK_PATH_NAME, OtherMsg="The file path [%s] does not follow the rules" % (Record[1]), BelongsToTable='File', BelongsToItem=Record[0])
# Rule for path name, variable name and function name
# 1. First character should be upper case
# 2. Existing lower case in a word
# 3. No space existence
# 4. Global variable name must start with a 'g'
# Check whether the variable name followed the rule
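    # Examples of how the pattern below behaves (illustrative names only):
    #   'Status', 'gDriverPrivateData', 'mInternalCounter' -> accepted
    #   'buffer' (lower-case first letter), 'INDEX' (no lower-case letter) -> reported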
def NamingConventionCheckVariableName(self, FileTable):
if EccGlobalData.gConfig.NamingConventionCheckVariableName == '1' or EccGlobalData.gConfig.NamingConventionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking naming covention of variable name ...")
Pattern = re.compile(r'^[A-Zgm]+\S*[a-z]\S*$')
SqlCommand = """select ID, Name from %s where Model = %s""" % (FileTable, MODEL_IDENTIFIER_VARIABLE)
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
Var = Record[1]
if Var.startswith('CONST'):
Var = Var[5:].lstrip()
if not Pattern.match(Var):
if not EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_VARIABLE_NAME, Record[1]):
EccGlobalData.gDb.TblReport.Insert(ERROR_NAMING_CONVENTION_CHECK_VARIABLE_NAME, OtherMsg="The variable name [%s] does not follow the rules" % (Record[1]), BelongsToTable=FileTable, BelongsToItem=Record[0])
# Rule for path name, variable name and function name
# 1. First character should be upper case
# 2. Existing lower case in a word
# 3. No space existence
# Check whether the function name followed the rule
def NamingConventionCheckFunctionName(self):
if EccGlobalData.gConfig.NamingConventionCheckFunctionName == '1' or EccGlobalData.gConfig.NamingConventionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking naming covention of function name ...")
Pattern = re.compile(r'^[A-Z]+\S*[a-z]\S*$')
SqlCommand = """select ID, Name from Function"""
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
if not Pattern.match(Record[1]):
if not EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_FUNCTION_NAME, Record[1]):
EccGlobalData.gDb.TblReport.Insert(ERROR_NAMING_CONVENTION_CHECK_FUNCTION_NAME, OtherMsg="The function name [%s] does not follow the rules" % (Record[1]), BelongsToTable='Function', BelongsToItem=Record[0])
    # Check that single-character variable names are NOT used
def NamingConventionCheckSingleCharacterVariable(self, FileTable):
if EccGlobalData.gConfig.NamingConventionCheckSingleCharacterVariable == '1' or EccGlobalData.gConfig.NamingConventionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':
EdkLogger.quiet("Checking naming covention of single character variable name ...")
SqlCommand = """select ID, Name from %s where Model = %s""" % (FileTable, MODEL_IDENTIFIER_VARIABLE)
RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)
for Record in RecordSet:
Variable = Record[1].replace('*', '')
if len(Variable) == 1:
if not EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_SINGLE_CHARACTER_VARIABLE, Record[1]):
EccGlobalData.gDb.TblReport.Insert(ERROR_NAMING_CONVENTION_CHECK_SINGLE_CHARACTER_VARIABLE, OtherMsg="The variable name [%s] does not follow the rules" % (Record[1]), BelongsToTable=FileTable, BelongsToItem=Record[0])
def FindPara(FilePath, Para, CallingLine):
Lines = open(FilePath).readlines()
Line = ''
for Index in range(CallingLine - 1, 0, -1):
# Find the nearest statement for Para
Line = Lines[Index].strip()
if Line.startswith('%s = ' % Para):
Line = Line.strip()
            return Line
return ''
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
Check = Check()
Check.Check()
|
[
"brbarkel@microsoft.com"
] |
brbarkel@microsoft.com
|
a3f6ae7c20cd1bfb28fbfeef13b36f8fe71a7f48
|
1abf781b85cade4c57d1784fbfa61298f1d11d7f
|
/assignment01/assignment01/stock_manager.py
|
60e3415f0a6a850356c8f823a968614dab30733e
|
[] |
no_license
|
bayram98/APME_class
|
3200ba5cb10ac2a79db968728cb4a9b7e33cf487
|
8fcfb198e0c0f7d135e711cc976534a01e10706a
|
refs/heads/master
| 2020-04-09T09:29:50.879851
| 2018-12-03T19:03:41
| 2018-12-03T19:03:41
| 160,235,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,112
|
py
|
"""
Refer to the assignment specification for more details
ID: 20162023
Name: Bayram Guvanjov
"""
from module03_stacks_and_queues.part00_preclass.stack import ArrayStack
class StockManager:
def __init__(self, o_name, o_surname):
"""
Initialises the Stock Manager with the name and surname of the portfolio
:param o_name: name of the owner of the porfolio
:param o_surname: surname of the portfolio
"""
self._o_name = o_name
self._o_surname = o_surname
self._list = {}
self._size = {}
self._profit = 0
def buy_shares(self, company, number, buy_price):
"""
Buy stocks
:param company: the company of which shares are being bought
:param number: the number of shares to buy
:param buy_price: the price (per share) at which shares are bought
"""
q = ArrayStack()
if company not in self._list:
self._list[company] = q
self._size[company] = 0
self._list[company].push([number, buy_price])
self._size[company] += number
def sell_shares(self, company, number, sell_price):
"""
Sell shares (only if you have enough to sell!)
:param company: the company of which shares are being bought
:param number: the number of shares to buy
:param sell_price: the price (per share) at which shares are sold
"""
profit = 0
if company in self._list:
if number > self._size[company]:
print(" Invalid amount of stock.")
else:
while number > 0:
last = self._list[company].top()
num = min( number , last[0] )
profit += ( sell_price - last[1] ) * num
number -= num
                    last[0] -= num
                    self._size[company] -= num  # keep the held-share count in sync after a sale
                    if last[0] == 0:
                        self._list[company].pop()
else:
print("Invalid input")
        self._profit += profit
return profit
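    # Worked example of the LIFO accounting above (illustrative numbers only):
    # buy 20 shares @ 100, then 10 shares @ 120; selling 15 shares @ 130 first
    # consumes the 10 bought at 120 (profit 10 * 10 = 100) and then 5 of the
    # shares bought at 100 (profit 5 * 30 = 150), for a total profit of 250.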
def buy_multiple(self, company_list, number_list, price_list):
for i in range(len(company_list)) :
self.buy_shares(company_list[i], number_list[i], price_list[i])
def sell_multiple(self, company_list, number_list, price_list):
Profit = 0
for i in range(len(company_list)) :
Profit += self.sell_shares(company_list[i], number_list[i], price_list[i])
return Profit
def get_profit(self):
print(self._profit)
if self._profit < 0:
print("Loss of ${0} from stock manager initialisation".format(self._profit * (-1)))
else:
print("Gain of ${0} from stock manager initialisation".format(self._profit))
return self._profit
"""
allows to print the current stock held by the investor (name of stocks, numbers of stocks, and
prices at which they were bought)
"""
def print_portfolio(self):
print("Name : {}".format(self._o_name))
print("Surname : {}".format(self._o_surname))
print("Net Profit by now :{}".format(self.get_profit()))
for i in self._list:
print(i)
self._list[i].print_contents()
print()
if __name__ == '__main__':
# extend this code to test all the functions of your portfolio manager
P = StockManager("Donald", "Trump")
P.buy_shares("UNIST", 20, 100)
P.buy_shares("Google", 20, 100)
print("Profit: {0}".format(P.sell_shares("Google", 5, 120)))
print("Current cumulative profit: {0}".format(P.get_profit()))
print("Profit: {0}".format(P.sell_shares("Google", 31, 127)))
print("Current cumulative profit: {0}".format(P.get_profit()))
print("Profit: {0}".format(P.sell_shares("Google", 2, 23)))
print("Current cumulative profit: {0}".format(P.get_profit()))
P.print_portfolio()
P.sell_shares("Google", 50, 150)
P.buy_multiple(["Google", "Apple"], [10, 56], [56, 27])
P.sell_multiple(["Google", "Apple"], [1, 1], [56, 27])
P.print_portfolio()
|
[
"noreply@github.com"
] |
bayram98.noreply@github.com
|
0b3b95468803f1c3c7f46c276d0bdb6e893daee2
|
4855811d329fecb387fe9964147a8be38e005496
|
/source/Solver/Distance_Solver.py
|
297e0e00f353dc36d70e3708f3cac248db1906dd
|
[
"MIT"
] |
permissive
|
afarahi/XTRA
|
a9b9570720895cd0c7bf800a301763073e7875e1
|
6550b216264abaa3ed705835aca0981f2934e069
|
refs/heads/master
| 2021-01-13T05:28:58.341354
| 2019-07-17T15:47:47
| 2019-07-17T15:47:47
| 86,624,796
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,748
|
py
|
from numpy import sin, sinh, sqrt, linspace, zeros, savetxt, array
def distance_integral_element(Input_Param,z):
dl = sqrt( Input_Param.Omega_M*(1.0+z)**3 +
Input_Param.Omega_DE*(1.0+z)**(3.0*(1.0+Input_Param.w)) +
Input_Param.Omega_R*(1.0+z)**4 -
Input_Param.Omega_k*(1.0+z)**2 )
return 1.0/dl
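# The integrand above is 1/E(z); the tabulation below evaluates the line-of-sight
# comoving distance D_C(z) = (c/H0) * integral_0^z dz' / E(z'), where
# E(z) = sqrt( Om*(1+z)^3 + Ode*(1+z)^(3*(1+w)) + Or*(1+z)^4 - Ok*(1+z)^2 )
# in this file's sign convention for Omega_k. Curved geometries are handled with
# sin/sinh before scaling by the Hubble distance c/H0 = 2997.92458 Mpc/h.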
def Proper_Distance_Tabulate(Input_Param, z_max):
# --- convert redshift to proper distance
integral_n = 10000
z = linspace(0.0, z_max, integral_n+1)
dz= z[1]-z[0]
rz= zeros(len(z))
integral_dl = 0.0
if (Input_Param.Omega_k > 0.0):
for i in range(1,integral_n+1):
integral_dl += dz*( distance_integral_element(Input_Param,z[i]) +
distance_integral_element(Input_Param,z[i-1]) ) / 2.0
rz[i] = 1.0/(sqrt(abs(Input_Param.Omega_k))) * sin(sqrt(abs(Input_Param.Omega_k))*integral_dl)
elif (Input_Param.Omega_k == 0.0):
for i in range(1,integral_n+1):
integral_dl += dz*( distance_integral_element(Input_Param,z[i]) +
distance_integral_element(Input_Param,z[i-1]) ) / 2.0
rz[i] = integral_dl
else:
for i in range(1,integral_n+1):
integral_dl += dz*( distance_integral_element(Input_Param,z[i]) +
distance_integral_element(Input_Param,z[i-1]) ) / 2.0
rz[i] = 1.0/(sqrt(abs(Input_Param.Omega_k))) * sinh(sqrt(abs(Input_Param.Omega_k))*integral_dl)
    rz = rz * 2997.92458  # scale by the Hubble distance c/H0 = 2997.92458 Mpc/h (H0 = 100 h km/s/Mpc)
# saving:
f = open("./Output/tabulated_data/Proper_Distance.txt", "w")
f.write("# redshift Proper distance (Mpc/h)\n") # column names
savetxt(f, array([z, rz]).T)
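# Illustrative usage sketch (the real Input_Param object is defined elsewhere in
# the package; a simple namespace with the same attribute names works here, and
# the ./Output/tabulated_data/ directory must already exist):
#   from types import SimpleNamespace
#   params = SimpleNamespace(Omega_M=0.3, Omega_DE=0.7, Omega_R=0.0, Omega_k=0.0, w=-1.0)
#   Proper_Distance_Tabulate(params, z_max=2.0)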
|
[
"aryaf66@gmail.com"
] |
aryaf66@gmail.com
|
9e5d3a6432c78cb8a03c5b9a86742cdb2c43ec43
|
0201c0e75575af3db0a57c1e2270c3934ccf68ae
|
/addok/core.py
|
3f9ca919534ad004ae913b8a0eb2ba2902617797
|
[
"WTFPL"
] |
permissive
|
eric-pommereau/addok
|
f694f6a5fcdff2131e8bf4c3b6ebc686cd2a2b4b
|
ab15e43ade7e434055c3a656fdd1f1fa04e6db75
|
refs/heads/master
| 2021-01-18T09:04:13.773277
| 2016-01-29T15:36:20
| 2016-01-29T15:36:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,071
|
py
|
import time
import geohash
from . import config
from .db import DB
from .helpers import keys
from .helpers.index import VALUE_SEPARATOR
from .helpers.text import ascii
def compute_geohash_key(geoh, with_neighbors=True):
if with_neighbors:
neighbors = geohash.expand(geoh)
neighbors = [keys.geohash_key(n) for n in neighbors]
else:
neighbors = [geoh]
key = 'gx|{}'.format(geoh)
total = DB.sunionstore(key, neighbors)
if not total:
# No need to keep it.
DB.delete(key)
key = False
else:
DB.expire(key, 10)
return key
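# Restating the behaviour above for quick reference (derived from the code, not a
# documented API): for a request around geohash 'u09tvw' the temporary key is
# 'gx|u09tvw', built with SUNIONSTORE over the per-cell sets returned by
# keys.geohash_key() for the cell's neighbourhood, and it expires after 10 seconds.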
class Result(object):
def __init__(self, _id):
self.housenumber = None
self._scores = {}
self.load(_id)
self.labels = []
def load(self, _id):
self._cache = {}
doc = DB.hgetall(_id)
if not doc:
raise ValueError('id "{}" not found'.format(_id[2:]))
self._doc = {k.decode(): v.decode() for k, v in doc.items()}
self.load_housenumbers()
def load_housenumbers(self):
self.housenumbers = {}
for key, value in self._doc.items():
if key.startswith('h|'):
self.housenumbers[key[2:]] = value
def __getattr__(self, key):
if key not in self._cache:
# By convention, in case of multiple values, first value is default
# value, others are aliases.
value = self._rawattr(key)[0]
self._cache[key] = value
return self._cache[key]
def __str__(self):
return (self.labels[0] if self.labels
else self._rawattr(config.NAME_FIELD)[0])
def _rawattr(self, key):
return self._doc.get(key, '').split(VALUE_SEPARATOR)
def __repr__(self):
return '<{} - {} ({})>'.format(str(self), self.id, self.score)
@property
def keys(self):
to_filter = ['importance', 'housenumbers', 'lat', 'lon']
keys = ['housenumber']
keys.extend(self._doc.keys())
housenumber = getattr(self, 'housenumber', None)
if housenumber:
keys.extend(config.HOUSENUMBERS_PAYLOAD_FIELDS)
for key in keys:
if key.startswith(('_', 'h|')) or key in to_filter:
continue
yield key
def to_geojson(self):
properties = {
"label": str(self),
}
if self._scores:
properties["score"] = self.score
for key in self.keys:
val = getattr(self, key, None)
if val:
properties[key] = val
housenumber = getattr(self, 'housenumber', None)
if housenumber:
if self._doc.get('type'):
properties[self._doc['type']] = properties.get('name')
properties['name'] = '{} {}'.format(housenumber,
properties.get('name'))
try:
properties['distance'] = int(self.distance)
except ValueError:
pass
return {
"type": "Feature",
"geometry": {
"type": "Point",
"coordinates": [float(self.lon), float(self.lat)]
},
"properties": properties
}
def add_score(self, name, score, ceiling):
if score >= self._scores.get(name, (0, 0))[0]:
self._scores[name] = (score, ceiling)
@property
def score(self):
if self._score != '':
return float(self._score)
score, _max = zip(*self._scores.values())
return sum(score) / sum(_max)
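    # Example of the blend above (illustrative numbers): with
    # _scores == {'str_distance': (0.9, 1.0), 'importance': (0.06, 0.1)} the
    # result is (0.9 + 0.06) / (1.0 + 0.1) ~= 0.87, unless an explicit score
    # was assigned through the setter below.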
@score.setter
def score(self, value):
self._score = value
@property
def str_distance(self):
return self._scores.get('str_distance', [0.0])[0]
@classmethod
def from_id(self, _id):
"""Return a result from it's document id."""
return Result(keys.document_key(_id))
class BaseHelper(object):
def __init__(self, verbose):
self._start = time.time()
if not verbose:
self.debug = lambda *args: None
def debug(self, *args):
s = args[0] % args[1:]
s = '[{}] {}'.format(str((time.time() - self._start) * 1000)[:5], s)
print(s)
class Search(BaseHelper):
SMALL_BUCKET_LIMIT = 10
MAX_MEANINGUL = 10
def __init__(self, match_all=False, fuzzy=1, limit=10, autocomplete=True,
verbose=False):
super().__init__(verbose=verbose)
self.match_all = match_all
self.fuzzy = fuzzy
        self.limit = limit
        self.min = self.limit
        self.autocomplete = autocomplete
    def __call__(self, query, lat=None, lon=None, **filters):
        self.lat = lat
        self.lon = lon
        self._geohash_key = None
        self.results = {}
        self.bucket = set([])  # No duplicates.
        self.meaningful = []
        self.not_found = []
        self.common = []
        self.keys = []
        self.matched_keys = set([])
        self.check_housenumber = filters.get('type') in [None, "housenumber"]
        self.filters = [keys.filter_key(k, v) for k, v in filters.items() if v]
        self.query = ascii(query.strip())
        for func in config.SEARCH_PREPROCESSORS:
            func(self)
        if not self.tokens:
            return []
        self.debug('Taken tokens: %s', self.meaningful)
        self.debug('Common tokens: %s', self.common)
        self.debug('Not found tokens: %s', self.not_found)
        self.debug('Filters: %s', ['{}={}'.format(k, v)
                                   for k, v in filters.items()])
        for collector in config.RESULTS_COLLECTORS:
            self.debug('** %s **', collector.__name__.upper())
            if collector(self):
                break
        return self.render()
    @property
    def geohash_key(self):
        if self.lat and self.lon and self._geohash_key is None:
            geoh = geohash.encode(self.lat, self.lon, config.GEOHASH_PRECISION)
            self._geohash_key = compute_geohash_key(geoh)
            if self._geohash_key:
                self.debug('Computed geohash key %s', self._geohash_key)
            else:
                self.debug('Empty geohash key, deleting %s', self._geohash_key)
        return self._geohash_key
    def render(self):
        self.convert()
        self._sorted_bucket = list(self.results.values())
        self._sorted_bucket.sort(key=lambda r: r.score, reverse=True)
        return self._sorted_bucket[:self.limit]
    def intersect(self, keys, limit=0):
        if not limit > 0:
            limit = config.BUCKET_LIMIT
        ids = []
        if keys:
            if self.filters:
                keys.extend(self.filters)
            if len(keys) == 1:
                ids = DB.zrevrange(keys[0], 0, limit - 1)
            else:
                DB.zinterstore(self.query, set(keys))
                ids = DB.zrevrange(self.query, 0, limit - 1)
                DB.delete(self.query)
        return set(ids)
    def add_to_bucket(self, keys, limit=None):
        self.debug('Adding to bucket with keys %s', keys)
        self.matched_keys.update([k for k in keys if k.startswith('w|')])
        limit = limit or (config.BUCKET_LIMIT - len(self.bucket))
        self.bucket.update(self.intersect(keys, limit))
        self.debug('%s ids in bucket so far', len(self.bucket))
    def new_bucket(self, keys, limit=0):
        self.debug('New bucket with keys %s and limit %s', keys, limit)
        self.matched_keys = set([k for k in keys if k.startswith('w|')])
        self.bucket = self.intersect(keys, limit)
        self.debug('%s ids in bucket so far', len(self.bucket))
    def convert(self):
        self.debug('Computing results')
        for _id in self.bucket:
            if _id in self.results:
                continue
            result = Result(_id)
            for processor in config.SEARCH_RESULT_PROCESSORS:
                processor(self, result)
            self.results[_id] = result
        self.debug('Done computing results')
    @property
    def bucket_full(self):
        l = len(self.bucket)
        return l >= self.min and l < config.BUCKET_LIMIT
    @property
    def bucket_overflow(self):
        return len(self.bucket) >= config.BUCKET_LIMIT
    @property
    def bucket_dry(self):
        return len(self.bucket) < self.min
    @property
    def bucket_empty(self):
        return not self.bucket
    @property
    def cream(self):
        return len([r for _id, r in self.results.items()
                    if r.str_distance >= config.MATCH_THRESHOLD])
    def has_cream(self):
        if self.bucket_empty or self.bucket_overflow or len(self.bucket) > 10:
            return False
        self.debug('Checking cream.')
        self.convert()
        return self.cream > 0
    @property
    def pass_should_match_threshold(self):
        return len(self.matched_keys) >= self.should_match_threshold
class Reverse(BaseHelper):
    def __call__(self, lat, lon, limit=1, **filters):
        self.lat = lat
        self.lon = lon
        self.keys = set([])
        self.results = []
        self.limit = limit
        self.fetched = []
        self.check_housenumber = filters.get('type') in [None, "housenumber"]
        self.filters = [keys.filter_key(k, v) for k, v in filters.items()]
        geoh = geohash.encode(lat, lon, config.GEOHASH_PRECISION)
        hashes = self.expand([geoh])
        self.fetch(hashes)
        if not self.keys:
            hashes = self.expand(hashes)
            self.fetch(hashes)
        return self.convert()
    def expand(self, hashes):
        new = []
        for h in hashes:
            neighbors = geohash.expand(h)
            for n in neighbors:
                if n not in self.fetched:
                    new.append(n)
        return new
    def fetch(self, hashes):
        self.debug('Fetching %s', hashes)
        for h in hashes:
            k = keys.geohash_key(h)
            self.intersect(k)
            self.fetched.append(h)
    def intersect(self, key):
        if self.filters:
            keys = DB.sinter([key] + self.filters)
        else:
            keys = DB.smembers(key)
        self.keys.update(keys)
    def convert(self):
        for _id in self.keys:
            result = Result(_id)
            for processor in config.REVERSE_RESULT_PROCESSORS:
                processor(self, result)
            self.results.append(result)
            self.debug(result, result.distance, result.score)
        self.results.sort(key=lambda r: r.score, reverse=True)
        return self.results[:self.limit]
def search(query, match_all=False, fuzzy=1, limit=10, autocomplete=False,
           lat=None, lon=None, verbose=False, **filters):
    helper = Search(match_all=match_all, fuzzy=fuzzy, limit=limit,
                    verbose=verbose, autocomplete=autocomplete)
    return helper(query, lat=lat, lon=lon, **filters)
def reverse(lat, lon, limit=1, verbose=False, **filters):
    helper = Reverse(verbose=verbose)
    return helper(lat, lon, limit, **filters)
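# Usage sketch (illustration only, not part of the original module): assuming the
# Redis backend behind `DB` has been populated by the indexing step and `config`
# is loaded, the two module-level helpers can be driven like this (the query and
# coordinates are made-up values):
#   hits = search('rue des lilas', limit=5, lat=48.85, lon=2.35, type='street')
#   for hit in hits:
#       print(hit, hit.score)
#   closest = reverse(lat=48.85, lon=2.35, limit=1)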
|
[
"yb@enix.org"
] |
yb@enix.org
|
aae4793daeef18f577ffd36084f0c7832b95c6b9
|
360b51c2b1b83856cdeda7dc5eac789e15ebd359
|
/Section 11/script5.py
|
9901a1176b9d6553cedef0c5f0c8db90fb9c5880
|
[] |
no_license
|
EfezinoErome/PythonMegaCourse
|
4df164d1092914e6826b4468d3c253e660cd6bec
|
b9ddba962145dd7a466ebcdf6e105a5c0f318c97
|
refs/heads/master
| 2021-04-28T22:19:34.420524
| 2017-01-25T18:27:29
| 2017-01-25T18:27:29
| 77,755,087
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 673
|
py
|
import folium
import pandas
df = pandas.read_csv("Volcanoes-USA.txt")
map = folium.Map(location=[df['LAT'].mean(),df['LON'].mean()],zoom_start = 5,tiles='Stamen Terrain')
def color(elev):
minimum = int(min(df['ELEV']))
step = int(((1/3)*(max(df["ELEV"]) - min(df["ELEV"]))))
    if elev in range(minimum, minimum + step):
col = 'green'
elif elev in range(minimum + step,minimum + (2*step)):
col = 'orange'
else:
col = 'red'
return col
for lat,lon,name,elev in zip(df['LAT'],df['LON'],df['NAME'],df['ELEV']):
map.simple_marker(location=[lat, lon],popup=name,marker_color = color(elev))
map.create_map(path="test.html")
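# Illustration only: color() splits the elevation range into three equal buckets.
# If, say, ELEV ran from 600 to 3600, step would be 1000, so elevations in
# [600, 1600) map to 'green', [1600, 2600) to 'orange', and everything else to
# 'red'; the real thresholds depend on the values in Volcanoes-USA.txt.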
|
[
"johnerome09@gmail.com"
] |
johnerome09@gmail.com
|
3412068c88db0a4b2867e3f9492c89f4caddc089
|
a7f6321c52f8993f20afd4bf1934f6a0223c9a00
|
/LearningOpenCV3WithPython/temperatureMonitoring/MatplotlibWidget.py
|
d41ce578a4fe1085d0caf20b541518ad9145a87b
|
[] |
no_license
|
CycloneBoy/pythonLearning
|
23a1aef5e8345a0ad2d7dffcdd0cd75426f9035b
|
1a7cbbb0add0521f7eb080801d560fac70fd0e0d
|
refs/heads/master
| 2021-01-23T23:45:37.883636
| 2018-06-04T15:41:06
| 2018-06-04T15:41:06
| 116,812,409
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,261
|
py
|
import sys
import random
import matplotlib
import datetime
import matplotlib.dates as mdates
import matplotlib.cbook as cbook
import numpy as np
import csv, pyodbc
matplotlib.use("Qt5Agg")
from PyQt5 import QtCore
from PyQt5.QtWidgets import QApplication, QMainWindow, QVBoxLayout, QSizePolicy, QWidget
from numpy import arange, sin, pi
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
class MyMplCanvas(FigureCanvas):
"""FigureCanvas的最终的父类其实是QWidget。"""
def __init__(self, parent=None, width=5, height=4, dpi=100):
self.tableNameSTemp = 'STemp'
self.tableNameBTemp = 'BTemp'
        # Configure fonts so Chinese text displays correctly
        plt.rcParams['font.family'] = ['SimHei']  # needed so Chinese labels render correctly
        plt.rcParams['axes.unicode_minus'] = False  # needed so the minus sign renders correctly
        self.fig = Figure(figsize=(width, height), dpi=dpi)  # create a new figure
self.ax1 = self.fig.add_subplot(1, 1, 1)
# self.ax1.legend(bbox_to_anchor=(1, 1), loc=1, borderaxespad=0.)
self.ax1.set_xlabel('幅值')
self.ax1.set_ylabel('时间')
self.ax1.set_ylim(top=60.0,bottom=0.)
self.ax1.grid(True)
# plt.ylim(ymin=0,ymax=60)
# format the ticks
self.ax1.xaxis.set_major_locator(mdates.DayLocator())
self.ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y/%m/%d'))
self.ax1.xaxis.set_minor_locator(
mdates.HourLocator(byhour=range(0, 24), interval=3))
for label in self.ax1.xaxis.get_ticklabels():
label.set_rotation(30)
FigureCanvas.__init__(self, self.fig)
self.setParent(parent)
        '''Define the size policy of the FigureCanvas so that it expands to fill as much space as possible.'''
FigureCanvas.setSizePolicy(self,
QSizePolicy.Expanding,
QSizePolicy.Expanding)
FigureCanvas.updateGeometry(self)
    # Plot the left-hand chart
def start_plot_left(self,dateA50, dataA50, columnNameA50, dataB50, columnNameB50):
# dateA50, dataA50, columnNameA50 = self.selectData(tableName, columnIndexOne,queryBeginTime,queryBeginEnd)
print("左边的图数据:",len(dateA50),len(dataA50) ,columnNameA50, columnNameB50)
self.ax1.clear()
line1 = self.ax1.plot(dateA50, dataA50, label=columnNameA50)
# dateB50, dataB50, columnNameB50 = self.selectData(tableName, columnIndexTwo,queryBeginTime,queryBeginEnd)
line2 = self.ax1.plot(dateA50, dataB50, label=columnNameB50)
# self.selfAxSetting()
self.ax1.legend(bbox_to_anchor=(1, 1), loc=1, borderaxespad=0.)
self.ax1.set_xlabel('幅值')
self.ax1.set_ylabel('时间')
self.ax1.grid(True)
# format the ticks
self.ax1.xaxis.set_major_locator(mdates.DayLocator())
self.ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y/%m/%d'))
self.ax1.xaxis.set_minor_locator(
mdates.HourLocator(byhour=range(0, 24), interval=3))
for label in self.ax1.xaxis.get_ticklabels():
label.set_rotation(30)
self.draw()
    # Plot the right-hand chart
def start_plot_right(self,dateA50, dataA50, columnNameA50):
self.ax1 = self.fig.add_subplot(1, 1, 1)
# dateA50, dataA50, columnNameA50 = self.selectData(tableName, columnIndexOne,queryBeginTime,queryBeginEnd)
print("右边的图数据:", len(dateA50), columnNameA50 )
self.ax1.clear()
line1 = self.ax1.plot(dateA50, dataA50, label=columnNameA50)
# self.selfAxSetting()
self.ax1.legend(bbox_to_anchor=(1, 1), loc=1, borderaxespad=0.)
self.ax1.set_xlabel('幅值')
self.ax1.set_ylabel('时间')
self.ax1.grid(True)
# format the ticks
self.ax1.xaxis.set_major_locator(mdates.DayLocator())
self.ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y/%m/%d'))
self.ax1.xaxis.set_minor_locator(
mdates.HourLocator(byhour=range(0, 24), interval=3))
for label in self.ax1.xaxis.get_ticklabels():
label.set_rotation(30)
self.draw()
    # Chart display settings
def selfAxSetting(self):
self.ax1.legend(bbox_to_anchor=(1, 1), loc=1, borderaxespad=0.)
self.ax1.set_xlabel('幅值')
self.ax1.set_ylabel('时间')
self.ax1.grid(True)
# format the ticks
self.ax1.xaxis.set_major_locator(mdates.DayLocator())
self.ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y/%m/%d'))
self.ax1.xaxis.set_minor_locator(
mdates.HourLocator(byhour=range(0, 24), interval=3))
for label in self.ax1.xaxis.get_ticklabels():
label.set_rotation(30)
self.draw()
    # Plot the figure
def plotTableData(self):
fig = plt.figure()
fig.suptitle('figure title demo', fontsize=14, fontweight='bold')
ax1 = fig.add_subplot(1, 1, 1)
dateA50, dataA50, columnNameA50 = self.selectData(self.tableNameSTemp, 2)
line1 = ax1.plot(dateA50, dataA50, label=columnNameA50)
dateB50, dataB50, columnNameB50 = self.selectData(self.tableNameBTemp, 6)
line2 = ax1.plot(dateB50, dataB50, label=columnNameB50)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
ax1.set_title("axes title")
ax1.set_xlabel('幅值')
ax1.set_ylabel('时间')
ax1.grid(True)
# format the ticks
ax1.xaxis.set_major_locator(mdates.DayLocator())
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y/%m/%d'))
ax1.xaxis.set_minor_locator(
mdates.HourLocator(byhour=range(0, 24), interval=3))
for label in ax1.xaxis.get_ticklabels():
label.set_rotation(90)
plt.show()
    # Query data from the database
def selectData(self,TableName, columnIndex,queryBeginTime,queryBeginEnd):
# set up some constants
MDB = 'D:\python\measurement data.mdb'
DRV = '{Microsoft Access Driver (*.mdb, *.accdb)}'
PWD = ''
# connect to db
con = pyodbc.connect('DRIVER={};DBQ={};PWD={}'.format(DRV, MDB, PWD))
cur = con.cursor()
        # Print the database table names
for table_info in cur.tables(tableType='TABLE'):
print(table_info.table_name)
columnNameList = []
for row in cur.columns(table=TableName):
# print(row.column_name)
columnNameList.append(row.column_name)
        # # Query all rows
# for row in cur.execute("select * from " + TableName + " where ID = 1"):
# print("{}".format(row))
SQL = 'SELECT * FROM ' + TableName + ' ;'
rows = cur.execute(SQL).fetchall()
print(len(rows))
        # Filter the data to display
dateList = []
dataList = []
beginQueryDatatime = datetime.datetime(2016, 10, 18, 0, 0, 0)
endQueryDatatime = datetime.datetime(2016, 10, 28, 0, 0, 0)
print("查询时间: ", str(queryBeginTime), str(queryBeginEnd))
for row in rows:
if row.时间 > queryBeginTime and row.时间 < queryBeginEnd:
# print(row.ID, row.时间, row.A50,row.气温)
dateList.append(row[1])
dataList.append(row[columnIndex])
cur.close()
con.close()
return dateList, dataList, TableName[0] + '-' + columnNameList[
columnIndex]
class MatplotlibWidget(QWidget):
def __init__(self, parent=None):
super(MatplotlibWidget, self).__init__(parent)
self.initUi()
def initUi(self):
self.layout = QVBoxLayout(self)
self.mpl = MyMplCanvas(self, width=5, height=6, dpi=100)
self.mpl_ntb = NavigationToolbar(self.mpl, self) # 添加完整的 toolbar
self.layout.addWidget(self.mpl)
self.layout.addWidget(self.mpl_ntb)
if __name__ == '__main__':
app = QApplication(sys.argv)
ui = MatplotlibWidget()
ui.show()
sys.exit(app.exec_())
|
[
"xuanfeng1992@gmail.com"
] |
xuanfeng1992@gmail.com
|
bcdfa2627a3a7059adc14cc77b201eb84f302170
|
d4b6480e6c06b6be8670abcbed4e30b8e2c617ed
|
/ttuple.py
|
2774b1aefba32fd2047e19f853c0af6b7b445412
|
[] |
no_license
|
kexiaomeng/python
|
c31bf2096e477fec0ef964de9871165abe471e71
|
bc4af30fca7ea4f38d6324aff1919c802f95bfbd
|
refs/heads/master
| 2020-03-31T06:58:08.546235
| 2018-10-08T02:53:00
| 2018-10-08T02:53:00
| 152,001,755
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,933
|
py
|
record = [
("foo", 1, 2),
("bar", "hello"),
("foo", 3,4)
]
def do_foo(x, y):
print("foo", x, y)
def do_bar(x):
print("bar", x)
for tag, *arg in record:
if tag == "foo":
do_foo(*arg)
elif tag == "bar":
do_bar(*arg)
import collections  # deque (queue)
q = collections.deque(maxlen=3)
q.append(1)
q.append(2)
q.append(3)
print(q)
q.append(4)
print(q)
print(q.pop())
print(q.popleft())
q.appendleft(5)
print(q)
#################################
# Find the largest or smallest values
import heapq
nums = [1,3,6,8,2,3,4]
print(heapq.nlargest(2,nums))
print(heapq.nsmallest(2,nums))
print(heapq.heappop(nums))
############################
items = [1,2,3,4,5,6,7,8,9,10]
def sum(item):  # recursion
head, *tail = item
# if(len(tail)>0):
# return head + sum(tail)
# else:
# return head
return head + sum(tail) if tail else head
print(sum(items))
li = [11, 22, 33]
new_list = map(lambda a: a + 100, li)
new_list1=[]
if new_list1:
print("null")
else:
print('not null')
from collections import deque
def search(lines, pattern, history=5):
previous_lines = deque(maxlen=history)
for li in lines:
if pattern in li:
yield li, previous_lines
previous_lines.append(li)
# Example use on a file
if __name__ == '__main__':
with open(r'sun.txt') as f:
for line, prevlines in search(f, 'python', 5):
for pline in prevlines:
print(pline, end='')
print(line, end='')
print('-' * 20)
for i in range(1000):
pass
###################################
import collections
# d = collections.defaultdict(set)
# print("##############")
# d['a'].add('1')
# d['b'].add('2')
# d['a'].add('3')
# d['a'].add('3')
d = collections.defaultdict(list)
print("##############")
d['a'].append('1')
d['b'].append('2')
d['a'].append('3')
d['a'].append('3')
print(d)
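# Illustration only: with defaultdict(list) the repeated '3' above is kept,
# so d ends up as {'a': ['1', '3', '3'], 'b': ['2']}; the commented-out
# defaultdict(set) variant would deduplicate it to {'a': {'1', '3'}, 'b': {'2'}}.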
|
[
"307621564@qq.com"
] |
307621564@qq.com
|
2f50affd6eebd0aa5aa0d92f85fa6e2305bda42f
|
a22aa358db0a9bc2c9dcd5b152b5437b49ad95b5
|
/test/utils_tests.py
|
c44448e1679e2f49b74bf0ee9f7780dc4a593957
|
[
"MIT"
] |
permissive
|
JakubPetriska/poker-agent-kit
|
17d79e37f30cffc6642db9b6bc4f689376f36c88
|
12c28711c91447c708719454d1fbd224fa03189e
|
refs/heads/master
| 2021-10-25T20:00:39.501902
| 2019-04-06T19:40:55
| 2019-04-06T19:40:55
| 101,301,631
| 21
| 4
|
MIT
| 2018-11-17T00:42:56
| 2017-08-24T14:06:00
|
Python
|
UTF-8
|
Python
| false
| false
| 763
|
py
|
import unittest
from tools.utils import flatten, intersection, is_unique
class UtilsTests(unittest.TestCase):
def test_flatten(self):
self.assertEqual(
flatten([1, 2], [3], [4, 5]),
[1, 2, 3, 4, 5])
def test_intersection_empty(self):
self.assertEqual(
intersection([1, 2], [3, 4]),
set([]))
def test_intersection_non_empty(self):
self.assertEqual(
intersection([1, 2, 3, 4], [2, 4]),
set([2, 4]))
def test_is_unique_true(self):
self.assertEqual(
is_unique((1, 2), [3], [4, 5]),
True)
def test_is_unique_false(self):
self.assertEqual(
is_unique([1, 2], [3], [3, 4, 5]),
False)
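# Allows the tests to be run directly (python utils_tests.py) as well as via a
# test runner such as `python -m unittest`.
if __name__ == '__main__':
    unittest.main()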
|
[
"jakubpetriska@gmail.com"
] |
jakubpetriska@gmail.com
|
b396d78f6de5178ad51c6a7d172d32c072adf5d5
|
5d26fb700ae9d40bf142b2773777faaf0d0c8c47
|
/Myblog/blog/admin.py
|
4da013a0c43af4f1d236834ddea1aeb21fb90b97
|
[] |
no_license
|
yangshuer/myblogs
|
16e4bb11c0693351536fe019dde0160c7919daeb
|
f6df13ced173aeb15920ec63672c3c862bb2e2d9
|
refs/heads/master
| 2020-05-25T01:23:51.989145
| 2019-05-20T02:27:15
| 2019-05-20T02:27:15
| 187,554,667
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 631
|
py
|
from django.contrib import admin
from blog.models import Article,BlockInfo,TextInfo,Images
# Register your models here.
# One-to-many inline editing
class TextToImage(admin.StackedInline):
model = Images
extra = 5
class BlockAdmin(admin.ModelAdmin):
list_display = ['Block_in']
class TextAdmin(admin.ModelAdmin):
inlines = [TextToImage,]
list_display = ['id','title','author','Block','image','introduce','text','date']
class ImagesAdmin(admin.ModelAdmin):
list_display = ['id','image','Text']
admin.site.register(BlockInfo,BlockAdmin)
admin.site.register(TextInfo,TextAdmin)
admin.site.register(Images,ImagesAdmin)
|
[
"yangxue@yangxuedeMacBook-Pro.local"
] |
yangxue@yangxuedeMacBook-Pro.local
|
ee0e3a0adf316753938f11a769a36dbf93754a35
|
6238dc5b5818f54295547cf4cb1afa5553ddfb94
|
/taobao/top/api/rest/ItemAnchorGetRequest.py
|
79056fcdfb49a86ae2bd6eaefc98665235267027
|
[] |
no_license
|
liaosiwei/guagua
|
8208bb82b1df5506dcb86c1a7094c849ea5576a6
|
ee6025813e83568dc25beb52279c86f8bd33f1a4
|
refs/heads/master
| 2016-09-06T16:45:00.798633
| 2013-05-03T04:02:35
| 2013-05-03T04:02:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 325
|
py
|
'''
Created by auto_sdk on 2013-04-14 16:35:32
'''
from top.api.base import RestApi
class ItemAnchorGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.cat_id = None
self.type = None
def getapiname(self):
return 'taobao.item.anchor.get'
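# Usage sketch (illustration only; assumes valid Taobao app credentials and the
# SDK's RestApi plumbing; the category id is a made-up value):
#   req = ItemAnchorGetRequest()
#   req.cat_id = 50010388
#   req.type = 1
#   resp = req.getResponse()
#   print(resp)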
|
[
"liaosiweiorxiaowei@gmail.com"
] |
liaosiweiorxiaowei@gmail.com
|
f26d408d3dd24e65a2b3a849ccf4b965215ca6a9
|
94f591d4a966b66b1e1c7096b22f77de2be899d6
|
/memory/dns.py
|
c0ffda400cbdbb9b43e9511e8dbb423795b27d04
|
[] |
no_license
|
obulpathi/python
|
59961ac509c51fde0f9fa1461550682b3e4050fa
|
ef3c20ac985a28ac911f9a4badb619436ee487b5
|
refs/heads/master
| 2016-09-05T09:34:22.879012
| 2015-07-31T15:15:06
| 2015-07-31T15:15:06
| 21,796,513
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,936
|
py
|
import gc
import os
import sys
import time
import requests
import pyrax
import pyrax.exceptions as exc
from memory_profiler import profile
pyrax.set_setting("identity_type", "rackspace")
creds_file = os.path.expanduser("~/.credentials.cfg")
pyrax.set_credential_file(creds_file)
dns = pyrax.cloud_dns
def get_domains():
headers = {'X-Auth-Token': 'ee0a869233ed4fe7ac68f47dafdc4a0f',
'Accept': 'application/json',
'Content-Type': 'application/json',
'Content-Length': 0}
url = 'https://dns.api.rackspacecloud.com/v1.0/901255/domains'
for i in range(10000):
response = requests.get(url, headers=headers)
print i, response
time.sleep(1)
@profile(precision=3)
def bar():
get_domains()
def foo():
#gc.disable()
#gc.set_debug(gc.DEBUG_LEAK)
#gc.set_threshold(10,0,0)
    # List the domains 16 times, pausing a second between calls, to watch
    # memory and garbage-collector behaviour.
    for _ in range(16):
        domains = dns.list()
        print len(domains)
        #gc.collect()
        time.sleep(1)
        print("Garbage: {}".format(gc.garbage))
if __name__ == "__main__":
bar()
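# Note: @profile(precision=3) comes from memory_profiler, so running this script
# directly prints a line-by-line memory report for bar(); foo() is the
# unprofiled variant kept for manual garbage-collection experiments.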
|
[
"obulapathi.challa@rackspace.com"
] |
obulapathi.challa@rackspace.com
|
dc632caf0c068c70908b7b2623e27d816a2a074e
|
903c2f1cb06091f8199bf63d74111fe79dac6f8d
|
/Initial_Assignment/3.14.Pattern.py
|
d253939f3c113f5f1aa56fdb3f9942c37ff28477
|
[] |
no_license
|
praneethpeddi/Python-Assignments
|
c8b4d8fa58244b033bb4cdf36f55e42b7e7cf6fc
|
0a224aeae40a4cc55a5a7259ff2360fc61841abd
|
refs/heads/master
| 2023-01-19T07:58:59.144309
| 2020-11-19T01:46:45
| 2020-11-19T01:46:45
| 299,494,119
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
n = int(input('Enter n value: '))
value = 0
temp = n
while n > 0:
for i in range(0, 2 * temp - 1):
if i >= temp:
value = value - 1
else:
value = value + 1
if value <= n:
print(value, end=' ')
else:
print(' '*2, end='')
value = 0
n = n - 1
print()
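# Example (illustration): for n = 3 the first row counts up to the peak and back
# down ("1 2 3 2 1"); each later row blanks out values greater than the remaining
# n, so the peak shrinks until only the outer 1s are printed.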
|
[
"praneethpeddi1995@gmail.com"
] |
praneethpeddi1995@gmail.com
|
e9964654a7d249c9d2d76a7e29a33b184d8b7231
|
cccef35f008e9676f6b207f02bd31b0f8d432d27
|
/PrimeNumberFinder/venv/bin/easy_install-3.8
|
256a2c1c96fc14fd7c524174873ac0a5b238bd59
|
[] |
no_license
|
finxter/MiniProjects
|
e8a1a9270194dd467198276c46849e9744efc625
|
d754864ddca9b362f043a6eccd37f9b24cd87738
|
refs/heads/main
| 2023-02-14T01:54:04.230460
| 2021-01-02T13:58:40
| 2021-01-02T13:58:40
| 315,742,089
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 499
|
8
|
#!/Users/lukas/Documents/Coding/PyCharm/Projects/PythonBeginnerProjects/PrimeNumberFinder/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.8'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
)
|
[
"rieger.lks@gmail.com"
] |
rieger.lks@gmail.com
|
8b696b10470fa0eeb7fcd2e45fdea033d869cab7
|
6751c663dc4c34e41e627658267179aac0f6859e
|
/app/api/migrations/0001_initial.py
|
d778345db7ee5a2bb754a1b3eb918afaf4976468
|
[
"MIT"
] |
permissive
|
deckTECHeu/profiles-rest-api
|
d97f9d86cac9499b24ababb2cd9319c791401107
|
031a917f2c5dfbccca60ffec4c55addf57433e61
|
refs/heads/main
| 2023-03-04T08:12:19.115485
| 2021-02-21T20:12:32
| 2021-02-21T20:12:32
| 339,798,633
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,716
|
py
|
# Generated by Django 2.1.15 on 2021-02-21 18:02
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
|
[
"akozlowski@afdata.local"
] |
akozlowski@afdata.local
|
ae3d486028cdfe78a2c66dd51bae9ee8a1a6b563
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_173/ch43_2020_03_25_19_08_28_308004.py
|
72d28a1bb3f599fe72957af11372f4f38a04b902
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 257
|
py
|
meses = {
    1: 'janeiro',
    2: 'fevereiro',
    3: 'março',
    4: 'abril',
    5: 'maio',
    6: 'junho',
    7: 'julho',
    8: 'agosto',
    9: 'setembro',
    10: 'outubro',
    11: 'novembro',
    12: 'dezembro',
}
pergunta = int(input('Escreva o número do mês: '))
print(meses[pergunta])
|
[
"you@example.com"
] |
you@example.com
|
baec9a3ff03abd76ae8a1fede5d7d5b7b201a35a
|
1c40a5b1e7ffb7ffed2cfe831c1686aa4af82284
|
/omm/analysis/aes/aes_excel_geo.py
|
3129856e444e11a47ae139c318d80b353245afe6
|
[] |
no_license
|
rhodges/oregon-marinemap
|
3c4bb3c13f15ec5f2efd405b0006c8a85d3b42b0
|
4a3797f507a48fd158be5f751fa0ca8c24a47cb7
|
refs/heads/master
| 2016-09-05T17:49:44.702600
| 2011-11-04T15:56:18
| 2011-11-04T15:56:18
| 32,354,262
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,740
|
py
|
import xlwt
from analysis.excel.utils import major_heading_style, minor_heading_style, heading_column_style, data_style, perc_style
from analysis.excel.geography import geo_spatial_headers, geo_spatial_data
def populate_geo_sheet(ws, context):
geo_header(ws, context)
geo_spatial_headers(ws)
geo_spatial_data(ws, context)
geo_setting_headers(ws, context)
geo_setting_data(ws, context)
def geo_header(ws, context):
ws.write(0, 0, "Energy Site Geography Report for %s" % context['aes'].name, major_heading_style)
def geo_setting_headers(ws, context, row=7):
ws.write(row-1, 0, "Geographic Setting", minor_heading_style)
if len(context['county']) > 1:
ws.write(row, 1, "Adjacent Counties", heading_column_style)
else:
ws.write(row, 1, "Adjacent County", heading_column_style)
ws.write(row, 2, 'Nearest Cities', heading_column_style)
ws.write(row, 3, "Nearest Ports", heading_column_style)
ws.write(row, 4, "Nearest Marinas", heading_column_style)
def geo_setting_data(ws, context, row=8):
offset = 0
for county in context['county']:
ws.write(row+offset, 1, county, data_style)
offset += 1
offset = 0
for city in context['cities']:
ws.write(row+offset, 2, str(city[0]) + ' (%.1f %s)' % (city[1], context['length_units']), data_style)
offset += 1
offset = 0
for port in context['ports']:
ws.write(row+offset, 3, str(port[0]) + ' (%.1f %s)' % (port[1], context['length_units']), data_style)
offset += 1
offset = 0
for marina in context['marinas']:
ws.write(row+offset, 4, str(marina[0]) + ' (%.1f %s)' % (marina[1], context['length_units']), data_style)
offset += 1
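# Usage sketch (illustration only; the context values below are hypothetical and
# would normally be built by the surrounding analysis code):
#   import xlwt
#   wb = xlwt.Workbook()
#   ws = wb.add_sheet('Geography')
#   context = {'aes': site, 'county': ['Lincoln'], 'cities': [('Newport', 2.5)],
#              'ports': [('Port of Newport', 3.1)], 'marinas': [('Embarcadero', 2.9)],
#              'length_units': 'miles'}
#   populate_geo_sheet(ws, context)
#   wb.save('aes_geography.xls')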
|
[
"sfletche@gmail.com"
] |
sfletche@gmail.com
|
6f556dcc6d01cb80678c187bac0d3dd3fd5c7fde
|
174833cc3f8ea752ecfa5ffbbdfbab2798dc8745
|
/ml_params_tensorflow/ml_params/extra_symbols.py
|
eecf0002e3b84ec8e61c0cee6b2ff11d02420b23
|
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
SamuelMarks/ml-params-tensorflow
|
35098d95de15b65d4bf7ae944eb9a62efb245584
|
86fb92147443e69982d05755361b101f8a6f64e5
|
refs/heads/master
| 2023-04-22T14:59:15.464011
| 2021-04-08T11:03:24
| 2021-04-08T11:03:24
| 276,570,010
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 361
|
py
|
"""
The symbols to expose. This is the public API used by ml_params.
"""
from importlib import import_module
extra_symbols = {
"loss"
if mod == "losses"
else mod: import_module("ml_params_tensorflow.ml_params.{mod}".format(mod=mod))
for mod in ("callbacks", "losses", "metrics", "optimizers")
}
del import_module
__all__ = ["extra_symbols"]
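# Usage sketch (illustration only; assumes ml_params_tensorflow and its
# dependencies are installed):
#   from ml_params_tensorflow.ml_params.extra_symbols import extra_symbols
#   loss_module = extra_symbols["loss"]        # wraps ...ml_params.losses
#   optimizers_module = extra_symbols["optimizers"]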
|
[
"807580+SamuelMarks@users.noreply.github.com"
] |
807580+SamuelMarks@users.noreply.github.com
|