Dataset schema (column name, dtype, value statistics):

    repo_name        string    length 5 to 92
    path             string    length 4 to 221
    copies           string    19 distinct values
    size             string    length 4 to 6
    content          string    length 766 to 896k
    license          string    15 distinct values
    hash             int64     -9,223,277,421,539,062,000 to 9,223,102,107B
    line_mean        float64   6.51 to 99.9
    line_max         int64     32 to 997
    alpha_frac       float64   0.25 to 0.96
    autogenerated    bool      1 class
    ratio            float64   1.5 to 13.6
    config_test      bool      2 classes
    has_no_keywords  bool      2 classes
    few_assignments  bool      1 class
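For orientation, the short sketch below (not part of the dump) shows one way rows with this schema could be loaded and filtered on the quality-heuristic columns using pandas. The file name code_dataset.jsonl, the assumption that the dump has been exported to JSON Lines with these column names, and the thresholds are all hypothetical placeholders, not values taken from this dataset.

# Minimal sketch, assuming a hypothetical JSON Lines export of this dump with
# the column names from the schema above; file name and thresholds are placeholders.
import pandas as pd

df = pd.read_json("code_dataset.jsonl", lines=True)

# Keep rows that look hand-written and reasonably formatted, using the
# heuristic columns the schema exposes.
kept = df[
    (~df["autogenerated"])
    & (~df["config_test"])
    & (df["line_max"] < 200)     # no extremely long single lines
    & (df["alpha_frac"] > 0.3)   # mostly alphabetic text, not binary/data blobs
]
print(len(kept), "of", len(df), "rows kept")

The rows of the dataset follow, one field value per line in the column order given above.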
hakril/PythonForWindows
samples/process/thread.py
1
1631
import sys
import os.path

sys.path.append(os.path.abspath(__file__ + "\..\.."))

import windows
import windows.native_exec.simple_x86 as x86
import windows.native_exec.simple_x64 as x64

print("Creating a notepad")
## Replaced calc.exe by notepad.exe cause of windows 10.
notepad = windows.utils.create_process(r"C:\windows\system32\notepad.exe")

# You don't need to do that in our case, but it's useful to now
print("Priting threads")
for th in notepad.threads:
    print(" * {0}".format(th))

print("Writing some code in memory")
if notepad.bitness == 32:
    code = "mov eax, 0x42424242; label :start ; jmp :start; nop; nop; ret"
    rawcode = x86.assemble(code)
else:
    code = "mov rax, 0x4242424242424242; label :start ; jmp :start; nop; nop; ret"
    rawcode = x64.assemble(code)

print("Allocating memory")
with notepad.allocated_memory(0x1000) as addr:
    print("Writing code at <{0:#x}>".format(addr))
    notepad.write_memory(addr, rawcode)
    print("Creating thread on injected code")
    t = notepad.create_thread(addr, 0x11223344)
    print("New thread is {0}".format(t))
    print("Suspending thread")
    t.suspend()
    ctx = t.context
    print("Thread context is {0}".format(ctx))
    print("Dumping thread context:")
    ctx.dump()
    print("Changing context")
    ctx.pc += 2  # EIP / RIP
    ctx.func_result = 0x12345678  # EAX / RAX
    print("Setting new thread context")
    t.set_context(ctx)
    print("Resuming thread")
    t.resume()
    print("Waiting thread")
    t.wait()
    print("Thread has exit: {0}".format(t.is_exit))
    print("Thread exit value = {0:#x}".format(t.exit_code))
bsd-3-clause
-8,247,220,387,132,427,000
28.125
84
0.667075
false
3.04291
false
false
false
start-jsk/jsk_apc
demos/grasp_data_generator/scripts/generate_evaluation_data.py
1
3866
import argparse
import cv2
import datetime
import json
import matplotlib.pyplot as plt
import numpy as np
import os
import os.path as osp
import PIL.Image
import PIL.ImageDraw
import yaml

from chainercv.utils.mask.mask_to_bbox import mask_to_bbox
from grasp_data_generator.visualizations \
    import vis_occluded_instance_segmentation


filepath = osp.dirname(osp.realpath(__file__))
dataset_dir = osp.join(filepath, '../data/evaluation_data')
yamlpath = osp.join(filepath, '../yaml/dualarm_grasping_label_names.yaml')


def main(datadir, visualize):
    time = datetime.datetime.now()
    timestamp = time.strftime('%Y%m%d_%H%M%S')
    with open(yamlpath, 'r') as yaml_f:
        label_names = yaml.load(yaml_f)[1:]

    for scene_d in sorted(os.listdir(datadir)):
        scenedir = osp.join(datadir, scene_d)
        ins_imgs = []
        label = []
        for time_d in sorted(os.listdir(scenedir))[::-1]:
            timedir = osp.join(scenedir, time_d)
            savedir = osp.join(dataset_dir, timestamp, time_d)
            if not osp.exists(savedir):
                os.makedirs(savedir)
            rgbpath = osp.join(timedir, 'masked_rgb.png')
            annopath = osp.join(timedir, 'masked_rgb.json')
            rgb = cv2.imread(rgbpath)[:, :, ::-1]
            with open(annopath, 'r') as json_f:
                data = json.load(json_f)
            H, W = data['imageHeight'], data['imageWidth']
            msk = np.zeros((H, W), dtype=np.uint8)
            msk = PIL.Image.fromarray(msk)
            draw = PIL.ImageDraw.Draw(msk)
            shape = data['shapes'][0]
            label_name = shape['label']
            points = shape['points']
            xy = [tuple(point) for point in points]
            draw.polygon(xy=xy, outline=1, fill=1)
            msk = np.array(msk, dtype=np.int32)

            next_ins_imgs = []
            next_label = []
            for ins_id, (ins_img, lbl) in enumerate(zip(ins_imgs, label)):
                occ_msk = np.logical_and(ins_img > 0, msk > 0)
                ins_img[occ_msk] = 2
                if not np.any(ins_img == 1):
                    print('{} is occluded and no more visible'
                          .format(label_names[lbl]))
                else:
                    next_ins_imgs.append(ins_img)
                    next_label.append(lbl)
            ins_imgs = next_ins_imgs
            label = next_label
            ins_imgs.append(msk[None])
            lbl = label_names.index(label_name)
            label.append(lbl)

            if visualize:
                vis_rgb = rgb.transpose((2, 0, 1))
                vis_ins_imgs = np.concatenate(
                    ins_imgs, axis=0).astype(np.int32)
                bbox = mask_to_bbox(vis_ins_imgs > 0)
                vis_occluded_instance_segmentation(
                    vis_rgb, vis_ins_imgs, label, bbox,
                    label_names=label_names)
                plt.show()

            rgb_savepath = osp.join(savedir, 'rgb.png')
            ins_imgs_savepath = osp.join(savedir, 'ins_imgs.npz')
            label_savepath = osp.join(savedir, 'labels.yaml')

            cv2.imwrite(rgb_savepath, rgb)
            np.savez_compressed(
                ins_imgs_savepath,
                ins_imgs=np.concatenate(ins_imgs, axis=0).astype(np.int32))
            np.savez_compressed
            with open(label_savepath, 'w+') as yaml_save_f:
                yaml_save_f.write(yaml.dump(label))

    with open(osp.join(dataset_dir, timestamp, 'label_names.yaml'), 'w+') as f:
        f.write(yaml.dump(label_names))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--visualize', '-v', action='store_true')
    parser.add_argument('--data-dir', '-d')
    args = parser.parse_args()
    datadir = osp.join(filepath, args.data_dir)
    main(datadir, args.visualize)
bsd-3-clause
2,756,688,793,698,856,400
34.46789
79
0.55432
false
3.406167
false
false
false
scanny/python-pptx
pptx/oxml/chart/shared.py
1
6097
# encoding: utf-8 """Shared oxml objects for charts.""" from __future__ import absolute_import, division, print_function, unicode_literals from pptx.oxml import parse_xml from pptx.oxml.ns import nsdecls from pptx.oxml.simpletypes import ( ST_LayoutMode, XsdBoolean, XsdDouble, XsdString, XsdUnsignedInt, ) from pptx.oxml.xmlchemy import ( BaseOxmlElement, OptionalAttribute, RequiredAttribute, ZeroOrOne, ) class CT_Boolean(BaseOxmlElement): """ Common complex type used for elements having a True/False value. """ val = OptionalAttribute("val", XsdBoolean, default=True) class CT_Boolean_Explicit(BaseOxmlElement): """Always spells out the `val` attribute, e.g. `val=1`. At least one boolean element is improperly interpreted by one or more versions of PowerPoint. The `c:overlay` element is interpreted as |False| when no `val` attribute is present, contrary to the behavior described in the schema. A remedy for this is to interpret a missing `val` attribute as |True| (consistent with the spec), but always write the attribute whenever there is occasion for changing the element. """ _val = OptionalAttribute("val", XsdBoolean, default=True) @property def val(self): return self._val @val.setter def val(self, value): val_str = "1" if bool(value) is True else "0" self.set("val", val_str) class CT_Double(BaseOxmlElement): """ Used for floating point values. """ val = RequiredAttribute("val", XsdDouble) class CT_Layout(BaseOxmlElement): """ ``<c:layout>`` custom element class """ manualLayout = ZeroOrOne("c:manualLayout", successors=("c:extLst",)) @property def horz_offset(self): """ The float value in ./c:manualLayout/c:x when c:layout/c:manualLayout/c:xMode@val == "factor". 0.0 if that XPath expression finds no match. """ manualLayout = self.manualLayout if manualLayout is None: return 0.0 return manualLayout.horz_offset @horz_offset.setter def horz_offset(self, offset): """ Set the value of ./c:manualLayout/c:x@val to *offset* and ./c:manualLayout/c:xMode@val to "factor". Remove ./c:manualLayout if *offset* == 0. """ if offset == 0.0: self._remove_manualLayout() return manualLayout = self.get_or_add_manualLayout() manualLayout.horz_offset = offset class CT_LayoutMode(BaseOxmlElement): """ Used for ``<c:xMode>``, ``<c:yMode>``, ``<c:wMode>``, and ``<c:hMode>`` child elements of CT_ManualLayout. """ val = OptionalAttribute("val", ST_LayoutMode, default=ST_LayoutMode.FACTOR) class CT_ManualLayout(BaseOxmlElement): """ ``<c:manualLayout>`` custom element class """ _tag_seq = ( "c:layoutTarget", "c:xMode", "c:yMode", "c:wMode", "c:hMode", "c:x", "c:y", "c:w", "c:h", "c:extLst", ) xMode = ZeroOrOne("c:xMode", successors=_tag_seq[2:]) x = ZeroOrOne("c:x", successors=_tag_seq[6:]) del _tag_seq @property def horz_offset(self): """ The float value in ./c:x@val when ./c:xMode@val == "factor". 0.0 when ./c:x is not present or ./c:xMode@val != "factor". """ x, xMode = self.x, self.xMode if x is None or xMode is None or xMode.val != ST_LayoutMode.FACTOR: return 0.0 return x.val @horz_offset.setter def horz_offset(self, offset): """ Set the value of ./c:x@val to *offset* and ./c:xMode@val to "factor". """ self.get_or_add_xMode().val = ST_LayoutMode.FACTOR self.get_or_add_x().val = offset class CT_NumFmt(BaseOxmlElement): """ ``<c:numFmt>`` element specifying the formatting for number labels on a tick mark or data point. 
""" formatCode = RequiredAttribute("formatCode", XsdString) sourceLinked = OptionalAttribute("sourceLinked", XsdBoolean) class CT_Title(BaseOxmlElement): """`c:title` custom element class.""" _tag_seq = ("c:tx", "c:layout", "c:overlay", "c:spPr", "c:txPr", "c:extLst") tx = ZeroOrOne("c:tx", successors=_tag_seq[1:]) spPr = ZeroOrOne("c:spPr", successors=_tag_seq[4:]) del _tag_seq def get_or_add_tx_rich(self): """Return `c:tx/c:rich`, newly created if not present. Return the `c:rich` grandchild at `c:tx/c:rich`. Both the `c:tx` and `c:rich` elements are created if not already present. Any `c:tx/c:strRef` element is removed. (Such an element would contain a cell reference for the axis title text in the chart's Excel worksheet.) """ tx = self.get_or_add_tx() tx._remove_strRef() return tx.get_or_add_rich() @property def tx_rich(self): """Return `c:tx/c:rich` or |None| if not present.""" richs = self.xpath("c:tx/c:rich") if not richs: return None return richs[0] @staticmethod def new_title(): """Return "loose" `c:title` element containing default children.""" return parse_xml( "<c:title %s>" " <c:layout/>" ' <c:overlay val="0"/>' "</c:title>" % nsdecls("c") ) class CT_Tx(BaseOxmlElement): """ ``<c:tx>`` element containing the text for a label on a data point or other chart item. """ strRef = ZeroOrOne("c:strRef") rich = ZeroOrOne("c:rich") def _new_rich(self): return parse_xml( "<c:rich %s>" " <a:bodyPr/>" " <a:lstStyle/>" " <a:p>" " <a:pPr>" " <a:defRPr/>" " </a:pPr>" " </a:p>" "</c:rich>" % nsdecls("c", "a") ) class CT_UnsignedInt(BaseOxmlElement): """ ``<c:idx>`` element and others. """ val = RequiredAttribute("val", XsdUnsignedInt)
mit
-5,779,250,776,741,064,000
26.21875
82
0.579957
false
3.411863
false
false
false
quantopian/zipline
zipline/data/bundles/csvdir.py
1
8039
""" Module for building a complete dataset from local directory with csv files. """ import os import sys from logbook import Logger, StreamHandler from numpy import empty from pandas import DataFrame, read_csv, Index, Timedelta, NaT from trading_calendars import register_calendar_alias from zipline.utils.cli import maybe_show_progress from . import core as bundles handler = StreamHandler(sys.stdout, format_string=" | {record.message}") logger = Logger(__name__) logger.handlers.append(handler) def csvdir_equities(tframes=None, csvdir=None): """ Generate an ingest function for custom data bundle This function can be used in ~/.zipline/extension.py to register bundle with custom parameters, e.g. with a custom trading calendar. Parameters ---------- tframes: tuple, optional The data time frames, supported timeframes: 'daily' and 'minute' csvdir : string, optional, default: CSVDIR environment variable The path to the directory of this structure: <directory>/<timeframe1>/<symbol1>.csv <directory>/<timeframe1>/<symbol2>.csv <directory>/<timeframe1>/<symbol3>.csv <directory>/<timeframe2>/<symbol1>.csv <directory>/<timeframe2>/<symbol2>.csv <directory>/<timeframe2>/<symbol3>.csv Returns ------- ingest : callable The bundle ingest function Examples -------- This code should be added to ~/.zipline/extension.py .. code-block:: python from zipline.data.bundles import csvdir_equities, register register('custom-csvdir-bundle', csvdir_equities(["daily", "minute"], '/full/path/to/the/csvdir/directory')) """ return CSVDIRBundle(tframes, csvdir).ingest class CSVDIRBundle: """ Wrapper class to call csvdir_bundle with provided list of time frames and a path to the csvdir directory """ def __init__(self, tframes=None, csvdir=None): self.tframes = tframes self.csvdir = csvdir def ingest(self, environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir): csvdir_bundle(environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir, self.tframes, self.csvdir) @bundles.register("csvdir") def csvdir_bundle(environ, asset_db_writer, minute_bar_writer, daily_bar_writer, adjustment_writer, calendar, start_session, end_session, cache, show_progress, output_dir, tframes=None, csvdir=None): """ Build a zipline data bundle from the directory with csv files. 
""" if not csvdir: csvdir = environ.get('CSVDIR') if not csvdir: raise ValueError("CSVDIR environment variable is not set") if not os.path.isdir(csvdir): raise ValueError("%s is not a directory" % csvdir) if not tframes: tframes = set(["daily", "minute"]).intersection(os.listdir(csvdir)) if not tframes: raise ValueError("'daily' and 'minute' directories " "not found in '%s'" % csvdir) divs_splits = {'divs': DataFrame(columns=['sid', 'amount', 'ex_date', 'record_date', 'declared_date', 'pay_date']), 'splits': DataFrame(columns=['sid', 'ratio', 'effective_date'])} for tframe in tframes: ddir = os.path.join(csvdir, tframe) symbols = sorted(item.split('.csv')[0] for item in os.listdir(ddir) if '.csv' in item) if not symbols: raise ValueError("no <symbol>.csv* files found in %s" % ddir) dtype = [('start_date', 'datetime64[ns]'), ('end_date', 'datetime64[ns]'), ('auto_close_date', 'datetime64[ns]'), ('symbol', 'object')] metadata = DataFrame(empty(len(symbols), dtype=dtype)) if tframe == 'minute': writer = minute_bar_writer else: writer = daily_bar_writer writer.write(_pricing_iter(ddir, symbols, metadata, divs_splits, show_progress), show_progress=show_progress) # Hardcode the exchange to "CSVDIR" for all assets and (elsewhere) # register "CSVDIR" to resolve to the NYSE calendar, because these # are all equities and thus can use the NYSE calendar. metadata['exchange'] = "CSVDIR" asset_db_writer.write(equities=metadata) divs_splits['divs']['sid'] = divs_splits['divs']['sid'].astype(int) divs_splits['splits']['sid'] = divs_splits['splits']['sid'].astype(int) adjustment_writer.write(splits=divs_splits['splits'], dividends=divs_splits['divs']) def _pricing_iter(csvdir, symbols, metadata, divs_splits, show_progress): with maybe_show_progress(symbols, show_progress, label='Loading custom pricing data: ') as it: files = os.listdir(csvdir) for sid, symbol in enumerate(it): logger.debug('%s: sid %s' % (symbol, sid)) try: fname = [fname for fname in files if '%s.csv' % symbol in fname][0] except IndexError: raise ValueError("%s.csv file is not in %s" % (symbol, csvdir)) dfr = read_csv(os.path.join(csvdir, fname), parse_dates=[0], infer_datetime_format=True, index_col=0).sort_index() start_date = dfr.index[0] end_date = dfr.index[-1] # The auto_close date is the day after the last trade. ac_date = end_date + Timedelta(days=1) metadata.iloc[sid] = start_date, end_date, ac_date, symbol if 'split' in dfr.columns: tmp = 1. / dfr[dfr['split'] != 1.0]['split'] split = DataFrame(data=tmp.index.tolist(), columns=['effective_date']) split['ratio'] = tmp.tolist() split['sid'] = sid splits = divs_splits['splits'] index = Index(range(splits.shape[0], splits.shape[0] + split.shape[0])) split.set_index(index, inplace=True) divs_splits['splits'] = splits.append(split) if 'dividend' in dfr.columns: # ex_date amount sid record_date declared_date pay_date tmp = dfr[dfr['dividend'] != 0.0]['dividend'] div = DataFrame(data=tmp.index.tolist(), columns=['ex_date']) div['record_date'] = NaT div['declared_date'] = NaT div['pay_date'] = NaT div['amount'] = tmp.tolist() div['sid'] = sid divs = divs_splits['divs'] ind = Index(range(divs.shape[0], divs.shape[0] + div.shape[0])) div.set_index(ind, inplace=True) divs_splits['divs'] = divs.append(div) yield sid, dfr register_calendar_alias("CSVDIR", "NYSE")
apache-2.0
-5,625,354,991,096,741,000
34.414097
79
0.524941
false
4.264721
false
false
false
CLandauGWU/group_e
supp_funcs.py
1
9214
def zoneConcentration(shp_gdf, raw, pntLst, bufr=None): from downloading_funcs import addr_shape, down_extract_zip import pandas as pd import geopandas as gpd pnt = pntLst[0] pnt_isCalled = pntLst[1] for url in pnt: if url[-3:] == 'zip': pnt = url assert isinstance(pnt, str) #Must extract a zipfile from pnt! #Convenience assignment of projection type crs='EPSG:4326' #Extract and read points into memory pnt = down_extract_zip(pnt) ftr = gpd.read_file(pnt, crs=crs) #Flag properties within distance "bufr" of featured locations if not bufr: bufr = 1/250 #Hard to say what a good buffer is. assert isinstance(bufr, float) #buffer must be float! #Frame up the buffer shapes ftr.geometry = ftr.geometry.buffer(bufr) ftr['flag'] = 1 if 'NAME' in ftr: ftr.drop(['NAME'], axis=1, inplace=True) #Frame up the raw address points data pointy = raw[['NAME', 'Points', 'dummy_counter']] pointy = gpd.GeoDataFrame(pointy, crs=ftr.crs, geometry=pointy.Points) pointy = gpd.sjoin(pointy, ftr, how='left', op='intersects') denom = pointy.groupby('NAME').sum() denom = denom.dummy_counter numer = pointy.groupby('NAME').sum() numer = numer.flag pct_ftr_coverage = pd.DataFrame(numer/denom) pct_ftr_coverage.columns = [ pnt_isCalled ] pct_ftr_coverage.fillna(0, inplace=True) pct_ftr_coverage.crs = pointy.crs shp_gdf = shp_gdf.merge(pct_ftr_coverage, how="left", left_on='NAME', right_index=True) del pct_ftr_coverage, raw, pointy, denom, numer return shp_gdf del shp_gdf def pointInZone(shp_gdf, raw, zoneLst): from downloading_funcs import addr_shape, down_extract_zip import pandas as pd import geopandas as gpd zone = zoneLst[0] zone_isCalled = zoneLst[1] for url in zone: if url[-3:] == 'zip': zone = url assert isinstance(zone, str) #Must extract a zipfile from pnt! #Convenience assignment of projection type crs='EPSG:4326' #Extract and read points into memory zone = down_extract_zip(zone) zone = gpd.read_file(zone, crs=crs) zone['flag'] = 1 if 'NAME' in zone: zone.drop(['NAME'], axis=1, inplace=True) #Frame up the raw address points data pointy = raw[['NAME', 'Points', 'dummy_counter']] pointy = gpd.GeoDataFrame(pointy, crs=zone.crs, geometry=pointy.Points) pointy = gpd.sjoin(pointy, zone, how='left', op='intersects') numer = pointy.groupby('NAME').sum() numer = numer.flag inzone = pointy.groupby('NAME').sum() inzone = inzone.dummy_counter #This was calling denom.dummy_counter which is undeclared flaginzone = pd.DataFrame(inzone) flaginzone.columns = [ zone_isCalled ] flaginzone.fillna(0, inplace=True) flaginzone.crs = pointy.crs shp_gdf = shp_gdf.merge(flaginzone, how="left", left_on='NAME', right_index=True) del flaginzone, pointy, inzone, numer, raw return shp_gdf del shp_gdf def oecdGdpQs(shp_gdf, raw, url, i=None): #This extracts U.S. 
GDP on a quarterly #basis to the correct time unit of analysis import numpy as np import pandas as pd import geopandas as gpd if not 'Q_GDP' in shp_gdf.columns: shp_gdf['Q_GDP'] = 0 Qbins = [[1,2,3],[4,5,6],[7,8,9],[10,11,12]] yr = round(i) q = round((i-yr)*100) assert q < 14 for ij in range(0, 4): if q in Qbins[ij]: q = 'Q'+ str(ij+1) df = pd.read_csv(url[0], encoding='utf-8') df = df[df.LOCATION == 'USA'] df[['q', 'yr']]= df.Time.str.split('-', expand=True) df['q'] = df['q'].astype(str) df['yr'] = df['yr'].astype(int) df = df[(df.q == q)] df = df[(df.yr == yr)] i_gdp = list(df['Value']) i_gdp = i_gdp[0] shp_gdf['Q_GDP'][shp_gdf['month']==i] = i_gdp return shp_gdf del shp_gdf def metro_prox(shp_gdf, raw, bufr=None): #Flag properties within distance "bufr" of metro stations from downloading_funcs import addr_shape, down_extract_zip import pandas as pd import geopandas as gpd if not bufr: bufr = 1/250 #Hard to say what a good buffer is. assert isinstance(bufr, float) #buffer must be float! #Frame up the metro buffer shapes metro = down_extract_zip( 'https://opendata.arcgis.com/datasets/54018b7f06b943f2af278bbe415df1de_52.zip' ) metro = gpd.read_file(metro, crs=shp_gdf.crs) metro.geometry = metro.geometry.buffer(bufr) metro['bymet'] = 1 metro.drop(['NAME'], axis=1, inplace=True) #Frame up the raw address points data pointy = raw[['NAME', 'Points', 'dummy_counter']] pointy = gpd.GeoDataFrame(pointy, crs=metro.crs, geometry=pointy.Points) pointy = gpd.sjoin(pointy, metro, how='left', op='intersects') denom = pointy.groupby('NAME').sum() denom = denom.dummy_counter numer = pointy.groupby('NAME').sum() numer = numer.bymet pct_metro_coverage = pd.DataFrame(numer/denom) pct_metro_coverage.columns = [ 'pct_metro_coverage' ] pct_metro_coverage.fillna(0, inplace=True) pct_metro_coverage.crs = pointy.crs shp_gdf = shp_gdf.merge(pct_metro_coverage, how="left", left_on='NAME', right_index=True) return shp_gdf def clim_ingest(shp_gdf, raw, filepath, i=None): #Adds monthly average, max and min temp, from National Airport import numpy as np import pandas as pd import geopandas as gpd #NOAA NCDC data mining is not worth implementing in this workflow #Pull the data from disk df = pd.read_csv(filepath) #Only want National Airport df = df[df.NAME == 'WASHINGTON REAGAN NATIONAL AIRPORT, VA US'] #Express the dates as datetime objects df.DATE = pd.to_datetime(df.DATE) yr = round(i) month = round((i-yr)*100) #Narrow it down to just the one row that matches "i" df = df[df.DATE.dt.year == yr] df = df[df.DATE.dt.month == month] assert df.shape[0] == 1 #Only one row should match "i" for tag in ['TAVG', 'TMAX', 'TMIN']: #iterate thru values we want #Establishes the column if needed if not tag in shp_gdf.columns: shp_gdf[tag] = 0 #Extract the value of df[tag] val = list(df[tag]) val = val[0] #Assign the extracted value to all shp_gdf[tag] rows where 'month' is t-i shp_gdf[tag][shp_gdf['month']==i] = val return shp_gdf del shp_gdf def ITSPExtract(shp_gdf, raw, i=None): """Read in tax extract data, pare it down to month i, spatial join on the shape geodataframe shp_gdf. Return shp_gdf. 
""" from downloading_funcs import addr_shape, down_extract_zip import pandas as pd from shapely.geometry import Point, Polygon import geopandas as gpd crs='EPSG:4326' df = pd.read_csv('./data/Integrated_Tax_System_Public_Extract.csv') df.SALEDATE = pd.to_datetime(df.SALEDATE) yr = round(i) month = round((i-yr)*100) #Narrow it down to just the one row that matches "i" df = df[df.SALEDATE.dt.year == yr] df = df[df.SALEDATE.dt.month == month] df = df.sort_values(['SALEDATE']) df = df.reset_index(drop=True) #ITSPE has no geospatial data, so we need to merge on addresspoints. adr_df = pd.read_csv('./data/Address_Points.csv') #Regex to clean off the regime codes and any other NaN. adr_df['SSL'] = adr_df['SSL'].str.replace(r'\D+', '') df['SSL'] = df['SSL'].str.replace(r'\D+', '') adr_df = pd.merge(adr_df, df, how='inner', on=['SSL', 'SSL'], suffixes=['', '_tax']) del df adr_df['geometry'] = [ Point(xy) for xy in zip( adr_df.LONGITUDE.apply(float), adr_df.LATITUDE.apply(float) ) ] adr_df = gpd.GeoDataFrame(adr_df, crs=shp_gdf.crs, geometry=adr_df.geometry) adr_df = adr_df.dropna(subset=['SALEPRICE']) pointy = gpd.sjoin(shp_gdf, adr_df, how='left', op='intersects') pointy = pointy.dropna(subset=['SALEPRICE']) sales = pointy.groupby('NAME').sum() sales = sales.SALEPRICE sales.columns = ['realPropertySaleVolume' ] sales = pd.DataFrame(sales) shp_gdf = shp_gdf.merge(sales, how="left", left_on='NAME', right_index=True) del sales, raw, pointy return shp_gdf del adr_df, shp_gdf
mit
5,698,252,796,958,923,000
28.822006
91
0.572173
false
3.211572
false
false
false
timole/sopernovus
dev/prod2csv.py
1
7254
#!/usr/bin/env python # -*- coding: utf-8 -*- import re, sys, json from pymongo import MongoClient a = None def parseColumnNames(f): line = f.readline() return line.split(',') inputFilename = sys.argv[1] inputFilenameCron = sys.argv[2] outputfilename = sys.argv[3] outputfilenameIds = sys.argv[4] outputfilenameMunicipalityIds = sys.argv[5] client = MongoClient('localhost', 27017) db = client['lupapiste'] applications = db.applications apps = {} i = 0 for application in applications.find(): appId = application["_id"] appFields = {} if "primaryOperation" in application.keys(): if application["primaryOperation"] is not None and "name" in application["primaryOperation"].keys(): op = application["primaryOperation"]["name"] else: op = "" appFields["primaryOperation"] = op apps[appId] = appFields if i % 1000 == 0: sys.stdout.write('.') sys.stdout.flush() i = i + 1 f = open(inputFilename, "r") fcron = open(inputFilenameCron, "r") out = open(outputfilename, "w") outIds = open(outputfilenameIds, "w") outMunicipalityIds = open(outputfilenameMunicipalityIds, "w") columnNames = parseColumnNames(f) print("Column names") i = 0 for col in columnNames: print `i` + ": " + col i = i + 1 out.write("datetime;applicationId;operation;municipalityId;userId;role;action;target\n") ids = {} idSeq = 100000 municipalityIds = {} municipalityIdSeq = 1000 userIds = {} userIdSeq = 100000 parsed = 0 errors = 0 for line in f: fields = line.split(',') datetime = re.match("\"(.*) .*", fields[1]).group(1) # print "ts: " + datetime rawMatch = re.match(".*? - (.*)\"", line) js = rawMatch.group(1).replace("\"\"", "\"") try: data = json.loads(js) except ValueError: errors = errors + 1 #sys.stdout.write('E') #print("Error parsing json") continue if data["type"] == "command": # print(data) action = data["action"] if action == "login" or action == "register-user" or action == "update-user" or action == "update-user-organization" or action == "reset-password" or action == "users-for-datatables" or action == "impersonate-authority" or action == "frontend-error" or action == "browser-timing": continue # if not id in data["data"].keys(): # continue id = "" role = "" userId = "" try: if action == "create-application": id = "" role = data["user"]["role"] userId = data["user"]["id"] else: if action == "neighbor-response": # print(data) id = data["data"]["applicationId"] role = "neighbor" userId = data["data"]["neighborId"] else: userId = data["user"]["id"] role = data["user"]["role"] id = data["data"]["id"] except: #sys.stdout.write('i') errors = errors + 1 #print("No id for " + data["action"]) target = "" try: if action == "update-doc": target = data["data"]["updates"][0][0] if action == "upload-attachment": target = data["data"]["attachmentType"]["type-id"] if action == "mark-seen": target = data["data"]["type"] if action == "approve-doc": target = data["data"]["path"] if action == "add-comment": target = data["data"]["target"]["type"] if action == "create-doc": target = data["data"]["schemaName"] if action == "invite-with-role": target = data["data"]["role"] except: #sys.stdout.write('t') target = "" errors = errors + 1 if id != "": if not id in ids.keys(): ids[id] = str(idSeq) idSeq = idSeq + 1 pubId = ids[id] else: pubId = "" pubMunicipalityId = "" municipalityId = "" if id != "": if id is not None and len(id.split('-')) == 4: municipalityId = id.split('-')[1] if not municipalityId in municipalityIds.keys(): municipalityIds[municipalityId] = str(municipalityIdSeq) municipalityIdSeq = municipalityIdSeq + 1 pubMunicipalityId = 
municipalityIds[municipalityId] if not userId in userIds.keys(): userIds[userId] = str(userIdSeq) userIdSeq = userIdSeq + 1 pubUserId = userIds[userId] op = "" if id in apps.keys(): app = apps[id] op = app["primaryOperation"] l = datetime + ";" + pubId + ";" + op + ";" + pubMunicipalityId + ";" + pubUserId + ";" + role + ";" + action + ";" + target + "\n" # print(l) out.write(l) parsed = parsed + 1 if parsed % 1000 == 0: sys.stdout.write('.') sys.stdout.flush() columnNames = parseColumnNames(fcron) for line in fcron: fields = line.split(',') datetime = re.match("\"(.*) .*", fields[1]).group(1) # print "ts: " + datetime raw = fields[7] rawMatch = re.match(".*?\[(LP.*?)\].*", raw) id = rawMatch.group(1) jsMatch = re.match(".*? - (.*)\"", line) js = jsMatch.group(1).replace("\"\"", "\"") try: data = json.loads(js) except ValueError: errors = errors + 1 #sys.stdout.write('E') #print("Error parsing json") continue if data["event"] == "Found new verdict": if id != "": if not id in ids.keys(): ids[id] = str(idSeq) idSeq = idSeq + 1 pubId = ids[id] else: pubId = "" op = "" if id in apps.keys(): app = apps[id] op = app["primaryOperation"] l = datetime + ";" + pubId + ";" + op + ";" + pubMunicipalityId + ";" + pubUserId + ";" + role + ";" + action + ";" + target + "\n" # print(l) out.write(l) # else: #errors = errors + 1 #sys.stdout.write('N') parsed = parsed + 1 if parsed % 10000 == 0: sys.stdout.write('.') sys.stdout.flush() outIds.write("applicationId;originalApplicationId\n") for idKey in ids.keys(): id = ids[idKey] if id is None or idKey is None: print "Error: None:" print("id") print(id) print("idKey") print(idKey) else: outIds.write(id + ";" + idKey + "\n") outMunicipalityIds.write("municipalityId;originalMunicipalityId\n") for idKey in municipalityIds.keys(): id = municipalityIds[idKey] if id is None or idKey is None: print "Error: None:" print("id") print(id) print("idKey") print(idKey) else: outMunicipalityIds.write(id + ";" + idKey + "\n") outMunicipalityIds.close() outIds.close() out.close() print print "Errors: " + str(errors) print "Parsed: " + str(parsed)
mit
6,015,316,454,777,002,000
26.793103
288
0.514613
false
3.645226
false
false
false
brainiak/brainiak
setup.py
1
5433
from distutils import sysconfig from setuptools import setup, Extension, find_packages from setuptools.command.build_ext import build_ext import os import site import sys import setuptools from copy import deepcopy assert sys.version_info >= (3, 5), ( "Please use Python version 3.5 or higher, " "lower versions are not supported" ) # https://github.com/pypa/pip/issues/7953#issuecomment-645133255 site.ENABLE_USER_SITE = "--user" in sys.argv[1:] here = os.path.abspath(os.path.dirname(__file__)) # Get the long description from the README file with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f: long_description = f.read() ext_modules = [ Extension( 'brainiak.factoranalysis.tfa_extension', ['brainiak/factoranalysis/tfa_extension.cpp'], ), Extension( 'brainiak.fcma.fcma_extension', ['brainiak/fcma/src/fcma_extension.cc'], ), Extension( 'brainiak.fcma.cython_blas', ['brainiak/fcma/cython_blas.pyx'], ), Extension( 'brainiak.eventseg._utils', ['brainiak/eventseg/_utils.pyx'], ), ] # As of Python 3.6, CCompiler has a `has_flag` method. # cf http://bugs.python.org/issue26689 def has_flag(compiler, flagname): """Return a boolean indicating whether a flag name is supported on the specified compiler. """ import tempfile with tempfile.NamedTemporaryFile('w', suffix='.cpp') as f: f.write('int main (int argc, char **argv) { return 0; }') try: compiler.compile([f.name], extra_postargs=[flagname]) except setuptools.distutils.errors.CompileError: return False return True def cpp_flag(compiler): """Return the -std=c++[11/14] compiler flag. The c++14 is prefered over c++11 (when it is available). """ if has_flag(compiler, '-std=c++14'): return '-std=c++14' elif has_flag(compiler, '-std=c++11'): return '-std=c++11' else: raise RuntimeError('Unsupported compiler -- at least C++11 support ' 'is needed!') class BuildExt(build_ext): """A custom build extension for adding compiler-specific options.""" c_opts = { 'unix': ['-g0', '-fopenmp'], } # FIXME Workaround for using the Intel compiler by setting the CC env var # Other uses of ICC (e.g., cc binary linked to icc) are not supported if (('CC' in os.environ and 'icc' in os.environ['CC']) or (sysconfig.get_config_var('CC') and 'icc' in sysconfig.get_config_var('CC'))): c_opts['unix'] += ['-lirc', '-lintlc'] if sys.platform == 'darwin': c_opts['unix'] += ['-stdlib=libc++', '-mmacosx-version-min=10.9', '-ftemplate-depth-1024'] def build_extensions(self): ct = self.compiler.compiler_type opts = self.c_opts.get(ct, []) if ct == 'unix': opts.append('-DVERSION_INFO="%s"' % self.distribution.get_version()) for ext in self.extensions: ext.extra_compile_args = deepcopy(opts) ext.extra_link_args = deepcopy(opts) lang = ext.language or self.compiler.detect_language(ext.sources) if lang == 'c++': ext.extra_compile_args.append(cpp_flag(self.compiler)) ext.extra_link_args.append(cpp_flag(self.compiler)) build_ext.build_extensions(self) def finalize_options(self): super().finalize_options() import numpy import pybind11 self.include_dirs.extend([ numpy.get_include(), pybind11.get_include(user=True), pybind11.get_include(), ]) setup( name='brainiak', use_scm_version=True, setup_requires=[ 'cython', # https://github.com/numpy/numpy/issues/14189 # https://github.com/brainiak/brainiak/issues/493 'numpy!=1.17.*,<1.20', 'pybind11>=1.7', 'scipy!=1.0.0', 'setuptools_scm', ], install_requires=[ 'cython', # Previous versions fail of the Anaconda package fail on MacOS: # https://travis-ci.org/brainiak/brainiak/jobs/545838666 'mpi4py>=3', 'nitime', # 
https://github.com/numpy/numpy/issues/14189 # https://github.com/brainiak/brainiak/issues/493 'numpy!=1.17.*,<1.20', 'scikit-learn[alldeps]>=0.18', # See https://github.com/scipy/scipy/pull/8082 'scipy!=1.0.0', 'statsmodels', 'pymanopt', 'theano>=1.0.4', # See https://github.com/Theano/Theano/pull/6671 'pybind11>=1.7', 'psutil', 'nibabel', 'joblib', 'wheel', # See https://github.com/astropy/astropy-helpers/issues/501 'pydicom', ], extras_require={ 'matnormal': [ 'tensorflow', 'tensorflow_probability', ], }, author='Princeton Neuroscience Institute and Intel Corporation', author_email='mihai.capota@intel.com', url='http://brainiak.org', description='Brain Imaging Analysis Kit', license='Apache 2', keywords='neuroscience, algorithm, fMRI, distributed, scalable', long_description=long_description, ext_modules=ext_modules, cmdclass={'build_ext': BuildExt}, packages=find_packages(), include_package_data=True, python_requires='>=3.5', zip_safe=False, )
apache-2.0
-1,000,340,240,095,070,700
30.77193
93
0.598196
false
3.516505
false
false
false
ryfeus/lambda-packs
pytorch/source/torch/cuda/nccl.py
1
1644
import warnings
import torch.cuda

__all__ = ['all_reduce', 'reduce', 'broadcast', 'all_gather', 'reduce_scatter']

SUM = 0  # ncclRedOp_t


def is_available(tensors):
    devices = set()
    for tensor in tensors:
        if tensor.is_sparse:
            return False
        if not tensor.is_contiguous():
            return False
        if not tensor.is_cuda:
            return False
        device = tensor.get_device()
        if device in devices:
            return False
        devices.add(device)

    if not hasattr(torch._C, '_nccl_all_reduce'):
        warnings.warn('PyTorch is not compiled with NCCL support')
        return False

    return True


def version():
    return torch._C._nccl_version()


def unique_id():
    return torch._C._nccl_unique_id()


def init_rank(num_ranks, uid, rank):
    return torch._C._nccl_init_rank(num_ranks, uid, rank)


def all_reduce(inputs, outputs=None, op=SUM, streams=None, comms=None):
    if outputs is None:
        outputs = inputs
    torch._C._nccl_all_reduce(inputs, outputs, op, streams, comms)


def reduce(inputs, outputs=None, root=0, op=SUM, streams=None, comms=None):
    if outputs is None:
        outputs = inputs
    torch._C._nccl_reduce(inputs, outputs, root, op, streams, comms)


def broadcast(inputs, root=0, streams=None, comms=None):
    torch._C._nccl_broadcast(inputs, root, streams, comms)


def all_gather(inputs, outputs, streams=None, comms=None):
    torch._C._nccl_all_gather(inputs, outputs, streams, comms)


def reduce_scatter(inputs, outputs, op=SUM, streams=None, comms=None):
    torch._C._nccl_reduce_scatter(inputs, outputs, op, streams, comms)
mit
-5,757,900,926,952,547,000
25.095238
79
0.646594
false
3.37577
false
false
false
w0921444648/IT110_DJANGO_ATTEMPT2
mysite/mysite/settings.py
1
3236
""" Django settings for mysite project. Generated by 'django-admin startproject' using Django 1.9. For more information on this file, see https://docs.djangoproject.com/en/1.9/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.9/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '#m0orppq%#*(33*!j@3=tphdly3b^5xv5&xvy_q0(wx!q_oiw)' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'polls.apps.PollsConfig', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE_CLASSES = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'mysite.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [os.path.join(BASE_DIR, 'templates')], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'mysite.wsgi.application' # Database # https://docs.djangoproject.com/en/1.9/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.9/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'US/Eastern' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.9/howto/static-files/ STATIC_URL = '/static/'
gpl-2.0
2,290,409,260,022,670,000
25.52459
91
0.689431
false
3.505959
false
false
false
scienceopen/pybashutils
getIP_curl.py
1
2046
#!/usr/bin/env python
"""
gets interface IPv4 and IPv6 public addresses using libCURL

This uses the "reflector" method, which I feel is more reliable for finding
public-facing IP addresses, WITH THE CAVEAT that man-in-the-middle, etc.
attacks can defeat the reflector method.

PyCurl does not have a context manager.

https://ident.me ipv6 and ipv4
https://api.ipify.org  # ipv4 only
"""
from argparse import ArgumentParser
import ipaddress
import pycurl
from io import BytesIO
from typing import List, Union

length = 45  # http://stackoverflow.com/questions/166132/maximum-length-of-the-textual-representation-of-an-ipv6-address

URL = 'https://ident.me'


def main():
    p = ArgumentParser()
    p.add_argument('iface', help='network interface to use', nargs='?')
    p.add_argument('--url', help='plain text server', default='https://ident.me')
    P = p.parse_args()

    addr = getip(P.url, P.iface)

    for a in addr:
        print(a)


def getip(url: str = None, iface: str = None) -> List[Union[ipaddress.IPv4Address, ipaddress.IPv6Address]]:
    if url is None:
        url = URL

    addrs = []
    for v in (pycurl.IPRESOLVE_V4, pycurl.IPRESOLVE_V6):
        addr = _public_addr(v, url, iface)
        if addr is not None:
            addrs.append(addr)

    return addrs


def _public_addr(v, url: str, iface: str = None) -> Union[None, ipaddress.IPv4Address, ipaddress.IPv6Address]:
    B = BytesIO()
    C = pycurl.Curl()
    addr = None
    # %% set options
    C.setopt(pycurl.TIMEOUT, 3)  # 1 second is too short for slow connections

    if iface:
        C.setopt(pycurl.INTERFACE, iface)

    C.setopt(C.URL, url)  # type: ignore
    C.setopt(pycurl.IPRESOLVE, v)
    C.setopt(C.WRITEDATA, B)  # type: ignore
    # %% get public IP address
    ret = None
    try:
        C.perform()
        ret = B.getvalue()
        C.close()
    except pycurl.error:
        pass
    # %% validate response
    if ret:
        addr = ipaddress.ip_address(ret.decode('utf8'))

    return addr


if __name__ == '__main__':
    main()
bsd-3-clause
2,727,372,804,540,858,000
26.28
120
0.646139
false
3.343137
false
false
false
github-borat/cinder
cinder/volume/drivers/vmware/error_util.py
1
2480
# Copyright (c) 2013 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Exception classes and SOAP response error checking module.
"""

from cinder import exception
from cinder.openstack.common.gettextutils import _

NOT_AUTHENTICATED = 'NotAuthenticated'


class VimException(exception.CinderException):
    """The VIM Exception class."""

    def __init__(self, msg):
        exception.CinderException.__init__(self, msg)


class SessionOverLoadException(VimException):
    """Session Overload Exception."""
    pass


class VimAttributeException(VimException):
    """VI Attribute Error."""
    pass


class VimConnectionException(VimException):
    """Thrown when there is a connection problem."""
    pass


class VimFaultException(VimException):
    """Exception thrown when there are faults during VIM API calls."""

    def __init__(self, fault_list, msg):
        super(VimFaultException, self).__init__(msg)
        self.fault_list = fault_list


class VMwareDriverException(exception.CinderException):
    """Base class for all exceptions raised by the VMDK driver.

    All exceptions raised by the vmdk driver should raise an exception
    descended from this class as a root. This will allow the driver to
    potentially trap problems related to its own internal configuration
    before halting the cinder-volume node.
    """
    message = _("VMware VMDK driver exception.")


class VMwaredriverConfigurationException(VMwareDriverException):
    """Base class for all configuration exceptions.
    """
    message = _("VMware VMDK driver configuration error.")


class InvalidAdapterTypeException(VMwareDriverException):
    """Thrown when the disk adapter type is invalid."""
    message = _("Invalid disk adapter type: %(invalid_type)s.")


class InvalidDiskTypeException(VMwareDriverException):
    """Thrown when the disk type is invalid."""
    message = _("Invalid disk type: %(disk_type)s.")
apache-2.0
2,716,783,250,249,688,000
30
78
0.722581
false
4.253859
false
false
false
dbreen/connectfo
game/scenes/about.py
1
1898
import pygame
import random

from game import constants
from game.media import media
from game.scene import Scene


class Bouncy(object):
    def __init__(self, surf):
        self.surf = surf
        self.pos_x = random.randrange(0, constants.SCREEN_WIDTH - surf.get_width())
        self.pos_y = random.randrange(0, constants.SCREEN_HEIGHT - surf.get_height())
        self.vel_x = random.randrange(2, 8)
        self.vel_y = random.randrange(2, 8)

    def update(self):
        self.pos_x += self.vel_x
        self.pos_y += self.vel_y
        if self.pos_x < 0:
            self.pos_x = 0
            self.vel_x = -self.vel_x
        if self.pos_y < 0:
            self.pos_y = 0
            self.vel_y = -self.vel_y
        if self.pos_x + self.surf.get_width() >= constants.SCREEN_WIDTH:
            self.pos_x = constants.SCREEN_WIDTH - self.surf.get_width() - 1
            self.vel_x = -self.vel_x
        if self.pos_y + self.surf.get_height() >= constants.SCREEN_HEIGHT:
            self.pos_y = constants.SCREEN_HEIGHT - self.surf.get_height() - 1
            self.vel_y = -self.vel_y

    def draw(self, screen):
        screen.blit(self.surf, (self.pos_x, self.pos_y))


class AboutScene(Scene):
    def load(self):
        font = pygame.font.Font(constants.MENU_FONT, 36)
        self.bouncers = [Bouncy(font.render("Dan is better than Matt!!", True, constants.WHITE))]
        for i in range(0, 5):
            self.bouncers.append(Bouncy(media[random.choice(['img.dragon1', 'img.dragon2'])]))

    def render(self, screen):
        screen.fill(constants.BLACK)
        for bouncer in self.bouncers:
            bouncer.update()
            bouncer.draw(screen)

    def do_event(self, event):
        if event.type == pygame.KEYUP:
            if event.key == pygame.K_ESCAPE:
                self.manager.switch_scene('main')
mit
2,367,316,040,705,137,000
33.811321
97
0.570601
false
3.371226
false
false
false
shermp/KoboPatchGUI
PatchEdit.py
1
7070
import re import io, os, sys def iterDic(dic): """ Return a python 2/3 compatible iterable :param dic: :param pythonTwo: :return: """ if sys.version_info.major == 2: return dic.viewitems() else: return dic.items() class Patch: """ Create an object that contains information about each individual patch """ def __init__(self, name, status, group, patch_file): self.name = name self.status = status self.help_text = '' self.group = group self.patch_file = patch_file self.patch_replacements = [] def get_patch_replacements(self, data): """ Generate a list of possible strings for replacement. Using the data generated here has not yet been implemented, and may never be implemented. :param data: :return: """ start = 0 find = re.compile(r'^#{0,1}replace_.+?$') for (index, line) in enumerate(data): if 'patch_name = '+self.name in line: start = index break for line in data[start:]: if '</Patch>' in line: break m = find.search(line) if m: self.patch_replacements.append(m.group()) def get_help_text(self, text): """ From the text in the patch file, search for appropriate text to be used for help on what the patch does. :param text: :return: """ search_str = r'<Patch>(\npatch_name = ' + re.escape(self.name) + r'.+?)</Patch>' search_str = search_str.replace('\\`', '`') re_match_help_txt = re.search(search_str, text, flags=re.DOTALL | re.UNICODE) text = re_match_help_txt.group(1) if '##' not in text: self.help_text = text else: help_t = '' help_patt = r'## (.+?\n)' help_t_match = re.finditer(help_patt, text, flags=re.DOTALL | re.UNICODE) for match in help_t_match: help_t += match.group(1) self.help_text = help_t def gen_patch_obj_list(fn, patch_text): """ From the text in the patch files, generate patch objects and store them in a list :param fn: :param patch_text: :return: """ patch_obj_list = [] search_pattern = r'<Patch>.+?patch_name = (`[^`]+`).+?patch_enable = (`[^`]+`).+?</Patch>' re_find_attrib = re.compile(search_pattern, flags=re.DOTALL | re.UNICODE) attrib_match_list = re_find_attrib.finditer(patch_text) for match in attrib_match_list: mut_ex_group = '' group_pattern = r'patch_group = (`[^`]+`)' group_match = re.search(group_pattern, match.group(0), flags=re.DOTALL | re.UNICODE) if group_match: mut_ex_group = group_match.group(1) patch_obj = Patch(name=match.group(1), status=match.group(2), group=mut_ex_group, patch_file=fn) patch_obj.get_help_text(patch_text) patch_obj_list.append(patch_obj) return patch_obj_list def read_patch_files(fn_dic): """ Read the patch files into a dictionary :param fn_dic: :return: """ error_msg = None for fn in fn_dic: try: with io.open(os.path.normpath(fn), 'r', encoding='utf8') as patch_file: fn_dic[fn] = '' for line in patch_file: fn_dic[fn] += line except EnvironmentError: error_msg = 'There was a problem reading the file.\n\nCheck that you have permission to read the file.' return fn_dic, error_msg def apply_changes(patch_obj_dic, file_dic): """ If all checks are passed, write the changes to the patch file. Note that the original file is overwritten :return: """ success = False error_title = None error_msg = None # Checks that mutually exclusive options have not been set together. 
If they have, alert the user, # and abort before writing to file(s) for (fn, patch_obj_list) in iterDic(patch_obj_dic): mut_exl_dic = {} for obj in patch_obj_list: if obj.group and 'yes' in obj.status: if obj.group not in mut_exl_dic: mut_exl_dic[obj.group] = [] mut_exl_dic[obj.group].append(obj.name) else: mut_exl_dic[obj.group].append(obj.name) for (group, names) in iterDic(mut_exl_dic): if len(names) > 1: name_str = '\n' for name in names: name_str += ' ' + name + '\n' error_title = 'Mutually Exlusive Options Detected!' error_msg = 'The following options cannot be enabled together: \n' + name_str + \ fn + ' was not written.' success = False return success, error_title, error_msg # If checks passed, prepare and then write data to file(s) for (fn, patch_obj_list) in iterDic(patch_obj_dic): for obj in patch_obj_list: file_dic = prep_for_writing(fn, obj, file_dic) r_p_f_success, error_title, error_msg = write_patch_files(fn, file_dic) if not r_p_f_success: success = False return success, error_title, error_msg success = True return success, error_title, error_msg def prep_for_writing(patch_fn, patch_object, file_dic): """ Using regex, search and replace the patch enabled/disabled status in the patch text. :param patch_fn: :param patch_object: :return: """ search_pattern = r'(patch_name = ' + re.escape(patch_object.name) + r'.+?patch_enable = )' + \ r'`.+?`' search_pattern = search_pattern.replace('\\`', '`') search_replace = r'\1' + patch_object.status s = re.sub(search_pattern, search_replace, file_dic[patch_fn], flags=re.DOTALL | re.UNICODE) file_dic[patch_fn] = s return file_dic def write_patch_files(fn, file_dic): """ Write the changes to file(s) :param fn: :return: """ succsess = False error_title = None error_msg = None try: with io.open(os.path.normpath(fn), 'w', encoding='utf8') as patch_file: patch_file.write(file_dic[fn]) succsess = True return succsess, error_title, error_msg except EnvironmentError: error_title = 'File Error!' error_msg = 'There was a problem writing to the following file:\n\n' + \ fn + '\n\n' \ 'Check that the file isn\'t in use by another program, and that you have write ' \ 'permissions to the file and folder' return succsess, error_title, error_msg def calc_grid_pos(pos, cols): """ A little function to calculate the grid position of checkboxes :param pos: :param cols: :return: """ calc_row = pos // cols calc_col = pos % cols return calc_row, calc_col def edit_repl_opts(event, ext_pos, pos, patch_obj): pass
mit
-6,138,971,067,062,184,000
32.995192
115
0.563366
false
3.649974
false
false
false
edeposit/edeposit.amqp.storage
src/edeposit/amqp/storage/storage_handler.py
1
7204
#! /usr/bin/env python # -*- coding: utf-8 -*- # # Interpreter version: python 2.7 # # Imports ===================================================================== import transaction from BTrees.OOBTree import OOBTree from BTrees.OOBTree import OOTreeSet from BTrees.OOBTree import intersection from zeo_connector import transaction_manager from zeo_connector.examples import DatabaseHandler import settings # Exceptions ================================================================== class InvalidType(Exception): """ Raised in case that object you are trying to store doesn't have required interface. """ class UnindexableObject(Exception): """ Raised in case, that object doesn't have at least one attribute set. """ # Functions & classes ========================================================= class StorageHandler(DatabaseHandler): """ Object database with indexing by the object attributes. Each stored object is required to have following properties: - indexes (list of strings) - project_key (string) For example:: class Person(Persistent): def __init__(self, name, surname): self.name = name self.surname = surname @property def indexes(self): return [ "name", "surname", ] @property def project_key(self): return PROJECT_KEY Note: I suggest to use properties, because that way the values are not stored in database, but constructed at request by the property methods. """ def __init__(self, project_key, conf_path=settings.ZEO_CLIENT_PATH): """ Constructor. Args: project_key (str): Project key which is used for the root of DB. conf_path (str): Path to the client zeo configuration file. Default :attr:`.settings.ZEO_CLIENT_PATH`. """ super(self.__class__, self).__init__( conf_path=conf_path, project_key=project_key ) @transaction_manager def _zeo_key(self, key, new_type=OOBTree): """ Get key from the :attr:`zeo` database root. If the key doesn't exist, create it by calling `new_type` argument. Args: key (str): Key in the root dict. new_type (func/obj): Object/function returning the new instance. Returns: obj: Stored object, or `new_type`. """ zeo_key = self.zeo.get(key, None) if zeo_key is None: zeo_key = new_type() self.zeo[key] = zeo_key return zeo_key def _get_db_fields(self, obj): """ Return list of database dictionaries, which are used as indexes for each attributes. Args: cached (bool, default True): Use cached connection to database. Returns: list: List of OOBTree's for each item in :attr:`.COMMON_FIELDS`. """ for field in obj.indexes: yield field, self._zeo_key(field) def _check_obj_properties(self, pub, name="pub"): """ Make sure, that `pub` has the right interface. Args: pub (obj): Instance which will be checked. name (str): Name of the instance. Used in exception. Default `pub`. Raises: InvalidType: When the `pub` is not instance of `obj_type`. """ if not hasattr(pub, "indexes"): raise InvalidType("`%s` doesn't have .indexes property!" % name) if not pub.indexes: raise InvalidType("`%s.indexes` is not set!" % name) if not hasattr(pub, "project_key"): raise InvalidType( "`%s` doesn't have .project_key property!" % name ) if not pub.project_key: raise InvalidType("`%s.project_key` is not set!" % name) def _put_into_indexes(self, obj): """ Put publication into all indexes. Attr: obj (obj): Indexable object. Raises: UnindexableObject: When there is no index (property) which can be used to index `obj` in database. 
""" no_of_used_indexes = 0 for field_name, db_index in list(self._get_db_fields(obj)): attr_value = getattr(obj, field_name) if attr_value is None: # index only by set attributes continue container = db_index.get(attr_value, None) if container is None: container = OOTreeSet() db_index[attr_value] = container container.insert(obj) no_of_used_indexes += 1 # make sure that atleast one `attr_value` was used if no_of_used_indexes <= 0: raise UnindexableObject( "You have to use atleast one of the identificators!" ) def store_object(self, obj): """ Save `obj` into database and into proper indexes. Attr: obj (obj): Indexable object. Raises: InvalidType: When the `obj` doesn't have right properties. Unindexableobjlication: When there is no indexes defined. """ self._check_obj_properties(obj) with transaction.manager: self._put_into_indexes(obj) def _get_subset_matches(self, query): """ Yield publications, at indexes defined by `query` property values. Args: query (obj): Object implementing proper interface. Yields: list: List of matching publications. """ for field_name, db_index in self._get_db_fields(query): attr = getattr(query, field_name) if attr is None: # don't use unset attributes continue results = db_index.get(attr, OOTreeSet()) if results: yield results def search_objects(self, query): """ Return list of objects which match all properties that are set (``not None``) using AND operator to all of them. Example: result = storage_handler.search_objects( DBPublication(isbn="azgabash") ) Args: query (obj): Object implementing proper interface with some of the properties set. Returns: list: List of matching objects or ``[]`` if no match was found. Raises: InvalidType: When the `query` doesn't implement required properties. """ self._check_obj_properties(query, "query") # AND operator between results final_result = None for result in self._get_subset_matches(query): if final_result is None: final_result = result continue final_result = intersection(final_result, result) # if no result is found, `final_result` is None, and I want [] if not final_result: return [] return list(final_result)
mit
4,649,346,435,320,711,000
28.52459
79
0.54678
false
4.519448
false
false
false
byashimov/django-controlcenter
tests/test_templatetags.py
1
11715
import collections import json from django import VERSION from django.contrib.auth.models import User from django.contrib.contenttypes.models import ContentType from controlcenter import app_settings, widgets from controlcenter.templatetags.controlcenter_tags import ( _method_prop, attrlabel, attrvalue, change_url, changelist_url, external_link, is_sequence, jsonify, ) from test_models import TestUser0, TestUser1 from . import TestCase class SimpleTagsTest(TestCase): def test_jsonify(self): data = {'a': None, 'b': 0} json_data = jsonify(data) # Marked safe self.assertTrue(hasattr(json_data, '__html__')) self.assertEqual(json_data, json.dumps(data)) def test_is_sequence(self): self.assertTrue(is_sequence(list())) self.assertTrue(is_sequence(tuple())) self.assertFalse(is_sequence(dict())) self.assertFalse(is_sequence(User())) def test_changelist_url(self): widget = widgets.ItemList(request=None) widget.changelist_url = 'test' # Original admin_changelist_url = '/admin/auth/user/' # String test self.assertEqual(changelist_url(widget), 'test') # Model test widget.changelist_url = User self.assertEqual(changelist_url(widget), admin_changelist_url + '') # Tuple with params test widget.changelist_url = (User, {'username__exact': 'user0'}) self.assertEqual(changelist_url(widget), admin_changelist_url + '?username__exact=user0') # Same with string no question sign widget.changelist_url = (User, 'username__exact=user0') self.assertEqual(changelist_url(widget), admin_changelist_url + '?username__exact=user0') # Same with question sign widget.changelist_url = (User, '?username__exact=user0') self.assertEqual(changelist_url(widget), admin_changelist_url + '?username__exact=user0') # Asserts first item is a Model widget.changelist_url = (None, {'username__exact': 'user0'}) with self.assertRaises(AssertionError): self.assertEqual(changelist_url(widget), admin_changelist_url) # Asserts last items is either basestring or dict widget.changelist_url = (User, None) with self.assertRaises(AssertionError): self.assertEqual(changelist_url(widget), admin_changelist_url) def test_method_prop(self): class Test(object): foo = True def bar(self): pass bar.allow_tags = True def baz(self): pass baz.allow_tags = False def egg(self): pass test = Test() # Attribute is not callable self.assertIsNone(_method_prop(test, 'foo', 'allow_tags')) # Has the property self.assertEqual(_method_prop(test, 'bar', 'allow_tags'), True) # Has it but it's False self.assertFalse(_method_prop(test, 'baz', 'allow_tags')) # Doesn't have self.assertIsNone(_method_prop(test, 'egg', 'allow_tags')) # Doesn't exist self.assertIsNone(_method_prop(test, 'doesnt_exist', 'allow_tags')) class AttrTagsTest(TestCase): def setUp(self): class TestUserWidget0(widgets.ItemList): model = TestUser0 list_display = ('foo', 'egg') # Should override models method def foo(self, obj): return 'new foo value' foo.short_description = 'new foo label' # Doesn't have description def bar(self, obj): return 'new bar value' def allows_tags(self, obj): return '<br>' allows_tags.allow_tags = True def no_tags(self, obj): return '<br>' class TestUserWidget1(TestUserWidget0): list_display = None class TestUserWidget2(TestUserWidget0): list_display = ((app_settings.SHARP, ) + TestUserWidget0.list_display) class TestUserWidget3(TestUserWidget2): model = TestUser1 self.user0 = TestUser0(username='user0') self.widget0 = TestUserWidget0(request=None) self.widget1 = TestUserWidget1(request=None) self.widget2 = TestUserWidget2(request=None) self.widget3 = TestUserWidget3(request=None) 
self.mapping = {'baz': 'mapping baz'} self.sequence = ['foo value', 'egg value'] self.namedtuple = collections.namedtuple('User', ['egg'])('egg value') def test_attrlabel(self): # Widget overrides self.assertEqual(attrlabel(self.widget0, 'foo'), 'new foo label') # Widget's has no description, takes model's one self.assertEqual(attrlabel(self.widget0, 'bar'), 'original bar label') # Empty description self.assertEqual(attrlabel(self.widget0, 'baz'), '') # Field's verbose name self.assertEqual(attrlabel(self.widget0, 'test_field'), 'My title') # No description found self.assertEqual(attrlabel(self.widget0, 'egg'), 'egg') # No attribute found self.assertEqual(attrlabel(self.widget0, 'unknown'), 'unknown') # Pk field self.assertEqual(attrlabel(self.widget0, 'id'), 'ID') self.assertEqual(attrlabel(self.widget0, 'pk'), 'ID') # Id is not defined self.assertEqual(attrlabel(self.widget3, 'id'), 'id') self.assertEqual(attrlabel(self.widget3, 'pk'), 'primary') def test_attrvalue(self): # New method self.assertEqual( attrvalue(self.widget0, self.user0, 'foo'), 'new foo value') # Old method self.assertEqual( attrvalue(self.widget0, self.user0, 'egg'), 'original egg value') # Allow tags test self.assertEqual( attrvalue(self.widget0, self.user0, 'allows_tags'), '<br>') self.assertEqual( attrvalue(self.widget0, self.user0, 'no_tags'), '&lt;br&gt;') # Attribute test self.assertEqual( attrvalue(self.widget0, self.user0, 'username'), 'user0') # 1) if method wasn't found in widget, # doesn't pass instance to it's method # 2) returns empty value because gots None self.assertEqual(attrvalue(self.widget0, self.user0, 'baz'), '') # No attribute found -- empty value self.assertEqual( attrvalue(self.widget0, self.user0, 'unknown'), '') # Mapping test self.assertEqual( attrvalue(self.widget0, self.mapping, 'baz'), 'mapping baz') # Key not found, not KeyError self.assertEqual( attrvalue(self.widget0, self.mapping, 'unknown'), '') # Requires list_display to map it to values self.assertEqual( attrvalue(self.widget0, self.sequence, 'egg'), 'egg value') self.assertEqual(attrvalue(self.widget1, self.sequence, 'egg'), '') # Namedtuple doesn't require it # with list_display self.assertEqual( attrvalue(self.widget0, self.namedtuple, 'egg'), 'egg value') # without list_display self.assertEqual( attrvalue(self.widget1, self.namedtuple, 'egg'), 'egg value') # Sharp test self.assertEqual( attrvalue(self.widget2, self.sequence, 'egg'), 'egg value') # IndexError test self.assertEqual( attrvalue(self.widget2, self.sequence[:-1], 'egg'), '') class ChangeurlTest(TestCase): def setUp(self): for i in range(10): username = 'user{}'.format(i) User.objects.create_user(username, username + '@example.com', username + 'password') self.obj = User.objects.first() self.obj_url = '/admin/auth/user/{}/'.format(self.obj.pk) if VERSION > (1, 9): self.obj_url += 'change/' # Model queryset class ModelQuerySet(widgets.ItemList): queryset = User.objects.all() # Deferred queryset class DeferredQuerySet(widgets.ItemList): queryset = User.objects.defer('email') # Dict class ValuesDict(widgets.ItemList): queryset = User.objects.values('pk', 'email') # List class ValuesList(widgets.ItemList): queryset = User.objects.values_list('pk', 'email') # List class ValuesListNoPk(widgets.ItemList): queryset = User.objects.values_list('email') # Namedtuple class NamedtupleList(ValuesList): klass = collections.namedtuple('User', 'pk email') def values(self): vals = super(NamedtupleList, self).values return [self.klass._make(x) for x in vals] self.widgets = [ 
ModelQuerySet, DeferredQuerySet, ValuesDict, ValuesList, NamedtupleList, ] for widget in self.widgets: setattr(self, widget.__name__, widget) def equal(self, klass, value): widget = klass(request=None) self.assertEqual(change_url(widget, widget.values[0]), value) def test_non_registered(self): # It's not registered so no reverse is possible class NonRegisteredModel(widgets.ItemList): queryset = ContentType.objects.all() self.equal(NonRegisteredModel, None) def test_no_model(self): # Model queryset + Deferred self.equal(self.ModelQuerySet, self.obj_url) self.equal(self.DeferredQuerySet, self.obj_url) # widget.model is not defined, so it can't build # change_url from Dict, List, Namedtuple self.equal(self.ValuesDict, None) self.equal(self.ValuesList, None) self.equal(self.NamedtupleList, None) def test_with_model(self): for widget in self.widgets: class Widget(widget): model = User if widget is self.ValuesList: # No widget.values_list_defined self.equal(Widget, None) else: self.equal(Widget, self.obj_url) def test_with_model_and_list_display(self): for widget in self.widgets: class Widget(widget): model = User list_display = (app_settings.SHARP, 'pk', 'email') # Need pk to build url for ValuesList self.equal(Widget, self.obj_url) class IdWidget(Widget): list_display = ('id', 'email') # Alias test pk == id and also no sharp sign in list_display self.equal(IdWidget, self.obj_url) def test_no_pk(self): class NoPkList(self.NamedtupleList): klass = collections.namedtuple('User', 'email') model = User queryset = model.objects.values_list('email') self.equal(NoPkList, None) class ExternalLinkTest(TestCase): def test_no_label(self): self.assertEqual( external_link('http://example.com'), '<a href="http://example.com" target="_blank" ' 'rel="noreferrer" rel="noopener">http://example.com</a>', ) def test_with_label(self): self.assertEqual( external_link('http://example.com', 'my-example-link'), '<a href="http://example.com" target="_blank" ' 'rel="noreferrer" rel="noopener">my-example-link</a>', )
bsd-3-clause
-5,575,421,691,948,398,000
31.541667
78
0.583696
false
4.167556
true
false
false
danceasarxx/pyfunk
pyfunk/monads/helpers.py
1
1936
from pyfunk import combinators as _, collections as __


@_.curry
def fmap(fn, f):
    """
    Generic version of fmap.
    @sig fmap :: Functor f => (a -> b) -> f a -> f b
    """
    return f.fmap(fn) if hasattr(f, 'fmap') else __.fmap(fn, f)


@_.curry
def chain(fn, c):
    """
    Generic version of chain
    @sig chain :: Chain c => (a -> c b) -> c a -> c b
    """
    return c.chain(fn)


@_.curry
def ap(fof, fn, f):
    """
    Generic ap
    @sig ap :: Applicative a, Functor f => (t -> a t) -> (x -> y) -> f x -> a y
    """
    return fof(fn).ap(f)


def chaincompose(*fns):
    """
    Composes functions that produce Chains
    @sig mcompose :: Chain c => (y -> c z)...(x -> c y) -> (x -> c z)
    """
    last = fns[-1:]
    rest = tuple(list(map(chain, fns[:-1])))
    chained = rest + last
    return _.compose(*chained)


@_.curry
def liftA2(fn, f1, f2):
    """
    Generic version of liftA2
    @sig ap :: Functor f => (x -> y -> z) -> f x -> f y -> a z
    """
    return f1.fmap(fn).ap(f2)


@_.curry
def liftA3(fn, f1, f2, f3):
    """
    Generic version of liftA3
    @sig ap :: Functor f => (w -> x -> y -> z) f v -> f x -> f y -> a z
    """
    return f1.fmap(fn).ap(f2).ap(f3)


# def doMonad(fn):
#     """
#     The do monad helps to run a series of serial mappings on monads to
#     remove the need for callbacks. e.g
#     @doMonad
#     def read(path):
#         xfile = yield openIO(path)
#         lines = split(xfile)
#         # chain calls use join
#         xresults = join(yield openURLs(xfiles))
#     Note: This function has no test
#     """
#     def init(*args, **kwargs):
#         gen = fn(*args, **kwargs)
#
#         def stepper(result):
#             try:
#                 result = gen.send(result)
#             except StopIteration:
#                 return result
#             else:
#                 return result.fmap(stepper)
#         return stepper(None)
#     return init
gpl-3.0
1,954,826,466,861,489,700
22.325301
79
0.502583
false
2.942249
false
false
false
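The generic helpers in the pyfunk record above dispatch to whatever `fmap`/`chain` methods the wrapped object exposes. The following is a minimal, self-contained sketch of that protocol; the `Just` class is an invented stand-in, not one of pyfunk's own monads.

# Minimal stand-in functor/monad illustrating the protocol the generic
# helpers above rely on: fmap(fn, f) defers to f.fmap(fn), and
# chain(fn, c) defers to c.chain(fn).
class Just(object):
    def __init__(self, value):
        self.value = value

    def fmap(self, fn):
        # Apply fn to the wrapped value and re-wrap the result.
        return Just(fn(self.value))

    def chain(self, fn):
        # fn must itself return a Just; no re-wrapping happens here.
        return fn(self.value)

    def __repr__(self):
        return 'Just(%r)' % self.value


box = Just(3)
print(box.fmap(lambda x: x + 1))          # Just(4)
print(box.chain(lambda x: Just(x * 10)))  # Just(30)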
TkTech/sachi
sachi/backends/solr.py
1
6451
# -*- coding: utf-8 -*- from __future__ import unicode_literals import json import logging from sachi import six, fields from sachi.query import QAll from sachi.backends.base import SearchBackend logger = logging.getLogger(__name__) FIELD_MAPPINGS = { fields.TextField: 'text_en', fields.StringField: 'string', fields.DateTimeField: 'date' } class SolrBackend(SearchBackend): '''A solr-backed search interface. .. note:: As a caveat, the Solr API in v6 does not currently provide a method of changing the UNIQUE KEY. As such, your ID field should always be called `id` in your indexes. :param connection: An active pysolr connection. ''' def __init__(self, connection, index): super(SolrBackend, self).__init__(connection, index) def update_schema(self): '''Update the Solr schema using the Schema API. This method will not remove or modify fields not defined in the index. .. note:: This method isn't magical. You must reindex your documents after changing the schema or the state of your index will become indeterminent. ''' # Get the current Solr schema and swap it into a name-keyed dict. current_schema = self.connection.schema current_fields = { f['name']: f for f in current_schema['schema']['fields'] } # We need to find all of the currently defined fields, then find any # defined in our index. If we find it, we compare it to see if it's # changed and replace the field. If we don't find it, we create it. # TODO: If Solr ever allows us to do an upsert, we can remove all of # this. logger.debug('Comparing Solr & Index schema...') to_create, to_replace = [], [] for field_name, schema_field in six.iteritems(self._index.schema): solr_field = current_fields.get(field_name) if solr_field: l1 = ( solr_field.get('stored', True), solr_field.get('indexed', True), solr_field.get('multiValued', False), solr_field['type'] ) l2 = ( schema_field.stored, schema_field.indexed, schema_field.multivalued, FIELD_MAPPINGS[schema_field.__class__] ) if l1 != l2: logger.debug('Replacing field %s', field_name) to_replace.append((field_name, schema_field)) else: logger.debug('Skipping unchanged field %s', field_name) else: logger.debug('Creating field %s', field_name) to_create.append((field_name, schema_field)) self.connection.schema_update({ 'add-field': [ { 'indexed': f.indexed, 'stored': f.stored, 'multiValued': f.multivalued, 'name': n, 'type': FIELD_MAPPINGS[f.__class__] } for n, f in to_create ], 'replace-field': [ { 'indexed': f.indexed, 'stored': f.stored, 'multiValued': f.multivalued, 'name': n, 'type': FIELD_MAPPINGS[f.__class__] } for n, f in to_replace ], }) def search(self, q): # By default we sort by score, however this can be overwritten. order_by = 'score desc' if q.sort_keys: order_by = ', '.join( '{0} {1}'.format( k[:1] if k.startswith('-') else k, 'DESC' if k.startswith('-') else 'ASC' ) for k in q.sort_keys ) facets = {} if q.facets: # TODO: Proper support for DateTimeField & Query facets. Right # now we're only supporting basic terms facetting. 
for facet in q.facets: facets[facet.name] = { 'type': 'terms', 'field': facet.name + '_facet', 'mincount': facet.at_least, 'limit': facet.limit } results = self.connection.select({ 'rows': q.limit, 'start': q.start, 'q': '*:*' if q.query is QAll else q.query, 'sort': order_by, 'df': self._index.default_field.index_keyname, 'facet': 'true' if q.facets else 'false', 'json.facet': json.dumps(facets) }) response = results['response'] fmt_response = { 'query': q.query, 'start': q.start, 'limit': q.limit, 'count': response['numFound'], 'results': [{ # Filter out any fields which aren't included in our schema, # or we'll end up with things like _version_. k: v for k, v in six.iteritems(r) if k in self._index.schema and not k.endswith('_facet') } for r in response['docs']], 'facets': { 'terms': {} } } # Process facets into our standard representation, only for those # we were asked to provide (solr by default will have additional # facets in here, such as count.) for facet in q.facets: if facet.name not in results['facets']: continue fmt_response['facets']['terms'][facet.name] = { b['val']: b['count'] for b in results['facets'][facet.name]['buckets'] } return fmt_response def index(self, objects): self.connection.index( list(self._index.apply(objects)) ) def clear(self): self.connection.delete_by_query('*:*', commit=True) def count(self): results = self.connection.select({ 'rows': 0, 'q': '*:*' }) return results['response']['numFound'] def refresh(self): self.connection.commit() def remove(self, objects): self.connection.delete_by_ids( list(self._index.apply_only_ids(objects)) )
mit
-4,865,335,914,014,963,000
32.42487
79
0.502868
false
4.320831
false
false
false
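For reference, the `search()` method in the SolrBackend record above normalizes Solr's raw response into a plain dictionary. The sketch below shows that shape with invented field names and counts; it is not output from a real Solr instance.

# Hypothetical example of the normalized structure built by
# SolrBackend.search() above (values are made up for illustration).
example_response = {
    'query': 'title:python',
    'start': 0,
    'limit': 10,
    'count': 42,
    'results': [
        {'id': 'doc-1', 'title': 'Python basics'},
        {'id': 'doc-2', 'title': 'More Python'},
    ],
    'facets': {
        'terms': {
            'author': {'alice': 30, 'bob': 12},
        },
    },
}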
Ventrosky/python-scripts
network-recon/sql-scan.py
1
1585
#!/usr/bin/env python
import sys, os, subprocess


def nmapScriptsScan(ip, port):
    print "[-] Starting nmap ms-sql script scan for " + ip + ":" + port
    nmapCmd = "nmap -sV -Pn -v -p "+port+" --script=ms-sql* -oN reports/sql/"+ip+"_"+port+"_nmap "+ip+" >> reports/sql/"+ip+"_"+port+"_nmapOutput.txt"
    subprocess.check_output(nmapCmd, shell=True)
    print "[-] Completed nmap ms-sql script scan for " + ip + ":" + port


def hydraScan(ip, port):
    print "[-] Starting ms-sql against " + ip + ":" + port
    hydraCmd = "hydra -L wordlists/users.txt -P wordlists/passwords.txt -f -e n -o reports/sql/"+ip+"_"+port+"_ncrack.txt -u "+ip+" -s "+port+" mssql"
    try:
        results = subprocess.check_output(hydraCmd, shell=True)
        resultarr = results.split("\n")
        for result in resultarr:
            if "login:" in result:
                print "[*] Valid ms-sql credentials found: " + result
                # Parse the credentials out of hydra's result line.
                resultList = result.split()
                username = resultList[4]
                if len(resultList) > 6:
                    password = resultList[6]
                else:
                    password = ''
    except:
        print "[-] No valid ms-sql credentials found"
    print "[-] Completed hydra ms-sql against " + ip + ":" + port


def main():
    if len(sys.argv) != 3:
        print "Passed: ", sys.argv
        print "Usage: sql-scan.py <ip> <port> "
        sys.exit(0)
    ip = str(sys.argv[1])
    port = str(sys.argv[2])
    nmapScriptsScan(ip, port)
    hydraScan(ip, port)

main()
gpl-3.0
442,974,220,446,875,260
37.658537
152
0.543849
false
3.394004
false
false
false
alexhayes/django-toolkit
django_toolkit/font_awesome.py
1
5598
from django.core.urlresolvers import reverse from copy import copy class Icon(): """ Represents a Bootstrap icon (<i>) tag. """ def __init__(self, icon, *css): self.icon = icon self.css = css def render(self, extra_css=[]): html = '<i class="%s' % self.icon if self.css: html += ' %s' % ' '.join([css for css in self.css]) if extra_css: html += ' %s' % ' '.join([css for css in extra_css]) html += '"></i>' return html class BaseCollection(): def __init__(self, *items): self.items = list(items) def append(self, item): self.items.append(item) class Stack(BaseCollection): """ Represents a Font Awesome icon stack. @see http://fortawesome.github.io/Font-Awesome/examples/ """ def render(self): """ Render the icon stack. For example: <span class="icon-stack"> <i class="icon-check-empty icon-stack-base"></i> <i class="icon-twitter"></i> </span> <span class="icon-stack"> <i class="icon-circle icon-stack-base"></i> <i class="icon-flag icon-light"></i> </span> <span class="icon-stack"> <i class="icon-sign-blank icon-stack-base"></i> <i class="icon-terminal icon-light"></i> </span> <span class="icon-stack"> <i class="icon-camera"></i> <i class="icon-ban-circle icon-stack-base text-error"></i> </span> """ return '<span class="icon-stack">%s</span>' % ( ''.join([item.render(['icon-stack-base'] if i == 0 else []) for (i, item) in enumerate(self.items)]) ) class ButtonGroup(BaseCollection): """ Font-Awesome ButtonGroup @see http://fortawesome.github.io/Font-Awesome/examples/ """ def render(self): """ Render the groups. Example: <div class="btn-group"> <a class="btn" href="#"><i class="icon-align-left"></i></a> <a class="btn" href="#"><i class="icon-align-center"></i></a> <a class="btn" href="#"><i class="icon-align-right"></i></a> <a class="btn" href="#"><i class="icon-align-justify"></i></a> </div> """ return '<div class="btn-group">%s</div>' % ( ''.join([item.render() for (i, item) in enumerate(self.items)]) ) class Button(): def __init__(self, inner=None, data_tip=None, view=None, view_kwargs=[], view_args=[], next=None, href=None, title=None, attrs={}, target=False, modal=False, submodal=False, data_target=True, css=[]): self.inner = inner self.href = href self.view = view self.view_args = view_args self.view_kwargs = view_kwargs self.title = title self.attrs = attrs self.css = [css] if isinstance(css, basestring) else css self.next = next self.modal = modal self.submodal = submodal self.data_target = data_target self.data_tip = data_tip self.target = target def render(self): """ <a class="btn" href="#"><i class="icon-repeat"></i> Reload</a> or.. 
<button type="button" class="btn"><i class="icon-repeat"></i> Reload</button> """ html = '' href = self.view if self.view is not None else self.href attrs = copy(self.attrs) if self.submodal: attrs['role'] = "button" attrs['data-toggle'] = "remote-submodal" if self.data_target: if isinstance(self.data_target, basestring): attrs['data-target'] = self.data_target else: attrs['data-target'] = "#submodal" elif self.modal: attrs['role'] = "button" #attrs['data-dismiss'] = "modal" attrs['data-toggle'] = "modal" #attrs['data-submodal'] = "true" #attrs['data-remoteinbody'] = "false" if self.data_target: if isinstance(self.data_target, basestring): attrs['data-target'] = self.data_target else: attrs['data-target'] = "#modal" if self.data_tip: attrs['data-tip'] = self.data_tip if self.target: attrs['target'] = self.target if 'css_class' not in attrs: attrs['css_class'] = '' attrs['css_class'] += ' btn ' + " ".join(self.css) attrs = ' '.join(['%s="%s"' % (key if key != 'css_class' else 'class', value) for key,value in attrs.iteritems()]) if href: if self.view: href = reverse(self.view, args=self.view_args, kwargs=self.view_kwargs) if self.next: href += '?next=%s' % (self.next if self.next.startswith('/') else reverse(self.next)) html += '<a href="%s" %s>' % (href, attrs) else: html += '<button type="button" %s>' % (attrs,) if hasattr(self.inner, 'render'): html += self.inner.render() else: html += self.inner if self.title: html += self.title if href: html += "</a>" else: html += "</button>" return html
mit
-2,145,452,662,055,833,300
30.8125
122
0.486781
false
3.914685
false
false
false
sergiohr/NeoDB
core/blockdb.py
1
3923
'''
Created on Apr 20, 2014

@author: sergio
'''

import psycopg2
import neo.core
from .. import dbutils


class BlockDB(neo.core.Block):
    '''
    classdocs
    '''

    def __init__(self, id_project = None, id_individual = None, name = None,
                 description = None, file_origin = None, file_datetime = None,
                 rec_datetime = None, index = None):
        '''
        Constructor
        '''
        neo.core.Block.__init__(self, name, description, file_origin,
                                file_datetime, rec_datetime, index)
        self.id_project = id_project
        self.id_individual = id_individual
        self.connection = None

    def save(self, connection):
        # Check mandatory values
        if self.id_project == None or self.id_individual == None:
            raise StandardError("Block Session must have id_project and id_individual.")
        if self.name == None:
            raise StandardError("Block Session must have a name.")
        other = dbutils.get_id(connection, 'block', name = self.name)
        if other != []:
            raise StandardError("There is another block session with name '%s'."%self.name)

        file_datetime = None
        rec_datetime = None
        if self.file_datetime:
            file_datetime = dbutils.get_ppgdate(self.file_datetime)
        if self.rec_datetime:
            rec_datetime = dbutils.get_ppgdate(self.rec_datetime)

        # QUERY
        cursor = connection.cursor()
        query = """INSERT INTO block (id_project, id_individual, name, description,
                   file_datetime, rec_datetime, file_origin, index)
                   VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"""
        cursor.execute(query, [self.id_project, self.id_individual, self.name,
                               self.description, file_datetime, rec_datetime,
                               self.file_origin, self.index])
        connection.commit()

        # Get ID
        [(id, _)] = dbutils.get_id(connection, 'block', name = self.name)
        return id

    def get_from_db(self, connection, id):
        connection = connection
        cursor = connection.cursor()
        query = """ SELECT * FROM block WHERE id = %s"""
        cursor.execute(query, [id])
        results = cursor.fetchall()

        if results != []:
            self.name = results[0][6]
            self.description = results[0][7]
            self.file_origin = results[0][8]
            self.file_datetime = results[0][3]
            self.rec_datetime = results[0][4]

        results = {}
        results['name'] = self.name
        results['description'] = self.description
        results['file_origin'] = self.file_origin
        results['file_datetime'] = self.file_datetime
        results['rec_datetime'] = self.rec_datetime
        results['segments'] = self.__get_segments_id(id, connection)

        return results

    def __get_segments_id(self, id, connection):
        cursor = connection.cursor()
        query = """ SELECT id FROM segment WHERE id_block = %s"""
        cursor.execute(query, [id])
        results = cursor.fetchall()
        ids = []
        for id in results:
            ids.append(id[0])
        return ids


if __name__ == '__main__':
    username = 'postgres'
    password = 'postgres'
    host = '192.168.2.2'
    dbname = 'demo'
    url = 'postgresql://%s:%s@%s/%s'%(username, password, host, dbname)
    dbconn = psycopg2.connect('dbname=%s user=%s password=%s host=%s'%(dbname, username, password, host))

    #b = BlockDB(id_project = 5, id_individual = 1, name = 'bloque prueba', rec_datetime="19-05-2014")
    b = BlockDB()
    b.get_from_db(dbconn,2)
    print b.save(dbconn)
gpl-3.0
8,031,664,291,213,629,000
32.538462
105
0.53454
false
4.103556
false
false
false
lavanoid/pi-rc
control_dune_warrior.py
1
8703
#!/usr/bin/env python """Manually send commands to the RC car.""" import argparse import json import pygame import pygame.font import socket import sys from common import server_up UP = LEFT = DOWN = RIGHT = False QUIT = False # pylint: disable=superfluous-parens def dead_frequency(frequency): """Returns an approprtiate dead signal frequency for the given signal.""" if frequency < 38: return 49.890 return 26.995 def format_command( frequency, useconds ): """Returns the JSON command string for this command tuple.""" dead = dead_frequency(frequency) return { 'frequency': frequency, 'dead_frequency': dead, 'burst_us': useconds, 'spacing_us': useconds, 'repeats': 1, } def input_function(type_cast): """Returns the input function for the running version of Python for reading data from stdin. """ # pylint: disable=bad-builtin if sys.version_info.major == 2: return lambda message: type_cast(raw_input(message)) else: return lambda message: type_cast(input(message)) def get_command_array(parser): """Returns an array of command information that can be used in the format_command function. """ args = parser.parse_args() read_float = input_function(float) read_int = input_function(int) option_to_prompt_and_function = { 'frequency': ('Command frequency? ', read_float), } for option, prompt_and_function in option_to_prompt_and_function.items(): if getattr(args, option) is None: prompt, function = prompt_and_function setattr(args, option, function(prompt)) return [ float(args.frequency), ] def make_parser(): """Builds and returns an argument parser.""" parser = argparse.ArgumentParser( description='Sends burst commands to Raspberry Pi RC.' ) parser.add_argument( '-p', '--port', dest='port', help='The port to send control commands to.', default=12345, type=int ) parser.add_argument( '-s', '--server', dest='server', help='The server to send control commands to.', default='127.1' ) parser.add_argument( '-f', '--frequency', dest='frequency', help='The frequency to broadcast commands on.' 
) return parser def to_bit(number): if number > 0: return 1 return 0 def ones_count(number): mask = 1 ones = 0 while mask <= number: ones += to_bit(mask & number) mask <<= 1 return ones def format_dune_warrior_command(throttle, turn, frequency): """Formats a command to JSON to the Raspberry Pi.""" command = [format_command(frequency, 500)] if throttle >= 32 or throttle < 0: raise ValueError('Invalid throttle') # Turning too sharply causes the servo to push harder than it can go, so limit this if turn >= 58 or turn < 8: raise ValueError('Invalid turn') even_parity_bit = to_bit( ( ones_count(throttle) + ones_count(turn) + 3 ) % 2 ) bit_pattern = ( to_bit(turn & 0x8), to_bit(turn & 0x4), to_bit(turn & 0x2), to_bit(turn & 0x1), 0, 0, to_bit(turn & 0x20), to_bit(turn & 0x10), to_bit(throttle & 0x10), to_bit(throttle & 0x8), to_bit(throttle & 0x4), to_bit(throttle & 0x2), to_bit(throttle & 0x1), 1, 1, 1, 0, 0, even_parity_bit, 0, 0, 0 ) assert(len(bit_pattern) == 22) assert(sum(bit_pattern) % 2 == 0) total_useconds = 1000 for bit in bit_pattern[:-1]: if bit == 0: useconds = 127 else: useconds = 200 command.append(format_command(27.145, useconds)) total_useconds += useconds if bit_pattern[-1] == 0: useconds = 127 else: useconds = 200 total_useconds += useconds command.append({ 'frequency': frequency, 'dead_frequency': dead_frequency(frequency), 'burst_us': useconds, 'spacing_us': 7000 - total_useconds, 'repeats': 1, }) command_str = json.dumps(command) if sys.version_info.major == 3: command_str = bytes(command_str, 'utf-8') return command_str def get_keys(): """Returns a tuple of (UP, DOWN, LEFT, RIGHT, changed) representing which keys are UP or DOWN and whether or not the key states changed. """ change = False key_to_global_name = { pygame.K_LEFT: 'LEFT', pygame.K_RIGHT: 'RIGHT', pygame.K_UP: 'UP', pygame.K_DOWN: 'DOWN', pygame.K_ESCAPE: 'QUIT', pygame.K_q: 'QUIT', } for event in pygame.event.get(): if event.type == pygame.QUIT: global QUIT QUIT = True elif event.type in {pygame.KEYDOWN, pygame.KEYUP}: down = (event.type == pygame.KEYDOWN) change = (event.key in key_to_global_name) if event.key in key_to_global_name: globals()[key_to_global_name[event.key]] = down return (UP, DOWN, LEFT, RIGHT, change) def interactive_control(host, port, frequency): """Runs the interactive control.""" pygame.init() size = (300, 400) screen = pygame.display.set_mode(size) # pylint: disable=too-many-function-args background = pygame.Surface(screen.get_size()) clock = pygame.time.Clock() black = (0, 0, 0) white = (255, 255, 255) big_font = pygame.font.Font(None, 40) little_font = pygame.font.Font(None, 24) pygame.display.set_caption('Dune Warrior') text = big_font.render('Use arrows to move', 1, white) text_position = text.get_rect(centerx=size[0] / 2) background.blit(text, text_position) screen.blit(background, (0, 0)) pygame.display.flip() sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) while not QUIT: up, down, left, right, change = get_keys() if change: # Something changed, so send a new command throttle = 16 turn = 32 if up: throttle = 24 elif down: throttle = 8 if left: turn = 12 elif right: turn = 52 command_json = format_dune_warrior_command(throttle, turn, frequency) sock.sendto(command_json, (host, port)) # Show the command and JSON background.fill(black) text = big_font.render(command_json[:100], 1, white) text_position = text.get_rect(centerx=size[0] / 2) background.blit(text, text_position) pretty = json.dumps(json.loads(command_json), indent=4) pretty_y_position = 
big_font.size(command_json)[1] + 10 for line in pretty.split('\n'): text = little_font.render(line, 1, white) text_position = text.get_rect(x=0, y=pretty_y_position) pretty_y_position += little_font.size(line)[1] background.blit(text, text_position) screen.blit(background, (0, 0)) pygame.display.flip() # Limit to 20 frames per second clock.tick(60) pygame.quit() def make_parser(): """Builds and returns an argument parser.""" parser = argparse.ArgumentParser( description='Interactive controller for the Raspberry Pi RC.' ) parser.add_argument( '-p', '--port', dest='port', help='The port to send control commands to.', type=int, default=12345, ) parser.add_argument( '-s', '--server', dest='server', help='The server to send control commands to.', type=str, default='127.1', ) parser.add_argument( '-f', '--frequency', dest='frequency', help='The frequency to broadcast signals on.', type=float, default=27.145, ) return parser def main(): """Parses command line arguments and runs the interactive controller.""" parser = make_parser() args = parser.parse_args() print('Sending commands to ' + args.server + ':' + str(args.port)) if not server_up(args.server, args.port, args.frequency): sys.stderr.write('Unable to contact server; did you start it?\n') sys.exit(1) interactive_control(args.server, args.port, args.frequency) if __name__ == '__main__': main()
gpl-2.0
9,042,649,700,021,385,000
25.372727
87
0.568884
false
3.803759
false
false
false
damaggu/SAMRI
samri/pipelines/utils.py
1
6258
# -*- coding: utf-8 -*- from __future__ import print_function, division, unicode_literals, absolute_import def parse_paravision_date(pv_date): """Convert ParaVision-style datetime string to Python datetime object. Parameters ---------- pv_date : str ParaVision datetime string. Returns ------- `datetime.datetime` : A Python datetime object. Notes ----- The datetime object produced does not contain a timezone, and should therefor only be used to determine time deltas relative to other datetimes from the same session. """ from datetime import datetime pv_date, _ = pv_date.split('+') pv_date += "000" pv_date = datetime.strptime(pv_date, "%Y-%m-%dT%H:%M:%S,%f") return pv_date def fslmaths_invert_values(img_path): """Calculates the op_string required to make an fsl.ImageMaths() node invert an image""" op_string = "-sub {0} -sub {0}".format(img_path) return op_string def iterfield_selector(iterfields, selector, action): """Include or exclude entries from iterfields based on a selector dictionary Parameters ---------- iterfields : list A list of lists (or tuples) containing entries fromatted at (subject_id,session_id,trial_id) selector : dict A dictionary with any combination of "sessions", "subjects", "trials" as keys and corresponding identifiers as values. action : "exclude" or "include" Whether to exclude or include (and exclude all the other) matching entries from the output. """ name_map = {"subjects": 0, "sessions": 1, "trials":2} keep = [] for ix, iterfield in enumerate(iterfields): for key in selector: selector[key] = [str(i) for i in selector[key]] if iterfield[name_map[key]] in selector[key]: keep.append(ix) break if action == "exclude": iterfields = [iterfields[i] for i in range(len(iterfields)) if i not in keep] elif action == "include": iterfields = [iterfields[i] for i in keep] return iterfields def datasource_exclude(in_files, excludes, output="files"): """Exclude file names from a list that match a BIDS-style specifications from a dictionary. Parameters ---------- in_files : list A list of flie names. excludes : dictionary A dictionary with keys which are "subjects", "sessions", or "scans", and values which are lists giving the subject, session, or scan identifier respectively. output : string Either "files" or "len". The former outputs the filtered file names, the latter the length of the resulting list. 
""" if not excludes: out_files = in_files else: exclude_criteria=[] for key in excludes: if key in "subjects": for i in excludes[key]: exclude_criteria.append("sub-"+str(i)) if key in "sessions": for i in excludes[key]: exclude_criteria.append("ses-"+str(i)) if key in "scans": for i in excludes[key]: exclude_criteria.append("trial-"+str(i)) out_files = [in_file for in_file in in_files if not any(criterion in in_file for criterion in exclude_criteria)] if output == "files": return out_files elif output == "len": return len(out_files) def bids_dict_to_dir(bids_dictionary): """Concatenate a (subject, session) or (subject, session, scan) tuple to a BIDS-style path""" subject = "sub-" + bids_dictionary['subject'] session = "ses-" + bids_dictionary['session'] return "/".join([subject,session]) def ss_to_path(subject_session): """Concatenate a (subject, session) or (subject, session, scan) tuple to a BIDS-style path""" subject = "sub-" + subject_session[0] session = "ses-" + subject_session[1] return "/".join([subject,session]) def bids_dict_to_source(bids_dictionary, source_format): from os import path source = source_format.format(**bids_dictionary) return source def out_path(selection_df, in_path, in_field='path', out_field='out_path', ): """Select the `out_path` field corresponding to a given `in_path` from a BIDS-style selection dataframe which includes an `out_path` column. """ out_path = selection_df[selection_df[in_field]==in_path][out_field].item() return out_path def container(selection_df, out_path, kind='', out_field='out_path', ): subject = selection_df[selection_df[out_field]==out_path]['subject'].item() session = selection_df[selection_df[out_field]==out_path]['session'].item() container = 'sub-{}/ses-{}'.format(subject,session) if kind: container += '/' container += kind return container def bids_naming(subject_session, scan_type, metadata, extra=['acq'], extension='.nii.gz', suffix='', ): """ Generate a BIDS filename from a subject-and-session iterator, a scan type, and a `pandas.DataFrame` metadata container. 
""" subject, session = subject_session filename = 'sub-{}'.format(subject) filename += '_ses-{}'.format(session) selection = metadata[(metadata['subject']==subject)&(metadata['session']==session)&(metadata['scan_type']==scan_type)] if selection.empty: return if 'acq' in extra: acq = selection['acquisition'] if not acq.isnull().all(): acq = acq.item() filename += '_acq-{}'.format(acq) trial = selection['trial'] if not trial.isnull().all(): trial = trial.item() filename += '_trial-{}'.format(trial) if not suffix: try: modality = selection['modality'] except KeyError: pass else: if not modality.isnull().all(): modality = modality.item() filename += '_{}'.format(modality) else: filename += '_{}'.format(suffix) filename += extension return filename def sss_filename(subject_session, scan, scan_prefix="trial", suffix="", extension=".nii.gz"): """Concatenate subject-condition and scan inputs to a BIDS-style filename Parameters ---------- subject_session : list Length-2 list of subject and session identifiers scan : string Scan identifier suffix : string, optional Measurement type suffix (commonly "bold" or "cbv") """ # we do not want to modify the subject_session iterator entry from copy import deepcopy subject_session = deepcopy(subject_session) subject_session[0] = "sub-" + subject_session[0] subject_session[1] = "ses-" + subject_session[1] if suffix: suffix = "_"+suffix if scan_prefix: scan = "".join([scan_prefix,"-",scan,suffix,extension]) else: scan = "".join([scan,suffix,extension]) subject_session.append(scan) return "_".join(subject_session)
gpl-3.0
-7,383,947,743,070,093,000
28.380282
167
0.693193
false
3.250909
false
false
false
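The naming helpers in the SAMRI record above build BIDS-style paths and file names from plain identifiers. A short usage sketch follows; the import path is assumed from the file's location in the record, and the subject/session identifiers are invented.

# Illustrative use of the BIDS-style naming helpers defined above
# (import path assumed from samri/pipelines/utils.py).
from samri.pipelines.utils import ss_to_path, sss_filename

print(ss_to_path(["4001", "ofM"]))
# -> sub-4001/ses-ofM

print(sss_filename(["4001", "ofM"], "EPI", suffix="bold"))
# -> sub-4001_ses-ofM_trial-EPI_bold.nii.gz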
andresailer/DIRAC
tests/Integration/Resources/Catalog/FIXME_Test_CatalogPlugin.py
1
18130
#! /usr/bin/env python # FIXME: it has to be seen if this is any useful # FIXME: to bring back to life from DIRAC.Core.Base.Script import parseCommandLine parseCommandLine() from DIRAC.Resources.Catalog.FileCatalog import FileCatalog from DIRAC.Core.Utilities.File import makeGuid from DIRAC.Core.Utilities.Adler import stringAdler from types import * import unittest,time,os,shutil,sys if len(sys.argv) < 2: print 'Usage: TestCatalogPlugIn.py CatalogClient' sys.exit() else: catalogClientToTest = sys.argv[1] class CatalogPlugInTestCase(unittest.TestCase): """ Base class for the CatalogPlugin test case """ def setUp(self): self.fullMetadata = ['Status', 'ChecksumType', 'OwnerRole', 'CreationDate', 'Checksum', 'ModificationDate', 'OwnerDN', 'Mode', 'GUID', 'Size'] self.dirMetadata = self.fullMetadata + ['NumberOfSubPaths'] self.fileMetadata = self.fullMetadata + ['NumberOfLinks'] self.catalog = FileCatalog(catalogs=[catalogClientToTest]) valid = self.catalog.isOK() self.assertTrue(valid) self.destDir = '/lhcb/test/unit-test/TestCatalogPlugin' self.link = "%s/link" % self.destDir # Clean the existing directory self.cleanDirectory() res = self.catalog.createDirectory(self.destDir) returnValue = self.parseResult(res,self.destDir) # Register some files to work with self.numberOfFiles = 2 self.files = [] for i in xrange(self.numberOfFiles): lfn = "%s/testFile_%d" % (self.destDir,i) res = self.registerFile(lfn) self.assertTrue(res) self.files.append(lfn) def registerFile(self,lfn): pfn = 'protocol://host:port/storage/path%s' % lfn size = 10000000 se = 'DIRAC-storage' guid = makeGuid() adler = stringAdler(guid) fileDict = {} fileDict[lfn] = {'PFN':pfn,'Size':size,'SE':se,'GUID':guid,'Checksum':adler} res = self.catalog.addFile(fileDict) return self.parseResult(res,lfn) def parseResult(self,res,path): self.assertTrue(res['OK']) self.assertTrue(res['Value']) self.assertTrue(res['Value']['Successful']) self.assertTrue(res['Value']['Successful'].has_key(path)) return res['Value']['Successful'][path] def parseError(self,res,path): self.assertTrue(res['OK']) self.assertTrue(res['Value']) self.assertTrue(res['Value']['Failed']) self.assertTrue(res['Value']['Failed'].has_key(path)) return res['Value']['Failed'][path] def cleanDirectory(self): res = self.catalog.exists(self.destDir) returnValue = self.parseResult(res,self.destDir) if not returnValue: return res = self.catalog.listDirectory(self.destDir) returnValue = self.parseResult(res,self.destDir) toRemove = returnValue['Files'].keys() if toRemove: self.purgeFiles(toRemove) res = self.catalog.removeDirectory(self.destDir) returnValue = self.parseResult(res,self.destDir) self.assertTrue(returnValue) def purgeFiles(self,lfns): for lfn in lfns: res = self.catalog.getReplicas(lfn,True) replicas = self.parseResult(res,lfn) for se,pfn in replicas.items(): repDict = {} repDict[lfn] = {'PFN':pfn,'SE':se} res = self.catalog.removeReplica(repDict) self.parseResult(res,lfn) res = self.catalog.removeFile(lfn) self.parseResult(res,lfn) def tearDown(self): self.cleanDirectory() class FileTestCase(CatalogPlugInTestCase): def test_isFile(self): # Test isFile with a file res = self.catalog.isFile(self.files[0]) returnValue = self.parseResult(res,self.files[0]) self.assertTrue(returnValue) # Test isFile for missing path res = self.catalog.isFile(self.files[0][:-1]) error = self.parseError(res,self.files[0][:-1]) self.assertEqual(error,"No such file or directory") # Test isFile with a directory res = self.catalog.isFile(self.destDir) returnValue = 
self.parseResult(res,self.destDir) self.assertFalse(returnValue) def test_getFileMetadata(self): # Test getFileMetadata with a file res = self.catalog.getFileMetadata(self.files[0]) returnValue = self.parseResult(res,self.files[0]) self.assertEqual(returnValue['Status'],'-') self.assertEqual(returnValue['Size'],10000000) self.metadata = ['Status', 'ChecksumType', 'NumberOfLinks', 'CreationDate', 'Checksum', 'ModificationDate', 'Mode', 'GUID', 'Size'] for key in self.metadata: self.assertTrue(returnValue.has_key(key)) # Test getFileMetadata for missing path res = self.catalog.getFileMetadata(self.files[0][:-1]) error = self.parseError(res,self.files[0][:-1]) self.assertEqual(error,"No such file or directory") # Test getFileMetadata with a directory res = self.catalog.getFileMetadata(self.destDir) returnValue = self.parseResult(res,self.destDir) self.assertEqual(returnValue['Status'],'-') self.assertEqual(returnValue['Size'],0) self.metadata = ['Status', 'ChecksumType', 'NumberOfLinks', 'CreationDate', 'Checksum', 'ModificationDate', 'Mode', 'GUID', 'Size'] for key in self.metadata: self.assertTrue(returnValue.has_key(key)) def test_getFileSize(self): # Test getFileSize with a file res = self.catalog.getFileSize(self.files[0]) returnValue = self.parseResult(res,self.files[0]) self.assertEqual(returnValue,10000000) # Test getFileSize for missing path res = self.catalog.getFileSize(self.files[0][:-1]) error = self.parseError(res,self.files[0][:-1]) self.assertEqual(error,"No such file or directory") # Test getFileSize with a directory res = self.catalog.getFileSize(self.destDir) returnValue = self.parseResult(res,self.destDir) self.assertEqual(returnValue,0) def test_getReplicas(self): # Test getReplicas with a file res = self.catalog.getReplicas(self.files[0]) returnValue = self.parseResult(res,self.files[0]) self.assertEqual(returnValue.keys(),['DIRAC-storage']) self.assertEqual(returnValue.values(),['protocol://host:port/storage/path%s' % self.files[0]]) # Test getReplicas for missing path res = self.catalog.getReplicas(self.files[0][:-1]) error = self.parseError(res,self.files[0][:-1]) self.assertEqual(error,"No such file or directory") # Test getReplicas with a directory res = self.catalog.getReplicas(self.destDir) error = self.parseError(res,self.destDir) # TODO return an error (currently 'File has zero replicas') #self.assertEqual(error,"Supplied path not a file") def test_getReplicaStatus(self): # Test getReplicaStatus with a file with existing replica replicaDict = {} replicaDict[self.files[0]] = 'DIRAC-storage' res = self.catalog.getReplicaStatus(replicaDict) returnValue = self.parseResult(res,self.files[0]) self.assertEqual(returnValue,'U') # Test getReplicaStatus with a file with non-existing replica replicaDict = {} replicaDict[self.files[0]] = 'Missing' res = self.catalog.getReplicaStatus(replicaDict) error = self.parseError(res,self.files[0]) self.assertEqual(error,"No replica at supplied site") # Test getReplicaStatus for missing path res = self.catalog.getReplicaStatus(self.files[0][:-1]) error = self.parseError(res,self.files[0][:-1]) self.assertEqual(error,"No such file or directory") # Test getReplicaStatus with a directory res = self.catalog.getReplicas(self.destDir) error = self.parseError(res,self.destDir) # TODO return an error (currently 'File has zero replicas') #self.assertEqual(error,"Supplied path not a file") def test_exists(self): # Test exists with a file res = self.catalog.exists(self.files[0]) returnValue = self.parseResult(res,self.files[0]) 
self.assertTrue(returnValue) # Test exists for missing path res = self.catalog.exists(self.files[0][:-1]) returnValue = self.parseResult(res,self.files[0][:-1]) self.assertFalse(returnValue) # Test exists with a directory res = self.catalog.exists(self.destDir) returnValue = self.parseResult(res,self.destDir) self.assertTrue(returnValue) def test_addReplica(self): # Test getReplicas with a file res = self.catalog.getReplicas(self.files[0]) returnValue = self.parseResult(res,self.files[0]) self.assertEqual(returnValue.keys(),['DIRAC-storage']) self.assertEqual(returnValue.values(),['protocol://host:port/storage/path%s' % self.files[0]]) # Test the addReplica with a file registrationDict = {} registrationDict[self.files[0]] = {'SE':'DIRAC-storage2','PFN':'protocol2://host:port/storage/path%s' % self.files[0]} res = self.catalog.addReplica(registrationDict) returnValue = self.parseResult(res,self.files[0]) self.assertTrue(returnValue) # Check the addReplica worked correctly res = self.catalog.getReplicas(self.files[0]) returnValue = self.parseResult(res,self.files[0]) self.assertEqual(sorted(returnValue.keys()),sorted(['DIRAC-storage','DIRAC-storage2'])) self.assertEqual(sorted(returnValue.values()),sorted(['protocol://host:port/storage/path%s' % self.files[0], 'protocol2://host:port/storage/path%s' % self.files[0]])) # Test the addReplica with a non-existant file registrationDict = {} registrationDict[self.files[0][:-1]] = {'SE':'DIRAC-storage3','PFN':'protocol3://host:port/storage/path%s' % self.files[0]} res = self.catalog.addReplica(registrationDict) error = self.parseError(res,self.files[0][:-1]) # TODO When the master fails it should return an error in FileCatalog #self.assertEqual(error,"No such file or directory") def test_setReplicaStatus(self): # Test setReplicaStatus with a file lfnDict = {} lfnDict[self.files[0]] = {'PFN': 'protocol://host:port/storage/path%s' % self.files[0],'SE':'DIRAC-storage' ,'Status':'P'} res = self.catalog.setReplicaStatus(lfnDict) returnValue = self.parseResult(res,self.files[0]) self.assertTrue(returnValue) # Check the setReplicaStatus worked correctly res = self.catalog.getReplicas(self.files[0]) returnValue = self.parseResult(res,self.files[0]) self.assertFalse(returnValue) #time.sleep(2) # Test setReplicaStatus with a file lfnDict = {} lfnDict[self.files[0]] = {'PFN': 'protocol://host:port/storage/path%s' % self.files[0],'SE':'DIRAC-storage' ,'Status':'U'} res = self.catalog.setReplicaStatus(lfnDict) returnValue = self.parseResult(res,self.files[0]) self.assertTrue(returnValue) # Check the setReplicaStatus worked correctly res = self.catalog.getReplicas(self.files[0]) returnValue = self.parseResult(res,self.files[0]) self.assertEqual(returnValue.keys(),['DIRAC-storage']) self.assertEqual(returnValue.values(),['protocol://host:port/storage/path%s' % self.files[0]]) # Test setReplicaStatus with non-existant file lfnDict = {} lfnDict[self.files[0][:-1]] = {'PFN': 'protocol://host:port/storage/path%s' % self.files[0][:-1],'SE':'DIRAC-storage' ,'Status':'U'} res = self.catalog.setReplicaStatus(lfnDict) error = self.parseError(res,self.files[0][:-1]) # TODO When the master fails it should return an error in FileCatalog #self.assertEqual(error,"No such file or directory") def test_setReplicaHost(self): # Test setReplicaHost with a file lfnDict = {} lfnDict[self.files[0]] = {'PFN': 'protocol://host:port/storage/path%s' % self.files[0],'SE':'DIRAC-storage' ,'NewSE':'DIRAC-storage2'} res = self.catalog.setReplicaHost(lfnDict) returnValue = 
self.parseResult(res,self.files[0]) self.assertTrue(returnValue) # Check the setReplicaHost worked correctly res = self.catalog.getReplicas(self.files[0]) returnValue = self.parseResult(res,self.files[0]) self.assertEqual(returnValue.keys(),['DIRAC-storage2']) self.assertEqual(returnValue.values(),['protocol://host:port/storage/path%s' % self.files[0]]) # Test setReplicaHost with non-existant file lfnDict = {} lfnDict[self.files[0][:-1]] = {'PFN': 'protocol://host:port/storage/path%s' % self.files[0][:-1],'SE':'DIRAC-storage' ,'NewSE':'DIRAC-storage2'} res = self.catalog.setReplicaHost(lfnDict) error = self.parseError(res,self.files[0][:-1]) # TODO When the master fails it should return an error in FileCatalog #self.assertEqual(error,"No such file or directory") class DirectoryTestCase(CatalogPlugInTestCase): def test_isDirectory(self): # Test isDirectory with a directory res = self.catalog.isDirectory(self.destDir) returnValue = self.parseResult(res,self.destDir) self.assertTrue(returnValue) # Test isDirectory with a file res = self.catalog.isDirectory(self.files[0]) returnValue = self.parseResult(res,self.files[0]) self.assertFalse(returnValue) # Test isDirectory for missing path res = self.catalog.isDirectory(self.files[0][:-1]) error = self.parseError(res,self.files[0][:-1]) self.assertEqual(error,"No such file or directory") def test_getDirectoryMetadata(self): # Test getDirectoryMetadata with a directory res = self.catalog.getDirectoryMetadata(self.destDir) returnValue = self.parseResult(res,self.destDir) self.assertEqual(returnValue['Status'],'-') self.assertEqual(returnValue['Size'],0) self.assertEqual(returnValue['NumberOfSubPaths'],self.numberOfFiles) for key in self.dirMetadata: self.assertTrue(returnValue.has_key(key)) # Test getDirectoryMetadata with a file res = self.catalog.getDirectoryMetadata(self.files[0]) returnValue = self.parseResult(res,self.files[0]) self.assertEqual(returnValue['Status'],'-') self.assertEqual(returnValue['Size'],10000000) for key in self.dirMetadata: self.assertTrue(returnValue.has_key(key)) # Test getDirectoryMetadata for missing path res = self.catalog.getDirectoryMetadata(self.files[0][:-1]) error = self.parseError(res,self.files[0][:-1]) self.assertEqual(error,"No such file or directory") def test_listDirectory(self): # Test listDirectory for directory res = self.catalog.listDirectory(self.destDir,True) returnValue = self.parseResult(res,self.destDir) self.assertEqual(returnValue.keys(),['Files','SubDirs','Links']) self.assertFalse(returnValue['SubDirs']) self.assertFalse(returnValue['Links']) self.assertEqual(sorted(returnValue['Files'].keys()),sorted(self.files)) directoryFiles = returnValue['Files'] for lfn,fileDict in directoryFiles.items(): self.assertTrue(fileDict.has_key('Replicas')) self.assertEqual(len(fileDict['Replicas']),1) self.assertTrue(fileDict.has_key('MetaData')) for key in self.fileMetadata: self.assertTrue(fileDict['MetaData'].has_key(key)) # Test listDirectory for a file res = self.catalog.listDirectory(self.files[0],True) error = self.parseError(res,self.files[0]) self.assertEqual(error,"Not a directory") # Test listDirectory for missing path res = self.catalog.listDirectory(self.files[0][:-1]) error = self.parseError(res,self.files[0][:-1]) self.assertEqual(error,"No such file or directory") def test_getDirectoryReplicas(self): # Test getDirectoryReplicas for directory res = self.catalog.getDirectoryReplicas(self.destDir,True) returnValue = self.parseResult(res,self.destDir) 
self.assertTrue(returnValue.has_key(self.files[0])) fileReplicas = returnValue[self.files[0]] self.assertEqual(fileReplicas.keys(),['DIRAC-storage']) self.assertEqual(fileReplicas.values(),['protocol://host:port/storage/path%s' % self.files[0]]) # Test getDirectoryReplicas for a file res = self.catalog.getDirectoryReplicas(self.files[0],True) error = self.parseError(res,self.files[0]) self.assertEqual(error,"Not a directory") # Test getDirectoryReplicas for missing path res = self.catalog.getDirectoryReplicas(self.files[0][:-1]) error = self.parseError(res,self.files[0][:-1]) self.assertEqual(error,"No such file or directory") def test_getDirectorySize(self): # Test getDirectorySize for directory res = self.catalog.getDirectorySize(self.destDir) returnValue = self.parseResult(res,self.destDir) for key in ['Files','TotalSize','SubDirs','ClosedDirs','SiteUsage']: self.assertTrue(returnValue.has_key(key)) self.assertEqual(returnValue['Files'],self.numberOfFiles) self.assertEqual(returnValue['TotalSize'],(self.numberOfFiles*10000000)) #TODO create a sub dir, check, close it, check self.assertFalse(returnValue['SubDirs']) self.assertFalse(returnValue['ClosedDirs']) usage = returnValue['SiteUsage'] self.assertEqual(usage.keys(),['DIRAC-storage']) self.assertEqual(usage['DIRAC-storage']['Files'],self.numberOfFiles) self.assertEqual(usage['DIRAC-storage']['Size'],(self.numberOfFiles*10000000)) # Test getDirectorySize for a file res = self.catalog.getDirectorySize(self.files[0]) error = self.parseError(res,self.files[0]) self.assertEqual(error,"Not a directory") # Test getDirectorySize for missing path res = self.catalog.getDirectorySize(self.files[0][:-1]) error = self.parseError(res,self.files[0][:-1]) self.assertEqual(error,"No such file or directory") class LinkTestCase(CatalogPlugInTestCase): #'createLink','removeLink','isLink','readLink' pass class DatasetTestCase(CatalogPlugInTestCase): #'removeDataset','removeFileFromDataset','createDataset' pass if __name__ == '__main__': #TODO getDirectoryMetadata and getFileMetadata should be merged #TODO Fix the return structure of write operations from FileCatalog suite = unittest.defaultTestLoader.loadTestsFromTestCase(FileTestCase) #suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(FileTestCase)) #suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(DirectoryTestCase)) testResult = unittest.TextTestRunner(verbosity=2).run(suite) sys.exit(not testResult.wasSuccessful())
gpl-3.0
3,103,157,516,962,040,000
44.325
170
0.703475
false
3.558391
true
false
false
nmalaguti/mini-halite
tournament/migrations/0001_initial.py
1
2061
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-09 05:56
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Bot',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, unique=True)),
                ('mu', models.FloatField(default=25.0)),
                ('sigma', models.FloatField(default=8.33333)),
                ('enabled', models.BooleanField(default=True)),
            ],
        ),
        migrations.CreateModel(
            name='Match',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField()),
                ('replay', models.FileField(upload_to='hlt/')),
                ('seed', models.CharField(max_length=255)),
                ('width', models.IntegerField()),
                ('height', models.IntegerField()),
            ],
            options={
                'verbose_name_plural': 'matches',
            },
        ),
        migrations.CreateModel(
            name='MatchResult',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('rank', models.IntegerField()),
                ('mu', models.FloatField()),
                ('sigma', models.FloatField()),
                ('last_frame_alive', models.IntegerField()),
                ('bot', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='matches', to='tournament.Bot')),
                ('match', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='results', to='tournament.Match')),
            ],
        ),
    ]
mit
5,398,077,657,251,073,000
37.886792
137
0.540029
false
4.5
false
false
false
rleigh-dundee/openmicroscopy
components/tools/OmeroWeb/omeroweb/webgateway/views.py
1
64885
# # webgateway/views.py - django application view handling functions # # Copyright (c) 2007, 2008, 2009 Glencoe Software, Inc. All rights reserved. # # This software is distributed under the terms described by the LICENCE file # you can find at the root of the distribution bundle, which states you are # free to use it only for non commercial purposes. # If the file is missing please request a copy by contacting # jason@glencoesoftware.com. # # Author: Carlos Neves <carlos(at)glencoesoftware.com> import re import omero import omero.clients from django.http import HttpResponse, HttpResponseServerError, HttpResponseRedirect, Http404 from django.utils import simplejson from django.utils.encoding import smart_str from django.utils.http import urlquote from django.core import template_loader from django.core.urlresolvers import reverse from django.conf import settings from django.template import RequestContext as Context from omero.rtypes import rlong, unwrap from marshal import imageMarshal, shapeMarshal try: from hashlib import md5 except: from md5 import md5 from cStringIO import StringIO from omero import client_wrapper, ApiUsageException from omero.gateway import timeit, TimeIt import Ice import settings #from models import StoredConnection from webgateway_cache import webgateway_cache, CacheBase, webgateway_tempfile cache = CacheBase() connectors = {} CONNECTOR_POOL_SIZE = 70 CONNECTOR_POOL_KEEP = 0.75 # keep only SIZE-SIZE*KEEP of the connectors if POOL_SIZE is reached import logging, os, traceback, time, zipfile, shutil from omeroweb.decorators import login_required from omeroweb.connector import Connector logger = logging.getLogger(__name__) try: import Image import ImageDraw except: #pragma: nocover try: from PIL import Image from PIL import ImageDraw except: logger.error('No PIL installed') def _safestr (s): return unicode(s).encode('utf-8') class UserProxy (object): """ Represents the current user of the connection, with methods delegating to the connection itself. 
""" def __init__ (self, blitzcon): """ Initialises the User proxy with the L{omero.gateway.BlitzGateway} connection @param blitzcon: connection @type blitzcon: L{omero.gateway.BlitzGateway} """ self._blitzcon = blitzcon self.loggedIn = False def logIn (self): """ Sets the loggedIn Flag to True """ self.loggedIn = True def isAdmin (self): """ True if the current user is an admin @return: True if the current user is an admin @rtype: Boolean """ return self._blitzcon.isAdmin() def canBeAdmin (self): """ True if the current user can be admin @return: True if the current user can be admin @rtype: Boolean """ return self._blitzcon.canBeAdmin() def getId (self): """ Returns the ID of the current user @return: User ID @rtype: Long """ return self._blitzcon.getUserId() def getName (self): """ Returns the Name of the current user @return: User Name @rtype: String """ return self._blitzcon.getUser().omeName def getFirstName (self): """ Returns the first name of the current user @return: First Name @rtype: String """ return self._blitzcon.getUser().firstName or self.getName() # def getPreferences (self): # return self._blitzcon._user.getPreferences() # # def getUserObj (self): # return self._blitzcon._user #class SessionCB (object): # def _log (self, what, c): # logger.debug('CONN:%s %s:%d:%s' % (what, c._user, os.getpid(), c._sessionUuid)) # # def create (self, c): # self._log('create',c) # # def join (self, c): # self._log('join',c) # # def close (self, c): # self._log('close',c) #_session_cb = SessionCB() def _createConnection (server_id, sUuid=None, username=None, passwd=None, host=None, port=None, retry=True, group=None, try_super=False, secure=False, anonymous=False, useragent=None): """ Attempts to create a L{omero.gateway.BlitzGateway} connection. Tries to join an existing session for the specified user, using sUuid. @param server_id: Way of referencing the server, used in connection dict keys. Int or String @param sUuid: Session ID - used for attempts to join sessions etc without password @param username: User name to log on with @param passwd: Password @param host: Host name @param port: Port number @param retry: Boolean @param group: String? TODO: parameter is ignored. @param try_super: If True, try to log on as super user, 'system' group @param secure: If True, use an encrypted connection @param anonymous: Boolean @param useragent: Log which python clients use this connection. E.g. 
'OMERO.webadmin' @return: The connection @rtype: L{omero.gateway.BlitzGateway} """ try: if anonymous: username = settings.PUBLIC_USER passwd = settings.PUBLIC_PASSWORD blitzcon = client_wrapper(username, passwd, host=host, port=port, group=None, try_super=try_super, secure=secure, anonymous=anonymous, useragent=useragent) blitzcon.connect(sUuid=sUuid) blitzcon.server_id = server_id blitzcon.user = UserProxy(blitzcon) if blitzcon._anonymous and hasattr(blitzcon.c, 'onEventLogs'): logger.debug('Connecting weblitz_cache to eventslog') def eventlistener (e): return webgateway_cache.eventListener(server_id, e) blitzcon.c.onEventLogs(eventlistener) return blitzcon except: logger.debug(traceback.format_exc()) if not retry: return None logger.error("Critical error during connect, retrying after _purge") logger.debug(traceback.format_exc()) _purge(force=True) return _createConnection(server_id, sUuid, username, passwd, retry=False, host=host, port=port, group=None, try_super=try_super, anonymous=anonymous, useragent=useragent) def _purge (force=False): if force or len(connectors) > CONNECTOR_POOL_SIZE: keys = connectors.keys() for i in range(int(len(connectors)*CONNECTOR_POOL_KEEP)): try: c = connectors.pop(keys[i]) c.seppuku(softclose=True) except: logger.debug(traceback.format_exc()) logger.info('reached connector_pool_size (%d), size after purge: (%d)' % (CONNECTOR_POOL_SIZE, len(connectors))) def _split_channel_info (rchannels): """ Splits the request query channel information for images into a sequence of channels, window ranges and channel colors. @param rchannels: The request string with channel info. E.g 1|100:505$0000FF,-2,3|620:3879$FF0000 @type rchannels: String @return: E.g. [1, -2, 3] [[100.0, 505.0], (None, None), [620.0, 3879.0]] [u'0000FF', None, u'FF0000'] @rtype: tuple of 3 lists """ channels = [] windows = [] colors = [] for chan in rchannels.split(','): chan = chan.split('|') t = chan[0].strip() color = None if t.find('$')>=0: t,color = t.split('$') try: channels.append(int(t)) ch_window = (None, None) if len(chan) > 1: t = chan[1].strip() if t.find('$')>=0: t, color = t.split('$') t = t.split(':') if len(t) == 2: try: ch_window = [float(x) for x in t] except ValueError: pass windows.append(ch_window) colors.append(color) except ValueError: pass logger.debug(str(channels)+","+str(windows)+","+str(colors)) return channels, windows, colors def getImgDetailsFromReq (request, as_string=False): """ Break the GET information from the request object into details on how to render the image. The following keys are recognized: z - Z axis position t - T axis position q - Quality set (0,0..1,0) m - Model (g for greyscale, c for color) p - Projection (see blitz_gateway.ImageWrapper.PROJECTIONS for keys) x - X position (for now based on top/left offset on the browser window) y - Y position (same as above) c - a comma separated list of channels to be rendered (start index 1) - format for each entry [-]ID[|wndst:wndend][#HEXCOLOR][,...] zm - the zoom setting (as a percentual value) @param request: http request with keys above @param as_string: If True, return a string representation of the rendering details @return: A dict or String representation of rendering details above. 
@rtype: Dict or String """ r = request.REQUEST rv = {} for k in ('z', 't', 'q', 'm', 'zm', 'x', 'y', 'p'): if r.has_key(k): rv[k] = r[k] if r.has_key('c'): rv['c'] = [] ci = _split_channel_info(r['c']) logger.debug(ci) for i in range(len(ci[0])): # a = abs channel, i = channel, s = window start, e = window end, c = color rv['c'].append({'a':abs(ci[0][i]), 'i':ci[0][i], 's':ci[1][i][0], 'e':ci[1][i][1], 'c':ci[2][i]}) if as_string: return "&".join(["%s=%s" % (x[0], x[1]) for x in rv.items()]) return rv @login_required() def render_birds_eye_view (request, iid, size=None, conn=None, **kwargs): """ Returns an HttpResponse wrapped jpeg with the rendered bird's eye view for image 'iid'. Rendering settings can be specified in the request parameters as in L{render_image} and L{render_image_region}; see L{getImgDetailsFromReq} for a complete list. @param request: http request @param iid: Image ID @param conn: L{omero.gateway.BlitzGateway} connection @param size: Maximum size of the longest side of the resulting bird's eye view. @return: http response containing jpeg """ server_id = request.session['connector'].server_id img = _get_prepared_image(request, iid, conn=conn, server_id=server_id) if img is None: logger.debug("(b)Image %s not found..." % (str(iid))) raise Http404 img, compress_quality = img return HttpResponse(img.renderBirdsEyeView(size), mimetype='image/jpeg') @login_required() def render_thumbnail (request, iid, w=None, h=None, conn=None, _defcb=None, **kwargs): """ Returns an HttpResponse wrapped jpeg with the rendered thumbnail for image 'iid' @param request: http request @param iid: Image ID @param w: Thumbnail max width. 64 by default @param h: Thumbnail max height @return: http response containing jpeg """ server_id = request.session['connector'].server_id if w is None: size = (64,) else: if h is None: size = (int(w),) else: size = (int(w), int(h)) user_id = conn.getUserId() jpeg_data = webgateway_cache.getThumb(request, server_id, user_id, iid, size) if jpeg_data is None: prevent_cache = False img = conn.getObject("Image", iid) if img is None: logger.debug("(b)Image %s not found..." % (str(iid))) if _defcb: jpeg_data = _defcb(size=size) prevent_cache = True else: raise Http404 else: jpeg_data = img.getThumbnail(size=size) if jpeg_data is None: logger.debug("(c)Image %s not found..." 
% (str(iid))) if _defcb: jpeg_data = _defcb(size=size) prevent_cache = True else: return HttpResponseServerError('Failed to render thumbnail') else: prevent_cache = img._thumbInProgress if not prevent_cache: webgateway_cache.setThumb(request, server_id, user_id, iid, jpeg_data, size) else: pass rsp = HttpResponse(jpeg_data, mimetype='image/jpeg') return rsp @login_required() def render_roi_thumbnail (request, roiId, w=None, h=None, conn=None, **kwargs): """ For the given ROI, choose the shape to render (first time-point, mid z-section) then render a region around that shape, scale to width and height (or default size) and draw the shape on to the region """ server_id = request.session['connector'].server_id # need to find the z indices of the first shape in T roiResult = conn.getRoiService().findByRoi(long(roiId), None, conn.SERVICE_OPTS) if roiResult is None or roiResult.rois is None: raise Http404 zz = set() minT = None shapes = {} for roi in roiResult.rois: imageId = roi.image.id.val for s in roi.copyShapes(): if s is None: # seems possible in some situations continue t = s.getTheT().getValue() z = s.getTheZ().getValue() shapes[(z,t)] = s if minT is None: minT = t if t < minT: zz = set([z]) minT = t elif minT == t: zz.add(z) zList = list(zz) zList.sort() midZ = zList[len(zList)/2] s = shapes[(midZ, minT)] pi = _get_prepared_image(request, imageId, server_id=server_id, conn=conn) if pi is None: raise Http404 image, compress_quality = pi return get_shape_thumbnail (request, conn, image, s, compress_quality) @login_required() def render_shape_thumbnail (request, shapeId, w=None, h=None, conn=None, **kwargs): """ For the given Shape, redner a region around that shape, scale to width and height (or default size) and draw the shape on to the region. """ server_id = request.session['connector'].server_id # need to find the z indices of the first shape in T params = omero.sys.Parameters() params.map = {'id':rlong(shapeId)} shape = conn.getQueryService().findByQuery("select s from Shape s join fetch s.roi where s.id = :id", params) if shape is None: raise Http404 imageId = shape.roi.image.id.val pi = _get_prepared_image(request, imageId, server_id=server_id, conn=conn) if pi is None: raise Http404 image, compress_quality = pi return get_shape_thumbnail (request, conn, image, shape, compress_quality) def get_shape_thumbnail (request, conn, image, s, compress_quality): """ Render a region around the specified Shape, scale to width and height (or default size) and draw the shape on to the region. Returns jpeg data. @param image: ImageWrapper @param s: omero.model.Shape """ MAX_WIDTH = 250 color = request.REQUEST.get("color", "fff") colours = {"f00":(255,0,0), "0f0":(0,255,0), "00f":(0,0,255), "ff0":(255,255,0), "fff":(255,255,255), "000":(0,0,0)} lineColour = colours["f00"] if color in colours: lineColour = colours[color] bg_color = (221,221,221) # used for padding if we go outside the image area def pointsStringToXYlist(string): """ Method for converting the string returned from omero.model.ShapeI.getPoints() into list of (x,y) points. 
E.g: "points[309,427, 366,503, 190,491] points1[309,427, 366,503, 190,491] points2[309,427, 366,503, 190,491]" """ pointLists = string.strip().split("points") if len(pointLists) < 2: logger.error("Unrecognised ROI shape 'points' string: %s" % string) return "" firstList = pointLists[1] xyList = [] for xy in firstList.strip(" []").split(", "): x, y = xy.split(",") xyList.append( ( int( x.strip() ), int(y.strip() ) ) ) return xyList def xyListToBbox(xyList): """ Returns a bounding box (x,y,w,h) that will contain the shape represented by the XY points list """ xList, yList = [], [] for xy in xyList: x, y = xy xList.append(x) yList.append(y) return (min(xList), min(yList), max(xList)-min(xList), max(yList)-min(yList)) bBox = None # bounding box: (x, y, w, h) shape = {} theT = s.getTheT().getValue() theZ = s.getTheZ().getValue() if type(s) == omero.model.RectI: shape['type'] = 'Rectangle' shape['x'] = s.getX().getValue() shape['y'] = s.getY().getValue() shape['width'] = s.getWidth().getValue() shape['height'] = s.getHeight().getValue() bBox = (shape['x'], shape['y'], shape['width'], shape['height']) elif type(s) == omero.model.MaskI: shape['type'] = 'Mask' shape['x'] = s.getX().getValue() shape['y'] = s.getY().getValue() shape['width'] = s.getWidth().getValue() shape['height'] = s.getHeight().getValue() bBox = (shape['x'], shape['y'], shape['width'], shape['height']) # TODO: support for mask elif type(s) == omero.model.EllipseI: shape['type'] = 'Ellipse' shape['cx'] = int(s.getCx().getValue()) shape['cy'] = int(s.getCy().getValue()) shape['rx'] = int(s.getRx().getValue()) shape['ry'] = int(s.getRy().getValue()) bBox = (shape['cx']-shape['rx'], shape['cy']-shape['ry'], 2*shape['rx'], 2*shape['ry']) elif type(s) == omero.model.PolylineI: shape['type'] = 'PolyLine' shape['xyList'] = pointsStringToXYlist(s.getPoints().getValue()) bBox = xyListToBbox(shape['xyList']) elif type(s) == omero.model.LineI: shape['type'] = 'Line' shape['x1'] = int(s.getX1().getValue()) shape['x2'] = int(s.getX2().getValue()) shape['y1'] = int(s.getY1().getValue()) shape['y2'] = int(s.getY2().getValue()) x = min(shape['x1'],shape['x2']) y = min(shape['y1'],shape['y2']) bBox = (x, y, max(shape['x1'],shape['x2'])-x, max(shape['y1'],shape['y2'])-y) elif type(s) == omero.model.PointI: shape['type'] = 'Point' shape['cx'] = s.getCx().getValue() shape['cy'] = s.getCy().getValue() bBox = (shape['cx']-50, shape['cy']-50, 100, 100) elif type(s) == omero.model.PolygonI: shape['type'] = 'Polygon' shape['xyList'] = pointsStringToXYlist(s.getPoints().getValue()) bBox = xyListToBbox(shape['xyList']) elif type(s) == omero.model.LabelI: shape['type'] = 'Label' shape['x'] = s.getX().getValue() shape['y'] = s.getY().getValue() bBox = (shape['x']-50, shape['y']-50, 100, 100) else: logger.debug("Shape type not supported: %s" % str(type(s))) #print shape # we want to render a region larger than the bounding box x,y,w,h = bBox requiredWidth = max(w,h*3/2) # make the aspect ratio (w/h) = 3/2 requiredHeight = requiredWidth*2/3 newW = int(requiredWidth * 1.5) # make the rendered region 1.5 times larger than the bounding box newH = int(requiredHeight * 1.5) # Don't want the region to be smaller than the thumbnail dimensions if newW < MAX_WIDTH: newW = MAX_WIDTH newH = newW*2/3 # Don't want the region to be bigger than a 'Big Image'! 
def getConfigValue(key): try: return conn.getConfigService().getConfigValue(key) except: logger.warn("webgateway: get_shape_thumbnail() could not get Config-Value for %s" % key) pass max_plane_width = getConfigValue("omero.pixeldata.max_plane_width") max_plane_height = getConfigValue("omero.pixeldata.max_plane_height") if max_plane_width is None or max_plane_height is None or (newW > int(max_plane_width)) or (newH > int(max_plane_height)): # generate dummy image to return dummy = Image.new('RGB', (MAX_WIDTH, MAX_WIDTH*2/3), bg_color) draw = ImageDraw.Draw(dummy) draw.text((10,30), "Shape too large to \ngenerate thumbnail", fill=(255,0,0)) rv = StringIO() dummy.save(rv, 'jpeg', quality=90) return HttpResponse(rv.getvalue(), mimetype='image/jpeg') xOffset = (newW - w)/2 yOffset = (newH - h)/2 newX = int(x - xOffset) newY = int(y - yOffset) # Need to check if any part of our region is outside the image. (assume that SOME of the region is within the image!) sizeX = image.getSizeX() sizeY = image.getSizeY() left_xs, right_xs, top_xs, bottom_xs = 0,0,0,0 if newX < 0: newW = newW + newX left_xs = abs(newX) newX = 0 if newY < 0: newH = newH + newY top_xs = abs(newY) newY = 0 if newW+newX > sizeX: right_xs = (newW+newX) - sizeX newW = newW - right_xs if newH+newY > sizeY: bottom_xs = (newH+newY) - sizeY newH = newH - bottom_xs # now we should be getting the correct region jpeg_data = image.renderJpegRegion(theZ,theT,newX, newY, newW, newH,level=None, compression=compress_quality) img = Image.open(StringIO(jpeg_data)) # add back on the xs we were forced to trim if left_xs != 0 or right_xs != 0 or top_xs != 0 or bottom_xs != 0: jpg_w, jpg_h = img.size xs_w = jpg_w + right_xs + left_xs xs_h = jpg_h + bottom_xs + top_xs xs_image = Image.new('RGBA', (xs_w, xs_h), bg_color) xs_image.paste(img, (left_xs, top_xs)) img = xs_image # we have our full-sized region. Need to resize to thumbnail. 
current_w, current_h = img.size factor = float(MAX_WIDTH) / current_w resizeH = current_h * factor img = img.resize((MAX_WIDTH, resizeH)) draw = ImageDraw.Draw(img) if shape['type'] == 'Rectangle': rectX = int(xOffset * factor) rectY = int(yOffset * factor) rectW = int((w+xOffset) * factor) rectH = int((h+yOffset) * factor) draw.rectangle((rectX, rectY, rectW, rectH), outline=lineColour) draw.rectangle((rectX-1, rectY-1, rectW+1, rectH+1), outline=lineColour) # hack to get line width of 2 elif shape['type'] == 'Line': lineX1 = (shape['x1'] - newX + left_xs) * factor lineX2 = (shape['x2'] - newX + left_xs) * factor lineY1 = (shape['y1'] - newY + top_xs) * factor lineY2 = (shape['y2'] - newY + top_xs) * factor draw.line((lineX1, lineY1, lineX2, lineY2), fill=lineColour, width=2) elif shape['type'] == 'Ellipse': rectX = int(xOffset * factor) rectY = int(yOffset * factor) rectW = int((w+xOffset) * factor) rectH = int((h+yOffset) * factor) draw.ellipse((rectX, rectY, rectW, rectH), outline=lineColour) draw.ellipse((rectX-1, rectY-1, rectW+1, rectH+1), outline=lineColour) # hack to get line width of 2 elif shape['type'] == 'Point': point_radius = 2 rectX = (MAX_WIDTH/2) - point_radius rectY = int(resizeH/2) - point_radius rectW = rectX + (point_radius * 2) rectH = rectY + (point_radius * 2) draw.ellipse((rectX, rectY, rectW, rectH), outline=lineColour) draw.ellipse((rectX-1, rectY-1, rectW+1, rectH+1), outline=lineColour) # hack to get line width of 2 elif 'xyList' in shape: #resizedXY = [ (int(x*factor), int(y*factor)) for (x,y) in shape['xyList'] ] def resizeXY(xy): x,y = xy return (int((x-newX + left_xs)*factor), int((y-newY + top_xs)*factor)) resizedXY = [ resizeXY(xy) for xy in shape['xyList'] ] #draw.polygon(resizedXY, outline=lineColour) # doesn't support 'width' of line for l in range(1, len(resizedXY)): x1, y1 = resizedXY[l-1] x2, y2 = resizedXY[l] draw.line((x1, y1, x2, y2), fill=lineColour, width=2) start_x, start_y = resizedXY[0] if shape['type'] != 'PolyLine': draw.line((x2, y2, start_x, start_y), fill=lineColour, width=2) rv = StringIO() compression = 0.9 img.save(rv, 'jpeg', quality=int(compression*100)) jpeg = rv.getvalue() return HttpResponse(jpeg, mimetype='image/jpeg') def _get_signature_from_request (request): """ returns a string that identifies this image, along with the settings passed on the request. Useful for using as img identifier key, for prepared image. @param request: http request @return: String """ r = request.REQUEST rv = r.get('m','_') + r.get('p','_')+r.get('c','_')+r.get('q', '_') return rv def _get_prepared_image (request, iid, server_id=None, conn=None, saveDefs=False, retry=True): """ Fetches the Image object for image 'iid' and prepares it according to the request query, setting the channels, rendering model and projection arguments. The compression level is parsed and returned too. For parameters in request, see L{getImgDetailsFromReq} @param request: http request @param iid: Image ID @param conn: L{omero.gateway.BlitzGateway} connection @param saveDefs: Try to save the rendering settings, default z and t. 
@param retry: Try an extra attempt at this method @return: Tuple (L{omero.gateway.ImageWrapper} image, quality) """ r = request.REQUEST logger.debug('Preparing Image:%r saveDefs=%r ' \ 'retry=%r request=%r conn=%s' % (iid, saveDefs, retry, r, str(conn))) img = conn.getObject("Image", iid) if img is None: return if r.has_key('c'): logger.debug("c="+r['c']) channels, windows, colors = _split_channel_info(r['c']) if not img.setActiveChannels(channels, windows, colors): logger.debug("Something bad happened while setting the active channels...") if r.get('m', None) == 'g': img.setGreyscaleRenderingModel() elif r.get('m', None) == 'c': img.setColorRenderingModel() img.setProjection(r.get('p', None)) img.setInvertedAxis(bool(r.get('ia', "0") == "1")) compress_quality = r.get('q', None) if saveDefs: r.has_key('z') and img._re.setDefaultZ(long(r['z'])-1) r.has_key('t') and img._re.setDefaultT(long(r['t'])-1) img.saveDefaults() return (img, compress_quality) @login_required() def render_image_region(request, iid, z, t, conn=None, **kwargs): """ Returns a jpeg of the OMERO image, rendering only a region specified in query string as region=x,y,width,height. E.g. region=0,512,256,256 Rendering settings can be specified in the request parameters. @param request: http request @param iid: image ID @param z: Z index @param t: T index @param conn: L{omero.gateway.BlitzGateway} connection @return: http response wrapping jpeg """ server_id = request.session['connector'].server_id # if the region=x,y,w,h is not parsed correctly to give 4 ints then we simply provide whole image plane. # alternatively, could return a 404? #if h == None: # return render_image (request, iid, z, t, server_id=None, _conn=None, **kwargs) pi = _get_prepared_image(request, iid, server_id=server_id, conn=conn) if pi is None: raise Http404 img, compress_quality = pi tile = request.REQUEST.get('tile', None) region = request.REQUEST.get('region', None) level = None if tile: try: img._prepareRenderingEngine() tiles = img._re.requiresPixelsPyramid() w, h = img._re.getTileSize() levels = img._re.getResolutionLevels()-1 zxyt = tile.split(",") #w = int(zxyt[3]) #h = int(zxyt[4]) level = levels-int(zxyt[0]) x = int(zxyt[1])*w y = int(zxyt[2])*h except: logger.debug("render_image_region: tile=%s" % tile) logger.debug(traceback.format_exc()) elif region: try: xywh = region.split(",") x = int(xywh[0]) y = int(xywh[1]) w = int(xywh[2]) h = int(xywh[3]) except: logger.debug("render_image_region: region=%s" % region) logger.debug(traceback.format_exc()) # region details in request are used as key for caching. jpeg_data = webgateway_cache.getImage(request, server_id, img, z, t) if jpeg_data is None: jpeg_data = img.renderJpegRegion(z,t,x,y,w,h,level=level, compression=compress_quality) if jpeg_data is None: raise Http404 webgateway_cache.setImage(request, server_id, img, z, t, jpeg_data) rsp = HttpResponse(jpeg_data, mimetype='image/jpeg') return rsp @login_required() def render_image (request, iid, z=None, t=None, conn=None, **kwargs): """ Renders the image with id {{iid}} at {{z}} and {{t}} as jpeg. Many options are available from the request dict. See L{getImgDetailsFromReq} for list. I am assuming a single Pixels object on image with image-Id='iid'. 
May be wrong @param request: http request @param iid: image ID @param z: Z index @param t: T index @param conn: L{omero.gateway.BlitzGateway} connection @return: http response wrapping jpeg """ server_id = request.session['connector'].server_id pi = _get_prepared_image(request, iid, server_id=server_id, conn=conn) if pi is None: raise Http404 img, compress_quality = pi jpeg_data = webgateway_cache.getImage(request, server_id, img, z, t) if jpeg_data is None: jpeg_data = img.renderJpeg(z,t, compression=compress_quality) if jpeg_data is None: raise Http404 webgateway_cache.setImage(request, server_id, img, z, t, jpeg_data) rsp = HttpResponse(jpeg_data, mimetype='image/jpeg') return rsp @login_required() def render_ome_tiff (request, ctx, cid, conn=None, **kwargs): """ Renders the OME-TIFF representation of the image(s) with id cid in ctx (i)mage, (d)ataset, or (p)roject. For multiple images export, images that require pixels pyramid (big images) will be silently skipped. If exporting a single big image or if all images in a multple image export are big, a 404 will be triggered. A request parameter dryrun can be passed to return the count of images that would actually be exported. @param request: http request @param ctx: 'p' or 'd' or 'i' @param cid: Project, Dataset or Image ID @param conn: L{omero.gateway.BlitzGateway} connection @return: http response wrapping the tiff (or zip for multiple files), or redirect to temp file/zip if dryrun is True, returns count of images that would be exported """ server_id = request.session['connector'].server_id imgs = [] if ctx == 'p': obj = conn.getObject("Project", cid) if obj is None: raise Http404 for d in obj.listChildren(): imgs.extend(list(d.listChildren())) name = obj.getName() elif ctx == 'd': obj = conn.getObject("Dataset", cid) if obj is None: raise Http404 imgs.extend(list(obj.listChildren())) selection = filter(None, request.REQUEST.get('selection', '').split(',')) if len(selection): logger.debug(selection) logger.debug(imgs) imgs = filter(lambda x: str(x.getId()) in selection, imgs) logger.debug(imgs) if len(imgs) == 0: raise Http404 name = '%s-%s' % (obj.getParent().getName(), obj.getName()) elif ctx == 'w': obj = conn.getObject("Well", cid) if obj is None: raise Http404 imgs.extend([x.getImage() for x in obj.listChildren()]) plate = obj.getParent() coord = "%s%s" % (plate.getRowLabels()[obj.row],plate.getColumnLabels()[obj.column]) name = '%s-%s-%s' % (plate.getParent().getName(), plate.getName(), coord) else: obj = conn.getObject("Image", cid) if obj is None: raise Http404 imgs.append(obj) imgs = filter(lambda x: not x.requiresPixelsPyramid(), imgs) if request.REQUEST.get('dryrun', False): rv = simplejson.dumps(len(imgs)) c = request.REQUEST.get('callback', None) if c is not None and not kwargs.get('_internal', False): rv = '%s(%s)' % (c, rv) return HttpResponse(rv, mimetype='application/javascript') if len(imgs) == 0: raise Http404 if len(imgs) == 1: obj = imgs[0] key = '_'.join((str(x.getId()) for x in obj.getAncestry())) + '_' + str(obj.getId()) + '_ome_tiff' fpath, rpath, fobj = webgateway_tempfile.new(str(obj.getId()) + '-'+obj.getName() + '.ome.tiff', key=key) if fobj is True: # already exists return HttpResponseRedirect(settings.STATIC_URL + 'webgateway/tfiles/' + rpath) tiff_data = webgateway_cache.getOmeTiffImage(request, server_id, imgs[0]) if tiff_data is None: try: tiff_data = imgs[0].exportOmeTiff() except: logger.debug('Failed to export image (2)', exc_info=True) tiff_data = None if tiff_data is None: 
webgateway_tempfile.abort(fpath) raise Http404 webgateway_cache.setOmeTiffImage(request, server_id, imgs[0], tiff_data) if fobj is None: rsp = HttpResponse(tiff_data, mimetype='image/tiff') rsp['Content-Disposition'] = 'attachment; filename="%s.ome.tiff"' % (str(obj.getId()) + '-'+obj.getName()) rsp['Content-Length'] = len(tiff_data) return rsp else: fobj.write(tiff_data) fobj.close() return HttpResponseRedirect(settings.STATIC_URL + 'webgateway/tfiles/' + rpath) else: try: img_ids = '+'.join((str(x.getId()) for x in imgs)) key = '_'.join((str(x.getId()) for x in imgs[0].getAncestry())) + '_' + md5(img_ids).hexdigest() + '_ome_tiff_zip' fpath, rpath, fobj = webgateway_tempfile.new(name + '.zip', key=key) if fobj is True: return HttpResponseRedirect(settings.STATIC_URL + 'webgateway/tfiles/' + rpath) logger.debug(fpath) if fobj is None: fobj = StringIO() zobj = zipfile.ZipFile(fobj, 'w', zipfile.ZIP_STORED) for obj in imgs: tiff_data = webgateway_cache.getOmeTiffImage(request, server_id, obj) if tiff_data is None: tiff_data = obj.exportOmeTiff() if tiff_data is None: continue webgateway_cache.setOmeTiffImage(request, server_id, obj, tiff_data) zobj.writestr(str(obj.getId()) + '-'+obj.getName() + '.ome.tiff', tiff_data) zobj.close() if fpath is None: zip_data = fobj.getvalue() rsp = HttpResponse(zip_data, mimetype='application/zip') rsp['Content-Disposition'] = 'attachment; filename="%s.zip"' % name rsp['Content-Length'] = len(zip_data) return rsp except: logger.debug(traceback.format_exc()) raise return HttpResponseRedirect(settings.STATIC_URL + 'webgateway/tfiles/' + rpath) @login_required() def render_movie (request, iid, axis, pos, conn=None, **kwargs): """ Renders a movie from the image with id iid @param request: http request @param iid: Image ID @param axis: Movie frames are along 'z' or 't' dimension. 
String @param pos: The T index (for z axis) or Z index (for t axis) @param conn: L{omero.gateway.BlitzGateway} connection @return: http response wrapping the file, or redirect to temp file """ server_id = request.session['connector'].server_id try: # Prepare a filename we'll use for temp cache, and check if file is already there opts = {} opts['format'] = 'video/' + request.REQUEST.get('format', 'quicktime') opts['fps'] = int(request.REQUEST.get('fps', 4)) opts['minsize'] = (512,512, 'Black') ext = '.avi' key = "%s-%s-%s-%d-%s-%s" % (iid, axis, pos, opts['fps'], _get_signature_from_request(request), request.REQUEST.get('format', 'quicktime')) pos = int(pos) pi = _get_prepared_image(request, iid, server_id=server_id, conn=conn) if pi is None: raise Http404 img, compress_quality = pi fpath, rpath, fobj = webgateway_tempfile.new(img.getName() + ext, key=key) logger.debug(fpath, rpath, fobj) if fobj is True: return HttpResponseRedirect(settings.STATIC_URL + 'webgateway/tfiles/' + rpath)#os.path.join(rpath, img.getName() + ext)) if kwargs.has_key('optsCB'): opts.update(kwargs['optsCB'](img)) opts.update(kwargs.get('opts', {})) logger.debug('rendering movie for img %s with axis %s, pos %i and opts %s' % (iid, axis, pos, opts)) #fpath, rpath = webgateway_tempfile.newdir() if fpath is None: import tempfile fo, fn = tempfile.mkstemp() else: fn = fpath #os.path.join(fpath, img.getName()) if axis.lower() == 'z': dext, mimetype = img.createMovie(fn, 0, img.getSizeZ()-1, pos-1, pos-1, opts) else: dext, mimetype = img.createMovie(fn, pos-1, pos-1, 0, img.getSizeT()-1, opts) if dext is None and mimetype is None: # createMovie is currently only available on 4.1_custom # http://trac.openmicroscopy.org.uk/ome/ticket/3857 raise Http404 if fpath is None: movie = open(fn).read() os.close(fo) rsp = HttpResponse(movie, mimetype=mimetype) rsp['Content-Disposition'] = 'attachment; filename="%s"' % (img.getName()+ext) rsp['Content-Length'] = len(movie) return rsp else: fobj.close() #shutil.move(fn, fn + ext) return HttpResponseRedirect(settings.STATIC_URL + 'webgateway/tfiles/' + rpath)#os.path.join(rpath, img.getName() + ext)) except: logger.debug(traceback.format_exc()) raise @login_required() def render_split_channel (request, iid, z, t, conn=None, **kwargs): """ Renders a split channel view of the image with id {{iid}} at {{z}} and {{t}} as jpeg. Many options are available from the request dict. Requires PIL to be installed on the server. @param request: http request @param iid: Image ID @param z: Z index @param t: T index @param conn: L{omero.gateway.BlitzGateway} connection @return: http response wrapping a jpeg """ server_id = request.session['connector'].server_id pi = _get_prepared_image(request, iid, server_id=server_id, conn=conn) if pi is None: raise Http404 img, compress_quality = pi compress_quality = compress_quality and float(compress_quality) or 0.9 jpeg_data = webgateway_cache.getSplitChannelImage(request, server_id, img, z, t) if jpeg_data is None: jpeg_data = img.renderSplitChannel(z,t, compression=compress_quality) if jpeg_data is None: raise Http404 webgateway_cache.setSplitChannelImage(request, server_id, img, z, t, jpeg_data) rsp = HttpResponse(jpeg_data, mimetype='image/jpeg') return rsp def debug (f): """ Decorator for adding debugging functionality to methods. 
@param f: The function to wrap @return: The wrapped function """ def wrap (request, *args, **kwargs): debug = request.REQUEST.getlist('debug') if 'slow' in debug: time.sleep(5) if 'fail' in debug: raise Http404 if 'error' in debug: raise AttributeError('Debug requested error') return f(request, *args, **kwargs) wrap.func_name = f.func_name return wrap def jsonp (f): """ Decorator for adding connection debugging and returning function result as json, depending on values in kwargs @param f: The function to wrap @return: The wrapped function, which will return json """ def wrap (request, *args, **kwargs): logger.debug('jsonp') try: server_id = kwargs.get('server_id', None) if server_id is None: server_id = request.session['connector'].server_id kwargs['server_id'] = server_id rv = f(request, *args, **kwargs) if kwargs.get('_raw', False): return rv if isinstance(rv, HttpResponse): return rv rv = simplejson.dumps(rv) c = request.REQUEST.get('callback', None) if c is not None and not kwargs.get('_internal', False): rv = '%s(%s)' % (c, rv) if kwargs.get('_internal', False): return rv return HttpResponse(rv, mimetype='application/javascript') except omero.ServerError: if kwargs.get('_raw', False) or kwargs.get('_internal', False): raise return HttpResponseServerError('("error in call","%s")' % traceback.format_exc(), mimetype='application/javascript') except: logger.debug(traceback.format_exc()) if kwargs.get('_raw', False) or kwargs.get('_internal', False): raise return HttpResponseServerError('("error in call","%s")' % traceback.format_exc(), mimetype='application/javascript') wrap.func_name = f.func_name return wrap @debug @login_required() def render_row_plot (request, iid, z, t, y, conn=None, w=1, **kwargs): """ Renders the line plot for the image with id {{iid}} at {{z}} and {{t}} as gif with transparent background. Many options are available from the request dict. I am assuming a single Pixels object on image with Image ID='iid'. May be wrong TODO: cache @param request: http request @param iid: Image ID @param z: Z index @param t: T index @param y: Y position of row to measure @param conn: L{omero.gateway.BlitzGateway} connection @param w: Line width @return: http response wrapping a gif """ if not w: w = 1 pi = _get_prepared_image(request, iid, conn=conn) if pi is None: raise Http404 img, compress_quality = pi try: gif_data = img.renderRowLinePlotGif(int(z),int(t),int(y), int(w)) except: logger.debug('a', exc_info=True) raise if gif_data is None: raise Http404 rsp = HttpResponse(gif_data, mimetype='image/gif') return rsp @debug @login_required() def render_col_plot (request, iid, z, t, x, w=1, conn=None, **kwargs): """ Renders the line plot for the image with id {{iid}} at {{z}} and {{t}} as gif with transparent background. Many options are available from the request dict. I am assuming a single Pixels object on image with id='iid'. 
May be wrong TODO: cache @param request: http request @param iid: Image ID @param z: Z index @param t: T index @param x: X position of column to measure @param conn: L{omero.gateway.BlitzGateway} connection @param w: Line width @return: http response wrapping a gif """ if not w: w = 1 pi = _get_prepared_image(request, iid, conn=conn) if pi is None: raise Http404 img, compress_quality = pi gif_data = img.renderColLinePlotGif(int(z),int(t),int(x), int(w)) if gif_data is None: raise Http404 rsp = HttpResponse(gif_data, mimetype='image/gif') return rsp @login_required() @jsonp def imageData_json (request, conn=None, _internal=False, **kwargs): """ Get a dict with image information TODO: cache @param request: http request @param conn: L{omero.gateway.BlitzGateway} @param _internal: TODO: ? @return: Dict """ iid = kwargs['iid'] key = kwargs.get('key', None) image = conn.getObject("Image", iid) if image is None: return HttpResponseServerError('""', mimetype='application/javascript') rv = imageMarshal(image, key) return rv @login_required() @jsonp def wellData_json (request, conn=None, _internal=False, **kwargs): """ Get a dict with image information TODO: cache @param request: http request @param conn: L{omero.gateway.BlitzGateway} @param _internal: TODO: ? @return: Dict """ wid = kwargs['wid'] well = conn.getObject("Well", wid) if well is None: return HttpResponseServerError('""', mimetype='application/javascript') prefix = kwargs.get('thumbprefix', 'webgateway.views.render_thumbnail') def urlprefix(iid): return reverse(prefix, args=(iid,)) xtra = {'thumbUrlPrefix': kwargs.get('urlprefix', urlprefix)} rv = well.simpleMarshal(xtra=xtra) return rv @login_required() @jsonp def plateGrid_json (request, pid, field=0, conn=None, **kwargs): """ """ plate = conn.getObject('plate', long(pid)) try: field = long(field or 0) except ValueError: field = 0 if plate is None: return HttpResponseServerError('""', mimetype='application/javascript') grid = [] prefix = kwargs.get('thumbprefix', 'webgateway.views.render_thumbnail') thumbsize = int(request.REQUEST.get('size', 64)) logger.debug(thumbsize) def urlprefix(iid): return reverse(prefix, args=(iid,thumbsize)) xtra = {'thumbUrlPrefix': kwargs.get('urlprefix', urlprefix)} server_id = kwargs['server_id'] rv = webgateway_cache.getJson(request, server_id, plate, 'plategrid-%d-%d' % (field, thumbsize)) if rv is None: plate.setGridSizeConstraints(8,12) for row in plate.getWellGrid(field): tr = [] for e in row: if e: i = e.getImage() if i: t = i.simpleMarshal(xtra=xtra) t['wellId'] = e.getId() t['field'] = field tr.append(t) continue tr.append(None) grid.append(tr) rv = {'grid': grid, 'collabels': plate.getColumnLabels(), 'rowlabels': plate.getRowLabels()} webgateway_cache.setJson(request, server_id, plate, simplejson.dumps(rv), 'plategrid-%d-%d' % (field, thumbsize)) else: rv = simplejson.loads(rv) return rv @login_required() @jsonp def listImages_json (request, did, conn=None, **kwargs): """ lists all Images in a Dataset, as json TODO: cache @param request: http request @param did: Dataset ID @param conn: L{omero.gateway.BlitzGateway} @return: list of image json. 
""" dataset = conn.getObject("Dataset", did) if dataset is None: return HttpResponseServerError('""', mimetype='application/javascript') prefix = kwargs.get('thumbprefix', 'webgateway.views.render_thumbnail') def urlprefix(iid): return reverse(prefix, args=(iid,)) xtra = {'thumbUrlPrefix': kwargs.get('urlprefix', urlprefix)} return map(lambda x: x.simpleMarshal(xtra=xtra), dataset.listChildren()) @login_required() @jsonp def listWellImages_json (request, did, conn=None, **kwargs): """ lists all Images in a Well, as json TODO: cache @param request: http request @param did: Well ID @param conn: L{omero.gateway.BlitzGateway} @return: list of image json. """ well = conn.getObject("Well", did) if well is None: return HttpResponseServerError('""', mimetype='application/javascript') prefix = kwargs.get('thumbprefix', 'webgateway.views.render_thumbnail') def urlprefix(iid): return reverse(prefix, args=(iid,)) xtra = {'thumbUrlPrefix': kwargs.get('urlprefix', urlprefix)} return map(lambda x: x.getImage() and x.getImage().simpleMarshal(xtra=xtra), well.listChildren()) @login_required() @jsonp def listDatasets_json (request, pid, conn=None, **kwargs): """ lists all Datasets in a Project, as json TODO: cache @param request: http request @param pid: Project ID @param conn: L{omero.gateway.BlitzGateway} @return: list of dataset json. """ project = conn.getObject("Project", pid) rv = [] if project is None: return HttpResponse('[]', mimetype='application/javascript') return [x.simpleMarshal(xtra={'childCount':0}) for x in project.listChildren()] @login_required() @jsonp def datasetDetail_json (request, did, conn=None, **kwargs): """ return json encoded details for a dataset TODO: cache """ ds = conn.getObject("Dataset", did) return ds.simpleMarshal() @login_required() @jsonp def listProjects_json (request, conn=None, **kwargs): """ lists all Projects, as json TODO: cache @param request: http request @param conn: L{omero.gateway.BlitzGateway} @return: list of project json. """ rv = [] for pr in conn.listProjects(): rv.append( {'id': pr.id, 'name': pr.name, 'description': pr.description or ''} ) return rv @login_required() @jsonp def projectDetail_json (request, pid, conn=None, **kwargs): """ grab details from one specific project TODO: cache @param request: http request @param pid: Project ID @param conn: L{omero.gateway.BlitzGateway} @return: project details as dict. """ pr = conn.getObject("Project", pid) rv = pr.simpleMarshal() return rv def searchOptFromRequest (request): """ Returns a dict of options for searching, based on parameters in the http request Request keys include: - ctx: (http request) 'imgs' to search only images - text: (http request) the actual text phrase - start: starting index (0 based) for result - limit: nr of results to retuen (0 == unlimited) - author: - grabData: - parents: @param request: http request @return: Dict of options """ try: r = request.REQUEST opts = { 'search': unicode(r.get('text', '')).encode('utf8'), 'ctx': r.get('ctx', ''), 'grabData': not not r.get('grabData', False), 'parents': not not bool(r.get('parents', False)), 'start': int(r.get('start', 0)), 'limit': int(r.get('limit', 0)), 'key': r.get('key', None) } author = r.get('author', '') if author: opts['search'] += ' author:'+author return opts except: logger.error(traceback.format_exc()) return {} @TimeIt(logging.INFO) @login_required() @jsonp def search_json (request, conn=None, **kwargs): """ Search for objects in blitz. 
Returns json encoded list of marshalled objects found by the search query Request keys include: - text: The text to search for - ctx: (http request) 'imgs' to search only images - text: (http request) the actual text phrase - start: starting index (0 based) for result - limit: nr of results to retuen (0 == unlimited) - author: - grabData: - parents: @param request: http request @param conn: L{omero.gateway.BlitzGateway} @return: json search results TODO: cache """ opts = searchOptFromRequest(request) rv = [] logger.debug("searchObjects(%s)" % (opts['search'])) # search returns blitz_connector wrapper objects def urlprefix(iid): return reverse('webgateway.views.render_thumbnail', args=(iid,)) xtra = {'thumbUrlPrefix': kwargs.get('urlprefix', urlprefix)} pks = None try: if opts['ctx'] == 'imgs': sr = conn.searchObjects(["image"], opts['search'], conn.SERVICE_OPTS) else: sr = conn.searchObjects(None, opts['search'], conn.SERVICE_OPTS) # searches P/D/I except ApiUsageException: return HttpResponseServerError('"parse exception"', mimetype='application/javascript') def marshal (): rv = [] if (opts['grabData'] and opts['ctx'] == 'imgs'): bottom = min(opts['start'], len(sr)-1) if opts['limit'] == 0: top = len(sr) else: top = min(len(sr), bottom + opts['limit']) for i in range(bottom, top): e = sr[i] #for e in sr: try: rv.append(imageData_json(request, server_id, iid=e.id, key=opts['key'], conn=conn, _internal=True)) except AttributeError, x: logger.debug('(iid %i) ignoring Attribute Error: %s' % (e.id, str(x))) pass except omero.ServerError, x: logger.debug('(iid %i) ignoring Server Error: %s' % (e.id, str(x))) return rv else: return map(lambda x: x.simpleMarshal(xtra=xtra, parents=opts['parents']), sr) rv = timeit(marshal)() logger.debug(rv) return rv @login_required() def save_image_rdef_json (request, iid, conn=None, **kwargs): """ Requests that the rendering defs passed in the request be set as the default for this image. Rendering defs in request listed at L{getImgDetailsFromReq} TODO: jsonp @param request: http request @param iid: Image ID @param conn: L{omero.gateway.BlitzGateway} @return: http response 'true' or 'false' """ server_id = request.session['connector'].server_id r = request.REQUEST pi = _get_prepared_image(request, iid, server_id=server_id, conn=conn, saveDefs=True) if pi is None: json_data = 'false' else: user_id = pi[0]._conn.getEventContext().userId webgateway_cache.invalidateObject(server_id, user_id, pi[0]) pi[0].getThumbnail() json_data = 'true' if r.get('callback', None): json_data = '%s(%s)' % (r['callback'], json_data) return HttpResponse(json_data, mimetype='application/javascript') @login_required() def list_compatible_imgs_json (request, iid, conn=None, **kwargs): """ Lists the images on the same project that would be viable targets for copying rendering settings. 
TODO: change method to: list_compatible_imgs_json (request, iid, server_id=None, conn=None, **kwargs): @param request: http request @param iid: Image ID @param conn: L{omero.gateway.BlitzGateway} @return: json list of image IDs """ json_data = 'false' r = request.REQUEST if conn is None: img = None else: img = conn.getObject("Image", iid) if img is not None: # List all images in project imgs = [] for ds in img.getProject().listChildren(): imgs.extend(ds.listChildren()) # Filter the ones that would pass the applySettingsToImages call img_ptype = img.getPrimaryPixels().getPixelsType().getValue() img_ccount = img.getSizeC() img_ew = [x.getLabel() for x in img.getChannels()] img_ew.sort() def compat (i): if long(i.getId()) == long(iid): return False pp = i.getPrimaryPixels() if pp is None or \ i.getPrimaryPixels().getPixelsType().getValue() != img_ptype or \ i.getSizeC() != img_ccount: return False ew = [x.getLabel() for x in i.getChannels()] ew.sort() if ew != img_ew: return False return True imgs = filter(compat, imgs) json_data = simplejson.dumps([x.getId() for x in imgs]) if r.get('callback', None): json_data = '%s(%s)' % (r['callback'], json_data) return HttpResponse(json_data, mimetype='application/javascript') @login_required() @jsonp def copy_image_rdef_json (request, conn=None, **kwargs): """ Copy the rendering settings from one image to a list of images. Images are specified in request by 'fromid' and list of 'toids' Returns json dict of Boolean:[Image-IDs] for images that have successfully had the rendering settings applied, or not. @param request: http request @param server_id: @param conn: L{omero.gateway.BlitzGateway} @return: json dict of Boolean:[Image-IDs] """ server_id = request.session['connector'].server_id json_data = False r = request.REQUEST try: fromid = long(r.get('fromid', None)) toids = map(lambda x: long(x), r.getlist('toids')) except TypeError: fromid = None except ValueError: fromid = None if fromid is not None and len(toids) > 0: fromimg = conn.getObject("Image", fromid) frompid = fromimg.getPixelsId() userid = fromimg.getOwner().getId() if fromimg.canWrite(): ctx = conn.SERVICE_OPTS.copy() ctx.setOmeroGroup(fromimg.getDetails().getGroup().getId()) ctx.setOmeroUser(userid) rsettings = conn.getRenderingSettingsService() json_data = rsettings.applySettingsToImages(frompid, list(toids), ctx) if fromid in json_data[True]: del json_data[True][json_data[True].index(fromid)] for iid in json_data[True]: img = conn.getObject("Image", iid) img is not None and webgateway_cache.invalidateObject(server_id, userid, img) return json_data # # json_data = simplejson.dumps(json_data) # # if r.get('callback', None): # json_data = '%s(%s)' % (r['callback'], json_data) # return HttpResponse(json_data, mimetype='application/javascript') @login_required() @jsonp def reset_image_rdef_json (request, iid, conn=None, **kwargs): """ Try to remove all rendering defs the logged in user has for this image. @param request: http request @param iid: Image ID @param conn: L{omero.gateway.BlitzGateway} @return: json 'true', or 'false' if failed """ img = conn.getObject("Image", iid) if img is not None and img.resetRDefs(): user_id = conn.getEventContext().userId server_id = request.session['connector'].server_id webgateway_cache.invalidateObject(server_id, user_id, img) return True json_data = 'true' else: json_data = 'false' return False # if _conn is not None: # return json_data == 'true' # TODO: really return a boolean? 
(not json) # if r.get('callback', None): # json_data = '%s(%s)' % (r['callback'], json_data) # return HttpResponse(json_data, mimetype='application/javascript') @login_required() def full_viewer (request, iid, conn=None, **kwargs): """ This view is responsible for showing the omero_image template Image rendering options in request are used in the display page. See L{getImgDetailsFromReq}. @param request: http request. @param iid: Image ID @param conn: L{omero.gateway.BlitzGateway} @param **kwargs: Can be used to specify the html 'template' for rendering @return: html page of image and metadata """ rid = getImgDetailsFromReq(request) try: image = conn.getObject("Image", iid) if image is None: logger.debug("(a)Image %s not found..." % (str(iid))) raise Http404 d = {'blitzcon': conn, 'image': image, 'opts': rid, 'roiCount': image.getROICount(), 'viewport_server': kwargs.get('viewport_server', '/webgateway'), 'object': 'image:%i' % int(iid)} template = kwargs.get('template', "webgateway/viewport/omero_image.html") t = template_loader.get_template(template) c = Context(request,d) rsp = t.render(c) except omero.SecurityViolation: raise Http404 return HttpResponse(rsp) @login_required() def get_shape_json(request, roiId, shapeId, conn=None, **kwargs): roiId = int(roiId) shapeId = int(shapeId) shape = conn.getQueryService().findByQuery( 'select shape from Roi as roi ' \ 'join roi.shapes as shape ' \ 'where roi.id = %d and shape.id = %d' % (roiId, shapeId), None) logger.debug('Shape: %r' % shape) if shape is None: logger.debug('No such shape: %r' % shapeId) raise Http404 return HttpResponse(simplejson.dumps(shapeMarshal(shape)), mimetype='application/javascript') @login_required() def get_rois_json(request, imageId, conn=None, **kwargs): """ Returns json data of the ROIs in the specified image. """ rois = [] roiService = conn.getRoiService() #rois = webfigure_utils.getRoiShapes(roiService, long(imageId)) # gets a whole json list of ROIs result = roiService.findByImage(long(imageId), None, conn.SERVICE_OPTS) for r in result.rois: roi = {} roi['id'] = r.getId().getValue() # go through all the shapes of the ROI shapes = [] for s in r.copyShapes(): if s is None: # seems possible in some situations continue shapes.append(shapeMarshal(s)) # sort shapes by Z, then T. shapes.sort(key=lambda x: "%03d%03d"% (x.get('theZ', -1), x.get('theT', -1))); roi['shapes'] = shapes rois.append(roi) rois.sort(key=lambda x: x['id']) # sort by ID - same as in measurement tool. return HttpResponse(simplejson.dumps(rois), mimetype='application/javascript') def test (request): """ Tests the L{full_viewer} with no args passed to the template. @param request: http request. @return: blank page template """ context = {} t = template_loader.get_template('webgateway/viewport/omero_image.html') c = Context(request,context) return HttpResponse(t.render(c)) @login_required(isAdmin=True) @jsonp def su (request, user, conn=None, **kwargs): """ If current user is admin, switch the session to a new connection owned by 'user' (puts the new session ID in the request.session) Return False if not possible @param request: http request. 
@param user: Username of new connection owner @param conn: L{omero.gateway.BlitzGateway} @param **kwargs: Can be used to specify the html 'template' for rendering @return: Boolean """ conn.setGroupNameForSession('system') connector = request.session['connector'] connector = Connector(connector.server_id, connector.is_secure) session = conn.getSessionService().getSession(conn._sessionUuid) ttl = session.getTimeToIdle().val connector.omero_session_key = conn.suConn(user, ttl=ttl)._sessionUuid request.session['connector'] = connector conn.revertGroupForSession() conn.seppuku() return True
gpl-2.0
-5,932,802,012,153,358,000
36.636311
184
0.587316
false
3.653023
false
false
false
Parallel-in-Time/pySDC
pySDC/playgrounds/mpifft/grayscott.py
1
7994
import numpy as np
from mpi4py import MPI

import matplotlib.pyplot as plt

from pySDC.helpers.stats_helper import filter_stats, sort_stats
from pySDC.implementations.collocation_classes.gauss_radau_right import CollGaussRadau_Right
from pySDC.implementations.collocation_classes.gauss_lobatto import CollGaussLobatto
from pySDC.implementations.controller_classes.controller_nonMPI import controller_nonMPI
from pySDC.implementations.sweeper_classes.imex_1st_order import imex_1st_order
from pySDC.implementations.sweeper_classes.multi_implicit import multi_implicit
from pySDC.implementations.problem_classes.GrayScott_MPIFFT import grayscott_imex_diffusion, grayscott_imex_linear, \
    grayscott_mi_diffusion, grayscott_mi_linear
from pySDC.implementations.transfer_classes.TransferMesh_MPIFFT import fft_to_fft


def run_simulation(spectral=None, splitting_type=None, ml=None, num_procs=None):
    """
    A test program to do SDC, MLSDC and PFASST runs for the 2D NLS equation

    Args:
        spectral (bool): run in real or spectral space
        ml (bool): single or multiple levels
        num_procs (int): number of parallel processors
    """

    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()

    # initialize level parameters
    level_params = dict()
    level_params['restol'] = 1E-12
    level_params['dt'] = 8E-00
    level_params['nsweeps'] = [1]
    level_params['residual_type'] = 'last_abs'

    # initialize sweeper parameters
    sweeper_params = dict()
    # sweeper_params['collocation_class'] = CollGaussRadau_Right
    sweeper_params['collocation_class'] = CollGaussLobatto
    sweeper_params['num_nodes'] = [5]
    sweeper_params['QI'] = ['LU']  # For the IMEX sweeper, the LU-trick can be activated for the implicit part
    sweeper_params['Q1'] = ['LU']  # For the IMEX sweeper, the LU-trick can be activated for the implicit part
    sweeper_params['Q2'] = ['LU']  # For the IMEX sweeper, the LU-trick can be activated for the implicit part
    sweeper_params['QE'] = ['EE']  # You can try PIC here, but PFASST doesn't like this..
    sweeper_params['initial_guess'] = 'spread'

    # initialize problem parameters
    problem_params = dict()
    if ml:
        problem_params['nvars'] = [(128, 128), (32, 32)]
    else:
        problem_params['nvars'] = [(128, 128)]
    problem_params['spectral'] = spectral
    problem_params['comm'] = comm
    problem_params['Du'] = 0.00002
    problem_params['Dv'] = 0.00001
    problem_params['A'] = 0.04
    problem_params['B'] = 0.1
    problem_params['newton_maxiter'] = 50
    problem_params['newton_tol'] = 1E-11

    # initialize step parameters
    step_params = dict()
    step_params['maxiter'] = 100
    step_params['errtol'] = 1E-09

    # initialize controller parameters
    controller_params = dict()
    controller_params['logger_level'] = 20 if rank == 0 else 99
    # controller_params['predict_type'] = 'fine_only'
    controller_params['use_iteration_estimator'] = False

    # fill description dictionary for easy step instantiation
    description = dict()
    description['problem_params'] = problem_params  # pass problem parameters
    if splitting_type == 'diffusion':
        description['problem_class'] = grayscott_imex_diffusion
    elif splitting_type == 'linear':
        description['problem_class'] = grayscott_imex_linear
    elif splitting_type == 'mi_diffusion':
        description['problem_class'] = grayscott_mi_diffusion
    elif splitting_type == 'mi_linear':
        description['problem_class'] = grayscott_mi_linear
    else:
        raise NotImplementedError(f'splitting_type = {splitting_type} not implemented')
    if splitting_type == 'mi_diffusion' or splitting_type == 'mi_linear':
        description['sweeper_class'] = multi_implicit
    else:
        description['sweeper_class'] = imex_1st_order
    description['sweeper_params'] = sweeper_params  # pass sweeper parameters
    description['level_params'] = level_params  # pass level parameters
    description['step_params'] = step_params  # pass step parameters
    description['space_transfer_class'] = fft_to_fft

    # set time parameters
    t0 = 0.0
    Tend = 32

    f = None
    if rank == 0:
        f = open('GS_out.txt', 'a')
        out = f'Running with ml = {ml} and num_procs = {num_procs}...'
        f.write(out + '\n')
        print(out)

    # instantiate controller
    controller = controller_nonMPI(num_procs=num_procs, controller_params=controller_params, description=description)

    # get initial values on finest level
    P = controller.MS[0].levels[0].prob
    uinit = P.u_exact(t0)

    # plt.figure()
    # plt.imshow(uinit[..., 0], vmin=0, vmax=1)
    # plt.title('v')
    # plt.colorbar()
    # plt.figure()
    # plt.imshow(uinit[..., 1], vmin=0, vmax=1)
    # plt.title('v')
    # plt.colorbar()
    # plt.figure()
    # plt.imshow(uinit[..., 0] + uinit[..., 1])
    # plt.title('sum')
    # plt.colorbar()

    # call main function to get things done...
    uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)

    # plt.figure()
    # plt.imshow(P.fft.backward(uend[..., 0]))#, vmin=0, vmax=1)
    # # plt.imshow(np.fft.irfft2(uend[..., 0]))#, vmin=0, vmax=1)
    # plt.title('u')
    # plt.colorbar()
    # plt.figure()
    # plt.imshow(P.fft.backward(uend[..., 1]))#, vmin=0, vmax=1)
    # # plt.imshow(np.fft.irfft2(uend[..., 1]))#, vmin=0, vmax=1)
    # plt.title('v')
    # plt.colorbar()
    # # plt.figure()
    # # plt.imshow(uend[..., 0] + uend[..., 1])
    # # plt.title('sum')
    # # plt.colorbar()
    # plt.show()
    # # exit()

    if rank == 0:
        # filter statistics by type (number of iterations)
        filtered_stats = filter_stats(stats, type='niter')

        # convert filtered statistics to list of iterations count, sorted by process
        iter_counts = sort_stats(filtered_stats, sortby='time')

        niters = np.array([item[1] for item in iter_counts])
        out = f' Min/Mean/Max number of iterations: ' \
              f'{np.min(niters):4.2f} / {np.mean(niters):4.2f} / {np.max(niters):4.2f}'
        f.write(out + '\n')
        print(out)
        out = ' Range of values for number of iterations: %2i ' % np.ptp(niters)
        f.write(out + '\n')
        print(out)
        out = ' Position of max/min number of iterations: %2i -- %2i' % \
              (int(np.argmax(niters)), int(np.argmin(niters)))
        f.write(out + '\n')
        print(out)
        out = ' Std and var for number of iterations: %4.2f -- %4.2f' % (float(np.std(niters)), float(np.var(niters)))
        f.write(out + '\n')
        print(out)

        timing = sort_stats(filter_stats(stats, type='timing_run'), sortby='time')
        out = f'Time to solution: {timing[0][1]:6.4f} sec.'
        f.write(out + '\n')
        print(out)

        f.write('\n')
        print()
        f.close()


def main():
    """
    Little helper routine to run the whole thing

    Note: This can also be run with "mpirun -np 2 python grayscott.py"
    """
    # run_simulation(spectral=False, splitting_type='diffusion', ml=False, num_procs=1)
    # run_simulation(spectral=True, splitting_type='diffusion', ml=False, num_procs=1)
    # run_simulation(spectral=True, splitting_type='linear', ml=False, num_procs=1)
    # run_simulation(spectral=False, splitting_type='diffusion', ml=True, num_procs=1)
    # run_simulation(spectral=True, splitting_type='diffusion', ml=True, num_procs=1)
    # run_simulation(spectral=False, splitting_type='diffusion', ml=True, num_procs=10)
    # run_simulation(spectral=True, splitting_type='diffusion', ml=True, num_procs=10)
    # run_simulation(spectral=False, splitting_type='mi_diffusion', ml=False, num_procs=1)
    run_simulation(spectral=True, splitting_type='mi_diffusion', ml=False, num_procs=1)
    # run_simulation(spectral=False, splitting_type='mi_linear', ml=False, num_procs=1)
    # run_simulation(spectral=True, splitting_type='mi_linear', ml=False, num_procs=1)


if __name__ == "__main__":
    main()
bsd-2-clause
632,620,661,393,249,800
38.97
120
0.643858
false
3.265523
false
false
false
sbates130272/libdonard
scripts/imgrep.py
1
6622
#!/usr/bin/env python
########################################################################
##
## Copyright 2014 PMC-Sierra, Inc.
##
## Licensed under the Apache License, Version 2.0 (the "License"); you
## may not use this file except in compliance with the License. You may
## obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0 Unless required by
## applicable law or agreed to in writing, software distributed under the
## License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
## CONDITIONS OF ANY KIND, either express or implied. See the License for
## the specific language governing permissions and limitations under the
## License.
##
########################################################################

########################################################################
##
## Author: Logan Gunthorpe
##
## Date: Oct 23, 2014
##
## Description:
## Image grep test/example script
##
########################################################################

import os
import sys
import Image
import numpy as np
from numpy import fft

import utils
import time

data = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "data"))


class ImageGrepError(Exception):
    pass


class ImageGrep(object):
    max_size = 8192
    threshold = 150
    tiff = False

    def __init__(self, needle, **kws):
        for k,v in kws.iteritems():
            if hasattr(self, k):
                setattr(self, k, v)

        data_needle = os.path.join(data, needle)
        if not os.path.exists(needle) and os.path.exists(data_needle):
            needle = data_needle

        im = Image.open(needle, 'r').convert("L")
        self.needle = np.array(im) / 255.
        self.needle_size = im.size

        revneedle = self.needle[::-1,::-1]
        revneedle = self._padimg(revneedle, self._next_highest_pow2(*revneedle.shape))

        edge_detect = np.ones((3,3)) * -1./8
        edge_detect[1,1] = 1
        edge_detect = self._padimg(edge_detect, revneedle.shape)

        edge_needle = self._convolve(revneedle, edge_detect)
        self.needle = self._padimg(edge_needle, (self.max_size, self.max_size))

        self.pixels = 0
        self.bytes = 0

    def _next_highest_pow2(self, *args):
        return tuple(1 << (x-1).bit_length() for x in args)

    def _padimg(self, a, shape):
        padspace = np.zeros(shape)
        padspace[:a.shape[0], :a.shape[1]] = a
        return padspace

    def _convolve(self, a, b):
        haystack_fft = fft.rfft2(a)
        needle_fft = fft.rfft2(b)
        return fft.irfft2((haystack_fft * needle_fft))

    def _save_image(self, m, fname):
        mx = np.amax(m)
        mn = np.amin(m)
        if mx > 1.0 or mn < 0:
            m = m.copy()
            m += -mn
            m /= (mx-mn)

        Image.fromarray(np.uint8(m*255)).save(fname)

    def __call__(self, haystack, *args, **kws):
        if utils.istiff(haystack) and self.tiff:
            pass
        elif utils.isjpeg(haystack) and not self.tiff:
            pass
        else:
            return []

        im = Image.open(haystack, 'r').convert("L")
        self.pixels += im.size[0] * im.size[1]
        self.bytes += os.path.getsize(haystack)
        haystack = np.array(im) / 255.

        #Pad dimensions to next highest power of 2 seeing this is
        # more efficient for the fft
        haystack = self._padimg(haystack, self._next_highest_pow2(*haystack.shape))

        if max(haystack.shape) > self.max_size:
            raise ImageGrepError("Image too large. Increase max_size.")

        needle = self.needle[:haystack.shape[0],:haystack.shape[1]]

        conv = self._convolve(needle, haystack)
        #self._save_image((conv > self.threshold) * 200, "conv.jpg")

        w, h = self.needle_size
        results = {}
        for x, y in zip(*np.nonzero(conv > self.threshold)):
            xx, yy = y-w+1, x-h
            if xx < 0 or yy < 0 or xx > im.size[0] or yy > im.size[1]:
                continue

            for (xr, yr, wr, hr), rr in results.iteritems():
                if (abs(xx-xr) < h or abs(yy-yr) < w):
                    if rr < conv[x,y]:
                        results[xx,yy,w,h] = conv[x,y]
                        del results[xr,yr,wr,hr]
                    break
            else:
                results[xx,yy,w,h] = conv[x, y]

        return results.items()

    def print_results(self, haystack, *args, **kws):
        try:
            for (x,y,w,h), r in self(haystack, *args, **kws):
                print "%s %5d+%-3d %5d+%-3d (%.2f)" % (haystack+":", x, w, y, h, r)
                sys.stdout.flush()
        except ImageGrepError, e:
            print >> sys.stderr, "%s (%s) " % (haystack, str(e))


if __name__ == "__main__":
    import optparse

    usage = "usage: %prog [options] IMAGE1 [IMAGE2 DIR1 ...]"
    parser = optparse.OptionParser(usage = usage)
    parser.add_option("--tiff", action="store_true",
                      help="process TIFF files rather than JPEGs")
    parser.add_option("-M", "--max-size", action="store", type="int",
                      default=ImageGrep.max_size,
                      help="maximum supported image size, default: %default")
    parser.add_option("-t", "--threshold", action="store", type="float",
                      default=ImageGrep.threshold,
                      help="detection threshold, default: %default")
    parser.add_option("-n", "--needle", action="store", type="string",
                      default="pmclogo.png",
                      help="needle image to search for, default : %default")
    (options, args) = parser.parse_args()

    if not args:
        parser.print_usage()
        sys.exit(-1)

    imgrep = ImageGrep(**options.__dict__)

    try:
        starttime = time.time()
        utils.run(args, imgrep.print_results)
    except KeyboardInterrupt:
        pass
    except IOError, e:
        print e
    finally:
        duration = time.time() - starttime
        time.sleep(0.5)
        print >> sys.stderr
        print >> sys.stderr, ("%.2f%spixels in %.1fs %.2f%spixels/s" %
                              (utils.si_suffix(imgrep.pixels) + (duration, ) +
                               utils.si_suffix(imgrep.pixels / duration)))
        print >> sys.stderr, ("%.2f%sBytes in %.1fs %.2f%sB/s" %
                              (utils.si_suffix(imgrep.bytes) + (duration, ) +
                               utils.si_suffix(imgrep.bytes / duration)))
apache-2.0
2,855,813,345,342,170,000
32.958974
99
0.517064
false
3.596958
false
false
false
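The core trick in ImageGrep above is template matching by FFT convolution: the needle is flipped, both arrays are transformed with rfft2, multiplied, and transformed back, so peaks in the result mark likely matches. A toy, self-contained sketch of that idea, using synthetic arrays instead of image files and an arbitrary size:

import numpy as np
from numpy import fft

haystack = np.zeros((64, 64))
needle = np.ones((8, 8))
haystack[20:28, 30:38] = 1.0                    # plant one copy of the needle

# convolving with the flipped needle is the same as cross-correlating with it
kernel = np.zeros_like(haystack)
kernel[:8, :8] = needle[::-1, ::-1]
score = fft.irfft2(fft.rfft2(haystack) * fft.rfft2(kernel), s=haystack.shape)

y, x = np.unravel_index(np.argmax(score), score.shape)
print("strongest response near (x=%d, y=%d), score=%.1f" % (x, y, score[y, x]))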
edy89/Compugrafica
level2.py
1
14250
import pygame import sys import random import level1_main import menu from pygame.locals import * pygame.init() reloj = pygame.time.Clock() opciones = [ ("Jugar", level1_main.Nivel_1), ("Creditos", menu.creditos), ("Salir", menu.salir_del_programa) ] menu = menu.Menu(opciones = opciones) def Nivel_2(pantalla): level1_main.pygame.mixer.music.play() puntos_influencia = 20 contador = 0 contador_inicio = 0 num_jugadores = 0 pag = 0 level_2 = 0 bool_pag = False bool_level2 = False bool_lose = False bandera = True cursor = level1_main.Cursor() hecho = False jugador_aux = level1_main.Jugador('Sprites/nave2.png') jugador_aux_3 = level1_main.Jugador('Sprites/nave2.png') jugador_aux_2 = level1_main.Jugador('Sprites/nave.png') jugador_aux_4 = level1_main.Jugador('Sprites/nave3.png') # inicializacion del Enemigo Boss enemigo = level1_main.Boss('Sprites/Bossnave.png') enemigo.rect.x = level1_main.DIMENSION_VENTANA[0] - 300 enemigo.rect.y = random.randrange(0, level1_main.DIMENSION_VENTANA[1] - 300) enemigo.disparar = random.randrange(30, 100) vida_Boss = level1_main.fuente3.render(str(enemigo.vida), 0, level1_main.Blanco) ############### LISTAS ####################### lista_todos = pygame.sprite.Group() lista_enemigo = pygame.sprite.Group() lista_jugadores1 = pygame.sprite.Group() lista_jugadores2 = pygame.sprite.Group() lista_jugadores3 = pygame.sprite.Group() lista_balas_jugadores = pygame.sprite.Group() lista_balas_enemigo = pygame.sprite.Group() ########################################### while not hecho and bandera: pos = pygame.mouse.get_pos() for evento in pygame.event.get(): if evento.type == pygame.QUIT: hecho = True quit() if evento.type == pygame.MOUSEBUTTONDOWN: if cursor.colliderect(level1_main.boton.rect): # nave 1 en pantalla level1_main.seleccion_nave.play() jugador_aux = jugador_aux_3 contador += 1 lista_todos.add(jugador_aux) if cursor.colliderect(level1_main.boton1.rect): # nave 2 en pantalla level1_main.seleccion_nave.play() jugador_aux = jugador_aux_2 contador += 1 lista_todos.add(jugador_aux) if cursor.colliderect(level1_main.boton2.rect): #nave 3 en pantalla level1_main.seleccion_nave.play() jugador_aux = jugador_aux_4 contador += 1 lista_todos.add(jugador_aux) if cursor.colliderect(level1_main.boton_inicio.rect) and contador_inicio == 0: lista_enemigo.add(enemigo) lista_todos.add(enemigo) contador_inicio += 1 level1_main.pygame.mixer.music.stop() level1_main.juego.play() if evento.type == pygame.MOUSEBUTTONUP and contador > 0: contador -= 1 if puntos_influencia > 0: if jugador_aux == jugador_aux_2: if puntos_influencia >= 3: nave = level1_main.Jugador('Sprites/nave.png') nave.vida += 1 lista_jugadores1.add(nave) lista_todos.add(nave) num_jugadores += 1 puntos_influencia -= 3 bool_lose = True if jugador_aux == jugador_aux_3: if puntos_influencia >= 2: nave = level1_main.Jugador('Sprites/nave2.png') lista_jugadores2.add(nave) lista_todos.add(nave) num_jugadores += 1 puntos_influencia -= 2 bool_lose = True if jugador_aux == jugador_aux_4: if puntos_influencia >= 5: nave = level1_main.Jugador('Sprites/nave3.png') nave.vida += 2 lista_jugadores3.add(nave) lista_todos.add(nave) num_jugadores += 1 puntos_influencia -= 5 bool_lose = True if pos[1] > 135: nave.rect.x = pos[0] - 40 nave.rect.y = pos[1] - 40 else: nave.rect.x = pos[0] - 40 + 90 nave.rect.y = pos[1] - 40 + 135 lista_todos.remove(jugador_aux) if contador_inicio == 1: ##### Ciclos de llenado de balas de enemigos for a in lista_enemigo: if a.disparar == 0: balae1 = level1_main.Laser1('Sprites/Sol.png') balae2 = 
level1_main.Laser2('Sprites/Sol.png') balae3 = level1_main.Laser3('Sprites/Sol.png') balae4 = level1_main.Laser4('Sprites/Sol.png') balae5 = level1_main.Laser5('Sprites/Sol.png') balae1.rect.x = a.rect.x - 30 balae1.rect.y = a.rect.y balae2.rect.x = a.rect.x - 30 balae2.rect.y = a.rect.y balae3.rect.x = a.rect.x - 30 balae3.rect.y = a.rect.y balae4.rect.x = a.rect.x - 30 balae4.rect.y = a.rect.y balae5.rect.x = a.rect.x - 30 balae5.rect.y = a.rect.y lista_balas_enemigo.add(balae1) lista_todos.add(balae1) lista_balas_enemigo.add(balae2) lista_todos.add(balae2) lista_balas_enemigo.add(balae3) lista_todos.add(balae3) lista_balas_enemigo.add(balae4) lista_todos.add(balae4) lista_balas_enemigo.add(balae5) lista_todos.add(balae5) #### llenado de proyectiles de las naves ("jugadores") for e in lista_jugadores1: if e.disparar == 0: balaj = level1_main.Proyectil_1('Sprites/proyectil.png') balaj.rect.x = e.rect.x + 40 balaj.rect.y = e.rect.y + 10 lista_balas_jugadores.add(balaj) lista_todos.add(balaj) level1_main.sonido.play() for f in lista_jugadores2: if f.disparar == 0: balaj = level1_main.Proyectil('Sprites/proyectil.png') balaj.rect.x = f.rect.x + 40 balaj.rect.y = f.rect.y + 10 lista_balas_jugadores.add(balaj) lista_todos.add(balaj) level1_main.sonido.play() for g in lista_jugadores3: if g.disparar == 0: balaj = level1_main.Proyectil_2('Sprites/proyectil.png') balaj2 = level1_main.Proyectil_3('Sprites/proyectil.png') balaj2.rect.x = g.rect.x + 40 balaj.rect.x = g.rect.x + 40 balaj2.rect.y = g.rect.y + 10 balaj.rect.y = g.rect.y + 10 lista_balas_jugadores.add(balaj) lista_balas_jugadores.add(balaj2) lista_todos.add(balaj) lista_todos.add(balaj2) level1_main.sonido.play() ############################# COLISIONES ######################################## ##### Colision de un jugador con la bala de un enemigo ########### for h1 in lista_jugadores1: ls_impactoe = pygame.sprite.spritecollide(h1, lista_balas_enemigo, True) for imp1 in ls_impactoe: print (h1.vida) h1.vida -= 1 if h1.vida == 0: lista_jugadores1.remove(h1) lista_todos.remove(h1) num_jugadores -= 1 level1_main.desaparece.play() for h2 in lista_jugadores2: ls_impactoe = pygame.sprite.spritecollide(h2, lista_balas_enemigo, True) for imp2 in ls_impactoe: print (h2.vida) h2.vida -= 1 if h2.vida == 1: lista_jugadores2.remove(h2) lista_todos.remove(h2) num_jugadores -= 1 level1_main.desaparece.play() for h3 in lista_jugadores3: ls_impactoe = pygame.sprite.spritecollide(h3, lista_balas_enemigo, True) for imp3 in ls_impactoe: print (h3.vida) h3.vida -= 1 if h3.vida == 1: lista_jugadores3.remove(h3) lista_todos.remove(h3) num_jugadores -= 1 level1_main.desaparece.play() ########### Colision de una bala del jugador con un enemigo ########################### for k in lista_balas_jugadores: ls_impacto1 = pygame.sprite.spritecollide(k, lista_enemigo, False) for impacto1 in ls_impacto1: lista_balas_jugadores.remove(k) lista_todos.remove(k) puntos_influencia += 1 print("Boss_life: %d" % enemigo.vida ) vida_Boss = level1_main.fuente3.render(str(enemigo.vida), 0, level1_main.Blanco) level1_main.explosion.play() if enemigo.vida > 0: enemigo.vida -= 1 ######################################################################################## jugador_aux.rect.x = pos[0] - 30 jugador_aux.rect.y = pos[1] - 30 if enemigo.vida == 0: level_2 = 1 bool_level2 = True bandera = False level1_main.juego.stop() level1_main.win.play() if num_jugadores == 0 and bool_lose: pag += 1 bandera = False bool_pag = True level1_main.juego.stop() level1_main.pygame.mixer.music.stop() 
level1_main.explosion2.play() level1_main.muerto.play() pantalla.blit(level1_main.fondo, [0, 0]) while (not bandera): for event in pygame.event.get(): if event.type == pygame.QUIT: quit() if event.type == pygame.KEYUP and bool_pag: pag += 1 if event.type == pygame.KEYUP and bool_level2: level_2 += 1 if pag == 1: pantalla.blit(level1_main.muerte, [0, 0]) level1_main.to_level_2.play() if pag == 2: level1_main.juego.stop() level1_main.pygame.mixer.music.stop() while not bandera: for e in pygame.event.get(): if e.type == QUIT: bandera = True pantalla.blit(level1_main.fondo, (0, 0)) menu.actualizar() menu.imprimir(pantalla) pantalla.blit(level1_main.Titulo, (280, level1_main.DIMENSION_VENTANA[1] / 2 - 280)) level1_main.pygame.mixer.music.play() level1_main.muerto.stop() pygame.display.flip() level1_main.muerto.stop() reloj.tick(60) if level_2 == 1: pantalla.fill(level1_main.NEGRO) pantalla.blit(level1_main.texto3, (90, level1_main.DIMENSION_VENTANA[1] / 2 - 70)) if level_2 == 2: pantalla.fill(level1_main.NEGRO) pantalla.blit(level1_main.texto7, (250, level1_main.DIMENSION_VENTANA[1] / 2 - 70)) if level_2 == 3: level1_main.win.stop() while not bandera: for e in pygame.event.get(): if e.type == QUIT: bandera = True quit() pantalla.blit(level1_main.fondo, (0, 0)) menu.actualizar() menu.imprimir(pantalla) pantalla.blit(level1_main.Titulo, (280, level1_main.DIMENSION_VENTANA[1] / 2 - 280)) level1_main.pygame.mixer.music.play() level1_main.juego.stop() level1_main.muerto.stop() pygame.display.flip() reloj.tick(60) pygame.display.flip() puntos_pantalla = level1_main.fuente3.render(str(puntos_influencia), 0, level1_main.Blanco) cursor.update() lista_enemigo.update() lista_jugadores1.update() lista_jugadores2.update() lista_jugadores3.update() lista_balas_jugadores.update() lista_balas_enemigo.update() level1_main.boton.update(pantalla,cursor) level1_main.boton1.update(pantalla,cursor) level1_main.boton2.update(pantalla,cursor) level1_main.boton_inicio.update(pantalla, cursor) level1_main.boton_reset.update(pantalla, cursor) pantalla.blit(puntos_pantalla, (140,540)) pantalla.blit(vida_Boss, (880, 10)) pantalla.blit(level1_main.texto8, (700, 10)) pantalla.blit(level1_main.texto6, (12, 540)) pantalla.blit(level1_main.texto9,(10,65)) pantalla.blit(level1_main.texto10, (80, 65)) pantalla.blit(level1_main.texto11, (150, 65)) lista_todos.draw(pantalla) pygame.display.flip() reloj.tick(60)
gpl-3.0
-8,573,626,075,694,006,000
40.537313
122
0.479368
false
3.348214
false
false
false
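Stripped of the game-specific bookkeeping, the collision handling in Nivel_2 above is the standard pygame pattern: keep projectiles and targets in separate sprite.Group objects and let spritecollide report the overlaps. A minimal, headless sketch of that pattern (the Box class and coordinates are made up for illustration):

import pygame

pygame.init()

class Box(pygame.sprite.Sprite):
    def __init__(self, x, y, size=20):
        super(Box, self).__init__()
        self.image = pygame.Surface((size, size))
        self.rect = self.image.get_rect(topleft=(x, y))

bullets = pygame.sprite.Group(Box(100, 100), Box(300, 300))
enemies = pygame.sprite.Group(Box(105, 105), Box(500, 500))

for bullet in bullets:
    # dokill=False keeps hit enemies in the group so their health can be reduced instead
    for enemy in pygame.sprite.spritecollide(bullet, enemies, False):
        print("bullet at %s hit enemy at %s" % (bullet.rect.topleft, enemy.rect.topleft))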
GoodCloud/johnny-cache
johnny/transaction.py
1
10890
#!/usr/bin/env python # -*- coding: utf-8 -*- from django.db import transaction as django_transaction from django.db import connection try: from django.db import DEFAULT_DB_ALIAS except: DEFUALT_DB_ALIAS = None try: from functools import wraps except ImportError: from django.utils.functional import wraps # Python 2.3, 2.4 fallback. import django class TransactionManager(object): """TransactionManager is a wrapper around a cache_backend that is transaction aware. If we are in a transaction, it will return the locally cached version. * On rollback, it will flush all local caches * On commit, it will push them up to the real shared cache backend (ex. memcached). """ _patched_var = False def __init__(self, cache_backend, keygen): from johnny import cache, settings self.timeout = settings.MIDDLEWARE_SECONDS self.prefix = settings.MIDDLEWARE_KEY_PREFIX self.cache_backend = cache_backend self.local = cache.local self.keygen = keygen(self.prefix) self._originals = {} self._dirty_backup = {} self.local['trans_sids'] = {} def _get_sid(self, using=None): if 'trans_sids' not in self.local: self.local['trans_sids'] = {} d = self.local['trans_sids'] if self.has_multi_db(): if using is None: using = DEFAULT_DB_ALIAS else: using = 'default' if using not in d: d[using] = [] return d[using] def _clear_sid_stack(self, using=None): if self.has_multi_db(): if using is None: using = DEFAULT_DB_ALIAS else: using = 'default' if using in self.local.get('trans_sids', {}): del self.local['trans_sids'] def has_multi_db(self): if django.VERSION[:2] in ((1, 2), (1, 3)): return True return False def is_managed(self): return django_transaction.is_managed() def get(self, key, default=None, using=None): if self.is_managed() and self._patched_var: val = self.local.get(key, None) if val: return val if self._uses_savepoints(): val = self._get_from_savepoints(key, using) if val: return val return self.cache_backend.get(key, default) def _get_from_savepoints(self, key, using=None): sids = self._get_sid(using) cp = list(sids) cp.reverse() for sid in cp: if key in self.local[sid]: return self.local[sid][key] def _trunc_using(self, using): if self.has_multi_db(): if using is None: using = DEFAULT_DB_ALIAS else: using = 'default' if len(using) > 100: using = using[0:68] + self.keygen.gen_key(using[68:]) return using def set(self, key, val, timeout=None, using=None): """ Set will be using the generational key, so if another thread bumps this key, the localstore version will still be invalid. If the key is bumped during a transaction it will be new to the global cache on commit, so it will still be a bump. """ if timeout is None: timeout = self.timeout if self.is_managed() and self._patched_var: self.local[key] = val else: self.cache_backend.set(key, val, timeout) def _clear(self, using=None): if self.has_multi_db(): self.local.clear('%s_%s_*'%(self.prefix, self._trunc_using(using))) else: self.local.clear('%s_*'%self.prefix) def _flush(self, commit=True, using=None): """ Flushes the internal cache, either to the memcache or rolls back """ if commit: # XXX: multi-set? 
if self._uses_savepoints(): self._commit_all_savepoints(using) if self.has_multi_db(): c = self.local.mget('%s_%s_*'%(self.prefix, self._trunc_using(using))) else: c = self.local.mget('%s_*'%self.prefix) for key, value in c.iteritems(): self.cache_backend.set(key, value, self.timeout) else: if self._uses_savepoints(): self._rollback_all_savepoints(using) self._clear(using) self._clear_sid_stack(using) def _patched(self, original, commit=True): @wraps(original) def newfun(using=None): #1.2 version original(using=using) self._flush(commit=commit, using=using) @wraps(original) def newfun11(): #1.1 version original() self._flush(commit=commit) if django.VERSION[:2] == (1,1): return newfun11 elif django.VERSION[:2] in ((1,2), (1,3)): return newfun return original def _uses_savepoints(self): return connection.features.uses_savepoints def _sid_key(self, sid, using=None): if using != None: return 'trans_savepoint_%s_%s'%(using, sid) return 'trans_savepoint_%s'%sid def _create_savepoint(self, sid, using=None): key = self._sid_key(sid, using) #get all local dirty items if self.has_multi_db(): c = self.local.mget('%s_%s_*'%(self.prefix, self._trunc_using(using))) else: c = self.local.mget('%s_*'%self.prefix) #store them to a dictionary in the localstore if key not in self.local: self.local[key] = {} for k, v in c.iteritems(): self.local[key][k] = v #clear the dirty self._clear(using) #append the key to the savepoint stack sids = self._get_sid(using) sids.append(key) def _rollback_savepoint(self, sid, using=None): sids = self._get_sid(using) key = self._sid_key(sid, using) stack = [] try: popped = None while popped != key: popped = sids.pop() stack.insert(0, popped) #delete items from localstore for i in stack: del self.local[i] #clear dirty self._clear(using) except IndexError, e: #key not found, don't delete from localstore, restore sid stack for i in stack: sids.insert(0, i) def _commit_savepoint(self, sid, using=None): #commit is not a commit but is in reality just a clear back to that savepoint #and adds the items back to the dirty transaction. 
key = self._sid_key(sid, using) sids = self._get_sid(using) stack = [] try: popped = None while popped != key: popped = sids.pop() stack.insert(0, popped) self._store_dirty(using) for i in stack: for k, v in self.local[i].iteritems(): self.local[k] = v del self.local[i] self._restore_dirty(using) except IndexError, e: for i in stack: sids.insert(0, i) def _commit_all_savepoints(self, using=None): sids = self._get_sid(using) if sids: self._commit_savepoint(sids[0], using) def _rollback_all_savepoints(self, using=None): sids = self._get_sid(using) if sids: self._rollback_savepoint(sids[0], using) def _store_dirty(self, using=None): if self.has_multi_db(): c = self.local.mget('%s_%s_*'%(self.prefix, self._trunc_using(using))) else: c = self.local.mget('%s_*'%self.prefix) backup = 'trans_dirty_store_%s'%self._trunc_using(using) self.local[backup] = {} for k, v in c.iteritems(): self.local[backup][k] = v self._clear(using) def _restore_dirty(self, using=None): backup = 'trans_dirty_store_%s'%self._trunc_using(using) for k, v in self.local.get(backup, {}).iteritems(): self.local[k] = v del self.local[backup] def _savepoint(self, original): @wraps(original) def newfun(using=None): if using != None: sid = original(using=using) else: sid = original() if self._uses_savepoints(): self._create_savepoint(sid, using) return sid return newfun def _savepoint_rollback(self, original): def newfun(sid, *args, **kwargs): original(sid, *args, **kwargs) if self._uses_savepoints(): if len(args) == 2: using = args[1] else: using = kwargs.get('using', None) self._rollback_savepoint(sid, using) return newfun def _savepoint_commit(self, original): def newfun(sid, *args, **kwargs): original(sid, *args, **kwargs) if self._uses_savepoints(): if len(args) == 1: using = args[0] else: using = kwargs.get('using', None) self._commit_savepoint(sid, using) return newfun def _getreal(self, name): return getattr(django_transaction, 'real_%s' % name, getattr(django_transaction, name)) def patch(self): """ This function monkey patches commit and rollback writes to the cache should not happen until commit (unless our state isn't managed). It does not yet support savepoints. """ if not self._patched_var: self._originals['rollback'] = self._getreal('rollback') self._originals['commit'] = self._getreal('commit') self._originals['savepoint'] = self._getreal('savepoint') self._originals['savepoint_rollback'] = self._getreal('savepoint_rollback') self._originals['savepoint_commit'] = self._getreal('savepoint_commit') django_transaction.rollback = self._patched(django_transaction.rollback, False) django_transaction.commit = self._patched(django_transaction.commit, True) django_transaction.savepoint = self._savepoint(django_transaction.savepoint) django_transaction.savepoint_rollback = self._savepoint_rollback(django_transaction.savepoint_rollback) django_transaction.savepoint_commit = self._savepoint_commit(django_transaction.savepoint_commit) self._patched_var = True def unpatch(self): for fun in self._originals: setattr(django_transaction, fun, self._originals[fun]) self._patched_var = False
mit
-1,120,410,348,727,484,800
33.245283
115
0.551974
false
3.975904
false
false
false
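The essence of TransactionManager._patched above is wrapping Django's commit/rollback so that locally buffered cache writes only reach the shared backend once the transaction really commits. A stand-alone sketch of that wrapping, using plain dicts in place of the local store and the cache backend:

from functools import wraps

local_store = {'user:1': 'alice'}              # writes buffered while "in the transaction"
shared_cache = {}                              # stands in for the real cache backend

def real_commit():
    print("real commit")

def patched(original, commit=True):
    @wraps(original)
    def newfun(*args, **kwargs):
        original(*args, **kwargs)
        if commit:
            shared_cache.update(local_store)   # push buffered writes on commit
        local_store.clear()                    # on rollback they are simply dropped
    return newfun

commit = patched(real_commit, commit=True)
commit()
print(shared_cache)                            # {'user:1': 'alice'}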
the-fascinator/fascinator-portal
src/main/config/portal/default/default/scripts/download.py
1
7607
import os from com.googlecode.fascinator.api.indexer import SearchRequest from com.googlecode.fascinator.api.storage import StorageException from com.googlecode.fascinator.common.solr import SolrDoc, SolrResult from java.io import ByteArrayInputStream, ByteArrayOutputStream from java.lang import Boolean from java.net import URLDecoder from org.apache.commons.io import IOUtils class DownloadData: def __init__(self): pass def __activate__(self, context): self.services = context["Services"] self.contextPath = context["contextPath"] self.pageName = context["pageName"] self.portalId = context["portalId"] self.request = context["request"] self.response = context["response"] self.formData = context["formData"] self.page = context["page"] self.log = context["log"] self.__metadata = SolrDoc(None) object = None payload = None # URL basics basePath = self.portalId + "/" + self.pageName fullUri = URLDecoder.decode(self.request.getAttribute("RequestURI")) uri = fullUri[len(basePath)+1:] # Turn our URL into objects object, payload = self.__resolve(uri) if object is None: if uri.endswith("/"): self.log.error("Object 404: '{}'", uri) self.response.setStatus(404); writer = self.response.getPrintWriter("text/plain; charset=UTF-8") writer.println("Object not found") writer.close() return else: # Sometimes adding a slash to the end will resolve the problem self.log.error("Redirecting, object 404: '{}'", uri) self.response.sendRedirect(context["urlBase"] + fullUri + "/") return # Ensure solr metadata is useable oid = object.getId() if self.isIndexed(): self.__metadata = self.__solrData.getResults().get(0) else: self.__metadata.getJsonObject().put("id", oid) #print "URI='%s' OID='%s' PID='%s'" % (uri, object.getId(), payload.getId()) # Security check if self.isAccessDenied(uri): # Redirect to the object page for standard access denied error self.response.sendRedirect(context["portalPath"] + "/detail/" + object.getId()) return ## The byte range cache will check for byte range requests first self.cache = self.services.getByteRangeCache() processed = self.cache.processRequest(self.request, self.response, payload) if processed: # We don't need to return data, the cache took care of it. 
return # Now the 'real' work of payload retrieval if payload is not None: filename = os.path.split(payload.getId())[1] mimeType = payload.getContentType() if mimeType == "application/octet-stream": self.response.setHeader("Content-Disposition", "attachment; filename=%s" % filename) type = payload.getContentType() # Enocode textual responses before sending if type is not None and type.startswith("text/"): out = ByteArrayOutputStream() IOUtils.copy(payload.open(), out) payload.close() writer = self.response.getPrintWriter(type + "; charset=UTF-8") writer.println(out.toString("UTF-8")) writer.close() # Other data can just be streamed out else: if type is None: # Send as raw data out = self.response.getOutputStream("application/octet-stream") else: out = self.response.getOutputStream(type) IOUtils.copy(payload.open(), out) payload.close() object.close() out.close() else: self.response.setStatus(404) writer = self.response.getPrintWriter("text/plain; charset=UTF-8") writer.println("Resource not found: uri='%s'" % uri) writer.close() def getAllowedRoles(self): metadata = self.getMetadata() if metadata is not None: return metadata.getList("security_filter") else: return [] def getMetadata(self): return self.__metadata def isAccessDenied(self,uri): # Admins always have access if self.page.authentication.is_admin(): return False slash = uri.find("/") if slash == -1: return None, None oid = uri[:slash] objectMetadata = self.services.getStorage().getObject(oid).getMetadata() if objectMetadata is not None: current_user = self.page.authentication.get_username() owner = objectMetadata.getProperty("owner") if current_user == owner: return False # Check for normal access myRoles = self.page.authentication.get_roles_list() allowedRoles = self.getAllowedRoles() if myRoles is None or allowedRoles is None: return True for role in myRoles: if role in allowedRoles: return False return True def isDetail(self): preview = Boolean.parseBoolean(self.formData.get("preview", "false")) return not (self.request.isXHR() or preview) def isIndexed(self): found = self.__solrData.getNumFound() return (found is not None) and (found == 1) def __resolve(self, uri): # Grab OID from the URL slash = uri.find("/") if slash == -1: return None, None oid = uri[:slash] # Query solr for this object self.__loadSolrData(oid) if not self.isIndexed(): print "WARNING: Object '%s' not found in index" % oid sid = None else: # Query storage for this object sid = self.__solrData.getResults().get(0).getFirst("storage_id") try: if sid is None: # Use the URL OID object = self.services.getStorage().getObject(oid) else: # We have a special storage ID from the index object = self.services.getStorage().getObject(sid) except StorageException, e: #print "Failed to access object: %s" % (str(e)) return None, None # Grab the payload from the rest of the URL pid = uri[slash+1:] if pid == "": # We want the source pid = object.getSourceId() # Now get the payload from storage try: payload = object.getPayload(pid) except StorageException, e: #print "Failed to access payload: %s" % (str(e)) return None, None # We're done return object, payload def __loadSolrData(self, oid): portal = self.page.getPortal() query = 'id:"%s"' % oid if self.isDetail() and portal.getSearchQuery(): query += " AND " + portal.getSearchQuery() req = SearchRequest(query) req.addParam("fq", 'item_type:"object"') if self.isDetail(): req.addParam("fq", portal.getQuery()) out = ByteArrayOutputStream() self.services.getIndexer().search(req, out) self.__solrData = 
SolrResult(ByteArrayInputStream(out.toByteArray()))
gpl-2.0
963,536,829,417,706,400
35.927184
100
0.567767
false
4.386967
false
false
false
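Ignoring the Fascinator/Jython plumbing, the payload-serving logic in DownloadData above reduces to a content-type switch: octet-stream gets a download header, text/* is decoded and re-emitted as UTF-8, and everything else is streamed through unchanged. A rough Python analogue of that branching (the function and names here are hypothetical, not part of the portal API):

import io

def send_payload(payload, content_type, filename, out):
    headers = {}
    if content_type == "application/octet-stream":
        headers["Content-Disposition"] = "attachment; filename=%s" % filename
    if content_type is not None and content_type.startswith("text/"):
        # textual payloads are normalised to UTF-8 before being written out
        out.write(payload.decode("utf-8").encode("utf-8"))
    else:
        # other payloads are streamed through untouched
        out.write(payload)
    return headers

out = io.BytesIO()
print(send_payload(b"hello world", "text/plain", "hello.txt", out))
print(out.getvalue())                          # b'hello world'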
digiholic/universalSmashSystem
engine/abstractFighter.py
1
75599
import settingsManager import pygame import xml.etree.ElementTree as ElementTree import xml.dom.minidom import os import engine.baseActions as baseActions import engine.collisionBox as collisionBox import weakref import engine.hurtbox as hurtbox import math import numpy import spriteManager import engine.article as article import engine.controller as controller import engine.actionLoader as actionLoader import engine.articleLoader from global_functions import * class AbstractFighter(): """The Abstract Fighter is an individual fighter in the battle. It holds all of the data needed to create, control, and clear a fighter. It is created initially by the Character Select Screen, as a container for things like icons and costume selections. It becomes a 'real' fighter when Initialize() is called, creating an object that can interact with the world. """ # Top Level fighter variables # base_dir = '' player_num = 0 xml_data = None # Data loaded from XML # name = 'Null' franchise_icon_path = 'sprites/default_franchise_icon.png' css_icon_path = './sprites/icon_unknown.png' css_portrait_path ='' sprite_directory = 'sprites/' sprite_prefix = '' sprite_width = 64 default_sprite = 'sandbag_idle' sprite = None sprite_flip = 'right' article_sprite_path = '' article_file = '' sound_path = '' action_file = baseActions.__file__ default_stats = { 'weight': 100, 'gravity': .5, 'max_fall_speed': 20.0, 'max_ground_speed': 7.0, 'run_speed': 11.0, 'max_air_speed': 5.5, 'aerial_transition_speed': 9.0, 'crawl_speed': 2.5, 'dodge_speed': 8.5, 'friction': 0.3, 'static_grip': 0.3, 'pivot_grip': 0.6, 'air_resistance': 0.2, 'air_control': 0.2, 'jumps': 1, 'jump_height': 12.5, 'short_hop_height': 8.5, 'air_jump_height': 15.0, 'heavy_land_lag': 4, 'wavedash_lag': 12, 'fastfall_multiplier': 2.0, 'hitstun_elasticity': .8, 'shield_size': 1.0 } default_vars = dict() # Data gotten from the XML data, like loading files and folders # actions = baseActions stats = dict() variables = dict() # Initialized fighter variables # key_bindings = None active_hitboxes = None #pygame.sprite.Group() articles = None #list() status_effects = None #list() active_hurtboxes = None #pygame.sprite.Group() auto_hurtbox = None armor = None shield = False shield_integrity = 100 input_buffer = None last_input_frame = 0 keys_held = None hitbox_lock = None #weakref.WeakSet() ledge_lock = False mask = None hit_tagged = None angle = 0 jumps = 0 damage = 0 landing_lag = 6 platform_phase = 0 tech_window = 0 airdodges = 1 grounded = False elasticity = 0 ground_elasticity = 0 grab_point = (0, 0) posx = 0 posy = 0 change_x = 0 change_y = 0 preferred_xspeed = 0 preferred_yspeed = 0 trail_color = "#000000" #facing right = 1, left = -1 facing = 1 #Adding a move to the disabled moves list prevents it from activating. #If told to switch to it, the fighter will ignore the request. disabled_moves = set() invulnerable = 0 respawn_invulnerable = 0 hitstop = 0 hitstop_vibration = (0,0) hitstop_pos = (0,0) custom_timers = list() current_color = 0 current_costume = 0 css_icon = spriteManager.ImageSprite(settingsManager.createPath('sprites/icon_unknown.png')) color_palettes = [] palette_display = [] def __init__(self,_baseDir,_playerNum): """ Create a fighter. To start, all that's needed is the directory it is in, and the player number. It uses the directory to find its fighter.xml file and begin storing data. Parameters ----------- _baseDir : string The filepath of the folder being loaded. 
Used to determine the location of fighter.xml, icons, and sprites _playerNum: int The number of the controlling player. 0-indexed, so Player 1 is number 0 """ def loadNodeWithDefault(_tag,_default): """ An anonymous inner function to quickly pull from XML, giving a default value if the node is not present or otherwise can't be loaded. Parameters ----------- _node : Element The XML tree to be searching from. The node directly above the one you're looking for. _tag : string The name of the XML tag to search for _default : any type the default value of the node, in case it cannot find the proper value Return ----------- The string value of the Node, or the given default if it is not valid """ if self.xml_data is not None: if self.xml_data.find(_tag) is not None: if self.xml_data.find(_tag).text is None: return _default else: return self.xml_data.find(_tag).text return _default self.base_dir = _baseDir self.player_num = _playerNum #Load the xml data if fighter.xml exists if os.path.exists(os.path.join(self.base_dir,'fighter.xml')): self.xml_data = ElementTree.parse(os.path.join(_baseDir,'fighter.xml')).getroot() else: self.xml_data = ElementTree.ElementTree().getroot() #Load the CSS info self.name = loadNodeWithDefault('name', self.name) self.franchise_icon_path = loadNodeWithDefault('icon', self.franchise_icon_path) self.css_icon_path = loadNodeWithDefault('css_icon', self.css_icon_path) self.css_portrait_path = loadNodeWithDefault('css_portrait', self.css_portrait_path) #Load the sprite info self.sprite_directory = loadNodeWithDefault('sprite_directory', os.path.join(self.base_dir,'sprites/')) self.sprite_prefix = loadNodeWithDefault('sprite_prefix', self.sprite_prefix) self.sprite_width = int(loadNodeWithDefault('sprite_width', self.sprite_width)) self.default_sprite = loadNodeWithDefault('default_sprite', self.default_sprite) try: self.sprite_flip = self.xml_data.find('facing').text except: self.sprite_flip = "right" #Load the article info self.article_sprite_path = loadNodeWithDefault('article_path', self.article_sprite_path) self.article_file = loadNodeWithDefault('articles', self.article_file) #Load sounds self.sound_path = loadNodeWithDefault('sound_path', self.sound_path) #Load actions self.action_file = loadNodeWithDefault('actions', self.action_file) #Load the article loader self.article_path_short = loadNodeWithDefault('article_path', '') self.article_path = os.path.join(self.base_dir,self.article_path_short) self.article_loader_path = loadNodeWithDefault('articles', None) print(self.article_loader_path) if self.article_loader_path == '': self.article_loader = None else: self.article_loader = engine.articleLoader.ArticleLoader(self) #TODO color palettes for color_palette in self.xml_data.findall('color_palette'): color_dict = {} for color_map in color_palette.findall('color_map'): from_color = pygame.Color(color_map.attrib['from_color']) to_color = pygame.Color(color_map.attrib['to_color']) color_dict[(from_color.r, from_color.g, from_color.b)] = (to_color.r, to_color.g, to_color.b) self.color_palettes.append(color_dict) self.palette_display.append(pygame.Color(color_palette.attrib['displayColor'])) while len(self.color_palettes) < 4: self.color_palettes.append({}) self.costumes = [self.sprite_prefix] for costume in self.xml_data.findall('costume'): self.costumes.append(costume.text) self.current_color = self.player_num # Now that we've got all the paths, need to actually load the files if self.css_icon_path[0] == '.': #If the path starts with a period, start from the top of the 
game directory instead self.css_icon = spriteManager.ImageSprite(settingsManager.createPath(self.css_icon_path)) else: self.css_icon = spriteManager.ImageSprite(os.path.join(self.base_dir,self.css_icon_path)) if self.franchise_icon_path[0] == '.': #If the path starts with a period, start from the top of the game directory instead self.franchise_icon = spriteManager.ImageSprite(settingsManager.createPath(self.franchise_icon_path)) else: self.franchise_icon = spriteManager.ImageSprite(os.path.join(self.base_dir,self.franchise_icon_path)) #TODO: The ECB crashes unless there is a sprite to pull from, so we load this one even though it'll never actually be drawn spriteName = self.sprite_prefix + self.default_sprite + '.png' try: self.scale = float(self.xml_data.find('scale').text) except: self.scale = 1.0 self.sprite = spriteManager.SheetSprite(os.path.join(self.base_dir,self.sprite_directory,spriteName), self.sprite_width) self.events = dict() #try: if self.action_file.endswith('.py'): self.actions = settingsManager.importFromURI(os.path.join(_baseDir,'fighter.xml'),self.action_file,_suffix=str(self.player_num)) else: self.actions = actionLoader.ActionLoader(_baseDir,self.action_file) self.events = self.actions.getGlobalEvents() #except: # self.actions = baseActions # self.action_file = baseActions.__file__ self.stats = self.default_stats.copy() self.variables = self.default_vars.copy() self.keys_held = dict() self.status_effects = list() self.data_log = None self.game_state = None def saveFighter(self,_path=None): """ Save the fighter's data to XML. Basically the inverse of __init__. Parameters ----------- _path : string The path to store the fighter.xml file in. If left blank, it will use base_dir. """ def createElement(_tag,_val): """ An anonymouse inner function for quickly creating an XML element with a value. 
Parameters ----------- _tag : The XML tag of the element _val : The data to go into the element """ elem = ElementTree.Element(_tag) if _val is not None: elem.text = str(_val) else: elem.text = '' return elem tree = ElementTree.Element('fighter') tree.append(createElement('name', self.name)) tree.append(createElement('icon', self.franchise_icon_path)) tree.append(createElement('css_icon', self.css_icon_path)) tree.append(createElement('scale', self.scale)) tree.append(createElement('sprite_directory', self.sprite_directory)) tree.append(createElement('sprite_prefix', self.sprite_prefix)) tree.append(createElement('sprite_width', self.sprite_width)) tree.append(createElement('default_sprite', self.default_sprite)) tree.append(createElement('article_path', self.article_path_short)) tree.append(createElement('articles', self.article_file)) tree.append(createElement('sound_path', self.sound_path)) tree.append(createElement('actions', self.action_file)) for i,color_dict in enumerate(self.color_palettes): color_elem = ElementTree.Element('color_palette') color_elem.attrib['id'] = str(i) color_elem.attrib['displayColor'] = '#000000' for from_color,to_color in color_dict.iteritems(): map_elem = ElementTree.Element('color_map') map_elem.attrib['from_color'] = '#%02x%02x%02x' % from_color map_elem.attrib['to_color'] = '#%02x%02x%02x' % to_color color_elem.append(map_elem) tree.append(color_elem) stats_elem = ElementTree.Element('stats') for costume in self.costumes: if not costume == self.sprite_prefix: tree.append(createElement('costume', costume)) for tag,val in self.stats.iteritems(): stats_elem.append(createElement(tag, val)) tree.append(stats_elem) if _path is None: _path = os.path.join(self.base_dir,'fighter.xml') xmlfile = xml.dom.minidom.parseString(ElementTree.tostring(tree)) outputFile = open(_path,'w') outputFile.write(xmlfile.toprettyxml()) def loadSpriteLibrary(self,_color=None): """ Loads the sprite library for the fighter, with the current costume and color. Parameters ----------- _color : int The index of the color to use. By default, will use the stored current_color variable, which is set while selecting. This optional argument should be used when you're overriding the game's color choice to load up a different palette. """ directory = os.path.join(self.base_dir,self.sprite_directory) try: scale = float(self.xml_data.find('scale').text) except: scale = 1.0 if _color == None: _color = self.current_color self.sprite = spriteManager.SpriteHandler(str(directory), self.costumes[self.current_costume % len(self.costumes)], self.default_sprite, self.sprite_width, self.color_palettes[_color % len(self.color_palettes)], scale, self.sprite_flip) self.rect = self.sprite.rect def initialize(self): """ This method is called when shit gets real. It creates the collision box, sprite library, etc. and is ready to start getting updates and doing actions. No parameters, no return value. Converts this object into an Initialized Fighter Object. 
""" """ Initialize components """ # Initialize key bindings object self.input_buffer = controller.InputBuffer() self.key_bindings = settingsManager.getControls(self.player_num) self.key_bindings.linkObject(self) self.articles = list() if self.sound_path: settingsManager.getSfx().addSoundsFromDirectory(os.path.join(self.base_dir,self.sound_path), self.name) if self.xml_data is not None: if self.xml_data.find('stats') is not None: for stat in self.xml_data.find('stats'): vartype = type(self.default_stats[stat.tag]).__name__ if vartype == 'int': self.default_stats[stat.tag] = int(stat.text) if vartype == 'float': self.default_stats[stat.tag] = float(stat.text) if self.xml_data.find('variables') is not None: for variable in self.xml_data.find('variables'): vartype = 'string' if variable.attrib.has_key('type'): vartype = variable.attrib['type'] val = variable.text if vartype == 'int': val = int(val) elif vartype == 'float': val = float(val) elif vartype == 'bool': val = bool(val) self.default_vars[variable.tag] = val self.onRespawn() ######################################################## # UPDATE METHODS # ######################################################## def onRespawn(self): """This method initializes things that should be initialized at the start of the game, and each time the fighter dies. """ self.key_bindings.flushInputs() self.keys_held = dict() self.stats = self.default_stats.copy() self.variables = self.default_vars.copy() self.disabled_moves.clear() # Evironmental Collision Box self.ecb = collisionBox.ECB(self) self.init_boxes() self.hitbox_lock = weakref.WeakSet() self.damage = 0 self.change_x = 0 self.change_y = 0 self.jumps = self.stats['jumps'] self.trail_color = settingsManager.getSetting('playerColor' + str(self.player_num)) self.facing = 1 if self.sprite.flip == 'left': self.sprite.flipX() self.unRotate() self.current_action = self.getAction('NeutralAction') if hasattr(self.actions,'loadAction'): self.doAction('Respawn') elif hasattr(self.actions, 'Respawn'): class_ = getattr(self.actions,'Respawn') self.changeAction(class_()) def init_boxes(self): self.active_hitboxes = pygame.sprite.Group() self.active_hurtboxes = pygame.sprite.Group() self.auto_hurtbox = hurtbox.Hurtbox(self) self.armor = dict() def update(self): """ This method will step the fighter forward one frame. It will resolve movement, collisions, animations, and all sorts of things. It should be called every frame. """ self.ecb.normalize() self.ecb.store() self.input_buffer.push() self.last_input_frame += 1 if self.hitstop > 0: #We're in hitstop, let's take care of that and ignore a normal update self.hitstopUpdate() return elif self.hitstop == 0 and not self.hitstop_vibration == (0,0): #self.hitstop_vibration = False #Lolwut? 
(self.posx, self.posy) = self.hitstop_pos self.hitstop_vibration = (0,0) self.updatePosition() self.ecb.normalize() # Allow ledge re-grabs if we've vacated a ledge if self.ledge_lock: ledges = pygame.sprite.spritecollide(self.ecb.current_ecb, self.game_state.platform_ledges, False) if len(ledges) == 0: # If we've cleared out of all of the ledges self.ledge_lock = False # Prepare for movement by setting change_x and change_y from acceleration if self.grounded: self.accel(self.stats['friction']) else: self.accel(self.stats['air_resistance']) self.calcGrav() # Check for transitions, then execute actions self.current_action.stateTransitions(self) self.current_action.update(self) #update our action self.updatePosition() self.ecb.normalize() self.collisionUpdate() self.childUpdate() self.timerUpdate() def collisionUpdate(self): """ Execute movement and resolve collisions. This function is due for a huge overhaul. """ loop_count = 0 while loop_count < 2: self.updatePosition() self.ecb.normalize() bumped = False block_hit_list = collisionBox.getSizeCollisionsWith(self, self.game_state.platform_list) if not block_hit_list: break for block in block_hit_list: if block.solid or (self.platform_phase <= 0): self.platform_phase = 0 if collisionBox.eject(self, block, self.platform_phase > 0): bumped = True break if not bumped: break loop_count += 1 # TODO: Crush death if loopcount reaches the 10 resolution attempt ceiling self.updatePosition() self.ecb.normalize() t = 1 to_bounce_block = None self.updatePosition() self.ecb.normalize() block_hit_list = collisionBox.getMovementCollisionsWith(self, self.game_state.platform_list) for block in block_hit_list: if self.ecb.pathRectIntersects(block.rect, self.change_x, self.change_y) > 0 and self.ecb.pathRectIntersects(block.rect, self.change_x, self.change_y) < t and collisionBox.catchMovement(self, block, self.platform_phase > 0): t = self.ecb.pathRectIntersects(block.rect, self.change_x, self.change_y) to_bounce_block = block self.posy += self.change_y*t self.posx += self.change_x*t self.updatePosition() self.ecb.normalize() # Move with the platform block = reduce(lambda x, y: y if x is None or y.rect.top <= x.rect.top else x, self.checkGround(), None) if not block is None and self.ecb.current_ecb.rect.centerx > block.rect.left and self.ecb.current_ecb.rect.centerx < block.rect.right: self.jumps = self.stats['jumps'] self.posx += block.change_x #if self.ecb.current_ecb.rect.bottom > block.rect.top: # self.posy += block.rect.top - self.ecb.current_ecb.rect.bottom-block.change_y self.change_y -= self.stats['gravity'] * settingsManager.getSetting('gravity') if self.change_y > block.change_y: self.change_y = block.change_y self.grounded = self.isGrounded() if to_bounce_block is not None: collisionBox.reflect(self, to_bounce_block) def childUpdate(self): """ The fighter contains many child objects, that all need to be updated. This function calls those updates. """ if self.mask:self.mask = self.mask.update() for art in self.articles: art.update() for stat in self.status_effects: stat.update() def timerUpdate(self): """ There are several frame counters that determine things like teching, invulnerability, platform phasing, etc. as well as possible custom timers. 
""" #These max calls will decrement the window, but not below 0 self.tech_window = max(0,self.tech_window-1) self.shield_integrity = min(100,self.shield_integrity+0.15) self.platform_phase = max(0,self.platform_phase-1) finished_timers = [] for timer in self.custom_timers: time,event = timer time -= 1 if time <= 0: for subact in event: subact.execute(self,self.current_action) #In order to avoid mucking up the iterative loop, we store finished timers to remove later finished_timers.append(timer) for timer in finished_timers: self.custom_timers.remove(timer) def hitstopUpdate(self): """ Handles what to do if the fighter is in hitstop (that freeze frame state when you get hit). Vibrates the fighter's sprite, and handles SDI """ self.hitstop -= 1 loop_count = 0 #QUESTION: Why is this a loop? #ANSWER: It's so multiple ejections can happen while loop_count < 2: self.updatePosition() self.ecb.normalize() bumped = False block_hit_list = collisionBox.getSizeCollisionsWith(self, self.game_state.platform_list) if not block_hit_list: break for block in block_hit_list: if block.solid or (self.platform_phase <= 0): self.platform_phase = 0 if collisionBox.eject(self, block, self.platform_phase > 0): bumped = True break if not bumped: break loop_count += 1 self.updatePosition() self.ecb.normalize() # Vibrate the sprite if not self.hitstop_vibration == (0,0): (x,y) = self.hitstop_vibration self.posx += x if not self.grounded: self.posy += y self.hitstop_vibration = (-x,-y) #Smash directional influence AKA hitstun shuffling di_vec = self.getSmoothedInput(int(self.key_bindings.timing_window['smoothing_window'])) self.posx += di_vec[0]*5 if not self.grounded or self.keysContain('jump', _threshold=1): self.posy += di_vec[1]*5 self.updatePosition() self.ecb.normalize() # Move with the platform block = reduce(lambda x, y: y if x is None or y.rect.top <= x.rect.top else x, self.checkGround(), None) if not block is None: self.posx += block.change_x self.updatePosition() if self.platform_phase > 0: self.platform_phase -= 1 self.ecb.normalize() def draw(self,_screen,_offset,_scale): if (settingsManager.getSetting('showSpriteArea')):spriteManager.RectSprite(self.sprite.rect).draw(_screen, _offset, _scale) rect = self.sprite.draw(_screen,_offset,_scale) if self.mask: self.mask.draw(_screen,_offset,_scale) if settingsManager.getSetting('showECB'): self.ecb.draw(_screen,_offset,_scale) return rect ######################################################## # ACTION MANAGEMENT # ######################################################## def doAction(self,_actionName): """ Load up the given action. If it's executable, change to it. If it's not, still execute the setUp (this allows for certain code to happen, even if the action is not executed.) If the move is disabled, it won't even bother to load it, since we shouldn't be doing anything with it. Parameters ----------- _actionName : String The Action to load and switch to """ if not _actionName in self.disabled_moves: # If our action is an ActionLoader, we need to pull it from XML if hasattr(self.actions,'loadAction'): action = self.actions.loadAction(_actionName) if action.last_frame > 0: self.changeAction(action) else: action.setUp(self) # If it has an object of the given name, get that object elif hasattr(self.actions, _actionName): class_ = getattr(self.actions,_actionName) action = class_() if action.last_frame > 0: self.changeAction(action) else: action.setUp(self) def changeAction(self,_newAction): """ Switches from the current action to the given action. 
Calls tearDown on the current action, before setting up the new one. If we get this far, the new action is valid and ready to be executed Parameters ----------- _newAction : Action The Action to switch to """ if self.current_action: self.current_action.tearDown(self,_newAction) _newAction.setUp(self) self.current_action = _newAction def getAction(self,_actionName): """ Loads an action, without changing to it or executing it. Since this is just to read, it will load an action that is disabled, or unexecutable. If you need to change to it, please use doAction instead, which will make sure the action is valid before executing. Parameters ----------- _actionName : String The name of the action to load Return ----------- Action : The loaded action with the given name. Returns None if there is no action with that name. """ action = None if hasattr(self.actions,'loadAction'): action = self.actions.loadAction(_actionName) elif hasattr(self.actions, _actionName): class_ = getattr(self.actions,_actionName) action = class_() return action def hasAction(self,_actionName): """ Returns True if the fighter has an action of the given name. Does not load the action, change to it, or do anything other than check if it exists. You do not need to run this before getAction or doAction, as they check for the action themselves. Parameters ----------- _actionName : String The name of the action to check for """ if hasattr(self.actions,'hasAction'): return self.actions.hasAction(_actionName) else: return hasattr(self.actions, _actionName) def loadArticle(self,_articleName): """ Loads and returns an article. Checks if the articles are loading from XML or Python, and loads the appropriate one. Parameters ----------- _articleName : String The name of the article to load Return ----------- Article : The article of the given name. Returns None if no Article with that name exists. 
""" if hasattr(self.article_loader, 'loadArticle'): return self.article_loader.loadArticle(_articleName) elif hasattr(self.article_loader, _articleName): class_ = getattr(self.article_loader, _articleName) return(class_(self)) """ All of this stuff below should probably be rewritten or find a way to be removed """ def doGroundMove(self,_direction): print(self.input_buffer) if (self.facing == 1 and _direction == 180) or (self.facing == -1 and _direction == 0): self.flip() self.doAction('Move') def doDash(self,_direction): if (self.facing == 1 and _direction == 180) or (self.facing == -1 and _direction == 0): self.flip() self.doAction('Dash') def doGroundAttack(self): (key, invkey) = self.getForwardBackwardKeys() direct = self.netDirection([key, invkey, 'down', 'up']) if direct == key: self.doAction('ForwardSmash') if self.checkSmash(key) else self.doAction('ForwardAttack') elif direct == invkey: self.flip() self.doAction('ForwardSmash') if self.checkSmash(invkey) else self.doAction('ForwardAttack') elif direct == 'down': self.doAction('DownSmash') if self.checkSmash('down') else self.doAction('DownAttack') elif direct == 'up': self.doAction('UpSmash') if self.checkSmash('up') else self.doAction('UpAttack') else: self.doAction('NeutralAttack') def doAirAttack(self): (forward, backward) = self.getForwardBackwardKeys() direct = self.netDirection([forward, backward, 'down', 'up']) if direct == forward: self.doAction('ForwardAir') elif direct == backward: self.doAction('BackAir') elif direct == 'down': self.doAction('DownAir') elif direct == 'up': self.doAction('UpAir') else: self.doAction('NeutralAir') def doGroundSpecial(self): (forward, backward) = self.getForwardBackwardKeys() direct = self.netDirection(['up', forward, backward, 'down']) if direct == 'up': if self.hasAction('UpSpecial'): self.doAction('UpSpecial') else: self.doAction('UpGroundSpecial') elif direct == forward: if self.hasAction('ForwardSpecial'): #If there's a ground/air version, do it self.doAction('ForwardSpecial') else: #If there is not a universal one, do a ground one self.doAction('ForwardGroundSpecial') elif direct == backward: self.flip() if self.hasAction('ForwardSpecial'): self.doAction('ForwardSpecial') else: self.doAction('ForwardGroundSpecial') elif direct == 'down': if self.hasAction('DownSpecial'): self.doAction('DownSpecial') else: self.doAction('DownGroundSpecial') else: if self.hasAction('NeutralSpecial'): self.doAction('NeutralSpecial') else: self.doAction('NeutralGroundSpecial') def doAirSpecial(self): (forward, backward) = self.getForwardBackwardKeys() direct = self.netDirection(['up', forward, backward, 'down']) if direct == 'up': if self.hasAction('UpSpecial'): self.doAction('UpSpecial') else: self.doAction('UpAirSpecial') elif direct == forward: if self.hasAction('ForwardSpecial'): #If there's a ground/air version, do it self.doAction('ForwardSpecial') else: #If there is not a universal one, do an air one self.doAction('ForwardAirSpecial') elif direct == backward: self.flip() if self.hasAction('ForwardSpecial'): self.doAction('ForwardSpecial') else: self.doAction('ForwardAirSpecial') elif direct == 'down': if self.hasAction('DownSpecial'): self.doAction('DownSpecial') else: self.doAction('DownAirSpecial') else: if self.hasAction('NeutralSpecial'): self.doAction('NeutralSpecial') else: self.doAction('NeutralAirSpecial') def doTech(self): (forward, backward) = self.getForwardBackwardKeys() direct = self.netDirection([forward, backward, 'down', 'up']) if direct == forward: self.doAction('ForwardTech') elif 
direct == backward: self.doAction('BackwardTech') elif direct == 'down': self.doAction('DodgeTech') else: if self.hasAction('NormalTech'): self.doAction('NormalTech') else: self.doAction('Getup') def doHitStun(self,_hitstun,_trajectory): self.doAction('HitStun') self.current_action.direction = _trajectory self.current_action.last_frame = _hitstun def doProne(self, _length): self.doAction('Prone') self.current_action.last_frame = _length def doShield(self, _newShield=True): self.doAction('Shield') self.current_action.new_shield = _newShield def doShieldStun(self, _length): self.doAction('ShieldStun') self.current_action.last_frame = _length def doLedgeGrab(self,_ledge): self.doAction('LedgeGrab') self.current_action.ledge = _ledge def doTrapped(self, _length): self.doAction('Trapped') self.current_action.last_frame = _length def doStunned(self, _length): self.doAction('Stunned') self.current_action.last_frame = _length def doGrabbed(self, _height): self.doAction('Grabbed') self.current_action.height = _height ######################################################## # COLLISIONS AND MOVEMENT # ######################################################## def accel(self,_xFactor): """ Change speed to get closer to the preferred speed without going over. Parameters ----------- _xFactor : float The factor by which to change xSpeed. Usually self.stats['friction'] or self.stats['air_resistance'] """ #TODO: I feel like there's a better way to do this but I can't think of one if self.change_x > self.preferred_xspeed: #if we're going too fast diff = self.change_x - self.preferred_xspeed self.change_x -= min(diff,_xFactor*(settingsManager.getSetting('friction') if self.grounded else settingsManager.getSetting('airControl'))) elif self.change_x < self.preferred_xspeed: #if we're going too slow diff = self.preferred_xspeed - self.change_x self.change_x += min(diff,_xFactor*(settingsManager.getSetting('friction') if self.grounded else settingsManager.getSetting('airControl'))) # Change ySpeed according to gravity. 
def calcGrav(self, _multiplier=1): """ Changes the ySpeed according to gravity Parameters ----------- _multiplier : float A multiple of gravity to adjust by, in case gravity is changed temporarily """ if self.change_y > self.preferred_yspeed: diff = self.change_y - self.preferred_yspeed self.change_y -= min(diff, _multiplier*self.stats['gravity'] * settingsManager.getSetting('gravity')) elif self.change_y < self.preferred_yspeed: diff = self.preferred_yspeed - self.change_y self.change_y += min(diff, _multiplier*self.stats['gravity'] * settingsManager.getSetting('gravity')) def checkGround(self): self.updatePosition() return collisionBox.checkGround(self, self.game_state.platform_list, self.tech_window <= 0) def checkLeftWall(self): self.updatePosition() return collisionBox.checkLeftWall(self, self.game_state.platform_list, True) def checkRightWall(self): self.updatePosition() return collisionBox.checkRightWall(self, self.game_state.platform_list, True) def checkBackWall(self): self.updatePosition() return collisionBox.checkBackWall(self, self.game_state.platform_list, True) def checkFrontWall(self): self.updatePosition() return collisionBox.checkFrontWall(self, self.game_state.platform_list, True) def checkCeiling(self): self.updatePosition() return collisionBox.checkCeiling(self, self.game_state.platform_list, True) def isGrounded(self): self.updatePosition() return collisionBox.isGrounded(self, self.game_state.platform_list, self.tech_window <= 0) def isLeftWalled(self): self.updatePosition() return collisionBox.isLeftWalled(self, self.game_state.platform_list, True) def isRightWalled(self): self.updatePosition() return collisionBox.isRightWalled(self, self.game_state.platform_list, True) def isBackWalled(self): self.updatePosition() return collisionBox.isBackWalled(self, self.game_state.platform_list, True) def isFrontWalled(self): self.updatePosition() return collisionBox.isFrontWalled(self, self.game_state.platform_list, True) def isCeilinged(self): self.updatePosition() return collisionBox.isCeilinged(self, self.game_state.platform_list, True) def setSpeed(self,_speed,_direction): """ Set the actor's speed. Instead of modifying the change_x and change_y values manually, this will calculate what they should be set at if you want to give a direction and magnitude instead. Parameters ----------- _speed : float The total speed you want the fighter to move _direction : int The angle of the speed vector in degrees, 0 being right, 90 being up, 180 being left. """ (x,y) = getXYFromDM(_direction,_speed) self.change_x = x self.change_y = y ######################################################## # ANIMATION FUNCTIONS # ######################################################## def rotateSprite(self,_direction): """ Rotate's the fighter's sprite a given number of degrees Parameters ----------- _direction : int The degrees to rotate towards. 0 being forward, 90 being up """ self.sprite.rotate(-1 * (90 - _direction)) def unRotate(self): """ Resets rotation to it's proper, straight upwards value """ self.sprite.rotate() def changeSprite(self,_newSprite,_frame=0): """ Changes the fighter's sprite to the one with the given name. Optionally can change into a frame other than zero. Parameters ----------- _newSprite : string The name of the sprite in the SpriteLibrary to change to _frame : int : default 0 The frame to switch to in the new sprite. 
Leave off to start the new animation at zero """ self.sprite.changeImage(_newSprite) self.current_action.sprite_name = _newSprite if _frame != 0: self.sprite.changeSubImage(_frame) def changeSpriteImage(self,_frame,_loop=False): """ Change the subimage of the current sprite. Parameters ----------- _frame : int The frame number to change to. _loop : bool If True, any subimage value larger than maximum will loop back into a new value. For example, if _loop is set, accessing the 6th subimage of an animation 4 frames long will get you the second. """ self.sprite.changeSubImage(_frame,_loop) def updatePosition(self): """ Passes the updatePosition call to the sprite. See documentation in SpriteLibrary.updatePosition """ return self.sprite.updatePosition(self.posx, self.posy) ######################################################## # INPUT FUNCTIONS # ######################################################## def keyPressed(self,_key): """ Add a key to the buffer. This function should be adding to the buffer, and ONLY adding to the buffer. Any sort of calculations and state changes should probably be done in the stateTransitions function of the current action. Parameters ----------- _key : String The key to append to the buffer """ self.input_buffer.append((_key,1.0)) self.keys_held[_key] = 1.0 def keyReleased(self,_key): """ Removes a key from the buffer. That is to day, it appends a release to the buffer. It is safe to call this function if the key is not in the buffer, and it will return False if the key was not in there to begin with. Parameters ----------- _key : String The key to remove Return ----------- If the key was successfully removed, True. False if the key was not present to be removed. """ if _key in self.keys_held: self.input_buffer.append((_key,0)) del self.keys_held[_key] return True else: return False def keyBuffered(self, _key, _from = 1, _state = 0.1, _to = 0): """ Checks if a key was pressed within a certain amount of frames. Parameters ----------- _key : String The key to search fore _from : int : 1 The furthest back frame to look to. _state : float : 0.1 A value from 0 to 1 for a threshold on value before a button registers as a press. Usually only applies to sticks, since buttons are always 0.0 or 1.0 _to : int : 0 The furthest forward frame to look to. """ if any(map(lambda k: _key in k and k[_key] >= _state,self.input_buffer.getLastNFrames(_from, _to))): self.last_input_frame = 0 return True return False def keyTapped(self, _key, _from = None, _state = 0.1, _to = 0): """ Checks if a key was pressed and released within a certain amount of frames. Parameters ----------- _key : String The key to search fore _from : int : None The furthest back frame to look to. If set to None, it will look at the default Buffer Window in the player's control settings _state : float : 0.1 A value from 0 to 1 for a threshold on value before a button registers as a press. Usually only applies to sticks, since buttons are always 0.0 or 1.0 _to : int : 0 The furthest forward frame to look to. 
""" if _from is None: _from = max(min(int(self.key_bindings.timing_window['buffer_window']), self.last_input_frame), 1) down_frames = map(lambda k: _key in k and k[_key] >= _state, self.input_buffer.getLastNFrames(_from, _to)) up_frames = map(lambda k: _key in k and k[_key] < _state, self.input_buffer.getLastNFrames(_from, _to)) if not any(down_frames) or not any(up_frames): return False first_down_frame = reduce(lambda j, k: j if j != None else (k if down_frames[k] else None), range(len(down_frames)), None) last_up_frame = reduce(lambda j, k: k if up_frames[k] else j, range(len(up_frames)), None) if first_down_frame >= last_up_frame: self.last_input_frame = 0 return True return False #A key press which hasn't been released yet def keyHeld(self, _key, _from = None, _state = 0.1, _to = 0): """ Checks if a key was pressed within a certain amount of frames and is still being held. Parameters ----------- _key : String The key to search fore _from : int : None The furthest back frame to look to. If set to None, it will look at the default Buffer Window in the player's control settings _state : float : 0.1 A value from 0 to 1 for a threshold on value before a button registers as a press. Usually only applies to sticks, since buttons are always 0.0 or 1.0 _to : int : 0 The furthest forward frame to look to. """ if _from is None: _from = max(min(int(self.key_bindings.timing_window['buffer_window']), self.last_input_frame), 1) down_frames = map(lambda k: _key in k and k[_key] >= _state, self.input_buffer.getLastNFrames(_from, _to)) up_frames = map(lambda k: _key in k and k[_key] < _state, self.input_buffer.getLastNFrames(_from, _to)) if not any(down_frames): return False if any(down_frames) and not any(up_frames): self.last_input_frame = 0 return True first_down_frame = reduce(lambda j, k: j if j != None else (k if down_frames[k] else None), range(len(down_frames)), None) last_up_frame = reduce(lambda j, k: k if up_frames[k] else j, range(len(up_frames)), None) if first_down_frame < last_up_frame: self.last_input_frame = 0 return True return False def keyUp(self, _key, _from = 1, _state = 0.1, _to = 0): """ Checks if a key was released within a certain amount of frames. Parameters ----------- _key : String The key to search fore _from : int : 1 The furthest back frame to look to. _state : float : 0.1 A value from 0 to 1 for a threshold on value before a button registers as a press. Usually only applies to sticks, since buttons are always 0.0 or 1.0 _to : int : 0 The furthest forward frame to look to. """ if any(map(lambda k: _key in k and k[_key] < _state, self.input_buffer.getLastNFrames(_from, _to))): self.last_input_frame = 0 return True return False def keyReinput(self, _key, _from = None, _state = 0.1, _to = 0): """ Checks if a key was pressed twice within a certain amount of time Parameters ----------- _key : String The key to search fore _from : int : 1 The furthest back frame to look to. If set to None, it will look at the default Buffer Window in the player's control settings _state : float : 0.1 A value from 0 to 1 for a threshold on value before a button registers as a press. Usually only applies to sticks, since buttons are always 0.0 or 1.0 _to : int : 0 The furthest forward frame to look to. 
""" if _from is None: _from = max(min(int(self.key_bindings.timing_window['buffer_window']), self.last_input_frame), 1) up_frames = map(lambda k: _key in k and k[_key] < _state, self.input_buffer.getLastNFrames(_from, _to)) down_frames = map(lambda k: _key in k and k[_key] >= _state, self.input_buffer.getLastNFrames(_from, _to)) if not any(down_frames) or not any(down_frames): return False first_up_frame = reduce(lambda j, k: j if j != None else (k if up_frames[k] else None), range(len(up_frames)), None) last_down_frame = reduce(lambda j, k: k if down_frames[k] else j, range(len(down_frames)), None) if first_up_frame < last_down_frame: self.last_input_frame = 0 return True return False def keyIdle(self, _key, _from = None, _state = 0.1, _to = 0): """ Checks if a key was released and not pressed again within a certain amount of time. Parameters ----------- _key : String The key to search fore _from : int : 1 The furthest back frame to look to. If set to None, it will look at the default Buffer Window in the player's control settings _state : float : 0.1 A value from 0 to 1 for a threshold on value before a button registers as a press. Usually only applies to sticks, since buttons are always 0.0 or 1.0 _to : int : 0 The furthest forward frame to look to. """ if _from is None: _from = max(min(int(self.key_bindings.timing_window['buffer_window']), self.last_input_frame), 1) up_frames = map(lambda k: _key in k and k[_key] < _state, self.input_buffer.getLastNFrames(_from, _to)) down_frames = map(lambda k: _key in k and k[_key] >= _state, self.input_buffer.getLastNFrames(_from, _to)) if not any(up_frames): return False if any(up_frames) and not any(down_frames): self.last_input_frame = 0 return True first_up_frame = reduce(lambda j, k: j if j != None else (k if up_frames[k] else None), range(len(up_frames)), None) last_down_frame = reduce(lambda j, k: k if down_frames[k] else j, range(len(down_frames)), None) if first_up_frame >= last_down_frame: self.last_input_frame = 0 return True return False def getSmoothedInput(self, _distanceBack = None, _maxMagnitude = 1.0): """ Converts buttons into an analog direction. It checks back for a set amount of frames and averages the inputs into a direction. 
Parameters ----------- _distanceBack : int : None How many frames to look back to get direction inputs from _maxMagnitude : float : 1.0 The largest magnitude the averaged direction vector may have; longer vectors are scaled down to this length """ #QUESTION - explain this algorithm a little better #TODO If this is a gamepad, simply return its analog input if _distanceBack is None: smooth_distance = int(self.key_bindings.timing_window['smoothing_window']) _distanceBack = smooth_distance else: smooth_distance = _distanceBack hold_buffer = reversed(self.input_buffer.getLastNFrames(_distanceBack)) smoothed_x = 0.0 smoothed_y = 0.0 if self.key_bindings.type == "Keyboard": for frame_input in hold_buffer: working_x = 0.0 working_y = 0.0 x_decay = float(1.5)/smooth_distance y_decay = float(1.5)/smooth_distance if 'left' in frame_input: working_x -= frame_input['left'] if 'right' in frame_input: working_x += frame_input['right'] if 'up' in frame_input: working_y -= frame_input['up'] if 'down' in frame_input: working_y += frame_input['down'] if (working_x > 0 and smoothed_x > 0) or (working_x < 0 and smoothed_x < 0): x_decay = float(1)/smooth_distance elif (working_x < 0 and smoothed_x > 0) or (working_x > 0 and smoothed_x < 0): x_decay = float(4)/smooth_distance if (working_y < 0 and smoothed_y < 0) or (working_y > 0 and smoothed_y > 0): y_decay = float(1)/smooth_distance elif (working_y < 0 and smoothed_y > 0) or (working_y > 0 and smoothed_y < 0): y_decay = float(4)/smooth_distance magnitude = numpy.linalg.norm([working_x, working_y]) if magnitude > _maxMagnitude: working_x /= magnitude/_maxMagnitude working_y /= magnitude/_maxMagnitude if smoothed_x > 0: smoothed_x -= x_decay if smoothed_x < 0: smoothed_x = 0 elif smoothed_x < 0: smoothed_x += x_decay if smoothed_x > 0: smoothed_x = 0 if smoothed_y > 0: smoothed_y -= y_decay if smoothed_y < 0: smoothed_y = 0 elif smoothed_y < 0: smoothed_y += y_decay if smoothed_y > 0: smoothed_y = 0 smoothed_x += working_x smoothed_y += working_y else: left = self.keys_held['left'] if self.keys_held.has_key('left') else 0 right = self.keys_held['right'] if self.keys_held.has_key('right') else 0 up = self.keys_held['up'] if self.keys_held.has_key('up') else 0 down = self.keys_held['down'] if self.keys_held.has_key('down') else 0 smoothed_x = -left+right smoothed_y = -up+down final_magnitude = numpy.linalg.norm([smoothed_x, smoothed_y]) if final_magnitude > _maxMagnitude: smoothed_x /= final_magnitude/_maxMagnitude smoothed_y /= final_magnitude/_maxMagnitude return [smoothed_x, smoothed_y] def getSmoothedAngle(self,_default=90): """ Returns the angle that the smoothedInput currently points to. 0 being forward, 90 being up Parameters ----------- _default : int : 90 What to return if input is [0,0] """ inputValue = self.getSmoothedInput() print(inputValue) if (inputValue == [0, 0]): angle = _default else: angle = math.atan2(-inputValue[1], inputValue[0])*180.0/math.pi print('ANGLE:',angle) return angle def checkSmash(self,_direction): """ This function checks if the player has Smashed in a direction. It does this by noting if the direction was pressed recently and is now above a threshold Parameters ----------- _direction : String The joystick direction to check for a smash in """ #TODO different for buttons than joysticks return self.keyBuffered(_direction, int(self.key_bindings.timing_window['smash_window']), 0.85) def checkTap(self, _direction, _firstThreshold=0.6): """ Checks if the player has tapped a button, but not smashed it. If a joystick is used, the checkSmash function should cover this.
Parameters ----------- _direction : String The joystick direction to check for a smash in _firstThreshold : float : 0.6 """ if self.key_bindings.type == "Keyboard": return self.keyBuffered(_direction, _state=1) and self.keyBuffered(_direction, int(self.key_bindings.timing_window['repeat_window'])+1, _firstThreshold, 1) else: return self.checkSmash(_direction) def netDirection(self, _checkDirectionList): """ Gets the net total direction of all of the directions currently being held. Parameters ----------- _checkDirectionList : """ coords = self.getSmoothedInput() if not filter(lambda a: a in ['left', 'right', 'up', 'down'], _checkDirectionList): return 'neutral' left_check = -coords[0] if 'left' in _checkDirectionList and 'left' in self.keys_held else -2 right_check = coords[0] if 'right' in _checkDirectionList and 'right' in self.keys_held else -2 up_check = -coords[1] if 'up' in _checkDirectionList and 'up' in self.keys_held else -2 down_check = coords[1] if 'down' in _checkDirectionList and 'down' in self.keys_held else -2 if left_check == -2 and right_check == -2 and up_check == -2 and down_check == -2: if 'left' in self.keys_held: left_check = self.keys_held['left'] if 'right' in self.keys_held: right_check = self.keys_held['right'] if 'up' in self.keys_held: up_check = self.keys_held['up'] if 'down' in self.keys_held: down_check = self.keys_held['down'] if left_check == -2 and right_check == -2 and up_check == -2 and down_check == -2: return 'neutral' check_dict = {'left': left_check, 'right': right_check, 'up': up_check, 'down': down_check} return max(_checkDirectionList, key=lambda k: check_dict[k]) def keysContain(self,_key,_threshold=0.1): """ Checks for keys that are currently being held, regardless of when they were pressed. Parameters ----------- _key : String The key to check for. _threshold : float : 0.1 The value that represents a "press", will check for values lower than the threshold """ if _key in self.keys_held: return self.keys_held[_key] >= _threshold return False def getForwardBackwardKeys(self): """ This returns a tuple of the key for forward, then backward Useful for checking if the fighter is pivoting, or doing a back air, or getting the proper key to dash-dance, etc. The best way to use this is something like (key,invkey) = actor.getForwardBackwardKeys() which will assign the variable "key" to the forward key, and "invkey" to the backward key. """ if self.facing == 1: return ('right','left') else: return ('left','right') ######################################################## # COMBAT FUNCTIONS # ######################################################## def applySubactions(self, _subacts): for subact in _subacts: subact.execute(self.current_action, self) return True # Our hit filter stuff expects this def filterHits(self, _hitbox, _subacts): if self.lockHitbox(_hitbox): for subact in _subacts: subact.execute(self.current_action, self) return True return False def dealDamage(self, _damage): """ Deal damage to the fighter. Checks to make sure the damage caps at 999. If you want to have higher damage, override this function and remove it. This function is called in the applyKnockback function, so you shouldn't need to call this function directly for normal attacks, although you can for things like poison, non-knockback attacks, etc. 
Parameters ----------- _damage : float The amount of damage to deal """ self.damage += float(math.floor(_damage)) self.damage = min(999,max(self.damage,0)) if self.data_log: self.data_log.addToData('Damage Taken',float(math.floor(_damage))) def applyHitstop(self,_damage,_hitlagMultiplier): """ Applies hitstop to the fighter when hit. Also sets the hitstun vibration. Parameters ----------- _damage : int The amount of damage the attack does _hitlagMultiplier : float An amount to multiply the calculated hitstop with """ self.hitstop = math.floor((_damage / 4.0 + 2)*_hitlagMultiplier) if self.grounded: self.hitstop_vibration = (3,0) else: self.hitstop_vibration = (0,3) self.hitstop_pos = (self.posx, self.posy) def applyKnockback(self, _total_kb,_trajectory): """Do Knockback to the fighter. The knockback calculation is derived from the SSBWiki, and a bit of information from ColinJF and Amazing Ampharos on Smashboards, it is based off of Super Smash Bros. Brawl's knockback calculation, which is the one with the most information available Parameters ----------- """ # Get the trajectory as a vector trajectory_vec = [math.cos(_trajectory/180*math.pi), math.sin(_trajectory/180*math.pi)] di_vec = self.getSmoothedInput(int(self.key_bindings.timing_window['smoothing_window'])) di_multiplier = 1+numpy.dot(di_vec, trajectory_vec)*.05 _trajectory += numpy.cross(di_vec, trajectory_vec)*13.5 print(_total_kb) self.setSpeed((_total_kb)*di_multiplier, _trajectory) def applyHitstun(self,_total_kb,_hitstunMultiplier,_baseHitstun,_trajectory): """TODO document this""" hitstun_frames = math.floor((_total_kb)*_hitstunMultiplier+_baseHitstun) if hitstun_frames > 0.5: #If the current action is not hitstun or you're in hitstun, but there's not much of it left if not isinstance(self.current_action, baseActions.HitStun) or (self.current_action.last_frame-self.current_action.frame)/float(settingsManager.getSetting('hitstun')) <= hitstun_frames+15: self.doHitStun(hitstun_frames*settingsManager.getSetting('hitstun'), _trajectory) self.current_action.tech_cooldown = (_total_kb*_hitstunMultiplier)//6 def applyPushback(self, _kb, _trajectory, _hitlag): """ Pushes back the fighter when they hit a foe. This is the corollary to applyKnockback, except this one is called on the fighter who lands the hit. It applies the hitlag to the fighter, and pushes them back slightly from the opponent. Parameters ----------- _kb : _trajectory : int The direction to push the attacker back. In degrees, zero being forward, 90 being up _hitlag : int The hitlag from the attack """ self.hitstop = math.floor(_hitlag*settingsManager.getSetting('hitlag')) print(self.hitstop) (x, y) = getXYFromDM(_trajectory, _kb) self.change_x += x if not self.grounded: self.change_y += y def die(self,_respawn = True): """ This function is called when a fighter dies. It spawns the death particles and resets some variables. 
Parameters ----------- _respawn : Boolean Whether or not to respawn the fighter after death """ sfxlib = settingsManager.getSfx() if sfxlib.hasSound('death', self.name): self.playSound('death') self.data_log.addToData('Falls',1) if self.hit_tagged != None: if hasattr(self.hit_tagged, 'data_log'): self.hit_tagged.data_log.addToData('KOs',1) if _respawn: if self.hit_tagged is not None: color = settingsManager.getSetting('playerColor' + str(self.hit_tagged.player_num)) else: color = settingsManager.getSetting('playerColor' + str(self.player_num)) for i in range(0, 11): next_hit_article = article.HitArticle(self, (self.posx, self.posy), 1, i*30, 30, 1.5, color) self.articles.append(next_hit_article) next_hit_article = article.HitArticle(self, (self.posx, self.posy), 1, i*30+10, 60, 1.5, color) self.articles.append(next_hit_article) next_hit_article = article.HitArticle(self, (self.posx, self.posy), 1, i*30+20, 90, 1.5, color) self.articles.append(next_hit_article) self.onRespawn() (self.posx, self.posy) = self.game_state.spawn_locations[self.player_num] self.posy -= 200 self.updatePosition() self.ecb.normalize() self.posy += self.ecb.current_ecb.rect.height/2.0 self.ecb.store() self.createMask([255,255,255], 480, True, 12) self.respawn_invulnerable = 480 self.doAction('Respawn') ######################################################## # HELPER FUNCTIONS # ######################################################## """ These are ways of getting properly formatted data, accessing specific things, converting data, etc. """ def getForwardWithOffset(self,_offSet = 0): """ Get a direction that is angled from the direction the fighter is facing, rather than angled from right. For example, sending the opponent 30 degrees is fine when facing right, but if you're facing left, you'd still be sending them to the right! Hitboxes use this calculation a lot. It'll return the proper angle that is the given offset from "forward". Defaults to 0, which will give either 0 or 180, depending on the direction of the fighter. Parameters ----------- _offSet : int The angle to convert Return ----------- The adjusted angle for the proper facing angle """ if self.facing == 1: return _offSet else: return 180 - _offSet def getDirectionMagnitude(self): """ Converts the fighter's current speed from XY components into a Direction and Magnitude. Angles are in degrees, with 0 being forward Return ----------- (direction,magnitude) : Tuple (int,float) The direction in degrees, and the magnitude in map uints """ if self.change_x == 0: magnitude = self.change_y direction = 90 if self.change_y < 0 else 270 return (direction,magnitude) if self.change_y == 0: magnitude = self.change_x direction = 0 if self.change_x > 0 else 180 return(direction,magnitude) direction = math.degrees(math.atan2(-self.change_y, self.change_x)) direction = round(direction) magnitude = numpy.linalg.norm([self.change_x, self.change_y]) return (direction,magnitude) def getFacingDirection(self): """ A simple function that converts the facing variable into a direction in degrees. Return ----------- The direction the fighter is facing in degrees, zero being right, 90 being up """ if self.facing == 1: return 0 else: return 180 def setGrabbing(self, _other): """ Sets a grabbing state. Tells this fighter that it's grabbing something else, and tells that thing what's grabbing it. 
Parameters ----------- _other : GameObject The object to be grabbing """ self.grabbing = _other _other.grabbed_by = self def isGrabbing(self): """ Check whether the fighter is current holding something. If this object says that it's holding something, but the other object doesn't agree, assume that there is no grab. Return ----------- bool : Whether the fighter is currently holding something """ if self.grabbing is None: return False if self.grabbing and self.grabbing.grabbed_by == self: return True return False def flip(self): """ Flip the fighter so he is now facing the other way. Also flips the sprite for you. """ self.facing = -self.facing self.sprite.flipX() def updateLandingLag(self,_lag,_reset=False): """ Updates landing lag, but doesn't overwrite a longer lag with a short one. Useful for things like fast aerials that have short endlag, but you don't want to be able to override something like an airdodge lag with it. Parameters ----------- _lag : int The number of frames of endlag to set _reset : bool : False When True, will always set the landing lag to the given value, regardless of current lag. """ if _reset: self.landing_lag = _lag else: if _lag > self.landing_lag: self.landing_lag = _lag def createMask(self,_color,_duration,_pulse = False,_pulse_size = 16): """ Creates a color mask sprite over the fighter Parameters ----------- _color : String The color of the mask in RGB of the format #RRGGBB _duration : int How many frames should the mask stay active _pulse : bool Should the mask "flash" in transparency, or just stay solid? _pulse_size : int If pulse is true, this is how long it takes for one full rotation of transparency """ self.mask = spriteManager.MaskSprite(self.sprite,_color,_duration,_pulse, _pulse_size) def playSound(self,_sound): """ Play a sound effect. If the sound is not in the fighter's SFX library, it will play the base sound. Parameters ----------- _sound : String The name of the sound to be played """ sfxlib = settingsManager.getSfx() if sfxlib.hasSound(_sound, self.name): sfxlib.playSound(_sound, self.name) else: sfxlib.playSound(_sound,'base') def activateHitbox(self,_hitbox): """ Activates a hitbox, adding it to your active_hitboxes list. Parameters ----------- _hitbox : Hitbox The hitbox to activate """ self.active_hitboxes.add(_hitbox) _hitbox.activate() def activateHurtbox(self,_hurtbox): """ Activates a hurtbox, adding it to your active_hurtboxes list. _hurtbox : Hurtbox The hitbox to activate """ self.active_hurtboxes.add(_hurtbox) def lockHitbox(self,_hbox): """ This will "lock" the hitbox so that another hitbox with the same ID from the same fighter won't hit again. Returns true if it was successful, false if it already exists in the lock. 
Parameters ----------- _hbox : Hitbox The hitbox we are checking for """ #If the hitbox belongs to something, get tagged by it if not _hbox.owner is None: self.hit_tagged = _hbox.owner if _hbox.hitbox_lock is None: return False if _hbox.hitbox_lock in self.hitbox_lock: return False self.hitbox_lock.add(_hbox.hitbox_lock) return True def startShield(self): """ Creates a shield article and adds it to your active articles list """ self.articles.append(article.ShieldArticle(settingsManager.createPath("sprites/melee_shield.png"),self)) def startParry(self): """ Creates a parry article and adds it to your active articles list """ self.articles.append(article.ParryArticle(settingsManager.createPath("sprites/melee_shield.png"),self)) def test(): fight = AbstractFighter('',0) print(fight.__init__.__doc__) if __name__ == '__main__': test()
gpl-3.0
-4,722,417,059,480,184,000
40.139911
237
0.555986
false
4.229788
false
false
false
rockneurotiko/madness-things
Python/Pygame/2-sprites/platfom_simple.py
1
4157
import pygame BLACK = ( 0, 0, 0) WHITE = ( 255, 255, 255) BLUE = ( 0, 0, 255) RED = ( 255, 0, 0) GREEN = ( 0, 255, 0) size = (800, 600) class Player(pygame.sprite.Sprite): change_x = 0 change_y = 0 level = None def __init__(self): pygame.sprite.Sprite.__init__(self) #The player. take an image, but now a shit self.image = pygame.Surface([40,60]) self.image.fill(RED) self.rect = self.image.get_rect() def update(self): #Move #Gravity self.calc_grav() #Move l/r self.rect.x += self.change_x #Check collissions blocks_hit = pygame.sprite.spritecollide(self,\ self.level.platform_list, False) for block in blocks_hit: if self.change_x > 0: self.rect.right = block.rect.left elif self.change_x < 0: self.rect.left = block.rect.right #Move u/d self.rect.y += self.change_y #Check collissions blocks_hit = pygame.sprite.spritecollide(self,\ self.level.platform_list, False) for block in blocks_hit: if self.change_y > 0: self.rect.bottom = block.rect.top elif self.change_y < 0: self.rect.top = block.rect.bottom self.change_y = 0 def calc_grav(self): if self.change_y == 0: self.change_y = 1 else: self.change_y += .35 if self.rect.y >= size[1] - self.rect.height and self.change_y >= 0: self.change_y = 0 self.rect.y = size[1] - self.rect.height def jump(self): #Saltar #Movemos dos pixeles, vemos si haf colision, y si la hay, se salta self.rect.y += 2 platform_hit = pygame.sprite.spritecollide(self, \ self.level.platform_list, False) self.rect.y -= 2 if len(platform_hit) > 0 or self.rect.bottom >= size[1]: self.change_y = -10 def go_left(self): self.change_x = -6 def go_right(self): self.change_x = 6 def stop(self): self.change_x = 0 class Platform(pygame.sprite.Sprite): def __init__(self, w, h): pygame.sprite.Sprite.__init__(self) self.image = pygame.Surface([w,h]) self.image.fill(GREEN) self.rect = self.image.get_rect() class Level(object): #Super class platform_list = None enemy_list = None background = None def __init__(self, player): self.platform_list = pygame.sprite.Group() self.enemy_list = pygame.sprite.Group() self.player = player def update(self): self.platform_list.update() self.enemy_list.update() def draw(self, screen): screen.fill(BLUE) self.platform_list.draw(screen) self.enemy_list.draw(screen) class Level01(Level): def __init__(self, player): Level.__init__(self, player) level = [[210, 70, 500, 500], [210, 70, 200, 400], [210, 70, 600, 300],] for plat in level: block = Platform(plat[0], plat[1]) block.rect.x = plat[2] block.rect.y = plat[3] block.player = self.player self.platform_list.add(block) def main(): pygame.init() screen = pygame.display.set_mode(size) pygame.display.set_caption('jumper') pygame.mouse.set_visible(False) player = Player() lvl_list = [] lvl_list.append(Level01(player)) current_lvl_no = 0 current_lvl = lvl_list[current_lvl_no] active_sprite_list = pygame.sprite.Group() player.level = current_lvl player.rect.x = 340 player.rect.y = size[1] - player.rect.height active_sprite_list.add(player) done = False clock = pygame.time.Clock() while not done: for event in pygame.event.get(): # User did something if event.type == pygame.QUIT: # If user clicked close done = True # Flag that we are done so we exit this loop if event.type == pygame.KEYDOWN: if event.key == pygame.K_LEFT: player.go_left() if event.key == pygame.K_RIGHT: player.go_right() if event.key == pygame.K_UP: player.jump() if event.type == pygame.KEYUP: if event.key == pygame.K_LEFT and player.change_x < 0: player.stop() if event.key == pygame.K_RIGHT and player.change_x > 0: player.stop() active_sprite_list.update() 
current_lvl.update() if player.rect.right > size[0]: player.rect.right = size[0] if player.rect.left < 0: player.rect.left = 0 current_lvl.draw(screen) active_sprite_list.draw(screen) pygame.display.flip() clock.tick(60) pygame.quit() if __name__ == '__main__': main()
mpl-2.0
384,585,491,287,128,450
21.117021
70
0.65071
false
2.702861
false
false
false
vileopratama/vitech
docs/tutorials/ebook/Odoo Development Cookbook/OdooDevelopmentCookbook_Code/Chapter13_code/ch13_r02_restrict_access_to_web_accessible_paths/controllers/main.py
1
2112
# -*- coding: utf-8 -*- # © 2015 Therp BV <http://therp.nl> # License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html). from openerp import http from openerp.http import request class Main(http.Controller): @http.route('/my_module/all-books', type='http', auth='none') def all_books(self): records = request.env['library.book'].sudo().search([]) result = '<html><body><table><tr><td>' result += '</td></tr><tr><td>'.join(records.mapped('name')) result += '</td></tr></table></body></html>' return result @http.route('/my_module/all-books/mark_mine', type='http', auth='public') def all_books_mark_mine(self): records = request.env['library.book'].sudo().search([]) result = '<html><body><table>' for record in records: result += '<tr>' if record.author_ids & request.env.user.partner_id: result += '<th>' else: result += '<td>' result += record.name if record.author_ids & request.env.user.partner_id: result += '</th>' else: result += '</td>' result += '</tr>' result += '</table></body></html>' return result @http.route('/my_module/all-books/mine', type='http', auth='user') def all_books_mine(self): records = request.env['library.book'].search([ ('author_ids', 'in', request.env.user.partner_id.ids), ]) result = '<html><body><table><tr><td>' result += '</td></tr><tr><td>'.join(records.mapped('name')) result += '</td></tr></table></body></html>' return result @http.route('/my_module/all-books/mine_base_group_user', type='http', auth='base_group_user') def all_books_mine_base_group_user(self): return self.all_books_mine() # this is for the exercise @http.route('/my_module/all-books/mine_groups', type='http', auth='groups(base.group_no_one)') def all_books_mine_groups(self): return self.all_books_mine()
mit
2,850,783,390,967,410,000
37.381818
77
0.547134
false
3.620926
false
false
false
ahnolds/rpglib
rpglib/window.py
1
10818
""" The module for drawing the game world """ # Standard Library packages import sys # External dependenices import pygame # Internal dependencies import world # Rendering settings SCREEN_X = 450 SCREEN_Y = 450 HALF_SCREEN_X = SCREEN_X // 2 HALF_SCREEN_Y = SCREEN_Y // 2 SQUARE_SIZE = 18 SCREEN_BG = pygame.color.Color('black') # Text font settings pygame.font.init() FONT_NAME = 'freemono' FONT_SIZE = 20 FONT = pygame.font.SysFont(FONT_NAME, FONT_SIZE) FONT_COLOR = pygame.color.Color('black') FONT_BG_COLOR = pygame.color.Color('white') FONT_HL_COLOR = pygame.color.Color('red') MAX_LINE_LEN = 37 MAX_NUM_LINES = 2 FONT_BOX_HEIGHT = FONT_SIZE * MAX_NUM_LINES # Key bindings UP_KEY = pygame.K_UP DOWN_KEY = pygame.K_DOWN LEFT_KEY = pygame.K_LEFT RIGHT_KEY = pygame.K_RIGHT A_KEY = pygame.K_a B_KEY = pygame.K_b START_KEY = pygame.K_SPACE SEL_KEY = pygame.K_RETURN # Game mode constants M_WALK = 0 M_MENU = 1 M_TEXT = 2 M_FIGHT = 3 M_INV = 4 M_SAVE = 5 # Menu option constants and mapping from option to mode ME_INVENTORY = 'ITEMS' ME_SAVE = 'SAVE' ME_EXIT = 'EXIT' MENU = [ME_INVENTORY, ME_SAVE, ME_EXIT] MENU_MODES = { ME_INVENTORY : M_INV, ME_SAVE : M_SAVE, ME_EXIT : M_WALK, } MENU_BOX_WIDTH = 100 MENU_BOX_HEIGHT = FONT_SIZE * len(MENU) SHOW_MENU_MODES = set({M_MENU, M_INV, M_SAVE}) # Inventory options INV_BOX_WIDTH = 100 INV_BOX_HEIGHT = SCREEN_Y class Window(object): def __init__(self, world): """ Create a new game window """ # Get the world self.world = world # Start out walking self.mode = M_WALK # Initially no text (TODO start text?) self.textQueue = [] # Default to the top of menus self.menuPos = 0 self.invPos = 0 # Default to no select hotkey self.selHotkey = None # Setup pygame for rendering pygame.init() pygame.key.set_repeat(160, 40) self.screen = pygame.display.set_mode((SCREEN_X, SCREEN_Y)) def draw(self): """ Draw the world """ # Background self.screen.fill(SCREEN_BG) # Image self.world.draw(self.screen) # Dialog text self.printText() # Menu text self.printMenu() # Inventory text self.printInventory() # Show the newly drawn screen (automatically double-buffered) pygame.display.flip() def handleEvent(self, event): """ Handle the current event """ if event.type == 0: # TODO handle internal changes e.g. 
NPC movement on Null event pass elif event.type == pygame.QUIT: sys.exit(0) elif event.type == pygame.KEYDOWN: if self.mode == M_WALK: if event.key == DOWN_KEY: # The origin for pygame is at the top left, and down is positive self.world.movePlayer((0, 1)) elif event.key == UP_KEY: # The origin for pygame is at the top left, and down is positive self.world.movePlayer((0, -1)) elif event.key == LEFT_KEY: self.world.movePlayer((-1, 0)) elif event.key == RIGHT_KEY: self.world.movePlayer((1, 0)) elif event.key == A_KEY: text = self.world.interact() if text is not None: # Format the text and add it to the queue self.formatText(text) elif event.key == START_KEY: # Bring up the menu self.mode = M_MENU elif event.key == SEL_KEY: if self.selHotkey is not None: inventory = self.world.player.items item = inventory[self.selHotkey][0] text, wasConsumed = item.useMenuInventory() if wasConsumed: # Decrement the inventory count inventory[self.selHotkey].pop() # If this used up the last one, then unmap the # hotkey and remove the key from the inventory if len(inventory[self.selHotkey]) == 0: del inventory[self.selHotkey] self.selHotkey = None if text is not None: # Format the text and add it to the queue self.formatText(text) elif self.mode == M_TEXT: if event.key == A_KEY: # Go to the next block of text self.textQueue.pop(0) # If there is no more text, switch back to walking if not self.textQueue: self.mode = M_WALK elif self.mode == M_MENU: if event.key == DOWN_KEY: # Move down the menu (or loop back to the top) self.menuPos = (self.menuPos + 1) % len(MENU) elif event.key == UP_KEY: # Move up the menu (or loop back to the bottom) if self.menuPos == 0: self.menuPos = len(MENU) self.menuPos -= 1 elif event.key == A_KEY: self.mode = MENU_MODES[MENU[self.menuPos]] elif event.key == B_KEY: # Exit the menu # TODO this should return to the previous mode to allow # opening the menu during encounters etc self.mode = M_WALK elif self.mode == M_INV: inventory = self.world.player.items if event.key == DOWN_KEY: # Move down the menu (or loop back to the top) self.invPos = (self.invPos + 1) % len(inventory) elif event.key == UP_KEY: # Move up the menu (or loop back to the bottom) if self.invPos == 0: self.invPos = len(inventory) self.invPos -= 1 elif event.key == A_KEY: # TODO use item pass if event.key == B_KEY: # Back to the main menu self.mode = M_MENU elif self.mode == M_SAVE: if event.key == B_KEY: # Back to the main menu self.mode = M_MENU def formatText(self, text): """ Format the text into blocks for display in a text box and add it to the text queue """ # Switch into text mode self.mode = M_TEXT # Fill in any variables in the message text = text.format(name = self.world.player.name) # Build the text queue parts = text.split('\n') for part in parts: wordList = part.strip().split() line = '' while wordList: lineLen = len(line.split('\n')[-1] + ' ' + wordList[0]) if lineLen <= MAX_LINE_LEN: line += ' ' + wordList.pop(0) elif len(line.split('\n')) < MAX_NUM_LINES: line += '\n' else: self.textQueue.append(line) line = '' if line: while len(line.split('\n')) < MAX_NUM_LINES: line += '\n' self.textQueue.append(line) def printText(self): # Only print if there is text in the queue if self.textQueue: # Create a box for the text along the bottom textBox = pygame.Rect(0, SCREEN_Y - FONT_BOX_HEIGHT, SCREEN_X, FONT_BOX_HEIGHT) # Draw the text box pygame.draw.rect(self.screen, FONT_BG_COLOR, textBox) # Render the text for num, line in enumerate(reversed(self.textQueue[0].split('\n'))): text = FONT.render(line, True, 
FONT_COLOR) # Position the text textRect = text.get_rect() textRect.left = 0 textRect.bottom = SCREEN_Y - num * FONT_SIZE # Draw the text onto the screen self.screen.blit(text, textRect) def printMenu(self): """Print the menu if relevant""" if self.mode in SHOW_MENU_MODES: # Create a box for the menu along the left textBox = pygame.Rect(SCREEN_X - MENU_BOX_WIDTH, 0, MENU_BOX_WIDTH, MENU_BOX_HEIGHT) # Draw the menu box pygame.draw.rect(self.screen, FONT_BG_COLOR, textBox) # Render the menu for num, line in enumerate(MENU): # Highlight the current selection color = FONT_HL_COLOR if num == self.menuPos else FONT_COLOR text = FONT.render(line, True, color) # Position the text textRect = text.get_rect() textRect.right = SCREEN_X textRect.top = num * FONT_SIZE # Draw the text onto the screen self.screen.blit(text, textRect) def printInventory(self): """Print the inventory if relevant""" if self.mode == M_INV: # Create a box for the menu along the left textBox = pygame.Rect(SCREEN_X - MENU_BOX_WIDTH - INV_BOX_WIDTH, 0, INV_BOX_WIDTH, INV_BOX_HEIGHT) # Draw the menu box pygame.draw.rect(self.screen, FONT_BG_COLOR, textBox) # Render the menu for num, line in enumerate(self.world.player.items.iterkeys()): # Highlight the current selection color = FONT_HL_COLOR if num == self.invPos else FONT_COLOR text = FONT.render(line, True, color) # Position the text textRect = text.get_rect() textRect.right = SCREEN_X - MENU_BOX_WIDTH textRect.top = num * FONT_SIZE # Draw the text onto the screen self.screen.blit(text, textRect) def run(self): """ Run the game """ # Main loop while True: # Get any events that occurred self.handleEvent(pygame.event.poll()) # Redraw the screen self.draw()
gpl-2.0
3,770,847,533,449,233,000
34.585526
84
0.495563
false
4.068447
false
false
false
GluonsAndProtons/gluon
gluon/backends/backends/proton_client.py
1
2296
from oslo_log import log as logging from gluon.common import exception as exc from requests import get, put, post, delete import json LOG = logging.getLogger(__name__) logger = LOG class Client(object): def __init__(self, backend): self._backend = backend def json_get(self, url): resp = get(url) if resp.status_code != 200: raise exc.GluonClientException('Bad return status %d' % resp.status_code, status_code=resp.status_code) try: rv = json.loads(resp.content) except Exception as e: raise exc.MalformedResponseBody(reason="JSON unreadable: %s on %s" % (e.message, resp.content)) return rv def do_delete(self, url): resp = delete(url) if resp.status_code != 200: raise exc.GluonClientException('Bad return status %d' % resp.status_code, status_code=resp.status_code) def do_post(self, url, values): resp = post(url, json=values) if resp.status_code not in (200, 201): raise exc.GluonClientException('Bad return status %d' % resp.status_code, status_code=resp.status_code) try: rv = json.loads(resp.content) except Exception as e: raise exc.MalformedResponseBody(reason="JSON unreadable: %s on %s" % (e.message, resp.content)) return rv def do_put(self, url, values): resp = put(url, json=values) if resp.status_code != 200: raise exc.GluonClientException('Bad return status %d' % resp.status_code, status_code=resp.status_code) try: rv = json.loads(resp.content) except Exception as e: raise exc.MalformedResponseBody(reason="JSON unreadable: %s on %s" % (e.message, resp.content)) return rv
apache-2.0
-5,607,964,346,719,619,000
37.915254
79
0.490854
false
4.610442
false
false
false
digitalocean/netbox
netbox/tenancy/filters.py
1
2787
import django_filters from django.db.models import Q from extras.filters import CustomFieldModelFilterSet, CreatedUpdatedFilterSet from utilities.filters import BaseFilterSet, NameSlugSearchFilterSet, TagFilter, TreeNodeMultipleChoiceFilter from .models import Tenant, TenantGroup __all__ = ( 'TenancyFilterSet', 'TenantFilterSet', 'TenantGroupFilterSet', ) class TenantGroupFilterSet(BaseFilterSet, NameSlugSearchFilterSet): parent_id = django_filters.ModelMultipleChoiceFilter( queryset=TenantGroup.objects.all(), label='Tenant group (ID)', ) parent = django_filters.ModelMultipleChoiceFilter( field_name='parent__slug', queryset=TenantGroup.objects.all(), to_field_name='slug', label='Tenant group group (slug)', ) class Meta: model = TenantGroup fields = ['id', 'name', 'slug', 'description'] class TenantFilterSet(BaseFilterSet, CustomFieldModelFilterSet, CreatedUpdatedFilterSet): q = django_filters.CharFilter( method='search', label='Search', ) group_id = TreeNodeMultipleChoiceFilter( queryset=TenantGroup.objects.all(), field_name='group', lookup_expr='in', label='Tenant group (ID)', ) group = TreeNodeMultipleChoiceFilter( queryset=TenantGroup.objects.all(), field_name='group', lookup_expr='in', to_field_name='slug', label='Tenant group (slug)', ) tag = TagFilter() class Meta: model = Tenant fields = ['id', 'name', 'slug'] def search(self, queryset, name, value): if not value.strip(): return queryset return queryset.filter( Q(name__icontains=value) | Q(slug__icontains=value) | Q(description__icontains=value) | Q(comments__icontains=value) ) class TenancyFilterSet(django_filters.FilterSet): """ An inheritable FilterSet for models which support Tenant assignment. """ tenant_group_id = TreeNodeMultipleChoiceFilter( queryset=TenantGroup.objects.all(), field_name='tenant__group', lookup_expr='in', label='Tenant Group (ID)', ) tenant_group = TreeNodeMultipleChoiceFilter( queryset=TenantGroup.objects.all(), field_name='tenant__group', to_field_name='slug', lookup_expr='in', label='Tenant Group (slug)', ) tenant_id = django_filters.ModelMultipleChoiceFilter( queryset=Tenant.objects.all(), label='Tenant (ID)', ) tenant = django_filters.ModelMultipleChoiceFilter( queryset=Tenant.objects.all(), field_name='tenant__slug', to_field_name='slug', label='Tenant (slug)', )
apache-2.0
1,279,330,868,893,995,500
28.648936
109
0.634733
false
4.050872
false
false
false
NetApp/manila
manila/tests/api/fakes.py
1
9038
# Copyright 2010 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_utils import timeutils import routes import webob import webob.dec import webob.request from manila.api import common as api_common from manila.api.middleware import auth from manila.api.middleware import fault from manila.api.openstack import api_version_request as api_version from manila.api.openstack import wsgi as os_wsgi from manila.api import urlmap from manila.api.v1 import limits from manila.api.v1 import router as router_v1 from manila.api.v2 import router as router_v2 from manila.api import versions from manila.common import constants from manila import context from manila import exception from manila import wsgi FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' FAKE_UUIDS = {} class Context(object): pass class FakeRouter(wsgi.Router): def __init__(self, ext_mgr=None): pass @webob.dec.wsgify def __call__(self, req): res = webob.Response() res.status = '200' res.headers['X-Test-Success'] = 'True' return res @webob.dec.wsgify def fake_wsgi(self, req): return self.application def wsgi_app(inner_app_v2=None, fake_auth=True, fake_auth_context=None, use_no_auth=False, ext_mgr=None): if not inner_app_v2: inner_app_v2 = router_v2.APIRouter(ext_mgr) if fake_auth: if fake_auth_context is not None: ctxt = fake_auth_context else: ctxt = context.RequestContext('fake', 'fake', auth_token=True) api_v2 = fault.FaultWrapper(auth.InjectContext(ctxt, inner_app_v2)) elif use_no_auth: api_v2 = fault.FaultWrapper(auth.NoAuthMiddleware( limits.RateLimitingMiddleware(inner_app_v2))) else: api_v2 = fault.FaultWrapper(auth.AuthMiddleware( limits.RateLimitingMiddleware(inner_app_v2))) mapper = urlmap.URLMap() mapper['/v2'] = api_v2 mapper['/'] = fault.FaultWrapper(versions.Versions()) return mapper class FakeToken(object): id_count = 0 def __getitem__(self, key): return getattr(self, key) def __init__(self, **kwargs): FakeToken.id_count += 1 self.id = FakeToken.id_count for k, v in kwargs.items(): setattr(self, k, v) class FakeRequestContext(context.RequestContext): def __init__(self, *args, **kwargs): kwargs['auth_token'] = kwargs.get('auth_token', 'fake_auth_token') super(FakeRequestContext, self).__init__(*args, **kwargs) class HTTPRequest(os_wsgi.Request): @classmethod def blank(cls, *args, **kwargs): if not kwargs.get('base_url'): kwargs['base_url'] = 'http://localhost/v1' use_admin_context = kwargs.pop('use_admin_context', False) version = kwargs.pop('version', api_version.DEFAULT_API_VERSION) experimental = kwargs.pop('experimental', False) out = os_wsgi.Request.blank(*args, **kwargs) out.environ['manila.context'] = FakeRequestContext( 'fake_user', 'fake', is_admin=use_admin_context) out.api_version_request = api_version.APIVersionRequest( version, experimental=experimental) return out class TestRouter(wsgi.Router): def __init__(self, controller): mapper = routes.Mapper() mapper.resource("test", "tests", controller=os_wsgi.Resource(controller)) super(TestRouter, self).__init__(mapper) class 
FakeAuthDatabase(object): data = {} @staticmethod def auth_token_get(context, token_hash): return FakeAuthDatabase.data.get(token_hash, None) @staticmethod def auth_token_create(context, token): fake_token = FakeToken(created_at=timeutils.utcnow(), **token) FakeAuthDatabase.data[fake_token.token_hash] = fake_token FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token return fake_token @staticmethod def auth_token_destroy(context, token_id): token = FakeAuthDatabase.data.get('id_%i' % token_id) if token and token.token_hash in FakeAuthDatabase.data: del FakeAuthDatabase.data[token.token_hash] del FakeAuthDatabase.data['id_%i' % token_id] class FakeRateLimiter(object): def __init__(self, application): self.application = application @webob.dec.wsgify def __call__(self, req): return self.application def get_fake_uuid(token=0): if token not in FAKE_UUIDS: FAKE_UUIDS[token] = str(uuid.uuid4()) return FAKE_UUIDS[token] def app(): """API application. No auth, just let environ['manila.context'] pass through. """ mapper = urlmap.URLMap() mapper['/v1'] = router_v1.APIRouter() mapper['/v2'] = router_v2.APIRouter() return mapper fixture_reset_status_with_different_roles_v1 = ( { 'role': 'admin', 'valid_code': 202, 'valid_status': constants.STATUS_ERROR, }, { 'role': 'member', 'valid_code': 403, 'valid_status': constants.STATUS_AVAILABLE, }, ) fixture_reset_status_with_different_roles = ( { 'role': 'admin', 'valid_code': 202, 'valid_status': constants.STATUS_ERROR, 'version': '2.6', }, { 'role': 'admin', 'valid_code': 202, 'valid_status': constants.STATUS_ERROR, 'version': '2.7', }, { 'role': 'member', 'valid_code': 403, 'valid_status': constants.STATUS_AVAILABLE, 'version': '2.6', }, { 'role': 'member', 'valid_code': 403, 'valid_status': constants.STATUS_AVAILABLE, 'version': '2.7', }, ) fixture_reset_replica_status_with_different_roles = ( { 'role': 'admin', 'valid_code': 202, 'valid_status': constants.STATUS_ERROR, }, { 'role': 'member', 'valid_code': 403, 'valid_status': constants.STATUS_AVAILABLE, }, ) fixture_reset_replica_state_with_different_roles = ( { 'role': 'admin', 'valid_code': 202, 'valid_status': constants.REPLICA_STATE_ACTIVE, }, { 'role': 'admin', 'valid_code': 202, 'valid_status': constants.REPLICA_STATE_OUT_OF_SYNC, }, { 'role': 'admin', 'valid_code': 202, 'valid_status': constants.REPLICA_STATE_IN_SYNC, }, { 'role': 'admin', 'valid_code': 202, 'valid_status': constants.STATUS_ERROR, }, { 'role': 'member', 'valid_code': 403, 'valid_status': constants.REPLICA_STATE_IN_SYNC, }, ) fixture_force_delete_with_different_roles = ( {'role': 'admin', 'resp_code': 202, 'version': '2.6'}, {'role': 'admin', 'resp_code': 202, 'version': '2.7'}, {'role': 'member', 'resp_code': 403, 'version': '2.6'}, {'role': 'member', 'resp_code': 403, 'version': '2.7'}, ) fixture_invalid_reset_status_body = ( {'os-reset_status': {'x-status': 'bad'}}, {'os-reset_status': {'status': 'invalid'}} ) def mock_fake_admin_check(context, resource_name, action, *args, **kwargs): if context.is_admin: return else: raise exception.PolicyNotAuthorized(action=action) class FakeResourceViewBuilder(api_common.ViewBuilder): _collection_name = 'fake_resource' _detail_version_modifiers = [ "add_field_xyzzy", "add_field_spoon_for_admins", "remove_field_foo", ] def view(self, req, resource): keys = ('id', 'foo', 'fred', 'alice') resource_dict = {key: resource.get(key) for key in keys} self.update_versioned_resource_dict(req, resource_dict, resource) return resource_dict @api_common.ViewBuilder.versioned_method("1.41") def 
add_field_xyzzy(self, context, resource_dict, resource): resource_dict['xyzzy'] = resource.get('xyzzy') @api_common.ViewBuilder.versioned_method("1.6") def add_field_spoon_for_admins(self, context, resource_dict, resource): if context.is_admin: resource_dict['spoon'] = resource.get('spoon') @api_common.ViewBuilder.versioned_method("3.14") def remove_field_foo(self, context, resource_dict, resource): resource_dict.pop('foo', None)
apache-2.0
-2,230,026,258,513,124,000
27.511041
79
0.609648
false
3.583664
false
false
false
Tjorriemorrie/twurl
gae/src/views.py
1
16080
from src import app from flask import request, render_template, json, Response, abort, redirect from models import User, Tweet, Link, UserLink import urllib import base64 from google.appengine.api import urlfetch, taskqueue, mail import datetime import math from flask.ext.jsontools import jsonapi import urlparse import oauth2 as oauth @app.route('/') def index(): token = obtainRequestToken() params = { } app.logger.info('index: {}'.format(params)) return render_template('index.html', **params) def obtainRequestToken(): app.logger.info('Obtaining request token') app.logger.info('Creating oauth consumer...') consumer = oauth.Consumer(app.config['CONSUMER_KEY'], app.config['CONSUMER_SECRET']) app.logger.info('Creating oauth client...') client = oauth.Client(consumer) app.logger.info('Requesting token from twitter...') resp, content = client.request(app.config['REQUEST_TOKEN_URL'], 'GET') if resp['status'] != '200': raise Exception("Invalid response %s." % resp['status']) request_token = dict(urlparse.parse_qsl(content)) app.logger.info('Request token received: {}'.format(request_token)) return request_token @app.route('/twitter_callback') def twitterCallback(): form_data = request.form app.logger.info('form_data: {}'.format(form_data)) return redirect('/') ########################################### # USER ########################################### @app.route('/user', methods=['GET', 'POST']) @jsonapi def user(): app.logger.info('formdata {}'.format(request.form)) email = request.form.get('email') password = request.form.get('password') # todo validation user = User.authenticate(email, password) # todo return proper token return user.key.urlsafe() @app.route('/user/main', methods=['GET', 'POST']) @jsonapi def userMain(): # todo ensure twurlie is topic if none data = {} # get user app.logger.info('formdata {}'.format(request.form)) user_key = request.form.get('user_key') user = User.fetchByKey(user_key) if not user: abort(403) # get last userlink per topic for topic in user.topics: userLink = UserLink.findLastByUser(topic, user) if userLink: data[topic] = { 'key': userLink.key.urlsafe(), 'link_id': userLink.link_id, 'tweeted_count': userLink.tweeted_count, 'priority': userLink.priority, 'read_at': hasattr(userLink, 'read_at') and userLink.read_at } else: data[topic] = None return data @app.route('/user/read', methods=['GET', 'POST']) @jsonapi def userRead(): # get user app.logger.info('formdata {}'.format(request.form)) user_key = request.form.get('user_key') user = User.fetchByKey(user_key) if not user: abort(403) # mark last link topic = request.form.get('topic') userLink = UserLink.readLastByUser(topic, user) return userLink.read_at @app.route('/topic/create') def topicCreate(): user = User.query(User.email == 'jacoj82@gmail.com').get() topics = ['python', 'html5'] user.topics = topics params = { 'user': user, 'topics': topics, } user.put() app.logger.info('topicCreate: {}'.format(params)) return render_template('base.html', **params) ########################################### # LINKS SCHEDULING ########################################### # NB this is run first before quota is filled # 1st create task queue for every topic # 2nd link every user's every topic @app.route('/cron/schedule/links', methods=['GET', 'POST']) def scheduleLinks(): ''' Run this after quota reset ''' # get topics user_topics = User.query(projection=[User.topics], distinct=True).fetch() topics = [user.topics[0] for user in user_topics] app.logger.info('Topics fetched: {}'.format(topics)) for topic in topics: 
taskqueue.add(url='/cron/schedule/link', params={'topic': topic}) app.logger.info('Created push queue to schedule link for {}'.format(topic)) mail.send_mail( sender='jacoj82@gmail.com', to='jacoj82@gmail.com', subject='Schedule Links', body='All {} topics pushed'.format(len(topics)), ) app.logger.info('All {} topics pushed'.format(len(topics))) return Response('OK') @app.route('/cron/schedule/link', methods=['GET', 'POST']) def scheduleLink(): if request.method == 'POST': app.logger.info('request form: {}'.format(request.form)) topic = request.form.get('topic') elif request.method == 'GET': app.logger.info('request args: {}'.format(request.args)) topic = request.args.get('topic') if not topic: abort(400) app.logger.info('Topic param received: {}'.format(topic)) # get users by topic users = User.fetchByTopic(topic) # get ordered links by topic # two inequality filters not supported week_ago = datetime.datetime.utcnow() - datetime.timedelta(days=7) links = Link.fetchByTopic(topic) spamLinks = [] info = {} # for every user for user in users: info[user.email] = None # get last userlink: # if not read => skip # if too soon => skip lastUserLink = UserLink.findLastByUser(topic, user) if lastUserLink and not hasattr(lastUserLink, 'read'): app.logger.info('User has unread UserLink') continue # then loop through ordered links for link in links: # skip links that has been spammed # ignore links created before a week ago # these links will go away since updated_at will keep renewing if link.created_at < week_ago: app.logger.debug('Skipping spam link: {}'.format(link.id)) spamLinks.append(link.id) continue # and assign first non-userlink to user # note: search without topic: # this gives unique link for a list of similar topics if not UserLink.findByUserAndLink(user, link): # assign new userlink to user for the topic UserLink.create(topic, user, link) info[user.email] = link.id break body = '\n'.join(['User {} got link {}'.format(userEmail, linkId) for userEmail, linkId in info.iteritems()]) body += '\n'.join(spamLinks) mail.send_mail( sender='jacoj82@gmail.com', to='jacoj82@gmail.com', subject='Schedule Link {}'.format(topic), body=body, ) app.logger.info('{} users got links'.format(len(info))) return Response('OK') ########################################### # SCRAPING TWITTER ########################################### # NB this is run last as the quota will never be sufficient # remember to set timeout on task queue so it does not carry over reset # 1st is to create task queues for every topic # 2nd remove expired tweets (hold about 1 month - depends on datastore size) # 3rd delete expired urls/links (hold about 1 month (created between 7 days and 1 months is spam) # 4th score the urls based on tweets and retweets # 2nd link every user's every topic @app.route('/cron/topics', methods=['GET', 'POST']) def cronTopics(): # res = urlfetch.fetch( # url='https://api.twitter.com/oauth2/token', # payload='grant_type=client_credentials', # method=urlfetch.POST, # headers={ # 'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8', # 'Authorization': 'Basic {}'.format(bearer_token_encoded), # }, # ) # app.logger.info(res) # # data = json.loads(res.content) # app.logger.info('Data: {}'.format(data)) # if 'errors' in data: # error = data['errors'][0] # raise Exception('[{} {}] {}'.format(error['code'], error['label'], error['message'])) # access_token = data['access_token'] # token_type = data['token_type'] # bearer_token = '{}:{}'.format(consumer_key, consumer_secret) # app.logger.info('bearer 
token: {}'.format(bearer_token)) # bearer_token_encoded = base64.b64encode(bearer_token) # app.logger.info('bearer token encoded: {}'.format(bearer_token_encoded)) access_token = 'AAAAAAAAAAAAAAAAAAAAABcJYAAAAAAAVviSzyKtPYqYlHpZxoim6DHvfjI%3DU0slNkvBKQRynT62gbvQjEhAlE2PvzVZNia99xAdoJweI2OLqe' # get topics user_topics = User.query(projection=[User.topics], distinct=True).fetch() topics = [user.topics[0] for user in user_topics] app.logger.info('Topics fetched: {}'.format(topics)) for topic in topics: # get since ID since_id = Tweet.since_id(topic) params = {'topic': topic, 'since_id': since_id} app.logger.info('Created push queue for {}'.format(params)) taskqueue.add(url='/cron/remove/tweets', params=params) mail.send_mail( sender='jacoj82@gmail.com', to='jacoj82@gmail.com', subject='Cron Topics', body='All {} topics pushed'.format(len(topics)), ) app.logger.info('All {} topics pushed'.format(len(topics))) return Response('OK') @app.route('/cron/remove/tweets', methods=['GET', 'POST']) def removeTweets(): if request.method == 'POST': app.logger.info('request form: {}'.format(request.form)) topic = request.form.get('topic') elif request.method == 'GET': app.logger.info('request args: {}'.format(request.args)) topic = request.args.get('topic') if not topic: abort(400) app.logger.info('Topic param received: {}'.format(topic)) # delete old tweets (> 1 year) cnt = Tweet.removeOld(datetime.datetime.utcnow() - datetime.timedelta(days=30), topic) # continue with deleting urls taskqueue.add(url='/cron/delete/urls', params={'topic': topic}) # mail.send_mail( # sender='jacoj82@gmail.com', # to='jacoj82@gmail.com', # subject='Remove tweets {}'.format(topic), # body='{} tweets deleted for topic {}'.format(cnt, topic), # ) app.logger.info('{} tweets deleted for topic {}'.format(cnt, topic)) return Response('OK') @app.route('/cron/delete/urls', methods=['GET', 'POST']) def deleteUrls(): if request.method == 'POST': app.logger.info('request form: {}'.format(request.form)) topic = request.form.get('topic') elif request.method == 'GET': app.logger.info('request args: {}'.format(request.args)) topic = request.args.get('topic') if not topic: abort(400) app.logger.info('Topic param received: {}'.format(topic)) cnt = Link.removeOld(topic, datetime.datetime.utcnow() - datetime.timedelta(days=30)) # continue with scoring urls taskqueue.add(url='/cron/score/urls', params={'topic': topic}) # mail.send_mail( # sender='jacoj82@gmail.com', # to='jacoj82@gmail.com', # subject='Delete Urls {}'.format(topic), # body='Removed {} links for topic {}'.format(cnt, topic), # ) return Response('OK') @app.route('/cron/score/urls', methods=['GET', 'POST']) def scoreUrls(): if request.method == 'POST': app.logger.info('request form: {}'.format(request.form)) topic = request.form.get('topic') elif request.method == 'GET': app.logger.info('request args: {}'.format(request.args)) topic = request.args.get('topic') if not topic: abort(400) app.logger.info('Topic param received: {}'.format(topic)) tweets = Tweet.fetchByTopic(topic) # group by url and add score urlScores = {} for tweet in tweets: for url in tweet.urls: if url not in urlScores: urlScores[url] = { 'id': url, 'tweeted_count': 0, 'retweeted_sum': 0., 'favorite_sum': 0., } app.logger.debug('Url added: {}'.format(url)) urlScores[url]['tweeted_count'] += 1 urlScores[url]['retweeted_sum'] += math.log(max(1, tweet.retweet_count)) urlScores[url]['favorite_sum'] += math.log(max(1, tweet.favorite_count)) app.logger.info('All {} tweets parsed and found {} urls'.format(len(tweets), 
len(urlScores))) app.logger.info('Saving urls...') for url, url_info in urlScores.iteritems(): link = Link.create(topic, url, url_info) # continue to scrape for new tweets taskqueue.add(url='/cron/topic', params={'topic': topic}) app.logger.info('Task created to scrape for new tweets for {}'.format(topic)) mail.send_mail( sender='jacoj82@gmail.com', to='jacoj82@gmail.com', subject='Score urls {}'.format(topic), body='{} tweets created {} urls'.format(len(tweets), len(urlScores)), ) app.logger.info('Scoring urls done for {}'.format(topic)) return Response('OK') @app.route('/cron/topic', methods=['GET', 'POST']) def cronTopic(): access_token = 'AAAAAAAAAAAAAAAAAAAAABcJYAAAAAAAVviSzyKtPYqYlHpZxoim6DHvfjI%3DU0slNkvBKQRynT62gbvQjEhAlE2PvzVZNia99xAdoJweI2OLqe' if request.method == 'POST': app.logger.info('request form: {}'.format(request.form)) topic = request.form.get('topic') elif request.method == 'GET': app.logger.info('request args: {}'.format(request.args)) topic = request.args.get('topic') if not topic: abort(400) since_id = request.form.get('since_id') app.logger.info('Topic params received: {} {}'.format(topic, since_id)) # Requests / 15-min window (user auth) 180 # Requests / 15-min window (app auth) 450 # 450 / (15 * 60) = 0.5 per second # thus 1 request every 2 seconds month_ago = datetime.datetime.utcnow() - datetime.timedelta(days=30) day_ago = datetime.datetime.utcnow() - datetime.timedelta(days=1) params = urllib.urlencode({ 'q': 'filter:links since:{} until:{} #{} -filter:retweets'.format( month_ago.strftime('%Y-%m-%d'), day_ago.strftime('%Y-%m-%d'), topic, ), 'result_type': 'recent', 'include_entities': 1, 'count': 100, 'since_id': since_id, }) # count, until, since_id, max_id app.logger.info('params {}'.format(params)) res = urlfetch.fetch( url='https://api.twitter.com/1.1/search/tweets.json?{}'.format(params), method=urlfetch.GET, headers={ 'Authorization': 'Bearer {}'.format(access_token), }, ) app.logger.info(res) cnt = 0 max_cnt = 90 if app.config['DEBUG'] else 1222333 while cnt < max_cnt: content = json.loads(res.content) metadata = content['search_metadata'] statuses = content['statuses'] # app.logger.info('Metadata: {}'.format(metadata)) # app.logger.info('Statuses: {}'.format(len(statuses))) cnt += len(statuses) for status in statuses: app.logger.info('Processing status') tweet = Tweet.create(topic, status) if 'next_results' not in metadata: app.logger.info('No more statuses') break else: app.logger.info('Fetching more results at {}'.format(metadata['next_results'])) res = urlfetch.fetch( url='{}{}'.format('https://api.twitter.com/1.1/search/tweets.json', metadata['next_results']), method=urlfetch.GET, headers={ 'Authorization': 'Bearer {}'.format(access_token), }, ) # continue with nothing, quota will be obliterated with this mail.send_mail( sender='jacoj82@gmail.com', to='jacoj82@gmail.com', subject='Cron topic {}'.format(topic), body='Scraped {} tweets for topic {}'.format(cnt, topic), ) app.logger.info('Scraped {} tweets for topic {}'.format(cnt, topic)) return Response('OK')
apache-2.0
-4,003,832,567,429,699,600
32.569937
133
0.600684
false
3.689766
false
false
false
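The /cron/score/urls handler in the record above groups tweets by URL and damps retweet and favourite counts with a logarithm before a Link is saved per URL. A minimal standalone sketch of that aggregation, with a hypothetical tweet object standing in for the App Engine entity used in the record:

import math

def score_urls(tweets):
    # tweets: iterable of objects with .urls, .retweet_count, .favorite_count
    scores = {}
    for tweet in tweets:
        for url in tweet.urls:
            entry = scores.setdefault(url, {
                'id': url, 'tweeted_count': 0,
                'retweeted_sum': 0.0, 'favorite_sum': 0.0,
            })
            entry['tweeted_count'] += 1
            # log damping keeps one viral tweet from dominating the score
            entry['retweeted_sum'] += math.log(max(1, tweet.retweet_count))
            entry['favorite_sum'] += math.log(max(1, tweet.favorite_count))
    return scores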
joelagnel/ns-3
src/visualizer/visualizer/plugins/mesh.py
1
6674
import gtk import ns3 from visualizer.base import InformationWindow NODE_STATISTICS_MEMORY = 10 class StatisticsCollector(object): """ Collects interface statistics for all nodes. """ class NetDevStats(object): __slots__ = ['rxPackets', 'rxBytes', 'txPackets', 'txBytes', 'rxPacketRate', 'rxBitRate', 'txPacketRate', 'txBitRate'] def __init__(self, visualizer): self.node_statistics = {} # nodeid -> list(raw statistics) self.visualizer = visualizer def simulation_periodic_update(self, viz): nodes_statistics = viz.simulation.sim_helper.GetNodesStatistics() for stats in nodes_statistics: try: raw_stats_list = self.node_statistics[stats.nodeId] except KeyError: raw_stats_list = [] self.node_statistics[stats.nodeId] = raw_stats_list raw_stats_list.append(stats.statistics) while len(raw_stats_list) > NODE_STATISTICS_MEMORY: raw_stats_list.pop(0) def get_interface_statistics(self, nodeId): try: raw_stats_list = self.node_statistics[nodeId] except KeyError: return [] if len(raw_stats_list) < NODE_STATISTICS_MEMORY: return [] assert len(raw_stats_list) == NODE_STATISTICS_MEMORY tx_packets1 = [] # transmitted packets, one value per interface rx_packets1 = [] tx_bytes1 = [] rx_bytes1 = [] for iface, stats in enumerate(raw_stats_list[0]): tx_packets1.append(stats.transmittedPackets) tx_bytes1.append(stats.transmittedBytes) rx_packets1.append(stats.receivedPackets) rx_bytes1.append(stats.receivedBytes) retval = [] k = self.visualizer.sample_period*(NODE_STATISTICS_MEMORY-1) for iface, stats in enumerate(raw_stats_list[-1]): outStat = self.NetDevStats() outStat.txPackets = stats.transmittedPackets outStat.txBytes = stats.transmittedBytes outStat.rxPackets = stats.receivedPackets outStat.rxBytes = stats.receivedBytes outStat.txPacketRate = (stats.transmittedPackets - tx_packets1[iface])/k outStat.rxPacketRate = (stats.receivedPackets - rx_packets1[iface])/k outStat.txBitRate = (stats.transmittedBytes - tx_bytes1[iface])*8/k outStat.rxBitRate = (stats.receivedBytes - rx_bytes1[iface])*8/k retval.append(outStat) return retval class ShowInterfaceStatistics(InformationWindow): ( COLUMN_INTERFACE, COLUMN_TX_PACKETS, COLUMN_TX_BYTES, COLUMN_TX_PACKET_RATE, COLUMN_TX_BIT_RATE, COLUMN_RX_PACKETS, COLUMN_RX_BYTES, COLUMN_RX_PACKET_RATE, COLUMN_RX_BIT_RATE, ) = range(9) def __init__(self, visualizer, node_index, statistics_collector): InformationWindow.__init__(self) self.win = gtk.Dialog(parent=visualizer.window, flags=gtk.DIALOG_DESTROY_WITH_PARENT|gtk.DIALOG_NO_SEPARATOR, buttons=(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE)) self.win.connect("response", self._response_cb) self.win.set_title("Mesh Statistics for node %i" % node_index) self.visualizer = visualizer self.statistics_collector = statistics_collector self.node_index = node_index self.viz_node = visualizer.get_node(node_index) self.table_model = gtk.ListStore(*([str]*13)) treeview = gtk.TreeView(self.table_model) treeview.show() self.win.vbox.add(treeview) def add_column(descr, colid): column = gtk.TreeViewColumn(descr, gtk.CellRendererText(), text=colid) treeview.append_column(column) add_column("Interface", self.COLUMN_INTERFACE) add_column("Tx Packets", self.COLUMN_TX_PACKETS) add_column("Tx Bytes", self.COLUMN_TX_BYTES) add_column("Tx pkt/1s", self.COLUMN_TX_PACKET_RATE) add_column("Tx bit/1s", self.COLUMN_TX_BIT_RATE) add_column("Rx Packets", self.COLUMN_RX_PACKETS) add_column("Rx Bytes", self.COLUMN_RX_BYTES) add_column("Rx pkt/1s", self.COLUMN_RX_PACKET_RATE) add_column("Rx bit/1s", self.COLUMN_RX_BIT_RATE) self.visualizer.add_information_window(self) 
self.win.show() def _response_cb(self, win, response): self.win.destroy() self.visualizer.remove_information_window(self) def update(self): node = ns3.NodeList.GetNode(self.node_index) stats_list = self.statistics_collector.get_interface_statistics(self.node_index) self.table_model.clear() for iface, stats in enumerate(stats_list): tree_iter = self.table_model.append() netdevice = node.GetDevice(iface) interface_name = ns3.Names.FindName(netdevice) if not interface_name: interface_name = "(interface %i)" % iface self.table_model.set(tree_iter, self.COLUMN_INTERFACE, interface_name, self.COLUMN_TX_PACKETS, str(stats.txPackets), self.COLUMN_TX_BYTES, str(stats.txBytes), self.COLUMN_TX_PACKET_RATE, str(stats.txPacketRate), self.COLUMN_TX_BIT_RATE, str(stats.txBitRate), self.COLUMN_RX_PACKETS, str(stats.rxPackets), self.COLUMN_RX_BYTES, str(stats.rxBytes), self.COLUMN_RX_PACKET_RATE, str(stats.rxPacketRate), self.COLUMN_RX_BIT_RATE, str(stats.rxBitRate) ) def populate_node_menu(viz, node, menu): menu_item = gtk.MenuItem("Switch On") menu_item.show() def _show_it_on(dummy): print "Switching on\n" menu_item.connect("activate", _show_it_on) menu.add(menu_item) menu_item = gtk.MenuItem("Show Mesh Statistics") menu_item.show() def _show_it(dummy_menu_item): ShowInterfaceStatistics(viz, node.node_index, statistics_collector) menu_item.connect("activate", _show_it) menu.add(menu_item) def register(viz): statistics_collector = StatisticsCollector(viz) viz.connect("populate-node-menu", populate_node_menu) viz.connect("simulation-periodic-update", statistics_collector.simulation_periodic_update)
gpl-2.0
-4,180,465,858,970,958,000
38.02924
94
0.596644
false
3.864505
false
false
false
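StatisticsCollector in the record above keeps the last NODE_STATISTICS_MEMORY samples per node and derives rates from the first and last entries of that window, dividing by k = sample_period * (NODE_STATISTICS_MEMORY - 1). A rough, GUI-free sketch of the same arithmetic for one interface (the dict field names are illustrative, not the ns-3 binding's):

def interface_rates(first, last, sample_period, memory=10):
    # first and last are counter snapshots taken (memory - 1) sample periods apart
    k = float(sample_period) * (memory - 1)
    return {
        'tx_packet_rate': (last['tx_packets'] - first['tx_packets']) / k,
        'rx_packet_rate': (last['rx_packets'] - first['rx_packets']) / k,
        'tx_bit_rate': (last['tx_bytes'] - first['tx_bytes']) * 8 / k,
        'rx_bit_rate': (last['rx_bytes'] - first['rx_bytes']) * 8 / k,
    }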
rmmariano/testejenkins
tests/global_imports.py
1
2622
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from common import *
from os import path
from os import remove
from glob import glob

# Automatic imports
from gluon.cache import Cache
from gluon.globals import Request, Response, Session
from gluon.http import HTTP, redirect
from gluon.sql import DAL, Field, SQLDB
from gluon.sqlhtml import SQLFORM,SQLTABLE
from gluon.validators import *
from gluon.html import *
from gluon.globals import current

# fake/mock function for T
def m__T__(f):
    return f

# fake/mock function for URL
def m__URL__(a='', c='', f='', r='', args='', vars='', anchor='', extension='',
             env='', hmac_key='', hash_vars='', salt='', user_signature='',
             scheme='', host='', port='', encode_embedded_slash='', url_encode='', language=''):
    lfoo=[a,c,f,r,args,vars,anchor,extension,env,hmac_key,hash_vars,
          salt,user_signature,scheme,host,port,encode_embedded_slash,url_encode,language]
    foo = 'http://'
    for f in lfoo:
        if f != '':
            foo=foo+str(f)+'/'
    return foo

# def IS_URL(error_message='Enter a valid URL', mode='http', allowed_schemes=None,
#            prepend_scheme='http', allowed_tlds=None):
#    pass
# fake/mock function for IS_URL
def m__IS_URL__(foo,**dfoo):
    foo = str(foo)
    if foo.startswith('http://') or foo.startswith('https://'):
        return True
    return False

current.request = request = None
current.response = response = None
current.session = session = None
current.cache = cache = None
current.T = T = None

def initVars():
    global current, request, response, session, cache, T
    current.request = request = Request()
    current.response = response = Response()
    current.session = session = Session()
    current.cache = cache = Cache(request)
    current.T = T = m__T__

initVars()

deleteDB()
db = DAL('sqlite://'+DB_PATH)

import gluon.tools as gt
from mock import Mock
gt.URL=Mock(side_effect=m__URL__)
crud = gt.Crud(db)

# # Some global web2py imports

# # Already done
# from gluon.cache import Cache
# from gluon.globals import Request
# from gluon.globals import Response
# from gluon.globals import Session
# request = Request() #request = Request({})
# cache = Cache() #cache = Cache(request)
# response = Response() #works without a parameter
# session = Session() #works without a parameter
# from gluon.html import *
# from gluon.http import HTTP
# from gluon.http import redirect
# from gluon.sql import DAL
# from gluon.sql import Field
# from gluon.sql import SQLDB
# from gluon.sqlhtml import SQLFORM
# from gluon.validators import *

# # These raise errors
# import gluon.languages.translator as T #error
# from gluon.contrib.gql import GQLDB #error
mit
7,813,400,860,123,454,000
23.439252
83
0.695103
false
3.191697
false
false
false
syntheticpp/lyx
lib/lyx2lyx/lyx2lyx_tools.py
1
19009
# This file is part of lyx2lyx # -*- coding: utf-8 -*- # Copyright (C) 2011 The LyX team # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ''' This module offers several free functions to help with lyx2lyx'ing. More documentaton is below, but here is a quick guide to what they do. Optional arguments are marked by brackets. add_to_preamble(document, text): Here, text can be either a single line or a list of lines. It is bad practice to pass something with embedded newlines, but we will handle that properly. The routine checks to see whether the provided material is already in the preamble. If not, it adds it. Prepends a comment "% Added by lyx2lyx" to text. insert_to_preamble(document, text[, index]): Here, text can be either a single line or a list of lines. It is bad practice to pass something with embedded newlines, but we will handle that properly. The routine inserts text at document.preamble[index], where by default index is 0, so the material is inserted at the beginning. Prepends a comment "% Added by lyx2lyx" to text. put_cmd_in_ert(arg): Here arg should be a list of strings (lines), which we want to wrap in ERT. Returns a list of strings so wrapped. A call to this routine will often go something like this: i = find_token('\\begin_inset FunkyInset', ...) j = find_end_of_inset(document.body, i) content = lyx2latex(document[i:j + 1]) ert = put_cmd_in_ert(content) document.body[i:j+1] = ert get_ert(lines, i[, verbatim]): Here, lines is a list of lines of LyX material containing an ERT inset, whose content we want to convert to LaTeX. The ERT starts at index i. If the optional (by default: False) bool verbatim is True, the content of the ERT is returned verbatim, that is in LyX syntax (not LaTeX syntax) for the use in verbatim insets. lyx2latex(document, lines): Here, lines is a list of lines of LyX material we want to convert to LaTeX. We do the best we can and return a string containing the translated material. lyx2verbatim(document, lines): Here, lines is a list of lines of LyX material we want to convert to verbatim material (used in ERT an the like). We do the best we can and return a string containing the translated material. latex_length(slen): Convert lengths (in LyX form) to their LaTeX representation. Returns (bool, length), where the bool tells us if it was a percentage, and the length is the LaTeX representation. convert_info_insets(document, type, func): Applies func to the argument of all info insets matching certain types type : the type to match. This can be a regular expression. func : function from string to string to apply to the "arg" field of the info insets. ''' import re import string from parser_tools import find_token, find_end_of_inset from unicode_symbols import unicode_reps # This will accept either a list of lines or a single line. 
# It is bad practice to pass something with embedded newlines, # though we will handle that. def add_to_preamble(document, text): " Add text to the preamble if it is not already there. " if not type(text) is list: # split on \n just in case # it'll give us the one element list we want # if there's no \n, too text = text.split('\n') i = 0 prelen = len(document.preamble) while True: i = find_token(document.preamble, text[0], i) if i == -1: break # we need a perfect match matched = True for line in text: if i >= prelen or line != document.preamble[i]: matched = False break i += 1 if matched: return document.preamble.extend(["% Added by lyx2lyx"]) document.preamble.extend(text) # Note that text can be either a list of lines or a single line. # It should really be a list. def insert_to_preamble(document, text, index = 0): """ Insert text to the preamble at a given line""" if not type(text) is list: # split on \n just in case # it'll give us the one element list we want # if there's no \n, too text = text.split('\n') text.insert(0, "% Added by lyx2lyx") document.preamble[index:index] = text def put_cmd_in_ert(arg): ''' arg should be a list of lines we want to wrap in ERT. Returns a list of strings, with the lines so wrapped. ''' ret = ["\\begin_inset ERT", "status collapsed", "", "\\begin_layout Plain Layout", ""] # It will be faster for us to work with a single string internally. # That way, we only go through the unicode_reps loop once. if type(arg) is list: s = "\n".join(arg) else: s = arg for rep in unicode_reps: s = s.replace(rep[1], rep[0]) s = s.replace('\\', "\\backslash\n") ret += s.splitlines() ret += ["\\end_layout", "", "\\end_inset"] return ret def get_ert(lines, i, verbatim = False): 'Convert an ERT inset into LaTeX.' if not lines[i].startswith("\\begin_inset ERT"): return "" j = find_end_of_inset(lines, i) if j == -1: return "" while i < j and not lines[i].startswith("status"): i = i + 1 i = i + 1 ret = "" first = True while i < j: if lines[i] == "\\begin_layout Plain Layout": if first: first = False else: ret = ret + "\n" while i + 1 < j and lines[i+1] == "": i = i + 1 elif lines[i] == "\\end_layout": while i + 1 < j and lines[i+1] == "": i = i + 1 elif lines[i] == "\\backslash": if verbatim: ret = ret + "\n" + lines[i] + "\n" else: ret = ret + "\\" else: ret = ret + lines[i] i = i + 1 return ret def lyx2latex(document, lines): 'Convert some LyX stuff into corresponding LaTeX stuff, as best we can.' content = "" ert_end = 0 note_end = 0 hspace = "" for curline in range(len(lines)): line = lines[curline] if line.startswith("\\begin_inset Note Note"): # We want to skip LyX notes, so remember where the inset ends note_end = find_end_of_inset(lines, curline + 1) continue elif note_end >= curline: # Skip LyX notes continue elif line.startswith("\\begin_inset ERT"): # We don't want to replace things inside ERT, so figure out # where the end of the inset is. ert_end = find_end_of_inset(lines, curline + 1) continue elif line.startswith("\\begin_inset Formula"): line = line[20:] elif line.startswith("\\begin_inset Quotes"): # For now, we do a very basic reversion. Someone who understands # quotes is welcome to fix it up. 
qtype = line[20:].strip() # lang = qtype[0] side = qtype[1] dbls = qtype[2] if side == "l": if dbls == "d": line = "``" else: line = "`" else: if dbls == "d": line = "''" else: line = "'" elif line.startswith("\\begin_inset Newline newline"): line = "\\\\ " elif line.startswith("\\noindent"): line = "\\noindent " # we need the space behind the command elif line.startswith("\\begin_inset space"): line = line[18:].strip() if line.startswith("\\hspace"): # Account for both \hspace and \hspace* hspace = line[:-2] continue elif line == "\\space{}": line = "\\ " elif line == "\\thinspace{}": line = "\\," elif hspace != "": # The LyX length is in line[8:], after the \length keyword length = latex_length(line[8:])[1] line = hspace + "{" + length + "}" hspace = "" elif line.isspace() or \ line.startswith("\\begin_layout") or \ line.startswith("\\end_layout") or \ line.startswith("\\begin_inset") or \ line.startswith("\\end_inset") or \ line.startswith("\\lang") or \ line.strip() == "status collapsed" or \ line.strip() == "status open": #skip all that stuff continue # this needs to be added to the preamble because of cases like # \textmu, \textbackslash, etc. add_to_preamble(document, ['% added by lyx2lyx for converted index entries', '\\@ifundefined{textmu}', ' {\\usepackage{textcomp}}{}']) # a lossless reversion is not possible # try at least to handle some common insets and settings if ert_end >= curline: line = line.replace(r'\backslash', '\\') else: # No need to add "{}" after single-nonletter macros line = line.replace('&', '\\&') line = line.replace('#', '\\#') line = line.replace('^', '\\textasciicircum{}') line = line.replace('%', '\\%') line = line.replace('_', '\\_') line = line.replace('$', '\\$') # Do the LyX text --> LaTeX conversion for rep in unicode_reps: line = line.replace(rep[1], rep[0]) line = line.replace(r'\backslash', r'\textbackslash{}') line = line.replace(r'\series bold', r'\bfseries{}').replace(r'\series default', r'\mdseries{}') line = line.replace(r'\shape italic', r'\itshape{}').replace(r'\shape smallcaps', r'\scshape{}') line = line.replace(r'\shape slanted', r'\slshape{}').replace(r'\shape default', r'\upshape{}') line = line.replace(r'\emph on', r'\em{}').replace(r'\emph default', r'\em{}') line = line.replace(r'\noun on', r'\scshape{}').replace(r'\noun default', r'\upshape{}') line = line.replace(r'\bar under', r'\underbar{').replace(r'\bar default', r'}') line = line.replace(r'\family sans', r'\sffamily{}').replace(r'\family default', r'\normalfont{}') line = line.replace(r'\family typewriter', r'\ttfamily{}').replace(r'\family roman', r'\rmfamily{}') line = line.replace(r'\InsetSpace ', r'').replace(r'\SpecialChar ', r'') content += line return content def lyx2verbatim(document, lines): 'Convert some LyX stuff into corresponding verbatim stuff, as best we can.' content = lyx2latex(document, lines) content = re.sub(r'\\(?!backslash)', r'\n\\backslash\n', content) return content def latex_length(slen): ''' Convert lengths to their LaTeX representation. Returns (bool, length), where the bool tells us if it was a percentage, and the length is the LaTeX representation. 
''' i = 0 percent = False # the slen has the form # ValueUnit+ValueUnit-ValueUnit or # ValueUnit+-ValueUnit # the + and - (glue lengths) are optional # the + always precedes the - # Convert relative lengths to LaTeX units units = {"text%":"\\textwidth", "col%":"\\columnwidth", "page%":"\\paperwidth", "line%":"\\linewidth", "theight%":"\\textheight", "pheight%":"\\paperheight"} for unit in list(units.keys()): i = slen.find(unit) if i == -1: continue percent = True minus = slen.rfind("-", 1, i) plus = slen.rfind("+", 0, i) latex_unit = units[unit] if plus == -1 and minus == -1: value = slen[:i] value = str(float(value)/100) end = slen[i + len(unit):] slen = value + latex_unit + end if plus > minus: value = slen[plus + 1:i] value = str(float(value)/100) begin = slen[:plus + 1] end = slen[i+len(unit):] slen = begin + value + latex_unit + end if plus < minus: value = slen[minus + 1:i] value = str(float(value)/100) begin = slen[:minus + 1] slen = begin + value + latex_unit # replace + and -, but only if the - is not the first character slen = slen[0] + slen[1:].replace("+", " plus ").replace("-", " minus ") # handle the case where "+-1mm" was used, because LaTeX only understands # "plus 1mm minus 1mm" if slen.find("plus minus"): lastvaluepos = slen.rfind(" ") lastvalue = slen[lastvaluepos:] slen = slen.replace(" ", lastvalue + " ") return (percent, slen) def length_in_bp(length): " Convert a length in LyX format to its value in bp units " em_width = 10.0 / 72.27 # assume 10pt font size text_width = 8.27 / 1.7 # assume A4 with default margins # scale factors are taken from Length::inInch() scales = {"bp" : 1.0, "cc" : (72.0 / (72.27 / (12.0 * 0.376 * 2.845))), "cm" : (72.0 / 2.54), "dd" : (72.0 / (72.27 / (0.376 * 2.845))), "em" : (72.0 * em_width), "ex" : (72.0 * em_width * 0.4305), "in" : 72.0, "mm" : (72.0 / 25.4), "mu" : (72.0 * em_width / 18.0), "pc" : (72.0 / (72.27 / 12.0)), "pt" : (72.0 / (72.27)), "sp" : (72.0 / (72.27 * 65536.0)), "text%" : (72.0 * text_width / 100.0), "col%" : (72.0 * text_width / 100.0), # assume 1 column "page%" : (72.0 * text_width * 1.7 / 100.0), "line%" : (72.0 * text_width / 100.0), "theight%" : (72.0 * text_width * 1.787 / 100.0), "pheight%" : (72.0 * text_width * 2.2 / 100.0)} rx = re.compile(r'^\s*([^a-zA-Z%]+)([a-zA-Z%]+)\s*$') m = rx.match(length) if not m: document.warning("Invalid length value: " + length + ".") return 0 value = m.group(1) unit = m.group(2) if not unit in scales.keys(): document.warning("Unknown length unit: " + unit + ".") return value return "%g" % (float(value) * scales[unit]) def revert_flex_inset(lines, name, LaTeXname): " Convert flex insets to TeX code " i = 0 while True: i = find_token(lines, '\\begin_inset Flex ' + name, i) if i == -1: return z = find_end_of_inset(lines, i) if z == -1: document.warning("Can't find end of Flex " + name + " inset.") i += 1 continue # remove the \end_inset lines[z - 2:z + 1] = put_cmd_in_ert("}") # we need to reset character layouts if necessary j = find_token(lines, '\\emph on', i, z) k = find_token(lines, '\\noun on', i, z) l = find_token(lines, '\\series', i, z) m = find_token(lines, '\\family', i, z) n = find_token(lines, '\\shape', i, z) o = find_token(lines, '\\color', i, z) p = find_token(lines, '\\size', i, z) q = find_token(lines, '\\bar under', i, z) r = find_token(lines, '\\uuline on', i, z) s = find_token(lines, '\\uwave on', i, z) t = find_token(lines, '\\strikeout on', i, z) if j != -1: lines.insert(z - 2, "\\emph default") if k != -1: lines.insert(z - 2, "\\noun default") if l != -1: 
lines.insert(z - 2, "\\series default") if m != -1: lines.insert(z - 2, "\\family default") if n != -1: lines.insert(z - 2, "\\shape default") if o != -1: lines.insert(z - 2, "\\color inherit") if p != -1: lines.insert(z - 2, "\\size default") if q != -1: lines.insert(z - 2, "\\bar default") if r != -1: lines.insert(z - 2, "\\uuline default") if s != -1: lines.insert(z - 2, "\\uwave default") if t != -1: lines.insert(z - 2, "\\strikeout default") lines[i:i + 4] = put_cmd_in_ert(LaTeXname + "{") i += 1 def revert_font_attrs(lines, name, LaTeXname): " Reverts font changes to TeX code " i = 0 changed = False while True: i = find_token(lines, name + ' on', i) if i == -1: return changed j = find_token(lines, name + ' default', i) k = find_token(lines, name + ' on', i + 1) # if there is no default set, the style ends with the layout # assure hereby that we found the correct layout end if j != -1 and (j < k or k == -1): lines[j:j + 1] = put_cmd_in_ert("}") else: j = find_token(lines, '\\end_layout', i) lines[j:j] = put_cmd_in_ert("}") lines[i:i + 1] = put_cmd_in_ert(LaTeXname + "{") changed = True i += 1 def revert_layout_command(lines, name, LaTeXname): " Reverts a command from a layout to TeX code " i = 0 while True: i = find_token(lines, '\\begin_layout ' + name, i) if i == -1: return k = -1 # find the next layout j = i + 1 while k == -1: j = find_token(lines, '\\begin_layout', j) l = len(lines) # if nothing was found it was the last layout of the document if j == -1: lines[l - 4:l - 4] = put_cmd_in_ert("}") k = 0 # exclude plain layout because this can be TeX code or another inset elif lines[j] != '\\begin_layout Plain Layout': lines[j - 2:j - 2] = put_cmd_in_ert("}") k = 0 else: j += 1 lines[i] = '\\begin_layout Standard' lines[i + 1:i + 1] = put_cmd_in_ert(LaTeXname + "{") i += 1 def hex2ratio(s): " Converts an RRGGBB-type hexadecimal string to a float in [0.0,1.0] " try: val = int(s, 16) except: val = 0 if val != 0: val += 1 return str(val / 256.0) def str2bool(s): "'true' goes to True, case-insensitively, and we strip whitespace." s = s.strip().lower() return s == "true" def convert_info_insets(document, type, func): "Convert info insets matching type using func." i = 0 type_re = re.compile(r'^type\s+"(%s)"$' % type) arg_re = re.compile(r'^arg\s+"(.*)"$') while True: i = find_token(document.body, "\\begin_inset Info", i) if i == -1: return t = type_re.match(document.body[i + 1]) if t: arg = arg_re.match(document.body[i + 2]) if arg: new_arg = func(arg.group(1)) document.body[i + 2] = 'arg "%s"' % new_arg i += 3
gpl-2.0
-5,966,020,858,891,533,000
35.001894
110
0.559209
false
3.468163
false
false
false
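latex_length in the record above maps LyX's relative units (col%, text%, and so on) onto LaTeX macros and rescales the numeric part by 1/100, returning a (was_percentage, latex_string) pair. A hedged usage sketch; the expected values in the comments follow from the record's logic rather than from running LyX itself:

# latex_length("2cm")      -> (False, '2cm')
# latex_length("50col%")   -> (True, '0.5\columnwidth')
# latex_length("100text%") -> (True, '1.0\textwidth')
is_percent, latex_repr = latex_length("50col%")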
yoseforb/lollypop
src/define.py
1
2679
#!/usr/bin/python # Copyright (c) 2014-2015 Cedric Bellegarde <cedric.bellegarde@adishatz.org> # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # This is global object initialised at lollypop start # member init order is important! try: from gi.repository import Secret SecretSchema = { "org.gnome.Lollypop.lastfm.login": Secret.SchemaAttributeType.STRING } SecretAttributes = { "org.gnome.Lollypop.lastfm.login": "Last.fm login" } except: Secret = None SecretSchema = None SecretAttributes = None GOOGLE_INC = 8 GOOGLE_MAX = 100 class Lp: settings = None db = None sql = None albums = None artists = None genres = None tracks = None playlists = None player = None art = None window = None notify = None lastfm = None debug = False # Represent what to do on next track class NextContext: NONE = 0 # Continue playback STOP_TRACK = 1 # Stop after current track STOP_ALBUM = 2 # Stop after current album STOP_ARTIST = 3 # Stop after current artist START_NEW_ALBUM = 4 # Start a new album # Represent playback context class PlayContext: genre_id = None next = NextContext.NONE class GstPlayFlags: GST_PLAY_FLAG_VIDEO = 1 << 0 # We want video output GST_PLAY_FLAG_AUDIO = 1 << 1 # We want audio output GST_PLAY_FLAG_TEXT = 1 << 3 # We want subtitle output class ArtSize: SMALL_RADIUS = 2 RADIUS = 3 SMALL_BORDER = 1 BORDER = 3 SMALL = 32 MEDIUM = 48 BIG = 200 MONSTER = 500 class Shuffle: NONE = 0 # No shuffle TRACKS = 1 # Shuffle by tracks on genre ALBUMS = 2 # Shuffle by albums on genre TRACKS_ARTIST = 3 # Shuffle by tracks on artist ALBUMS_ARTIST = 4 # Shuffle by albums on artist # Order is important class Type: NONE = -1 POPULARS = -2 RANDOMS = -3 RECENTS = -4 PLAYLISTS = -5 RADIOS = -6 EXTERNALS = -7 ALL = -8 COMPILATIONS = -999 DEVICES = -1000 SEPARATOR = -2000
gpl-3.0
-5,321,358,665,882,580,000
24.759615
76
0.651362
false
3.529644
false
false
false
openqt/algorithms
projecteuler/ac/old/pe051_prime_digit_replacements.py
1
3267
#!/usr/bin/env python # coding=utf-8 """ Prime digit replacements Problem 51 By replacing the 1st digit of the 2-digit number *3, it turns out that six of the nine possible values: 13, 23, 43, 53, 73, and 83, are all prime. By replacing the 3rd and 4th digits of 56**3 with the same digit, this 5-digit number is the first example having seven primes among the ten generated numbers, yielding the family: 56003, 56113, 56333, 56443, 56663, 56773, and 56993. Consequently 56003, being the first member of this family, is the smallest prime with this property. Find the smallest prime which, by replacing part of the number (not necessarily adjacent digits) with the same digit, is part of an eight prime value family. """ from __future__ import print_function from utils import prime_sieve from pe049_prime_permutations import seq_int def combinations(seq, k): """combinations by lexicographic order :param seq: choices :param k: K :return: next combination """ def _inner_dfs(seq, k, vals): if len(seq) + len(vals) < k: return if len(vals) >= k: # got one yield vals else: for i in range(len(seq)): for j in _inner_dfs(seq[i + 1:], k, vals + [seq[i]]): yield j # here we added the extra parameter for i in _inner_dfs(seq, k, []): yield i def mask_same_digits(n, count=2): """mask same digit combinations by '*' :param n: the number :param count: least same digits :return: mask list """ def _same_digits(seq, count): m = {} for pos, val in enumerate(seq): # inverted index m.setdefault(val, []).append(pos) for val, pos in m.items(): # multi pos(es) if len(pos) >= count: yield pos def _mask(seq, mask, sign='*'): for i in mask: seq[i] = sign return ''.join(map(str, seq)) seq = seq_int(n) for pos in _same_digits(seq, count): for mask in combinations(pos, count): # all possible combinations yield _mask(seq[:], mask) # def combine(self, NN, K): # """Iterative 8-line solution using C(n, k) = C(n-1, k) + C(n-1, k-1) # # https://discuss.leetcode.com/topic/40827/iterative-8-line-solution-using-c-n-k-c-n-1-k-c-n-1-k-1 # :param self: # :param NN: # :param K: # :return: # """ # result = [[[]]] # for n in range(1, NN + 1): # newRes = [[[]]] # C(n, 0) = 0 # for k in range(1, n): # # C(n, k) = C(n-1, k) + C(n-1, k-1) # newRes.append(result[k] + [_ + [n] for _ in result[k - 1]]) # # C(n, n) = C(n-1, n-1) = 1 # newRes.append([result[n - 1][0] + [n]]) # result = newRes # return result[K] if __name__ == '__main__': # test only print([i for i in mask_same_digits(222323, 3)]) print([i for i in mask_same_digits(323333, 3)]) print('-' * 30) caches = {} for i in prime_sieve(1000000): for seq in mask_same_digits(i, 3): caches.setdefault(seq, []).append(i) print('> caches %d' % len(caches)) for k in caches: if len(caches[k]) >= 8: print((k, len(caches[k])), caches[k]) # 121313
gpl-3.0
-7,675,758,474,691,941,000
28.972477
102
0.566269
false
3.159574
false
false
false
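mask_same_digits in the record above is the generator that turns a prime into the wildcard families the problem statement describes; keying a dict of primes by those masks is what lets the script look for an eight-member family. A small illustration, assuming seq_int from the referenced pe049 module simply returns the digit list of its argument:

# 56003 has the digit 0 at positions 2 and 3, so the only 2-digit mask is '56**3'
family_keys = list(mask_same_digits(56003, 2))   # -> ['56**3']
# 121313 has three 1s, so the only 3-digit mask is '*2*3*3'
family_keys = list(mask_same_digits(121313, 3))  # -> ['*2*3*3']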
byt3smith/CIRTKit
modules/reversing/viper/strings.py
1
8205
# This file is part of Viper - https://github.com/viper-framework/viper # See the file 'LICENSE' for copying permission. import os import re from socket import inet_pton, AF_INET6, error as socket_error from lib.common.abstracts import Module from lib.core.session import __sessions__ DOMAIN_REGEX = re.compile('([a-z0-9][a-z0-9\-]{0,61}[a-z0-9]\.)+[a-z0-9][a-z0-9\-]*[a-z0-9]', re.IGNORECASE) IPV4_REGEX = re.compile('[1-2]?[0-9]?[0-9]\.[1-2]?[0-9]?[0-9]\.[1-2]?[0-9]?[0-9]\.[1-2]?[0-9]?[0-9]') IPV6_REGEX = re.compile('((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}' '|((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9' 'A-Fa-f]{1,4}:){5}(((:[0-9A-Fa-f]{1,4}){1,2})|:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[' '0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3})|:))|(([0-9A-Fa-f]{1,4}:){4}(((:[0-9A-Fa-f]{1,4}){1,3' '})|((:[0-9A-Fa-f]{1,4})?:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[' '1-9]?\d)){3}))|:))|(([0-9A-Fa-f]{1,4}:){3}(((:[0-9A-Fa-f]{1,4}){1,4})|((:[0-9A-Fa-f]{1,' '4}){0,2}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:' '))|(([0-9A-Fa-f]{1,4}:){2}(((:[0-9A-Fa-f]{1,4}){1,5})|((:[0-9A-Fa-f]{1,4}){0,3}:((25[0-' '5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(([0-9A-Fa-f]' '{1,4}:){1}(((:[0-9A-Fa-f]{1,4}){1,6})|((:[0-9A-Fa-f]{1,4}){0,4}:((25[0-5]|2[0-4]\d|1\d' '\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)){3}))|:))|(:(((:[0-9A-Fa-f]{1,4}){1,7}' ')|((:[0-9A-Fa-f]{1,4}){0,5}:((25[0-5]|2[0-4]\d|1\d\d|[1-9]?\d)(\.(25[0-5]|2[0-4]\d|1\d' '\d|[1-9]?\d)){3}))|:)))(%.+)?', re.IGNORECASE | re.S) TLD = [ 'AC', 'ACADEMY', 'ACTOR', 'AD', 'AE', 'AERO', 'AF', 'AG', 'AGENCY', 'AI', 'AL', 'AM', 'AN', 'AO', 'AQ', 'AR', 'ARPA', 'AS', 'ASIA', 'AT', 'AU', 'AW', 'AX', 'AZ', 'BA', 'BAR', 'BARGAINS', 'BB', 'BD', 'BE', 'BERLIN', 'BEST', 'BF', 'BG', 'BH', 'BI', 'BID', 'BIKE', 'BIZ', 'BJ', 'BLUE', 'BM', 'BN', 'BO', 'BOUTIQUE', 'BR', 'BS', 'BT', 'BUILD', 'BUILDERS', 'BUZZ', 'BV', 'BW', 'BY', 'BZ', 'CA', 'CAB', 'CAMERA', 'CAMP', 'CARDS', 'CAREERS', 'CAT', 'CATERING', 'CC', 'CD', 'CENTER', 'CEO', 'CF', 'CG', 'CH', 'CHEAP', 'CHRISTMAS', 'CI', 'CK', 'CL', 'CLEANING', 'CLOTHING', 'CLUB', 'CM', 'CN', 'CO', 'CODES', 'COFFEE', 'COM', 'COMMUNITY', 'COMPANY', 'COMPUTER', 'CONDOS', 'CONSTRUCTION', 'CONTRACTORS', 'COOL', 'COOP', 'CR', 'CRUISES', 'CU', 'CV', 'CW', 'CX', 'CY', 'CZ', 'DANCE', 'DATING', 'DE', 'DEMOCRAT', 'DIAMONDS', 'DIRECTORY', 'DJ', 'DK', 'DM', 'DNP', 'DO', 'DOMAINS', 'DZ', 'EC', 'EDU', 'EDUCATION', 'EE', 'EG', 'EMAIL', 'ENTERPRISES', 'EQUIPMENT', 'ER', 'ES', 'ESTATE', 'ET', 'EU', 'EVENTS', 'EXPERT', 'EXPOSED', 'FARM', 'FI', 'FISH', 'FJ', 'FK', 'FLIGHTS', 'FLORIST', 'FM', 'FO', 'FOUNDATION', 'FR', 'FUTBOL', 'GA', 'GALLERY', 'GB', 'GD', 'GE', 'GF', 'GG', 'GH', 'GI', 'GIFT', 'GL', 'GLASS', 'GM', 'GN', 'GOV', 'GP', 'GQ', 'GR', 'GRAPHICS', 'GS', 'GT', 'GU', 'GUITARS', 'GURU', 'GW', 'GY', 'HK', 'HM', 'HN', 'HOLDINGS', 'HOLIDAY', 'HOUSE', 'HR', 'HT', 'HU', 'ID', 'IE', 'IL', 'IM', 'IMMOBILIEN', 'IN', 'INDUSTRIES', 'INFO', 'INK', 'INSTITUTE', 'INT', 'INTERNATIONAL', 'IO', 'IQ', 'IR', 'IS', 'IT', 'JE', 'JM', 'JO', 'JOBS', 'JP', 'KAUFEN', 'KE', 'KG', 'KH', 'KI', 'KIM', 'KITCHEN', 'KIWI', 'KM', 'KN', 'KOELN', 'KP', 'KR', 'KRED', 'KW', 'KY', 'KZ', 'LA', 'LAND', 'LB', 'LC', 'LI', 'LIGHTING', 'LIMO', 'LINK', 'LK', 'LR', 'LS', 'LT', 'LU', 'LUXURY', 'LV', 'LY', 'MA', 'MAISON', 'MANAGEMENT', 'MANGO', 'MARKETING', 'MC', 'MD', 'ME', 'MENU', 'MG', 'MH', 'MIL', 'MK', 'ML', 'MM', 'MN', 'MO', 
'MOBI', 'MODA', 'MONASH', 'MP', 'MQ', 'MR', 'MS', 'MT', 'MU', 'MUSEUM', 'MV', 'MW', 'MX', 'MY', 'MZ', 'NA', 'NAGOYA', 'NAME', 'NC', 'NE', 'NET', 'NEUSTAR', 'NF', 'NG', 'NI', 'NINJA', 'NL', 'NO', 'NP', 'NR', 'NU', 'NZ', 'OKINAWA', 'OM', 'ONION', 'ONL', 'ORG', 'PA', 'PARTNERS', 'PARTS', 'PE', 'PF', 'PG', 'PH', 'PHOTO', 'PHOTOGRAPHY', 'PHOTOS', 'PICS', 'PINK', 'PK', 'PL', 'PLUMBING', 'PM', 'PN', 'POST', 'PR', 'PRO', 'PRODUCTIONS', 'PROPERTIES', 'PS', 'PT', 'PUB', 'PW', 'PY', 'QA', 'QPON', 'RE', 'RECIPES', 'RED', 'RENTALS', 'REPAIR', 'REPORT', 'REVIEWS', 'RICH', 'RO', 'RS', 'RU', 'RUHR', 'RW', 'SA', 'SB', 'SC', 'SD', 'SE', 'SEXY', 'SG', 'SH', 'SHIKSHA', 'SHOES', 'SI', 'SINGLES', 'SJ', 'SK', 'SL', 'SM', 'SN', 'SO', 'SOCIAL', 'SOLAR', 'SOLUTIONS', 'SR', 'ST', 'SU', 'SUPPLIES', 'SUPPLY', 'SUPPORT', 'SV', 'SX', 'SY', 'SYSTEMS', 'SZ', 'TATTOO', 'TC', 'TD', 'TECHNOLOGY', 'TEL', 'TF', 'TG', 'TH', 'TIENDA', 'TIPS', 'TJ', 'TK', 'TL', 'TM', 'TN', 'TO', 'TODAY', 'TOKYO', 'TOOLS', 'TP', 'TR', 'TRAINING', 'TRAVEL', 'TT', 'TV', 'TW', 'TZ', 'UA', 'UG', 'UK', 'UNO', 'US', 'UY', 'UZ', 'VA', 'VACATIONS', 'VC', 'VE', 'VENTURES', 'VG', 'VI', 'VIAJES', 'VILLAS', 'VISION', 'VN', 'VOTE', 'VOTING', 'VOTO', 'VOYAGE', 'VU', 'WANG', 'WATCH', 'WED', 'WF', 'WIEN', 'WIKI', 'WORKS', 'WS', 'XN--3BST00M', 'XN--3DS443G', 'XN--3E0B707E', 'XN--45BRJ9C', 'XN--55QW42G', 'XN--55QX5D', 'XN--6FRZ82G', 'XN--6QQ986B3XL', 'XN--80AO21A', 'XN--80ASEHDB', 'XN--80ASWG', 'XN--90A3AC', 'XN--C1AVG', 'XN--CG4BKI', 'XN--CLCHC0EA0B2G2A9GCD', 'XN--D1ACJ3B', 'XN--FIQ228C5HS', 'XN--FIQ64B', 'XN--FIQS8S', 'XN--FIQZ9S', 'XN--FPCRJ9C3D', 'XN--FZC2C9E2C', 'XN--GECRJ9C', 'XN--H2BRJ9C', 'XN--I1B6B1A6A2E', 'XN--IO0A7I', 'XN--J1AMH', 'XN--J6W193G', 'XN--KPRW13D', 'XN--KPRY57D', 'XN--L1ACC', 'XN--LGBBAT1AD8J', 'XN--MGB9AWBF', 'XN--MGBA3A4F16A', 'XN--MGBAAM7A8H', 'XN--MGBAB2BD', 'XN--MGBAYH7GPA', 'XN--MGBBH1A71E', 'XN--MGBC0A9AZCG', 'XN--MGBERP4A5D4AR', 'XN--MGBX4CD0AB', 'XN--NGBC5AZD', 'XN--NQV7F', 'XN--NQV7FS00EMA', 'XN--O3CW4H', 'XN--OGBPF8FL', 'XN--P1AI', 'XN--PGBS0DH', 'XN--Q9JYB4C', 'XN--RHQV96G', 'XN--S9BRJ9C', 'XN--UNUP4Y', 'XN--WGBH1C', 'XN--WGBL6A', 'XN--XKC2AL3HYE2A', 'XN--XKC2DL3A5EE0H', 'XN--YFRO4I67O', 'XN--YGBI2AMMX', 'XN--ZFR164B', 'XXX', 'XYZ', 'YE', 'YT', 'ZA', 'ZM', 'ZONE', 'ZW'] class Strings(Module): cmd = 'strings' description = 'Extract strings from file' authors = ['nex', 'Brian Wallace'] def __init__(self): super(Strings, self).__init__() self.parser.add_argument('-a', '--all', action='store_true', help='Print all strings') self.parser.add_argument('-H', '--hosts', action='store_true', help='Extract IP addresses and domains from strings') def extract_hosts(self, strings): results = [] for entry in strings: to_add = False if DOMAIN_REGEX.search(entry) and not IPV4_REGEX.search(entry): if entry[entry.rfind('.') + 1:].upper() in TLD: to_add = True elif IPV4_REGEX.search(entry): to_add = True elif IPV6_REGEX.search(entry): try: inet_pton(AF_INET6, entry) except socket_error: continue else: to_add = True if to_add: if entry not in results: results.append(entry) for result in results: self.log('item', result) def run(self): super(Strings, self).run() if self.args is None: return arg_all = self.args.all arg_hosts = self.args.hosts if not __sessions__.is_set(): self.log('error', "No session opened") return if os.path.exists(__sessions__.current.file.path): regexp = '[\x20\x30-\x39\x41-\x5a\x61-\x7a\-\.:]{4,}' strings = re.findall(regexp, __sessions__.current.file.data) if arg_all: for entry in strings: self.log('', entry) elif arg_hosts: 
self.extract_hosts(strings) else: self.log('error', 'At least one of the parameters is required') self.usage()
mit
1,134,222,950,011,861,100
64.64
124
0.467642
false
2.187417
false
false
false
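extract_hosts in the record above reports a domain only when its final label is in the TLD whitelist, and trusts an IPv6 regex match only after inet_pton confirms the address parses. The acceptance test pulled out of the loop as a sketch (DOMAIN_REGEX, IPV4_REGEX, IPV6_REGEX and TLD are the module-level definitions from the record):

from socket import inet_pton, AF_INET6, error as socket_error

def looks_like_host(entry):
    # domain candidates: the last label must be a known TLD
    if DOMAIN_REGEX.search(entry) and not IPV4_REGEX.search(entry):
        return entry[entry.rfind('.') + 1:].upper() in TLD
    # dotted-quad candidates are accepted as-is
    if IPV4_REGEX.search(entry):
        return True
    # IPv6 candidates must survive a real parse
    if IPV6_REGEX.search(entry):
        try:
            inet_pton(AF_INET6, entry)
        except socket_error:
            return False
        return True
    return False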
ekansa/open-context-py
opencontext_py/apps/imports/fieldannotations/complexdescriptions.py
1
11675
import uuid as GenUUID from django.conf import settings from django.db import models from opencontext_py.libs.general import LastUpdatedOrderedDict from opencontext_py.apps.ocitems.manifest.models import Manifest from opencontext_py.apps.ocitems.documents.models import OCdocument from opencontext_py.apps.imports.fields.models import ImportField from opencontext_py.apps.imports.fieldannotations.models import ImportFieldAnnotation from opencontext_py.apps.imports.fields.templating import ImportProfile from opencontext_py.apps.imports.records.models import ImportCell from opencontext_py.apps.imports.records.process import ProcessCells from opencontext_py.apps.imports.fieldannotations.general import ProcessGeneral from opencontext_py.apps.imports.sources.unimport import UnImport from opencontext_py.apps.ocitems.complexdescriptions.models import ComplexDescription from opencontext_py.apps.ocitems.assertions.models import Assertion from opencontext_py.apps.ocitems.strings.models import OCstring from opencontext_py.apps.ocitems.strings.manage import StringManagement # Processes to generate complex descriptions for other manifest recorded entities class ProcessComplexDescriptions(): FRAG_ID_PREFIX = '#cplxdes-' # fragment id prefix for a complex description def __init__(self, source_id): self.source_id = source_id pg = ProcessGeneral(source_id) pg.get_source() self.project_uuid = pg.project_uuid self.complex_des_fields = [] self.start_row = 1 self.batch_size = settings.IMPORT_BATCH_SIZE self.end_row = self.batch_size self.count_active_fields = 0 self.count_new_assertions = 0 self.obs_num_complex_description_assertions = 1 def clear_source(self): """ Clears a prior import if the start_row is 1. This makes sure new entities and assertions are made for this source_id, and we don't duplicate things """ if self.start_row <= 1: # get rid of "documents" related assertions made from this source unimport = UnImport(self.source_id, self.project_uuid) unimport.delete_complex_description_assertions() def process_complex_batch(self): """ processes fields for documents entities starting with a given row number. This iterates over all containment fields, starting with the root subjhect field """ self.clear_source() # clear prior import for this source self.end_row = self.start_row + self.batch_size self.get_complex_description_fields() label_str_uuids = {} if len(self.complex_des_fields) > 0: print('Number of Complex Description Fields: ' + str(len(self.complex_des_fields))) cp_id_number = 0 for cp_field in self.complex_des_fields: cp_id_number += 1 pc = ProcessCells(self.source_id, self.start_row) distinct_records = pc.get_field_records_by_fl_uuid(cp_field.describes_field.field_num, False) if distinct_records is not False: # sort the list in row_order from the import table pg = ProcessGeneral(self.source_id) distinct_records = pg.order_distinct_records(distinct_records) for row_key, dist_rec in distinct_records.items(): if cp_field.obs_num < 1: obs_num = 1 else: obs_num = cp_field.obs_num obs_node = '#obs-' + str(obs_num) subject_uuid = dist_rec['imp_cell_obj'].fl_uuid subject_type = cp_field.describes_field.field_type subject_ok = dist_rec['imp_cell_obj'].cell_ok subject_record = dist_rec['imp_cell_obj'].record if subject_uuid is False or\ len(subject_record) < 1: subject_ok = False if subject_uuid == 'False': subject_ok = False sort = 0 in_rows = dist_rec['rows'] print('Look for complex description labels in rows: ' + str(in_rows)) if subject_ok is not False: # OK! 
we have the subjects of complex descriptions # with uuids, so now we can make an fl_uuid for each # of the complex description fields. complex_uuid = subject_uuid + self.FRAG_ID_PREFIX + str(cp_id_number) complex_recs = ImportCell.objects\ .filter(source_id=self.source_id, field_num=cp_field.field_num, row_num__in=in_rows)\ .exclude(record='') if len(complex_recs) > 0: # we have records in the complex description field that are not blank # and are associated with the subject of the complex description. # so now, let's record this association. save_ok = False new_ass = Assertion() new_ass.uuid = subject_uuid new_ass.subject_type = subject_type new_ass.project_uuid = self.project_uuid new_ass.source_id = self.source_id + ProcessGeneral.COMPLEX_DESCRIPTION_SOURCE_SUFFIX new_ass.obs_node = obs_node new_ass.obs_num = obs_num new_ass.sort = 100 + cp_id_number new_ass.visibility = 1 new_ass.predicate_uuid = ComplexDescription.PREDICATE_COMPLEX_DES new_ass.object_type = 'complex-description' new_ass.object_uuid = complex_uuid new_ass.save() try: print('Saved complex-description: ' + complex_uuid) new_ass.save() save_ok = True except: save_ok = False if save_ok: self.count_new_assertions += 1 # now look through the complex description records and make labels for comp_rec in complex_recs: # first save the fl_uuid for the complex description comp_rec.fl_uuid = complex_uuid comp_rec.save() if isinstance(cp_field.value_prefix, str): cp_label = cp_field.value_prefix + comp_rec.record else: cp_label = comp_rec.record if cp_label not in label_str_uuids: # make a uuid for the record value # adding a source_id suffix keeps this from being deleted as descriptions get processed sm = StringManagement() sm.project_uuid = self.project_uuid sm.source_id = self.source_id + ProcessGeneral.COMPLEX_DESCRIPTION_SOURCE_SUFFIX oc_string = sm.get_make_string(cp_label) content_uuid = oc_string.uuid label_str_uuids[cp_label] = content_uuid content_uuid = label_str_uuids[cp_label] save_ok = False new_ass = Assertion() new_ass.uuid = complex_uuid new_ass.subject_type = 'complex-description' new_ass.project_uuid = self.project_uuid # adding a source_id suffix keeps this from being deleted as descriptions get processed new_ass.source_id = self.source_id + ProcessGeneral.COMPLEX_DESCRIPTION_SOURCE_SUFFIX new_ass.obs_node = '#obs-' + str(self.obs_num_complex_description_assertions) new_ass.obs_num = self.obs_num_complex_description_assertions new_ass.sort = 1 new_ass.visibility = 1 new_ass.predicate_uuid = ComplexDescription.PREDICATE_COMPLEX_DES_LABEL new_ass.object_type = 'xsd:string' new_ass.object_uuid = content_uuid try: new_ass.save() save_ok = True except: save_ok = False if save_ok: self.count_new_assertions += 1 def get_complex_description_fields(self): """ Makes a list of document fields """ complex_des_fields = [] raw_cp_fields = ImportField.objects\ .filter(source_id=self.source_id, field_type='complex-description') for cp_field in raw_cp_fields: desribes_fields = ImportFieldAnnotation.objects\ .filter(source_id=self.source_id, field_num=cp_field.field_num, predicate=ImportFieldAnnotation.PRED_COMPLEX_DES)[:1] if len(desribes_fields) > 0: desc_field_objs = ImportField.objects\ .filter(source_id=self.source_id, field_num=desribes_fields[0].object_field_num, field_type__in=ImportProfile.DEFAULT_SUBJECT_TYPE_FIELDS)[:1] if len(desc_field_objs) > 0: # OK! 
the complex property field describes a field with the correct field type (ImportProfile.DEFAULT_SUBJECT_TYPE_FIELDS) # it is OK then to use to make complex descriptions cp_field.describes_field = desc_field_objs[0] complex_des_fields.append(cp_field) self.complex_des_fields = complex_des_fields self.count_active_fields = len(self.complex_des_fields) return self.complex_des_fields
gpl-3.0
-8,557,080,162,303,926,000
59.807292
142
0.484968
false
5.179681
false
false
false
gabelula/b-counted
.google_appengine/tools/bulkload_client.py
1
1747
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Convenience wrapper for starting bulkload_client.py""" import os import sys if not hasattr(sys, 'version_info'): sys.stderr.write('Very old versions of Python are not supported. Please ' 'use version 2.5 or greater.\n') sys.exit(1) version_tuple = tuple(sys.version_info[:2]) if version_tuple < (2, 4): sys.stderr.write('Error: Python %d.%d is not supported. Please use ' 'version 2.5 or greater.\n' % version_tuple) sys.exit(1) if version_tuple == (2, 4): sys.stderr.write('Warning: Python 2.4 is not supported; this program may ' 'break. Please use version 2.5 or greater.\n') BULKLOAD_CLIENT_PATH = 'google/appengine/tools/bulkload_client.py' DIR_PATH = os.path.abspath(os.path.dirname( os.path.dirname(os.path.realpath(__file__)))) EXTRA_PATHS = [ DIR_PATH, os.path.join(DIR_PATH, 'lib', 'django'), os.path.join(DIR_PATH, 'lib', 'webob'), os.path.join(DIR_PATH, 'lib', 'yaml', 'lib'), ] if __name__ == '__main__': sys.path = EXTRA_PATHS + sys.path script_path = os.path.join(DIR_PATH, BULKLOAD_CLIENT_PATH) execfile(script_path, globals())
apache-2.0
3,248,498,652,674,230,300
33.254902
76
0.680595
false
3.346743
false
false
false
haanme/FinnBrain
pipeline_DTI_step1_DTIprep.py
1
4312
#!/usr/bin/env python #################################################################### # Python 2.7 script for executing FA, MD calculations for one case # #################################################################### # Directory where result data are located experiment_dir = '/Users/eija/Documents/FinnBrain/Jetro_DTI/pipelinedata' # Protocol that is applied in DTIprep DTIprep_protocol = '/Users/eija/Documents/FinnBrain/Jetro_DTI/pipelinedata/default_all.xml' # # Moves file to results folder, overwriting the existing file # # filename - file to be moved # out_prefix - subject specific prefix # def move_to_results(filename, out_prefix): import os import shutil outfile = experiment_dir + '/' + out_prefix + '/' + os.path.basename(filename) if os.path.isfile(outfile): os.remove(outfile) shutil.move(filename,outfile) return outfile # # Gunzips file to results folder, overwriting the existing file # # filename - file to be moved (.nii.gz) # out_prefix - subject specific prefix # def gunzip_to(filename, out_prefix, destination): import os import shutil from nipype.interfaces.base import CommandLine cmd = CommandLine('gunzip -f %s' % (filename)) print "gunzip NII.GZ:" + cmd.cmd cmd.run() basename = os.path.basename(filename[:len(filename)-3]) outfile = destination + '/' + basename if os.path.isfile(outfile): os.remove(outfile) shutil.move(filename[:len(filename)-3],outfile) return outfile # # Executes DTIPrep # # in_file - DTI file for QC (.nrrd) # out_prefix - subject specific prefix # def dtiprep(in_file, output_prefix): from glob import glob import os from nipype.interfaces.base import CommandLine from nipype.utils.filemanip import split_filename _, name, _ = split_filename(in_file) cmd = CommandLine('DTIPrepExec -c -d -f %s -n %s/%s_notes.txt -p %s -w %s' % ((experiment_dir + '/' + output_prefix),(experiment_dir + '/' + output_prefix),output_prefix,DTIprep_protocol,in_file)) print "DTIPREP:" + cmd.cmd cmd.run() qcfile = experiment_dir + '/' + output_prefix + '/' + name + '_QCed.nrrd' xmlfile = experiment_dir + '/' + output_prefix + '/' + name + '_XMLQCResult.xml' sumfile = experiment_dir + '/' + output_prefix + '/' + name + '_QCReport.txt' return qcfile, xmlfile, sumfile # # Converts NRRD to FSL Nifti format (Nifti that is gzipped) # # in_file - NRRD file to convert # out_prefix - subject specific prefix # def nrrd2nii(in_file, output_prefix): from os.path import abspath as opap from nipype.interfaces.base import CommandLine from nipype.utils.filemanip import split_filename _, name, _ = split_filename(in_file) out_vol = experiment_dir + '/' + output_prefix + '/' + ('%s.nii.gz' % name) out_bval = experiment_dir + '/' + output_prefix + '/' + ('%s.bval' % name) out_bvec = experiment_dir + '/' + output_prefix + '/' + ('%s.bvec' % name) cmd = CommandLine(('DWIConvert --inputVolume %s --outputVolume %s --outputBValues %s' ' --outputBVectors %s --conversionMode NrrdToFSL') % (in_file, out_vol, out_bval, out_bvec)) print "NRRD->NIFTI:" + cmd.cmd cmd.run() return opap(out_vol), opap(out_bval), opap(out_bvec) def check_dependencies(): import os files = ['DWIconvert', 'DTIPrepExec', 'gunzip'] for file in files: if os.system('which ' + file) != 0: return False return True def run(nrrd_file, args_subject): # DTIprep QC-tool qcfile, _, _ = dtiprep(nrrd_file, args_subject) # Convert NRRD->NII dwifile, bval_file, bvec_file = nrrd2nii(qcfile, args.subject) ############### # Main script # ############### from argparse import ArgumentParser import os if __name__ == "__main__": if not 
check_dependencies():
        import sys  # sys is not imported at module level, needed for sys.exit
        print 'DEPENDENCIES NOT FOUND'
        sys.exit(1)

    # Parse input arguments into args structure
    parser = ArgumentParser()
    parser.add_argument("--subject", dest="subject", help="subject id", required=True)
    args = parser.parse_args()

    nrrd_file = experiment_dir + os.sep + args.subject + os.sep + args.subject + 'DTI.nrrd'
    run(nrrd_file, args.subject)
mit
-2,800,876,285,888,440,000
33.222222
200
0.618506
false
3.384615
false
false
false
ionitadaniel19/testframeworksevolution
src/hybridframework/hybridtests.py
1
2051
''' Created on 01.06.2014 @author: ionitadaniel19 ''' def show_answer_hybrid_simple(driver,scenario): from modularframework.login import LoginPage from modularframework.testframeworks import TestFrameworksPage from config.utilities import get_simple_hybrid_driven_scenario_values from config.constants import * data_test=get_simple_hybrid_driven_scenario_values(scenario) login_page=None test_framework_page=None actual_answer=None for data_function in data_test: if data_function[FRAMEWORK_FUNCTIONS]==CELL_F_REMEMBER_ME: if login_page is None: login_page=LoginPage(driver) login_page.remember_me() if data_function[FRAMEWORK_FUNCTIONS]==CELL_F_LOGIN: if login_page is None: login_page=LoginPage(driver) if len(data_function[PARAMETERS])==2: username=data_function[PARAMETERS][0] pwd=data_function[PARAMETERS][1] login_page.login(username, pwd) else: raise Exception('For function %s there were not enough parameters specified %s.Expected 2.' %(data_function[FRAMEWORK_FUNCTIONS],data_function[PARAMETERS])) if data_function[FRAMEWORK_FUNCTIONS]==CELL_F_SELECT_ANSWER: if test_framework_page is None: test_framework_page=TestFrameworksPage(driver) if len(data_function[PARAMETERS])==1: answer=data_function[PARAMETERS][0] test_framework_page.select_answer(answer) else: raise Exception('For function %s there were not enough parameters specified %s.Expected 1.' %(data_function[FRAMEWORK_FUNCTIONS],data_function[PARAMETERS])) if data_function[FRAMEWORK_FUNCTIONS]==CELL_F_SHOW_ANSWER: if test_framework_page is None: test_framework_page=TestFrameworksPage(driver) actual_answer=test_framework_page.show_answer() return actual_answer
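# Illustrative usage sketch (added; not part of the original module). It assumes
# the driver argument is a Selenium WebDriver instance (an assumption based on
# the page-object style used here) and a hypothetical scenario label 'scenario1';
# real scenario names come from get_simple_hybrid_driven_scenario_values.
#
#   from selenium import webdriver
#   driver = webdriver.Firefox()
#   try:
#       answer = show_answer_hybrid_simple(driver, 'scenario1')
#       print answer
#   finally:
#       driver.quit()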
mit
-3,050,877,202,472,316,400
48.073171
177
0.643588
false
4.202869
true
false
false
Azure/azure-sdk-for-python
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/operations/_default_security_rules_operations.py
1
8972
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import TYPE_CHECKING import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpRequest, HttpResponse from azure.mgmt.core.exceptions import ARMErrorFormat from .. import models as _models if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class DefaultSecurityRulesOperations(object): """DefaultSecurityRulesOperations operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.network.v2020_03_01.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = _models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def list( self, resource_group_name, # type: str network_security_group_name, # type: str **kwargs # type: Any ): # type: (...) -> Iterable["_models.SecurityRuleListResult"] """Gets all default security rules in a network security group. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param network_security_group_name: The name of the network security group. 
:type network_security_group_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either SecurityRuleListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.SecurityRuleListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRuleListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-03-01" accept = "application/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('SecurityRuleListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/defaultSecurityRules'} # type: ignore def get( self, resource_group_name, # type: str network_security_group_name, # type: str default_security_rule_name, # type: str **kwargs # type: Any ): # type: (...) -> "_models.SecurityRule" """Get the specified default network security rule. :param resource_group_name: The name of the resource group. :type resource_group_name: str :param network_security_group_name: The name of the network security group. :type network_security_group_name: str :param default_security_rule_name: The name of the default security rule. 
:type default_security_rule_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: SecurityRule, or the result of cls(response) :rtype: ~azure.mgmt.network.v2020_03_01.models.SecurityRule :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["_models.SecurityRule"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2020-03-01" accept = "application/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'), 'defaultSecurityRuleName': self._serialize.url("default_security_rule_name", default_security_rule_name, 'str'), 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('SecurityRule', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}/defaultSecurityRules/{defaultSecurityRuleName}'} # type: ignore
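# Illustrative usage (added comment, not part of the generated client code). A
# NetworkManagementClient built elsewhere normally exposes this operation group;
# the attribute name below follows the usual autorest naming, and the resource
# group, NSG and rule names are placeholders.
#
#   for rule in client.default_security_rules.list("my-rg", "my-nsg"):
#       print(rule.name)
#   rule = client.default_security_rules.get("my-rg", "my-nsg", "AllowVnetInBound")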
mit
4,355,501,811,463,052,300
47.76087
236
0.650022
false
4.452605
true
false
false
cmateu/gaiaerror_py
gaiaerr.py
1
5490
#!/usr/bin/env python import numpy as np import scipy import sys import os import os.path import argparse import myutils #--------Version History---------------------------------------------------------------------------- # 16/nov/2017: parallax mission time scaling factor fixed (goes as (5./tm)**0.5 not **1.) # 11/oct/2016: VX,VY,VZ unit error fixed (inputs must be passes in mas/yr always, not muas/yr) #Gaia error code path gerr_path='/workd/cmateu/gaia_errors_color_tmission' #gerr_path='/Users/cmateu/trabajo/gaia/gaia_challenge2014_mgc3/gaiaerror_py/'+'gaia_errors_color_tmission' parser = argparse.ArgumentParser(description='Simulate Gaia errors + constant relative error in distance') parser.add_argument('infile',metavar='infile(.ne.dat)',help='Input File (x y z vx vy vz Mv VI)',nargs=1,action='store') parser.add_argument('-tm','--mission_t',help='Gaia mission time span in yr. Default 5.', action='store',default=5.,type=np.float) parser.add_argument('-v','--verbose',help='Verbose', action='store_true',default=False) #parse arguments args = parser.parse_args() infilen=args.infile[0] mission_t=args.mission_t if args.verbose: print 'Input file:', infilen print 'Gaia Mission time:',mission_t #Compute error scaling factor based on mission time (following Brown and deBruijne's prescriptions, priv. comm.) if mission_t<=10.: pfactor=(5./mission_t)**0.5 #nominal errors are for mission_t=5., so factor==1 in this case. For parallax err scales as t factor=(5./mission_t)**1.5 #nominal errors are for mission_t=5., so factor==1 in this case else: pfactor=(5./mission_t)**0.5 #If new Gaia is launched, scaling can be conservatively assumed to go as t factor=(5./mission_t)**1. #If new Gaia is launched, scaling can be conservatively assumed to go as t #Extra labels if mission_t==5: tlabel='' else: tlabel='%.1f' % (mission_t) #Print auxiliary input file for gaerr code.Aux files have to be unique so multiple threads can be run simultaneuosly auxinf=infilen+tlabel+'.aux.in' auxoutf=infilen+tlabel+'.aux.out' auxfilein=open(auxinf,'w') auxfilein.write('%s\n%s\n' % (infilen,auxoutf)) auxfilein.close() #Check whether auxfiles used by gaiaerr code exist in the present dir. Create symbolic links if not if not os.path.isfile('avdisk.dat'): if args.verbose: print 'Gaia error code aux files missing, creating symbolic links...' proc='ln -s %s/*.dat .' % (gerr_path) os.system(proc) #Run Gaia error code if args.verbose: print 'Running Gaia error code...' os.system('%s/compute_err_color_gaia_tmission < %s' % (gerr_path,auxinf)) #Read gaia error output file dat=scipy.genfromtxt(auxoutf) #Get true parallax, simulate gpar by adding gaussian X% error relerr_par=dat[:,5-1] xpar=dat[:,12-1] gpar=dat[:,25-1] #Rescale parallax err sigma_par_new=(gpar-xpar)*pfactor gpar=xpar+sigma_par_new sigma_par_ref=relerr_par*xpar #sigma used to draw random gaussian par error sigma_par_ref_new=sigma_par_ref*pfactor relerr_par_obs_new=sigma_par_ref_new/gpar #this is fobs. relerr_par is ftrue relerr_par_tru_new=sigma_par_ref_new/xpar #Recompute gvrad (a lot come out from Merce's code as ****) xvrad=dat[:,18-1] sigma_vrad=dat[:,34-1] gvrad=xvrad+np.random.normal(loc=0.,scale=sigma_vrad,size=xvrad.size) gl,gb,gmulstar,gmub=dat[:,26-1],dat[:,27-1],dat[:,29-1],dat[:,30-1] xl,xb,xmulstar,xmub=dat[:,13-1],dat[:,14-1],dat[:,16-1],dat[:,17-1] #Recompute uncertainties sigma_mulstar_new=(gmulstar-xmulstar)*factor sigma_mub_new=(gmub-xmub)*factor #Recompute 'observed proper motions' gmulstar=xmulstar+sigma_mulstar_new gmub=xmub+sigma_mub_new fp=1000. 
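#Added note: fp=1000 converts the parallax gpar from mas to muas before the
#helio_obj call below, matching the original comment that helio_obj expects
#parallax in muas while proper motions stay in mas/yr.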
#Parallax for my function must be in muas. Proper motions must be in mas/yr (as needed by bovy library) mydat=myutils.helio_obj(gl,gb,fp*gpar,gmulstar,gmub,gvrad,degree=True,flag_mulstar=True) #Replace cols appropiately in full matrix dat[:,25-1]=gpar dat[:,26-1]=gl dat[:,27-1]=gb dat[:,28-1]=mydat.Rhel dat[:,29-1]=gmulstar dat[:,30-1]=gmub dat[:,31-1]=gvrad #---Proper motion cols---- dat[:,33-1]=dat[:,33-1]*factor #sigma_mub dat[:,36-1]=dat[:,36-1]*factor #relerr_mub #Parallax error cols------ dat[:, 5-1]=relerr_par_tru_new #relerr_par dat[:,32-1]=relerr_par_obs_new #relerr_par_obs #---Cartesian coords dat[:,19-1]=-mydat.x dat[:,20-1]=mydat.y dat[:,21-1]=mydat.z dat[:,22-1]=-mydat.vx dat[:,23-1]=mydat.vy dat[:,24-1]=mydat.vz #Header and print formats head_l=['Av','xV','Gmag','Grvs','relerr_par','xX','xY','xZ','xVX','xVY','xVZ','xpar_mas','xl_deg','xb_deg','xRhel','xmuls_cosb_mas','xmub_mas','xvrad','gX','gY','gZ','gVX','gVY','gVZ','gpar_mas','gl_deg','gb_deg','gRhel','gmuls_cosb_mas','gmub_mas','gvrad','relerr_parobs','sig_mub','sig_vrad','VI','relerr_mub','relerr_vrad'] head_cols=np.arange(len(head_l))+1 hfmts='#%17s '+(len(head_l)-1)*'%18s ' hfmts=hfmts+'\n' fmts=(dat[0,:].size)*'%18.10f ' #Final output file name ofilen=infilen.replace('.ne.dat','')+'.ge'+tlabel+'.dat' #Print output file if args.verbose: print 'Printing outputfile',ofilen ofile=open(ofilen,'w') ofile.write('#Gaia mission time assumed %.1f yr, error scaling factor %.3f\n' % (mission_t,factor)) ofile.write(hfmts % tuple(head_cols)) ofile.write(hfmts % tuple(head_l)) scipy.savetxt(ofile,dat,fmt=fmts) #Remove aux files proc='rm -f %s %s' % (auxinf,auxoutf) os.system(proc) #proc='rm -f TableVr-Jun2015.dat avdloc.dat avspir.dat myfile.ne.dat allruns.dat avori.dat gfactor-Jun2013.dat rf_allsky.dat avdisk.dat avori2.dat myfile.ge.dat run.dat' if args.verbose: print 'Done'
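#Illustrative command line (added; not part of the original script). The input
#file name is a placeholder and must end in .ne.dat; with the default 5 yr
#mission time the output is written next to it as mysample.ge.dat:
# python gaiaerr.py -v -tm 5 mysample.ne.dat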
bsd-3-clause
1,554,841,752,632,622,300
37.661972
326
0.701093
false
2.610556
false
false
false
WilJoey/tn_ckan
ckan/lib/email_notifications.py
1
7858
''' Code for generating email notifications for users (e.g. email notifications for new activities in your dashboard activity stream) and emailing them to the users. ''' import datetime import re import pylons import ckan.model as model import ckan.logic as logic import ckan.lib.base as base from ckan.common import ungettext def string_to_timedelta(s): '''Parse a string s and return a standard datetime.timedelta object. Handles days, hours, minutes, seconds, and microseconds. Accepts strings in these formats: 2 days 14 days 4:35:00 (hours, minutes and seconds) 4:35:12.087465 (hours, minutes, seconds and microseconds) 7 days, 3:23:34 7 days, 3:23:34.087465 .087465 (microseconds only) :raises ckan.logic.ValidationError: if the given string does not match any of the recognised formats ''' patterns = [] days_only_pattern = '(?P<days>\d+)\s+day(s)?' patterns.append(days_only_pattern) hms_only_pattern = '(?P<hours>\d?\d):(?P<minutes>\d\d):(?P<seconds>\d\d)' patterns.append(hms_only_pattern) ms_only_pattern = '.(?P<milliseconds>\d\d\d)(?P<microseconds>\d\d\d)' patterns.append(ms_only_pattern) hms_and_ms_pattern = hms_only_pattern + ms_only_pattern patterns.append(hms_and_ms_pattern) days_and_hms_pattern = '{0},\s+{1}'.format(days_only_pattern, hms_only_pattern) patterns.append(days_and_hms_pattern) days_and_hms_and_ms_pattern = days_and_hms_pattern + ms_only_pattern patterns.append(days_and_hms_and_ms_pattern) for pattern in patterns: match = re.match('^{0}$'.format(pattern), s) if match: break if not match: raise logic.ValidationError('Not a valid time: {0}'.format(s)) gd = match.groupdict() days = int(gd.get('days', '0')) hours = int(gd.get('hours', '0')) minutes = int(gd.get('minutes', '0')) seconds = int(gd.get('seconds', '0')) milliseconds = int(gd.get('milliseconds', '0')) microseconds = int(gd.get('microseconds', '0')) delta = datetime.timedelta(days=days, hours=hours, minutes=minutes, seconds=seconds, milliseconds=milliseconds, microseconds=microseconds) return delta def _notifications_for_activities(activities, user_dict): '''Return one or more email notifications covering the given activities. This function handles grouping multiple activities into a single digest email. :param activities: the activities to consider :type activities: list of activity dicts like those returned by ckan.logic.action.get.dashboard_activity_list() :returns: a list of email notifications :rtype: list of dicts each with keys 'subject' and 'body' ''' if not activities: return [] if not user_dict.get('activity_streams_email_notifications'): return [] # We just group all activities into a single "new activity" email that # doesn't say anything about _what_ new activities they are. # TODO: Here we could generate some smarter content for the emails e.g. # say something about the contents of the activities, or single out # certain types of activity to be sent in their own individual emails, # etc. subject = ungettext( "1 new activity from {site_title}", "{n} new activities from {site_title}", len(activities)).format( site_title=pylons.config.get('ckan.site_title'), n=len(activities)) body = base.render( 'activity_streams/activity_stream_email_notifications.text', extra_vars={'activities': activities}) notifications = [{ 'subject': subject, 'body': body }] return notifications def _notifications_from_dashboard_activity_list(user_dict, since): '''Return any email notifications from the given user's dashboard activity list since `since`. ''' # Get the user's dashboard activity stream. 
context = {'model': model, 'session': model.Session, 'user': user_dict['id']} activity_list = logic.get_action('dashboard_activity_list')(context, {}) # Filter out the user's own activities., so they don't get an email every # time they themselves do something (we are not Trac). activity_list = [activity for activity in activity_list if activity['user_id'] != user_dict['id']] # Filter out the old activities. strptime = datetime.datetime.strptime fmt = '%Y-%m-%dT%H:%M:%S.%f' activity_list = [activity for activity in activity_list if strptime(activity['timestamp'], fmt) > since] return _notifications_for_activities(activity_list, user_dict) # A list of functions that provide email notifications for users from different # sources. Add to this list if you want to implement a new source of email # notifications. _notifications_functions = [ _notifications_from_dashboard_activity_list, ] def get_notifications(user_dict, since): '''Return any email notifications for the given user since `since`. For example email notifications about activity streams will be returned for any activities the occurred since `since`. :param user_dict: a dictionary representing the user, should contain 'id' and 'name' :type user_dict: dictionary :param since: datetime after which to return notifications from :rtype since: datetime.datetime :returns: a list of email notifications :rtype: list of dicts with keys 'subject' and 'body' ''' notifications = [] for function in _notifications_functions: notifications.extend(function(user_dict, since)) return notifications def send_notification(user, email_dict): '''Email `email_dict` to `user`.''' import ckan.lib.mailer if not user.get('email'): # FIXME: Raise an exception. return try: ckan.lib.mailer.mail_recipient(user['display_name'], user['email'], email_dict['subject'], email_dict['body']) except ckan.lib.mailer.MailerException: raise def get_and_send_notifications_for_user(user): # Parse the email_notifications_since config setting, email notifications # from longer ago than this time will not be sent. email_notifications_since = pylons.config.get( 'ckan.email_notifications_since', '2 days') email_notifications_since = string_to_timedelta( email_notifications_since) email_notifications_since = (datetime.datetime.now() - email_notifications_since) # FIXME: We are accessing model from lib here but I'm not sure what # else to do unless we add a get_email_last_sent() logic function which # would only be needed by this lib. email_last_sent = model.Dashboard.get(user['id']).email_last_sent activity_stream_last_viewed = ( model.Dashboard.get(user['id']).activity_stream_last_viewed) since = max(email_notifications_since, email_last_sent, activity_stream_last_viewed) notifications = get_notifications(user, since) # TODO: Handle failures from send_email_notification. for notification in notifications: send_notification(user, notification) # FIXME: We are accessing model from lib here but I'm not sure what # else to do unless we add a update_email_last_sent() # logic function which would only be needed by this lib. dash = model.Dashboard.get(user['id']) dash.email_last_sent = datetime.datetime.now() model.repo.commit() def get_and_send_notifications_for_all_users(): context = {'model': model, 'session': model.Session, 'ignore_auth': True, 'keep_email': True} users = logic.get_action('user_list')(context, {}) for user in users: get_and_send_notifications_for_user(user)
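# Worked examples for string_to_timedelta above (added as comments; not part of
# the original module). The values follow directly from the accepted formats:
#
#   string_to_timedelta('2 days')            -> datetime.timedelta(2)
#   string_to_timedelta('4:35:00')           -> datetime.timedelta(0, 16500)   # 4*3600 + 35*60 seconds
#   string_to_timedelta('7 days, 3:23:34')   -> datetime.timedelta(7, 12214)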
mit
3,647,540,538,416,515,000
33.61674
79
0.670781
false
3.917248
false
false
false
vnsofthe/odoo-dev
addons/rhwl_gene/rhwl_gene.py
1
55239
# -*- coding: utf-8 -*- from openerp import SUPERUSER_ID, api from openerp.osv import fields, osv from openerp.tools.translate import _ import openerp.addons.decimal_precision as dp import datetime,time import logging import os import shutil import re import urllib2 from openerp import tools from lxml import etree _logger = logging.getLogger(__name__) class rhwl_gene(osv.osv): STATE_SELECT_LIST=[ ('draft', u'草稿'), ('cancel', u'检测取消'), ('except', u'信息异常'), ('except_confirm', u'异常已确认'), ('confirm', u'信息已确认'), ('dna_except', u'DNA质检不合格'), ('dna_ok',u"DNA质检合格"), ('ok', u'位点数据已导入'), ('report', u'生成报告中'), ('report_done', u"报告已生成"), ("result_done", u"风险报告确认"), ("deliver", u"印刷厂已接收"), ('done', u'客户已收货') ] STATE_SELECT = dict(STATE_SELECT_LIST) _name = "rhwl.easy.genes" _order = "date desc,name asc" def _genes_type_get(self, cr, uid, ids, field_names, arg, context=None): res = {} maps = {} for id in ids: res[id] = {}.fromkeys(field_names, "") type_ids = self.pool.get("rhwl.easy.genes.type").search(cr, uid, [("genes_id.id", '=', id)], context=context) for i in self.pool.get("rhwl.easy.genes.type").browse(cr, uid, type_ids, context=context): res[id][maps.get(i.snp, i.snp)] = i.typ return res def _get_risk_detail(self,cr,uid,ids,field_names,arg,context=None): res={} for id in ids: res[id] = {}.fromkeys(field_names,"") obj = self.pool.get("rhwl.easy.genes").browse(cr,uid,id,context=context) for o in obj.risk: res[id][o.disease_id.code]=o.risk return res def _get_risk(self,cr,uid,ids,field_names,arg,context=None): res={} for id in ids: res[id]={"risk_count":0,"risk_text":""} risk_id = self.pool.get("rhwl.easy.gene.risk").search(cr,uid,[("genes_id.id","=",id),'|',("risk","=","高风险"),("risk","=","低能力")]) res[id]["risk_count"]=risk_id.__len__() t=[] for i in self.pool.get("rhwl.easy.gene.risk").browse(cr,uid,risk_id,context=context): t.append(i.disease_id.name) res[id]["risk_text"]=u"、".join(t) return res _columns = { "batch_no": fields.char(u"批次",select=True), "name": fields.char(u"基因样本编号", required=True, size=10), "date": fields.date(u"送检日期", required=True), "cust_name": fields.char(u"会员姓名", required=True, size=50), "sex": fields.selection([('T', u"男"), ('F', u"女")], u"性别", required=True), "identity": fields.char(u"身份证号", size=18), "mobile": fields.char(u"手机号码", size=15), "birthday": fields.date(u"出生日期"), "receiv_date": fields.datetime(u"接收时间"), "except_note": fields.text(u"信息异常内容"), "confirm_note": fields.text(u"信息异常反馈"), "state": fields.selection(STATE_SELECT_LIST, u"状态"), "note": fields.text(u"备注"), "gene_id": fields.char(u"基因编号", size=20), "language":fields.selection([("CN",u"中文"),("EN",u"英文"),("RU",u"俄文"),("VN",u"越南文"),("MY",u"马来语"),("ID",u"印度尼西亚语"),("IN",u"印度")],u"报告语种"), "cust_prop": fields.selection([("tjs", u"泰济生普通客户"), ("tjs_vip",u"泰济生VIP客户"),("employee", u"内部员工"), ("vip", u"内部VIP客户"), ("extra", u"外部人员")], string=u"客户属性"), "package":fields.selection([("01",u"标准版"),("03",u"尊享版"),("02",u"升级版+"),("04",u"优雅女士"),("06",u"快乐儿童"),("05",u"精英男士")],string=u"产品类别"), "package_id":fields.many2one("rhwl.tjs.genes.base.package",string=u"检测项目"), "img": fields.binary(u"图片"), "img_atta":fields.many2one("ir.attachment","IMG"), "img_new":fields.related("img_atta","datas",type="binary"), "log": fields.one2many("rhwl.easy.genes.log", "genes_id", "Log"), "typ": fields.one2many("rhwl.easy.genes.type", "genes_id", "Type"), "dns_chk": fields.one2many("rhwl.easy.genes.check", "genes_id", "DNA_Check"), "risk": fields.one2many("rhwl.easy.gene.risk", "genes_id", "Risk"), "pdf_file": fields.char(u"中文风险报告", size=100), 
"pdf_file_en": fields.char(u"英文风险报告", size=100), "pdf_file_other": fields.char(u"母语风险报告", size=100), "is_risk":fields.boolean(u"是高风险"), "is_child":fields.boolean(u"是儿童"), "risk_count": fields.function(_get_risk, type="integer", string=u'高风险疾病数', multi='risk'), "risk_text": fields.function(_get_risk, type="char", string=u'高风险疾病', multi='risk'), "snp_name":fields.char("SNP File",size=20), "batch_id":fields.many2one("rhwl.easy.genes.batch","Batch_id"), "export_img":fields.boolean("Export Img"), "ftp_upload":fields.boolean("FTP Upload"), "A1":fields.function(_get_risk_detail,type="char",string="A1",multi="risk_detail"), "A2":fields.function(_get_risk_detail,type="char",string="A2",multi="risk_detail"), "A3":fields.function(_get_risk_detail,type="char",string="A3",multi="risk_detail"), "A4":fields.function(_get_risk_detail,type="char",string="A4",multi="risk_detail"), "A5":fields.function(_get_risk_detail,type="char",string="A5",multi="risk_detail"), "A6":fields.function(_get_risk_detail,type="char",string="A6",multi="risk_detail"), "A7":fields.function(_get_risk_detail,type="char",string="A7",multi="risk_detail"), "A8":fields.function(_get_risk_detail,type="char",string="A8",multi="risk_detail"), "A9":fields.function(_get_risk_detail,type="char",string="A9",multi="risk_detail"), "A10":fields.function(_get_risk_detail,type="char",string="A10",multi="risk_detail"), "A11":fields.function(_get_risk_detail,type="char",string="A11",multi="risk_detail"), "A12":fields.function(_get_risk_detail,type="char",string="A12",multi="risk_detail"), "A13":fields.function(_get_risk_detail,type="char",string="A13",multi="risk_detail"), "A14":fields.function(_get_risk_detail,type="char",string="A14",multi="risk_detail"), "A15":fields.function(_get_risk_detail,type="char",string="A15",multi="risk_detail"), "A16":fields.function(_get_risk_detail,type="char",string="A16",multi="risk_detail"), "A17":fields.function(_get_risk_detail,type="char",string="A17",multi="risk_detail"), "A18":fields.function(_get_risk_detail,type="char",string="A18",multi="risk_detail"), "A19":fields.function(_get_risk_detail,type="char",string="A19",multi="risk_detail"), "A20":fields.function(_get_risk_detail,type="char",string="A20",multi="risk_detail"), "A21":fields.function(_get_risk_detail,type="char",string="A21",multi="risk_detail"), "A22":fields.function(_get_risk_detail,type="char",string="A22",multi="risk_detail"), "A23":fields.function(_get_risk_detail,type="char",string="A23",multi="risk_detail"), "B1":fields.function(_get_risk_detail,type="char",string="B1",multi="risk_detail"), "B2":fields.function(_get_risk_detail,type="char",string="B2",multi="risk_detail"), "B3":fields.function(_get_risk_detail,type="char",string="B3",multi="risk_detail"), "B4":fields.function(_get_risk_detail,type="char",string="B4",multi="risk_detail"), "B5":fields.function(_get_risk_detail,type="char",string="B5",multi="risk_detail"), "B6":fields.function(_get_risk_detail,type="char",string="B6",multi="risk_detail"), "B7":fields.function(_get_risk_detail,type="char",string="B7",multi="risk_detail"), "B8":fields.function(_get_risk_detail,type="char",string="B8",multi="risk_detail"), "B9":fields.function(_get_risk_detail,type="char",string="B9",multi="risk_detail"), "B10":fields.function(_get_risk_detail,type="char",string="B10",multi="risk_detail"), "B11":fields.function(_get_risk_detail,type="char",string="B11",multi="risk_detail"), "B12":fields.function(_get_risk_detail,type="char",string="B12",multi="risk_detail"), 
"B13":fields.function(_get_risk_detail,type="char",string="B13",multi="risk_detail"), "B14":fields.function(_get_risk_detail,type="char",string="B14",multi="risk_detail"), "B15":fields.function(_get_risk_detail,type="char",string="B15",multi="risk_detail"), "B16":fields.function(_get_risk_detail,type="char",string="B16",multi="risk_detail"), "C1":fields.function(_get_risk_detail,type="char",string="C1",multi="risk_detail"), "C2":fields.function(_get_risk_detail,type="char",string="C2",multi="risk_detail"), "C3":fields.function(_get_risk_detail,type="char",string="C3",multi="risk_detail"), "C4":fields.function(_get_risk_detail,type="char",string="C4",multi="risk_detail"), "C5":fields.function(_get_risk_detail,type="char",string="C5",multi="risk_detail"), "C6":fields.function(_get_risk_detail,type="char",string="C6",multi="risk_detail"), "C7":fields.function(_get_risk_detail,type="char",string="C7",multi="risk_detail"), "C8":fields.function(_get_risk_detail,type="char",string="C8",multi="risk_detail"), "C9":fields.function(_get_risk_detail,type="char",string="C9",multi="risk_detail"), "C10":fields.function(_get_risk_detail,type="char",string="C10",multi="risk_detail"), "C11":fields.function(_get_risk_detail,type="char",string="C11",multi="risk_detail"), "C12":fields.function(_get_risk_detail,type="char",string="C12",multi="risk_detail"), "D1":fields.function(_get_risk_detail,type="char",string="D1",multi="risk_detail"), "D2":fields.function(_get_risk_detail,type="char",string="D2",multi="risk_detail"), "D3":fields.function(_get_risk_detail,type="char",string="D3",multi="risk_detail"), "D4":fields.function(_get_risk_detail,type="char",string="D4",multi="risk_detail"), "D5":fields.function(_get_risk_detail,type="char",string="D5",multi="risk_detail"), "D6":fields.function(_get_risk_detail,type="char",string="D6",multi="risk_detail"), "D7":fields.function(_get_risk_detail,type="char",string="D7",multi="risk_detail"), "D8":fields.function(_get_risk_detail,type="char",string="D8",multi="risk_detail"), "D9":fields.function(_get_risk_detail,type="char",string="D9",multi="risk_detail"), "D10":fields.function(_get_risk_detail,type="char",string="D10",multi="risk_detail"), "D11":fields.function(_get_risk_detail,type="char",string="D11",multi="risk_detail"), "D12":fields.function(_get_risk_detail,type="char",string="D12",multi="risk_detail"), "D13":fields.function(_get_risk_detail,type="char",string="D13",multi="risk_detail"), "D14":fields.function(_get_risk_detail,type="char",string="D14",multi="risk_detail"), "E1":fields.function(_get_risk_detail,type="char",string="E1",multi="risk_detail"), "E2":fields.function(_get_risk_detail,type="char",string="E2",multi="risk_detail"), "E3":fields.function(_get_risk_detail,type="char",string="E3",multi="risk_detail"), "F1":fields.function(_get_risk_detail,type="char",string="F1",multi="risk_detail"), "F2":fields.function(_get_risk_detail,type="char",string="F2",multi="risk_detail"), } _sql_constraints = [ ('rhwl_easy_genes_name_uniq', 'unique(name)', u'样本编号不能重复!'), ] _defaults = { "state": 'draft', "cust_prop": "tjs", "is_risk":False, "is_child":False, "export_img":False, "language":"CN", "ftp_upload":False, "package":"01" } def init(self, cr): ids = self.search(cr,SUPERUSER_ID,[("package","=","A")]) self.write(cr,SUPERUSER_ID,ids,{"package":"01"}) ids = self.search(cr,SUPERUSER_ID,[("birthday","=",False)]) for i in ids: obj = self.browse(cr,SUPERUSER_ID,i) if obj.identity and len(obj.identity)==18: try: 
d=datetime.datetime.strptime(obj.identity[6:14],"%Y%m%d").strftime("%Y/%m/%d") self.write(cr,SUPERUSER_ID,i,{"birthday":d}) except: pass #ids = self.search(cr,SUPERUSER_ID,[("package_id","=",False)]) #for i in self.browse(cr,SUPERUSER_ID,ids): # pid = self.pool.get("rhwl.tjs.genes.base.package").search(cr,SUPERUSER_ID,[("code","=",i.package)]) # self.write(cr,SUPERUSER_ID,i.id,{"package_id":pid[0]}) def create(self, cr, uid, val, context=None): val["log"] = [[0, 0, {"note": u"资料新增", "data": "create"}]] if not val.get("batch_no",None): val["batch_no"]=datetime.datetime.strftime(datetime.datetime.today(),"%m-%d") if val.has_key("package") and (not val.has_key("package_id")): p_id = self.pool.get("rhwl.tjs.genes.base.package").search(cr,uid,[("code","=",val.get("package"))]) val["packaage_id"] = p_id[0] if val.has_key("package_id") and (not val.has_key("package")): p_obj = self.pool.get("rhwl.tjs.genes.base.package").browse(cr,uid,val.get("package_id")) val["package"] = p_obj.code return super(rhwl_gene, self).create(cr, uid, val, context=context) def write(self, cr, uid, id, val, context=None): if not context: context={} if val.has_key("package") and (not val.has_key("package_id")): p_id = self.pool.get("rhwl.tjs.genes.base.package").search(cr,uid,[("code","=",val.get("package"))]) p_obj = self.pool.get("rhwl.tjs.genes.base.package").browse(cr,uid,p_id,context=context) val["packaage_id"] = p_obj.id if val.has_key("package_id") and (not val.has_key("package")): p_obj = self.pool.get("rhwl.tjs.genes.base.package").browse(cr,uid,val.get("package_id")) val["package"] = p_obj.code if val.get("state","") in ("confirm",): obj = self.browse(cr,SUPERUSER_ID,id,context=context) identity = val.get("identity",obj.identity) if identity and len(identity)==18: try: birthday = datetime.datetime.strptime(identity[6:14],"%Y%m%d") day = datetime.datetime.today() - birthday if day.days<0 or day.days>54750: raise osv.except_osv(u"错误",u"身份证号码中的年月日不在合理范围。") except: raise osv.except_osv(u"错误",u"身份证号码中的年月日格式错误。") if val.get("identity") and len(val.get("identity"))==18: val["birthday"]=datetime.datetime.strptime(val.get("identity")[6:14],"%Y%m%d") if val.has_key("state"): val["log"] = [ [0, 0, {"note": u"状态变更为:" + self.STATE_SELECT.get(val.get("state")), "data": val.get("state"),"user_id":context.get("user_id",uid)}]] #如果重新变更为已收货,则PDF要重新上传 if val.get("state")=="done": val["ftp_upload"]=False if val.has_key("img"): #log_id = self.pool.get("rhwl.easy.genes.log").search(cr,uid,[("genes_id","in",id),("data","=","expimg")]) #if log_id: # self.pool.get("rhwl.easy.genes.log").write(cr,uid,log_id,{"data":"expimg,1"},context=context) val["log"] = [[0, 0, {"note": u"图片变更", "data": "img"}]] val["export_img"]=False if context.has_key("name"): obj_name = context["name"] else: obj = self.browse(cr,SUPERUSER_ID,id,context=context) obj_name = obj.name vals={ "name":obj_name, "datas_fname":obj_name+".jpg", "description":obj_name+" information to IMG", "res_model":"rhwl.easy.genes", "res_id":id[0], "create_date":fields.datetime.now, "create_uid":SUPERUSER_ID, "datas":val.get("img"), } atta_obj = self.pool.get('ir.attachment') #if obj.img_atta: # atta_obj.unlink(cr,SUPERUSER_ID,obj.img_atta.id) atta_id = atta_obj.create(cr,SUPERUSER_ID,vals) val["img_atta"]=atta_id val.pop("img") return super(rhwl_gene, self).write(cr, uid, id, val, context=context) def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True): if groupby.count("date")>0 and not orderby: orderby="date desc" else: 
orderby="id desc" res=super(rhwl_gene,self).read_group(cr,uid,domain,fields,groupby,offset,limit,context=context,orderby=orderby,lazy=lazy) return res def unlink(self, cr, uid, ids, context=None): if isinstance(ids, (long, int)): ids = [ids] if uid != SUPERUSER_ID: ids = self.search(cr, uid, [("id", "in", ids), ("state", "=", "draft")], context=context) return super(rhwl_gene, self).unlink(cr, uid, ids, context=context) def action_state_except(self, cr, uid, ids, context=None): if not context: context = {} if context.get("view_type") == "tree": return { 'type': 'ir.actions.act_window', 'view_type': 'form', 'res_model': 'rhwl.easy.genes.popup', 'view_mode': 'form', 'name': u"异常说明", 'target': 'new', 'context': {'col': 'except_note'}, 'flags': {'form': {'action_buttons': False}}} return self.write(cr, uid, ids, {"state": "except"}) def action_state_except_confirm(self, cr, uid, ids, context=None): if not context: context = {} if context.get("view_type") == "tree": return { 'type': 'ir.actions.act_window', 'view_type': 'form', 'res_model': 'rhwl.easy.genes.popup', 'view_mode': 'form', 'name': u"回馈说明", 'target': 'new', 'context': {'col': 'confirm_note'}, 'flags': {'form': {'action_buttons': False}}} return self.write(cr, uid, ids, {"state": "except_confirm"}) def action_state_confirm(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {"state": "confirm"}) def action_state_cancel(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {"state": "cancel"}) def action_state_dna(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {"state": "dna_except"}) def action_state_dnaok(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {"state": "dna_ok"}) def action_state_ok(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {"state": "ok"}) def action_state_reset(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {"state": "draft"}) def action_state_report(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {"state": "report"}) def action_state_result_done(self, cr, uid, ids, context=None): return self.write(cr, uid, ids, {"state": "result_done"}) def action_view_pdf(self, cr, uid, ids, context=None): return {'type': 'ir.actions.act_url', 'url': context.get("file_name", "/"), 'target': 'new'} #取得指定id列表的所有位点数据 def get_gene_type_list(self,cr,uid,ids,context=None): data={} for i in self.browse(cr,uid,ids,context=context): sex=i.sex.encode("utf-8") if i.sex.encode("utf-8") == 'F' else 'M' key = i.name.encode("utf-8") if not data.has_key(sex): data[sex]={} if not data[sex].has_key(key): data[sex][key]={"name":key, "cust_name":i.cust_name.encode("utf-8"), "language":i.language.encode("utf-8") } for t in i.typ: k = t.snp.encode("utf-8") data[sex][key][k]=(t.typ).encode("utf-8").replace("/","") return data #导出样本信息图片 def export_genes_img(self,cr,uid,context=None): upload_path = os.path.join(os.path.split(__file__)[0], "static/local/upload/tjs") d=os.path.join(upload_path,u"样本信息图片") if not os.path.exists(d): os.mkdir(d) all_ids = self.search(cr,uid,[("cust_prop","in",["tjs","tjs_vip"]),("export_img","=",False)],context=context) #pic_ids = self.search(cr,uid,[("cust_prop","in",["tjs","tjs_vip"]),("export_img","=",False)],context=context) #for i in pic_ids: # all_ids.remove(i) filestore=tools.config.filestore(cr.dbname) for i in self.browse(cr,uid,all_ids,context=context): if not i.img_atta:continue if len(i.date.split("/"))>1: tname = ".".join(i.date.split('/')[1:]) + u"会_图片" else: tname = 
".".join(i.date.split('-')[1:]) + u"会_图片" tname = os.path.join(d,tname) if not os.path.exists(tname): os.mkdir(tname) att_obj = self.pool.get('ir.attachment').browse(cr,uid,i.img_atta.id,context=context) if not os.path.exists(os.path.join(filestore,att_obj.store_fname)):continue if (not os.path.exists(os.path.join(tname,i.name+u"_"+i.cust_name+u".jpg"))) or os.stat(os.path.join(filestore,att_obj.store_fname)).st_size != os.stat(os.path.join(tname,i.name+u"_"+i.cust_name+u".jpg")).st_size: shutil.copy(os.path.join(filestore,att_obj.store_fname),os.path.join(tname,i.name+u"_"+i.cust_name+u".jpg")) self.write(cr,uid,i.id,{"log":[[0,0,{"note":u"图片导出","data":"expimg"}]],"export_img":True}) #导出样本位点数据到报告生成服务器 def create_gene_type_file(self,cr,uid,ids,context=None): self.pool.get("rhwl.genes.picking").export_box_genes(cr,uid,context=context) #先导出已经分箱的样本 self.export_genes_img(cr,uid,context=context) #导出图片信息 cr.execute("select package,count(*) from rhwl_easy_genes where state='ok' group by package") for i in cr.fetchall(): self.create_gene_type_file_package(cr,uid,ids,i[0],context=context) def create_gene_type_file_package(self, cr, uid, ids, package,context=None): ids = self.search(cr, uid, [("state", "=", "ok"),("package","=",package),("typ","!=",False)], order="batch_no,name",limit=200,context=context) if not ids:return if isinstance(ids, (long, int)): ids = [ids] data = self.get_gene_type_list(cr,uid,ids,context=context) if package=="01": snp_name = "snp_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S") fpath = os.path.join(os.path.split(__file__)[0], "static/remote/snp") else: pid = self.pool.get("rhwl.tjs.genes.base.package").search(cr,SUPERUSER_ID,[("code","=",package)]) pobj = self.pool.get("rhwl.tjs.genes.base.package").browse(cr,SUPERUSER_ID,pid,context=context) snp_name = pobj.report_no+"_"+datetime.datetime.now().strftime("%Y%m%d%H%M%S") fpath = os.path.join(os.path.split(__file__)[0], "static/tjs_new_remote/snp") fname = os.path.join(fpath, snp_name + ".txt") header=[] f = open(fname, "w+") for s in ["F","M"]: if not data.has_key(s):continue data_list=data[s].keys() data_list.sort() for k in data_list: line_row=[data[s][k]["name"],data[s][k]["cust_name"],s,data[s][k]["language"]] if not header: header = data[s][k].keys() header.remove("name") header.remove("cust_name") header.remove("language") header.sort() f.write("编号\t姓名\t性别\t语种\t" + "\t".join(header) + '\n') for i in header: line_row.append(data[s][k][i]) f.write("\t".join(line_row) + '\n') f.close() os.system("chmod 777 "+fname) self.action_state_report(cr, uid, ids, context=context) self.write(cr,uid,ids,{"snp_name":snp_name},context=context) js={ "first":"易感样本检测结果转报告生成:", "keyword1":"即时", "keyword2":"本次转出样本%s笔,等待生成报告。" %(len(ids),), "keyword3":fields.datetime.now(), "remark":"以上数据仅供参考,详细情况请登录Odoo查询。" } self.pool.get("rhwl.weixin.base").send_template2(cr,uid,js,"is_lib_import",context=context) #发送文件大小错误微信通知 def pdf_size_error(self,cr,uid,file,lens,context=None): s=os.stat(file).st_size if s/1024/1024<16 or ( (lens<10 and s/1024/1024>50) or (lens>=10 and s/1024/1024>90) ): js={ "first":"易感样本报告接收出错:", "keyword1":"即时", "keyword2":"样本报告%s文件大小不正确。" %(os.path.split(file)[-1],), "keyword3":fields.datetime.now(), "remark":"以上数据仅供参考,详细情况请登录服务器查询。" } self.pool.get("rhwl.weixin.base").send_template2(cr,uid,js,"is_jobmanager",context=context) return True else: return False #接收风险报告 def get_gene_pdf_file(self, cr, uid, context=None): #_logger.warn("cron job get_gene_pdf_file") pdf_files=[] model_path=os.path.split(__file__)[0] fpath = 
os.path.join(model_path, "static/remote/report") for f in os.listdir(fpath): pdf_files.append(os.path.join(fpath,f)) fpath = os.path.join(model_path, "static/tjs_new_remote/report") for f in os.listdir(fpath): pdf_files.append(os.path.join(fpath,f)) tpath = os.path.join(model_path, "static/local/report") pdf_count = 0 last_week = time.time() - 60*60*24*3 self.pool.get("rhwl.genes.picking")._clear_picking_dict() for newfile in pdf_files: #newfile = os.path.join(fpath, f) if not os.path.isdir(newfile):continue for f1 in os.listdir(newfile): name_list = re.split("[_\.]",f1) #分解文件名称 #文件名分为六种模式 if self.pdf_size_error(cr,uid,os.path.join(newfile, f1),len(name_list),context=context): continue if len(name_list)==2: f2 = ".".join(name_list) shutil.move(os.path.join(newfile, f1), os.path.join(tpath, f2)) ids = self.search(cr, uid, [("name", "=", f2.split(".")[0])]) if ids: self.write(cr, uid, ids, {"pdf_file": "rhwl_gene/static/local/report/" + f2, "state": "report_done"}) pdf_count += 1 elif len(name_list)==3: f2 = ".".join([name_list[0],name_list[2]]) shutil.move(os.path.join(newfile, f1), os.path.join(tpath, f2)) ids = self.search(cr, uid, [("name", "=", f2.split(".")[0])]) if ids: self.write(cr, uid, ids, {"pdf_file": "rhwl_gene/static/local/report/" + f2, "state": "report_done"}) pdf_count += 1 elif len(name_list)==4: #23999945_张三_CN.pdf lang = name_list[2] col_name="pdf_file" if lang=="CN": f2 = ".".join([name_list[0],name_list[3]]) else: f2 = ".".join([name_list[0]+"_"+name_list[2],name_list[3]]) if lang=="EN": col_name = "pdf_file_en" else: col_name = "pdf_file_other" shutil.move(os.path.join(newfile, f1), os.path.join(tpath, f2)) ids = self.search(cr, uid, [("name", "=", f2.split(".")[0])]) if ids: self.write(cr, uid, ids, {col_name: "rhwl_gene/static/local/report/" + f2, "state": "report_done"}) pdf_count += 1 elif len(name_list)==6 or len(name_list)==10: gene_no = name_list[2] if len(f.split("_"))==3: picking_no = f.split("_")[1] else: picking_no = self.pool.get("rhwl.genes.picking")._get_picking_from_genes(cr,uid,gene_no,context=context) if not picking_no:continue ppath=os.path.join(tpath,picking_no) if not os.path.exists(ppath): os.mkdir(ppath) shutil.move(os.path.join(newfile, f1), os.path.join(ppath, f1)) if os.path.getmtime(newfile) < last_week: os.rmdir(newfile) cr.commit() if pdf_count>0: js={ "first":"易感样本报告接收:", "keyword1":"即时", "keyword2":"本次接收样本报告%s本。" %(pdf_count,), "keyword3":fields.datetime.now(), "remark":"以上数据仅供参考,详细情况请登录Odoo查询。" } self.pool.get("rhwl.weixin.base").send_template2(cr,uid,js,"is_jobmanager",context=context) #分析风险数据 fpath = os.path.join(model_path, "static/remote/excel") tpath = os.path.join(model_path, "static/local/excel") for f in os.listdir(fpath): if f.split(".")[-1]!="xls":continue if f.split("_")[0]=="box":continue if os.path.isfile(os.path.join(tpath, f)):os.remove(os.path.join(tpath, f)) #删除目标位置相同的文件 shutil.move(os.path.join(fpath, f), os.path.join(tpath, f)) fs = open(os.path.join(tpath, f),"r") res = fs.readlines() fs.close() risk = res[0].replace("\n","").split("\t")[3:] disease = self.pool.get("rhwl.gene.disease") disease_dict={} #疾病在表中的id dict_index=3 #检查风险报告中的疾病基本数据 for r in risk: if not r:continue r_id = disease.search(cr,uid,[("name","=",r.decode("utf-8"))]) if not r_id: shutil.move(os.path.join(tpath, f),os.path.join(fpath, f)) _logger.warn(u"疾病名称[%s]在基本数据中不存在。" %(r.decode("utf-8"),)) return disease_dict[dict_index]=[r,r_id[0]] dict_index +=1 for l in res[1:]: is_risk=False l = l.replace("\n","").split("\t") gene_id = 
self.pool.get("rhwl.easy.genes").search(cr,uid,[("name","=",l[0].decode("utf-8"))]) if not gene_id: _logger.warn(u"样本编号[%s]在基本数据中不存在。" %(l[0].decode("utf-8"),)) else: risk_id=self.pool.get("rhwl.easy.gene.risk").search(cr,uid,[("genes_id","in",gene_id)]) if risk_id: self.pool.get("rhwl.easy.gene.risk").write(cr,uid,risk_id,{"active":False}) val=[] for k in disease_dict.keys(): val.append([0, 0, {"disease_id": disease_dict[k][1], "risk": l[k]}]) if l[k]=="高风险" or l[k]=="低能力":is_risk=True self.pool.get("rhwl.easy.genes").write(cr,uid,gene_id,{"is_risk":is_risk,"risk":val}) self.pool.get("rhwl.genes.picking").create_box(cr,uid,context=context) #接收完风险数据以后,重新调用分箱 #样本状态数据微信通知 def weixin_notice_template2(self,cr,uid,context=None): s_date,e_date = self.date_between(20) #统计今日收样笔数 cr.execute("""select count(*) from rhwl_easy_genes where cust_prop in ('tjs','tjs_vip') and create_date::date = now()::date""") for i in cr.fetchall(): today_count = i[0] #下次送货数据 pick_count=0 pick_id = self.pool.get( "rhwl.genes.picking").search(cr,uid,[("date",">=",datetime.datetime.today()),("state","!=","done")],order="date",limit=1) if pick_id: pick_obj = self.pool.get( "rhwl.genes.picking").browse(cr,uid,pick_id,context=context) pick_count = pick_obj.files #本期样本笔数 idscount = self.search_count(cr,uid,[("date",">=",s_date),("date","<=",e_date),("cust_prop","in",["tjs","tjs_vip"])],context=context) cr.execute("""with d as (select batch_no,state,count(*) as c,date from rhwl_easy_genes where cust_prop in ('tjs','tjs_vip') group by batch_no,state,date order by batch_no) select * from d dd where not exists(select * from d where state='done' and d.batch_no=dd.batch_no)""") v_count0=0 v_count1=0 v_count2=0 v_count3=0 v_count4=0 v_count5 = 0 dna_rate={} not_dna_except={} #记录不报告质检比率的批次 wait_receiv=[] for i in cr.fetchall(): if not dna_rate.has_key(i[0]): dna_rate[i[0]]={"count":0,"except":0} dna_rate[i[0]]["count"] =dna_rate[i[0]]["count"]+i[2] if i[1]=='draft': batch_id = self.pool.get("rhwl.easy.genes.batch").search(cr,uid,[("name","=",i[0]),("post_date","!=",False)]) if not batch_id: v_count0 += i[2] #待收件 wait_receiv.append(str(i[2])+"/"+".".join((i[3].split("-")[1:]))) not_dna_except[i[0]]=True #样本是草稿,但如果已经设定实验收件日期,则数据归为实验中 batch_id = self.pool.get("rhwl.easy.genes.batch").search(cr,uid,[("name","=",i[0]),("lib_date","!=",False)]) if batch_id: v_count1 += i[2] #待检测 elif i[1] in ['except','except_confirm','confirm']: v_count1 += i[2] #待检测 not_dna_except[i[0]]=True elif i[1] in ['dna_ok','ok','report']: v_count2 += i[2] #待生成报告 elif i[1] == 'dna_except': v_count3 += i[2] #质检异常 dna_rate[i[0]]["except"] = dna_rate[i[0]]["except"] + i[2] elif i[1] in ['report_done',"result_done","deliver",]: v_count4 += i[2] #待送货 elif i[1] in ['done']: v_count5 += i[2] #已完成 except_rate=[] for k,v in dna_rate.items(): if not not_dna_except.get(k,False): except_rate.append(k.encode("utf-8")+"="+str(v["except"])+"/"+str(v["count"])) js={ "first":"易感样本状况统计:", "keyword1":"本期从(%s-%s)"%(s_date.strftime("%Y/%m/%d"),e_date.strftime("%Y/%m/%d")), "keyword2":"今日送样%s,在途%s%s,实验中%s,排版中%s,已出报告%s(质检不合格%s,待印刷%s,下次送货%s)。本期总计%s笔。" %(today_count,v_count0,("["+",".join(wait_receiv)+"]" if wait_receiv else ""),v_count1,v_count2,v_count4+v_count3+v_count5,v_count3,v_count4-pick_count,pick_count,idscount), "keyword3":(datetime.datetime.utcnow() + datetime.timedelta(hours=8)).strftime("%Y/%m/%d %H:%M:%S"), "remark":"以上数据仅供参考,详细情况请登录Odoo查询。" } self.pool.get("rhwl.weixin.base").send_template2(cr,uid,js,"is_notice",context=context) #样本实验进度微信提醒 def 
weixin_notice_template3(self,cr,uid,context=None): cr.execute("""select date,count(*) c from rhwl_easy_genes where cust_prop in ('tjs','tjs_vip') and state in ('confirm','except_confirm','draft','except') and date<=(now() - interval '4 day')::date group by date""") res=[] for i in cr.fetchall(): res.append("日期:"+str(i[0])+",样本数:"+str(i[1])) if res: js={ "first":"易感样本实验进度提醒:", "keyword1":"4天之前送达样本", "keyword2":";".join(res), "keyword3":(datetime.datetime.utcnow() + datetime.timedelta(hours=8)).strftime("%Y/%m/%d %H:%M:%S"), "remark":"亲爱的实验同事,以上样本,须在本周日之前出结果,否则就会超出和客户约定的送货周期。收到本条消息时,请及时和运营部同事确认,谢谢。" } self.pool.get("rhwl.weixin.base").send_template2(cr,uid,js,"is_library",context=context) content="易感样本实验进度提醒,统计周期:%s,提醒说明:%s,%s"%(js["keyword1"],js["keyword2"],js["remark"]) self.pool.get("rhwl.weixin.base").send_qy_text(cr,uid,'rhwlyy',"is_library",content,context=context) #根据中间日期计算本周期的起迄日期 def date_between(self,days=20): today = datetime.datetime.today() if today.day<=days: s_date = today-datetime.timedelta(days=today.day+1) s_date = datetime.datetime(s_date.year,s_date.month,days+1) e_date = today else: s_date = datetime.datetime(today.year,today.month,days+1) e_date = today return s_date,e_date def action_ftp_upload(self,cr,uid,ids,context=None): self.ftp_uploads(cr,uid,ids,context=context) def ftp_uploads(self,cr,uid,ids,context=None): ids = self.search(cr,uid,[("state","=","done"),("ftp_upload","=",False),("cust_prop","in",["tjs","tjs_vip"])],limit=100) for i in self.browse(cr,uid,ids,context=context): os.system("scp /data/odoo/file/report/%s*.pdf rhwlwz@119.39.48.126:/home/rhwlwz/ftp/"%(i.name.encode("utf-8"),)) self.write(cr,uid,i.id,{"ftp_upload":True}) #导出样本位点数据到报告生成服务器 def temp_export(self, cr, uid, ids, context=None): ids = self.search(cr, uid, [("name", "in", ['3599999021','3599999843','3599998984','3599999187','3599999887'])], order="batch_no,name",limit=200,context=context) if not ids:return if isinstance(ids, (long, int)): ids = [ids] data = self.get_gene_type_list(cr,uid,ids,context=context) snp_name = "snp_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S") fpath = os.path.join(os.path.split(__file__)[0], "static/remote/snp/hebin") fname = os.path.join(fpath, snp_name + ".txt") header=[] f = open(fname, "w+") for s in ["F","M"]: if not data.has_key(s):continue data_list=data[s].keys() data_list.sort() for k in data_list: line_row=[data[s][k]["name"],data[s][k]["cust_name"],s] if not header: header = data[s][k].keys() header.remove("name") header.remove("cust_name") header.sort() f.write("编号\t姓名\t性别\t" + "\t".join(header) + '\n') for i in header: line_row.append(data[s][k][i]) f.write("\t".join(line_row) + '\n') f.close() #在线接收T客户样本信息 def action_get_online_genes(self,cr,uid,ids,context=None): today= datetime.datetime.today().strftime("%Y-%m-%d") before_day = (datetime.datetime.today()+datetime.timedelta(days=-3)).strftime("%Y-%m-%d") u = urllib2.urlopen("http://genereport.taiji-sun.com/file/API/SampleInfoToGenetalks?beginTime="+before_day+"&endTime="+today) data = u.readlines() if not data:return content = eval(data[0]) package={ "01":"01", "02":"02", "03":"03", "04":"04", "05":"05", "06":"06" } batch_no={} for i in content: id = self.search(cr,uid,[("name","=",i["SampleCode"])],context=context) if id:continue if not package.has_key(i["SampleCatalogCode"]): raise osv.except_osv("Error",u"检测代号[%s]名称[%s]在系统未设置,不可以转入。"%(i["SampleCatalogCode"],i["SampleCatalogName"])) sex = i["Gender"]==u"男" and "T" or "F" date = i["CreatedTime"].split(" ")[0] cust_prop = i["IsVIP"]==u"否" and 
"tjs" or "tjs_vip" idt = i["IDNumber"] is_child = True if len(idt)==18 and int(idt[6:10])>=(datetime.datetime.today().year-12) and int(idt[6:10])<(datetime.datetime.today().year) else False birthday = False if idt and len(idt)==18: try: birthday = datetime.datetime.strptime(idt[6:14],"%Y%m%d").strftime("%Y/%m/%d") except: pass if not batch_no.has_key(date): batch_no[date]={} if batch_no.get(date).get(package.get(i["SampleCatalogCode"])): max_no=batch_no.get(date).get(package.get(i["SampleCatalogCode"])) else: cr.execute("select max(batch_no) from rhwl_easy_genes where cust_prop in ('tjs','tjs_vip') and package='%s' "%(package.get(i["SampleCatalogCode"]))) max_no=None for no in cr.fetchall(): max_no = no[0] if not max_no:max_no=package.get(i["SampleCatalogCode"])+"-000" if package.get(i["SampleCatalogCode"])=="01": max_no=str(int(max_no)+1).zfill(3) else: max_no=max_no[0:3]+str(int(max_no[3:])+1).zfill(3) batch_no[date][package.get(i["SampleCatalogCode"])]=max_no self.create(cr,uid,{"name":i["SampleCode"],"receiv_date":i["RecivedTime"],"identity":i["IDNumber"],"cust_name":i["ClientName"],"sex":sex,"date":date,"cust_prop":cust_prop,"is_child":is_child,"birthday":birthday,"package":package.get(i["SampleCatalogCode"]),"batch_no":max_no},context=context) #样本对象操作日志 class rhwl_gene_log(osv.osv): _name = "rhwl.easy.genes.log" _order = "date desc" _columns = { "genes_id": fields.many2one("rhwl.easy.genes", "Genes ID",select=True), "date": fields.datetime(u"时间"), "user_id": fields.many2one("res.users", u"操作人员"), "note": fields.text(u"作业说明"), "data": fields.char("Data") } _defaults = { "date": fields.datetime.now, "user_id": lambda obj, cr, uid, context: uid, } #疾病检测结果对象 class rhwl_gene_check(osv.osv): _name = "rhwl.easy.genes.check" _columns = { "genes_id": fields.many2one("rhwl.easy.genes", "Genes ID",select=True), "date": fields.date(u"收样日期"), "dna_date": fields.date(u"提取日期"), "concentration": fields.char(u"浓度", size=5, help=u"参考值>=10"), "lib_person": fields.char(u"实验操作人", size=10), "od260_280": fields.char("OD260/OD280", size=5, help=u"参考值1.8-2.0"), "od260_230": fields.char("OD260/OD230", size=5, help=u"参考值>=2.0"), "chk_person": fields.char(u"检测人", size=10), "data_loss": fields.char(u"数据缺失率", size=6, help=u"参考值<1%"), "loss_person": fields.char(u"判读人", size=10), "loss_date": fields.date(u"判读日期"), "active": fields.boolean("Active"), } _defaults = { "active": True } #疾病位点数据对象 class rhwl_gene_type(osv.osv): _name = "rhwl.easy.genes.type" _columns = { "genes_id": fields.many2one("rhwl.easy.genes", "Genes ID",select=True), "snp": fields.char("SNP", size=20), "typ": fields.char("Type", size=10), "active": fields.boolean("Active"), } _defaults = { "active": True } #疾病风险对象 class rhwl_gene_risk(osv.osv): _name = "rhwl.easy.gene.risk" _columns = { "genes_id": fields.many2one("rhwl.easy.genes", "Genes ID",select=True), "disease_id": fields.many2one("rhwl.gene.disease", string=u"疾病名"), "risk": fields.char(u"风险", size=20), "active": fields.boolean("Active"), } _defaults = { "active": True } #报告书信息异常 class rhwl_report_except(osv.osv): _name = "rhwl.easy.genes.report.except" _columns={ "name":fields.many2one("rhwl.easy.genes",u"基因样本编号",required=True), "cust_name": fields.char(u"会员姓名(原)", readonly=True, size=10), "sex": fields.selection([('T', u"男"), ('F', u"女")], u"性别(原)", readonly=True), "identity": fields.char(u"身份证号(原)", size=18,readonly=True), "cust_name_n": fields.char(u"会员姓名(新)", required=True, size=10), "sex_n": fields.selection([('T', u"男"), ('F', u"女")], u"性别(新)", required=True), "identity_n": 
fields.char(u"身份证号(新)", size=18), "state":fields.selection([("draft",u"草稿"),("confirm",u"确认")]), "user_id":fields.many2one("res.users",u"异常确认人",required=True), "date":fields.date(u"确认日期",required=True), "note":fields.text(u"备注"), } _defaults={ "state":'draft', } @api.onchange("name") def onchange_name(self): self.cust_name = self.name.cust_name self.sex = self.name.sex self.identity = self.name.identity self.cust_name_n = self.name.cust_name self.sex_n = self.name.sex self.identity_n = self.name.identity def create(self,cr,uid,val,context=None): obj = self.pool.get("rhwl.easy.genes").browse(cr,uid,val.get("name"),context=context) val["cust_name"]=obj.cust_name val["sex"] = obj.sex val["identity"] = obj.identity return super(rhwl_report_except,self).create(cr,uid,val,context=context) def action_state_confirm(self,cr,uid,ids,context=None): self.write(cr,uid,ids,{"state":"confirm"},context=context) obj = self.browse(cr,uid,ids,context=context) if obj.cust_name != obj.cust_name_n or obj.sex != obj.sex_n or obj.identity != obj.identity_n: self.pool.get("rhwl.easy.genes").write(cr,uid,obj.name.id,{"cust_name":obj.cust_name_n,"sex":obj.sex_n,"identity":obj.identity_n},context=context) if obj.name.state.encode("utf-8") in ('report','report_done',"result_done","deliver",'done'): self.pool.get("rhwl.easy.genes").write(cr,uid,obj.name.id,{"state":"ok"},context=context) #批号时间段统计 class rhwl_gene_batch(osv.osv): _name = "rhwl.easy.genes.batch" _order = "name desc" def str2date(self,str): if not str:return None return datetime.datetime.strptime(str.split(" ")[0],"%Y-%m-%d") def _get_genes1(self,cr,uid,ids,field_names,arg,context=None): res=dict.fromkeys(ids,{}) genes_table = self.pool.get("rhwl.easy.genes") log_table = self.pool.get("rhwl.easy.genes.log") for i in ids: res[i] = dict.fromkeys(field_names,None) gene_id = genes_table.search(cr,uid,[("batch_id","=",i)],context=context) if not gene_id:continue gene_obj = genes_table.browse(cr,uid,gene_id[0],context=context) res[i]["date"] = self.str2date(gene_obj.date) res[i]["qty"] = len(gene_id) res[i]["imgs"] = genes_table.search_count(cr,uid,[("batch_id","=",i),("img_atta","!=",False)],context=context) log_id = log_table.search(cr,uid,[("genes_id","in",gene_id),("data","=","DNA")],order="date desc",context=context) if log_id: log_id = log_id[0] log_obj = log_table.browse(cr,uid,log_id,context=context) res[i]["dna_date"] = self.str2date(log_obj.date) else: res[i]["dna_date"] = None log_id = log_table.search(cr,uid,[("genes_id","in",gene_id),("data","=","SNP")],order="date desc",context=context) if log_id: log_id = log_id[0] log_obj = log_table.browse(cr,uid,log_id,context=context) res[i]["snp_date"] = self.str2date(log_obj.date) else: res[i]["snp_date"] = None gene_id = genes_table.search(cr,uid,[("batch_id","=",i),("state","=","dna_except")],context=context) res[i]["dna_qty"] = len(gene_id) res[i]["dna_rate"] = str(round((res[i]["dna_qty"]*1.0)/res[i]["qty"],4)*100)+"%" cr.execute("select name,lib_date from rhwl_easy_genes_batch where id="+str(i)) obj = cr.fetchall() batch_no,lib_date = obj[0] if lib_date:lib_date = self.str2date(lib_date) if res[i]["date"] and lib_date: res[i]["express_days"] = (lib_date - res[i]["date"]).days if lib_date and res[i]["snp_date"]: res[i]["library_days"] = (res[i]["snp_date"] - lib_date).days wd=lib_date.weekday() if res[i]["library_days"]<=7-wd: res[i]["library_result"] = 3 elif res[i]["library_days"]<=(7-wd)+7: res[i]["library_result"] = 2 elif res[i]["library_days"]<=(7-wd)+14: res[i]["library_result"] = 1 else: 
res[i]["library_result"] = 0 line_id = self.pool.get("rhwl.genes.picking.line").search(cr,uid,[("batch_no","=",batch_no)],order="id desc",context=context) if line_id: line_id = line_id[0] line_obj = self.pool.get("rhwl.genes.picking.line").browse(cr,uid,line_id,context=context) res[i]["send_date"] = self.str2date(line_obj.picking_id.date) res[i]["real_date"] = self.str2date(line_obj.picking_id.real_date) if res[i]["date"] and res[i]["real_date"]: res[i]["all_days"] = (res[i]["real_date"] - res[i]["date"]).days return res _columns={ "name":fields.char(u"批次",required=True), "date":fields.function(_get_genes1,type="date",string=u"送检日期",store=True,multi="get_genes1"), "qty":fields.function(_get_genes1,type="integer",string=u"送检数量",multi="get_genes1"), "post_date":fields.date(u'快递收件日期'), "lib_date":fields.date(u'实验签收日期'), "express_days":fields.function(_get_genes1,type="integer",arg="name",string=u"收样天数",multi="get_genes1"), "dna_date":fields.function(_get_genes1,type="date",string=u"质检确认日期",multi="get_genes1"), "snp_date":fields.function(_get_genes1,type="date",string=u"位点导入日期",multi="get_genes1"), "dna_qty":fields.function(_get_genes1,type="integer",string=u"质检不合格数量",multi="get_genes1"), "dna_rate":fields.function(_get_genes1,type="char",string=u"质检不合格比率(%)",multi="get_genes1"), "library_days":fields.function(_get_genes1,type="integer",string=u"实验天数",multi="get_genes1"), "library_result":fields.function(_get_genes1,type="integer",string=u"实验进度",multi="get_genes1"), "send_date":fields.function(_get_genes1,type="date",string=u"预计发货日期",multi="get_genes1"), "real_date":fields.function(_get_genes1,type="date",string=u"实际发货日期",multi="get_genes1"), "all_days":fields.function(_get_genes1,type="integer",string=u"送货周期",multi="get_genes1"), "imgs":fields.function(_get_genes1,type="integer",string=u"已拍照数",multi="get_genes1"), } def create(self,cr,uid,val,context=None): gene_id = self.pool.get("rhwl.easy.genes").search(cr,uid,[("batch_no","=",val.get("name"))],context=context) if not gene_id: raise osv.except_osv(u"错误",u"批次错误,请输入正确的批次号。") id = super(rhwl_gene_batch,self).create(cr,uid,val,context=context) self.pool.get("rhwl.easy.genes").write(cr,uid,gene_id,{"batch_id":id},context=context) return id def action_button(self,cr,uid,ids,context=None): pass #疾病分类对象 class rhwl_gene_disease_type(osv.osv): _name = "rhwl.gene.disease.type" _columns = { "name": fields.char(u"分类名称", size=100), "line": fields.one2many("rhwl.gene.disease", "type_id", string=u"疾病名称") } #疾病明细对象 class rhwl_gene_disease(osv.osv): _name = "rhwl.gene.disease" _columns = { "name": fields.char(u"疾病名称", size=50), "type_id": fields.many2one("rhwl.gene.disease.type", string=u"分类名称"), "code":fields.char("Code",size=5), } class rhwl_gene_popup(osv.osv_memory): _name = "rhwl.easy.genes.popup" _columns = { "note": fields.text(u"说明") } def action_ok(self, cr, uid, ids, context=None): obj = self.browse(cr, uid, ids, context) s = { "confirm_note": "except_confirm", "except_note": "except" } col = context.get('col') if not context: context={} context["user_id"]=uid tab = context.get("tab","rhwl.easy.genes") self.pool.get(tab).write(cr, SUPERUSER_ID, context.get("active_id", 0), {col: obj.note, "state": s.get(col)},context=context)
agpl-3.0
-2,561,111,213,991,747,000
48.208411
304
0.545021
false
2.91949
false
false
false
pazagra/catkin_ws
src/Multimodal_Interaction/Offline_Learning/SVM.py
1
3693
from scipy.cluster.vq import *
from sklearn.preprocessing import Normalizer
from sklearn.svm import LinearSVC
from sklearn.externals import joblib
from sklearn.svm import *
from sklearn.metrics import *
from sklearn import linear_model
import matplotlib.pyplot as plt
import cv2
import os
import numpy as np
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import GridSearchCV


class SVM_offline:
    def load(self,path):
        if os.path.isfile(path):
            self.clf, self.clf.classes_, self.stdSlr = joblib.load(path)
            return True
        return False

    def predict(self,descriptors):
        if self.prob:
            P = self.clf.predict_proba(descriptors)
            # print max(P[0])
            i =np.nonzero(P[0] == max(P[0]))[0][0]
            return self.clf.classes_[i],P[0][i]
        else:
            return self.clf.predict(descriptors)

    def get_names(self):
        return self.clf.classes_

    def IsObject(self,label):
        return label in self.clf.classes_

    def train(self, descriptors, names):
        # Scaling the words
        # N = Normalizer().fit(descriptors)
        # descriptors = N.transform(descriptors)
        # self.stdSlr = StandardScaler().fit(descriptors)
        # im_features = self.stdSlr.transform(descriptors)
        #
        # Train the Linear SVM
        # unique, counts = np.unique(names, return_counts=True)
        # print dict(zip(unique, counts))
        # C_range = np.logspace(-5, 10, 13)
        # gamma_range = np.logspace(-9, 5, 13)
        # param_grid = dict(gamma=gamma_range, C=C_range)
        # cv = StratifiedShuffleSplit(n_splits=5, test_size=0.3, random_state=42)
        # grid = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
        # grid.fit(descriptors, names)
        #
        # print("The best parameters are %s with a score of %0.2f"
        #       % (grid.best_params_, grid.best_score_))
        self.clf = SVC(kernel='rbf', C=self.C, verbose=False, max_iter=self.iter,
                       probability=self.prob, gamma=self.gamma)
        names = names.ravel()
        self.clf.fit(descriptors, names)
        if self.prob:
            pred = self.clf.predict(descriptors)
            # print("Classification report for classifier %s:\n%s\n"
            #       % (self.clf, classification_report(names, pred)))
            print("Confusion matrix:\n%s" % confusion_matrix(names, pred))
        # print self.clf.classes_
        print self.clf.score(descriptors, names)
        joblib.dump((self.clf, self.clf.classes_, self.stdSlr), self.path, compress=3)
        return self.clf.classes_

    def test(self, descriptors):
        # Scale the features
        # test_features = self.stdSlr.transform(descriptors)
        descriptors = descriptors.reshape(-1, 1).transpose()
        return self.predict(descriptors)

    def __init__(self, path, C,gamma, iter, c_w, probability):
        if not self.load(path):
            self.path = path
            self.clf = SVC()
            self.C = C
            self.gamma = gamma
            self.iter = iter
            self.prob = probability
            if c_w:
                self.class_weight = {0: 1000, 1: 200}
            else:
                self.class_weight = 'balanced'
            self.stdSlr = None
            self.loaded = False
        else:
            self.path = path
            self.C = C
            self.gamma = gamma
            self.iter = iter
            self.prob = probability
            if c_w:
                self.class_weight = {0: 1000, 1: 200}
            else:
                self.class_weight = 'balanced'
            self.loaded = True
gpl-3.0
-1,168,504,347,862,580,700
34.519231
86
0.580558
false
3.826943
false
false
false
jscottcronin/PinkSlipper
Featurize_Data/prnewswire_featurize.py
1
5306
from collections import defaultdict import pymongo import requests from bs4 import BeautifulSoup import numpy as np import newspaper def in_clean_db(title, database): ''' PURPOSE: check if article is in given database INPUT: title (str) - article headline database (pymongo obj) - connection to mongodb OUTPUT: boolean - True if article is in database ''' if database.find({'_id': title}).count() > 0: return True else: return False def page_data_added(title, database): ''' PURPOSE: check if page_data was added to article in mongodb INPUT: title (str) - article headline database (pymongo obj) - connection to mongodb OUTPUT: boolean - True if article page_data is in database ''' if database.find({'_id': title, 'date': {'$exists': True}}).count() > 0: return True else: return False def add_to_clean_db(title, link, soup, source, database): ''' PURPOSE: use newspaper to extract article features and save into database INPUT: title (str) - article headline link (str) - url for article soup (str) - article body soup source (str) - article source database (pymongo obj) - mongodb connection obj OUTPUT: None ''' article = newspaper.Article(link) article.download(html=soup) article.parse() data = {'_id': title, 'link': link, 'source': source, 'source_url': article.source_url, 'url': article.url, 'title': article.title, 'top_img': article.top_img, 'meta_img': article.meta_img, 'body': article.text, 'keywords': article.keywords, 'meta_keywords': article.meta_keywords, # 'tags': article.tags, 'authors': article.authors, 'publish_date': article.publish_date, 'summary': article.summary, 'meta_desc': article.meta_description, 'lang': article.meta_lang} database.insert_one(data) def add_features_from_page_soup(title, link, soup, db): ''' PURPOSE: update article features in mongodb with info from page_soup INPUT: title (str) - article headline link (str) - url for article soup (str) - page_soup for given article db (pymongo obj) - connection to mongodb OUTPUT: None ''' print title if in_clean_db(title, db) and not page_data_added(title, db): soup = BeautifulSoup(soup, 'html.parser') s = soup.find(class_='news-release', href=link) \ .find_parent() \ .find_parent() \ .find_parent() \ .find_previous_sibling() try: date = s.find(class_='date').text.strip() except: date = '' try: time = s.find(class_='time').text.strip() except: time = '' try: img = soup.find(href=link).img['src'] except: img = '' try: summary = soup.find(class_='news-release', href=link) \ .find_parent().find_next_sibling().text.strip() except: summary = '' uid = {'_id': title} additional_data = {'date': date, 'time': time, 'brief': summary, 'img': img } db.update_one(uid, {'$set': additional_data}) def main2(): ''' PURPOSE: update articles in new mongodb with features extracted from page_soup INPUT: None OUTPUT: None ''' cli = pymongo.MongoClient() db = cli.pr coll = db.prnewswire coll2 = db.pr_clean cursor = coll.find() tot = coll.find().count() count = 1 for doc in cursor: title = doc['_id'] link = doc['link'] psoup = doc['page_soup'] source = doc['source'] if not in_clean_db(title, coll2): print 'error - not in pr_clean db' else: print 'updating features' add_features_from_page_soup(title, link, psoup, coll2) print 'Importing article %i of %i' % (count, tot) count += 1 cli.close() def main(): ''' PURPOSE: cleanse articles from original mongodb and store in new mongodb with updated features from body soup INPUT: None OUTPUT: None ''' cli = pymongo.MongoClient() db = cli.pr coll = db.prnewswire coll2 = db.pr_clean cursor = coll.find() tot = 
coll.find().count() count = 1 for doc in cursor: title = doc['_id'] link = doc['link'] soup = doc['body_soup'] source = doc['source'] if not in_clean_db(title, coll2): print 'adding to clean db' add_to_clean_db(title, link, soup, source, coll2) else: print 'already in clean db' print 'Importing article %i of %i' % (count, tot) count += 1 cli.close() if __name__ == '__main__': main() # main2()
gpl-2.0
3,361,504,421,104,785,000
28.977401
76
0.525066
false
4.097297
false
false
false
marcardioid/DailyProgrammer
solutions/237_Intermediate/solution.py
1
1128
def fill(grid):
    legend = {0: '#', 1: '=', 2: '-', 3: '.'}
    grid = {(x, y): grid[y][x] for y in range(h) for x in range(w)}

    def flood(root=(1, 1), depth=0):
        visited = set()
        queue = {root}
        while queue:
            node = queue.pop()
            if node in visited:
                continue
            visited.add(node)
            if grid[node] == '+' and grid[(node[0]+1, node[1])] == '-' and grid[(node[0], node[1]+1)] == '|':
                flood((node[0]+1, node[1]+1), depth+1)
            elif grid[node] == ' ':
                grid[node] = legend.get(depth, ' ')
                for dx, dy in [(0, -1), (1, 0), (0, 1), (-1, 0)]:
                    if (node[0]+dx, node[1]+dy) in grid:
                        queue.add((node[0]+dx, node[1]+dy))

    flood()
    return grid


if __name__ == "__main__":
    with open("input/input.txt", "r") as file:
        dimensions, *data = file.read().splitlines()
    h, w = map(int, dimensions.split())
    grid = fill(data)
    print('\n'.join(''.join(grid[(x, y)] for x in range(w)) for y in range(h)))
mit
7,404,926,787,901,546,000
36.633333
109
0.428191
false
3.27907
false
false
false
AstroTech/workshop-python
django/solution/untitled/iris/migrations/0001_initial.py
1
1172
# Generated by Django 2.1.4 on 2018-12-05 10:46

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Iris',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date_added', models.DateTimeField(auto_now_add=True, verbose_name='Date Added')),
                ('date_modified', models.DateTimeField(auto_now=True, verbose_name='Date Modified')),
                ('sepal_length', models.DecimalField(decimal_places=1, max_digits=3, verbose_name='Sepal Length')),
                ('sepal_width', models.DecimalField(decimal_places=1, max_digits=3, verbose_name='Sepal Width')),
                ('petal_length', models.DecimalField(decimal_places=1, max_digits=3, verbose_name='Petal Length')),
                ('petal_width', models.DecimalField(decimal_places=1, max_digits=3, verbose_name='Petal Width')),
                ('species', models.CharField(max_length=30, verbose_name='Species')),
            ],
        ),
    ]
mit
4,240,290,656,096,076,300
42.407407
115
0.610922
false
3.919732
false
false
false
endlessm/chromium-browser
third_party/chromite/lib/unittest_lib.py
1
2714
# -*- coding: utf-8 -*-
# Copyright 2014 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Unittest-only utility functions library."""

from __future__ import print_function

import os

from chromite.lib import cros_build_lib
from chromite.lib import osutils


class BuildELFError(Exception):
  """Generic error building an ELF file."""


def BuildELF(filename, defined_symbols=None, undefined_symbols=None,
             used_libs=None, executable=False, static=False):
  """Builds a dynamic ELF with the provided import and exports.

  Compiles and links a dynamic program that exports some functions, as
  libraries do, and requires some symbols from other libraries. Dependencies
  shoud live in the same directory as the result. This function

  Args:
    filename: The output filename where the ELF is created.
    defined_symbols: The list of symbols this ELF exports.
    undefined_symbols: The list of symbols this ELF requires from other ELFs.
    used_libs: The list of libraries this ELF loads dynamically, including
        only the name of the library. For example, 'bz2' rather than
        'libbz2.so.1.0'.
    executable: Whether the file has a main() function.
    static: Whether the file is statically linked (implies executable=True).
  """
  if defined_symbols is None:
    defined_symbols = []
  if undefined_symbols is None:
    undefined_symbols = []
  if used_libs is None:
    used_libs = []
  if static and not executable:
    raise ValueError('static requires executable to be True.')

  source = ''.join('void %s();\n' % sym for sym in undefined_symbols)
  source += """
void __defined_symbols(const char*) __attribute__ ((visibility ("hidden")));
void __defined_symbols(const char* sym) {
  %s
}
""" % ('\n  '.join('%s();' % sym for sym in undefined_symbols))

  source += ''.join("""
void %s() __attribute__ ((visibility ("default")));
void %s() {
  __defined_symbols("%s");
}
""" % (sym, sym, sym) for sym in defined_symbols)

  if executable:
    source += """
int main() {
  __defined_symbols("main");
  return 42;
}
"""

  source_fn = filename + '_tmp.c'
  osutils.WriteFile(source_fn, source)

  outdir = os.path.dirname(filename)
  cmd = ['gcc', '-o', filename, source_fn]
  if not executable:
    cmd += ['-shared', '-fPIC']
  if static:
    cmd += ['-static']
  cmd += ['-L.', '-Wl,-rpath=./']
  cmd += ['-l%s' % lib for lib in used_libs]
  try:
    cros_build_lib.run(
        cmd, cwd=outdir, stdout=True, stderr=True, print_cmd=False)
  except cros_build_lib.RunCommandError as e:
    raise BuildELFError('%s\n%s' % (e, e.result.error))
  finally:
    os.unlink(source_fn)
bsd-3-clause
6,010,867,396,611,084,000
31.309524
80
0.670597
false
3.652759
false
false
false
recrm/Zanar2
udebs/interpret.py
1
11842
#!/usr/bin/env python3 import sys import re import copy import json import itertools class standard: """ Basic functionality built into the Udebs scripting language. None of the functions here can depend on any other Udebs module. """ def _print(*args): print(*args) return True def logicif(cond, value, other): return value if cond else other def inside(before, after): return before in after def notin(before, after): return before not in after def equal(*args): x = args[0] for y in args: if y != x: return False return True def notequal(before, after): return before != after def gt(before, after): return before > after def lt(before, after): return before < after def gtequal(before, after): return before >= after def ltequal(before, after): return before <= after def plus(*args): return sum(args) def multiply(*args): i = 1 for number in args: i *= number return i def logicor(*args): return any(args) def logicif(cond, value, other): return value if cond else other def mod(before, after): return before % after def setvar(storage, variable, value): storage[variable] = value return True #prefix functions def getvar(storage, variable): return storage[variable] def div(before, after): return before/after def logicnot(element): return not element def minus(before, element): return before - element def sub(before, after): return next(itertools.islice(before, int(after), None), 'empty') def length(list_): return len(list(list_)) class variables: """ Base environment object that Udebs scripts are interpreted through. """ keywords = { "SUB": { "f": "standard.sub", "args": ["-$1", "$1"], }, "in": { "f": "standard.inside", "args": ["-$1", "$1"], }, "not-in": { "f": "standard.notin", "args": ["-$1", "$1"], }, "if": { "f": "standard.logicif", "args": ["$1", "$2", "$3"], "default": {"$2": True, "$3": False}, }, "min": { "f": "min", "all": True, }, "max": { "f": "max", "all": True, }, "if": { "f": "standard.logicif", "args": ["$1", "$2", "$3"], "default": {"$2": True, "$3": False}, }, "==": { "f": "standard.equal", "all": True, }, "!=": { "f": "standard.notequal", "args": ["-$1", "$1"], }, ">": { "f": "standard.gt", "args": ["-$1", "$1"], }, "<": { "f": "standard.lt", "args": ["-$1", "$1"], }, ">=": { "f": "standard.gtequal", "args": ["-$1", "$1"], }, "<=": { "f": "standard.ltequal", "args": ["-$1", "$1"], }, "%": { "f": "standard.mod", "args": ["-$1", "$1"], }, "+": { "f": "standard.plus", "all": True, }, "*": { "f": "standard.multiply", "all": True, }, "or": { "f": "standard.logicor", "all": True, }, "|": { "f": "abs", "args": ["$1"] }, "/": { "f": "standard.div", "args": ["-$1", "$1"], "default": {"-$1": 1} }, "!": { "f": "standard.logicnot", "args": ["$1"], }, "-": { "f": "standard.minus", "args": ["-$1", "$1"], "default": {"-$1": 0} }, "=": { "f": "standard.setvar", "args": ["storage", "-$1", "$1"], }, "$": { "f": "standard.getvar", "args": ["storage","$1"], }, "print": { "f": "standard._print", "all": True, }, "length": { "f": "standard.length", "args": ["$1"], }, # "solitary": { # "f": "solitary", # }, # "testing": { # "f": "TEST", # "default": {"$3": 50}, # "args": ["-$1", "$1", "$2", "three"], # "kwargs": {"none": "$3", "value": "empty", "test": 10}, # } } env = {"__builtin__": None, "standard": standard, "storage": {}, "abs": abs, "min": min, "max": max} default = { "f": "", "args": [], "kwargs": {}, "all": False, "default": {}, "string": [], } def importModule(dicts={}, globs={}): """ Allows user to extend base variables available to the interpreter. 
Should be run before the instance object is created. """ variables.keywords.update(dicts) variables.env.update(globs) def _getEnv(local, glob=False): """Retrieves a copy of the base variables.""" value = copy.copy(variables.env) if glob: value.update(glob) value["storage"] = local return value class UdebsSyntaxError(Exception): def __init__(self, string): self.message = string def __str__(self): return repr(self.message) class UdebsParserError(Exception): def __init__(self, string): self.message = string def __str__(self): return repr(self.message) def formatS(string, debug): """Converts a string into its python representation.""" string = str(string) if string.isdigit(): return string #String quoted by user. elif string[0] == string[-1] and string[0] in {"'", '"'}: return string #String has already been handled by call elif string[-1] == ")": return string elif string in variables.env: return string #In case prefix notation used in keyword defaults. elif string[0] in variables.keywords: return interpret(string, debug) else: return "'"+string+"'" def call(args, debug=False): """Converts callList into functionString.""" if not isinstance(args, list): raise UdebsParserError("There is a bug in the parser, call recived '{}'".format(args)) if debug: print("call:", args) #Find keyword keywords = [i for i in args if i in variables.keywords] #If there are too many keywords, some might stand alone. if len(keywords) > 1: for key in keywords[:]: values = variables.keywords[key] arguments = sum(len(values.get(i, [])) for i in ["args", "kwargs", "default"]) if arguments == 0 and not values.get("all", False): new = call([key]) args[args.index(key)] = new keywords.remove(key) #Still to many keywords is a syntax error. if len(keywords) > 1: raise UdebsSyntaxError("CallList contains to many keywords '{}'".format(args)) #No keywords creates a tuple object. elif len(keywords) == 0: value = "(" for i in args: value +=formatS(i, debug)+"," computed = value[:-1] + ")" if debug: print("computed:", computed) return computed keyword = keywords[0] #Get and fix data for this keyword. data = copy.copy(variables.default) data.update(variables.keywords[keyword]) #Create dict of values current = args.index(keyword) nodes = copy.copy(data["default"]) for index in range(len(args)): value = "$" if index >= current else "-$" value += str(abs(index - current)) if args[index] != keyword: nodes[value] = args[index] #Force strings into long arguments. for string in data["string"]: nodes[string] = "'"+str(nodes[string]).replace("'", "\\'")+"'" #Claim keyword arguments. kwargs = {} for key, value in data["kwargs"].items(): if value in nodes: newvalue = nodes[value] del nodes[value] else: newvalue = value kwargs[key] = formatS(newvalue, debug) arguments = [] #Insert positional arguments for key in data["args"]: if key in nodes: arguments.append(formatS(nodes[key], debug)) del nodes[key] else: arguments.append(formatS(key, debug)) #Insert ... arguments. if data["all"]: for key in sorted(list(nodes.keys())): arguments.append(formatS(nodes[key], debug)) del nodes[key] if len(nodes) > 0: raise UdebsSyntaxError("Keyword contains unused arguments. '{}'".format(args)) #Insert keyword arguments. 
for key, value in kwargs.items(): arguments.append(str(key) + "=" + str(value)) computed = data["f"] + "(" + ",".join(arguments) + ")" if debug: print("computed:", computed) return computed def split_callstring(raw): """Converts callString into callList.""" openBracket = {'(', '{', '['} closeBracket = {')', '}', ']'} string = raw.strip() callList = [] buf = '' inBrackets = 0 dotLegal = True for char in string: #Ignore everything until matching bracket is found. if inBrackets: if char in openBracket: inBrackets +=1 elif char in closeBracket: inBrackets -=1 buf += char continue #Found opening Bracket if char in openBracket: if len(buf) > 1: raise UdebsSyntaxError("Too many bits before bracket. '{}'".format(raw)) inBrackets +=1 #Dot split elif dotLegal and char == ".": callList.append(buf) buf = '' continue #Normal whitespace split` elif char.isspace(): if dotLegal: dotLegal = False if callList: buf = ".".join(callList)+"."+buf callList = [] if buf: callList.append(buf) buf = '' continue #Everything else buf += char callList.append(buf) if inBrackets: raise UdebsSyntaxError("Brackets are mismatched. '{}'".format(raw)) if '' in callList: raise UdebsSyntaxError("Empty element in callList. '{}'".format(raw)) #Length one special cases. if len(callList) == 1: value = callList[0] #unnecessary brackets. (Future fix: deal with this at start of function as these are common.) if value[0] in openBracket and value[-1] in closeBracket: return split_callstring(value[1:-1]) #Prefix calling. if value not in variables.keywords: if value[0] in variables.keywords: return [value[0], value[1:]] return callList def interpret(string, debug=False, first=True): """Recursive function that parses callString""" #Small hack for solitary keywords if first and string in variables.keywords: return call([string]) _list = split_callstring(string) #Exit condition if len(_list) == 1: return _list[0] if debug: print("Interpret:", string) _list = [interpret(i, debug, False) for i in _list] return call(_list, debug) if __name__ == "__main__": with open("keywords.json") as fp: importModule(json.load(fp), {'self': None}) interpret(sys.argv[1], debug=True)
mit
6,787,578,980,685,789,000
25.374165
104
0.492653
false
4.015599
false
false
false
prheenan/prhUtil
igor/scripts/SurfaceDetection/SurfaceUtil.py
1
4678
# force floating point division. Can still use integer with // from __future__ import division # This file is used for importing the common utilities classes. import numpy as np import matplotlib.pyplot as plt # need to add the utilities class. Want 'home' to be platform independent from os.path import expanduser home = expanduser("~") # get the utilties directory (assume it lives in ~/utilities/python) # but simple to change path= home +"/utilities/python" import sys sys.path.append(path) # import the patrick-specific utilities import GenUtilities as pGenUtil import PlotUtilities as pPlotUtil import CheckpointUtilities as pCheckUtil # idea of this file is to hold a lot of the 'brains' for the # actual surface detection. should be fairly easy to port, etc. class CalibrateObject: # keeps track of calibration (approach or retraction touchoff) # idxStart and idxEnd are from 'getCrossIdxFromApproach': the start # and end of the 'invols' region. sliceAppr and sliceTouch are the # slices before and after this region. the parameters are the GenFit # returns for the two regions; time and index are where the intersections # happen (nominally, where the surface is) def __init__(self,idxStart,idxEnd,sliceAppr,sliceTouch, params1,paramsStd1,predicted1, params2,paramsStd2,predicted2, timeSurface,idxSurface): self._idxStart = idxStart self._idxEnd = idxEnd self._sliceAppr = sliceAppr self._sliceTouch = sliceTouch self._params1 = params1 self._paramsStd1 = paramsStd1 self._predicted1 = predicted1 self._params2 = params2 self._paramsStd2 = paramsStd2 self._predicted2 = predicted2 self._timeSurface = timeSurface self._idxSurface = idxSurface # gets the start and end index of the surface touchoff (ie: where invols # are calculated). Assumes that somewhere in forceDiff is a *single* # high location (high derivative), followed by a low derivate until the end. def getCrossIdxFromApproach(forceDiff,method=None,approachIfTrue=True): # get the maximum force change location maxDiffIdx = np.argmax(forceDiff) # get the median, and where we are <= the median median = np.median(forceDiff) whereLess = np.where(forceDiff <= median)[0] # look where we are less than the median *and* {before/after} the max # this gets a decent guess for where the surface contact happens # (ie: between the two bounds) # last element is -1 lastIndexBeforeList = whereLess[np.where(whereLess < maxDiffIdx)] if (lastIndexBeforeList.size == 0): lastIndexBefore = 0 else: lastIndexBefore = lastIndexBeforeList[-1] # first element is 0 possibleFirstIdx = whereLess[np.where(whereLess > maxDiffIdx)] # if we neever went back to the median, we likely had no dwell. # just use the entire curve. 
if (possibleFirstIdx.size == 0): firstIndexAfter = forceDiff.size-1 else: firstIndexAfter = possibleFirstIdx[0] return lastIndexBefore,firstIndexAfter def getTouchoffCalibration(timeAppr,forceAppr,mDerivApproach,isApproach): idxStart,idxEnd = getCrossIdxFromApproach(mDerivApproach) # fit lines to the force # start and end *always demarcate the start and end (ish) of the invols # if we are approach, we take everything *before* as constant # if we are touchoff, we take everything *after* as constant if (isApproach): constantSlice = np.s_[0:idxStart] touchoffSlice = np.s_[idxStart:idxEnd] else: constantSlice = np.s_[idxEnd:] touchoffSlice = np.s_[idxStart:idxEnd] timeApprLow = timeAppr[constantSlice] timeTouch = timeAppr[touchoffSlice] paramsFirst,stdFirst,predFirst= pGenUtil.GenFit(timeApprLow, forceAppr[constantSlice]) paramsSecond,stdSecond,predSecond = \ pGenUtil.GenFit(timeTouch,forceAppr[touchoffSlice]) # XXX get error estimate using standard deviations? timeSurface = pGenUtil.lineIntersectParam(paramsFirst, paramsSecond) idxSurface = np.argmin(np.abs(timeAppr-timeSurface)) # set the variables we care about calibObj = CalibrateObject(idxStart,idxEnd, constantSlice,touchoffSlice, paramsFirst,stdFirst,predFirst, paramsSecond,stdSecond,predSecond, timeSurface,idxSurface) return calibObj def run(): pass if __name__ == "__main__": run()
gpl-2.0
-2,648,850,792,978,439,000
42.314815
77
0.680847
false
3.951014
false
false
false
caiogit/bird-a
backend/birda/storage/__init__.py
1
8395
#!/usr/bin/env python # -*- coding: utf-8 -*- # References: # - Static and abstract methods: https://julien.danjou.info/blog/2013/guide-python-static-class-abstract-methods # - Singletons in Python: http://stackoverflow.com/questions/6760685/creating-a-singleton-in-python?rq=1 # - Lock acquisition with a decorator: http://stackoverflow.com/questions/489720/what-are-some-common-uses-for-python-decorators/490090#490090 # - Python thread synchronization guide: http://www.laurentluce.com/posts/python-threads-synchronization-locks-rlocks-semaphores-conditions-events-and-queues/ # -------------------------------------- # # Enables python3-like strings handling from __future__ import unicode_literals str = unicode # -------------------------------------- # import os import abc import rdflib import birda.utils.ascii_utils import utils import birda.bModel as bModel import birda.bModel.ontology as ontology # --------------------------------- # SUPPORTED_OUTPUT_TYPES = ['triples', 'xml', 'n3', 'turtle', 'nt', 'pretty-xml'] # "Fake settings" for testing purpose FAKE_DB_PATH = os.path.dirname( os.path.realpath(__file__) ) + "/../../../db" FAKE_SETTINGS = { 'birda.storage_type': 'file', 'birda.storage_file_birda_db': FAKE_DB_PATH + '/birda.turtle', 'birda.storage_file_indiv_db': FAKE_DB_PATH + '/indiv.turtle', 'birda.storage_file_test_db': FAKE_DB_PATH + '/test.turtle', } # ============================================================================ # class Results(object): """ Wrapper for sparql_results who provides some utility features """ query = "" sparql_results = [] elapsed_time = 0.0 namespaces = {} # ----------------------------------------------------------------------- # def __init__(self, query, sparql_results, elapsed_time, namespaces={}): self.query = query self.sparql_results = sparql_results self.elapsed_time = elapsed_time self.namespaces = namespaces # ----------------------------------------------------------------------- # def getFields(self): return [str(k) for k in self.sparql_results.vars] # ----------------------------------------------------------------------- # def getDictList(self): """ Get a list of dictionaries which keys are strings and values are RDFLib object :return: List of dictionaries """ l = [] for res in self.sparql_results.bindings: d = {} for k in self.getFields(): d[str(k)] = res[k] l += [ d ] return l # ----------------------------------------------------------------------- # def getPrettyDictList(self): """ Get a list of dictionaries which keys are strings and values are pretty_urls, strings, ints and dates :return: List of dictionaries """ # Order namespaces from longest to shortest (in order to match first # full path instead of partial path) namespaces_ordered_keys = sorted(self.namespaces.keys(), (lambda x,y: len(x)-len(y)), reverse=True ) l = [] for res in self.sparql_results.bindings: d = {} for k in self.getFields(): d[str(k)] = utils.prettify(res[k], namespaces=self.namespaces, namespaces_ordered_keys=namespaces_ordered_keys) l += [ d ] return l # ----------------------------------------------------------------------- # def printQueryResults(self): """ Print query results in a MySQL ascii tab fashion :return: None """ if self.sparql_results != None: print birda.utils.ascii_utils.render_list_dict( self.getPrettyDictList(), map=self.getFields() ) , print "%s rows in set (%s sec)" % ( len(self.getPrettyDictList()), birda.utils.ascii_utils.hhmmss(self.elapsed_time,tutto=False) ) else: print "Updated (%s sec)" % 
birda.utils.ascii_utils.hhmmss(self.elapsed_time,tutto=False) print # ----------------------------------------------------------------------- # @staticmethod def printQuery(query, lines_number=False): query_rows = query.replace('\t',' ').split('\n') if lines_number: # Little ugly function of convenience def ln(s): ln.n += 1 return "%2s %s" % (ln.n, s) ln.n = 0 query = "\n".join([ ln(r) for r in query_rows if r.strip() ]) else: query = "\n".join([ r for r in query_rows if r.strip() ]) print '====================================' print query print '====================================' # ============================================================================ # class Connection(object): """ Abstract object wrapping all functionalities relative to db interaction. """ __metaclass__ = abc.ABCMeta @abc.abstractmethod def __init__(self, settings, dataset='', namespaces={}, verbose=False): pass @abc.abstractmethod def query(self, query): """ Exectutes a read-only sparql query :return: Result object """ raise NotImplementedError("This method should be implemented by subclasses") # ----------------------------------------------------------------------- # @abc.abstractmethod def update(self, query): """ Exectutes a write-only sparql query :return: ??? """ raise NotImplementedError("This method should be implemented by subclasses") # ----------------------------------------------------------------------- # @abc.abstractmethod def commit(self): """ Commits updates and deletes to db :return: None """ raise NotImplementedError("This method should be implemented by subclasses") # ----------------------------------------------------------------------- # @abc.abstractmethod def rollback(self): """ Rollback updates and deletes and restore the initial status :return: None """ raise NotImplementedError("This method should be implemented by subclasses") # ----------------------------------------------------------------------- # @abc.abstractmethod def close(self): """ Close the connection :return: None """ raise NotImplementedError("This method should be implemented by subclasses") # ============================================================================ # class RDFWrapper(object): """ Object that wraps rdflib.Graph object. It is intended to accumulate rdf statements and dump them in several formats. 
""" rdf = None # ----------------------------------------------------------------------- # def __init__(self): self.rdf = ontology.new_rdf_Graph() # ----------------------------------------------------------------------- # def add(self, s, p, o): """ Add the rdf statement to the rdf container :param s: subject of the rdf statement :param p: predicate of the rdf statement :param o: object of the rdf statement :return: None """ assert type(s) in (type(''),type(u'')) or type(s) == type(rdflib.term.URIRef('')) assert type(p) in (type(''),type(u'')) or type(p) == type(rdflib.term.URIRef('')) if type(s) in (type(''),type(u'')): s = rdflib.term.URIRef(s) if type(p) in (type(''),type(u'')): p = rdflib.term.URIRef(p) o = utils.py2rdf(o) self.rdf.add((s,p,o)) # ----------------------------------------------------------------------- # def dumps(self, output_format): """ Dump the rdf graph into a string :param output_format: Format of the dumped rdf :return: String rapresentation of the rdf graph """ assert output_format in SUPPORTED_OUTPUT_TYPES return self.rdf.serialize(format=output_format) # ============================================================================ # class Storage(object): """ Storage abstract class """ # ----------------------------------------------------------------------- # def __init__(self): raise NotImplementedError("Storage should not be instantiated") # ----------------------------------------------------------------------- # @staticmethod def connect(settings, dataset='', namespaces=bModel.NAMESPACES, verbose=False): """ Creates a connection to a sparql endpoint using "setting" parameters :return: Connection object (sublass of storage.Connection) """ if settings['birda.storage_type'] == 'file': import file_storage return file_storage.FileConnection(settings, dataset=dataset, namespaces=namespaces, verbose=verbose) else: raise NotImplementedError("Storage type unknown") # ================================================================================================ # if __name__ == '__main__': storage = Storage()
gpl-3.0
6,128,479,153,392,613,000
26.709571
158
0.537344
false
3.849152
false
false
false
onelab-eu/sfa
sfa/client/sfaadmin.py
1
22773
#!/usr/bin/python import os import sys import copy from pprint import pformat, PrettyPrinter from optparse import OptionParser from sfa.generic import Generic from sfa.util.xrn import Xrn from sfa.storage.record import Record from sfa.trust.hierarchy import Hierarchy from sfa.trust.gid import GID from sfa.trust.certificate import convert_public_key from sfa.client.common import optparse_listvalue_callback, optparse_dictvalue_callback, terminal_render, filter_records from sfa.client.candidates import Candidates from sfa.client.sfi import save_records_to_file pprinter = PrettyPrinter(indent=4) try: help_basedir=Hierarchy().basedir except: help_basedir='*unable to locate Hierarchy().basedir' def add_options(*args, **kwargs): def _decorator(func): func.__dict__.setdefault('add_options', []).insert(0, (args, kwargs)) return func return _decorator class Commands(object): def _get_commands(self): command_names = [] for attrib in dir(self): if callable(getattr(self, attrib)) and not attrib.startswith('_'): command_names.append(attrib) return command_names class RegistryCommands(Commands): def __init__(self, *args, **kwds): self.api= Generic.the_flavour().make_api(interface='registry') def version(self): """Display the Registry version""" version = self.api.manager.GetVersion(self.api, {}) pprinter.pprint(version) @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='authority to list (hrn/urn - mandatory)') @add_options('-t', '--type', dest='type', metavar='<type>', help='object type', default='all') @add_options('-r', '--recursive', dest='recursive', metavar='<recursive>', help='list all child records', action='store_true', default=False) @add_options('-v', '--verbose', dest='verbose', action='store_true', default=False) def list(self, xrn, type=None, recursive=False, verbose=False): """List names registered at a given authority - possibly filtered by type""" xrn = Xrn(xrn, type) options_dict = {'recursive': recursive} records = self.api.manager.List(self.api, xrn.get_hrn(), options=options_dict) list = filter_records(type, records) # terminal_render expects an options object class Options: pass options=Options() options.verbose=verbose terminal_render (list, options) @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)') @add_options('-t', '--type', dest='type', metavar='<type>', help='object type', default=None) @add_options('-o', '--outfile', dest='outfile', metavar='<outfile>', help='save record to file') @add_options('-f', '--format', dest='format', metavar='<display>', type='choice', choices=('text', 'xml', 'simple'), help='display record in different formats') def show(self, xrn, type=None, format=None, outfile=None): """Display details for a registered object""" records = self.api.manager.Resolve(self.api, xrn, type, details=True) for record in records: sfa_record = Record(dict=record) sfa_record.dump(format) if outfile: save_records_to_file(outfile, records) def _record_dict(self, xrn, type, email, key, slices, researchers, pis, url, description, extras): record_dict = {} if xrn: if type: xrn = Xrn(xrn, type) else: xrn = Xrn(xrn) record_dict['urn'] = xrn.get_urn() record_dict['hrn'] = xrn.get_hrn() record_dict['type'] = xrn.get_type() if url: record_dict['url'] = url if description: record_dict['description'] = description if key: try: pubkey = open(key, 'r').read() except IOError: pubkey = key record_dict['reg-keys'] = [pubkey] if slices: record_dict['slices'] = slices if researchers: record_dict['reg-researchers'] = researchers if email: 
record_dict['email'] = email if pis: record_dict['reg-pis'] = pis if extras: record_dict.update(extras) return record_dict @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn', default=None) @add_options('-t', '--type', dest='type', metavar='<type>', help='object type (mandatory)',) @add_options('-a', '--all', dest='all', metavar='<all>', action='store_true', default=False, help='check all users GID') @add_options('-v', '--verbose', dest='verbose', metavar='<verbose>', action='store_true', default=False, help='verbose mode: display user\'s hrn ') def check_gid(self, xrn=None, type=None, all=None, verbose=None): """Check the correspondance between the GID and the PubKey""" # db records from sfa.storage.model import RegRecord db_query = self.api.dbsession().query(RegRecord).filter_by(type=type) if xrn and not all: hrn = Xrn(xrn).get_hrn() db_query = db_query.filter_by(hrn=hrn) elif all and xrn: print "Use either -a or -x <xrn>, not both !!!" sys.exit(1) elif not all and not xrn: print "Use either -a or -x <xrn>, one of them is mandatory !!!" sys.exit(1) records = db_query.all() if not records: print "No Record found" sys.exit(1) OK = [] NOK = [] ERROR = [] NOKEY = [] for record in records: # get the pubkey stored in SFA DB if record.reg_keys: db_pubkey_str = record.reg_keys[0].key try: db_pubkey_obj = convert_public_key(db_pubkey_str) except: ERROR.append(record.hrn) continue else: NOKEY.append(record.hrn) continue # get the pubkey from the gid gid_str = record.gid gid_obj = GID(string = gid_str) gid_pubkey_obj = gid_obj.get_pubkey() # Check if gid_pubkey_obj and db_pubkey_obj are the same check = gid_pubkey_obj.is_same(db_pubkey_obj) if check : OK.append(record.hrn) else: NOK.append(record.hrn) if not verbose: print "Users NOT having a PubKey: %s\n\ Users having a non RSA PubKey: %s\n\ Users having a GID/PubKey correpondence OK: %s\n\ Users having a GID/PubKey correpondence Not OK: %s\n"%(len(NOKEY), len(ERROR), len(OK), len(NOK)) else: print "Users NOT having a PubKey: %s and are: \n%s\n\n\ Users having a non RSA PubKey: %s and are: \n%s\n\n\ Users having a GID/PubKey correpondence OK: %s and are: \n%s\n\n\ Users having a GID/PubKey correpondence NOT OK: %s and are: \n%s\n\n"%(len(NOKEY),NOKEY, len(ERROR), ERROR, len(OK), OK, len(NOK), NOK) @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)') @add_options('-t', '--type', dest='type', metavar='<type>', help='object type', default=None) @add_options('-e', '--email', dest='email', default="", help="email (mandatory for users)") @add_options('-u', '--url', dest='url', metavar='<url>', default=None, help="URL, useful for slices") @add_options('-d', '--description', dest='description', metavar='<description>', help='Description, useful for slices', default=None) @add_options('-k', '--key', dest='key', metavar='<key>', help='public key string or file', default=None) @add_options('-s', '--slices', dest='slices', metavar='<slices>', help='Set/replace slice xrns', default='', type="str", action='callback', callback=optparse_listvalue_callback) @add_options('-r', '--researchers', dest='researchers', metavar='<researchers>', help='Set/replace slice researchers', default='', type="str", action='callback', callback=optparse_listvalue_callback) @add_options('-p', '--pis', dest='pis', metavar='<PIs>', help='Set/replace Principal Investigators/Project Managers', default='', type="str", action='callback', callback=optparse_listvalue_callback) 
@add_options('-X','--extra',dest='extras',default={},type='str',metavar="<EXTRA_ASSIGNS>", action="callback", callback=optparse_dictvalue_callback, nargs=1, help="set extra/testbed-dependent flags, e.g. --extra enabled=true") def register(self, xrn, type=None, email='', key=None, slices='', pis='', researchers='', url=None, description=None, extras={}): """Create a new Registry record""" record_dict = self._record_dict(xrn=xrn, type=type, email=email, key=key, slices=slices, researchers=researchers, pis=pis, url=url, description=description, extras=extras) self.api.manager.Register(self.api, record_dict) @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)') @add_options('-t', '--type', dest='type', metavar='<type>', help='object type', default=None) @add_options('-u', '--url', dest='url', metavar='<url>', help='URL', default=None) @add_options('-d', '--description', dest='description', metavar='<description>', help='Description', default=None) @add_options('-k', '--key', dest='key', metavar='<key>', help='public key string or file', default=None) @add_options('-s', '--slices', dest='slices', metavar='<slices>', help='Set/replace slice xrns', default='', type="str", action='callback', callback=optparse_listvalue_callback) @add_options('-r', '--researchers', dest='researchers', metavar='<researchers>', help='Set/replace slice researchers', default='', type="str", action='callback', callback=optparse_listvalue_callback) @add_options('-p', '--pis', dest='pis', metavar='<PIs>', help='Set/replace Principal Investigators/Project Managers', default='', type="str", action='callback', callback=optparse_listvalue_callback) @add_options('-X','--extra',dest='extras',default={},type='str',metavar="<EXTRA_ASSIGNS>", action="callback", callback=optparse_dictvalue_callback, nargs=1, help="set extra/testbed-dependent flags, e.g. 
--extra enabled=true") def update(self, xrn, type=None, email='', key=None, slices='', pis='', researchers='', url=None, description=None, extras={}): """Update an existing Registry record""" record_dict = self._record_dict(xrn=xrn, type=type, email=email, key=key, slices=slices, researchers=researchers, pis=pis, url=url, description=description, extras=extras) self.api.manager.Update(self.api, record_dict) @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)') @add_options('-t', '--type', dest='type', metavar='<type>', help='object type', default=None) def remove(self, xrn, type=None): """Remove given object from the registry""" xrn = Xrn(xrn, type) self.api.manager.Remove(self.api, xrn) @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)') @add_options('-t', '--type', dest='type', metavar='<type>', help='object type', default=None) def credential(self, xrn, type=None): """Invoke GetCredential""" cred = self.api.manager.GetCredential(self.api, xrn, type, self.api.hrn) print cred def import_registry(self): """Run the importer""" from sfa.importer import Importer importer = Importer() importer.run() def sync_db(self): """Initialize or upgrade the db""" from sfa.storage.dbschema import DBSchema dbschema=DBSchema() dbschema.init_or_upgrade() @add_options('-a', '--all', dest='all', metavar='<all>', action='store_true', default=False, help='Remove all registry records and all files in %s area' % help_basedir) @add_options('-c', '--certs', dest='certs', metavar='<certs>', action='store_true', default=False, help='Remove all cached certs/gids found in %s' % help_basedir ) @add_options('-0', '--no-reinit', dest='reinit', metavar='<reinit>', action='store_false', default=True, help='Prevents new DB schema from being installed after cleanup') def nuke(self, all=False, certs=False, reinit=True): """Cleanup local registry DB, plus various additional filesystem cleanups optionally""" from sfa.storage.dbschema import DBSchema from sfa.util.sfalogging import _SfaLogger logger = _SfaLogger(logfile='/var/log/sfa_import.log', loggername='importlog') logger.setLevelFromOptVerbose(self.api.config.SFA_API_LOGLEVEL) logger.info("Purging SFA records from database") dbschema=DBSchema() dbschema.nuke() # for convenience we re-create the schema here, so there's no need for an explicit # service sfa restart # however in some (upgrade) scenarios this might be wrong if reinit: logger.info("re-creating empty schema") dbschema.init_or_upgrade() # remove the server certificate and all gids found in /var/lib/sfa/authorities if certs: logger.info("Purging cached certificates") for (dir, _, files) in os.walk('/var/lib/sfa/authorities'): for file in files: if file.endswith('.gid') or file == 'server.cert': path=dir+os.sep+file os.unlink(path) # just remove all files that do not match 'server.key' or 'server.cert' if all: logger.info("Purging registry filesystem cache") preserved_files = [ 'server.key', 'server.cert'] for (dir,_,files) in os.walk(Hierarchy().basedir): for file in files: if file in preserved_files: continue path=dir+os.sep+file os.unlink(path) class CertCommands(Commands): def __init__(self, *args, **kwds): self.api= Generic.the_flavour().make_api(interface='registry') def import_gid(self, xrn): pass @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)') @add_options('-t', '--type', dest='type', metavar='<type>', help='object type', default=None) @add_options('-o', '--outfile', dest='outfile', 
metavar='<outfile>', help='output file', default=None) def export(self, xrn, type=None, outfile=None): """Fetch an object's GID from the Registry""" from sfa.storage.model import RegRecord hrn = Xrn(xrn).get_hrn() request=self.api.dbsession().query(RegRecord).filter_by(hrn=hrn) if type: request = request.filter_by(type=type) record=request.first() if record: gid = GID(string=record.gid) else: # check the authorities hierarchy hierarchy = Hierarchy() try: auth_info = hierarchy.get_auth_info(hrn) gid = auth_info.gid_object except: print "Record: %s not found" % hrn sys.exit(1) # save to file if not outfile: outfile = os.path.abspath('./%s.gid' % gid.get_hrn()) gid.save_to_file(outfile, save_parents=True) @add_options('-g', '--gidfile', dest='gid', metavar='<gid>', help='path of gid file to display (mandatory)') def display(self, gidfile): """Print contents of a GID file""" gid_path = os.path.abspath(gidfile) if not gid_path or not os.path.isfile(gid_path): print "No such gid file: %s" % gidfile sys.exit(1) gid = GID(filename=gid_path) gid.dump(dump_parents=True) class AggregateCommands(Commands): def __init__(self, *args, **kwds): self.api= Generic.the_flavour().make_api(interface='aggregate') def version(self): """Display the Aggregate version""" version = self.api.manager.GetVersion(self.api, {}) pprinter.pprint(version) @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='object hrn/urn (mandatory)') def status(self, xrn): """Retrieve the status of the slivers belonging to the named slice (Status)""" urns = [Xrn(xrn, 'slice').get_urn()] status = self.api.manager.Status(self.api, urns, [], {}) pprinter.pprint(status) @add_options('-r', '--rspec-version', dest='rspec_version', metavar='<rspec_version>', default='KOREN', help='version/format of the resulting rspec response') def resources(self, rspec_version='KOREN'): """Display the available resources at an aggregate""" options = {'geni_rspec_version': rspec_version} print options resources = self.api.manager.ListResources(self.api, [], options) print resources @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='slice hrn/urn (mandatory)') @add_options('-r', '--rspec', dest='rspec', metavar='<rspec>', help='rspec file (mandatory)') def allocate(self, xrn, rspec): """Allocate slivers""" xrn = Xrn(xrn, 'slice') slice_urn=xrn.get_urn() rspec_string = open(rspec).read() options={} expiration = None manifest = self.api.manager.Allocate(self.api, slice_urn, [], rspec_string, expiration, options) print manifest @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='slice hrn/urn (mandatory)') def provision(self, xrn): """Provision slivers""" xrn = Xrn(xrn, 'slice') slice_urn=xrn.get_urn() options = {'geni_rspec_version': 'KOREN'} manifest = self.api.manager.Provision(self.api, [slice_urn], [], options) print manifest @add_options('-x', '--xrn', dest='xrn', metavar='<xrn>', help='slice hrn/urn (mandatory)') def delete(self, xrn): """Delete slivers""" self.api.manager.Delete(self.api, [xrn], [], {}) class SliceManagerCommands(AggregateCommands): def __init__(self, *args, **kwds): self.api= Generic.the_flavour().make_api(interface='slicemgr') class SfaAdmin: CATEGORIES = {'certificate': CertCommands, 'registry': RegistryCommands, 'aggregate': AggregateCommands, 'slicemgr': SliceManagerCommands} # returns (name,class) or (None,None) def find_category (self, input): full_name=Candidates (SfaAdmin.CATEGORIES.keys()).only_match(input) if not full_name: return (None,None) return (full_name,SfaAdmin.CATEGORIES[full_name]) def 
summary_usage (self, category=None): print "Usage:", self.script_name + " category command [<options>]" if category and category in SfaAdmin.CATEGORIES: categories=[category] else: categories=SfaAdmin.CATEGORIES for c in categories: cls=SfaAdmin.CATEGORIES[c] print "==================== category=%s"%c names=cls.__dict__.keys() names.sort() for name in names: method=cls.__dict__[name] if name.startswith('_'): continue margin=15 format="%%-%ds"%margin print "%-15s"%name, doc=getattr(method,'__doc__',None) if not doc: print "<missing __doc__>" continue lines=[line.strip() for line in doc.split("\n")] line1=lines.pop(0) print line1 for extra_line in lines: print margin*" ",extra_line sys.exit(2) def main(self): argv = copy.deepcopy(sys.argv) self.script_name = argv.pop(0) # ensure category is specified if len(argv) < 1: self.summary_usage() # ensure category is valid category_input = argv.pop(0) (category_name, category_class) = self.find_category (category_input) if not category_name or not category_class: self.summary_usage(category_name) usage = "%%prog %s command [options]" % (category_name) parser = OptionParser(usage=usage) # ensure command is valid category_instance = category_class() commands = category_instance._get_commands() if len(argv) < 1: # xxx what is this about ? command_name = '__call__' else: command_input = argv.pop(0) command_name = Candidates (commands).only_match (command_input) if command_name and hasattr(category_instance, command_name): command = getattr(category_instance, command_name) else: self.summary_usage(category_name) # ensure options are valid usage = "%%prog %s %s [options]" % (category_name, command_name) parser = OptionParser(usage=usage) for args, kwdargs in getattr(command, 'add_options', []): parser.add_option(*args, **kwdargs) (opts, cmd_args) = parser.parse_args(argv) cmd_kwds = vars(opts) # dont overrride meth for k, v in cmd_kwds.items(): if v is None: del cmd_kwds[k] # execute command try: #print "invoking %s *=%s **=%s"%(command.__name__,cmd_args, cmd_kwds) command(*cmd_args, **cmd_kwds) sys.exit(0) except TypeError: print "Possible wrong number of arguments supplied" #import traceback #traceback.print_exc() print command.__doc__ parser.print_help() sys.exit(1) #raise except Exception: print "Command failed, please check log for more info" raise sys.exit(1)
mit
-1,940,394,473,199,436,800
42.543021
151
0.575243
false
3.84744
false
false
false
gnocchixyz/python-gnocchiclient
gnocchiclient/v1/resource_cli.py
1
10591
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import distutils.util from cliff import command from cliff import lister from cliff import show from gnocchiclient import exceptions from gnocchiclient import utils class CliResourceList(lister.Lister): """List resources.""" COLS = ('id', 'type', 'project_id', 'user_id', 'original_resource_id', 'started_at', 'ended_at', 'revision_start', 'revision_end') def get_parser(self, prog_name, history=True): parser = super(CliResourceList, self).get_parser(prog_name) parser.add_argument("--details", action='store_true', help="Show all attributes of generic resources"), if history: parser.add_argument("--history", action='store_true', help="Show history of the resources"), parser.add_argument("--limit", type=int, metavar="<LIMIT>", help="Number of resources to return " "(Default is server default)") parser.add_argument("--marker", metavar="<MARKER>", help="Last item of the previous listing. " "Return the next results after this value") parser.add_argument("--sort", action="append", metavar="<SORT>", help="Sort of resource attribute " "(example: user_id:desc-nullslast") parser.add_argument("--type", "-t", dest="resource_type", default="generic", help="Type of resource") return parser def _list2cols(self, resources): """Return a formatted list of resources.""" if not resources: return self.COLS, [] cols = list(self.COLS) for k in resources[0]: if k not in cols: cols.append(k) if 'creator' in cols: cols.remove('created_by_user_id') cols.remove('created_by_project_id') return utils.list2cols(cols, resources) def take_action(self, parsed_args): resources = utils.get_client(self).resource.list( resource_type=parsed_args.resource_type, **utils.get_pagination_options(parsed_args)) # Do not dump metrics because it makes the list way too long for r in resources: del r['metrics'] return self._list2cols(resources) class CliResourceHistory(CliResourceList): """Show the history of a resource.""" def get_parser(self, prog_name): parser = super(CliResourceHistory, self).get_parser(prog_name, history=False) parser.add_argument("resource_id", help="ID of a resource") return parser def take_action(self, parsed_args): resources = utils.get_client(self).resource.history( resource_type=parsed_args.resource_type, resource_id=parsed_args.resource_id, **utils.get_pagination_options(parsed_args)) if parsed_args.formatter == 'table': return self._list2cols(list(map(normalize_metrics, resources))) return self._list2cols(resources) class CliResourceSearch(CliResourceList): """Search resources with specified query rules.""" def get_parser(self, prog_name): parser = super(CliResourceSearch, self).get_parser(prog_name) utils.add_query_argument("query", parser) return parser def take_action(self, parsed_args): resources = utils.get_client(self).resource.search( resource_type=parsed_args.resource_type, query=parsed_args.query, **utils.get_pagination_options(parsed_args)) # Do not dump metrics because it makes the list way too long for r in resources: del r['metrics'] return self._list2cols(resources) 
def normalize_metrics(res): res['metrics'] = "\n".join(sorted( ["%s: %s" % (name, _id) for name, _id in res['metrics'].items()])) return res class CliResourceShow(show.ShowOne): """Show a resource.""" def get_parser(self, prog_name): parser = super(CliResourceShow, self).get_parser(prog_name) parser.add_argument("--type", "-t", dest="resource_type", default="generic", help="Type of resource") parser.add_argument("resource_id", help="ID of a resource") return parser def take_action(self, parsed_args): res = utils.get_client(self).resource.get( resource_type=parsed_args.resource_type, resource_id=parsed_args.resource_id) if parsed_args.formatter == 'table': normalize_metrics(res) return self.dict2columns(res) class CliResourceCreate(show.ShowOne): """Create a resource.""" def get_parser(self, prog_name): parser = super(CliResourceCreate, self).get_parser(prog_name) parser.add_argument("--type", "-t", dest="resource_type", default="generic", help="Type of resource") parser.add_argument("resource_id", help="ID of the resource") parser.add_argument("-a", "--attribute", action='append', default=[], help=("name and value of an attribute " "separated with a ':'")) parser.add_argument("-m", "--add-metric", action='append', default=[], help="name:id of a metric to add"), parser.add_argument( "-n", "--create-metric", action='append', default=[], help="name:archive_policy_name of a metric to create"), return parser def _resource_from_args(self, parsed_args, update=False): # Get the resource type to set the correct type rt_attrs = utils.get_client(self).resource_type.get( name=parsed_args.resource_type)['attributes'] resource = {} if not update: resource['id'] = parsed_args.resource_id if parsed_args.attribute: for attr in parsed_args.attribute: attr, __, value = attr.partition(":") attr_type = rt_attrs.get(attr, {}).get('type') if attr_type == "number": value = float(value) elif attr_type == "bool": value = bool(distutils.util.strtobool(value)) resource[attr] = value if (parsed_args.add_metric or parsed_args.create_metric or (update and parsed_args.delete_metric)): if update: r = utils.get_client(self).resource.get( parsed_args.resource_type, parsed_args.resource_id) default = r['metrics'] for metric_name in parsed_args.delete_metric: try: del default[metric_name] except KeyError: raise exceptions.MetricNotFound( message="Metric name %s not found" % metric_name) else: default = {} resource['metrics'] = default for metric in parsed_args.add_metric: name, _, value = metric.partition(":") resource['metrics'][name] = value for metric in parsed_args.create_metric: name, _, value = metric.partition(":") if value: resource['metrics'][name] = {'archive_policy_name': value} else: resource['metrics'][name] = {} return resource def take_action(self, parsed_args): resource = self._resource_from_args(parsed_args) res = utils.get_client(self).resource.create( resource_type=parsed_args.resource_type, resource=resource) if parsed_args.formatter == 'table': normalize_metrics(res) return self.dict2columns(res) class CliResourceUpdate(CliResourceCreate): """Update a resource.""" def get_parser(self, prog_name): parser = super(CliResourceUpdate, self).get_parser(prog_name) parser.add_argument("-d", "--delete-metric", action='append', default=[], help="Name of a metric to delete"), return parser def take_action(self, parsed_args): resource = self._resource_from_args(parsed_args, update=True) res = utils.get_client(self).resource.update( resource_type=parsed_args.resource_type, resource_id=parsed_args.resource_id, 
resource=resource) if parsed_args.formatter == 'table': normalize_metrics(res) return self.dict2columns(res) class CliResourceDelete(command.Command): """Delete a resource.""" def get_parser(self, prog_name): parser = super(CliResourceDelete, self).get_parser(prog_name) parser.add_argument("resource_id", help="ID of the resource") return parser def take_action(self, parsed_args): utils.get_client(self).resource.delete(parsed_args.resource_id) class CliResourceBatchDelete(show.ShowOne): """Delete a batch of resources based on attribute values.""" def get_parser(self, prog_name): parser = super(CliResourceBatchDelete, self).get_parser(prog_name) parser.add_argument("--type", "-t", dest="resource_type", default="generic", help="Type of resource") utils.add_query_argument("query", parser) return parser def take_action(self, parsed_args): res = utils.get_client(self).resource.batch_delete( resource_type=parsed_args.resource_type, query=parsed_args.query) return self.dict2columns(res)
apache-2.0
-1,724,077,443,211,696,600
38.966038
78
0.572656
false
4.365622
false
false
false
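For the gnocchiclient record above, the "-a name:value" coercion done in CliResourceCreate._resource_from_args can be sketched in isolation. This is a minimal sketch only: the RT_ATTRS schema below is a made-up stand-in for the attribute map the real code fetches from the resource-type API, and the helper mirrors the record's own use of str.partition and distutils.util.strtobool.

import distutils.util

# Hypothetical attribute schema; the real one comes from
# utils.get_client(self).resource_type.get(name=...)['attributes'].
RT_ATTRS = {'display_name': {'type': 'string'},
            'ram': {'type': 'number'},
            'deleted': {'type': 'bool'}}

def parse_attributes(pairs, rt_attrs=RT_ATTRS):
    """Turn ['ram:2048', 'deleted:false'] into a typed resource dict."""
    resource = {}
    for attr in pairs:
        name, _, value = attr.partition(":")
        attr_type = rt_attrs.get(name, {}).get('type')
        if attr_type == "number":
            value = float(value)
        elif attr_type == "bool":
            value = bool(distutils.util.strtobool(value))
        resource[name] = value
    return resource

print(parse_attributes(['display_name:web01', 'ram:2048', 'deleted:false']))
# -> {'display_name': 'web01', 'ram': 2048.0, 'deleted': False}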
cualbondi/cualbondi.com.ar
apps/widget/views.py
1
3631
from apps.catastro.models import Ciudad from apps.core.models import Recorrido from django.contrib.gis.geos import Point from django.http import HttpResponse from django.shortcuts import render_to_response from django.template import RequestContext from django.conf import settings from django.contrib.sites.models import Site from django.views.decorators.csrf import csrf_exempt from django.views.decorators.http import require_GET @csrf_exempt @require_GET def v1_busqueda(request, extension): if request.GET.get("key") == '123456789': if extension == "html": if request.GET.get("ciudad"): ci = Ciudad.objects.get(slug=request.GET.get("ciudad")) ciudades = [] else: ci = None ciudades = Ciudad.objects.all() return render_to_response('widget/v1/busqueda.html', { 'ciudades': ciudades, 'ciudad' : ci }, context_instance=RequestContext(request)) else: if extension == "js": current_site = Site.objects.get_current() try: ci = Ciudad.objects.get(slug=request.GET.get("ciudad")) ciudad_arg = "&ciudad="+ci.slug except: ciudad_arg = "" return render_to_response('widget/v1/busqueda.js', { 'current_site': current_site, 'ciudad_arg' : ciudad_arg }, context_instance=RequestContext(request), #content_type="application/x-JavaScript") #django => 1.5 mimetype="application/x-JavaScript") #django < 1.5 else: return HttpResponse(status=403) @csrf_exempt @require_GET def v1_lineas(request, extension): if request.GET.get("key") == '123456789': if extension == "html": try: lat = float(request.GET.get("lat", "NaN")) lon = float(request.GET.get("lon", "NaN")) rad = int(request.GET.get("rad", "NaN")) except: return HttpResponse(status=501) print_ramales = request.GET.get("ramales") == "true" recorridos = Recorrido.objects.select_related('linea').filter(ruta__dwithin=(Point(lon, lat), 0.1), ruta__distance_lt=(Point(lon, lat), rad)) if not print_ramales: recorridos = list(set([x.linea for x in recorridos])) return render_to_response('widget/v1/lineas.html', { 'listado': recorridos, 'print_ramales': print_ramales, }, context_instance=RequestContext(request)) else: if extension == "js": if request.GET.get("lat") and request.GET.get("lon") and request.GET.get("rad"): current_site = Site.objects.get_current() return render_to_response('widget/v1/lineas.js', { 'current_site': current_site }, context_instance=RequestContext(request), #content_type="application/x-JavaScript") #django => 1.5 mimetype="application/x-JavaScript") #django < 1.5 else: return HttpResponse(status=501) else: return HttpResponse(status=403) def not_found(request): return HttpResponse(status=404) def test(request): return render_to_response('widget/test.html', { 'current_site': Site.objects.get_current()})
agpl-3.0
6,066,271,413,888,378,000
40.261364
153
0.552189
false
4.003308
false
false
false
alexharmenta/Inventationery
Inventationery/apps/Inventory/migrations/0015_orderhistorymodel.py
1
1364
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('Inventory', '0014_auto_20151227_1250'), ] operations = [ migrations.CreateModel( name='OrderHistoryModel', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('created', models.DateTimeField(auto_now_add=True)), ('modified', models.DateTimeField(auto_now=True)), ('DocumentId', models.CharField(max_length=30)), ('CustVendName', models.CharField(default=None, max_length=100)), ('DocumentDate', models.DateField()), ('DocumentTotal', models.DecimalField(max_digits=20, decimal_places=2)), ('Qty', models.IntegerField(default=0)), ('Price', models.DecimalField(max_digits=10, decimal_places=2)), ('SubTotal', models.DecimalField(null=True, max_digits=20, decimal_places=2, blank=True)), ('Item', models.ForeignKey(related_name='OrderHistory', default=None, blank=True, to='Inventory.ItemModel', null=True)), ], options={ 'abstract': False, }, ), ]
bsd-3-clause
8,270,451,206,548,817,000
40.333333
136
0.574047
false
4.302839
false
false
false
cdiener/micom
micom/solution.py
1
3836
"""A community solution object.""" import numpy as np import pandas as pd from optlang.interface import OPTIMAL from cobra.core import Solution, get_solution def _group_species(values, ids, species, what="reaction"): """Format a list of values by id and species.""" df = pd.DataFrame({values.name: values, what: ids, "species": species}) df = df.pivot(index="species", columns=what, values=values.name) df.name = values.name return df class CommunitySolution(Solution): """An FBA solution for an entire community. Attributes ---------- objective_value : float The (optimal) value for the objective function. members : pandas.Series Contains basic info about the individual members of the community such as id, abundance and growth rates. growth_rate : float The overall growth rate for the community normalized to 1 gDW. status : str The solver status related to the solution. fluxes : pandas.DataFrame Contains the reaction fluxes (primal values of variables) stratified by species. Columns denote individual fluxes and rows denote species. Fluxes will be NA if the reaction does not exist in the organism. reduced_costs : pandas.Series Contains reaction reduced costs (dual values of variables) stratified by species. Columns denote individual fluxes and rows denote species. Reduced costs will be NA if the reaction does not exist in the organism. shadow_prices : pandas.Series Contains metabolite shadow prices (dual values of constraints) stratified by species. Columns denote individual metabolites and rows denote species. Shadow prices will be NA if the metabolite does not exist in the organism. """ def __init__(self, community, slim=False, reactions=None, metabolites=None): """Get the solution from a community model.""" if reactions is None: reactions = community.reactions if metabolites is None: metabolites = community.metabolites if not slim: rids = np.array([(r.global_id, r.community_id) for r in reactions]) mids = np.array([(m.global_id, m.community_id) for m in metabolites]) sol = get_solution(community, reactions, metabolites) super(CommunitySolution, self).__init__( community.solver.objective.value, community.solver.status, np.unique(rids[:, 0]), _group_species(sol.fluxes, rids[:, 0], rids[:, 1]), _group_species(sol.reduced_costs, rids[:, 0], rids[:, 1]), np.unique(mids[:, 0]), _group_species(sol.shadow_prices, mids[:, 0], mids[:, 1], what="metabolites")) else: super(CommunitySolution, self).__init__( community.solver.objective.value, community.solver.status, None, None, None, None, None) gcs = pd.Series() for sp in community.objectives: gcs[sp] = community.constraints["objective_" + sp].primal self.members = pd.DataFrame({"id": gcs.index, "abundance": community.abundances, "growth_rate": gcs}) self.growth_rate = sum(community.abundances * gcs) del self.reactions del self.metabolites def __repr__(self): """Convert CommunitySolution instance to string representation.""" if self.status != OPTIMAL: return "<CommunitySolution {0:s} at 0x{1:x}>".format( self.status, id(self)) return "<CommunitySolution {0:.3f} at 0x{1:x}>".format( self.growth_rate, id(self))
apache-2.0
2,541,745,594,639,425,500
42.590909
79
0.612617
false
4.156013
false
false
false
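The CommunitySolution docstring in the micom record above says fluxes are stratified by species, with NA wherever a reaction does not exist in an organism. A minimal pandas sketch of the pivot that _group_species performs (toy reaction names, not taken from micom) makes that concrete:

import pandas as pd

# Toy data: species "a" carries two reactions, species "b" only one,
# so the pivot leaves a NaN where the reaction is missing.
df = pd.DataFrame({"fluxes": [1.5, -0.2, 0.7],
                   "reaction": ["EX_glc", "PGI", "EX_glc"],
                   "species": ["a", "a", "b"]})
table = df.pivot(index="species", columns="reaction", values="fluxes")
print(table)
# reaction  EX_glc  PGI
# species
# a            1.5 -0.2
# b            0.7  NaN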
pitpig/rozowoo
app/trackback.py
1
3315
"""tblib.py: A Trackback (client) implementation in Python """ __author__ = "Matt Croydon <matt@ooiio.com>" __copyright__ = "Copyright 2003, Matt Croydon" __license__ = "GPL" __version__ = "0.1.0" __history__ = """ 0.1.0: 1/29/03 - Code cleanup, release. It can send pings, and autodiscover a URL to ping. 0.0.9: 1/29/03 - Basic error handling and autodiscovery works! 0.0.5: 1/29/03 - Internal development version. Working on autodiscovery and error handling. 0.0.4: 1/22/03 - First public release, code cleanup. 0.0.3: 1/22/03 - Removed hard coding that was used for testing. 0.0.2: 1/21/03 - First working version. 0.0.1: 1/21/03 - Initial version. Thanks to Mark Pilgrim for helping me figure some module basics out. """ import httplib, urllib, urlparse, re from google.appengine.api import urlfetch import logging """Everything I needed to know about trackback I learned from the trackback tech specs page http://www.movabletype.org/docs/mttrackback.html. All arguments are optional. This allows us to create an empty TrackBack object, then use autodiscovery to populate its attributes. """ class TrackBack: def __init__(self, tbUrl=None, title=None, excerpt=None, url=None, blog_name=None): self.tbUrl = tbUrl self.title = title self.excerpt = excerpt self.url = url self.blog_name = blog_name self.tbErrorCode = None self.tbErrorMessage = None def ping(self): # Only execute if a trackback url has been defined. if self.tbUrl: # Create paramaters and make them play nice with HTTP # Python's httplib example helps a lot: # http://python.org/doc/current/lib/httplib-examples.html params = urllib.urlencode({'title': self.title, 'url': self.url, 'excerpt': self.excerpt, 'blog_name': self.blog_name}) headers = ({"Content-type": "application/x-www-form-urlencoded", "User-Agent": "micolog"}) # urlparse is my hero # http://www.python.org/doc/current/lib/module-urlparse.html logging.info("ping...%s",params) response=urlfetch.fetch(self.tbUrl,method=urlfetch.POST,payload=params,headers=headers) self.httpResponse = response.status_code data = response.content self.tbResponse = data logging.info("ping...%s"%data) # Thanks to Steve Holden's book: _Python Web Programming_ (http://pydish.holdenweb.com/pwp/) # Why parse really simple XML when you can just use regular expressions? Rawk. errorpattern = r'<error>(.*?)</error>' reg = re.search(errorpattern, self.tbResponse) if reg: self.tbErrorCode = reg.group(1) if int(self.tbErrorCode) == 1: errorpattern2 = r'<message>(.*?)</message>' reg2 = re.search(errorpattern2, self.tbResponse) if reg2: self.tbErrorMessage = reg2.group(1) else: return 1 def autodiscover(self, urlToCheck): response=urlfetch.fetch(urlToCheck) data = response.content tbpattern = r'trackback:ping="(.*?)"' reg = re.search(tbpattern, data) if reg: self.tbUrl = reg.group(1)
mit
-4,092,896,514,661,918,700
43.213333
131
0.625943
false
3.607182
false
false
false
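A small sketch of the error handling that TrackBack.ping() performs in the record above. The XML response string here is invented for illustration; it simply follows the <error>/<message> shape that the regular expressions in ping() look for.

import re

# Made-up failure response in the MovableType trackback format.
sample = ("<?xml version='1.0'?><response><error>1</error>"
          "<message>Missing required parameter: url</message></response>")

error = re.search(r'<error>(.*?)</error>', sample)
if error and int(error.group(1)) == 1:
    message = re.search(r'<message>(.*?)</message>', sample)
    print("trackback rejected: %s" % message.group(1))
else:
    print("trackback accepted")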
MaxVanDeursen/tribler
Tribler/Core/statistics.py
1
5656
import os from Tribler.Core.CacheDB.sqlitecachedb import DB_FILE_RELATIVE_PATH from Tribler.Core.simpledefs import NTFY_TORRENTS, NTFY_CHANNELCAST DATA_NONE = u"None" class TriblerStatistics(object): def __init__(self, session): """ Constructor. :param session: The Tribler session. """ self.session = session def get_tribler_statistics(self): """ Return a dictionary with some general Tribler statistics. """ torrent_db_handler = self.session.open_dbhandler(NTFY_TORRENTS) channel_db_handler = self.session.open_dbhandler(NTFY_CHANNELCAST) torrent_stats = torrent_db_handler.getTorrentsStats() torrent_total_size = 0 if torrent_stats[1] is None else torrent_stats[1] stats_dict = {"torrents": {"num_collected": torrent_stats[0], "total_size": torrent_total_size, "num_files": torrent_stats[2]}, "num_channels": channel_db_handler.getNrChannels(), "database_size": os.path.getsize( os.path.join(self.session.get_state_dir(), DB_FILE_RELATIVE_PATH))} if self.session.lm.rtorrent_handler: torrent_queue_stats = self.session.lm.rtorrent_handler.get_queue_stats() torrent_queue_size_stats = self.session.lm.rtorrent_handler.get_queue_size_stats() torrent_queue_bandwidth_stats = self.session.lm.rtorrent_handler.get_bandwidth_stats() stats_dict["torrent_queue_stats"] = torrent_queue_stats stats_dict["torrent_queue_size_stats"] = torrent_queue_size_stats stats_dict["torrent_queue_bandwidth_stats"] = torrent_queue_bandwidth_stats return stats_dict def get_dispersy_statistics(self): """ Return a dictionary with some general Dispersy statistics. """ dispersy = self.session.get_dispersy_instance() dispersy.statistics.update() stats = dispersy.statistics return { "wan_address": "%s:%d" % stats.wan_address, "lan_address": "%s:%d" % stats.lan_address, "connection": unicode(stats.connection_type), "runtime": stats.timestamp - stats.start, "total_downloaded": stats.total_down, "total_uploaded": stats.total_up, "packets_sent": stats.total_send, "packets_received": stats.total_received, "packets_success": stats.msg_statistics.success_count, "packets_dropped": stats.msg_statistics.drop_count, "packets_delayed_sent": stats.msg_statistics.delay_send_count, "packets_delayed_received": stats.msg_statistics.delay_received_count, "packets_delayed_success": stats.msg_statistics.delay_success_count, "packets_delayed_timeout": stats.msg_statistics.delay_timeout_count, "total_walk_attempts": stats.walk_attempt_count, "total_walk_success": stats.walk_success_count, "sync_messages_created": stats.msg_statistics.created_count, "bloom_new": sum(c.sync_bloom_new for c in stats.communities), "bloom_reused": sum(c.sync_bloom_reuse for c in stats.communities), "bloom_skipped": sum(c.sync_bloom_skip for c in stats.communities), } def get_community_statistics(self): """ Return a dictionary with general statistics of the active Dispersy communities. 
""" communities_stats = [] dispersy = self.session.get_dispersy_instance() dispersy.statistics.update() for community in dispersy.statistics.communities: if community.dispersy_enable_candidate_walker or community.dispersy_enable_candidate_walker_responses or \ community.candidates: candidate_count = "%s" % len(community.candidates) else: candidate_count = "-" communities_stats.append({ "identifier": community.hex_cid, "member": community.hex_mid, "classification": community.classification, "global_time": community.global_time, "median_global_time": community.acceptable_global_time - community.dispersy_acceptable_global_time_range, "acceptable_global_time_range": community.dispersy_acceptable_global_time_range, "walk_attempts": community.msg_statistics.walk_attempt_count, "walk_success": community.msg_statistics.walk_success_count, "sync_bloom_created": community.sync_bloom_new, "sync_bloom_reused": community.sync_bloom_reuse, "sync_bloom_skipped": community.sync_bloom_skip, "sync_messages_created": community.msg_statistics.created_count, "packets_sent": community.msg_statistics.outgoing_count, "packets_received": community.msg_statistics.total_received_count, "packets_success": community.msg_statistics.success_count, "packets_dropped": community.msg_statistics.drop_count, "packets_delayed_sent": community.msg_statistics.delay_send_count, "packets_delayed_received": community.msg_statistics.delay_received_count, "packets_delayed_success": community.msg_statistics.delay_success_count, "packets_delayed_timeout": community.msg_statistics.delay_timeout_count, "candidates": candidate_count }) return communities_stats
lgpl-3.0
5,846,355,451,444,601,000
47.34188
118
0.62111
false
4.031361
false
false
false
osaddon/cdmi
test/functional/cdmi/test_utils.py
1
3630
# Copyright (c) 2010-2011 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
#from test import get_config
from swift.common.utils import readconf
import httplib
import sys  # needed for the error messages written to sys.stderr below
import time
import json
import base64
import os


def get_config(section_name=None, defaults=None):
    """
    Attempt to get a test config dictionary.

    :param section_name: the section to read (all sections if not defined)
    :param defaults: an optional dictionary namespace of defaults
    """
    config_file = os.environ.get('SWIFT_TEST_CONFIG_FILE',
                                 '/etc/swift/test.conf')
    config = {}
    if defaults is not None:
        config.update(defaults)

    try:
        config = readconf(config_file, section_name)
    except SystemExit:
        if not os.path.exists(config_file):
            print >>sys.stderr, \
                'Unable to read test config %s - file not found' \
                % config_file
        elif not os.access(config_file, os.R_OK):
            print >>sys.stderr, \
                'Unable to read test config %s - permission denied' \
                % config_file
        else:
            print >>sys.stderr, \
                'Unable to read test config %s - section %s not found' \
                % (config_file, section_name)
    return config


def get_auth(auth_host, auth_port, auth_url, user_name, user_key, tenant_name):
    """Authenticate"""
    if auth_url.find('tokens') >= 0:
        """ v2.0 authentication"""
        conn = httplib.HTTPConnection(auth_host, auth_port)
        headers = {'Accept': 'application/json',
                   'Content-Type': 'application/json'}
        body = {}
        body['auth'] = {
            "passwordCredentials": {
                "username": user_name,
                "password": user_key,
            },
            "tenantName": tenant_name
        }
        conn.request('POST', auth_url, json.dumps(body, indent=2), headers)
        res = conn.getresponse()
        if res.status != 200:
            raise Exception('The authentication has failed')
        data = res.read()
        body = json.loads(data)
        token = body.get('access').get('token').get('id')
        endpoints = body.get('access').get('serviceCatalog')
        for endpoint in endpoints:
            if 'object-store' == endpoint.get('type'):
                public_url = endpoint.get('endpoints')[0].get('publicURL')
                parts = public_url.split('/')
                account_id = parts[-1]
                return token, account_id
    else:
        """ try the old way"""
        conn = httplib.HTTPConnection(auth_host, auth_port)
        headers = {'X-Storage-User': tenant_name + ':' + user_name,
                   'X-Storage-Pass': user_key}
        conn.request('GET', auth_url, None, headers)
        res = conn.getresponse()
        if res.status != 200:
            raise Exception('The authentication has failed')
        token = res.getheader('X-Auth-Token')
        public_url = res.getheader('X-Storage-Url')
        parts = public_url.split('/')
        return token, parts[-1]
apache-2.0
5,300,052,521,575,340,000
33.571429
79
0.585675
false
4.148571
true
false
false
amyxchen/openhtf
openhtf/util/functions.py
1
1523
# Copyright 2016 Google Inc. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities for functions.""" import functools import inspect def CallOnce(func): """Decorate a function to only allow it to be called once. Note that it doesn't make sense to only call a function once if it takes arguments (use @functools.lru_cache for that sort of thing), so this only works on callables that take no args. """ argspec = inspect.getargspec(func) if argspec.args or argspec.varargs or argspec.keywords: raise ValueError('Can only decorate functions with no args', func, argspec) @functools.wraps(func) def _Wrapper(): # If we haven't been called yet, actually invoke func and save the result. if not _Wrapper.HasRun(): _Wrapper.MarkAsRun() _Wrapper.return_value = func() return _Wrapper.return_value _Wrapper.has_run = False _Wrapper.HasRun = lambda: _Wrapper.has_run _Wrapper.MarkAsRun = lambda: setattr(_Wrapper, 'has_run', True) return _Wrapper
apache-2.0
8,318,662,720,419,666,000
32.844444
79
0.732108
false
4.029101
false
false
false
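A brief usage sketch for the CallOnce decorator in the openhtf record above, assuming the module is importable as openhtf.util.functions (matching the file path shown); the decorated function and its return value are made up.

from openhtf.util.functions import CallOnce

@CallOnce
def load_config():
    """Pretend-expensive setup; the dict below is made up."""
    print('loading config')
    return {'retries': 3}

first = load_config()    # prints 'loading config' and builds the dict
second = load_config()   # does not print; returns the cached dict
assert first is second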
penguinscontrol/Spinal-Cord-Modeling
Python/morphology_parser.py
1
7351
# -*- coding: utf-8 -*- """ Created on Thu Feb 25 19:14:49 2016 @author: Radu """ from pyparsing import Word, nums, OneOrMore, Keyword, Literal, ZeroOrMore, Optional, Group from string import lowercase def print_for_loop(target_file): def for_parseaction(origString, loc, tokens): put_string = 'for ' + tokens[0] + ' in range(' + tokens[1] + ', ' + tokens[2] + '):\n\t' target_file.write(put_string) return for_parseaction def update_current_section(target_file): def update_cs_parseaction(origString, loc, tokens): global current_section_name current_section_name = 'self.' if isinstance(tokens[0], str): # single section current_section_name += tokens[0] elif isinstance(tokens[0], type(tokens)): current_section_name += tokens[0][0] + '[' for a in range(1,len(tokens[0])): current_section_name += tokens[0][a] current_section_name += ']' put_string = 'h.pt3dclear(sec = ' + current_section_name + ')\n' target_file.write(put_string) return update_cs_parseaction def print_point_add(target_file): def point_add_parseaction(origString, loc, tokens): put_string = 'h.pt3dadd(' for a in range(len(tokens)): if isinstance(tokens[a], str): put_string += tokens[a] elif isinstance(tokens[a], type(tokens)): for b in range(len(tokens[a])): put_string+= tokens[a][b] put_string += ', ' put_string += 'sec = ' + current_section_name + ')\n' target_file.write(put_string) return point_add_parseaction def print_point_style(target_file): def point_style_parseaction(origString, loc, tokens): put_string = 'h.pt3dstyle(' for a in range(len(tokens)): if isinstance(tokens[a], str): put_string += tokens[a] elif isinstance(tokens[a], type(tokens)): for b in range(len(tokens[a])): put_string+= tokens[a][b] put_string += ', ' put_string += 'sec = ' + current_section_name + ')\n' target_file.write(put_string) return point_style_parseaction def print_create(target_file): def create_parseaction(origString, loc, tokens): for a in range(len(tokens)): if isinstance(tokens[a], str): # single section put_string = 'self.' + tokens[a] + ' = h.Section(cell = self)\n' elif isinstance(tokens[a], type(tokens)): put_string = 'self.' + tokens[a][0] + ' = [h.Section(cell = self) for x in range(' + tokens[a][1]\ + ')]\n' target_file.write(put_string) target_file.write('\n') return create_parseaction def connect_output_string(tokens): if isinstance(tokens[0][0], str): # tokens [0][0] is the name of the parent section parent = tokens[0][0] elif isinstance(tokens[0][0], type(tokens)): parent = tokens[0][0][0] + '[' for a in range(1,len(tokens[0][0])): parent += tokens[0][0][a] parent += ']' # tokens [0][1] is the location in the parent where we connect to parent_loc = '' for a in range(len(tokens[0][1])): parent_loc += tokens[0][1][a] if isinstance(tokens[1][0], str): # tokens [0][0] is the name of the child section child = tokens[1][0] elif isinstance(tokens[1][0], type(tokens)): child = tokens[1][0][0] + '[' for a in range(1,len(tokens[1][0])): child += tokens[1][0][a] child += ']' # tokens [1][1] is the location in the child where we connect to child_loc = '' for a in range(len(tokens[1][1])): child_loc += tokens[1][1][a] put_string = 'self.' + parent + '.connect(' + 'self.' 
+ child + ', ' + child_loc + ', ' + parent_loc + ')\n' return put_string def print_connect(target_file): def connect_parseaction(origString, loc, tokens): put_string = connect_output_string(tokens) target_file.write(put_string) return connect_parseaction def print_geom_define(target_file): def geom_define_parseaction(origString, loc, tokens): target_file.write('geom_define\n') target_file.write(tokens[0]) return geom_define_parseaction # Resulting python file filename = 'Mn_geometry_output3.py' global current_section_name current_section_name = '' converted_file = open(filename, 'w') # define lists of characters for a..z and 1..9 uppercase = lowercase.upper() lowercaseplus = lowercase+('_') lowercaseplus = lowercaseplus+(uppercase) nonzero = ''.join([str(i) for i in range(1, 10)]) COMMA = Literal(',') EQUALS = Literal('=') MINUS = Literal('-') PERIOD = Literal('.') LCURL = Literal('{') RCURL = Literal('}') LBRACK = Literal('(') RBRACK = Literal(')') LSQUARE = Literal('[') RSQUARE = Literal(']') PTSCLEAR = Literal('{pt3dclear()').suppress() PTSCLEARNL = Literal('{\npt3dclear()\n').suppress() integer = Word(nums) single_section = Word(lowercaseplus, min = 2) single_section.setResultsName('SINGLE') integer_var = Word(lowercase, exact = 1) double = Group(Optional(MINUS) + integer + Optional(PERIOD + integer)) operand = integer ^ integer_var operator = Word('+-*/', exact=1) unaryoperation = operand binaryoperation = operand + operator + operand operation = unaryoperation ^ binaryoperation array_section = Group(single_section + LSQUARE.suppress() + operation + RSQUARE.suppress()) array_section.setResultsName('ARRAY') section = single_section ^ array_section section_location = Group(section + LBRACK.suppress() + double + RBRACK.suppress()) create = Keyword('create').suppress() + section + ZeroOrMore(COMMA.suppress() + section) create.setParseAction(print_create(converted_file)) connect = Keyword('connect').suppress() + section_location + COMMA.suppress() + section_location connect.setParseAction(print_connect(converted_file)) for_loop = Keyword('for').suppress() + integer_var + EQUALS.suppress() + integer + COMMA.suppress() + integer # NOTE TO FUTURE SELF: for loops can only have one line of code in this implementation for_loop.setParseAction(print_for_loop(converted_file)) point_add = Literal('pt3dadd(').suppress() + double + COMMA.suppress() + double + COMMA.suppress() + double + COMMA.suppress() + double + RBRACK.suppress() point_add.setParseAction(print_point_add(converted_file)) point_style = Literal('pt3dstyle(').suppress() + double + COMMA.suppress() + double + COMMA.suppress() + double + COMMA.suppress() + double + RBRACK.suppress() point_style.setParseAction(print_point_style(converted_file)) geom_define_pre = section + (PTSCLEAR ^ PTSCLEARNL) geom_define_body = OneOrMore(point_add ^ point_style) + RCURL.suppress() geom_define_pre.setParseAction(update_current_section(converted_file)) geom_define = geom_define_pre + geom_define_body expression = (connect ^ for_loop ^ geom_define ^ create) codeblock = OneOrMore(expression) test_str = 'Ia_node[0] {\npt3dclear()\n pt3dadd( 47, 76, 92.5, 3.6) }' #file_to_parse = open('../../tempdata/Ia_geometry') file_to_parse = open('motoneuron_geometry_preparser.txt') tokens = codeblock.parseString(file_to_parse.read()) #tokens = codeblock.parseString(test_str)
gpl-2.0
7,230,308,802,427,356,000
36.697436
161
0.629846
false
3.425443
false
false
false
XandyWang/PythonDemo
baseTest/GuiDemo.py
1
2027
#!/usr/bin/python # -*- coding: utf-8 -*- from Tkinter import * import tkMessageBox import tkFileDialog import xlrd import os class MainPanel(Frame): def __init__(self,master=None): Frame.__init__(self,master) self.pack(expand = 1, fill='both',padx = 5, pady = 5) self.createWidgets() def createWidgets(self): padxPx = 10 padyPx = 10 self.dirLabel = Label(self, text=u'工程目录',font = 18) self.dirLabel.grid(row = 0) self.nameEntry = Entry(self,font = 18,bd = 2, fg = 'red') self.nameEntry.grid(row = 0 , column = 1,columnspan=2) self.quiteButton = Button(self, text=u'选择目录', command=self.selectExcel, relief=GROOVE) self.quiteButton.grid(row = 0 , column = 3) def selectExcel(self): rootDirPath = os.path.expanduser('~') fileTyps = [('xlx / xlsx files', '.xl*'),('all files', '.*')] file = tkFileDialog.askopenfilename(initialdir = rootDirPath , filetypes = fileTyps) # file = u'/home/wangxiaoyang/share/FineOS应用支持的语言表_QL613.xlsx' print file if(len(file) > 0) : data = xlrd.open_workbook(file) sheets = data.sheets(); nTable = len( sheets ) for index in range(nTable) : table = data.sheet_by_index(index) nRows = table.nrows nCols = table.ncols for row in range(nRows) : for col in range(nCols) : print "row_%d col_%d : %s" % ( row , col, table.cell(row,col).value ) rt = Tk() # update window ,must do rt.update() # get screen width and height scnWidth,scnHeight = rt.maxsize() # get current width rWidth = 0.5 * scnWidth # get current height rHeight = 0.5 * scnHeight # now generate configuration information tmpcnf = '%dx%d+%d+%d' % (rWidth, rHeight, (scnWidth - rWidth) / 2, (scnHeight - rHeight) / 2) rt.geometry(tmpcnf) rt.title('Hello GUI') mainPanel = MainPanel(rt) rt.mainloop()
mit
-5,904,888,387,031,046,000
32.830508
94
0.592982
false
3.156646
false
false
false
tktrungna/leetcode
Python/verify-preorder-serialization-binary-tree.py
1
2175
""" QUESTION: One way to serialize a binary tree is to use pre-oder traversal. When we encounter a non-null node, we record the node's value. If it is a null node, we record using a sentinel value such as #. _9_ / \ 3 2 / \ / \ 4 1 # 6 / \ / \ / \ # # # # # # For example, the above binary tree can be serialized to the string "9,3,4,#,#,1,#,#,2,#,6,#,#", where # represents a null node. Given a string of comma separated values, verify whether it is a correct preorder traversal serialization of a binary tree. Find an algorithm without reconstructing the tree. Each comma separated value in the string must be either an integer or a character '#' representing null pointer. You may assume that the input format is always valid, for example it could never contain two consecutive commas such as "1,,3". Example 1: "9,3,4,#,#,1,#,#,2,#,6,#,#" Return true Example 2: "1,#" Return false Example 3: "9,#,#,1" Return false ANSWER: 1) Using stack O(n) 2) """ class Solution(object): def isValidSerialization(self, preorder): """ :type preorder: str :rtype: bool """ st = [] for n in preorder.split(','): st.append(n) while len(st) >= 3 and st[-1] == st[-2] == '#' and st[-3] != '#': st.pop() st.pop() st.pop() st.append('#') return len(st) == 1 and st[-1] == '#' def isValidSerialization_2(self, preorder): diff = 1 for n in preorder.split(','): diff -= 1 if diff < 0: return False if n != '#': diff += 2 return diff == 0 if __name__ == '__main__': print Solution().isValidSerialization("9,3,4,#,#,1,#,#,2,#,6,#,#") print Solution().isValidSerialization("9,#,#,1") print Solution().isValidSerialization("1,#") print Solution().isValidSerialization("") print Solution().isValidSerialization_2("9,3,4,#,#,1,#,#,2,#,6,#,#") print Solution().isValidSerialization_2("9,#,#,1") print Solution().isValidSerialization_2("1,#") print Solution().isValidSerialization_2("1")
mit
3,118,352,650,100,566,000
28.405405
119
0.569655
false
3.530844
false
false
false
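A hand trace of the slot-counting approach used by isValidSerialization_2 in the record above, replayed on the example string from the problem statement: the counter never dips below zero and ends at exactly zero for a valid serialization.

preorder = "9,3,4,#,#,1,#,#,2,#,6,#,#"
slots = 1                       # room for the root
for token in preorder.split(','):
    slots -= 1                  # every token fills one open slot
    if slots < 0:               # more nodes than open slots -> invalid
        break
    if token != '#':
        slots += 2              # an inner node opens two child slots
print(slots == 0)               # True: all slots used up exactly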
AechPro/Machine-Learning
Partners Healthcare/2016 Breast Cancer/dev/ReconNet/util/Intensity_Converter.py
1
3553
import numpy as np from openpyxl import load_workbook from scipy.optimize import fsolve workingDirectory = "C:/Users/Matt/Desktop/abs to conc/1. Abs to Conc (for Matt)" def padConcatenate(matrices,shapes): largestAxis = np.max(shapes) for matrix in matrices: if len(matrix) != largestAxis: for _ in range(abs(largestAxis - len(matrix))): matrix.append([np.nan for __ in range(len(matrix[0]))]) concatenatedMatrix = np.concatenate(matrices,axis=1) return concatenatedMatrix def load_values(directory, workbooks): matrices = [] for entry in workbooks: workbook = load_workbook(''.join([directory, '/', '0. BT474_1 (abs).xlsx'])) sheet = workbook[entry] matrix = [] for row in sheet.rows: matrix.append([]) for cell in row: if cell.value == None: matrix[len(matrix) - 1].append(np.nan) else: matrix[len(matrix) - 1].append(cell.value) matrices.append(matrix) return matrices colors = ["dual", "red", "blue", "uns"] matrices = load_values(workingDirectory,colors) matrixLengths = np.asarray([(len(i[0]), len(i)) for i in matrices]) paddedMatrices = padConcatenate(matrices,matrixLengths) matrices = np.asarray(matrices) cell_bkgd_4_avg = np.nanmean(matrices[-1][:,0]) cell_bkgd_4_SD = np.nanstd(matrices[-1][:,0]) cell_bkgd_6_avg = np.nanmean(matrices[-1][:,1]) cell_bkgd_6_SD = np.nanstd(matrices[-1][:,1]) SD_multi = 1 cell_bkgd_4_cutoff = cell_bkgd_4_avg + SD_multi * cell_bkgd_4_SD cell_bkgd_6_cutoff = cell_bkgd_6_avg + SD_multi * cell_bkgd_6_SD absNoBackground = paddedMatrices.copy() for i in range(0,2,8): absNoBackground[:,i] = paddedMatrices[:,i] - cell_bkgd_4_cutoff absNoBackground[:,i+1] = paddedMatrices[:,i+1] - cell_bkgd_6_cutoff """ % Convert Abs to Conc for i = 1:4 for j = 1:abs_length(i) abs = abs_all_NObkgd(j,i+(i-1):i+(i-1)+1); % abs = abs_all(j,i+(i-1):i+(i-1)+1); if abs(1) ~= 'NaN' x0 = [0,1]; x = fsolve(@(x)abs2conc(x,abs),x0); conc_all(j,i+(i-1):i+(i-1)+1) = x; end end end csvwrite('conc_all.xlsx',conc_all); return function F = abs2conc(x,abs) % fitted curves for red and blue dyes % Y=Y0 + (Plateau-Y0)*(1-exp(-K*x)) % [Red_470, Red_625, Blue_470, Blue_625] HRP 03-20-17 Y0 =[0.04506 0.02659 0.0511 0.0199]; P = [0.719 0.3026 0.2012 0.7079]; K = [3.597 4.145 1.474 4.393]; F(1) = abs(1) - (Y0(1) + (P(1)-Y0(1))*(1-exp(-K(1)*x(1)))) - (Y0(3) + (P(3)-Y0(3))*(1-exp(-K(3)*x(2)))) ; F(2) = abs(2) - (Y0(2) + (P(2)-Y0(2))*(1-exp(-K(2)*x(1)))) - (Y0(4) + (P(4)-Y0(4))*(1-exp(-K(4)*x(2)))) ; return """ def F(x,abs): Y0 = [0.04506,0.02659,0.0511,0.0199] P = [0.719,0.3026,0.2012,0.7079] K = [3.597,4.145,1.474,4.393] out = [0,0] out[0] = abs[0][0] - (Y0[0] + (P[0]-Y0[0])*(1-np.exp(-K[0]*x[0]))) - (Y0[2] + (P[2] - Y0[2])*(1-np.exp(-K[2]*x[1]))) out[1] = abs[0][1] - (Y0[1] + (P[1]-Y0[1])*(1-np.exp(-K[1]*x[0]))) - (Y0[3] + (P[3] - Y0[3])*(1-np.exp(-K[3]*x[1]))) return out for i in range(4): for j in range(len(matrices[i])): abs = absNoBackground[j,i+i-1:i+i+1] if len(abs)>=1: if not np.isnan(abs[0]): x0 = np.asarray([0,1]) x = fsolve(F,x0,args=[abs]) print(x)
apache-2.0
6,532,349,445,452,838,000
35.03125
120
0.532789
false
2.50741
false
false
false
JamesLinEngineer/RKMC
addons/plugin.audio.jambmc/addon.py
1
44439
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright (C) 2013 Tristan Fischer (sphere@dersphere.de) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import xbmcvfs # FIXME: Import form xbmcswift if fixed upstream from xbmcswift2 import Plugin, xbmcgui, NotFoundException, xbmc from resources.lib.api import JamendoApi, ApiError, ConnectionError from resources.lib.geolocate import get_location, QuotaReached from resources.lib.downloader import JamendoDownloader STRINGS = { # Root menu entries 'discover': 30000, 'search': 30001, 'show_tracks': 30002, 'show_albums': 30003, 'show_artists': 30004, 'show_radios': 30005, 'show_playlists': 30006, 'search_tracks': 30007, 'search_albums': 30008, 'search_artists': 30009, 'search_playlists': 30010, 'show_history': 30011, 'show_downloaded_tracks': 30012, 'show_mixtapes': 30013, 'show_featured_tracks': 30014, 'show_user_artists': 30015, 'show_user_albums': 30016, 'show_user_tracks': 30017, 'show_user_account': 30018, 'show_user_playlists': 30019, 'show_near_artists': 30020, 'show_downloaded_albums': 30021, # Misc strings 'page': 30025, 'language': 30026, 'instruments': 30027, 'vartags': 30028, # Context menu 'album_info': 30030, 'song_info': 30031, 'show_tracks_in_this_album': 30032, 'show_albums_by_this_artist': 30033, 'show_similar_tracks': 30034, 'addon_settings': 30035, 'download_track': 30036, 'download_album': 30037, # Dialogs 'search_heading_album': 30040, 'search_heading_artist': 30041, 'search_heading_tracks': 30042, 'search_heading_playlist': 30043, 'no_download_path': 30044, 'want_set_now': 30045, 'choose_download_folder': 30046, 'enter_username': 30047, 'select_user': 30048, 'no_username_set': 30049, 'geolocating': 30050, 'will_send_one_request_to': 30051, 'freegeoip_net': 30052, # Error dialogs 'connection_error': 30060, 'api_error': 30061, 'api_returned': 30062, 'try_again_later': 30063, 'check_network_or': 30064, 'try_again_later': 30065, # Notifications 'download_suceeded': 30070, 'history_empty': 30071, 'downloads_empty': 30072, # Mixtapes 'mixtape_name': 30090, 'delete_mixtape_head': 30091, 'are_you_sure': 30092, 'add_to_new_mixtape': 30093, 'add_to_mixtape_s': 30094, 'del_from_mixtape_s': 30095, 'select_mixtape': 30096, 'add_mixtape': 30097, 'add_del_track_to_mixtape': 30098, 'delete_mixtape': 30099, 'rename_mixtape': 30124, # Sort methods 'sort_method_default': 30100, 'sort_method_buzzrate': 30101, 'sort_method_downloads_week': 30102, 'sort_method_downloads_month': 30103, 'sort_method_downloads_total': 30104, 'sort_method_joindate_asc': 30105, 'sort_method_joindate_desc': 30107, 'sort_method_listens_week': 30108, 'sort_method_listens_month': 30109, 'sort_method_listens_total': 30110, 'sort_method_name': 30111, 'sort_method_popularity_week': 30112, 'sort_method_popularity_month': 30113, 'sort_method_popularity_total': 30114, 'sort_method_releasedate_asc': 30115, 'sort_method_releasedate_desc': 30116, # Tags 'current_tags': 30120, 'tag_type_genres': 
30121, 'tag_type_instruments': 30122, 'tag_type_moods': 30123, } class Plugin_patched(Plugin): def _dispatch(self, path): for rule in self._routes: try: view_func, items = rule.match(path) except NotFoundException: continue self._request.view = view_func.__name__ # added self._request.view_params = items # added listitems = view_func(**items) if not self._end_of_directory and self.handle >= 0: if listitems is None: self.finish(succeeded=False) else: listitems = self.finish(listitems) return listitems raise NotFoundException('No matching view found for %s' % path) plugin = Plugin_patched() api = JamendoApi( client_id='de0f381a', limit=plugin.get_setting('limit', int), image_size=plugin.get_setting( 'image_size', choices=('big', 'medium', 'small') ), ) ########################### Static Views ###################################### @plugin.route('/') def show_root_menu(): fix_xbmc_music_library_view() items = [ {'label': _('discover'), 'path': plugin.url_for(endpoint='show_discover_root'), 'thumbnail': 'DefaultMusicCompilations.png'}, {'label': _('search'), 'path': plugin.url_for(endpoint='show_search_root'), 'thumbnail': 'DefaultMusicVideos.png'}, {'label': _('show_radios'), 'path': plugin.url_for(endpoint='show_radios'), 'thumbnail': 'DefaultMusicGenres.png'}, {'label': _('show_history'), 'path': plugin.url_for(endpoint='show_history'), 'thumbnail': 'DefaultMusicYears.png'}, {'label': _('show_downloaded_tracks'), 'path': plugin.url_for(endpoint='show_downloaded_tracks'), 'thumbnail': 'DefaultMusicPlaylists.png'}, {'label': _('show_downloaded_albums'), 'path': plugin.url_for(endpoint='show_downloaded_albums'), 'thumbnail': 'DefaultMusicPlaylists.png'}, {'label': _('show_mixtapes'), 'path': plugin.url_for(endpoint='show_mixtapes'), 'thumbnail': 'DefaultMusicSongs.png'}, {'label': _('show_featured_tracks'), 'path': plugin.url_for(endpoint='show_featured_tracks'), 'thumbnail': 'DefaultMusicAlbums.png'}, {'label': _('show_user_account'), 'path': plugin.url_for(endpoint='show_user_root'), 'thumbnail': 'DefaultAddonMusic.png'}, ] return add_static_items(items) @plugin.route('/search/') def show_search_root(): items = [ {'label': _('search_tracks'), 'path': plugin.url_for(endpoint='search_tracks'), 'thumbnail': 'DefaultMusicSongs.png'}, {'label': _('search_albums'), 'path': plugin.url_for(endpoint='search_albums'), 'thumbnail': 'DefaultMusicAlbums.png'}, {'label': _('search_artists'), 'path': plugin.url_for(endpoint='search_artists'), 'thumbnail': 'DefaultMusicArtists.png'}, {'label': _('search_playlists'), 'path': plugin.url_for(endpoint='search_playlists'), 'thumbnail': 'DefaultMusicPlaylists.png'}, ] return add_static_items(items) @plugin.route('/discover/') def show_discover_root(): items = [ {'label': _('show_tracks'), 'path': plugin.url_for(endpoint='show_tracks'), 'thumbnail': 'DefaultMusicSongs.png'}, {'label': _('show_albums'), 'path': plugin.url_for(endpoint='show_albums'), 'thumbnail': 'DefaultMusicAlbums.png'}, {'label': _('show_artists'), 'path': plugin.url_for(endpoint='show_artists'), 'thumbnail': 'DefaultMusicArtists.png'}, {'label': _('show_playlists'), 'path': plugin.url_for(endpoint='show_playlists'), 'thumbnail': 'DefaultMusicPlaylists.png'}, {'label': _('show_near_artists'), 'path': plugin.url_for(endpoint='show_near_artists'), 'thumbnail': 'DefaultMusicArtists.png'}, ] return add_static_items(items) @plugin.route('/user/') def show_user_root(): items = [ {'label': _('show_user_artists'), 'path': plugin.url_for(endpoint='show_user_artists'), 'thumbnail': 
'DefaultMusicArtists.png'}, {'label': _('show_user_albums'), 'path': plugin.url_for(endpoint='show_user_albums'), 'thumbnail': 'DefaultMusicAlbums.png'}, {'label': _('show_user_tracks'), 'path': plugin.url_for(endpoint='show_user_tracks'), 'thumbnail': 'DefaultMusicSongs.png'}, {'label': _('show_user_playlists'), 'path': plugin.url_for(endpoint='show_user_playlists'), 'thumbnail': 'DefaultMusicPlaylists.png'}, ] return add_static_items(items) ########################### Dynamic Views ##################################### @plugin.route('/albums/') def show_albums(): page = int(get_args('page', 1)) sort_method = get_args('sort_method', 'popularity_month') albums = get_cached(api.get_albums, page=page, sort_method=sort_method) items = format_albums(albums) items.append(get_sort_method_switcher_item('albums', sort_method)) items.extend(get_page_switcher_items(len(items))) return add_items(items) @plugin.route('/albums/<artist_id>/') def show_albums_by_artist(artist_id): page = int(get_args('page', 1)) albums = get_cached(api.get_albums, page=page, artist_id=artist_id) items = format_albums(albums) items.extend(get_page_switcher_items(len(items))) return add_items(items) @plugin.route('/artists/') def show_artists(): page = int(get_args('page', 1)) sort_method = get_args('sort_method', 'popularity_month') artists = get_cached(api.get_artists, page=page, sort_method=sort_method) items = format_artists(artists) items.append(get_sort_method_switcher_item('artists', sort_method)) items.extend(get_page_switcher_items(len(items))) return add_items(items) @plugin.route('/artists/near/') def show_near_artists(): lat_long = plugin.get_setting('lat_long', str) while not lat_long: confirmed = xbmcgui.Dialog().yesno( _('geolocating'), _('will_send_one_request_to'), _('freegeoip_net'), _('are_you_sure') ) if not confirmed: return try: location = get_location() except QuotaReached: plugin.notify(_('try_again_later')) return lat_long = '%s_%s' % (location['latitude'], location['longitude']) plugin.set_setting('lat_long', lat_long) artists = get_cached(api.get_artists_by_location, coords=lat_long) items = format_artists_location(artists) return add_items(items) @plugin.route('/playlists/') def show_playlists(): page = int(get_args('page', 1)) playlists = get_cached(api.get_playlists, page=page) items = format_playlists(playlists) items.extend(get_page_switcher_items(len(items))) return add_items(items, same_cover=True) @plugin.route('/radios/') def show_radios(): page = int(get_args('page', 1)) radios = get_cached(api.get_radios, page=page) items = format_radios(radios) items.extend(get_page_switcher_items(len(items))) return add_items(items) @plugin.route('/tracks/') def show_tracks(): page = int(get_args('page', 1)) sort_method = get_args('sort_method', 'popularity_month') tags = get_args('tags') tracks = get_cached( api.get_tracks, page=page, sort_method=sort_method, tags=tags ) items = format_tracks(tracks) items.append(get_sort_method_switcher_item('tracks', sort_method)) items.append(get_tag_filter_item()) items.extend(get_page_switcher_items(len(items))) return add_items(items) @plugin.route('/tracks/album/<album_id>/') def show_tracks_in_album(album_id): tracks = get_cached(api.get_tracks, album_id=album_id) items = format_tracks(tracks) items.extend(get_page_switcher_items(len(items))) return add_items(items, same_cover=True) @plugin.route('/tracks/featured/') def show_featured_tracks(): page = int(get_args('page', 1)) sort_method = 'releasedate_desc' tracks = get_cached( api.get_tracks, page=page, 
sort_method=sort_method, featured=True ) items = format_tracks(tracks) items.extend(get_page_switcher_items(len(items))) return add_items(items) @plugin.route('/tracks/playlist/<playlist_id>/') def show_tracks_in_playlist(playlist_id): playlist, tracks = get_cached( api.get_playlist_tracks, playlist_id=playlist_id ) items = format_playlist_tracks(playlist, tracks) items.extend(get_page_switcher_items(len(items))) return add_items(items, same_cover=True) @plugin.route('/tracks/similar/<track_id>/') def show_similar_tracks(track_id): page = int(get_args('page', 1)) tracks = get_cached(api.get_similar_tracks, track_id=track_id, page=page) items = format_similar_tracks(tracks) items.extend(get_page_switcher_items(len(items))) return add_items(items) ############################# Search Views #################################### @plugin.route('/albums/search/') def search_albums(): query = get_args('input') or plugin.keyboard( heading=_('search_heading_album') ) if query: albums = get_cached(api.get_albums, search_terms=query) items = format_albums(albums) return add_items(items) @plugin.route('/artists/search/') def search_artists(): query = get_args('input') or plugin.keyboard( heading=_('search_heading_artist') ) if query: artists = api.get_artists(search_terms=query) items = format_artists(artists) return add_items(items) @plugin.route('/playlists/search/') def search_playlists(): query = get_args('input') or plugin.keyboard( heading=_('search_heading_playlist') ) if query: playlists = api.get_playlists(search_terms=query) items = format_playlists(playlists) return add_items(items, same_cover=True) @plugin.route('/tracks/search/') def search_tracks(): query = get_args('input') or plugin.keyboard( heading=_('search_heading_tracks') ) if query: tracks = api.search_tracks(search_terms=query) items = format_tracks(tracks) return add_items(items) ############################ Jamendo Views #################################### @plugin.route('/user/albums/') def show_user_albums(): user_id = get_user_account() if user_id: page = int(get_args('page', 1)) albums = api.get_user_albums(user_id=user_id, page=page) items = format_albums(albums) items.extend(get_page_switcher_items(len(items))) return add_items(items) @plugin.route('/user/artists/') def show_user_artists(): user_id = get_user_account() if user_id: page = int(get_args('page', 1)) artists = api.get_user_artists(user_id=user_id, page=page) items = format_artists(artists) items.extend(get_page_switcher_items(len(items))) return add_items(items) @plugin.route('/user/playlists/') def show_user_playlists(): user_id = get_user_account() if user_id: playlists = api.get_playlists(user_id=user_id) items = format_playlists(playlists) return add_items(items, same_cover=True) @plugin.route('/user/set_user_account/') def set_user_account(): query = get_args('input') or plugin.keyboard( heading=_('enter_username') ) if query: users = api.get_users(search_terms=query) if users: selected = xbmcgui.Dialog().select( _('select_user'), [u['name'] for u in users] ) if selected >= 0: user = users[selected] plugin.set_setting('user_name', user['name']) plugin.set_setting('user_id', user['id']) @plugin.route('/user/tracks/') def show_user_tracks(): user_id = get_user_account() if user_id: page = int(get_args('page', 1)) tracks = api.get_user_tracks(user_id=user_id, page=page) items = format_tracks(tracks) items.extend(get_page_switcher_items(len(items))) return add_items(items) ############################## Downloads ###################################### 
@plugin.route('/downloads/albums/') def show_downloaded_albums(): downloads = plugin.get_storage('downloaded_albums') if downloads.items(): albums = [t['data'] for t in downloads.itervalues()] items = format_downloaded_albums(albums) return add_items(items) plugin.notify(_('downloads_empty')) @plugin.route('/downloads/albums/<album_id>/') def show_downloaded_album_tracks(album_id): downloads = plugin.get_storage('downloaded_albums') album = downloads[album_id] tracks = [t['data'] for t in album['tracks'].itervalues()] items = format_tracks(tracks) return add_items(items, same_cover=True) @plugin.route('/downloads/tracks/') def show_downloaded_tracks(): downloads = plugin.get_storage('downloaded_tracks') if downloads.items(): tracks = [t['data'] for t in downloads.itervalues()] items = format_tracks(tracks) return add_items(items) plugin.notify(_('downloads_empty')) ############################### History ####################################### @plugin.route('/history/') def show_history(): history = plugin.get_storage('history') tracks = history.get('items', []) if tracks: items = format_tracks(reversed(tracks)) return add_items(items) plugin.notify(_('history_empty')) ############################## Mixtapes ####################################### @plugin.route('/mixtapes/') def show_mixtapes(): mixtapes = plugin.get_storage('mixtapes') items = format_mixtapes(mixtapes) items.append(get_add_mixtape_item()) return add_static_items(items) @plugin.route('/mixtapes/add') def add_mixtape(return_name=False): name = get_args('input') or plugin.keyboard( heading=_('mixtape_name') ) if name: mixtapes = plugin.get_storage('mixtapes') if not name in mixtapes: mixtapes[name] = [] mixtapes.sync() if return_name: return name @plugin.route('/mixtapes/del/<mixtape_id>') def del_mixtape(mixtape_id): mixtapes = plugin.get_storage('mixtapes') confirmed = xbmcgui.Dialog().yesno( _('delete_mixtape_head'), _('are_you_sure') ) if confirmed and mixtape_id in mixtapes: del mixtapes[mixtape_id] mixtapes.sync() _refresh_view() @plugin.route('/mixtapes/rename/<mixtape_id>') def rename_mixtape(mixtape_id): mixtapes = plugin.get_storage('mixtapes') mixtape = mixtapes.pop(mixtape_id) new_mixtape_id = plugin.keyboard( heading=_('mixtape_name'), default=mixtape_id ) mixtapes[new_mixtape_id] = mixtape mixtapes.sync() _refresh_view() @plugin.route('/mixtapes/add/<track_id>') def add_del_track_to_mixtape(track_id): mixtapes = plugin.get_storage('mixtapes') items = [{ 'label':_('add_to_new_mixtape'), }] for (mixtape_id, mixtape) in mixtapes.iteritems(): track_ids = [t['id'] for t in mixtape] if track_id in track_ids: items.append({ 'label': _('del_from_mixtape_s') % mixtape_id.decode('utf-8'), 'action': 'del', 'mixtape_id': mixtape_id }) else: items.append({ 'label': _('add_to_mixtape_s') % mixtape_id.decode('utf-8'), 'action': 'add', 'mixtape_id': mixtape_id }) selected = xbmcgui.Dialog().select( _('select_mixtape'), [i['label'] for i in items] ) if selected == 0: mixtape_id = add_mixtape(return_name=True) if mixtape_id: add_track_to_mixtape(mixtape_id, track_id) elif selected > 0: action = items[selected]['action'] mixtape_id = items[selected]['mixtape_id'] if action == 'add': add_track_to_mixtape(mixtape_id, track_id) elif action == 'del': del_track_from_mixtape(mixtape_id, track_id) @plugin.route('/mixtapes/<mixtape_id>/') def show_mixtape(mixtape_id): mixtapes = plugin.get_storage('mixtapes') tracks = mixtapes[mixtape_id] items = format_tracks(tracks) return add_items(items) 
@plugin.route('/mixtapes/<mixtape_id>/add/<track_id>') def add_track_to_mixtape(mixtape_id, track_id): mixtapes = plugin.get_storage('mixtapes') track = get_cached(api.get_track, track_id) mixtapes[mixtape_id].append(track) mixtapes.sync() @plugin.route('/mixtapes/<mixtape_id>/del/<track_id>') def del_track_from_mixtape(mixtape_id, track_id): mixtapes = plugin.get_storage('mixtapes') mixtapes[mixtape_id] = [ t for t in mixtapes[mixtape_id] if not t['id'] == track_id ] mixtapes.sync() ########################### Callback Views #################################### @plugin.route('/sort_methods/<entity>/') def show_sort_methods(entity): sort_methods = api.get_sort_methods(entity) items = format_sort_methods(sort_methods, entity) return add_static_items(items) @plugin.route('/tracks/tags/') def show_tags(): tags = api.get_tags() items = format_tags(tags) return add_static_items(items) ############################ Action Views ##################################### @plugin.route('/download/track/<track_id>') def download_track(track_id): download_path = get_download_path('tracks_download_path') if not download_path: return show_progress = plugin.get_setting('show_track_download_progress', bool) downloader = JamendoDownloader(api, download_path, show_progress) formats = ('mp3', 'ogg', 'flac') audioformat = plugin.get_setting('download_format', choices=formats) include_cover = plugin.get_setting('download_track_cover', bool) tracks = downloader.download_tracks([track_id], audioformat, include_cover) if tracks: downloaded_tracks = plugin.get_storage('downloaded_tracks') downloaded_tracks.update(tracks) downloaded_tracks.sync() plugin.notify(msg=_('download_suceeded')) @plugin.route('/download/album/<album_id>') def download_album(album_id): download_path = get_download_path('albums_download_path') if not download_path: return show_progress = plugin.get_setting('show_album_download_progress', bool) downloader = JamendoDownloader(api, download_path, show_progress) formats = ('mp3', 'ogg', 'flac') audioformat = plugin.get_setting('download_format', choices=formats) include_cover = plugin.get_setting('download_album_cover', bool) album = downloader.download_album(album_id, audioformat, include_cover) if album: downloaded_albums = plugin.get_storage('downloaded_albums') downloaded_albums.update(album) downloaded_albums.sync() plugin.notify(msg=_('download_suceeded')) @plugin.route('/play/radio/<radio_id>') def play_radio(radio_id): stream_url = api.get_radio_url(radio_id) return plugin.set_resolved_url(stream_url) @plugin.route('/play/track/<track_id>') def play_track(track_id): add_track_to_history(track_id) track_url = get_downloaded_track(track_id) if not track_url: formats = ('mp3', 'ogg') audioformat = plugin.get_setting('playback_format', choices=formats) track_url = api.get_track_url(track_id, audioformat) return plugin.set_resolved_url(track_url) @plugin.route('/settings') def open_settings(): plugin.open_settings() ############################# Formaters ####################################### def format_albums(albums): plugin.set_content('albums') items = [{ 'label': u'%s - %s' % (album['artist_name'], album['name']), 'info': { 'count': i + 2, 'artist': album['artist_name'], 'album': album['name'], 'year': int(album.get('releasedate', '0-0-0').split('-')[0]), }, 'context_menu': context_menu_album( artist_id=album['artist_id'], album_id=album['id'], ), 'replace_context_menu': True, 'thumbnail': album['image'], 'path': plugin.url_for( endpoint='show_tracks_in_album', album_id=album['id'] ) } for 
i, album in enumerate(albums)] return items def format_artists(artists): plugin.set_content('artists') items = [{ 'label': artist['name'], 'info': { 'count': i + 2, 'artist': artist['name'], }, 'context_menu': context_menu_artist(artist['id']), 'replace_context_menu': True, 'thumbnail': get_artist_image(artist['image']), 'path': plugin.url_for( endpoint='show_albums_by_artist', artist_id=artist['id'], ) } for i, artist in enumerate(artists)] return items def format_artists_location(artists): plugin.set_content('artists') items = [{ 'label': u'%s (%s - %s)' % ( artist['name'], artist['locations'][0]['country'], artist['locations'][0]['city'], ), 'info': { 'count': i + 2, 'artist': artist['name'], }, 'context_menu': context_menu_artist(artist['id']), 'replace_context_menu': True, 'thumbnail': get_artist_image(artist['image']), 'path': plugin.url_for( endpoint='show_albums_by_artist', artist_id=artist['id'], ) } for i, artist in enumerate(artists)] return items def format_comment(musicinfo): return '[CR]'.join(( '[B]%s[/B]: %s' % ( _('language'), musicinfo['lang'] ), '[B]%s[/B]: %s' % ( _('instruments'), ', '.join(musicinfo['tags']['instruments']) ), '[B]%s[/B]: %s' % ( _('vartags'), ', '.join(musicinfo['tags']['vartags']) ), )) def format_downloaded_albums(albums): plugin.set_content('albums') items = [{ 'label': u'%s - %s' % (album['artist_name'], album['name']), 'info': { 'count': i + 2, 'artist': album['artist_name'], 'album': album['name'], 'year': int(album.get('releasedate', '0-0-0').split('-')[0]), }, 'context_menu': context_menu_album( artist_id=album['artist_id'], album_id=album['id'], ), 'replace_context_menu': True, 'thumbnail': album['image'], 'path': plugin.url_for( endpoint='show_downloaded_album_tracks', album_id=album['id'] ) } for i, album in enumerate(albums)] return items def format_mixtapes(mixtapes): items = [{ 'label': mixtape_id, 'info': { 'count': i + 1, }, 'context_menu': context_menu_mixtape( mixtape_id=mixtape_id, ), 'replace_context_menu': True, 'path': plugin.url_for( endpoint='show_mixtape', mixtape_id=mixtape_id ) } for i, (mixtape_id, mixtape) in enumerate(mixtapes.iteritems())] return items def format_playlists(playlists): plugin.set_content('music') items = [{ 'label': u'%s (%s)' % (playlist['name'], playlist['user_name']), 'info': { 'count': i + 2, 'artist': playlist['user_name'], 'album': playlist['name'], 'year': int(playlist.get('creationdate', '0-0-0').split('-')[0]), }, 'context_menu': context_menu_empty(), 'replace_context_menu': True, 'path': plugin.url_for( endpoint='show_tracks_in_playlist', playlist_id=playlist['id'] ) } for i, playlist in enumerate(playlists)] return items def format_playlist_tracks(playlist, tracks): plugin.set_content('songs') items = [{ 'label': track['name'], 'info': { 'count': i + 2, 'tracknumber': int(track['position']), 'duration': track['duration'], 'title': track['name'], }, 'context_menu': context_menu_track( artist_id=track['artist_id'], track_id=track['id'], album_id=track['album_id'], ), 'replace_context_menu': True, 'is_playable': True, 'path': plugin.url_for( endpoint='play_track', track_id=track['id'] ) } for i, track in enumerate(tracks)] return items def format_radios(radios): plugin.set_content('music') items = [{ 'label': radio['dispname'], 'info': { 'count': i + 2, }, 'context_menu': context_menu_empty(), 'replace_context_menu': True, 'thumbnail': radio['image'], 'is_playable': True, 'path': plugin.url_for( endpoint='play_radio', radio_id=radio['id'], ) } for i, radio in enumerate(radios)] return items def 
format_similar_tracks(tracks): plugin.set_content('songs') items = [{ 'label': u'%s - %s (%s)' % ( track['artist_name'], track['name'], track['album_name'] ), 'info': { 'count': i + 2, 'title': track['name'], 'album': track['album_name'], 'duration': track['duration'], 'artist': track['artist_name'], 'year': int(track.get('releasedate', '0-0-0').split('-')[0]), }, 'context_menu': context_menu_track( artist_id=track['artist_id'], track_id=track['id'], album_id=track['album_id'] ), 'replace_context_menu': True, 'is_playable': True, 'thumbnail': track['album_image'], 'path': plugin.url_for( endpoint='play_track', track_id=track['id'] ) } for i, track in enumerate(tracks)] return items def format_sort_methods(sort_methods, entity): original_params = plugin.request.view_params extra_params = {} current_method = get_args('sort_method') if 'tags' in plugin.request.args: extra_params['tags'] = get_args('tags') items = [{ 'label': ( u'[B]%s[/B]' if sort_method == current_method else u'%s' ) % _('sort_method_%s' % sort_method), 'thumbnail': 'DefaultMusicPlugins.png', 'info': { 'count': i, }, 'context_menu': context_menu_empty(), 'replace_context_menu': True, 'path': plugin.url_for( endpoint='show_%s' % entity, is_update='true', **dict(original_params, sort_method=sort_method, **extra_params) ) } for i, sort_method in enumerate(sort_methods)] return items def format_tags(tags): original_params = plugin.request.view_params extra_params = {} current_tags = [t for t in get_args('tags', '').split('+') if t] if 'sort_method' in plugin.request.args: extra_params['sort_method'] = get_args('sort_method') items = [] for tag_type, type_tags in tags: for i, tag in enumerate(type_tags): tag_str = u'%s: %s' % ( _('tag_type_%s' % tag_type), tag.capitalize() ) if tag in current_tags: new_tags = '+'.join((t for t in current_tags if not t == tag)) extra_params['tags'] = new_tags label = u'[B]%s[/B]' % tag_str else: new_tags = '+'.join(([tag] + current_tags)) extra_params['tags'] = new_tags label = u'%s' % tag_str items.append({ 'label': label, 'thumbnail': 'DefaultMusicPlugins.png', 'info': { 'count': i, }, 'context_menu': context_menu_empty(), 'replace_context_menu': True, 'path': plugin.url_for( endpoint='show_tracks', is_update='true', **dict(original_params, **extra_params) ) }) return items def format_tracks(tracks): plugin.set_content('songs') items = [{ 'label': u'%s - %s (%s)' % ( track['artist_name'], track['name'], track['album_name'] ), 'info': { 'count': i + 2, 'title': track['name'], 'album': track['album_name'], 'duration': track['duration'], 'artist': track['artist_name'], 'genre': u', '.join(track['musicinfo']['tags']['genres']), 'comment': format_comment(track['musicinfo']), 'year': int(track.get('releasedate', '0-0-0').split('-')[0]), }, 'context_menu': context_menu_track( artist_id=track['artist_id'], track_id=track['id'], album_id=track['album_id'] ), 'replace_context_menu': True, 'is_playable': True, 'thumbnail': track['album_image'], 'path': plugin.url_for( endpoint='play_track', track_id=track['id'] ) } for i, track in enumerate(tracks)] return items ############################### Items ######################################### def get_add_mixtape_item(): return { 'label': u'[B]%s[/B]' % _('add_mixtape'), 'context_menu': context_menu_empty(), 'replace_context_menu': True, 'info': { 'count': 0, }, 'path': plugin.url_for( endpoint='add_mixtape', ), } def get_page_switcher_items(items_len): current_page = int(get_args('page', 1)) has_next_page = items_len >= api.current_limit has_previous_page = 
current_page > 1 original_params = plugin.request.view_params extra_params = {} if 'sort_method' in plugin.request.args: extra_params['sort_method'] = get_args('sort_method') if 'tags' in plugin.request.args: extra_params['tags'] = get_args('tags', '') items = [] if has_next_page: next_page = int(current_page) + 1 extra_params['page'] = str(next_page) items.append({ 'label': u'>> %s %d >>' % (_('page'), next_page), 'context_menu': context_menu_empty(), 'replace_context_menu': True, 'info': { 'count': items_len + 2, }, 'path': plugin.url_for( endpoint=plugin.request.view, is_update='true', **dict(original_params, **extra_params) ) }) if has_previous_page: previous_page = int(current_page) - 1 extra_params['page'] = str(previous_page) items.append({ 'label': u'<< %s %d <<' % (_('page'), previous_page), 'context_menu': context_menu_empty(), 'replace_context_menu': True, 'info': { 'count': 1, }, 'path': plugin.url_for( endpoint=plugin.request.view, is_update='true', **dict(original_params, **extra_params) ) }) return items def get_sort_method_switcher_item(entity, current_method='default'): original_params = plugin.request.view_params extra_params = {} extra_params['entity'] = entity extra_params['sort_method'] = current_method if 'tags' in plugin.request.args: extra_params['tags'] = get_args('tags') return { 'label': u'[B][[ %s ]][/B]' % _('sort_method_%s' % current_method), 'thumbnail': 'DefaultMusicPlugins.png', 'context_menu': context_menu_empty(), 'replace_context_menu': True, 'info': { 'count': 0, }, 'path': plugin.url_for( endpoint='show_sort_methods', is_update='true', **dict(original_params, **extra_params) ), } def get_tag_filter_item(): current_tags = [t for t in get_args('tags', '').split('+') if t] extra_params = {} if 'sort_method' in plugin.request.args: extra_params['sort_method'] = get_args('sort_method') extra_params['tags'] = get_args('tags', '') return { 'label': u'[B][[ %s: %s ]][/B]' % ( _('current_tags'), len(current_tags) ), 'thumbnail': 'DefaultMusicPlugins.png', 'context_menu': context_menu_empty(), 'replace_context_menu': True, 'info': { 'count': 0, }, 'path': plugin.url_for( endpoint='show_tags', is_update='true', **extra_params ), } ############################ Item-Adders ###################################### def add_items(items, same_cover=False): is_update = 'is_update' in plugin.request.args finish_kwargs = { 'update_listing': is_update, 'sort_methods': ('playlist_order', ) } if plugin.get_setting('force_viewmode', bool) and not same_cover: finish_kwargs['view_mode'] = 'thumbnail' elif plugin.get_setting('force_viewmode_tracks', bool) and same_cover: finish_kwargs['view_mode'] = 'thumbnail' return plugin.finish(items, **finish_kwargs) def add_static_items(items): for item in items: if not 'context_menu' in item: item['context_menu'] = context_menu_empty() item['replace_context_menu'] = True if 'is_update' in plugin.request.args: return plugin.finish(items, update_listing=True) else: return plugin.finish(items) ############################ Context-Menu ##################################### def context_menu_album(artist_id, album_id): return [ (_('album_info'), _action('info')), (_('download_album'), _run(endpoint='download_album', album_id=album_id)), (_('show_tracks_in_this_album'), _view(endpoint='show_tracks_in_album', album_id=album_id)), (_('show_albums_by_this_artist'), _view(endpoint='show_albums_by_artist', artist_id=artist_id)), (_('addon_settings'), _run(endpoint='open_settings')), ] def context_menu_artist(artist_id): return [ 
(_('show_albums_by_this_artist'), _view(endpoint='show_albums_by_artist', artist_id=artist_id)), (_('addon_settings'), _run(endpoint='open_settings')), ] def context_menu_empty(): return [ (_('addon_settings'), _run(endpoint='open_settings')), ] def context_menu_mixtape(mixtape_id): return [ (_('rename_mixtape'), _run(endpoint='rename_mixtape', mixtape_id=mixtape_id)), (_('delete_mixtape'), _run(endpoint='del_mixtape', mixtape_id=mixtape_id)), (_('addon_settings'), _run(endpoint='open_settings')), ] def context_menu_track(artist_id, track_id, album_id): return [ (_('song_info'), _action('info')), (_('download_track'), _run(endpoint='download_track', track_id=track_id)), (_('add_del_track_to_mixtape'), _run(endpoint='add_del_track_to_mixtape', track_id=track_id)), (_('show_albums_by_this_artist'), _view(endpoint='show_albums_by_artist', artist_id=artist_id)), (_('show_similar_tracks'), _view(endpoint='show_similar_tracks', track_id=track_id)), (_('show_tracks_in_this_album'), _view(endpoint='show_tracks_in_album', album_id=album_id)), (_('addon_settings'), _run(endpoint='open_settings')), ] ############################## Callers ######################################## def _action(arg): return 'XBMC.Action(%s)' % arg def _run(*args, **kwargs): return 'XBMC.RunPlugin(%s)' % plugin.url_for(*args, **kwargs) def _view(*args, **kwargs): return 'XBMC.Container.Update(%s)' % plugin.url_for(*args, **kwargs) def _refresh_view(): xbmc.executebuiltin('Container.Refresh') ############################## Helpers ######################################## def get_args(arg_name, default=None): return plugin.request.args.get(arg_name, [default])[0] def get_cached(func, *args, **kwargs): @plugin.cached(kwargs.pop('TTL', 1440)) def wrap(func_name, *args, **kwargs): return func(*args, **kwargs) return wrap(func.__name__, *args, **kwargs) def get_download_path(setting_name): download_path = plugin.get_setting(setting_name, str) while not download_path: try_again = xbmcgui.Dialog().yesno( _('no_download_path'), _('want_set_now') ) if not try_again: return download_path = xbmcgui.Dialog().browse( 3, # ShowAndGetWriteableDirectory _('choose_download_folder'), 'music', ) plugin.set_setting(setting_name, download_path) return download_path def get_downloaded_track(track_id): tracks = plugin.get_storage('downloaded_tracks') if track_id in tracks: if xbmcvfs.exists(tracks[track_id]['file']): log('Track is already downloaded, playing local') return tracks[track_id]['file'] albums = plugin.get_storage('downloaded_albums') for album in albums.itervalues(): if track_id in album['tracks']: if xbmcvfs.exists(album['tracks'][track_id]['file']): log('Album is already downloaded, playing local') return album['tracks'][track_id]['file'] def get_artist_image(url): if url: # fix whitespace in some image urls return url.replace(' ', '%20') else: return 'DefaultActor.png' def get_user_account(): user_id = plugin.get_setting('user_id', str) while not user_id: try_again = xbmcgui.Dialog().yesno( _('no_username_set'), _('want_set_now') ) if not try_again: return set_user_account() user_id = plugin.get_setting('user_id', str) return user_id def add_track_to_history(track_id): history = plugin.get_storage('history') history_limit = plugin.get_setting('history_limit', int) if not 'items' in history: history['items'] = [] if not track_id in [t['id'] for t in history['items']]: track = get_cached(api.get_track, track_id) else: track = [t for t in history['items'] if t['id'] == track_id][0] history['items'] = [ t for t in history['items'] if 
not t['id'] == track_id ] history['items'].append(track) if history_limit: while len(history['items']) > history_limit: history['items'].pop(0) history.sync() def log(text): plugin.log.info(text) def fix_xbmc_music_library_view(): # avoid context menu replacing bug by # switching window from musiclibrary to musicfiles if xbmcgui.getCurrentWindowId() == 10502: url = plugin.url_for(endpoint='show_root_menu') xbmc.executebuiltin('ReplaceWindow(MusicFiles, %s)' % url) def _(string_id): if string_id in STRINGS: return plugin.get_string(STRINGS[string_id]) else: log('String is missing: %s' % string_id) return string_id if __name__ == '__main__': try: plugin.run() except ApiError, message: xbmcgui.Dialog().ok( _('api_error'), _('api_returned'), unicode(message), _('try_again_later') ) except ConnectionError: xbmcgui.Dialog().ok( _('connection_error'), '', _('check_network_or'), _('try_again_later') )
gpl-2.0
24,996,541,849,658,104
30.764832
79
0.560769
false
3.543497
false
false
false
jbalogh/airflow
airflow/models.py
1
73231
import copy from datetime import datetime, timedelta import getpass import imp import jinja2 import json import logging import os import dill import re import signal import socket import sys from sqlalchemy import ( Column, Integer, String, DateTime, Text, Boolean, ForeignKey, PickleType, Index,) from sqlalchemy import case, func, or_ from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.dialects.mysql import LONGTEXT from sqlalchemy.orm import relationship from airflow import settings, utils from airflow.executors import DEFAULT_EXECUTOR, LocalExecutor from airflow.configuration import conf from airflow.utils import ( AirflowException, State, apply_defaults, provide_session) Base = declarative_base() ID_LEN = 250 SQL_ALCHEMY_CONN = conf.get('core', 'SQL_ALCHEMY_CONN') DAGS_FOLDER = os.path.expanduser(conf.get('core', 'DAGS_FOLDER')) if 'mysql' in SQL_ALCHEMY_CONN: LongText = LONGTEXT else: LongText = Text def clear_task_instances(tis, session): ''' Clears a set of task instances, but makes sure the running ones get killed. ''' job_ids = [] for ti in tis: if ti.state == State.RUNNING: if ti.job_id: ti.state = State.SHUTDOWN job_ids.append(ti.job_id) else: session.delete(ti) if job_ids: from airflow.jobs import BaseJob as BJ # HA! for job in session.query(BJ).filter(BJ.id.in_(job_ids)).all(): job.state = State.SHUTDOWN class DagBag(object): """ A dagbag is a collection of dags, parsed out of a folder tree and has high level configuration settings, like what database to use as a backend and what executor to use to fire off tasks. This makes it easier to run distinct environments for say production and development, tests, or for different teams or security profiles. What would have been system level settings are now dagbag level so that one system can run multiple, independent settings sets. 
:param dag_folder: the folder to scan to find DAGs :type dag_folder: str :param executor: the executor to use when executing task instances in this DagBag :param include_examples: whether to include the examples that ship with airflow or not :type include_examples: bool :param sync_to_db: whether to sync the properties of the DAGs to the metadata DB while finding them, typically should be done by the scheduler job only :type sync_to_db: bool """ def __init__( self, dag_folder=None, executor=DEFAULT_EXECUTOR, include_examples=conf.getboolean('core', 'LOAD_EXAMPLES'), sync_to_db=False): dag_folder = dag_folder or DAGS_FOLDER logging.info("Filling up the DagBag from " + dag_folder) self.dag_folder = dag_folder self.dags = {} self.sync_to_db = sync_to_db self.file_last_changed = {} self.executor = executor self.collect_dags(dag_folder) if include_examples: example_dag_folder = os.path.join( os.path.dirname(__file__), 'example_dags') self.collect_dags(example_dag_folder) if sync_to_db: self.deactivate_inactive_dags() def get_dag(self, dag_id): """ Gets the DAG out of the dictionary, and refreshes it if expired """ if dag_id in self.dags: dag = self.dags[dag_id] if dag.is_subdag: orm_dag = DagModel.get_current(dag.parent_dag.dag_id) else: orm_dag = DagModel.get_current(dag_id) if orm_dag and dag.last_loaded < ( orm_dag.last_expired or datetime(2100, 1, 1)): self.process_file( filepath=orm_dag.fileloc, only_if_updated=False) dag = self.dags[dag_id] else: orm_dag = DagModel.get_current(dag_id) self.process_file( filepath=orm_dag.fileloc, only_if_updated=False) if dag_id in self.dags: dag = self.dags[dag_id] else: dag = None return dag def process_file(self, filepath, only_if_updated=True, safe_mode=True): """ Given a path to a python module, this method imports the module and look for dag objects within it. """ try: # This failed before in what may have been a git sync # race condition dttm = datetime.fromtimestamp(os.path.getmtime(filepath)) mod_name, file_ext = os.path.splitext(os.path.split(filepath)[-1]) mod_name = 'unusual_prefix_' + mod_name except: return if safe_mode and os.path.isfile(filepath): # Skip file if no obvious references to airflow or DAG are found. with open(filepath, 'r') as f: content = f.read() if not all([s in content for s in ('DAG', 'airflow')]): return if ( not only_if_updated or filepath not in self.file_last_changed or dttm != self.file_last_changed[filepath]): try: logging.info("Importing " + filepath) if mod_name in sys.modules: del sys.modules[mod_name] with utils.timeout(30): m = imp.load_source(mod_name, filepath) except: logging.error("Failed to import: " + filepath) logging.exception("") self.file_last_changed[filepath] = dttm return for dag in m.__dict__.values(): if isinstance(dag, DAG): dag.full_filepath = filepath dag.is_subdag = False self.bag_dag(dag, parent_dag=dag, root_dag=dag) # dag.pickle() self.file_last_changed[filepath] = dttm def bag_dag(self, dag, parent_dag, root_dag): """ Adds the DAG into the bag, recurses into sub dags. 
""" self.dags[dag.dag_id] = dag dag.resolve_template_files() dag.last_loaded = datetime.now() if self.sync_to_db: session = settings.Session() orm_dag = session.query( DagModel).filter(DagModel.dag_id == dag.dag_id).first() if not orm_dag: orm_dag = DagModel(dag_id=dag.dag_id) orm_dag.fileloc = root_dag.full_filepath orm_dag.is_subdag = dag.is_subdag orm_dag.owners = root_dag.owner orm_dag.is_active = True session.merge(orm_dag) session.commit() session.close() for subdag in dag.subdags: subdag.full_filepath = dag.full_filepath subdag.parent_dag = dag subdag.fileloc = root_dag.full_filepath subdag.is_subdag = True self.bag_dag(subdag, parent_dag=dag, root_dag=root_dag) logging.info('Loaded DAG {dag}'.format(**locals())) def collect_dags( self, dag_folder=None, only_if_updated=True): """ Given a file path or a folder, this file looks for python modules, imports them and adds them to the dagbag collection. Note that if a .airflowignore file is found while processing, the directory, it will behaves much like a .gitignore does, ignoring files that match any of the regex patterns specified in the file. """ dag_folder = dag_folder or self.dag_folder if os.path.isfile(dag_folder): self.process_file(dag_folder, only_if_updated=only_if_updated) elif os.path.isdir(dag_folder): patterns = [] for root, dirs, files in os.walk(dag_folder): ignore_file = [f for f in files if f == '.airflowignore'] if ignore_file: f = open(os.path.join(root, ignore_file[0]), 'r') patterns += [p for p in f.read().split('\n') if p] f.close() for f in files: try: filepath = os.path.join(root, f) if not os.path.isfile(filepath): continue mod_name, file_ext = os.path.splitext( os.path.split(filepath)[-1]) if file_ext != '.py': continue if not any([re.findall(p, filepath) for p in patterns]): self.process_file( filepath, only_if_updated=only_if_updated) except: pass def deactivate_inactive_dags(self): active_dag_ids = [dag.dag_id for dag in self.dags.values()] session = settings.Session() for dag in session.query( DagModel).filter(~DagModel.dag_id.in_(active_dag_ids)).all(): dag.is_active = False session.merge(dag) session.commit() session.close() def paused_dags(self): session = settings.Session() dag_ids = [dp.dag_id for dp in session.query(DagModel).filter( DagModel.is_paused == True)] session.commit() session.close() return dag_ids class BaseUser(Base): __tablename__ = "user" id = Column(Integer, primary_key=True) username = Column(String(ID_LEN), unique=True) email = Column(String(500)) def __repr__(self): return self.username def get_id(self): return unicode(self.id) class Connection(Base): """ Placeholder to store information about different database instances connection information. The idea here is that scripts use references to database instances (conn_id) instead of hard coding hostname, logins and passwords when using operators or hooks. 
""" __tablename__ = "connection" id = Column(Integer(), primary_key=True) conn_id = Column(String(ID_LEN)) conn_type = Column(String(500)) host = Column(String(500)) schema = Column(String(500)) login = Column(String(500)) password = Column(String(500)) port = Column(Integer()) extra = Column(String(5000)) def __init__( self, conn_id=None, conn_type=None, host=None, login=None, password=None, schema=None, port=None): self.conn_id = conn_id self.conn_type = conn_type self.host = host self.login = login self.password = password self.schema = schema self.port = port def get_hook(self): from airflow import hooks try: if self.conn_type == 'mysql': return hooks.MySqlHook(mysql_conn_id=self.conn_id) elif self.conn_type == 'postgres': return hooks.PostgresHook(postgres_conn_id=self.conn_id) elif self.conn_type == 'hive_cli': return hooks.HiveCliHook(hive_cli_conn_id=self.conn_id) elif self.conn_type == 'presto': return hooks.PrestoHook(presto_conn_id=self.conn_id) elif self.conn_type == 'hiveserver2': return hooks.HiveServer2Hook(hiveserver2_conn_id=self.conn_id) elif self.conn_type == 'sqlite': return hooks.SqliteHook(sqlite_conn_id=self.conn_id) elif self.conn_type == 'jdbc': return hooks.JdbcHook(conn_id=self.conn_id) except: return None def __repr__(self): return self.conn_id @property def extra_dejson(self): """Returns the extra property by deserializing json""" obj = {} if self.extra: try: obj = json.loads(self.extra) except Exception as e: logging.exception(e) logging.error( "Failed parsing the json for " "conn_id {}".format(self.conn_id)) return obj class DagPickle(Base): """ Dags can originate from different places (user repos, master repo, ...) and also get executed in different places (different executors). This object represents a version of a DAG and becomes a source of truth for a BackfillJob execution. A pickle is a native python serialized object, and in this case gets stored in the database for the duration of the job. The executors pick up the DagPickle id and read the dag definition from the database. """ id = Column(Integer, primary_key=True) pickle = Column(PickleType(pickler=dill)) created_dttm = Column(DateTime, default=func.now()) pickle_hash = Column(Integer) __tablename__ = "dag_pickle" def __init__(self, dag): self.dag_id = dag.dag_id if hasattr(dag, 'template_env'): dag.template_env = None self.pickle_hash = hash(dag) self.pickle = dag class TaskInstance(Base): """ Task instances store the state of a task instance. This table is the authority and single source of truth around what tasks have run and the state they are in. The SqlAchemy model doesn't have a SqlAlchemy foreign key to the task or dag model deliberately to have more control over transactions. Database transactions on this table should insure double triggers and any confusion around what task instances are or aren't ready to run even while multiple schedulers may be firing task instances. 
""" __tablename__ = "task_instance" task_id = Column(String(ID_LEN), primary_key=True) dag_id = Column(String(ID_LEN), primary_key=True) execution_date = Column(DateTime, primary_key=True) start_date = Column(DateTime) end_date = Column(DateTime) duration = Column(Integer) state = Column(String(20)) try_number = Column(Integer) hostname = Column(String(1000)) unixname = Column(String(1000)) job_id = Column(Integer) pool = Column(String(50)) queue = Column(String(50)) priority_weight = Column(Integer) __table_args__ = ( Index('ti_dag_state', dag_id, state), Index('ti_state_lkp', dag_id, task_id, execution_date, state), Index('ti_pool', pool, state, priority_weight), ) def __init__(self, task, execution_date, state=None, job=None): self.dag_id = task.dag_id self.task_id = task.task_id self.execution_date = execution_date self.state = state self.task = task self.queue = task.queue self.pool = task.pool self.priority_weight = task.priority_weight_total self.try_number = 1 self.unixname = getpass.getuser() if job: self.job_id = job.id def command( self, mark_success=False, ignore_dependencies=False, force=False, local=False, pickle_id=None, raw=False, task_start_date=None, job_id=None): """ Returns a command that can be executed anywhere where airflow is installed. This command is part of the message sent to executors by the orchestrator. """ iso = self.execution_date.isoformat() mark_success = "--mark_success" if mark_success else "" pickle = "--pickle {0}".format(pickle_id) if pickle_id else "" job_id = "--job_id {0}".format(job_id) if job_id else "" ignore_dependencies = "-i" if ignore_dependencies else "" force = "--force" if force else "" local = "--local" if local else "" task_start_date = \ "-s " + task_start_date.isoformat() if task_start_date else "" raw = "--raw" if raw else "" subdir = "" if not pickle and self.task.dag and self.task.dag.full_filepath: subdir = "-sd DAGS_FOLDER/{0}".format(self.task.dag.filepath) return ( "airflow run " "{self.dag_id} {self.task_id} {iso} " "{mark_success} " "{pickle} " "{local} " "{ignore_dependencies} " "{force} " "{job_id} " "{raw} " "{subdir} " "{task_start_date} " ).format(**locals()) @property def log_filepath(self): iso = self.execution_date.isoformat() log = os.path.expanduser(conf.get('core', 'BASE_LOG_FOLDER')) return ( "{log}/{self.dag_id}/{self.task_id}/{iso}.log".format(**locals())) @property def log_url(self): iso = self.execution_date.isoformat() BASE_URL = conf.get('webserver', 'BASE_URL') return BASE_URL + ( "/admin/airflow/log" "?dag_id={self.dag_id}" "&task_id={self.task_id}" "&execution_date={iso}" ).format(**locals()) @property def mark_success_url(self): iso = self.execution_date.isoformat() BASE_URL = conf.get('webserver', 'BASE_URL') return BASE_URL + ( "/admin/airflow/action" "?action=success" "&task_id={self.task_id}" "&dag_id={self.dag_id}" "&execution_date={iso}" "&upstream=false" "&downstream=false" ).format(**locals()) def current_state(self, main_session=None): """ Get the very latest state from the database, if a session is passed, we use and looking up the state becomes part of the session, otherwise a new session is used. 
""" session = main_session or settings.Session() TI = TaskInstance ti = session.query(TI).filter( TI.dag_id == self.dag_id, TI.task_id == self.task_id, TI.execution_date == self.execution_date, ).all() if ti: state = ti[0].state else: state = None if not main_session: session.commit() session.close() return state def error(self, main_session=None): """ Forces the task instance's state to FAILED in the database. """ session = settings.Session() logging.error("Recording the task instance as FAILED") self.state = State.FAILED session.merge(self) session.commit() session.close() def refresh_from_db(self, main_session=None): """ Refreshes the task instance from the database based on the primary key """ session = main_session or settings.Session() TI = TaskInstance ti = session.query(TI).filter( TI.dag_id == self.dag_id, TI.task_id == self.task_id, TI.execution_date == self.execution_date, ).first() if ti: self.state = ti.state self.start_date = ti.start_date self.end_date = ti.end_date self.try_number = ti.try_number if not main_session: session.commit() session.close() @property def key(self): """ Returns a tuple that identifies the task instance uniquely """ return (self.dag_id, self.task_id, self.execution_date) def is_queueable(self, flag_upstream_failed=False): """ Returns a boolean on whether the task instance has met all dependencies and is ready to run. It considers the task's state, the state of its dependencies, depends_on_past and makes sure the execution isn't in the future. It doesn't take into account whether the pool has a slot for it to run. :param flag_upstream_failed: This is a hack to generate the upstream_failed state creation while checking to see whether the task instance is runnable. It was the shortest path to add the feature :type flag_upstream_failed: boolean """ if self.execution_date > datetime.now() - self.task.schedule_interval: return False elif self.state == State.UP_FOR_RETRY and not self.ready_for_retry(): return False elif self.task.end_date and self.execution_date > self.task.end_date: return False elif self.state == State.SKIPPED: return False elif ( self.state in State.runnable() and self.are_dependencies_met( flag_upstream_failed=flag_upstream_failed)): return True else: return False def is_runnable(self): """ Returns whether a task is ready to run AND there's room in the queue. """ return self.is_queueable() and not self.pool_full() def are_dependents_done(self, main_session=None): """ Checks whether the dependents of this task instance have all succeeded. This is meant to be used by wait_for_downstream. This is useful when you do not want to start processing the next schedule of a task until the dependents are done. For instance, if the task DROPs and recreates a table. """ session = main_session or settings.Session() task = self.task if not task._downstream_list: return True downstream_task_ids = [t.task_id for t in task._downstream_list] ti = session.query(func.count(TaskInstance.task_id)).filter( TaskInstance.dag_id == self.dag_id, TaskInstance.task_id.in_(downstream_task_ids), TaskInstance.execution_date == self.execution_date, TaskInstance.state == State.SUCCESS, ) count = ti[0][0] if not main_session: session.commit() session.close() return count == len(task._downstream_list) def are_dependencies_met( self, main_session=None, flag_upstream_failed=False): """ Returns a boolean on whether the upstream tasks are in a SUCCESS state and considers depends_on_past and the previous run's state. 
:param flag_upstream_failed: This is a hack to generate the upstream_failed state creation while checking to see whether the task instance is runnable. It was the shortest path to add the feature :type flag_upstream_failed: boolean """ TI = TaskInstance # Using the session if passed as param session = main_session or settings.Session() task = self.task # Checking that the depends_on_past is fulfilled if (task.depends_on_past and not self.execution_date == task.start_date): previous_ti = session.query(TI).filter( TI.dag_id == self.dag_id, TI.task_id == task.task_id, TI.execution_date == self.execution_date-task.schedule_interval, TI.state == State.SUCCESS, ).first() if not previous_ti: return False # Applying wait_for_downstream previous_ti.task = self.task if task.wait_for_downstream and not \ previous_ti.are_dependents_done(session): return False # Checking that all upstream dependencies have succeeded if task._upstream_list: upstream_task_ids = [t.task_id for t in task._upstream_list] qry = ( session .query( func.sum( case([(TI.state == State.SUCCESS, 1)], else_=0)), func.sum( case([(TI.state == State.SKIPPED, 1)], else_=0)), func.count(TI.task_id), ) .filter( TI.dag_id == self.dag_id, TI.task_id.in_(upstream_task_ids), TI.execution_date == self.execution_date, TI.state.in_([ State.SUCCESS, State.FAILED, State.UPSTREAM_FAILED, State.SKIPPED]), ) ) successes, skipped, done = qry[0] if flag_upstream_failed: if skipped: self.state = State.SKIPPED self.start_date = datetime.now() self.end_date = datetime.now() session.merge(self) elif successes < done >= len(task._upstream_list): self.state = State.UPSTREAM_FAILED self.start_date = datetime.now() self.end_date = datetime.now() session.merge(self) if successes < len(task._upstream_list): return False if not main_session: session.commit() session.close() return True def __repr__(self): return ( "<TaskInstance: {ti.dag_id}.{ti.task_id} " "{ti.execution_date} [{ti.state}]>" ).format(ti=self) def ready_for_retry(self): """ Checks on whether the task instance is in the right state and timeframe to be retried. """ return self.state == State.UP_FOR_RETRY and \ self.end_date + self.task.retry_delay < datetime.now() @provide_session def pool_full(self, session): """ Returns a boolean as to whether the slot pool has room for this task to run """ if not self.task.pool: return False pool = ( session .query(Pool) .filter(Pool.pool == self.task.pool) .first() ) if not pool: return False open_slots = pool.open_slots(session=session) return open_slots <= 0 def run( self, verbose=True, ignore_dependencies=False, # Doesn't check for deps, just runs force=False, # Disregards previous successes mark_success=False, # Don't run the task, act as if it succeeded test_mode=False, # Doesn't record success or failure in the DB job_id=None,): """ Runs the task instance. """ task = self.task session = settings.Session() self.refresh_from_db(session) session.commit() self.job_id = job_id iso = datetime.now().isoformat() self.hostname = socket.gethostname() if self.state == State.RUNNING: logging.warning("Another instance is running, skipping.") elif not force and self.state == State.SUCCESS: logging.info( "Task {self} previously succeeded" " on {self.end_date}".format(**locals()) ) elif not ignore_dependencies and \ not self.are_dependencies_met(session): logging.warning("Dependencies not met yet") elif self.state == State.UP_FOR_RETRY and \ not self.ready_for_retry(): next_run = (self.end_date + task.retry_delay).isoformat() logging.info( "Not ready for retry yet. 
" + "Next run after {0}".format(next_run) ) elif force or self.state in State.runnable(): msg = "\n" + ("-" * 80) if self.state == State.UP_FOR_RETRY: msg += "\nRetry run {self.try_number} out of {task.retries} " msg += "starting @{iso}\n" else: msg += "\nNew run starting @{iso}\n" msg += ("-" * 80) logging.info(msg.format(**locals())) self.start_date = datetime.now() if not force and task.pool: # If a pool is set for this task, marking the task instance # as QUEUED self.state = State.QUEUED session.merge(self) session.commit() session.close() logging.info("Queuing into pool {}".format(task.pool)) return if self.state == State.UP_FOR_RETRY: self.try_number += 1 else: self.try_number = 1 if not test_mode: session.add(Log(State.RUNNING, self)) self.state = State.RUNNING self.end_date = None if not test_mode: session.merge(self) session.commit() if verbose: if mark_success: msg = "Marking success for " else: msg = "Executing " msg += "{self.task} on {self.execution_date}" context = {} try: logging.info(msg.format(self=self)) if not mark_success: context = self.get_template_context() task_copy = copy.copy(task) self.task = task_copy def signal_handler(signum, frame): '''Setting kill signal handler''' logging.error("Killing subprocess") task_copy.on_kill() raise AirflowException("Task received SIGTERM signal") signal.signal(signal.SIGTERM, signal_handler) self.render_templates() settings.policy(task_copy) task_copy.pre_execute(context=context) # If a timout is specified for the task, make it fail # if it goes beyond if task_copy.execution_timeout: with utils.timeout(int( task_copy.execution_timeout.total_seconds())): task_copy.execute(context=context) else: task_copy.execute(context=context) task_copy.post_execute(context=context) except (Exception, StandardError, KeyboardInterrupt) as e: self.handle_failure(e, test_mode, context) raise # Recording SUCCESS session = settings.Session() self.end_date = datetime.now() self.set_duration() self.state = State.SUCCESS if not test_mode: session.add(Log(State.SUCCESS, self)) session.merge(self) # Success callback try: if task.on_success_callback: task.on_success_callback(context) except Exception as e3: logging.error("Failed when executing success callback") logging.exception(e3) session.commit() def handle_failure(self, error, test_mode, context): logging.exception(error) task = self.task session = settings.Session() self.end_date = datetime.now() self.set_duration() if not test_mode: session.add(Log(State.FAILED, self)) # Let's go deeper try: if self.try_number <= task.retries: self.state = State.UP_FOR_RETRY if task.email_on_retry and task.email: self.email_alert(error, is_retry=True) else: self.state = State.FAILED if task.email_on_failure and task.email: self.email_alert(error, is_retry=False) except Exception as e2: logging.error( 'Failed to send email to: ' + str(task.email)) logging.exception(e2) # Handling callbacks pessimistically try: if self.state == State.UP_FOR_RETRY and task.on_retry_callback: task.on_retry_callback(context) if self.state == State.FAILED and task.on_failure_callback: task.on_failure_callback(context) except Exception as e3: logging.error("Failed at executing callback") logging.exception(e3) if not test_mode: session.merge(self) session.commit() logging.error(str(error)) def get_template_context(self): task = self.task from airflow import macros tables = None if 'tables' in task.params: tables = task.params['tables'] ds = self.execution_date.isoformat()[:10] yesterday_ds = (self.execution_date - 
timedelta(1)).isoformat()[:10] tomorrow_ds = (self.execution_date + timedelta(1)).isoformat()[:10] ds_nodash = ds.replace('-', '') ti_key_str = "{task.dag_id}__{task.task_id}__{ds_nodash}" ti_key_str = ti_key_str.format(**locals()) params = {} if hasattr(task, 'dag') and task.dag.params: params.update(task.dag.params) if task.params: params.update(task.params) return { 'dag': task.dag, 'ds': ds, 'yesterday_ds': yesterday_ds, 'tomorrow_ds': tomorrow_ds, 'END_DATE': ds, 'ds_nodash': ds_nodash, 'end_date': ds, 'execution_date': self.execution_date, 'latest_date': ds, 'macros': macros, 'params': params, 'tables': tables, 'task': task, 'task_instance': self, 'ti': self, 'task_instance_key_str': ti_key_str, 'conf': conf, } def render_templates(self): task = self.task jinja_context = self.get_template_context() if hasattr(self, 'task') and hasattr(self.task, 'dag'): if self.task.dag.user_defined_macros: jinja_context.update( self.task.dag.user_defined_macros) rt = self.task.render_template # shortcut to method for attr in task.__class__.template_fields: content = getattr(task, attr) if content: if isinstance(content, basestring): result = rt(content, jinja_context) elif isinstance(content, (list, tuple)): result = [rt(s, jinja_context) for s in content] elif isinstance(content, dict): result = { k: rt(v, jinja_context) for k, v in content.items()} else: raise AirflowException("Type not supported for templating") setattr(task, attr, result) def email_alert(self, exception, is_retry=False): task = self.task title = "Airflow alert: {self}".format(**locals()) exception = str(exception).replace('\n', '<br>') try_ = task.retries + 1 body = ( "Try {self.try_number} out of {try_}<br>" "Exception:<br>{exception}<br>" "Log: <a href='{self.log_url}'>Link</a><br>" "Host: {self.hostname}<br>" "Log file: {self.log_filepath}<br>" "Mark success: <a href='{self.mark_success_url}'>Link</a><br>" ).format(**locals()) utils.send_email(task.email, title, body) def set_duration(self): if self.end_date and self.start_date: self.duration = (self.end_date - self.start_date).seconds else: self.duration = None class Log(Base): """ Used to actively log events to the database """ __tablename__ = "log" id = Column(Integer, primary_key=True) dttm = Column(DateTime) dag_id = Column(String(ID_LEN)) task_id = Column(String(ID_LEN)) event = Column(String(30)) execution_date = Column(DateTime) owner = Column(String(500)) def __init__(self, event, task_instance): self.dttm = datetime.now() self.dag_id = task_instance.dag_id self.task_id = task_instance.task_id self.execution_date = task_instance.execution_date self.event = event self.owner = task_instance.task.owner class BaseOperator(object): """ Abstract base class for all operators. Since operators create objects that become node in the dag, BaseOperator contains many recursive methods for dag crawling behavior. To derive this class, you are expected to override the constructor as well as the 'execute' method. Operators derived from this task should perform or trigger certain tasks synchronously (wait for completion). Example of operators could be an operator the runs a Pig job (PigOperator), a sensor operator that waits for a partition to land in Hive (HiveSensorOperator), or one that moves data from Hive to MySQL (Hive2MySqlOperator). Instances of these operators (tasks) target specific operations, running specific scripts, functions or data transfers. This class is abstract and shouldn't be instantiated. 
Instantiating a class derived from this one results in the creation of a task object, which ultimately becomes a node in DAG objects. Task dependencies should be set by using the set_upstream and/or set_downstream methods. Note that this class is derived from SQLAlquemy's Base class, which allows us to push metadata regarding tasks to the database. Deriving this classes needs to implement the polymorphic specificities documented in SQLAlchemy. This should become clear while reading the code for other operators. :param task_id: a unique, meaningful id for the task :type task_id: string :param owner: the owner of the task, using the unix username is recommended :type owner: string :param retries: the number of retries that should be performed before failing the task :type retries: int :param retry_delay: delay between retries :type retry_delay: timedelta :param start_date: start date for the task, the scheduler will start from this point in time :type start_date: datetime :param end_date: if specified, the scheduler won't go beyond this date :type end_date: datetime :param schedule_interval: interval at which to schedule the task :type schedule_interval: timedelta :param depends_on_past: when set to true, task instances will run sequentially while relying on the previous task's schedule to succeed. The task instance for the start_date is allowed to run. :type depends_on_past: bool :param wait_for_downstream: when set to true, an instance of task X will wait for tasks immediately downstream of the previous instance of task X to finish successfully before it runs. This is useful if the different instances of a task X alter the same asset, and this asset is used by tasks downstream of task X. :type wait_for_downstream: bool :param queue: which queue to target when running this job. Not all executors implement queue management, the CeleryExecutor does support targeting specific queues. :type queue: str :param dag: a reference to the dag the task is attached to (if any) :type dag: DAG :param priority_weight: priority weight of this task against other task. This allows the executor to trigger higher priority tasks before others when things get backed up. :type priority_weight: int :param pool: the slot pool this task should run in, slot pools are a way to limit concurrency for certain tasks :type pool: str :param sla: time by which the job is expected to succeed. Note that this represents the ``timedelta`` after the period is closed. For example if you set an SLA of 1 hour, the scheduler would send dan email soon after 1:00AM on the ``2016-01-02`` if the ``2016-01-01`` instance has not succeede yet. The scheduler pays special attention for jobs with an SLA and sends alert emails for sla misses. SLA misses are also recorded in the database for future reference. All tasks that share the same SLA time get bundled in a single email, sent soon after that time. SLA notification are sent once and only once for each task instance. :type sla: datetime.timedelta :param execution_timeout: max time allowed for the execution of this task instance, if it goes beyond it will raise and fail. :type execution_timeout: datetime.timedelta :param on_failure_callback: a function to be called when a task instance of this task fails. a context dictionary is passed as a single parameter to this function. Context contains references to related objects to the task instance and is documented under the macros section of the API. 
:type on_failure_callback: callable :param on_retry_callback: much like the ``on_failure_callback`` excepts that it is executed when retries occur. :param on_success_callback: much like the ``on_failure_callback`` excepts that it is executed when the task succeeds. :type on_success_callback: callable """ # For derived classes to define which fields will get jinjaified template_fields = [] # Defines wich files extensions to look for in the templated fields template_ext = [] # Defines the color in the UI ui_color = '#fff' ui_fgcolor = '#000' @apply_defaults def __init__( self, task_id, owner, email=None, email_on_retry=True, email_on_failure=True, retries=0, retry_delay=timedelta(seconds=300), start_date=None, end_date=None, schedule_interval=timedelta(days=1), depends_on_past=False, wait_for_downstream=False, dag=None, params=None, default_args=None, adhoc=False, priority_weight=1, queue=conf.get('celery', 'default_queue'), pool=None, sla=None, execution_timeout=None, on_failure_callback=None, on_success_callback=None, on_retry_callback=None, *args, **kwargs): utils.validate_key(task_id) self.dag_id = dag.dag_id if dag else 'adhoc_' + owner self.task_id = task_id self.owner = owner self.email = email self.email_on_retry = email_on_retry self.email_on_failure = email_on_failure self.start_date = start_date self.end_date = end_date self.depends_on_past = depends_on_past self.wait_for_downstream = wait_for_downstream self._schedule_interval = schedule_interval self.retries = retries self.queue = queue self.pool = pool self.sla = sla self.execution_timeout = execution_timeout self.on_failure_callback = on_failure_callback self.on_success_callback = on_success_callback self.on_retry_callback = on_retry_callback if isinstance(retry_delay, timedelta): self.retry_delay = retry_delay else: logging.debug("retry_delay isn't timedelta object, assuming secs") self.retry_delay = timedelta(seconds=retry_delay) self.params = params or {} # Available in templates! self.adhoc = adhoc self.priority_weight = priority_weight if dag: dag.add_task(self) self.dag = dag # Private attributes self._upstream_list = [] self._downstream_list = [] @property def schedule_interval(self): """ The schedule interval of the DAG always wins over individual tasks so that tasks within a DAG always line up. The task still needs a schedule_interval as it may not be attached to a DAG. """ if hasattr(self, 'dag') and self.dag: return self.dag.schedule_interval else: return self._schedule_interval @property def priority_weight_total(self): return sum([ t.priority_weight for t in self.get_flat_relatives(upstream=False) ]) + self.priority_weight def __cmp__(self, other): blacklist = { '_sa_instance_state', '_upstream_list', '_downstream_list', 'dag'} for k in set(self.__dict__) - blacklist: if self.__dict__[k] != other.__dict__[k]: logging.debug(str(( self.dag_id, self.task_id, k, self.__dict__[k], other.__dict__[k]))) return -1 return 0 def pre_execute(self, context): """ This is triggered right before self.execute, it's mostly a hook for people deriving operators. """ pass def execute(self, context): """ This is the main method to derive when creating an operator. Context is the same dictionary used as when rendering jinja templates. Refer to get_template_context for more context. """ raise NotImplemented() def post_execute(self, context): """ This is triggered right after self.execute, it's mostly a hook for people deriving operators. 
""" pass def on_kill(self): ''' Override this method to cleanup subprocesses when a task instance gets killed. Any use of the threading, subprocess or multiprocessing module within an operator needs to be cleaned up or it will leave ghost processes behind. ''' pass def __deepcopy__(self, memo): """ Hack sorting double chained task lists by task_id to avoid hitting max_depth on deepcopy operations. """ cls = self.__class__ result = cls.__new__(cls) memo[id(self)] = result self._upstream_list = sorted(self._upstream_list, key=lambda x: x.task_id) self._downstream_list = sorted(self._downstream_list, key=lambda x: x.task_id) for k, v in self.__dict__.items(): if k not in ('user_defined_macros', 'params'): setattr(result, k, copy.deepcopy(v, memo)) return result def render_template(self, content, context): if hasattr(self, 'dag'): env = self.dag.get_template_env() else: env = jinja2.Environment(cache_size=0) exts = self.__class__.template_ext if any([content.endswith(ext) for ext in exts]): template = env.get_template(content) else: template = env.from_string(content) return template.render(**context) def prepare_template(self): ''' Hook that is triggered after the templated fields get replaced by their content. If you need your operator to alter the content of the file before the template is rendered, it should override this method to do so. ''' pass def resolve_template_files(self): # Getting the content of files for template_field / template_ext for attr in self.template_fields: content = getattr(self, attr) if (content and isinstance(content, basestring) and any([content.endswith(ext) for ext in self.template_ext])): env = self.dag.get_template_env() try: setattr(self, attr, env.loader.get_source(env, content)[0]) except Exception as e: logging.exception(e) self.prepare_template() @property def upstream_list(self): """@property: list of tasks directly upstream""" return self._upstream_list @property def downstream_list(self): """@property: list of tasks directly downstream""" return self._downstream_list def clear( self, start_date=None, end_date=None, upstream=False, downstream=False): """ Clears the state of task instances associated with the task, following the parameters specified. """ session = settings.Session() TI = TaskInstance qry = session.query(TI).filter(TI.dag_id == self.dag_id) if start_date: qry = qry.filter(TI.execution_date >= start_date) if end_date: qry = qry.filter(TI.execution_date <= end_date) tasks = [self.task_id] if upstream: tasks += \ [t.task_id for t in self.get_flat_relatives(upstream=True)] if downstream: tasks += \ [t.task_id for t in self.get_flat_relatives(upstream=False)] qry = qry.filter(TI.task_id.in_(tasks)) count = qry.count() clear_task_instances(qry, session) session.commit() session.close() return count def get_task_instances(self, session, start_date=None, end_date=None): """ Get a set of task instance related to this task for a specific date range. """ TI = TaskInstance end_date = end_date or datetime.now() return session.query(TI).filter( TI.dag_id == self.dag_id, TI.task_id == self.task_id, TI.execution_date >= start_date, TI.execution_date <= end_date, ).order_by(TI.execution_date).all() def get_flat_relatives(self, upstream=False, l=None): """ Get a flat list of relatives, either upstream or downstream. 
""" if not l: l = [] for t in self.get_direct_relatives(upstream): if not utils.is_in(t, l): l.append(t) t.get_flat_relatives(upstream, l) return l def detect_downstream_cycle(self, task=None): """ When invoked, this routine will raise an exception if a cycle is detected downstream from self. It is invoked when tasks are added to the DAG to detect cycles. """ if not task: task = self for t in self.get_direct_relatives(): if task is t: msg = "Cycle detect in DAG. Faulty task: {0}".format(task) raise AirflowException(msg) else: t.detect_downstream_cycle(task=task) return False def run( self, start_date=None, end_date=None, ignore_dependencies=False, force=False, mark_success=False): """ Run a set of task instances for a date range. """ start_date = start_date or self.start_date end_date = end_date or self.end_date or datetime.now() for dt in utils.date_range( start_date, end_date, self.schedule_interval): TaskInstance(self, dt).run( mark_success=mark_success, ignore_dependencies=ignore_dependencies, force=force,) def get_direct_relatives(self, upstream=False): """ Get the direct relatives to the current task, upstream or downstream. """ if upstream: return self.upstream_list else: return self.downstream_list def __repr__(self): return "<Task({self.__class__.__name__}): {self.task_id}>".format(self=self) @property def task_type(self): return self.__class__.__name__ def append_only_new(self, l, item): if any([item is t for t in l]): raise AirflowException( 'Dependency {self}, {item} already registered' ''.format(**locals())) else: l.append(item) def _set_relatives(self, task_or_task_list, upstream=False): try: task_list = list(task_or_task_list) except TypeError: task_list = [task_or_task_list] for task in task_list: if not isinstance(task, BaseOperator): raise AirflowException('Expecting a task') if upstream: self.append_only_new(task._downstream_list, self) self.append_only_new(self._upstream_list, task) else: self.append_only_new(task._upstream_list, self) self.append_only_new(self._downstream_list, task) self.detect_downstream_cycle() def set_downstream(self, task_or_task_list): """ Set a task, or a task task to be directly downstream from the current task. """ self._set_relatives(task_or_task_list, upstream=False) def set_upstream(self, task_or_task_list): """ Set a task, or a task task to be directly upstream from the current task. 
""" self._set_relatives(task_or_task_list, upstream=True) class DagModel(Base): __tablename__ = "dag" """ These items are stored in the database for state related information """ dag_id = Column(String(ID_LEN), primary_key=True) # A DAG can be paused from the UI / DB is_paused = Column(Boolean, default=False) # Whether the DAG is a subdag is_subdag = Column(Boolean, default=False) # Whether that DAG was seen on the last DagBag load is_active = Column(Boolean, default=False) # Last time the scheduler started last_scheduler_run = Column(DateTime) # Last time this DAG was pickled last_pickled = Column(DateTime) # When the DAG received a refreshed signal last, used to know when # we need to force refresh last_expired = Column(DateTime) # Whether (one of) the scheduler is scheduling this DAG at the moment scheduler_lock = Column(Boolean) # Foreign key to the latest pickle_id pickle_id = Column(Integer) # The location of the file containing the DAG object fileloc = Column(String(2000)) # String representing the owners owners = Column(String(2000)) def __repr__(self): return "<DAG: {self.dag_id}>".format(self=self) @classmethod def get_current(cls, dag_id): session = settings.Session() obj = session.query(cls).filter(cls.dag_id == dag_id).first() session.expunge_all() session.commit() session.close() return obj class DAG(object): """ A dag (directed acyclic graph) is a collection of tasks with directional dependencies. A dag also has a schedule, a start end an end date (optional). For each schedule, (say daily or hourly), the DAG needs to run each individual tasks as their dependencies are met. Certain tasks have the property of depending on their own past, meaning that they can't run until their previous schedule (and upstream tasks) are completed. DAGs essentially act as namespaces for tasks. A task_id can only be added once to a DAG. :param dag_id: The id of the DAG :type dag_id: string :param schedule_interval: Defines how often that DAG runs :type schedule_interval: datetime.timedelta :param start_date: The timestamp from which the scheduler will attempt to backfill :type start_date: datetime.datetime :param end_date: A date beyond which your DAG won't run, leave to None for open ended scheduling :type end_date: datetime.datetime :param template_searchpath: This list of folders (non relative) defines where jinja will look for your templates. Order matters. Note that jinja/airflow includes the path of your DAG file by default :type template_searchpath: string or list of stings :param user_defined_macros: a dictionary of macros that will be merged :type user_defined_macros: dict :param default_args: A dictionary of default parameters to be used as constructor keyword parameters when initialising operators. Note that operators have the same hook, and precede those defined here, meaning that if your dict contains `'depends_on_past': True` here and `'depends_on_past': False` in the operator's call `default_args`, the actual value will be `False`. :type default_args: dict :param params: a dictionary of DAG level parameters that are made accessible in templates, namespaced under `params`. These params can be overridden at the task level. 
:type params: dict """ def __init__( self, dag_id, schedule_interval=timedelta(days=1), start_date=None, end_date=None, full_filepath=None, template_searchpath=None, user_defined_macros=None, default_args=None, params=None): self.user_defined_macros = user_defined_macros self.default_args = default_args or {} self.params = params utils.validate_key(dag_id) self.tasks = [] self.dag_id = dag_id self.start_date = start_date self.end_date = end_date or datetime.now() self.schedule_interval = schedule_interval self.full_filepath = full_filepath if full_filepath else '' if isinstance(template_searchpath, basestring): template_searchpath = [template_searchpath] self.template_searchpath = template_searchpath self.parent_dag = None # Gets set when DAGs are loaded self.last_loaded = datetime.now() def __repr__(self): return "<DAG: {self.dag_id}>".format(self=self) @property def task_ids(self): return [t.task_id for t in self.tasks] @property def filepath(self): fn = self.full_filepath.replace(DAGS_FOLDER + '/', '') fn = fn.replace(os.path.dirname(__file__) + '/', '') return fn @property def folder(self): return os.path.dirname(self.full_filepath) @property def owner(self): return ", ".join(list(set([t.owner for t in self.tasks]))) @property def latest_execution_date(self): TI = TaskInstance session = settings.Session() execution_date = session.query(func.max(TI.execution_date)).filter( TI.dag_id == self.dag_id, TI.task_id.in_(self.task_ids) ).scalar() session.commit() session.close() return execution_date @property def subdags(self): # Late import to prevent circular imports from airflow.operators import SubDagOperator l = [] for task in self.tasks: if isinstance(task, SubDagOperator): l.append(task.subdag) l += task.subdag.subdags return l def resolve_template_files(self): for t in self.tasks: t.resolve_template_files() def crawl_for_tasks(objects): """ Typically called at the end of a script by passing globals() as a parameter. This allows to not explicitly add every single task to the dag explicitly. """ raise NotImplemented("") def override_start_date(self, start_date): """ Sets start_date of all tasks and of the DAG itself to a certain date. This is used by BackfillJob. 
""" for t in self.tasks: t.start_date = start_date self.start_date = start_date def get_template_env(self): ''' Returns a jinja2 Environment while taking into account the DAGs template_searchpath and user_defined_macros ''' searchpath = [self.folder] if self.template_searchpath: searchpath += self.template_searchpath env = jinja2.Environment( loader=jinja2.FileSystemLoader(searchpath), extensions=["jinja2.ext.do"], cache_size=0) if self.user_defined_macros: env.globals.update(self.user_defined_macros) return env def set_dependency(self, upstream_task_id, downstream_task_id): """ Simple utility method to set dependency between two tasks that already have been added to the DAG using add_task() """ self.get_task(upstream_task_id).set_downstream( self.get_task(downstream_task_id)) def get_task_instances( self, session, start_date=None, end_date=None, state=None): TI = TaskInstance if not start_date: start_date = (datetime.today()-timedelta(30)).date() start_date = datetime.combine(start_date, datetime.min.time()) if not end_date: end_date = datetime.now() tis = session.query(TI).filter( TI.dag_id == self.dag_id, TI.execution_date >= start_date, TI.execution_date <= end_date, TI.task_id.in_([t.task_id for t in self.tasks]), ) if state: tis = tis.filter(TI.state == state) tis = tis.all() return tis @property def roots(self): return [t for t in self.tasks if not t.downstream_list] def clear( self, start_date=None, end_date=None, only_failed=False, only_running=False, confirm_prompt=False, include_subdags=True, dry_run=False): session = settings.Session() """ Clears a set of task instances associated with the current dag for a specified date range. """ TI = TaskInstance tis = session.query(TI) if include_subdags: # Crafting the right filter for dag_id and task_ids combo conditions = [] for dag in self.subdags + [self]: conditions.append( TI.dag_id.like(dag.dag_id) & TI.task_id.in_(dag.task_ids) ) tis = tis.filter(or_(*conditions)) else: tis = session.query(TI).filter(TI.dag_id == self.dag_id) tis = tis.filter(TI.task_id.in_(self.task_ids)) if start_date: tis = tis.filter(TI.execution_date >= start_date) if end_date: tis = tis.filter(TI.execution_date <= end_date) if only_failed: tis = tis.filter(TI.state == State.FAILED) if only_running: tis = tis.filter(TI.state == State.RUNNING) if dry_run: tis = tis.all() session.expunge_all() return tis count = tis.count() if count == 0: print("Nothing to clear.") return 0 if confirm_prompt: ti_list = "\n".join([str(t) for t in tis]) question = ( "You are about to delete these {count} tasks:\n" "{ti_list}\n\n" "Are you sure? (yes/no): ").format(**locals()) if utils.ask_yesno(question): clear_task_instances(tis, session) else: count = 0 print("Bail. Nothing was cleared.") else: clear_task_instances(tis, session) session.commit() session.close() return count def __deepcopy__(self, memo): # Swiwtcharoo to go around deepcopying objects coming through the # backdoor cls = self.__class__ result = cls.__new__(cls) memo[id(self)] = result for k, v in self.__dict__.items(): if k not in ('user_defined_macros', 'params'): setattr(result, k, copy.deepcopy(v, memo)) result.user_defined_macros = self.user_defined_macros result.params = self.params return result def sub_dag( self, task_regex, include_downstream=False, include_upstream=True): """ Returns a subset of the current dag as a deep copy of the current dag based on a regex that should match one or many tasks, and includes upstream and downstream neighbours based on the flag passed. 
""" dag = copy.deepcopy(self) regex_match = [ t for t in dag.tasks if re.findall(task_regex, t.task_id)] also_include = [] for t in regex_match: if include_downstream: also_include += t.get_flat_relatives(upstream=False) if include_upstream: also_include += t.get_flat_relatives(upstream=True) # Compiling the unique list of tasks that made the cut tasks = list(set(regex_match + also_include)) dag.tasks = tasks for t in dag.tasks: # Removing upstream/downstream references to tasks that did not # made the cut t._upstream_list = [ ut for ut in t._upstream_list if utils.is_in(ut, tasks)] t._downstream_list = [ ut for ut in t._downstream_list if utils.is_in(ut, tasks)] return dag def has_task(self, task_id): return task_id in (t.task_id for t in self.tasks) def get_task(self, task_id): for task in self.tasks: if task.task_id == task_id: return task raise AirflowException("Task {task_id} not found".format(**locals())) def __cmp__(self, other): blacklist = {'_sa_instance_state', 'end_date', 'last_pickled', 'tasks'} for k in set(self.__dict__) - blacklist: if self.__dict__[k] != other.__dict__[k]: return -1 if len(self.tasks) != len(other.tasks): return -1 i = 0 for task in self.tasks: if task != other.tasks[i]: return -1 i += 1 logging.info("Same as before") return 0 def pickle(self, main_session=None): session = main_session or settings.Session() dag = session.query( DagModel).filter(DAG.dag_id == self.dag_id).first() dp = None if dag and dag.pickle_id: dp = session.query(DagPickle).filter( DagPickle.id == dag.pickle_id).first() if not dp or dp.pickle != self: dp = DagPickle(dag=self) session.add(dp) self.last_pickled = datetime.now() session.commit() self.pickle_id = dp.id if not main_session: session.close() def tree_view(self): """ Shows an ascii tree representation of the DAG """ def get_downstream(task, level=0): print (" " * level * 4) + str(task) level += 1 for t in task.upstream_list: get_downstream(t, level) for t in self.roots: get_downstream(t) def add_task(self, task): ''' Add a task to the DAG :param task: the task you want to add :type task: task ''' if not self.start_date and not task.start_date: raise AirflowException("Task is missing the start_date parameter") if not task.start_date: task.start_date = self.start_date if task.task_id in [t.task_id for t in self.tasks]: raise AirflowException( "Task id '{0}' has already been added " "to the DAG ".format(task.task_id)) else: self.tasks.append(task) task.dag_id = self.dag_id task.dag = self self.task_count = len(self.tasks) def add_tasks(self, tasks): ''' Add a list of tasks to the DAG :param task: a lit of tasks you want to add :type task: list of tasks ''' for task in tasks: self.add_task(task) def db_merge(self): BO = BaseOperator session = settings.Session() tasks = session.query(BO).filter(BO.dag_id == self.dag_id).all() for t in tasks: session.delete(t) session.commit() session.merge(self) session.commit() def run( self, start_date=None, end_date=None, mark_success=False, include_adhoc=False, local=False, executor=None, donot_pickle=False, ignore_dependencies=False): from airflow.jobs import BackfillJob if not executor and local: executor = LocalExecutor() elif not executor: executor = DEFAULT_EXECUTOR job = BackfillJob( self, start_date=start_date, end_date=end_date, mark_success=mark_success, include_adhoc=include_adhoc, executor=executor, donot_pickle=donot_pickle, ignore_dependencies=ignore_dependencies) job.run() class Chart(Base): __tablename__ = "chart" id = Column(Integer, primary_key=True) label = Column(String(200)) 
conn_id = Column(String(ID_LEN), nullable=False) user_id = Column(Integer(), ForeignKey('user.id'),) chart_type = Column(String(100), default="line") sql_layout = Column(String(50), default="series") sql = Column(Text, default="SELECT series, x, y FROM table") y_log_scale = Column(Boolean) show_datatable = Column(Boolean) show_sql = Column(Boolean, default=True) height = Column(Integer, default=600) default_params = Column(String(5000), default="{}") owner = relationship( "User", cascade=False, cascade_backrefs=False, backref='charts') x_is_date = Column(Boolean, default=True) iteration_no = Column(Integer, default=0) last_modified = Column(DateTime, default=datetime.now()) def __repr__(self): return self.label class KnownEventType(Base): __tablename__ = "known_event_type" id = Column(Integer, primary_key=True) know_event_type = Column(String(200)) def __repr__(self): return self.know_event_type class KnownEvent(Base): __tablename__ = "known_event" id = Column(Integer, primary_key=True) label = Column(String(200)) start_date = Column(DateTime) end_date = Column(DateTime) user_id = Column(Integer(), ForeignKey('user.id'),) known_event_type_id = Column(Integer(), ForeignKey('known_event_type.id'),) reported_by = relationship( "User", cascade=False, cascade_backrefs=False, backref='known_events') event_type = relationship( "KnownEventType", cascade=False, cascade_backrefs=False, backref='known_events') description = Column(Text) def __repr__(self): return self.label class Variable(Base): __tablename__ = "variable" id = Column(Integer, primary_key=True) key = Column(String(ID_LEN), unique=True) val = Column(Text) def __repr__(self): return '{} : {}'.format(self.key, self.val) @classmethod @provide_session def get(cls, key, session, deserialize_json=False): obj = session.query(cls).filter(cls.key == key).first() v = obj.val if deserialize_json and v: v = json.loads(v) return v class Pool(Base): __tablename__ = "slot_pool" id = Column(Integer, primary_key=True) pool = Column(String(50), unique=True) slots = Column(Integer, default=0) description = Column(Text) def __repr__(self): return self.pool @provide_session def used_slots(self, session): """ Returns the number of slots used at the moment """ running = ( session .query(TaskInstance) .filter(TaskInstance.pool == self.pool) .filter(TaskInstance.state == State.RUNNING) .count() ) return running @provide_session def queued_slots(self, session): """ Returns the number of slots used at the moment """ return ( session .query(TaskInstance) .filter(TaskInstance.pool == self.pool) .filter(TaskInstance.state == State.QUEUED) .count() ) @provide_session def open_slots(self, session): """ Returns the number of slots open at the moment """ used_slots = self.used_slots(session=session) return self.slots - used_slots class SlaMiss(Base): """ Model that stores a history of the SLA that have been missed. It is used to keep track of SLA failures over time and to avoid double triggering alert emails. """ __tablename__ = "sla_miss" task_id = Column(String(ID_LEN), primary_key=True) dag_id = Column(String(ID_LEN), primary_key=True) execution_date = Column(DateTime, primary_key=True) email_sent = Column(Boolean, default=False) timestamp = Column(DateTime) description = Column(Text) def __repr__(self): return str(( self.dag_id, self.task_id, self.execution_date.isoformat()))
apache-2.0
-7,476,350,181,389,807,000
34.757324
86
0.572422
false
4.168669
false
false
false
simone/django-gb
django/utils/deprecation.py
1
2585
import inspect import warnings class RemovedInDjango20Warning(DeprecationWarning): pass class RemovedInDjango19Warning(PendingDeprecationWarning): pass RemovedInNextVersionWarning = RemovedInDjango19Warning class warn_about_renamed_method(object): def __init__(self, class_name, old_method_name, new_method_name, deprecation_warning): self.class_name = class_name self.old_method_name = old_method_name self.new_method_name = new_method_name self.deprecation_warning = deprecation_warning def __call__(self, f): def wrapped(*args, **kwargs): warnings.warn( "`%s.%s` is deprecated, use `%s` instead." % (self.class_name, self.old_method_name, self.new_method_name), self.deprecation_warning, 2) return f(*args, **kwargs) return wrapped class RenameMethodsBase(type): """ Handles the deprecation paths when renaming a method. It does the following: 1) Define the new method if missing and complain about it. 2) Define the old method if missing. 3) Complain whenever an old method is called. See #15363 for more details. """ renamed_methods = () def __new__(cls, name, bases, attrs): new_class = super(RenameMethodsBase, cls).__new__(cls, name, bases, attrs) for base in inspect.getmro(new_class): class_name = base.__name__ for renamed_method in cls.renamed_methods: old_method_name = renamed_method[0] old_method = base.__dict__.get(old_method_name) new_method_name = renamed_method[1] new_method = base.__dict__.get(new_method_name) deprecation_warning = renamed_method[2] wrapper = warn_about_renamed_method(class_name, *renamed_method) # Define the new method if missing and complain about it if not new_method and old_method: warnings.warn( "`%s.%s` method should be renamed `%s`." % (class_name, old_method_name, new_method_name), deprecation_warning, 2) setattr(base, new_method_name, old_method) setattr(base, old_method_name, wrapper(old_method)) # Define the old method as a wrapped call to the new method. if not old_method and new_method: setattr(base, old_method_name, wrapper(new_method)) return new_class
bsd-3-clause
-6,056,115,480,740,207,000
34.410959
90
0.592263
false
4.279801
false
false
false
lixun910/pysal
pysal/explore/giddy/mobility.py
1
4182
""" Income mobility measures. """ __author__ = "Wei Kang <weikang9009@gmail.com>, Sergio J. Rey <sjsrey@gmail.com>" __all__ = ["markov_mobility"] import numpy as np import numpy.linalg as la def markov_mobility(p, measure="P",ini=None): """ Markov-based mobility index. Parameters ---------- p : array (k, k), Markov transition probability matrix. measure : string If measure= "P", :math:`M_{P} = \\frac{m-\sum_{i=1}^m P_{ii}}{m-1}`; if measure = "D", :math:`M_{D} = 1 - |\det(P)|`, where :math:`\det(P)` is the determinant of :math:`P`; if measure = "L2", :math:`M_{L2} = 1 - |\lambda_2|`, where :math:`\lambda_2` is the second largest eigenvalue of :math:`P`; if measure = "B1", :math:`M_{B1} = \\frac{m-m \sum_{i=1}^m \pi_i P_{ii}}{m-1}`, where :math:`\pi` is the initial income distribution; if measure == "B2", :math:`M_{B2} = \\frac{1}{m-1} \sum_{i=1}^m \sum_{ j=1}^m \pi_i P_{ij} |i-j|`, where :math:`\pi` is the initial income distribution. ini : array (k,), initial distribution. Need to be specified if measure = "B1" or "B2". If not, the initial distribution would be treated as a uniform distribution. Returns ------- mobi : float Mobility value. Notes ----- The mobility indices are based on :cite:`Formby:2004fk`. Examples -------- >>> import numpy as np >>> import pysal.lib >>> import pysal.viz.mapclassify as mc >>> from pysal.explore.giddy.markov import Markov >>> from pysal.explore.giddy.mobility import markov_mobility >>> f = pysal.lib.io.open(pysal.lib.examples.get_path("usjoin.csv")) >>> pci = np.array([f.by_col[str(y)] for y in range(1929,2010)]) >>> q5 = np.array([mc.Quantiles(y).yb for y in pci]).transpose() >>> m = Markov(q5) >>> m.p array([[0.91011236, 0.0886392 , 0.00124844, 0. , 0. ], [0.09972299, 0.78531856, 0.11080332, 0.00415512, 0. ], [0. , 0.10125 , 0.78875 , 0.1075 , 0.0025 ], [0. , 0.00417827, 0.11977716, 0.79805014, 0.07799443], [0. , 0. , 0.00125156, 0.07133917, 0.92740926]]) (1) Estimate Shorrock1 mobility index: >>> mobi_1 = markov_mobility(m.p, measure="P") >>> print("{:.5f}".format(mobi_1)) 0.19759 (2) Estimate Shorrock2 mobility index: >>> mobi_2 = markov_mobility(m.p, measure="D") >>> print("{:.5f}".format(mobi_2)) 0.60685 (3) Estimate Sommers and Conlisk mobility index: >>> mobi_3 = markov_mobility(m.p, measure="L2") >>> print("{:.5f}".format(mobi_3)) 0.03978 (4) Estimate Bartholomew1 mobility index (note that the initial distribution should be given): >>> ini = np.array([0.1,0.2,0.2,0.4,0.1]) >>> mobi_4 = markov_mobility(m.p, measure = "B1", ini=ini) >>> print("{:.5f}".format(mobi_4)) 0.22777 (5) Estimate Bartholomew2 mobility index (note that the initial distribution should be given): >>> ini = np.array([0.1,0.2,0.2,0.4,0.1]) >>> mobi_5 = markov_mobility(m.p, measure = "B2", ini=ini) >>> print("{:.5f}".format(mobi_5)) 0.04637 """ p = np.array(p) k = p.shape[1] if measure == "P": t = np.trace(p) mobi = (k - t) / (k - 1) elif measure == "D": mobi = 1 - abs(la.det(p)) elif measure == "L2": w, v = la.eig(p) eigen_value_abs = abs(w) mobi = 1 - np.sort(eigen_value_abs)[-2] elif measure == "B1": if ini is None: ini = 1.0/k * np.ones(k) mobi = (k - k * np.sum(ini * np.diag(p))) / (k - 1) elif measure == "B2": mobi = 0 if ini is None: ini = 1.0 / k * np.ones(k) for i in range(k): for j in range(k): mobi = mobi + ini[i] * p[i, j] * abs(i - j) mobi = mobi / (k - 1) return mobi
bsd-3-clause
3,632,192,855,585,092,600
31.169231
81
0.504304
false
2.920391
false
false
false
evilkost/cdic
src/cdic_project/cdic/logic/copr_logic.py
1
1290
# coding: utf-8 from flask import abort from sqlalchemy.orm.exc import NoResultFound from sqlalchemy.orm.query import Query from .. import db from ..logic.event_logic import create_project_event from ..logic.project_logic import ProjectLogic from ..models import Project, User, LinkedCopr def check_link_exists(project: Project, username: str, coprname: str) -> bool: query = ( LinkedCopr.query .filter(LinkedCopr.project_id == project.id) .filter(LinkedCopr.username == username) .filter(LinkedCopr.coprname == coprname) ) if len(query.all()) > 0: return True else: return False def create_link(project: Project, username: str, coprname: str,) -> LinkedCopr: link = LinkedCopr( project=project, username=username, coprname=coprname, ) event = create_project_event( project, "Linked copr: {}/{}".format(username, coprname), event_type="created_link") ProjectLogic.update_patched_dockerfile(project) db.session.add_all([link, event, project]) return link def get_link_by_id(link_id: int) -> LinkedCopr: return LinkedCopr.query.get(link_id) # def unlink_copr(link_id: int): # link = get_link_by_id(link_id) # if link: # db.session.
gpl-3.0
4,390,492,934,067,052,000
25.326531
79
0.66124
false
3.333333
false
false
false
MilchReis/PicSort
src/gui/components.py
1
4356
# -*- coding: utf-8 -*- ''' @author: nick ''' import pygame def isIn(objPos, objBounds, point): if (point[0] > objPos[0] and point[0] < objPos[0] + objBounds[0]) and (point[1] > objPos[1] and point[1] < objPos[1] + objBounds[1]): return True else: return False class Component(): def __init__(self): self.position = (None, None) self.bounds = (None, None) self.action = None self.colorBg = (200, 200, 200) self.colorBorder = (10, 10, 10) def update(self, event): pass def refresh(self): pass def render(self, surface): pass class InfoField(Component): def __init__(self): Component.__init__(self) self.text = "" self.alpha = 128 self.colorBg = (0, 0, 0) self.colorFont = (255, 255, 255) self.fontSize = 14 self.font = pygame.font.SysFont("sans", self.fontSize) self.font.set_bold(True) self.event = None def refresh(self): if not self.event == None: self.event() def update(self, event): pass def render(self, surface): if not(self.text == None) and not(self.text == ""): text_width, text_height = self.font.size(self.text) s = pygame.Surface((text_width + 20, text_height + 10)) s.set_alpha(self.alpha) s.fill(self.colorBg) surface.blit(s, self.position) txt = self.font.render(self.text, 1, self.colorFont) surface.blit(txt, (self.position[0] + 10, self.position[1] + 5)) class InputField(Component): def __init__(self): Component.__init__(self) self.cursorTime = 500 self.cursorHide = False self.text = "" self.active = False def update(self, event): mouse = pygame.mouse.get_pos() if event.type == pygame.KEYUP and self.active: if event.key == pygame.K_BACKSPACE: self.text = self.text[:-1] elif event.key == pygame.K_KP_ENTER and not self.action == None: self.action() else: if event.key in range(256): self.text += chr(event.key) if event.type == pygame.MOUSEBUTTONDOWN and isIn(self.position, self.bounds, mouse): self.active = True def render(self, pygamesurface): # bg pygame.draw.rect(pygamesurface, self.colorBg, (self.position[0], self.position[1], self.bounds[0], self.bounds[1]), 0) # border pygame.draw.rect(pygamesurface, self.colorBorder, (self.position[0], self.position[1], self.bounds[0], self.bounds[1]), 1) # text if len(self.text) > 0: myfont = pygame.font.SysFont("sans", 12 ) txt = myfont.render( self.text, 1, self.colorBorder) pygamesurface.blit( txt, (self.position[0] +10 , self.position[1] + 5) ) class Button(Component): def __init__(self): Component.__init__(self) self.text = None self.colorBg = (30, 30, 30) self.buttonColorHover = (40, 40, 40) self.buttonColorText = (255, 255, 255) self.colorBorder = (10, 10, 10) def update(self, event): mouse = pygame.mouse.get_pos() if isIn(self.position, self.bounds, mouse): if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1 and not(self.action == None): self.action(); def render(self, pygamesurface): mouse = pygame.mouse.get_pos() color = self.colorBg if isIn(self.position, self.bounds, mouse): color = self.buttonColorHover # bg pygame.draw.rect( pygamesurface, color, (self.position[0], self.position[1], self.bounds[0], self.bounds[1]), 0 ) # border pygame.draw.rect( pygamesurface, self.colorBorder, (self.position[0], self.position[1], self.bounds[0], self.bounds[1]), 1 ) # text myfont = pygame.font.SysFont("sans", 12 ) txt = myfont.render( self.text, 1, self.buttonColorText) pygamesurface.blit( txt, (self.position[0] +10 , self.position[1] + 5) )
gpl-2.0
8,014,038,098,838,877,000
28.632653
137
0.541093
false
3.67285
false
false
false
csangani/ReproducingSprout
create_trace.py
1
1774
## Create a network trace using specified distribution for packet intervals import numpy import os import random import sys UPLINK_TRACE_SIZE = 30000 DOWNLINK_TRACE_SIZE = 350000 TRACES_PATH = 'cleaned_traces' def create_trace(d_name, d_function, mode): intervals = [int(round(abs(d_function()))) for _ in range(UPLINK_TRACE_SIZE if mode == 'uplink' else DOWNLINK_TRACE_SIZE)] values = [] current_value = 0 for i in intervals: values += [current_value] current_value += i with open('%s/%s/%s.pps' % (TRACES_PATH, d_name, mode), 'w+') as f: for v in values: f.write('%s\n' % v) if __name__ == '__main__': if len(sys.argv) != 2: print 'Usage: python create_trace.py <distribution>' sys.exit(1) d_name = sys.argv[1] if not os.path.exists('%s/%s' % (TRACES_PATH, d_name)): os.makedirs('%s/%s' % (TRACES_PATH, d_name)) if d_name == 'gauss': uplink_function = lambda: random.gauss(14.383, 298.962) downlink_function = lambda: random.gauss(2.320, 11.526) elif d_name == 'expovariate': uplink_function = lambda: random.expovariate(1 / 14.383) downlink_function = lambda: random.expovariate(1 / 2.320) elif d_name == 'poisson': uplink_function = lambda: numpy.random.poisson(14.383) downlink_function = lambda: numpy.random.poisson(2.320) elif d_name == 'uniform': uplink_function = lambda: random.uniform(0,30) downlink_function = lambda: random.uniform(0,10) else: print "Unrecognized distribution" sys.exit(1) create_trace(d_name, uplink_function, 'uplink') create_trace(d_name, downlink_function, 'downlink')
mit
-5,686,957,577,726,436,000
30.678571
126
0.603157
false
3.353497
false
false
false
fle-internal/content-pack-maker
minimize-content-pack.py
1
1088
""" minimize-content-pack Remove assessment items, subtitles and po files from a content pack. Usage: minimize-content-pack.py <old-content-pack-path> <out-path> """ import zipfile from pathlib import Path from docopt import docopt ITEMS_TO_TRANSFER = [ "metadata.json", "content.db", "backend.mo", "frontend.mo", ] def minimize_content_pack(oldpackpath: Path, outpath: Path): with zipfile.ZipFile(str(oldpackpath)) as oldzf,\ zipfile.ZipFile(str(outpath), "w") as newzf: items = list(i for i in oldzf.namelist() for will_be_transferred in ITEMS_TO_TRANSFER if will_be_transferred in i) for item in items: bytes = oldzf.read(item) newzf.writestr(item, bytes) def main(): args = docopt(__doc__) contentpackpath = Path(args["<old-content-pack-path>"]) outpath = Path(args["<out-path>"] or "out/minimal.zip") outpath = outpath.expanduser() minimize_content_pack(contentpackpath, outpath) if __name__ == "__main__": main()
bsd-2-clause
1,715,905,646,846,452,200
22.652174
68
0.620404
false
3.443038
false
false
false
mouton5000/DiscreteEventApplicationEditor
test/testsArithmeticExpressions/MathFunctions/testAbs.py
1
2775
__author__ = 'mouton' from triggerExpressions import Evaluation from unittest import TestCase from math import pi, sqrt from arithmeticExpressions import ALitteral, Func, UndefinedLitteral, SelfLitteral from database import Variable class TestAbs(TestCase): @classmethod def setUpClass(cls): import grammar.grammars grammar.grammars.compileGrammars() def setUp(self): self.eval1 = Evaluation() self.eval2 = Evaluation() self.eval2[Variable('X')] = pi self.eval2[Variable('T')] = 'abc' self.eval2[Variable('Z')] = 12.0 def test_integer_abs_with_empty_evaluation(self): a1 = ALitteral(1) absr = Func(a1, abs) self.assertEqual(absr.value(self.eval1), abs(1)) def test_integer_abs_with_non_empty_evaluation(self): a1 = ALitteral(1) absr = Func(a1, abs) self.assertEqual(absr.value(self.eval2), abs(1)) def test_float_abs_with_empty_evaluation(self): a1 = ALitteral(pi) absr = Func(a1, abs) self.assertEqual(absr.value(self.eval1), abs(pi)) def test_float_abs_with_non_empty_evaluation(self): a1 = ALitteral(pi) absr = Func(a1, abs) self.assertEqual(absr.value(self.eval2), abs(pi)) def test_string_abs_with_empty_evaluation(self): a1 = ALitteral('abc') absr = Func(a1, abs) with self.assertRaises(TypeError): absr.value(self.eval1) def test_string_abs_with_non_empty_evaluation(self): a1 = ALitteral('abc') absr = Func(a1, abs) with self.assertRaises(TypeError): absr.value(self.eval2) def test_undefined_abs_with_empty_evaluation(self): a1 = UndefinedLitteral() absr = Func(a1, abs) with self.assertRaises(TypeError): absr.value(self.eval1) def test_undefined_abs_with_non_empty_evaluation(self): a1 = UndefinedLitteral() absr = Func(a1, abs) with self.assertRaises(TypeError): absr.value(self.eval2) def test_evaluated_variable_abs(self): a1 = ALitteral(Variable('X')) absr = Func(a1, abs) self.assertEqual(absr.value(self.eval2), abs(pi)) def test_unevaluated_variable_abs(self): a1 = ALitteral(Variable('Y')) absr = Func(a1, abs) with self.assertRaises(ValueError): absr.value(self.eval2) def test_self_litteral_abs_with_empty_evaluation(self): a1 = SelfLitteral() absr = Func(a1, abs) self.assertEqual(absr.value(self.eval1, pi), abs(pi)) def test_self_litteral_abs_with_non_empty_evaluation(self): a1 = SelfLitteral() absr = Func(a1, abs) self.assertEqual(absr.value(self.eval2, pi), abs(pi))
mit
7,408,478,610,740,688,000
30.908046
82
0.625586
false
3.234266
true
false
false
emptyewer/DEEPN
functions/stat_graph.py
1
3795
import math import re import matplotlib as mpl from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas import matplotlib.colors as colors import matplotlib.cm as cm import numpy as np from matplotlib.figure import Figure from PyQt4 import QtGui class GraphFrame(QtGui.QFrame): def __init__(self, filename, parent=None): super(GraphFrame, self).__init__(parent) self.setFrameShape(QtGui.QFrame.NoFrame) self.parent = parent self.graph_view = GraphView(filename, self) def resizeEvent(self, event): self.graph_view.setGeometry(self.rect()) class GraphView(QtGui.QWidget): def __init__(self, filename, parent=None): super(GraphView, self).__init__(parent) self.dpi = 300 self.filename = filename self.data = None self.fig = Figure((4.5, 4.5), dpi=self.dpi) self.axes = self.fig.add_subplot(111) self.canvas = FigureCanvas(self.fig) self.canvas.setParent(self) self.canvas.mpl_connect('button_press_event', self._onpick) self.layout = QtGui.QVBoxLayout() self.layout.addWidget(self.canvas) self.layout.setStretchFactor(self.canvas, 1) self.setLayout(self.layout) self.x = [] self.y = [] self.read_data() self.load_data() self.canvas.show() self.set_parameters() def read_data(self): fh = open(self.filename) for line in fh.readlines(): if not re.match(r'[A-Za-z]', line): values = line.strip().split(',') try: self.x.append(float(values[0])) self.y.append(float(values[2])) except: pass self.x = np.array(self.x) self.y = np.array(self.y) def load_data(self): self.bar = self.axes.plot(self.x, self.y, linewidth=1.0) def _get_clicked_residues(self, event): xmin, xmax = self.axes.get_xlim() return(int(math.ceil(event.xdata - xmin))-1) def _onpick(self, event): pass def set_parameters(self): self.axes.tick_params(axis=u'both', which=u'both', length=0) self.axes.set_xlim(min(self.x), max(self.x)) self.axes.set_ylim(0, max(self.y) + 1) self.axes.set_xlabel('Threshold') self.axes.set_ylabel('Overdispersion') # fractions = self.y / max(self.y) # normalized_colors = colors.Normalize(fractions.min(), fractions.max()) # count = 0 # for rect in self.bar: # c = cm.jet(normalized_colors(fractions[count])) # rect.set_facecolor(c) # count += 1 # self.fig.patch.set_facecolor((0.886, 0.886, 0.886)) ticks_font = mpl.font_manager.FontProperties(family='times new roman', style='normal', size=12, weight='normal', stretch='normal') labels = [self.axes.title, self.axes.xaxis.label, self.axes.yaxis.label] labels += self.axes.get_xticklabels() + self.axes.get_yticklabels() for item in labels: item.set_fontproperties(ticks_font) item.set_fontsize(4) self.fig.set_size_inches(30, self.fig.get_figheight(), forward=True) # # def update_graph(self): # self.axes.clear() # if self.pmap.use_ca: # self.xcoor = self.pmap.residue_numbers_ca[self.pmap.parent.current_model] # else: # self.xcoor = self.pmap.residue_numbers_cb[self.pmap.parent.current_model] # self.ycoor = self.pmap.histogram_maps[self.pmap.parent.current_model] # self.bar = self.axes.bar(self.xcoor, self.ycoor, width=1.0, linewidth=0) # self.set_parameters() # self.canvas.draw()
mit
-6,688,862,044,473,764,000
37.72449
103
0.594466
false
3.42509
false
false
false
rookies/dmx2serial
DMX.py
1
4980
#!/usr/bin/python3 from enum import IntEnum import struct class Flag(IntEnum): Payload = 0b10000000 Success = 0b01000000 Resend = 0b00100000 Configurate = 0b00000100 Hello = 0b00000010 Parity = 0b00000001 class FlagSet(object): def __init__(self, flags=0x00): flags = int(flags) if flags < 0 or flags > 255: raise ValueError("Invalid flags.") self.flags = flags def __str__(self): return "{}({})".format(self.__class__.__name__, ",".join(['%s=%s' % (k, v) for (k, v) in self.asDict().items()])) def asDict(self): res = {} for f in Flag: if self.isSet(f): res[f.name] = 1 else: res[f.name] = 0 return res def getBitfield(self): return self.flags def set(self, flag): if not isinstance(flag, Flag): raise ValueError("Please use instance of Flag.") self.flags |= flag def unset(self, flag): if not isinstance(flag, Flag): raise ValueError("Please use instance of Flag.") self.flags &= ~flag def toggle(self, flag): if not isinstance(flag, Flag): raise ValueError("Please use instance of Flag.") self.flags ^= flag def isSet(self, flag): if not isinstance(flag, Flag): raise ValueError("Please use instance of Flag.") return ((self.flags & flag) is not 0) class Packet(object): checksum = 0x0 def __init__(self, version=0x00, flags=0x00, universe=0x00, channel=0x0000, value=0x00): self.setVersion(version) self.flags = FlagSet(flags) self.setUniverse(universe) self.setChannel(channel) self.setValue(value) def __str__(self): return "{}(version={},flags={},universe={},channel={},value={},checksum={})".format(self.__class__.__name__, self.version, str(self.flags), self.universe, self.channel, self.value, self.checksum) def getVersion(self): return self.version def getFlags(self): return self.flags def getUniverse(self): return self.universe def getChannel(self): return self.channel def getValue(self): return self.value def setVersion(self, version): version = int(version) if version < 0 or version > 255: raise ValueError("Invalid version.") self.version = version def setUniverse(self, universe): universe = int(universe) if universe < 0 or universe > 255: raise ValueError("Invalid universe.") self.universe = universe def setChannel(self, channel): channel = int(channel) if channel < 0 or channel > 65535: raise ValueError("Invalid channel.") self.channel = channel def setValue(self, value): value = int(value) if value < 0 or value > 255: raise ValueError("Invalid value.") self.value = value def calculateParity(self): self.flags.unset(Flag.Parity) odd = (bin(self.version).count("1") + bin(self.flags.getBitfield()).count("1")) % 2 if odd is 1: self.flags.set(Flag.Parity) def checkParity(self): odd = (bin(self.version).count("1") + bin(self.flags.getBitfield()).count("1")) % 2 return (odd is 0) def calculateChecksum(self): pass #TODO# def checkChecksum(self): pass #TODO# def serialize(self): if self.flags.isSet(Flag.Payload): return struct.pack( "<BBBHB", self.version, self.flags.getBitfield(), self.universe, self.channel, self.value ) else: return struct.pack( "<BB", self.version, self.flags.getBitfield() ) def deserialize(self, data): pass #TODO# class PacketFactory(object): @staticmethod def createHsAsk(): return Packet(flags=(Flag.Hello | Flag.Parity)) @staticmethod def createHsAnswer(success, resend): p = Packet(version=1, flags=Flag.Hello) if success: p.flags.set(Flag.Success) if resend: p.flags.set(Flag.Resend) p.calculateParity() return p @staticmethod def createChSet(universe, channel, value): p = Packet(version=1, flags=Flag.Payload, universe=universe, channel=channel, value=value) p.calculateChecksum() return p 
    @staticmethod
    def createChAnswer(success, resend):
        p = Packet(version=1)
        if success:
            p.flags.set(Flag.Success)
        if resend:
            p.flags.set(Flag.Resend)
        p.calculateParity()
        return p

    @staticmethod
    def createCfgAnswer(success, resend):
        p = Packet(version=1, flags=Flag.Configurate)
        if success:
            p.flags.set(Flag.Success)
        if resend:
            p.flags.set(Flag.Resend)
        p.calculateParity()
        return p


if __name__ == "__main__":
    #p = Packet(version=1, flags=(Flag.Payload | Flag.Hello))
    #print(p)
    #print(p.checkParity())
    #p.calculateParity()
    #print(p)
    #print(p.checkParity())
    # The answer factory methods take (success, resend), so both arguments are passed explicitly here.
    print("      HsAsk():", PacketFactory.createHsAsk())
    print("  HsAnswer(1):", PacketFactory.createHsAnswer(True, False))
    print("  HsAnswer(0):", PacketFactory.createHsAnswer(False, False))
    print("   ChSet(...):", PacketFactory.createChSet(7, 10, 255))
    print("  ChAnswer(1):", PacketFactory.createChAnswer(True, False))
    print("  ChAnswer(0):", PacketFactory.createChAnswer(False, False))
    print("CfgAnswer(1):", PacketFactory.createCfgAnswer(True, False))
    print("CfgAnswer(0):", PacketFactory.createCfgAnswer(False, False))
mit
-8,310,246,874,338,883,000
25.349206
197
0.675301
false
3
false
false
false
philipn/i-am-cc
cc/cc/settings.py
1
5647
# Django settings for cc project. import os DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('Your Name', 'your_email@example.com'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': '', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. 'NAME': '', # Or path to database file if using sqlite3. 'USER': '', # Not used with sqlite3. 'PASSWORD': '', # Not used with sqlite3. 'HOST': '', # Set to empty string for localhost. Not used with sqlite3. 'PORT': '', # Set to empty string for default. Not used with sqlite3. } } # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/media/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = '' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = os.path.join(os.path.split(os.path.abspath(__file__))[0], '..', '..', 'static') # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( os.path.join(os.path.split(os.path.abspath(__file__))[0], 'static'), ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.cache.UpdateCacheMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.cache.FetchFromCacheMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', # Uncomment the next line for simple clickjacking protection: # 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'cc.urls' # Python dotted path to the WSGI application used by Django's runserver. 
WSGI_APPLICATION = 'cc.wsgi.application' TEMPLATE_DIRS = ( os.path.join(os.path.split(os.path.abspath(__file__))[0], '..', '..', 'templates'), ) LOGIN_URL = '/login/' LOGIN_REDIRECT_URL = '/setup/' AUTHENTICATION_BACKENDS = ( 'profiles.auth.InstagramBackend', 'django.contrib.auth.backends.ModelBackend', ) from django.template.defaultfilters import slugify SOCIAL_AUTH_COMPLETE_URL_NAME = 'socialauth_complete' SOCIAL_AUTH_ASSOCIATE_URL_NAME = 'associate_complete' SOCIAL_AUTH_DEFAULT_USERNAME = lambda u: slugify(u) SOCIAL_AUTH_EXTRA_DATA = True SOCIAL_AUTH_CHANGE_SIGNAL_ONLY = True SOCIAL_AUTH_ASSOCIATE_BY_MAIL = True INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'django_extensions', 'social_auth', 'tastypie', # our apps 'profiles', 'external_apis', 'auth', ) # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } try: from localsettings import * except: pass
mit
999,494,496,442,481,700
31.085227
100
0.680538
false
3.695681
false
false
false
equella/Equella
Source/Tools/ImportLibraries/Python/odbclient.py
1
1697
# Copyright Dytech Solutions, 2005. # This module is provided 'commercial-in-confidence' and may not be reproduced nor redistributed without # express written permission from the copyright holder. # Author: Adam Eijdenberg, Dytech Solutions <adam.eijdenberg@dytech.com.au> # Note: This is a very basic ODBC database access wrapper. It requires Python and the Win32 extensions to be installed. import dbi, odbc import re DATE_MATCHER = re.compile ('[^ ]+ ([^ ]+) ([0-9]+) ([0-9]+):([0-9]+):([0-9]+) ([0-9]+)') MONTHS = { 'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6, 'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12, } def clean_field (field): if hasattr (field, '__class__') and field.__class__ == str: return unicode (field, 'cp1252') else: return field def zp (s, i): while len (s) < i: s = '0' + s return s class ODBCClient: # Create an ODBC client given the datasource name def __init__ (self, odbcSourceName): self.dbc = odbc.odbc (odbcSourceName) # Given a SQL statement, return a two dimensional array of unicode strings as a result set def fetch (self, q): cursor = self.dbc.cursor () cursor.execute (q) res = [[clean_field (field) for field in row] for row in cursor.fetchall ()] cursor.close () return res def date_to_iso (self, date): month, date, hour, minute, second, year = DATE_MATCHER.match (str (date)).groups () return '%s-%s-%sT%s:%s:%s' % (zp (year, 4), zp (str (MONTHS [month]), 2), zp (date, 2), zp (hour, 2), zp (minute, 2), zp (second, 2))
apache-2.0
-7,283,380,422,882,922,000
29.854545
149
0.582793
false
3.057658
false
false
false
erigones/esdc-ce
bin/eslib/filelock.py
1
2314
import os import time from functools import wraps class FileLockTimeout(Exception): pass class FileLockError(Exception): pass class FileLock(object): """ Simple file lock. """ def __init__(self, lockfile): self._lockfile = lockfile self._lockfile_fd = None def __repr__(self): return '%s(%s)' % (self.__class__.__name__, self._lockfile) def __nonzero__(self): return self.is_locked() def _write_file(self): self._lockfile_fd = os.open(self._lockfile, os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC, 0o644) def _remove_file(self): os.close(self._lockfile_fd) self._lockfile_fd = None os.remove(self._lockfile) def exists(self): return os.path.exists(self._lockfile) def is_locked(self): return self._lockfile_fd is not None def acquire(self, timeout=30, sleep_interval=1): start_time = time.time() while not self.is_locked(): if not self.exists(): try: self._write_file() except (OSError, IOError): pass # Failed to create lock file else: break # Created lock file if timeout is not None and (time.time() - start_time) > timeout: raise FileLockTimeout('Could not acquire lock within %d seconds' % timeout) else: time.sleep(sleep_interval) def release(self): if self.exists(): if self.is_locked(): self._remove_file() else: raise FileLockError('Lock was never acquired') else: raise FileLockError('Not locked') def filelock(lockfile, **acquire_kwargs): """Simple file lock decorator""" def filelock_decorator(fun): @wraps(fun) def wrap(*args, **kwargs): if hasattr(lockfile, '__call__'): filepath = lockfile(*args, **kwargs) else: filepath = lockfile flock = FileLock(filepath) flock.acquire(**acquire_kwargs) try: return fun(*args, **kwargs) finally: flock.release() return wrap return filelock_decorator
apache-2.0
-2,884,021,981,432,574,500
25.295455
109
0.537165
false
4.26151
false
false
false
Berserker66/FactorioManager
tests.py
1
2223
__author__ = 'Fabian' import unittest broken_mods = {"5dim mod", "Air Filtering", "canInsert"} class TestRemoteAPI(unittest.TestCase): indextestfields = ["title", "contact", "name", "homepage", "author"] def test_index(self): from FactorioManager import remoteapi index = remoteapi.ModIndex for mod in index.index: self.assertTrue(type(mod) is str) self.assertTrue(len(index.index)> 1) self.index = index def test_mod_download(self): from FactorioManager import remoteapi mod = remoteapi.ModIndex.list[0] loc = remoteapi.download(mod) from FactorioManager.ModFile import ModFile ModFile.checkfile(loc) def test_all_mods_integrity(self): from FactorioManager import remoteapi from FactorioManager.ModFile import ModFile for i,mod in enumerate(remoteapi.ModIndex.list): modname = mod["title"] with self.subTest(modname): print("Testing mod {} of {}.".format(i + 1, len(remoteapi.ModIndex.list))) loc = remoteapi.download(mod) modfile = ModFile(loc) ret = modfile.check() if ret != True: if modname in broken_mods: self.skipTest("Mod {} is expected to fail: {}".format(modname,ret)) raise ret elif modname in broken_mods: self.fail("Mod {} is repaired, but still listed as broken.".format(modname)) else: with self.subTest(modname + " Sanity Check"): info = modfile.get_info() for field in self.indextestfields: if field in info and field not in mod: self.fail("Infofield {} is in info.json but not in index.".format(field)) elif field not in info and field in mod: self.fail("Infofield {} is in index but not in info.json.".format(field)) elif field in info and field in mod: self.assertEqual(info[field], mod[field])
mit
-479,495,605,471,145,800
42.588235
105
0.547458
false
4.490909
true
false
false
GoogleCloudPlatform/PerfKitBenchmarker
perfkitbenchmarker/stages.py
1
3495
# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Variables and classes related to the different stages of a PKB run.""" import itertools from absl import flags PROVISION = 'provision' PREPARE = 'prepare' RUN = 'run' CLEANUP = 'cleanup' TEARDOWN = 'teardown' STAGES = [PROVISION, PREPARE, RUN, CLEANUP, TEARDOWN] _NEXT_STAGE = {PROVISION: PREPARE, PREPARE: RUN, RUN: CLEANUP, CLEANUP: TEARDOWN} _ALL = 'all' _VALID_FLAG_VALUES = PROVISION, PREPARE, RUN, CLEANUP, TEARDOWN, _ALL _SYNTACTIC_HELP = ( "A complete benchmark execution consists of {0} stages: {1}. Possible flag " "values include an individual stage, a comma-separated list of stages, or " "'all'. If a list of stages is provided, they must be in order without " "skipping any stage.".format(len(STAGES), ', '.join(STAGES))) class RunStageParser(flags.ListParser): """Parse a string containing PKB run stages. See _SYNTACTIC_HELP for more information. """ def __init__(self, *args, **kwargs): super(RunStageParser, self).__init__(*args, **kwargs) self.syntactic_help = _SYNTACTIC_HELP def parse(self, argument): """Parses a list of stages. Args: argument: string or list of strings. Returns: list of strings whose elements are chosen from STAGES. Raises: ValueError: If argument does not conform to the guidelines explained in syntactic_help. """ stage_list = super(RunStageParser, self).parse(argument) if not stage_list: raise ValueError('Unable to parse {0}. Stage list cannot be ' 'empty.'.format(repr(argument))) invalid_items = set(stage_list).difference(_VALID_FLAG_VALUES) if invalid_items: raise ValueError( 'Unable to parse {0}. Unrecognized stages were found: {1}'.format( repr(argument), ', '.join(sorted(invalid_items)))) if _ALL in stage_list: if len(stage_list) > 1: raise ValueError( "Unable to parse {0}. If 'all' stages are specified, individual " "stages cannot also be specified.".format(repr(argument))) return list(STAGES) previous_stage = stage_list[0] for stage in itertools.islice(stage_list, 1, None): expected_stage = _NEXT_STAGE.get(previous_stage) if not expected_stage: raise ValueError("Unable to parse {0}. '{1}' should be the last " "stage.".format(repr(argument), previous_stage)) if stage != expected_stage: raise ValueError( "Unable to parse {0}. The stage after '{1}' should be '{2}', not " "'{3}'.".format(repr(argument), previous_stage, expected_stage, stage)) previous_stage = stage return stage_list flags.DEFINE( RunStageParser(), 'run_stage', STAGES, "The stage or stages of perfkitbenchmarker to run.", flags.FLAGS, flags.ListSerializer(','))
apache-2.0
4,238,887,930,728,280,600
32.932039
80
0.660658
false
3.803047
false
false
false
googledatalab/pydatalab
solutionbox/ml_workbench/tensorflow/transform.py
1
18304
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Flake8 cannot disable a warning for the file. Flake8 does not like beam code # and reports many 'W503 line break before binary operator' errors. So turn off # flake8 for this file. # flake8: noqa from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import datetime import json import logging import os import sys import apache_beam as beam import textwrap def parse_arguments(argv): """Parse command line arguments. Args: argv: list of command line arguments including program name. Returns: The parsed arguments as returned by argparse.ArgumentParser. """ parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description=textwrap.dedent("""\ Runs preprocessing on raw data for TensorFlow training. This script applies some transformations to raw data to improve training performance. Some data transformations can be expensive such as the tf-idf text column transformation. During training, the same raw data row might be used multiply times to train a model. This means the same transformations are applied to the same data row multiple times. This can be very inefficient, so this script applies partial transformations to the raw data and writes an intermediate preprocessed datasource to disk for training. Running this transformation step is required for two usage paths: 1) If the img_url_to_vec transform is used. This is because preprocessing as image is expensive and TensorFlow cannot easily read raw image files during training. 2) If the raw data is in BigQuery. TensorFlow cannot read from a BigQuery source. Running this transformation step is recommended if a text transform is used (like tf-idf or bag-of-words), and the text value for each row is very long. Running this transformation step may not have an interesting training performance impact if the transforms are all simple like scaling numerical values.""")) source_group = parser.add_mutually_exclusive_group(required=True) source_group.add_argument( '--csv', metavar='FILE', required=False, action='append', help='CSV data to transform.') source_group.add_argument( '--bigquery', metavar='PROJECT_ID.DATASET.TABLE_NAME', type=str, required=False, help=('Must be in the form `project.dataset.table_name`. BigQuery ' 'data to transform')) parser.add_argument( '--analysis', metavar='ANALYSIS_OUTPUT_DIR', required=True, help='The output folder of analyze') parser.add_argument( '--prefix', metavar='OUTPUT_FILENAME_PREFIX', required=True, type=str) parser.add_argument( '--output', metavar='DIR', default=None, required=True, help=('Google Cloud Storage or Local directory in which ' 'to place outputs.')) parser.add_argument( '--shuffle', action='store_true', default=False, help='If used, data source is shuffled. 
This is recommended for training data.') parser.add_argument( '--batch-size', metavar='N', type=int, default=100, help='Larger values increase performance and peak memory usage.') cloud_group = parser.add_argument_group( title='Cloud Parameters', description='These parameters are only used if --cloud is used.') cloud_group.add_argument( '--cloud', action='store_true', help='Run preprocessing on the cloud.') cloud_group.add_argument( '--job-name', type=str, help='Unique dataflow job name.') cloud_group.add_argument( '--project-id', help='The project to which the job will be submitted.') cloud_group.add_argument( '--num-workers', metavar='N', type=int, default=0, help='Set to 0 to use the default size determined by the Dataflow service.') cloud_group.add_argument( '--worker-machine-type', metavar='NAME', type=str, help='A machine name from https://cloud.google.com/compute/docs/machine-types. ' ' If not given, the service uses the default machine type.') cloud_group.add_argument( '--async', action='store_true', help='If used, this script returns before the dataflow job is completed.') args = parser.parse_args(args=argv[1:]) if args.cloud and not args.project_id: raise ValueError('--project-id is needed for --cloud') if args.async and not args.cloud: raise ValueError('--async should only be used with --cloud') if not args.job_name: args.job_name = ('dataflow-job-{}'.format( datetime.datetime.now().strftime('%Y%m%d%H%M%S'))) return args @beam.ptransform_fn def shuffle(pcoll): # pylint: disable=invalid-name import random return (pcoll | 'PairWithRandom' >> beam.Map(lambda x: (random.random(), x)) | 'GroupByRandom' >> beam.GroupByKey() | 'DropRandom' >> beam.FlatMap(lambda (k, vs): vs)) def image_transform_columns(features): """Returns a list of columns that prepare_image_transforms() should run on. Because of beam + pickle, IMAGE_URL_TO_VEC_TRANSFORM cannot be used inside of a beam function, so we extract the columns prepare_image_transforms() should run on outside of beam. """ import six from trainer import feature_transforms img_cols = [] for name, transform in six.iteritems(features): if transform['transform'] == feature_transforms.IMAGE_TRANSFORM: img_cols.append(name) return img_cols def prepare_image_transforms(element, image_columns): """Replace an images url with its jpeg bytes. Args: element: one input row, as a dict image_columns: list of columns that are image paths Return: element, where each image file path has been replaced by a base64 image. """ import base64 import cStringIO from PIL import Image from tensorflow.python.lib.io import file_io as tf_file_io from apache_beam.metrics import Metrics img_error_count = Metrics.counter('main', 'ImgErrorCount') img_missing_count = Metrics.counter('main', 'ImgMissingCount') for name in image_columns: uri = element[name] if not uri: img_missing_count.inc() continue try: with tf_file_io.FileIO(uri, 'r') as f: img = Image.open(f).convert('RGB') # A variety of different calling libraries throw different exceptions here. # They all correspond to an unreadable file so we treat them equivalently. # pylint: disable broad-except except Exception as e: logging.exception('Error processing image %s: %s', uri, str(e)) img_error_count.inc() return # Convert to desired format and output. 
output = cStringIO.StringIO() img.save(output, 'jpeg') element[name] = base64.urlsafe_b64encode(output.getvalue()) return element class EmitAsBatchDoFn(beam.DoFn): """A DoFn that buffers the records and emits them batch by batch.""" def __init__(self, batch_size): """Constructor of EmitAsBatchDoFn beam.DoFn class. Args: batch_size: the max size we want to buffer the records before emitting. """ self._batch_size = batch_size self._cached = [] def process(self, element): self._cached.append(element) if len(self._cached) >= self._batch_size: emit = self._cached self._cached = [] yield emit def finish_bundle(self, element=None): from apache_beam.transforms import window from apache_beam.utils.windowed_value import WindowedValue if len(self._cached) > 0: # pylint: disable=g-explicit-length-test yield WindowedValue(self._cached, -1, [window.GlobalWindow()]) class TransformFeaturesDoFn(beam.DoFn): """Converts raw data into transformed data.""" def __init__(self, analysis_output_dir, features, schema, stats): self._analysis_output_dir = analysis_output_dir self._features = features self._schema = schema self._stats = stats self._session = None def start_bundle(self, element=None): """Build the transfromation graph once.""" import tensorflow as tf from trainer import feature_transforms g = tf.Graph() session = tf.Session(graph=g) # Build the transformation graph with g.as_default(): transformed_features, _, placeholders = ( feature_transforms.build_csv_serving_tensors_for_transform_step( analysis_path=self._analysis_output_dir, features=self._features, schema=self._schema, stats=self._stats, keep_target=True)) session.run(tf.tables_initializer()) self._session = session self._transformed_features = transformed_features self._input_placeholder_tensor = placeholders['csv_example'] def finish_bundle(self, element=None): self._session.close() def process(self, element): """Run the transformation graph on batched input data Args: element: list of csv strings, representing one batch input to the TF graph. Returns: dict containing the transformed data. Results are un-batched. Sparse tensors are converted to lists. """ import apache_beam as beam import six import tensorflow as tf # This function is invoked by a separate sub-process so setting the logging level # does not affect Datalab's kernel process. tf.logging.set_verbosity(tf.logging.ERROR) try: clean_element = [] for line in element: clean_element.append(line.rstrip()) # batch_result is list of numpy arrays with batch_size many rows. batch_result = self._session.run( fetches=self._transformed_features, feed_dict={self._input_placeholder_tensor: clean_element}) # ex batch_result. # Dense tensor: {'col1': array([[batch_1], [batch_2]])} # Sparse tensor: {'col1': tf.SparseTensorValue( # indices=array([[batch_1, 0], [batch_1, 1], ..., # [batch_2, 0], [batch_2, 1], ...]], # values=array[value, value, value, ...])} # Unbatch the results. for i in range(len(clean_element)): transformed_features = {} for name, value in six.iteritems(batch_result): if isinstance(value, tf.SparseTensorValue): batch_i_indices = value.indices[:, 0] == i batch_i_values = value.values[batch_i_indices] transformed_features[name] = batch_i_values.tolist() else: transformed_features[name] = value[i].tolist() yield transformed_features except Exception as e: # pylint: disable=broad-except yield beam.pvalue.TaggedOutput('errors', (str(e), element)) def decode_csv(csv_string, column_names): """Parse a csv line into a dict. Args: csv_string: a csv string. 
May contain missing values "a,,c" column_names: list of column names Returns: Dict of {column_name, value_from_csv}. If there are missing values, value_from_csv will be ''. """ import csv r = next(csv.reader([csv_string])) if len(r) != len(column_names): raise ValueError('csv line %s does not have %d columns' % (csv_string, len(column_names))) return {k: v for k, v in zip(column_names, r)} def encode_csv(data_dict, column_names): """Builds a csv string. Args: data_dict: dict of {column_name: 1 value} column_names: list of column names Returns: A csv string version of data_dict """ import csv import six values = [str(data_dict[x]) for x in column_names] str_buff = six.StringIO() writer = csv.writer(str_buff, lineterminator='') writer.writerow(values) return str_buff.getvalue() def serialize_example(transformed_json_data, info_dict): """Makes a serialized tf.example. Args: transformed_json_data: dict of transformed data. info_dict: output of feature_transforms.get_transfrormed_feature_info() Returns: The serialized tf.example version of transformed_json_data. """ import six import tensorflow as tf def _make_int64_list(x): return tf.train.Feature(int64_list=tf.train.Int64List(value=x)) def _make_bytes_list(x): return tf.train.Feature(bytes_list=tf.train.BytesList(value=x)) def _make_float_list(x): return tf.train.Feature(float_list=tf.train.FloatList(value=x)) if sorted(six.iterkeys(transformed_json_data)) != sorted(six.iterkeys(info_dict)): raise ValueError('Keys do not match %s, %s' % (list(six.iterkeys(transformed_json_data)), list(six.iterkeys(info_dict)))) ex_dict = {} for name, info in six.iteritems(info_dict): if info['dtype'] == tf.int64: ex_dict[name] = _make_int64_list(transformed_json_data[name]) elif info['dtype'] == tf.float32: ex_dict[name] = _make_float_list(transformed_json_data[name]) elif info['dtype'] == tf.string: ex_dict[name] = _make_bytes_list(transformed_json_data[name]) else: raise ValueError('Unsupported data type %s' % info['dtype']) ex = tf.train.Example(features=tf.train.Features(feature=ex_dict)) return ex.SerializeToString() def preprocess(pipeline, args): """Transfrom csv data into transfromed tf.example files. Outline: 1) read the input data (as csv or bigquery) into a dict format 2) replace image paths with base64 encoded image files 3) build a csv input string with images paths replaced with base64. This matches the serving csv that a trained model would expect. 4) batch the csv strings 5) run the transformations 6) write the results to tf.example files and save any errors. 
""" from tensorflow.python.lib.io import file_io from trainer import feature_transforms schema = json.loads(file_io.read_file_to_string( os.path.join(args.analysis, feature_transforms.SCHEMA_FILE)).decode()) features = json.loads(file_io.read_file_to_string( os.path.join(args.analysis, feature_transforms.FEATURES_FILE)).decode()) stats = json.loads(file_io.read_file_to_string( os.path.join(args.analysis, feature_transforms.STATS_FILE)).decode()) column_names = [col['name'] for col in schema] if args.csv: all_files = [] for i, file_pattern in enumerate(args.csv): all_files.append(pipeline | ('ReadCSVFile%d' % i) >> beam.io.ReadFromText(file_pattern)) raw_data = ( all_files | 'MergeCSVFiles' >> beam.Flatten() | 'ParseCSVData' >> beam.Map(decode_csv, column_names)) else: columns = ', '.join(column_names) query = 'SELECT {columns} FROM `{table}`'.format(columns=columns, table=args.bigquery) raw_data = ( pipeline | 'ReadBiqQueryData' >> beam.io.Read(beam.io.BigQuerySource(query=query, use_standard_sql=True))) # Note that prepare_image_transforms does not make embeddings, it justs reads # the image files and converts them to byte stings. TransformFeaturesDoFn() # will make the image embeddings. image_columns = image_transform_columns(features) clean_csv_data = ( raw_data | 'PreprocessTransferredLearningTransformations' >> beam.Map(prepare_image_transforms, image_columns) | 'BuildCSVString' >> beam.Map(encode_csv, column_names)) if args.shuffle: clean_csv_data = clean_csv_data | 'ShuffleData' >> shuffle() transform_dofn = TransformFeaturesDoFn(args.analysis, features, schema, stats) (transformed_data, errors) = ( clean_csv_data | 'Batch Input' >> beam.ParDo(EmitAsBatchDoFn(args.batch_size)) | 'Run TF Graph on Batches' >> beam.ParDo(transform_dofn).with_outputs('errors', main='main')) _ = (transformed_data | 'SerializeExamples' >> beam.Map(serialize_example, feature_transforms.get_transformed_feature_info(features, schema)) | 'WriteExamples' >> beam.io.WriteToTFRecord( os.path.join(args.output, args.prefix), file_name_suffix='.tfrecord.gz')) _ = (errors | 'WriteErrors' >> beam.io.WriteToText( os.path.join(args.output, 'errors_' + args.prefix), file_name_suffix='.txt')) def main(argv=None): """Run Preprocessing as a Dataflow.""" args = parse_arguments(sys.argv if argv is None else argv) temp_dir = os.path.join(args.output, 'tmp') if args.cloud: pipeline_name = 'DataflowRunner' else: pipeline_name = 'DirectRunner' # Suppress TF warnings. os.environ['TF_CPP_MIN_LOG_LEVEL']='3' options = { 'job_name': args.job_name, 'temp_location': temp_dir, 'project': args.project_id, 'setup_file': os.path.abspath(os.path.join( os.path.dirname(__file__), 'setup.py')), } if args.num_workers: options['num_workers'] = args.num_workers if args.worker_machine_type: options['worker_machine_type'] = args.worker_machine_type pipeline_options = beam.pipeline.PipelineOptions(flags=[], **options) p = beam.Pipeline(pipeline_name, options=pipeline_options) preprocess(pipeline=p, args=args) pipeline_result = p.run() if not args.async: pipeline_result.wait_until_finish() if args.async and args.cloud: print('View job at https://console.developers.google.com/dataflow/job/%s?project=%s' % (pipeline_result.job_id(), args.project_id)) if __name__ == '__main__': main()
apache-2.0
2,258,065,721,742,861,000
32.340619
127
0.661385
false
3.88455
false
false
false
weblyzard/inscriptis
tests/test_block.py
1
1720
""" Test cases for the Block class. """ from inscriptis.model.canvas.block import Block from inscriptis.model.canvas.prefix import Prefix def test_merge_normal_text_collapsable_whitespaces(): """ test cases where the block has collapsable whitespaces """ b = Block(0, Prefix()) b.merge_normal_text("Hallo") assert b._content == 'Hallo' assert not b.collapsable_whitespace b = Block(0, Prefix()) b.merge_normal_text(" Hallo ") assert b._content == 'Hallo ' assert b.collapsable_whitespace b = Block(0, Prefix()) b.merge_normal_text('') assert b._content == '' assert b.collapsable_whitespace b.merge_normal_text(' ') assert b._content == '' assert b.collapsable_whitespace b.merge_normal_text(' ') assert b._content == '' assert b.collapsable_whitespace def test_merge_normal_non_collapsable_whitespaces(): b = Block(0, Prefix()) b.collapsable_whitespace = False b.merge_normal_text("Hallo") assert b._content == 'Hallo' assert not b.collapsable_whitespace b = Block(0, Prefix()) b.collapsable_whitespace = False b.merge_normal_text(" Hallo ") assert b._content == ' Hallo ' assert b.collapsable_whitespace b = Block(0, Prefix()) b.collapsable_whitespace = False b.merge_normal_text('') assert b._content == '' assert not b.collapsable_whitespace b = Block(0, Prefix()) b.collapsable_whitespace = False b.merge_normal_text(' ') assert b._content == ' ' assert b.collapsable_whitespace b = Block(0, Prefix()) b.collapsable_whitespace = False b.merge_normal_text(' ') assert b._content == ' ' assert b.collapsable_whitespace
gpl-2.0
-7,282,760,581,919,414,000
25.461538
58
0.64593
false
3.392505
false
false
false
maschwanden/boxsimu
boxsimu/solver.py
1
31181
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 23 2016 at 10:37UTC

@author: Mathias Aschwanden (mathias.aschwanden@gmail.com)

"""

import os
import pdb
import copy
import time as time_module
import datetime
import numpy as np
import dill as pickle
import matplotlib.pyplot as plt
from attrdict import AttrDict
import math

from . import solution as bs_solution
from . import utils as bs_utils
from . import ur


def save_simulation_state(system, timestep):
    filename = '{:%Y%m%d}_{}_TS{}.pickle'.format(
        datetime.date.today(), system.name, timestep)
    with open(filename, 'wb') as f:
        pickle.dump(system, f)


def load_simulation_state(system):
    pass


def solve(system, total_integration_time, dt, save_frequency=100, debug=False):
    """Simulate the time evolution of all variables within the system.

    Collect all information about the system, create differential equations
    from this information and integrate them (numerically) into the future.

    Args:
        system (System): The system that is simulated.
        total_integration_time (pint.Quantity [T]): The time span for which
            the system should be solved into the "future". The system will
            be simulated for the time period zero to approximately
            total_integration_time (depending on whether
            total_integration_time is a multiple of dt; if not, the real
            integration horizon will be bigger than
            [0, total_integration_time]).
        dt (pint.Quantity [T]): Size of the timestep for the simulation.
            The bigger the timestep the faster the simulation will be
            calculated; however, if the timestep is chosen too high,
            numerical instabilities can arise!
        save_frequency (int): Number of timesteps after which the solve
            progress is saved to a pickle file. If the solver is interrupted
            after the state was saved to a pickle file, the solve function
            will automatically resume from the latest saved state.
        debug (bool): Activates debugging mode (pdb.set_trace()).
            Defaults to False.
""" # Start time of function func_start_time = time_module.time() # Saves the time since the start of the simulate at which the last # save (pickling) was conducted last_save_timedelta = 0 if debug: pdb.set_trace() # Get number of time steps - round up if there is a remainder N_timesteps = math.ceil(total_integration_time / dt) # Recalculate total integration time based on the number of timesteps total_integration_time = N_timesteps * dt print('DDATTEE') print('Start solving the BoxModelSystem...') print('- total integration time: {}'.format(total_integration_time)) print('- dt (time step): {}'.format(dt)) print('- number of time steps: {}'.format(N_timesteps)) time = total_integration_time * 0 sol = bs_solution.Solution(system, N_timesteps, dt) # Save initial state to solution for box in system.box_list: # sol.df.loc[0] = np.nan sol.df.loc[0, (box.name,'mass')] = box.fluid.mass.magnitude sol.df.loc[0, (box.name,'volume')] = \ system.get_box_volume(box).magnitude for variable in system.variable_list: var_name = variable.name sol.df.loc[0, (box.name,var_name)] = \ box.variables[var_name].mass.magnitude timetesteps_since_last_save = 0 progress = 0 for timestep in range(N_timesteps): # Calculate progress in percentage of processed timesteps progress_old = progress progress = int(float(timestep) / float(N_timesteps)*10) * 10.0 if progress != progress_old: print("{}%".format(progress)) #print(timetesteps_since_last_save) # Check if simulation is running long enough to save the state if timetesteps_since_last_save >= save_frequency: timetesteps_since_last_save = 1 else: timetesteps_since_last_save += 1 time += dt ################################################## # Calculate Mass fluxes ################################################## dm, f_flow = _calculate_mass_flows(system, time, dt) ################################################## # Calculate Variable changes due to PROCESSES, # REACTIONS, FUXES and FLOWS ################################################## dvar = _calculate_changes_of_all_variables( system, time, dt, f_flow) ################################################## # Apply changes to Boxes and save values to # Solution instance ################################################## for box in system.box_list: # Write changes to box objects box.fluid.mass += dm[box.id] # Save mass to Solution instance sol.df.loc[timestep, (box.name, 'mass')] = \ box.fluid.mass.magnitude sol.df.loc[timestep, (box.name, 'volume')] = \ system.get_box_volume(box).magnitude for variable in system.variable_list: var_name = variable.name system.boxes[box.name].variables[var_name].mass += \ dvar[box.id, variable.id] sol.df.loc[timestep, (box.name,variable.name)] = \ box.variables[variable.name].mass.magnitude # End Time of Function func_end_time = time_module.time() print( 'Function "solve(...)" used {:3.3f}s'.format( func_end_time - func_start_time)) return sol def _calculate_mass_flows(system, time, dt): """Calculate mass changes of every box. Args: time (pint.Quantity [T]): Current time (age) of the system. dt (pint.Quantity [T]): Timestep used. Returns: dm (numpy 1D array of pint.Quantities): Mass changes of every box. f_flow (numpy 1D array): Reduction coefficient of the mass flows (due to becoming-empty boxes -> box mass cannot decrase below 0kg). 
""" # f_flow is the reduction coefficent of the "sink-flows" of each box # scaling factor for sinks of each box f_flow = np.ones(system.N_boxes) v1 = np.ones(system.N_boxes) m_ini = system.get_fluid_mass_1Darray() # get internal flow matrix and calculate the internal souce and sink # vectors. Also get the external sink and source vector A = system.get_fluid_mass_internal_flow_2Darray(time) # internal s_i = bs_utils.dot(A, v1) q_i = bs_utils.dot(A.T, v1) s_e = system.get_fluid_mass_flow_sink_1Darray(time) q_e = system.get_fluid_mass_flow_source_1Darray(time) # calculate first estimate of mass change vector dm = (q_e + q_i - s_e - s_i) * dt # calculate first estimate of mass after timestep m = m_ini + dm while np.any(m.magnitude < 0): argmin = np.argmin(m) # Calculate net sink and source and mass of the 'empty' box. net_source = (q_e[argmin] + q_i[argmin])*dt net_sink = (s_e[argmin] + s_i[argmin])*dt available_mass = m_ini[argmin] total_mass = (net_source + available_mass).to_base_units() if total_mass.magnitude > 0: f_new = (total_mass / net_sink).to_base_units().magnitude f_flow[argmin] = min(f_new, f_flow[argmin] * 0.98) else: f_flow[argmin] = 0 # Apply reduction of sinks of the box A = (A.T * f_flow).T s_i = bs_utils.dot(A, v1) q_i = bs_utils.dot(A.T, v1) s_e = f_flow * s_e dm = (q_e + q_i - s_e - s_i) * dt m = m_ini + dm return dm, f_flow def _calculate_changes_of_all_variables(system, time, dt, f_flow): """ Calculates the changes of all variable in every box. Args: time (pint.Quantity [T]): Current time (age) of the system. dt (pint.Quantity [T]): Timestep used. f_flow (numpy 1D array): Reduction coefficient of the mass flows due to empty boxes. Returns: dvar (numpy 2D array of pint.Quantities): Variables changes of every box. First dimension are the boxes, second dimension are the variables. """ # reduction coefficent of the "variable-sinks" of each box for the # treated variable # scaling factor for sinks of each box f_var = np.ones([system.N_boxes, system.N_variables]) var_ini = bs_utils.stack([system.get_variable_mass_1Darray( variable) for variable in system.variable_list], axis=-1) while True: dvar_list, net_sink_list, net_source_list = zip(*[_get_dvar( system, variable, time, dt, f_var, f_flow) for variable in system.variable_list]) dvar = bs_utils.stack(dvar_list, axis=-1) net_sink = bs_utils.stack(net_sink_list, axis=-1) net_source = bs_utils.stack(net_source_list, axis=-1) var = (var_ini + dvar).to_base_units() net_sink[net_sink.magnitude == 0] = np.nan # to evade division by zero f_var_tmp = ((var_ini + net_source) / net_sink).magnitude f_var_tmp[np.isnan(f_var_tmp)] = 1 f_var_tmp[f_var_tmp > 1] = 1 # If any element of f_var_tmp is smaller than one this means that # for at least one variable in one box the sinks are bigger than # the sum of the source and the already present variable mass. # Thus: The mass of this variable would fall below zero! # Reduce the sinks proportional to the ratio of the sources and # the already present variable mass to the sinks. if np.any(f_var_tmp < 1): # To be sure that the sinks are reduced enough and to # evade any rouding errors the reduction ratio of the sinks # (f_var_tmp) is further decreased by a very small number. 
f_var_tmp[f_var_tmp < 1] -= 1e-15 # np.nextafter(0, 1) f_var *= f_var_tmp else: break return dvar def _get_sink_source_flow(system, variable, time, dt, f_var, f_flow): v1 = np.ones(system.N_boxes) flows = system.flows A_flow = system.get_variable_internal_flow_2Darray(variable, time, f_flow, flows) A_flow = (A_flow.T * f_var[:, variable.id]).T s_flow_i = bs_utils.dot(A_flow, v1) q_flow_i = bs_utils.dot(A_flow.T, v1) s_flow_e = system.get_variable_flow_sink_1Darray(variable, time, f_flow, flows) s_flow_e = system.get_variable_flow_sink_1Darray(variable, time, f_flow, flows) * f_var[:, variable.id] q_flow_e = system.get_variable_flow_source_1Darray(variable, time, flows) sink_flow = ((s_flow_i + s_flow_e) * dt).to_base_units() source_flow = ((q_flow_i + q_flow_e) * dt).to_base_units() return sink_flow, source_flow def _get_sink_source_flux(system, variable, time, dt, f_var): v1 = np.ones(system.N_boxes) fluxes = system.fluxes A_flux = system.get_variable_internal_flux_2Darray(variable, time, fluxes) A_flux = (A_flux.T * f_var[:, variable.id]).T s_flux_i = bs_utils.dot(A_flux, v1) q_flux_i = bs_utils.dot(A_flux.T, v1) s_flux_e = system.get_variable_flux_sink_1Darray(variable, time, fluxes) s_flux_e = system.get_variable_flux_sink_1Darray(variable, time, fluxes) * f_var[:, variable.id] q_flux_e = system.get_variable_flux_source_1Darray(variable, time, fluxes) sink_flux = ((s_flux_i + s_flux_e) * dt).to_base_units() source_flux = ((q_flux_i + q_flux_e) * dt).to_base_units() dvar_flux = source_flux - sink_flux return sink_flux, source_flux def _get_sink_source_process(system, variable, time, dt, f_var): processes = system.processes s_process = system.get_variable_process_sink_1Darray(variable, time, processes) s_process = system.get_variable_process_sink_1Darray(variable, time, processes) * f_var[:, variable.id] q_process = system.get_variable_process_source_1Darray(variable, time, processes) sink_process = (s_process * dt).to_base_units() source_process = (q_process * dt).to_base_units() return sink_process, source_process def _get_sink_source_reaction(system, variable, time, dt, f_var): reactions = system.reactions rr_cube = system.get_reaction_rate_3Darray(time, reactions) ## APPLY CORRECTIONS HERE! 
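    # What the correction below does: f_var holds, per (box, variable), the
    # factor by which sinks must be scaled so that no variable mass drops
    # below zero. For every affected (box, variable) pair, the rates of
    # reactions that consume that variable in that box are scaled down.
    # The factor is applied to the whole reaction column (all variables of
    # that reaction in the box), so each reaction's stoichiometric ratios
    # are preserved.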
if np.any(f_var < 1): f_rr_cube = np.ones_like(rr_cube) for index in np.argwhere(f_var < 1): reduction_factor = f_var[tuple(index)] box = system.box_list[index[0]] box_name = box.name variable_name = system.variable_list[index[1]].name sink_reaction_indecies = np.argwhere(rr_cube[index[0], index[1], :].magnitude < 0) sink_reaction_indecies = list(sink_reaction_indecies.flatten()) for sink_reaction_index in sink_reaction_indecies: if f_rr_cube[index[0], index[1], sink_reaction_index] > reduction_factor: f_rr_cube[index[0], :, sink_reaction_index] = reduction_factor rr_cube *= f_rr_cube # Set all positive values to 0 sink_rr_cube = np.absolute(rr_cube.magnitude.clip(max=0)) * rr_cube.units # Set all negative values to 0 source_rr_cube = rr_cube.magnitude.clip(min=0) * rr_cube.units s_reaction = sink_rr_cube.sum(axis=2)[:, variable.id] q_reaction = source_rr_cube.sum(axis=2)[:, variable.id] sink_reaction = (s_reaction * dt).to_base_units() source_reaction = (q_reaction * dt).to_base_units() return sink_reaction, source_reaction def _get_dvar(system, variable, time, dt, f_var, f_flow): # Get variables sources (q) and sinks (s) # i=internal, e=external sink_flow, source_flow = _get_sink_source_flow( system, variable, time, dt, f_var, f_flow) sink_flux, source_flux = _get_sink_source_flux( system, variable, time, dt, f_var) sink_process, source_process = _get_sink_source_process( system, variable, time, dt, f_var) sink_reaction, source_reaction = _get_sink_source_reaction( system, variable, time, dt, f_var) net_sink = sink_flow + sink_flux + sink_process + sink_reaction net_source = (source_flow + source_flux + source_process + source_reaction) net_sink = net_sink.to_base_units() net_source = net_source.to_base_units() dvar = (net_source - net_sink).to_base_units() return dvar, net_sink, net_source class Solver: """Class that simulates the evolution of a BoxModelSystem in time. Functions: solve: Solve the complete system. That means all Fluid mass flows are calculated together with processes/reactions/fluxes/flows of variables that are traced within the system. Returns a Solution instance which contains the time series of all system quantities and can also plot them. Attributes: system (System): System which is simulated. """ def __init__(self, system): self.system_initial = system def solve(self, total_integration_time, dt, debug=False): """Simulate the time evolution of all variables within the system. Collect all information about the system, create differential equations from this information and integrate them (numercially) into the future. Args: system (System): The system that is simulated. total_integration_time (pint.Quantity [T]): The time span which the system should be solved into the "future". The system will be simulated for the time period zero to approximately total_integration_time (depending whether total_integration_time is a multiple of dt; if not the real integration horizon will be be bigger than [0, total_integration_time]). dt (pint.Quantity [T]): Size of the timestep for the simulation. The bigger the timestep the faster the simulation will be calculated, however, if the timestep is chosen too high there can arise numerical instabilites! debug (bool): Activates debugging mode (pdb.set_trace()). Defaults to False. 
""" # Start time of function func_start_time = time_module.time() # Saves the time since the start of the simulate at which the last # save (pickling) was conducted last_save_timedelta = 0 if debug: pdb.set_trace() # Get number of time steps - round up if there is a remainder N_timesteps = math.ceil(total_integration_time / dt) # Recalculate total integration time based on the number of timesteps total_integration_time = N_timesteps * dt print('Start solving the BoxModelSystem...') print('- total integration time: {}'.format(total_integration_time)) print('- dt (time step): {}'.format(dt)) print('- number of time steps: {}'.format(N_timesteps)) time = total_integration_time * 0 self.system = copy.deepcopy(self.system_initial) sol = bs_solution.Solution(self.system, N_timesteps, dt) # Save initial state to solution for box in self.system.box_list: # sol.df.loc[0] = np.nan sol.df.loc[0, (box.name,'mass')] = box.fluid.mass.magnitude sol.df.loc[0, (box.name,'volume')] = \ self.system.get_box_volume(box).magnitude for variable in self.system.variable_list: var_name = variable.name sol.df.loc[0, (box.name,var_name)] = \ box.variables[var_name].mass.magnitude progress = 0 for timestep in range(N_timesteps): # Calculate progress in percentage of processed timesteps progress_old = progress progress = int(float(timestep) / float(N_timesteps)*10) * 10.0 if progress != progress_old: print("{}%".format(progress)) # Check if simulation is running since more than a minute # since the last save was conducted. time_since_last_save = (time_module.time() - func_start_time - last_save_timedelta) if time_since_last_save > 6: last_save_timedelta = time_module.time() - func_start_time self.save('{}_TS{}.pickle'.format(self.system.name, timestep)) time += dt ################################################## # Calculate Mass fluxes ################################################## dm, f_flow = self._calculate_mass_flows(time, dt) ################################################## # Calculate Variable changes due to PROCESSES, # REACTIONS, FUXES and FLOWS ################################################## dvar = self._calculate_changes_of_all_variables( time, dt, f_flow) ################################################## # Apply changes to Boxes and save values to # Solution instance ################################################## for box in self.system.box_list: # Write changes to box objects box.fluid.mass += dm[box.id] # Save mass to Solution instance sol.df.loc[timestep, (box.name, 'mass')] = \ box.fluid.mass.magnitude sol.df.loc[timestep, (box.name, 'volume')] = \ self.system.get_box_volume(box).magnitude for variable in self.system.variable_list: var_name = variable.name self.system.boxes[box.name].variables[var_name].mass += \ dvar[box.id, variable.id] sol.df.loc[timestep, (box.name,variable.name)] = \ box.variables[variable.name].mass.magnitude # End Time of Function func_end_time = time_module.time() print( 'Function "solve(...)" used {:3.3f}s'.format( func_end_time - func_start_time)) return sol # PICKLING def save(self, file_name): """Pickle instance and save to file_name.""" with open(file_name, 'wb') as f: pickle.dump(self, f) @classmethod def load(self, file_name): """Load pickled instance from file_name.""" with open(file_name, 'rb') as f: solution = pickle.load(f) if not isinstance(solution, Solution): raise ValueError( 'Loaded pickle object is not a Solution instance!') return solution # HELPER functions def _calculate_mass_flows(self, time, dt): """Calculate mass changes of every box. 
Args: time (pint.Quantity [T]): Current time (age) of the system. dt (pint.Quantity [T]): Timestep used. Returns: dm (numpy 1D array of pint.Quantities): Mass changes of every box. f_flow (numpy 1D array): Reduction coefficient of the mass flows (due to becoming-empty boxes -> box mass cannot decrase below 0kg). """ # f_flow is the reduction coefficent of the "sink-flows" of each box # scaling factor for sinks of each box f_flow = np.ones(self.system.N_boxes) v1 = np.ones(self.system.N_boxes) m_ini = self.system.get_fluid_mass_1Darray() # get internal flow matrix and calculate the internal souce and sink # vectors. Also get the external sink and source vector A = self.system.get_fluid_mass_internal_flow_2Darray(time) # internal s_i = bs_utils.dot(A, v1) q_i = bs_utils.dot(A.T, v1) s_e = self.system.get_fluid_mass_flow_sink_1Darray(time) q_e = self.system.get_fluid_mass_flow_source_1Darray(time) # calculate first estimate of mass change vector dm = (q_e + q_i - s_e - s_i) * dt # calculate first estimate of mass after timestep m = m_ini + dm while np.any(m.magnitude < 0): argmin = np.argmin(m) # Calculate net sink and source and mass of the 'empty' box. net_source = (q_e[argmin] + q_i[argmin])*dt net_sink = (s_e[argmin] + s_i[argmin])*dt available_mass = m_ini[argmin] total_mass = (net_source + available_mass).to_base_units() if total_mass.magnitude > 0: f_new = (total_mass / net_sink).to_base_units().magnitude f_flow[argmin] = min(f_new, f_flow[argmin] * 0.98) else: f_flow[argmin] = 0 # Apply reduction of sinks of the box A = (A.T * f_flow).T s_i = bs_utils.dot(A, v1) q_i = bs_utils.dot(A.T, v1) s_e = f_flow * s_e dm = (q_e + q_i - s_e - s_i) * dt m = m_ini + dm return dm, f_flow def _calculate_changes_of_all_variables(self, time, dt, f_flow): """ Calculates the changes of all variable in every box. Args: time (pint.Quantity [T]): Current time (age) of the system. dt (pint.Quantity [T]): Timestep used. f_flow (numpy 1D array): Reduction coefficient of the mass flows due to empty boxes. Returns: dvar (numpy 2D array of pint.Quantities): Variables changes of every box. First dimension are the boxes, second dimension are the variables. """ # reduction coefficent of the "variable-sinks" of each box for the # treated variable # scaling factor for sinks of each box f_var = np.ones([self.system.N_boxes, self.system.N_variables]) var_ini = bs_utils.stack([self.system.get_variable_mass_1Darray( variable) for variable in self.system.variable_list], axis=-1) while True: dvar_list, net_sink_list, net_source_list = zip(*[self._get_dvar( variable, time, dt, f_var, f_flow) for variable in self.system.variable_list]) dvar = bs_utils.stack(dvar_list, axis=-1) net_sink = bs_utils.stack(net_sink_list, axis=-1) net_source = bs_utils.stack(net_source_list, axis=-1) var = (var_ini + dvar).to_base_units() net_sink[net_sink.magnitude == 0] = np.nan # to evade division by zero f_var_tmp = ((var_ini + net_source) / net_sink).magnitude f_var_tmp[np.isnan(f_var_tmp)] = 1 f_var_tmp[f_var_tmp > 1] = 1 # If any element of f_var_tmp is smaller than one this means that # for at least one variable in one box the sinks are bigger than # the sum of the source and the already present variable mass. # Thus: The mass of this variable would fall below zero! # Reduce the sinks proportional to the ratio of the sources and # the already present variable mass to the sinks. 
if np.any(f_var_tmp < 1): # To be sure that the sinks are reduced enough and to # evade any rouding errors the reduction ratio of the sinks # (f_var_tmp) is further decreased by a very small number. f_var_tmp[f_var_tmp < 1] -= 1e-15 # np.nextafter(0, 1) f_var *= f_var_tmp else: break return dvar def _get_sink_source_flow(self, variable, time, dt, f_var, f_flow): v1 = np.ones(self.system.N_boxes) flows = self.system.flows A_flow = self.system.get_variable_internal_flow_2Darray(variable, time, f_flow, flows) A_flow = (A_flow.T * f_var[:, variable.id]).T s_flow_i = bs_utils.dot(A_flow, v1) q_flow_i = bs_utils.dot(A_flow.T, v1) s_flow_e = self.system.get_variable_flow_sink_1Darray(variable, time, f_flow, flows) s_flow_e = self.system.get_variable_flow_sink_1Darray(variable, time, f_flow, flows) * f_var[:, variable.id] q_flow_e = self.system.get_variable_flow_source_1Darray(variable, time, flows) sink_flow = ((s_flow_i + s_flow_e) * dt).to_base_units() source_flow = ((q_flow_i + q_flow_e) * dt).to_base_units() return sink_flow, source_flow def _get_sink_source_flux(self, variable, time, dt, f_var): v1 = np.ones(self.system.N_boxes) fluxes = self.system.fluxes A_flux = self.system.get_variable_internal_flux_2Darray(variable, time, fluxes) A_flux = (A_flux.T * f_var[:, variable.id]).T s_flux_i = bs_utils.dot(A_flux, v1) q_flux_i = bs_utils.dot(A_flux.T, v1) s_flux_e = self.system.get_variable_flux_sink_1Darray(variable, time, fluxes) s_flux_e = self.system.get_variable_flux_sink_1Darray(variable, time, fluxes) * f_var[:, variable.id] q_flux_e = self.system.get_variable_flux_source_1Darray(variable, time, fluxes) sink_flux = ((s_flux_i + s_flux_e) * dt).to_base_units() source_flux = ((q_flux_i + q_flux_e) * dt).to_base_units() dvar_flux = source_flux - sink_flux return sink_flux, source_flux def _get_sink_source_process(self, variable, time, dt, f_var): processes = self.system.processes s_process = self.system.get_variable_process_sink_1Darray(variable, time, processes) s_process = self.system.get_variable_process_sink_1Darray(variable, time, processes) * f_var[:, variable.id] q_process = self.system.get_variable_process_source_1Darray(variable, time, processes) sink_process = (s_process * dt).to_base_units() source_process = (q_process * dt).to_base_units() return sink_process, source_process def _get_sink_source_reaction(self, variable, time, dt, f_var): reactions = self.system.reactions rr_cube = self.system.get_reaction_rate_3Darray(time, reactions) ## APPLY CORRECTIONS HERE! 
if np.any(f_var < 1): f_rr_cube = np.ones_like(rr_cube) for index in np.argwhere(f_var < 1): reduction_factor = f_var[tuple(index)] box = self.system.box_list[index[0]] box_name = box.name variable_name = self.system.variable_list[index[1]].name sink_reaction_indecies = np.argwhere(rr_cube[index[0], index[1], :].magnitude < 0) sink_reaction_indecies = list(sink_reaction_indecies.flatten()) for sink_reaction_index in sink_reaction_indecies: if f_rr_cube[index[0], index[1], sink_reaction_index] > reduction_factor: f_rr_cube[index[0], :, sink_reaction_index] = reduction_factor rr_cube *= f_rr_cube # Set all positive values to 0 sink_rr_cube = np.absolute(rr_cube.magnitude.clip(max=0)) * rr_cube.units # Set all negative values to 0 source_rr_cube = rr_cube.magnitude.clip(min=0) * rr_cube.units s_reaction = sink_rr_cube.sum(axis=2)[:, variable.id] q_reaction = source_rr_cube.sum(axis=2)[:, variable.id] sink_reaction = (s_reaction * dt).to_base_units() source_reaction = (q_reaction * dt).to_base_units() return sink_reaction, source_reaction def _get_dvar(self, variable, time, dt, f_var, f_flow): # Get variables sources (q) and sinks (s) # i=internal, e=external sink_flow, source_flow = self._get_sink_source_flow(variable, time, dt, f_var, f_flow) sink_flux, source_flux = self._get_sink_source_flux(variable, time, dt, f_var) sink_process, source_process = self._get_sink_source_process( variable, time, dt, f_var) sink_reaction, source_reaction = self._get_sink_source_reaction( variable, time, dt, f_var) net_sink = sink_flow + sink_flux + sink_process + sink_reaction net_source = (source_flow + source_flux + source_process + source_reaction) net_sink = net_sink.to_base_units() net_source = net_source.to_base_units() dvar = (net_source - net_sink).to_base_units() return dvar, net_sink, net_source
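A minimal, unit-free sketch of the sink-reduction loop used in _calculate_mass_flows: the flow matrix and masses below are made-up numbers and pint quantities are replaced by plain floats, but the idea is the same; boxes whose mass would drop below zero get their outflows scaled down until every mass stays non-negative.

import numpy as np

m_ini = np.array([1.0, 5.0, 5.0])        # made-up initial mass per box
A = np.array([[0.0, 2.0, 0.0],            # A[i, j]: flow from box i to box j
              [0.0, 0.0, 1.0],
              [0.5, 0.0, 0.0]])
dt = 1.0
f_flow = np.ones(3)                        # per-box sink reduction factors

while True:
    A_red = (A.T * f_flow).T               # scale each box's outflows
    sinks = A_red.sum(axis=1) * dt         # total outflow per box
    sources = A_red.sum(axis=0) * dt       # total inflow per box
    m = m_ini + sources - sinks
    if not np.any(m < 0):
        break
    worst = np.argmin(m)                   # the most negative box
    available = m_ini[worst] + sources[worst]
    if available <= 0:
        f_flow[worst] = 0.0
    else:
        f_flow[worst] = min(available / sinks[worst] * f_flow[worst],
                            0.98 * f_flow[worst])

print(f_flow)   # e.g. [0.75, 1.0, 1.0] for the numbers above
print(m)        # all entries >= 0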
mit
-289,562,725,548,882,940
37.305897
98
0.57612
false
3.693994
false
false
false
Delosari/dazer
bin/lib/Math_Libraries/linfit_script.py
1
13123
from numpy import asarray, array, sqrt from uncertainties import unumpy, ufloat def linfit(x_true, y, sigmay=None, relsigma=True, cov=False, chisq=False, residuals=False): """ Least squares linear fit. Fit a straight line `f(x_true) = a + bx` to points `(x_true, y)`. Returns coefficients `a` and `b` that minimize the squared error. Parameters ---------- x_true : array_like one dimensional array of `x_true` data with `n`>2 data points. y : array_like one dimensional array of `y` data with `n`>2 data points. sigmay : NoneType or float or array_like, optional one dimensional array of uncertainties (errors) in `y` data or a single positive number if all uncertainties are the same. `sigmay` determines the weighting in the least squares minimization. Leaving `sigmay=None` uses no weighting and is equivalent to `sigmay=1`. relsigma : bool, optional If `relsigma` is True, the residuals are used to scale the covariance matrix. Use this option if you do not know the absolute uncertainties (`sigmay`) in the data but still want a covariance matrix whose entries give meaningful estimates of the uncertainties in the fitting parameters `a` and `b` (from `f = a + bx`). If `relsigma` is False, the covariance matrix is calculated (provided `cov` = True) using sigmay assuming sigmay represents absolute undertainties. cov : bool, optional If True, calculate and return the 2x2 covarience matrix of the fitting parameters. chisq : bool, optional If True, calculate and return redchisq. residuals : bool, optional If True, calculate and return residuals. Returns ------- fit : array([a,b]) ndarray of floats The best fit model parameters `a` (the slope) and `b` (the `y`-intercept) for the input data arrays `x_true` and `y` cvm : array, shape (2,2) : returned only if cov=True Covarience matrix of the fitting parameters. Diagonal elements are estimated variances of the fitting parameters a and b; square roots of the diagonal elements thus provide estimates of the uncertainties in the fitting parameters `a` and `b`. Off diagonal elements (equal to each other) are the covarience between the fitting parameters `a` and `b`. redchisq : float : returned only if chisq=True Reduced chi-squared goodness of fit parameter. residuals : ndarray of floats : returned only if residuals=True Length n array of the differences `y-(ax+b)` between `y`-data and the fitted data `ax + b`. Raises ------ TypeError : if `x_true` and `y` have different lengths TypeError : If `x_true` and `y` have 2 or fewer elements TypeError : If `sigmay` length is not 1 or the same as `y` See Also -------- polyfit : Least squares fit to polynomial. linalg.lstsq : Least-squares solution to a linear matrix equation. Notes ----- By default, ``linfit`` returns optimal fitting parameters `a` and `b` without weighting of the data. In that case, linfit minimizes the squared error .. math :: E = \\sum_{i=0}^n [y_i - (a x_i + b)]^2 If `sigmay` is set equal to the uncertainties in the `y` data points, then linfit minimizes the `chi-squared` sum .. math :: \chi^2 = \\sum_{i=0}^n \\left[ \\frac{y_i-(a x_i + b)}{\\sigma_i} \\right]^2 where :math:`\sigma_i` is given by `sigmay`, the "error" or standard deviation of :math:`y_i`. `sigmay` can be either a single number that gives the uncertainty for all elements of `y`, or it can be an array of the same length as `y` that gives the "error" for each element of `y`. `redchisq` is :math:`\chi^2/(n-2)` where :math:`n` is the number of data points (the length of `x_true` or `y`). 
If `relsigma` is False, then the uncertainties `sigmay` in `y` are assumed to be the absolute one-standard-deviation uncertainties in `y`. In this case, the reduced chi-squared value :math:`\chi^2/(n-2)` provides a measure of the goodness of the fit. If it is near 1, then the linear fitting model is considered to be good and the values of the covariance matrix are appropriately scaled. In particular, the square root of the diagonal elements of the covariance matrix give the estimated uncertainty in the fitting parameters `a` and `b`. See Refernece [2] below for more information. If `relsigma` is True, then the uncertainties `sigmay` in `y` are considered to be only relative uncertainties. They are used to weight the data for the fit, but in this case, the covariance matrix is rescaled using the residuals between the fit and the data. In this case, the reduced chi-squared value :math:`\chi^2/(n-2)` does not provide a measure of the goodness of the fit. Nevertheless, the diagonal elements of the rescaled covariance matrix (returned by linfit) give the estimated uncertainty in the fitting parameters `a` and `b`. The covariance matrix is a 2x2 symmetric matrix where the diagonal elements are the variance of the fitting parameters. Their square roots provide estimates of the uncertainties in the fitting parameters. The off-diagonal elements are equal and give the cross correlation between the two fitting parameters `a` and `b`. linfit runs faster, by a factor of 2 to 3, if calculation of the residuals is suppressed letting `cov`, `chisq`, and `residuals` remain False (the default setting). Fitting a straight line to a single set of `(x_true, y)` data using ``linfit`` is typically 2 to 10 times faster than using either ``polyfit`` or ``linalg.lstsq``, especially when weighting is used and for very large data sets. References ---------- .. [1] An Introduction to Error Analysis, 2nd Ed. by John R. Taylor (University Science Books, 1997) .. [2] Numerical Recipes, The Art of Scientific Computing, 3rd Edition by W.H. Press, S. A. Teukolsky, W. T. Vetterling, & B. P. Flannery (Cambridge University Press, 2007) Examples -------- Fit a line, `y = ax + b`, through some noisy `(x_true, y)` data-points without any weighting (`sigmay` = None) to obtain fitting parameters `a` and `b`: >>> x_true = np.array([0, 1, 2, 3]) >>> y = np.array([-1, 0.2, 0.9, 2.1]) >>> fit = linfit(x_true, y) >>> print("a = {0:0.2f}, b = {1:0.2f}".format(fit[0], fit[1])) a = 1.00, b = -0.95 Setting `cov` = True in the input, returns the covariance matrix `cvm`. When uncertainties `sigmay` are left unspecified, meaningful estimates of the uncertainties `da` and `db` in the fitting parameters `a` and `b` are given by the square roots of the diagonals of the covariance matrix `cvm`, provided `relsigma` = True (the default state). >>> fit, cvm = linfit(x_true, y, cov=True) >>> dfit = [np.sqrt(cvm[i,i]) for i in range(2)] >>> print("da = {0:0.2f}, db = {1:0.2f}".format(dfit[0], dfit[1])) da = 0.07, db = 0.13 A better practice is to supply estimates of the uncertainties in the input argument `sigmay`. `sigmay` can be a single float, if the uncertainties are the same for all data points, or it can be an array, if the uncertainties for different data points are different. Here we enter sigmay as an array. 
>>> dy = np.array([0.18, 0.13, 0.15, 0.17]) >>> fit, cvm, redchisq, resids = linfit(x_true, y, cov=True, sigmay=dy, relsigma=False, chisq=True, residuals=True) >>> print("a = {0:0.2f}, b = {1:0.2f}".format(fit[0], fit[1])) a = 0.98, b = -0.91 >>> dfit = [np.sqrt(cvm[i,i]) for i in range(2)] >>> print("da = {0:0.2f}, db = {1:0.2f}".format(dfit[0], dfit[1])) da = 0.08, db = 0.14 >>> print("reduced chi-squared = {0:0.2f}".format(redchisq)) reduced chi-squared = 1.21 >>> print(resids) [-0.08856653 0.12781099 -0.1558115 0.06056602] The value of reduced chi-squared `redchisq` is 1.21 indicating that a linear model is valid for these data. The residuals :math:`y_i - (a+bx_i)` are given by the output `resids`. If absolute estimates of the uncertainties are not available, but relative estimates of the uncertainties are known, a fit can be obtained with reasonable estimates of the uncertainties in the fitting parameters by setting `relsigma` = True. >>> dy = np.array([1.0, 0.75, 0.75, 1.25]) >>> fit, cvm, redchisq = linfit(x_true, y, cov=True, sigmay=dy, relsigma=True, chisq=True) >>> print("a = {0:0.2f}, b = {1:0.2f}".format(fit[0], fit[1])) a = 0.97, b = -0.91 >>> dfit = [np.sqrt(cvm[i,i]) for i in range(2)] >>> print("da = {0:0.2f}, db = {1:0.2f}".format(dfit[0], dfit[1])) da = 0.09, db = 0.16 >>> print("reduced chi-squared = {0:0.2f}".format(redchisq)) reduced chi-squared = 0.04 In this case, the value `redchisq` is meaningless, because only the relative, rather than the absolute uncertainties are known. Nevertheless, by setting `relsigma` = True, reasonable estimates for the uncertainties in the fitting parameters are obtained. Illustration: .. image:: example.png :scale: 75 % """ x_true = asarray(x_true) y = asarray(y) if x_true.size != y.size: raise TypeError('Expected x_true and y to have same length') if x_true.size <= 2: raise TypeError('Expected x_true and y length > 2') if sigmay is None: sigmay = 1.0 sigmay = asarray(sigmay) if sigmay.size == 1: sigy = float(sigmay) # convert 0-d array to a float wt = 1./(sigy*sigy) s = wt * y.size sx = wt * x_true.sum() sy = wt * y.sum() t = x_true-sx/s stt = wt * (t*t).sum() slope = wt * (t*y).sum()/stt yint = (sy - sx * slope)/s else: if sigmay.size != y.size: raise TypeError('Expected sigmay size to be 1 or same as y') wt = 1./(sigmay*sigmay) s = wt.sum() sx = (x_true*wt).sum() sy = (y*wt).sum() t = (x_true-sx/s)/sigmay stt = (t*t).sum() slope = (t*y/sigmay).sum()/stt yint = (sy - sx * slope)/s returns = array([slope, yint]) if cov is True: cvm00 = 1./stt cvm01 = -sx/(s*stt) cvm11 = (1.0-sx*cvm01)/s if relsigma is True: redchisq, resids = _resids(x_true, y, sigmay, slope, yint) cvm00 *= redchisq cvm01 *= redchisq cvm11 *= redchisq returns = [returns] + [array([[cvm00, cvm01], [cvm01, cvm11]])] if residuals or chisq is True: if relsigma is False: redchisq, resids = _resids(x_true, y, sigmay, slope, yint) if type(returns) is not list: returns = [returns] if chisq is True: returns += [redchisq] if residuals is True: returns += [resids] return returns def _resids(x_true, y, sigmay, slope, yint): resids = y - (yint + slope*x_true) redchisq = ((resids/sigmay)**2).sum()/(x_true.size-2) return redchisq, resids def LinfitLinearRegression(x_true, y): if (x_true != None) and (y != None): if len(x_true) > 2: x_mag = unumpy.nominal_values(x_true) y_mag = unumpy.nominal_values(y) y_err = unumpy.std_devs(y) Regression_Fit, Uncertainty_Matrix = linfit(x_mag, y_mag, y_err, cov=True, relsigma=False) m_n_error = [sqrt(Uncertainty_Matrix[t,t]) for t in range(2)] 
gradient, gradient_error = Regression_Fit[0], m_n_error[0] n, n_error = Regression_Fit[1], m_n_error[1] Gradient_MagErr = ufloat(gradient, gradient_error) n_MagError = ufloat(n, n_error) elif len(x_true) == 2: x_mag = unumpy.nominal_values(x_true) y_mag = unumpy.nominal_values(y) m = (y_mag[1] - y_mag[0]) / (x_mag[1] - x_mag[0]) n = y_mag[0] - m * x_mag[0] Gradient_MagErr = ufloat(m, 1e-4) n_MagError = ufloat(n, 1e-4) else: print 'WARNING: Only one point to do a linear regression' else: Gradient_MagErr, n_MagError = None, None return Gradient_MagErr, n_MagError
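As a quick, self-contained sanity check of the weighted-fit formulas used in linfit above, the snippet below reruns the docstring example with numpy only and compares the result against numpy.polyfit (weights of 1/sigma correspond to the same chi-squared minimisation). This is an illustration added here, not part of the original module.

import numpy as np

x = np.array([0., 1., 2., 3.])
y = np.array([-1., 0.2, 0.9, 2.1])
sigmay = np.array([0.18, 0.13, 0.15, 0.17])

wt = 1.0 / sigmay**2
s, sx, sy = wt.sum(), (x * wt).sum(), (y * wt).sum()
t = (x - sx / s) / sigmay
stt = (t * t).sum()
slope = (t * y / sigmay).sum() / stt
yint = (sy - sx * slope) / s

# polyfit with weights 1/sigma reproduces the same straight line
ref = np.polyfit(x, y, 1, w=1.0 / sigmay)
assert np.allclose([slope, yint], ref)
print("slope = %.2f, intercept = %.2f" % (slope, yint))   # ~0.98 and ~-0.91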
mit
-1,549,250,426,603,208,400
42.456954
119
0.590338
false
3.492946
false
false
false
hirunatan/estelcon_web
activities/services.py
1
11340
from django.core.mail import send_mail, mail_managers from django.conf import settings from django.contrib.auth import authenticate from django.contrib.auth.models import User from django.db.models import Count from datetime import datetime, timedelta from collections import namedtuple import locale import math from .models import Activity from functools import reduce Day = namedtuple('Day', ['name', 'blocks']) Block = namedtuple('Block', ['hour', 'columns']) Column = namedtuple('Column', ['rowspan', 'colspan', 'activities']) PendingColumn = namedtuple('PendingColumn', ['current_row', 'column']) def get_schedule(): # Obtain the list of all activities (they are already ordered by start date) and put them in # a table divided in days, and then in blocks of half hour, from 8:30h to 05:00h next day. # Each block contains columns, and in each column fit one or more activities. Columns # may also span more than one block. # Set the language for day names locale.setlocale(locale.LC_ALL, 'es_ES.UTF-8') # Get the complete list of activities, and split into those with hour and those without activities = Activity.objects.all() activ_without_hour = [a for a in activities if a.start is None] activ_with_hour = [a for a in activities if a.start is not None] # Create the list of days days = [] if len(activ_with_hour) > 0: first_day = activ_with_hour[0].start.replace(hour=0, minute=0, second=0, microsecond=0) last_day = activ_with_hour[-1].start.replace(hour=0, minute=0, second=0, microsecond=0) day = first_day while day <= last_day: day_blocks = _build_day_blocks(activ_with_hour, day) days.append(day_blocks) day = day + timedelta(days=1) return (activ_without_hour, days) def _build_day_blocks(activ_with_hour, day): first_block_hour = day.replace(hour=8, minute=00) # from 08:30h last_block_hour = first_block_hour + timedelta(hours=20, minutes=30) # until 05:00h next day pending_cols = [ PendingColumn(0, Column(1, 2, [])), PendingColumn(0, Column(1, 1, [])), PendingColumn(0, Column(1, 1, [])) ] # Create a list of 30min blocks blocks = [] block_hour = first_block_hour while block_hour <= last_block_hour: block = _build_block(activ_with_hour, block_hour, pending_cols) if block: blocks.append(block) block_hour = block_hour + timedelta(minutes=30) # Remove all empty blocks at the beginning and the end of the day for i in [0, -1]: while len(blocks) > 0: block = blocks[i] if not block.columns: del blocks[i] else: break return Day(day.strftime('%A %d').upper(), blocks) def _build_block(activ_with_hour, block_hour, pending_cols): for ncol in range(3): rowspan, activities = _get_block_activities(activ_with_hour, block_hour, ncol) current_row, column = pending_cols[ncol] column.activities.extend(activities) if rowspan > column.rowspan - current_row: column = Column(rowspan + current_row, column.colspan, column.activities) pending_cols[ncol] = PendingColumn(current_row, column) if pending_cols[0].column.activities: if pending_cols[0].current_row == 0: columns = [pending_cols[0].column] else: columns = [] if pending_cols[1].column.activities and columns: columns[0].activities.extend(pending_cols[1].column.activities) if pending_cols[2].column.activities and columns: columns[0].activities.extend(pending_cols[2].column.activities) else: columns = [] if pending_cols[1].current_row == 0 and pending_cols[1].column.activities: columns.append(pending_cols[1].column) if pending_cols[2].current_row == 0 and pending_cols[2].column.activities: columns.append(pending_cols[2].column) for ncol in range(3): current_row, column = 
pending_cols[ncol] current_row += 1 if current_row >= column.rowspan: current_row = 0 column = Column(1, column.colspan, []) pending_cols[ncol] = PendingColumn(current_row, column) return Block(block_hour.strftime('%H:%M'), columns) def _get_block_activities(activ_with_hour, block_hour, ncol): activities = [] rowspan = 1 for activity in activ_with_hour: if (activity.start >= block_hour) and \ (activity.start < (block_hour + timedelta(minutes=30))) and \ (activity.column == ncol): activities.append(activity) if activity.end is None: duration = 0 else: duration = math.ceil((activity.end - activity.start).seconds / 60) activ_span = math.ceil(duration / 30) if activ_span > rowspan: rowspan = activ_span return (rowspan, activities) def get_activity_and_status(activity_id, user): try: activity = Activity.objects.get(pk = activity_id) except Activity.DoesNotExist: return (None, {}) is_owner = False is_organizer = False is_participant = False is_admin = False if user.is_authenticated(): if user in activity.owners.all(): is_owner = True if user in activity.organizers.all(): is_organizer = True if user in activity.participants.all(): is_participant = True if user.is_staff: is_admin = True user_status = { 'is_owner': is_owner, 'is_organizer': is_organizer, 'is_participant': is_participant, 'is_admin': is_admin } return (activity, user_status) def subscribe_to_activity(user, activity_id): #TODO: refactor to receive an actual activity object instead of an id try: activity = Activity.objects.get(pk = activity_id) except Activity.DoesNotExist: return # User is always added, even if the limit is reached activity.participants.add(user) activity.save() # Subscription limit control maxplacesreached = False if len(activity.participants.all()) > activity.max_places: maxplacesreached = True mail_managers( subject = '[Estelcon Admin] Inscripción en actividad %s' % (activity.title), message = ''' El usuario %s (%s) se ha inscrito en la actividad %s. ''' % (user.username, user.get_full_name(), activity.title), ) for owner in activity.owners.all(): send_mail( subject = '[Estelcon] Inscripción en actividad de la Estelcon que tú organizas', message = ''' El usuario %s (%s, %s) se ha inscrito en la actividad %s. ''' % (user.username, user.get_full_name(), user.email, activity.title), from_email = settings.MAIL_FROM, recipient_list = [owner.email], fail_silently = False ) if maxplacesreached: send_mail( subject = '[Estelcon] ATENCION: Tu actividad ha superado el máximo de plazas.', message = ''' Ponte en contacto con la organización, por favor, ya que tu actividad '%s' ya ha sobrepasado el máximo de plazas. Actualmente tienes %d inscritos en una actividad con un máximo establecido por ti de %d. ''' % (activity.title, len(activity.participants.all()), activity.max_places), from_email = settings.MAIL_FROM, recipient_list = [owner.email], fail_silently = False ) if maxplacesreached: message_participants_maxplaces = \ ''' ATENCION, tu inscripción ha superado el número máximo de plazas disponibles. Los responsables ya han sido notificados de este hecho y tomarán una decisión en breve. Si no recibes contestación en pocos días no dudes en escribir directamente a la organización. ''' else: message_participants_maxplaces = 'Te encuentras dentro del número máximo de plazas.' send_mail( subject = '[Estelcon] Inscripción en actividad de la Estelcon', message = ''' Se ha registrado tu inscripción en la actividad con título '%s'. Si en el futuro deseas cancelarla, escribe a la organización. 
%s ''' % (activity.title, message_participants_maxplaces), from_email = settings.MAIL_FROM, recipient_list = [user.email], fail_silently = True ) def change_activity(user, activity, home_url): mail_managers( subject = '[Estelcon Admin] Modificación de actividad "%s"' % (activity.title), message = ''' El usuario %s (%s) ha modificado una actividad Título: %s Subtítulo: %s Duración: %s Nº máximo de plazas: %d Mostrar responsables: %s Texto: %s Necesidades logísticas: %s Notas para la organización: %s''' % ( user.username, user.get_full_name(), activity.title, activity.subtitle, activity.duration, activity.max_places or 0, activity.show_owners, activity.text, activity.logistics, activity.notes_organization), ) send_mail( subject = '[Estelcon] Se ha modificado la actividad "%s"' % (activity.title), message = ''' Se ha modificado correctamente la actividad con título '%s'. ¡Muchas gracias por participar! Entre todos haremos una gran Mereth Aderthad. El equipo organizador. %s ''' % (activity.title, home_url), from_email = settings.MAIL_FROM, recipient_list = [user.email], fail_silently = True ) def send_proposal(user, data, home_url): mail_managers( subject = '[Estelcon Admin] Actividad propuesta: %s' % (data['title']), message = ''' El usuario %s (%s) ha propuesto una actividad. Título: %s Subtítulo: %s Duración: %s Nº máximo de plazas: %d Mostrar responsables: %s Requiere inscripción: %s Responsables: %s Organizadores: %s Texto: %s Necesidades logísticas: %s Notas para la organización: %s''' % ( user.username, user.get_full_name(), data['title'], data['subtitle'], data['duration'], data['max_places'] or 0, data['show_owners'], data['requires_inscription'], data['owners'], data['organizers'], data['text'], data['logistics'], data['notes_organization']), ) send_mail( subject = '[Estelcon] Actividad propuesta para la Estelcon', message = ''' Se ha enviado a los organizadores tu propuesta de actividad con título '%s'. Estudiaremos la actividad que propones y le buscaremos un hueco en la Estelcon. En cuanto lo hagamos, podrás ver cómo aparece en el Programa de actividades, incluyendo una ficha rellena con los datos que nos has enviado (al menos con la parte pública). Y si tú o cualquiera de las personas designadas como responsables accedéis a la web con vuestro usuario y contraseña, podréis consultar y modificar todos los datos. Si tenemos alguna duda o consulta que hacerte, contactaremos contigo a través del correo electrónico o el teléfono que indicaste al registrarte. ¡Muchas gracias por participar! Entre todos haremos una gran Mereth Aderthad. El equipo organizador. %s ''' % (data['title'], home_url), from_email = settings.MAIL_FROM, recipient_list = [user.email], fail_silently = True )
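The scheduling code above places each activity on a grid of half-hour blocks; the rowspan rule from _get_block_activities is simply a ceiling division of the duration, as the small sketch below shows (the activity times are made up for illustration).

import math
from datetime import datetime, timedelta

start = datetime(2024, 1, 1, 10, 0)                 # made-up activity start
end = start + timedelta(minutes=80)                 # made-up activity end

duration = math.ceil((end - start).seconds / 60)    # 80 minutes
rowspan = math.ceil(duration / 30)                  # spans 3 half-hour blocks
print("duration=%d min, rowspan=%d" % (duration, rowspan))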
agpl-3.0
-6,948,931,239,060,189,000
29.928767
113
0.647002
false
3.371864
false
false
false
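The record above closes with a Django service layer for convention activities (get_activity_and_status, subscribe_to_activity, change_activity, send_proposal). As a hedged sketch of how such helpers are typically wired into a view — the module path "activities.services" and the URL names below are assumptions for illustration, not taken from the record — something along these lines would work:

# Hypothetical glue code around the helpers in the record above.
# "activities.services" and both URL names are assumed, not from the record.
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect

from activities.services import get_activity_and_status, subscribe_to_activity


@login_required
def subscribe_view(request, activity_id):
    activity, status = get_activity_and_status(activity_id, request.user)
    if activity is None:
        return redirect('home')                      # assumed URL name
    if not status['is_participant']:
        # The service adds the user and sends all notification mails itself.
        subscribe_to_activity(request.user, activity_id)
    return redirect('activity-detail', activity_id)  # assumed URL name

Keeping the mail side effects inside the service function, as the record does, keeps a view like this down to a membership check and a redirect.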
ruohoruotsi/Wavelet-Tree-Synth
nnet/VAE-RyotaKatoh-chainer/VAE_YZ_X.py
1
5152
import os import time import numpy as np from chainer import cuda, Variable, function, FunctionSet, optimizers from chainer import functions as F class VAE_YZ_X(FunctionSet): def __init__(self, **layers): super(VAE_YZ_X, self).__init__(**layers) def softplus(self, x): return F.log(F.exp(x) + 1) def identity(self, x): return x def forward_one_step(self, x_data, y_data, n_layers_recog, n_layers_gen, nonlinear_q='softplus', nonlinear_p='softplus', output_f = 'sigmoid', type_qx='gaussian', type_px='gaussian', gpu=-1): x = Variable(x_data) y = Variable(y_data) # set non-linear function nonlinear = {'sigmoid': F.sigmoid, 'tanh': F.tanh, 'softplus': self.softplus, 'relu': F.relu} nonlinear_f_q = nonlinear[nonlinear_q] nonlinear_f_p = nonlinear[nonlinear_p] output_activation = {'sigmoid': F.sigmoid, 'identity': self.identity, 'tanh': F.tanh} output_a_f = output_activation[output_f] hidden_q = [ nonlinear_f_q( self.recog_x( x ) + self.recog_y( y ) ) ] # compute q(z|x, y) for i in range(n_layers_recog-1): hidden_q.append(nonlinear_f_q(getattr(self, 'recog_%i' % i)(hidden_q[-1]))) q_mean = getattr(self, 'recog_mean')(hidden_q[-1]) q_log_sigma = 0.5 * getattr(self, 'recog_log')(hidden_q[-1]) eps = np.random.normal(0, 1, (x.data.shape[0], q_log_sigma.data.shape[1])).astype('float32') if gpu >= 0: eps = cuda.to_gpu(eps) eps = Variable(eps) z = q_mean + F.exp(q_log_sigma) * eps # compute q(x |y, z) hidden_p = [ nonlinear_f_p( self.gen_y( y ) + self.gen_z( z ) ) ] for i in range(n_layers_gen-1): hidden_p.append(nonlinear_f_p(getattr(self, 'gen_%i' % i)(hidden_p[-1]))) hidden_p.append(output_a_f(getattr(self, 'gen_out')(hidden_p[-1]))) output = hidden_p[-1] rec_loss = F.mean_squared_error(output, x) KLD = -0.5 * F.sum(1 + q_log_sigma - q_mean**2 - F.exp(q_log_sigma)) / (x_data.shape[0]*x_data.shape[1]) return rec_loss, KLD, output def generate(self, sample_x, sample_y, n_layers_recog, n_layers_gen, nonlinear_q='relu', nonlinear_p='relu', output_f='sigmoid', gpu=-1): x = Variable(sample_x) y = Variable(sample_y) # set non-linear function nonlinear = {'sigmoid': F.sigmoid, 'tanh': F.tanh, 'softplus': self.softplus, 'relu': F.relu} nonlinear_f_q = nonlinear[nonlinear_q] nonlinear_f_p = nonlinear[nonlinear_p] output_activation = {'sigmoid': F.sigmoid, 'identity': self.identity, 'tanh': F.tanh} output_a_f = output_activation[output_f] # compute q(z|x, y) hidden_q = [ nonlinear_f_q( self.recog_x( x ) + self.recog_y( y ) ) ] for i in range(n_layers_recog-1): hidden_q.append(nonlinear_f_q(getattr(self, 'recog_%i' % i)(hidden_q[-1]))) q_mean = getattr(self, 'recog_mean')(hidden_q[-1]) q_log_sigma = 0.5 * getattr(self, 'recog_log')(hidden_q[-1]) eps = np.random.normal(0, 1, (x.data.shape[0], q_log_sigma.data.shape[1])).astype('float32') if gpu >= 0: eps = cuda.to_gpu(eps) eps = Variable(eps) z = q_mean + F.exp(q_log_sigma) * eps outputs = np.zeros((sample_y.shape[1], sample_x.shape[1]), dtype=np.float32) for label in range(sample_y.shape[1]): sample_y = np.zeros((1, sample_y.shape[1]), dtype=np.float32) sample_y[0][label] = 1. 
# compute q(x |y, z) hidden_p = [ nonlinear_f_p( self.gen_y( Variable(sample_y) ) + self.gen_z( z ) ) ] for i in range(n_layers_gen-1): hidden_p.append(nonlinear_f_p(getattr(self, 'gen_%i' % i)(hidden_p[-1]))) hidden_p.append(output_a_f(getattr(self, 'gen_out')(hidden_p[-1]))) output = hidden_p[-1] outputs[label] = output.data return outputs def generate_z_x(self, x_size, sample_z, sample_y, n_layers_recog, n_layers_gen, nonlinear_q='relu', nonlinear_p='relu', output_f='sigmoid', gpu=-1): # set non-linear function nonlinear = {'sigmoid': F.sigmoid, 'tanh': F.tanh, 'softplus': self.softplus, 'relu': F.relu} nonlinear_f_q = nonlinear[nonlinear_q] nonlinear_f_p = nonlinear[nonlinear_p] output_activation = {'sigmoid': F.sigmoid, 'identity': self.identity, 'tanh': F.tanh} output_a_f = output_activation[output_f] # input variables z = Variable(sample_z.reshape((1, sample_z.shape[0]))) y = Variable(sample_y.reshape((1, sample_y.shape[0]))) outputs = np.zeros((1, x_size), dtype=np.float32) # compute q(x |y, z) hidden_p = [ nonlinear_f_p( self.gen_y( y ) + self.gen_z( z ) ) ] for i in range(n_layers_gen-1): hidden_p.append(nonlinear_f_p(getattr(self, 'gen_%i' % i)(hidden_p[-1]))) hidden_p.append(output_a_f(getattr(self, 'gen_out')(hidden_p[-1]))) output = hidden_p[-1] outputs = output.data return outputs
gpl-2.0
-8,410,004,486,679,140,000
36.064748
195
0.572011
false
2.927273
false
false
false
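The VAE_YZ_X record above is a conditional variational autoencoder in the old Chainer FunctionSet style: forward_one_step builds q(z|x, y), samples z with the reparameterization trick, decodes through the generative layers, and returns a reconstruction loss plus a KL term. As a minimal sketch of just the sampling and KL arithmetic — plain NumPy, illustrative shapes, and the common log-variance convention rather than the record's own 0.5-scaled recog_log output — consider:

# Minimal NumPy sketch of the reparameterization step used in forward_one_step().
# Shapes (batch of 4, 2-D latent) are illustrative assumptions.
import numpy as np

rng = np.random.default_rng(0)

# Stand-ins for the encoder outputs of q(z|x, y).
q_mean = rng.standard_normal((4, 2)).astype(np.float32)
q_log_var = rng.standard_normal((4, 2)).astype(np.float32)

# Reparameterization: z is a deterministic function of (mean, log-var, noise),
# so gradients can flow back into the encoder parameters.
eps = rng.standard_normal(q_mean.shape).astype(np.float32)
z = q_mean + np.exp(0.5 * q_log_var) * eps

# Analytic KL divergence of q(z|x, y) from the standard normal prior.
kld = -0.5 * np.sum(1.0 + q_log_var - q_mean ** 2 - np.exp(q_log_var))
print(z.shape, float(kld))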
wangshunzi/Python_code
02-Python面向对象代码/面向对象-三大特性/封装.py
1
1583
# _*_ encoding:utf-8 _*_
import win32com.client   # needed by __say(); the original left this import commented out


class Caculator(object):

    def __check_num_zsq(func):
        # Decorator: reject any operand that is not an int.
        def inner(self, n):
            if not isinstance(n, int):
                # Message says: "this value has the wrong type, an integer is required"
                raise TypeError("当前这个数据的类型有问题, 应该是一个整型数据")
            return func(self, n)
        return inner

    def __say(self, word):
        # 1. Create a Windows SAPI text-to-speech object.
        speaker = win32com.client.Dispatch("SAPI.SpVoice")
        # 2. Speak the given string aloud through it.
        speaker.Speak(word)

    def __create_say_zsq(word=""):
        # Decorator factory: speak the operator word followed by the operand.
        def __say_zsq(func):
            def inner(self, n):
                self.__say(word + str(n))
                return func(self, n)
            return inner
        return __say_zsq

    @__check_num_zsq
    @__create_say_zsq()
    def __init__(self, num):
        self.__result = num

    @__check_num_zsq
    @__create_say_zsq("加")      # "加" = "plus"
    def jia(self, n):
        self.__result += n
        return self

    @__check_num_zsq
    @__create_say_zsq("减去")    # "减去" = "minus"
    def jian(self, n):
        self.__result -= n
        return self

    @__check_num_zsq
    @__create_say_zsq("乘以")    # "乘以" = "times"
    def cheng(self, n):
        self.__result *= n
        return self

    def show(self):
        # Speaks and prints "the result computed by the sz-brand calculator is %d".
        self.__say("sz牌计算机计算的结果是:%d" % self.__result)
        print("计算的结果是:%d" % self.__result)
        return self

    def clear(self):
        self.__result = 0
        return self

    @property
    def result(self):
        return self.__result
mit
-5,992,346,275,371,996,000
22.459016
58
0.512229
false
2.773256
false
false
false
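The record above is a small fluent-interface calculator whose arithmetic methods are wrapped by a type-checking decorator and a decorator factory that speaks each step through Windows SAPI. A hypothetical usage sketch follows; it needs Windows plus the pywin32 package, and the numbers are arbitrary:

# Hypothetical usage of the Caculator class from the record above.
calc = Caculator(10)                   # speaks "10"
calc.jia(5).jian(3).cheng(2).show()    # ((10 + 5) - 3) * 2, spoken step by step
print(calc.result)                     # -> 24

# A non-integer operand is rejected before anything is spoken:
try:
    calc.jia(1.5)
except TypeError as exc:
    print(exc)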
flegoff/hcrendu
settings.py
1
5119
# Django settings for hcrendu project. DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('Your Name', 'your_email@example.com'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'. 'NAME': '/home/dotcloud/rendus_proj', # Or path to database file if using sqlite3. 'USER': '', # Not used with sqlite3. 'PASSWORD': '', # Not used with sqlite3. 'HOST': '', # Set to empty string for localhost. Not used with sqlite3. 'PORT': '', # Set to empty string for default. Not used with sqlite3. } } # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # On Unix systems, a value of None will cause Django to use the same # timezone as the operating system. # If running in a Windows environment this must be set to the same as your # system time zone. TIME_ZONE = 'Europe/Paris' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'fr-fr' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale USE_L10N = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/media/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = '' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = '' # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = '/static/' SITE_URL = 'http://localhost:8000/' # URL prefix for admin static files -- CSS, JavaScript and images. # Make sure to use a trailing slash. # Examples: "http://foo.com/static/admin/", "/static/admin/". ADMIN_MEDIA_PREFIX = '/static/admin/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = '%i0kgcf0pz9$twap*$qt*^qh#la7s7ulj(iq*khjdl5m=v^#$t' # List of callables that know how to import templates from various sources. 
TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', ) ROOT_URLCONF = 'hcrendu.urls' TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.admin', 'hcrendu.hcstudyprojects' ) # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } EMAIL_HOST = '' EMAIL_PORT = 587 EMAIL_HOST_USER = '' EMAIL_HOST_PASSWORD = '' SENDER = '' ADMIN_MEDIA_PREFIX = '/static/admin_media/'
mit
7,010,484,665,604,025,000
32.457516
122
0.683727
false
3.643416
false
false
false
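The settings record above leaves the EMAIL_* values blank. Once real SMTP values were filled in, a minimal smoke test run from "python manage.py shell" might look like the following; the recipient address is an assumption, not part of the record:

# Hypothetical smoke test for the EMAIL_* settings defined above.
from django.conf import settings
from django.core.mail import send_mail

send_mail(
    subject='hcrendu mail check',
    message='If this arrives, EMAIL_HOST/EMAIL_PORT are configured correctly.',
    from_email=settings.SENDER or 'noreply@example.com',   # falls back if SENDER is empty
    recipient_list=['admin@example.com'],                  # assumed address, replace it
    fail_silently=False,
)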
maxive/erp
addons/point_of_sale/tests/test_frontend.py
1
12623
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from odoo.api import Environment from odoo.tools import DEFAULT_SERVER_DATE_FORMAT from datetime import date, timedelta import odoo.tests class TestUi(odoo.tests.HttpCase): def test_01_pos_basic_order(self): env = self.env journal_obj = env['account.journal'] account_obj = env['account.account'] main_company = env.ref('base.main_company') main_pos_config = env.ref('point_of_sale.pos_config_main') account_receivable = account_obj.create({'code': 'X1012', 'name': 'Account Receivable - Test', 'user_type_id': env.ref('account.data_account_type_receivable').id, 'reconcile': True}) field = env['ir.model.fields']._get('res.partner', 'property_account_receivable_id') env['ir.property'].create({'name': 'property_account_receivable_id', 'company_id': main_company.id, 'fields_id': field.id, 'value': 'account.account,' + str(account_receivable.id)}) # test an extra price on an attribute pear = env.ref('point_of_sale.poire_conference') attribute_value = env['product.attribute.value'].create({ 'name': 'add 2', 'product_ids': [(6, 0, [pear.id])], 'attribute_id': env['product.attribute'].create({ 'name': 'add 2', }).id, }) env['product.attribute.price'].create({ 'product_tmpl_id': pear.product_tmpl_id.id, 'price_extra': 2, 'value_id': attribute_value.id, }) fixed_pricelist = env['product.pricelist'].create({ 'name': 'Fixed', 'item_ids': [(0, 0, { 'compute_price': 'fixed', 'fixed_price': 1, }), (0, 0, { 'compute_price': 'fixed', 'fixed_price': 2, 'applied_on': '0_product_variant', 'product_id': env.ref('point_of_sale.boni_orange').id, }), (0, 0, { 'compute_price': 'fixed', 'fixed_price': 13.95, # test for issues like in 7f260ab517ebde634fc274e928eb062463f0d88f 'applied_on': '0_product_variant', 'product_id': env.ref('point_of_sale.papillon_orange').id, })], }) env['product.pricelist'].create({ 'name': 'Percentage', 'item_ids': [(0, 0, { 'compute_price': 'percentage', 'percent_price': 100, 'applied_on': '0_product_variant', 'product_id': env.ref('point_of_sale.boni_orange').id, }), (0, 0, { 'compute_price': 'percentage', 'percent_price': 99, 'applied_on': '0_product_variant', 'product_id': env.ref('point_of_sale.papillon_orange').id, }), (0, 0, { 'compute_price': 'percentage', 'percent_price': 0, 'applied_on': '0_product_variant', 'product_id': env.ref('point_of_sale.citron').id, })], }) env['product.pricelist'].create({ 'name': 'Formula', 'item_ids': [(0, 0, { 'compute_price': 'formula', 'price_discount': 6, 'price_surcharge': 5, 'applied_on': '0_product_variant', 'product_id': env.ref('point_of_sale.boni_orange').id, }), (0, 0, { # .99 prices 'compute_price': 'formula', 'price_surcharge': -0.01, 'price_round': 1, 'applied_on': '0_product_variant', 'product_id': env.ref('point_of_sale.papillon_orange').id, }), (0, 0, { 'compute_price': 'formula', 'price_min_margin': 10, 'price_max_margin': 100, 'applied_on': '0_product_variant', 'product_id': env.ref('point_of_sale.citron').id, }), (0, 0, { 'compute_price': 'formula', 'price_surcharge': 10, 'price_max_margin': 5, 'applied_on': '0_product_variant', 'product_id': env.ref('point_of_sale.limon').id, }), (0, 0, { 'compute_price': 'formula', 'price_discount': -100, 'price_min_margin': 5, 'price_max_margin': 20, 'applied_on': '0_product_variant', 'product_id': env.ref('point_of_sale.pamplemousse_rouge_pamplemousse').id, })], }) env['product.pricelist'].create({ 'name': 'min_quantity ordering', 'item_ids': [(0, 0, { 'compute_price': 'fixed', 'fixed_price': 1, 
'applied_on': '0_product_variant', 'min_quantity': 2, 'product_id': env.ref('point_of_sale.boni_orange').id, }), (0, 0, { 'compute_price': 'fixed', 'fixed_price': 2, 'applied_on': '0_product_variant', 'min_quantity': 1, 'product_id': env.ref('point_of_sale.boni_orange').id, }), (0, 0, { 'compute_price': 'fixed', 'fixed_price': 2, 'applied_on': '0_product_variant', 'min_quantity': 2, 'product_id': env.ref('point_of_sale.product_product_consumable').id, })], }) env['product.pricelist'].create({ 'name': 'Product template', 'item_ids': [(0, 0, { 'compute_price': 'fixed', 'fixed_price': 1, 'applied_on': '1_product', 'product_tmpl_id': env.ref('point_of_sale.boni_orange_product_template').id, }), (0, 0, { 'compute_price': 'fixed', 'fixed_price': 2, })], }) env['product.pricelist'].create({ # no category has precedence over category 'name': 'Category vs no category', 'item_ids': [(0, 0, { 'compute_price': 'fixed', 'fixed_price': 1, 'applied_on': '2_product_category', 'categ_id': env.ref('product.product_category_3').id, # All / Saleable / Services }), (0, 0, { 'compute_price': 'fixed', 'fixed_price': 2, })], }) p = env['product.pricelist'].create({ 'name': 'Category', 'item_ids': [(0, 0, { 'compute_price': 'fixed', 'fixed_price': 2, 'applied_on': '2_product_category', 'categ_id': env.ref('product.product_category_all').id, }), (0, 0, { 'compute_price': 'fixed', 'fixed_price': 1, 'applied_on': '2_product_category', 'categ_id': env.ref('product.product_category_3').id, # All / Saleable / Services })], }) today = date.today() one_week_ago = today - timedelta(weeks=1) two_weeks_ago = today - timedelta(weeks=2) one_week_from_now = today + timedelta(weeks=1) two_weeks_from_now = today + timedelta(weeks=2) env['product.pricelist'].create({ 'name': 'Dates', 'item_ids': [(0, 0, { 'compute_price': 'fixed', 'fixed_price': 1, 'date_start': two_weeks_ago.strftime(DEFAULT_SERVER_DATE_FORMAT), 'date_end': one_week_ago.strftime(DEFAULT_SERVER_DATE_FORMAT), }), (0, 0, { 'compute_price': 'fixed', 'fixed_price': 2, 'date_start': today.strftime(DEFAULT_SERVER_DATE_FORMAT), 'date_end': one_week_from_now.strftime(DEFAULT_SERVER_DATE_FORMAT), }), (0, 0, { 'compute_price': 'fixed', 'fixed_price': 3, 'date_start': one_week_from_now.strftime(DEFAULT_SERVER_DATE_FORMAT), 'date_end': two_weeks_from_now.strftime(DEFAULT_SERVER_DATE_FORMAT), })], }) cost_base_pricelist = env['product.pricelist'].create({ 'name': 'Cost base', 'item_ids': [(0, 0, { 'base': 'standard_price', 'compute_price': 'percentage', 'percent_price': 55, })], }) pricelist_base_pricelist = env['product.pricelist'].create({ 'name': 'Pricelist base', 'item_ids': [(0, 0, { 'base': 'pricelist', 'base_pricelist_id': cost_base_pricelist.id, 'compute_price': 'percentage', 'percent_price': 15, })], }) env['product.pricelist'].create({ 'name': 'Pricelist base 2', 'item_ids': [(0, 0, { 'base': 'pricelist', 'base_pricelist_id': pricelist_base_pricelist.id, 'compute_price': 'percentage', 'percent_price': 3, })], }) env['product.pricelist'].create({ 'name': 'Pricelist base rounding', 'item_ids': [(0, 0, { 'base': 'pricelist', 'base_pricelist_id': fixed_pricelist.id, 'compute_price': 'percentage', 'percent_price': 0.01, })], }) excluded_pricelist = env['product.pricelist'].create({ 'name': 'Not loaded' }) env.ref('base.res_partner_18').property_product_pricelist = excluded_pricelist # set the company currency to USD, otherwise it will assume # euro's. 
this will cause issues as the sales journal is in # USD, because of this all products would have a different # price main_company.currency_id = env.ref('base.USD') test_sale_journal = journal_obj.create({'name': 'Sales Journal - Test', 'code': 'TSJ', 'type': 'sale', 'company_id': main_company.id}) all_pricelists = env['product.pricelist'].search([('id', '!=', excluded_pricelist.id)]) all_pricelists.write(dict(currency_id=main_company.currency_id.id)) main_pos_config.write({ 'journal_id': test_sale_journal.id, 'invoice_journal_id': test_sale_journal.id, 'journal_ids': [(0, 0, {'name': 'Cash Journal - Test', 'code': 'TSC', 'type': 'cash', 'company_id': main_company.id, 'journal_user': True})], 'available_pricelist_ids': [(4, pricelist.id) for pricelist in all_pricelists], }) # open a session, the /pos/web controller will redirect to it main_pos_config.open_session_cb() # needed because tests are run before the module is marked as # installed. In js web will only load qweb coming from modules # that are returned by the backend in module_boot. Without # this you end up with js, css but no qweb. env['ir.module.module'].search([('name', '=', 'point_of_sale')], limit=1).state = 'installed' self.phantom_js("/pos/web", "odoo.__DEBUG__.services['web_tour.tour'].run('pos_pricelist')", "odoo.__DEBUG__.services['web_tour.tour'].tours.pos_pricelist.ready", login="admin") self.phantom_js("/pos/web", "odoo.__DEBUG__.services['web_tour.tour'].run('pos_basic_order')", "odoo.__DEBUG__.services['web_tour.tour'].tours.pos_basic_order.ready", login="admin") for order in env['pos.order'].search([]): self.assertEqual(order.state, 'paid', "Validated order has payment of " + str(order.amount_paid) + " and total of " + str(order.amount_total))
agpl-3.0
-4,505,519,686,318,333,000
41.076667
154
0.465816
false
4.025191
true
false
false
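The point_of_sale test record above spends most of its length building product.pricelist rules of the 'fixed', 'percentage' and 'formula' kinds before driving the JS tours. The following is not Odoo's pricing code; it is only a plain-Python restatement of what those three rule types mean, so the fixture values above are easier to reason about:

# Plain-Python restatement of the three rule kinds used in the fixtures above.
# NOT Odoo's implementation; it only mirrors the arithmetic the test data describes.
def apply_rule(base_price, rule):
    kind = rule['compute_price']
    if kind == 'fixed':
        return rule['fixed_price']
    if kind == 'percentage':
        # percent_price is a discount percentage of the base price.
        return base_price * (1 - rule['percent_price'] / 100.0)
    # 'formula': discount the base, add a surcharge, clamp by optional margins.
    price = base_price * (1 - rule.get('price_discount', 0) / 100.0)
    price += rule.get('price_surcharge', 0)
    if rule.get('price_min_margin'):
        price = max(price, base_price + rule['price_min_margin'])
    if rule.get('price_max_margin'):
        price = min(price, base_price + rule['price_max_margin'])
    return price

# One of the 'Formula' items above: 6% discount plus a 5.0 surcharge on a 3.0 base.
print(apply_rule(3.0, {'compute_price': 'formula',
                       'price_discount': 6, 'price_surcharge': 5}))  # -> 7.82 (up to float rounding)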
wwgong/CVoltDB
tools/vis.py
1
6217
#!/usr/bin/env python # This is a visualizer which pulls TPC-C benchmark results from the MySQL # databases and visualizes them. Four graphs will be generated, latency graph on # sinigle node and multiple nodes, and throughput graph on single node and # multiple nodes. # # Run it without any arguments to see what arguments are needed. import sys import os sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + os.sep + 'tests/scripts/') import time import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import matplotlib.ticker as ticker from voltdbclient import * STATS_SERVER = 'volt2' def COLORS(k): return (((k ** 3) % 255) / 255.0, ((k * 100) % 255) / 255.0, ((k * k) % 255) / 255.0) MARKERS = ['+', '*', '<', '>', '^', '_', 'D', 'H', 'd', 'h', 'o', 'p'] def get_stats(hostname, port, days): """Get statistics of all runs Example return value: { u'VoltKV': [ { 'lat95': 21, 'lat99': 35, 'nodes': 1, 'throughput': 104805, 'date': datetime object}], u'Voter': [ { 'lat95': 20, 'lat99': 47, 'nodes': 1, 'throughput': 66287, 'date': datetime object}]} """ conn = FastSerializer(hostname, port) proc = VoltProcedure(conn, 'BestOfPeriod', [FastSerializer.VOLTTYPE_SMALLINT]) resp = proc.call([days]) conn.close() # keyed on app name, value is a list of runs sorted chronologically stats = dict() run_stat_keys = ['nodes', 'date', 'tps', 'lat95', 'lat99'] for row in resp.tables[0].tuples: app_stats = [] if row[0] not in stats: stats[row[0]] = app_stats else: app_stats = stats[row[0]] run_stats = dict(zip(run_stat_keys, row[1:])) app_stats.append(run_stats) # sort each one for app_stats in stats.itervalues(): app_stats.sort(key=lambda x: x['date']) return stats class Plot: DPI = 100.0 def __init__(self, title, xlabel, ylabel, filename, w, h): self.filename = filename self.legends = {} w = w == None and 800 or w h = h == None and 300 or h fig = plt.figure(figsize=(w / self.DPI, h / self.DPI), dpi=self.DPI) self.ax = fig.add_subplot(111) self.ax.set_title(title) plt.xticks(fontsize=10) plt.yticks(fontsize=10) plt.ylabel(ylabel, fontsize=8) plt.xlabel(xlabel, fontsize=8) fig.autofmt_xdate() def plot(self, x, y, color, marker_shape, legend): self.ax.plot(x, y, linestyle="-", label=str(legend), marker=marker_shape, markerfacecolor=color, markersize=4) def close(self): formatter = matplotlib.dates.DateFormatter("%b %d") self.ax.xaxis.set_major_formatter(formatter) ymin, ymax = plt.ylim() plt.ylim((0, ymax * 1.1)) plt.legend(prop={'size': 10}, loc=0) plt.savefig(self.filename, format="png", transparent=False, bbox_inches="tight", pad_inches=0.2) def plot(title, xlabel, ylabel, filename, width, height, app, data, data_type): plot_data = dict() for run in data: if run['nodes'] not in plot_data: plot_data[run['nodes']] = {'time': [], data_type: []} datenum = matplotlib.dates.date2num(run['date']) plot_data[run['nodes']]['time'].append(datenum) if data_type == 'tps': value = run['tps']/run['nodes'] else: value = run[data_type] plot_data[run['nodes']][data_type].append(value) if len(plot_data) == 0: return i = 0 pl = Plot(title, xlabel, ylabel, filename, width, height) sorted_data = sorted(plot_data.items(), key=lambda x: x[0]) for k, v in sorted_data: pl.plot(v['time'], v[data_type], COLORS(i), MARKERS[i], k) i += 3 pl.close() def generate_index_file(filenames): row = """ <tr> <td>%s</td> <td><a href="%s"><img src="%s" width="400" height="200"/></a></td> <td><a href="%s"><img src="%s" width="400" height="200"/></a></td> </tr> """ full_content = """ <html> <head> <title>Performance 
Graphs</title> </head> <body> <table> %s </table> </body> </html> """ % (''.join([row % (i[0], i[1], i[1], i[2], i[2]) for i in filenames])) return full_content def usage(): print "Usage:" print "\t", sys.argv[0], "output_dir filename_base" \ " [width] [height]" print print "\t", "width in pixels" print "\t", "height in pixels" def main(): if len(sys.argv) < 3: usage() exit(-1) if not os.path.exists(sys.argv[1]): print sys.argv[1], "does not exist" exit(-1) prefix = sys.argv[2] path = os.path.join(sys.argv[1], sys.argv[2]) width = None height = None if len(sys.argv) >= 4: width = int(sys.argv[3]) if len(sys.argv) >= 5: height = int(sys.argv[4]) stats = get_stats(STATS_SERVER, 21212, 30) # Plot single node stats for all apps filenames = [] # (appname, latency, throughput) for app, data in stats.iteritems(): app_filename = app.replace(' ', '_') latency_filename = '%s-latency-%s.png' % (prefix, app_filename) throughput_filename = '%s-throughput-%s.png' % (prefix, app_filename) filenames.append((app, latency_filename, throughput_filename)) plot(app + " latency", "Time", "Latency (ms)", path + "-latency-" + app_filename + ".png", width, height, app, data, 'lat99') plot(app + " throughput", "Time", "Throughput (txns/sec)", path + "-throughput-" + app_filename + ".png", width, height, app, data, 'tps') # generate index file index_file = open(path + '-index.html', 'w') sorted_filenames = sorted(filenames, key=lambda f: f[0].lower()) index_file.write(generate_index_file(sorted_filenames)) index_file.close() if __name__ == "__main__": main()
gpl-3.0
-5,714,690,900,903,100,000
29.326829
80
0.5493
false
3.358725
false
false
false
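vis.py above pulls best-of-period benchmark rows through the VoltDB client ('BestOfPeriod' procedure) and renders one latency and one throughput PNG per application. A hypothetical stand-alone exercise of its Plot helper with fabricated numbers — it assumes the vis module is importable and keeps the Agg backend it selects:

# Hypothetical exercise of the Plot helper from vis.py with made-up data.
import datetime
import matplotlib.dates

from vis import Plot, COLORS, MARKERS   # assumes tools/vis.py is on the path

days = [datetime.date(2012, 1, d) for d in (1, 2, 3, 4)]
x = [matplotlib.dates.date2num(d) for d in days]
y = [66000, 67100, 65800, 68200]        # fabricated throughput samples

pl = Plot("Voter throughput", "Time", "Throughput (txns/sec)",
          "voter-throughput.png", 800, 300)
pl.plot(x, y, COLORS(3), MARKERS[0], legend=1)
pl.close()                              # writes the PNG next to the script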
g0v/sunshine.cy
website/cy/api/views.py
1
4839
#from django.contrib.auth.models import User, Group from rest_framework import viewsets from .serializers import * from journals.models import Journals from reports.models import Reports from property.models import Stock, Land, Building, Car, Cash, Deposit, Aircraft, Boat, Bonds, Fund, OtherBonds, Antique, Insurance, Claim, Debt, Investment class JournalsViewSet(viewsets.ReadOnlyModelViewSet): queryset = Journals.objects.all() serializer_class = JournalsSerializer filter_fields = ('name', 'date') class ReportsViewSet(viewsets.ReadOnlyModelViewSet): queryset = Reports.objects.all().prefetch_related('land_set', 'building_set', 'boat_set', 'car_set', 'aircraft_set', 'cash_set', 'deposit_set', 'bonds_set', 'fund_set', 'otherbonds_set', 'antique_set', 'insurance_set', 'claim_set', 'debt_set', 'investment_set', ) serializer_class = ReportsSerializer filter_fields = ('journal', 'category', 'name', 'department', 'title', 'report_at', 'report_type', 'spouse', 'at_page', 'file_id', ) class StockViewSet(viewsets.ReadOnlyModelViewSet): queryset = Stock.objects.all() serializer_class = StockSerializer filter_fields = ('report', 'name', 'symbol', 'owner', 'quantity', 'face_value', 'currency', 'total') class LandViewSet(viewsets.ReadOnlyModelViewSet): queryset = Land.objects.all() serializer_class = LandSerializer filter_fields = ('report', 'name', 'area', 'share_portion', 'portion', 'owner', 'register_date', 'register_reason', 'acquire_value', 'total') class BuildingViewSet(viewsets.ReadOnlyModelViewSet): queryset = Building.objects.all() serializer_class = BuildingSerializer filter_fields = ('report', 'name', 'area', 'share_portion', 'portion', 'owner', 'register_date', 'register_reason', 'acquire_value', 'total') class CarViewSet(viewsets.ReadOnlyModelViewSet): queryset = Car.objects.all() serializer_class = CarSerializer filter_fields = ('report', 'name', 'capacity', 'owner', 'register_date', 'register_reason', 'acquire_value') class CashViewSet(viewsets.ReadOnlyModelViewSet): queryset = Cash.objects.all() serializer_class = CashSerializer filter_fields = ('report', 'currency', 'owner', 'total') class DepositViewSet(viewsets.ReadOnlyModelViewSet): queryset = Deposit.objects.all() serializer_class = DepositSerializer filter_fields = ('report', 'bank', 'deposit_type', 'currency', 'owner', 'total') class AircraftViewSet(viewsets.ReadOnlyModelViewSet): queryset = Aircraft.objects.all() serializer_class = AircraftSerializer filter_fields = ('report', 'name', 'maker', 'number', 'owner', 'register_date', 'register_reason', 'acquire_value') class BoatViewSet(viewsets.ReadOnlyModelViewSet): queryset = Boat.objects.all() serializer_class = BoatSerializer filter_fields = ('report', 'name', 'tonnage', 'homeport', 'owner', 'register_date', 'register_reason', 'acquire_value') class BondsViewSet(viewsets.ReadOnlyModelViewSet): queryset = Bonds.objects.all() serializer_class = BondsSerializer filter_fields = ('report', 'name', 'symbol', 'owner', 'dealer', 'quantity', 'face_value', 'market_value', 'currency', 'total', 'total_value') class FundViewSet(viewsets.ReadOnlyModelViewSet): queryset = Fund.objects.all() serializer_class = FundSerializer filter_fields = ('report', 'name', 'owner', 'dealer', 'quantity', 'face_value', 'market_value', 'currency', 'total', 'total_value') class OtherBondsViewSet(viewsets.ReadOnlyModelViewSet): queryset = OtherBonds.objects.all() serializer_class = OtherBondsSerializer filter_fields = ('report', 'name', 'owner', 'quantity', 'face_value', 'market_value', 'currency', 'total', 
'total_value') class AntiqueViewSet(viewsets.ReadOnlyModelViewSet): queryset = Antique.objects.all() serializer_class = AntiqueSerializer filter_fields = ('report', 'name', 'owner', 'quantity', 'total') class InsuranceViewSet(viewsets.ReadOnlyModelViewSet): queryset = Insurance.objects.all() serializer_class = InsuranceSerializer filter_fields = ('report', 'name', 'company', 'owner') class ClaimViewSet(viewsets.ReadOnlyModelViewSet): queryset = Claim.objects.all() serializer_class = ClaimSerializer filter_fields = ('report', 'species', 'debtor', 'owner', 'register_date', 'register_reason', 'total') class DebtViewSet(viewsets.ReadOnlyModelViewSet): queryset = Debt.objects.all() serializer_class = DebtSerializer filter_fields = ('report', 'species', 'debtor', 'owner', 'register_date', 'register_reason', 'total') class InvestmentViewSet(viewsets.ReadOnlyModelViewSet): queryset = Investment.objects.all() serializer_class = InvestmentSerializer filter_fields = ('report', 'owner', 'company', 'address', 'register_date', 'register_reason', 'total')
cc0-1.0
-828,647,415,842,423,900
48.377551
267
0.713784
false
3.547654
false
false
false
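The final record defines read-only Django REST Framework viewsets with filter_fields for each property model. A hedged sketch of the urls.py counterpart that would normally expose them — the import path and URL prefixes are assumptions; only the viewset names come from the record:

# Hypothetical urls.py counterpart for the viewsets above.
from rest_framework import routers

from cy.api.views import JournalsViewSet, ReportsViewSet, StockViewSet

router = routers.DefaultRouter()
router.register(r'journals', JournalsViewSet)
router.register(r'reports', ReportsViewSet)
router.register(r'stocks', StockViewSet)

urlpatterns = router.urls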