{ "cells": [ { "cell_type": "code", "execution_count": 30, "metadata": {}, "outputs": [], "source": [ "%matplotlib inline\n", "import pandas as pd\n", "from pandas import DataFrame, Series\n", "import numpy as np\n", "import math\n", "\n", "import seaborn as sns\n", "import matplotlib.pyplot as plt\n", "import matplotlib.patches as mpatches\n", "import matplotlib.colors as colors\n", "from matplotlib.legend_handler import HandlerLine2D, HandlerTuple\n", "from matplotlib.colors import LinearSegmentedColormap\n", "from scipy import stats\n", "import scikit_posthocs as sp\n", "import sys\n", "\n", "from mpl_toolkits.mplot3d import axes3d" ] }, { "cell_type": "code", "execution_count": 66, "metadata": {}, "outputs": [], "source": [ "AllName=\"dataG.pkl\"\n", "ResizesName=\"dataM.pkl\"\n", "ItersName=\"dataL.pkl\"\n", "matrixIt_Total=\"data_L_Total.csv\"\n", "n_cores=20\n", "repet = 10 #CAMBIAR EL NUMERO SEGUN NUMERO DE EJECUCIONES POR CONFIG\n", "\n", "significance_value = 0.05\n", "processes = [2,20,40,80,120,160]\n", "\n", "positions = [321, 322, 323, 324, 325]\n", "positions_small = [221, 222, 223, 224]\n", "\n", "labels = ['(1,10)', '(1,20)', '(1,40)', '(1,80)', '(1,120)','(1,160)',\n", " '(10,1)', '(10,20)', '(10,40)', '(10,80)', '(10,120)','(10,160)',\n", " '(20,1)', '(20,10)', '(20,40)', '(20,80)', '(20,120)','(20,160)',\n", " '(40,1)', '(40,10)', '(40,20)', '(40,80)', '(40,120)','(40,160)',\n", " '(80,1)', '(80,10)', '(80,20)', '(80,40)', '(80,120)','(80,160)',\n", " '(120,1)','(120,10)', '(120,20)','(120,40)','(120,80)','(120,160)',\n", " '(160,1)','(160,10)', '(160,20)','(160,40)','(160,80)','(160,120)']\n", "\n", "labelsExpand = ['(1,10)', '(1,20)', '(1,40)', '(1,80)', '(1,120)','(1,160)',\n", " '(10,20)', '(10,40)', '(10,80)', '(10,120)','(10,160)',\n", " '(20,40)', '(20,80)', '(20,120)','(20,160)',\n", " '(40,80)', '(40,120)','(40,160)',\n", " '(80,120)','(80,160)',\n", " '(120,160)']\n", "labelsShrink = ['(10,1)', \n", " '(20,1)', '(20,10)',\n", " '(40,1)', '(40,10)', '(40,20)',\n", " '(80,1)', '(80,10)', '(80,20)', '(80,40)',\n", " '(120,1)','(120,10)', '(120,20)','(120,40)','(120,80)',\n", " '(160,1)','(160,10)', '(160,20)','(160,40)','(160,80)','(160,120)']\n", "\n", "# WORST BEST\n", "labels_dist = ['null', 'SpreadFit', 'CompactFit']\n", " #0 #1 #2 #3\n", "labelsMethods = ['Baseline', 'Baseline single','Baseline - Asynchronous','Baseline single - Asynchronous',\n", " 'Merge','Merge single','Merge - Asynchronous','Merge single - Asynchronous']\n", " #4 #5 #6 #7\n", " \n", "colors_m = ['green','darkgreen','red','darkred','mediumseagreen','seagreen','palegreen','springgreen','indianred','firebrick','darkgoldenrod','saddlebrown']\n", "linestyle_m = ['-', '--', '-.', ':']\n", "markers_m = ['.','v','s','p', 'h','d','X','P','^']\n", "\n", "OrMult_patch = mpatches.Patch(hatch='', facecolor='green', label='Baseline')\n", "OrSing_patch = mpatches.Patch(hatch='', facecolor='springgreen', label='Baseline single')\n", "OrPthMult_patch = mpatches.Patch(hatch='//', facecolor='blue', label='Baseline - Asyncrhonous')\n", "OrPthSing_patch = mpatches.Patch(hatch='\\\\', facecolor='darkblue', label='Baseline single - Asyncrhonous')\n", "MergeMult_patch = mpatches.Patch(hatch='||', facecolor='red', label='Merge')\n", "MergeSing_patch = mpatches.Patch(hatch='...', facecolor='darkred', label='Merge single')\n", "MergePthMult_patch = mpatches.Patch(hatch='xx', facecolor='yellow', label='Merge - Asyncrhonous')\n", "MergePthSing_patch = mpatches.Patch(hatch='++', facecolor='olive', label='Merge single 
- Asyncrhonous')\n", "\n", "handles_spawn = [OrMult_patch,OrSing_patch,OrPthMult_patch,OrPthSing_patch,MergeMult_patch,MergeSing_patch,MergePthMult_patch,MergePthSing_patch]" ] }, { "cell_type": "code", "execution_count": 48, "metadata": {}, "outputs": [], "source": [ "dfG = pd.read_pickle( AllName )\n", "\n", "dfG['ADR'] = round((dfG['ADR'] / dfG['DR']) * 100,1)\n", "dfG['SDR'] = round((dfG['SDR'] / dfG['DR']) * 100,1)\n", " \n", "out_group = dfG.groupby(['Groups', 'ADR','Spawn_Method','Redistribution_Method', 'Redistribution_Strategy'])['T_total']\n", "group = dfG.groupby(['ADR','Spawn_Method','Redistribution_Method', 'Redistribution_Strategy','Groups'])['T_total']\n", "\n", "grouped_aggG = group.agg(['median'])\n", "grouped_aggG.rename(columns={'median':'T_total'}, inplace=True) \n", "\n", "out_grouped_G = out_group.agg(['median'])\n", "out_grouped_G.rename(columns={'median':'T_total'}, inplace=True) " ] }, { "cell_type": "code", "execution_count": 49, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/tmp/ipykernel_5684/462116935.py:8: FutureWarning: Indexing with multiple keys (implicitly converted to a tuple of keys) will be deprecated, use a list instead.\n", " out_group = dfM.groupby(['NP','NC','ADR','Spawn_Method','Redistribution_Method', 'Redistribution_Strategy'])['T_Malleability','T_Redistribution','T_spawn','T_spawn_real','T_SR','T_AR']\n", "/tmp/ipykernel_5684/462116935.py:9: FutureWarning: Indexing with multiple keys (implicitly converted to a tuple of keys) will be deprecated, use a list instead.\n", " group = dfM.groupby(['ADR','Spawn_Method','Redistribution_Method', 'Redistribution_Strategy','NP','NC'])['T_Malleability','T_Redistribution','T_spawn','T_spawn_real','T_SR','T_AR']\n" ] } ], "source": [ "dfM = pd.read_pickle( ResizesName )\n", "\n", "dfM['ADR'] = round((dfM['ADR'] / dfM['DR']) * 100,1)\n", "dfM['SDR'] = round((dfM['SDR'] / dfM['DR']) * 100,1)\n", "dfM['T_Redistribution'] = dfM['T_SR'] + dfM['T_AR']\n", "dfM['T_Malleability'] = dfM['T_spawn'] + dfM['T_Redistribution']\n", " \n", "out_group = dfM.groupby(['NP','NC','ADR','Spawn_Method','Redistribution_Method', 'Redistribution_Strategy'])['T_Malleability','T_Redistribution','T_spawn','T_spawn_real','T_SR','T_AR']\n", "group = dfM.groupby(['ADR','Spawn_Method','Redistribution_Method', 'Redistribution_Strategy','NP','NC'])['T_Malleability','T_Redistribution','T_spawn','T_spawn_real','T_SR','T_AR']\n", "\n", "grouped_aggM = group.agg(['median'])\n", "grouped_aggM.columns = grouped_aggM.columns.get_level_values(0)\n", "\n", "out_grouped_M = out_group.agg(['median'])\n", "out_grouped_M.columns = out_grouped_M.columns.get_level_values(0)" ] }, { "cell_type": "code", "execution_count": 50, "metadata": {}, "outputs": [], "source": [ "dfL = pd.read_pickle( ItersName )\n", "\n", "#Fixme comprobar si hay iters asincronas antes de esto\n", "#dfL['ADR'] = round((dfL['ADR'] / dfL['DR']) * 100,1)\n", "#dfL['SDR'] = round((dfL['SDR'] / dfL['DR']) * 100,1)\n", "dfL['ADR'].fillna(-1, inplace=True)\n", "dfL['SDR'].fillna(-1, inplace=True)\n", "dfL['DR'].fillna(-1, inplace=True)\n", " \n", "aux_df = dfL[(dfL.Asynch_Iters == True)]\n", "group = aux_df.groupby(['ADR','Spawn_Method','Redistribution_Method', 'Redistribution_Strategy','NP','NC'])['T_iter']\n", "grouped_aggLAsynch = group.agg(['median','count'])\n", "grouped_aggLAsynch.columns = grouped_aggLAsynch.columns.get_level_values(0)\n", "grouped_aggLAsynch['T_sum'] = grouped_aggLAsynch['count'] * grouped_aggLAsynch['median'] / repet\n", 
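The cells above all follow the same pattern: load a pickled run table, derive the composite timings, group by the experiment configuration, and keep the median of each timing column. Below is a minimal, self-contained sketch of that pattern on synthetic data (the `demo` frame and its values are made up for illustration; only the column names are taken from the notebook). It also selects the timing columns with a list, which avoids the pandas FutureWarning the original cell emitted.

```python
import numpy as np
import pandas as pd

# Synthetic stand-in for the real per-run table (hypothetical values).
rng = np.random.default_rng(0)
demo = pd.DataFrame({
    'NP':  [2, 2, 20, 20], 'NC': [20, 20, 2, 2],
    'ADR': [0.0, 0.0, 100.0, 100.0],
    'Spawn_Method': 0, 'Redistribution_Method': 0, 'Redistribution_Strategy': 1,
    'T_spawn': rng.random(4), 'T_SR': rng.random(4), 'T_AR': rng.random(4),
})
# Composite timings, as in the dfM cell: redistribution plus spawn is the full reconfiguration time.
demo['T_Redistribution'] = demo['T_SR'] + demo['T_AR']
demo['T_Malleability'] = demo['T_spawn'] + demo['T_Redistribution']

keys = ['ADR', 'Spawn_Method', 'Redistribution_Method', 'Redistribution_Strategy', 'NP', 'NC']
cols = ['T_Malleability', 'T_Redistribution', 'T_spawn', 'T_SR', 'T_AR']
agg = demo.groupby(keys)[cols].agg(['median'])   # list-based column selection: no FutureWarning
agg.columns = agg.columns.get_level_values(0)    # flatten ('T_spawn', 'median') -> 'T_spawn'
print(agg)
```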
"grouped_aggLAsynch.rename(columns={'median':'T_iter'}, inplace=True) \n", "group = aux_df.groupby(['ADR','Spawn_Method','Redistribution_Method', 'Redistribution_Strategy','NP','NC'])['T_stages']\n", "aux_column = group.apply(list).apply(lambda x: np.median(x,0))\n", "grouped_aggLAsynch['T_stages'] = aux_column\n", "\n", "aux_df = dfL[(dfL.Asynch_Iters == False)]\n", "group = aux_df.groupby('NP')['T_iter']\n", "grouped_aggLSynch = group.agg(['median'])\n", "grouped_aggLSynch.rename(columns={'median':'T_iter'}, inplace=True)\n", "group = aux_df.groupby(['NP'])['T_stages']\n", "aux_column = group.apply(list).apply(lambda x: np.median(x,0))\n", "grouped_aggLSynch['T_stages'] = aux_column\n", "\n", "aux_df2 = aux_df[(aux_df.Is_Dynamic == True)]\n", "group = aux_df2.groupby(['ADR', 'Spawn_Method','Redistribution_Method', 'Redistribution_Strategy','NP','N_Parents'])['T_iter']\n", "grouped_aggLDyn = group.agg(['median'])\n", "grouped_aggLDyn.rename(columns={'median':'T_iter'}, inplace=True)\n", "group = aux_df2.groupby(['ADR', 'Spawn_Method','Redistribution_Method', 'Redistribution_Strategy','NP','N_Parents'])['T_stages']\n", "aux_column = group.apply(list).apply(lambda x: np.median(x,0))\n", "grouped_aggLDyn['T_stages'] = aux_column\n", "\n", "aux_df2 = aux_df[(aux_df.Is_Dynamic == False)]\n", "group = aux_df2.groupby('NP')['T_iter']\n", "grouped_aggLNDyn = group.agg(['median'])\n", "grouped_aggLNDyn.rename(columns={'median':'T_iter'}, inplace=True)\n", "group = aux_df2.groupby(['NP'])['T_stages']\n", "aux_column = group.apply(list).apply(lambda x: np.median(x,0))\n", "grouped_aggLNDyn['T_stages'] = aux_column" ] }, { "cell_type": "code", "execution_count": 35, "metadata": {}, "outputs": [], "source": [ "from bt_scheme import PartialSolution, BacktrackingSolver\n", "def elegirConf(parameters):\n", " class StatePS(PartialSolution):\n", " def __init__(self, config):\n", " self.config= config\n", " self.n= len(config) #Indica el valor a añadir\n", "\n", " def is_solution(self):\n", " return self.n == len(parameters)\n", "\n", " def get_solution(self):\n", " return tuple(self.config)\n", "\n", " def successors(self):\n", " array = parameters[self.n]\n", " for parameter_value in array: #Test all values of the next parameter\n", " self.config.append(parameter_value)\n", " yield StatePS(self.config)\n", " self.config.pop()\n", "\n", " initialPs= StatePS([])\n", " return BacktrackingSolver().solve(initialPs)\n", "\n", "\n", "def obtenerConfs(parameters):\n", " soluciones=[]\n", " for solucion in elegirConf(parameters):\n", " soluciones.append(solucion)\n", " return soluciones\n", "\n", "def modifyToGlobal(parameters, len_parameters, configuration):\n", " usable_configuration = []\n", " for i in range(len(parameters)):\n", " if len_parameters[i] > 1:\n", " aux = (parameters[i][0], configuration[i])\n", " else:\n", " aux = (configuration[i])\n", " usable_configuration.append(aux)\n", " \n", " return usable_configuration\n", "\n", "def modifyToLocalDynamic(parameters, len_parameters, configuration):\n", " usable_configuration = []\n", " for i in range(len(parameters)):\n", " if len_parameters[i] > 1:\n", " aux = (configuration[i], -1)\n", " else:\n", " aux = (-1)\n", " usable_configuration.append(aux)\n", " \n", " return tuple(usable_configuration)\n", "\n", "def CheckConfExists(configuration, dataSet, type_conf='global'):\n", " exists = False\n", " config = list(configuration)\n", " for np_aux in processes:\n", " for ns_aux in processes:\n", " if np_aux != ns_aux:\n", " \n", " if type_conf == 
'global':\n", " config.append((np_aux, ns_aux))\n", " elif type_conf == 'malleability':\n", " config.append(np_aux)\n", " config.append(ns_aux)\n", " elif type_conf == 'local':\n", " config.append(np_aux)\n", " \n", " if tuple(config) in dataSet.index: \n", " exists = True # FIXME Return here true?\n", " config.pop()\n", " \n", " if type_conf == 'malleability':\n", " config.pop()\n", " return exists" ] }, { "cell_type": "code", "execution_count": 77, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "[[0, 0, 0, 1], [0, 0, 1, 1], [0, 1, 0, 1], [0, 1, 1, 1], [0, 2, 0, 1], [0, 2, 1, 1], [100, 0, 0, 1], [100, 0, 1, 1], [100, 1, 0, 1], [100, 1, 1, 1], [100, 2, 0, 1], [100, 2, 1, 1]]\n", "[[-1, (1, -1), (1, -1), (1, -1)], [-1, (0, -1), (1, -1), (1, -1)], [-1, (2, -1), (1, -1), (1, -1)], [-1, (0, -1), (0, -1), (1, -1)], [-1, (2, -1), (0, -1), (1, -1)], [-1, (1, -1), (0, -1), (1, -1)]]\n", "[[0, (0, 0), (0, 0), (1, 1)], [0, (0, 0), (0, 1), (1, 1)], [0, (0, 1), (0, 0), (1, 1)], [0, (0, 1), (0, 1), (1, 1)], [0, (0, 2), (0, 0), (1, 1)], [0, (0, 2), (0, 1), (1, 1)], [100, (0, 0), (0, 0), (1, 1)], [100, (0, 0), (0, 1), (1, 1)], [100, (0, 1), (0, 0), (1, 1)], [100, (0, 1), (0, 1), (1, 1)], [100, (0, 2), (0, 0), (1, 1)], [100, (0, 2), (0, 1), (1, 1)]]\n", "12\n" ] } ], "source": [ "adr = [0,100]\n", "sp_method = [0,1,2]\n", "rd_method = [0,1]\n", "rd_strat = [1]\n", "parameters = [adr, sp_method, rd_method, rd_strat]\n", "parameters_names = ['ADR', 'Spawn_Method', 'Redistribution_Method', 'Redistribution_Strategy']\n", "len_parameters = [1,2,2,2]\n", "configurations_aux = obtenerConfs(parameters)\n", "configurations = []\n", "configurations_local_dynamic = set()\n", "configurations_local = set()\n", "configurations_simple = []\n", "for checked_conf in configurations_aux:\n", " aux_conf = modifyToGlobal(parameters, len_parameters, checked_conf)\n", " if CheckConfExists(aux_conf, grouped_aggG):\n", " configurations.append(aux_conf)\n", "\n", " if CheckConfExists(checked_conf, grouped_aggM, 'malleability'):\n", " configurations_simple.append(list(checked_conf))\n", " \n", " aux_conf = modifyToLocalDynamic(parameters, len_parameters, checked_conf)\n", " if CheckConfExists(aux_conf, grouped_aggLDyn, 'local'):\n", " configurations_local_dynamic.add(aux_conf)\n", "\n", "configurations_local_dynamic = list(configurations_local_dynamic)\n", "for index in range(len(configurations_local_dynamic)):\n", " configurations_local_dynamic[index] = list(configurations_local_dynamic[index])\n", "\n", "print(configurations_simple)\n", "print(configurations_local_dynamic)\n", "print(configurations)\n", "print(len(configurations))" ] }, { "cell_type": "code", "execution_count": 51, "metadata": {}, "outputs": [], "source": [ "#ALPHA COMPUTATION\n", "def compute_alpha(config_a, config_b):\n", " for np_aux in processes:\n", " for ns_aux in processes:\n", " if np_aux != ns_aux:\n", " config_a.append(np_aux)\n", " config_a.append(ns_aux)\n", " config_b.append(np_aux)\n", " config_b.append(ns_aux)\n", " grouped_aggM.loc[tuple(config_b),'Alpha'] = grouped_aggM.loc[tuple(config_b),'T_Malleability'] / grouped_aggM.loc[tuple(config_a),'T_Malleability']\n", " #grouped_aggM.loc[tuple(config_b),'Alpha'] = grouped_aggM.loc[tuple(config_b),'T_Redistribution'] / grouped_aggM.loc[tuple(config_a),'T_Redistribution']\n", " config_a.pop()\n", " config_a.pop()\n", " config_b.pop()\n", " config_b.pop()\n", " \n", " \n", " config_a.insert(0,ns_aux)\n", " config_a.insert(0,np_aux)\n", " config_b.insert(0,ns_aux)\n", 
" config_b.insert(0,np_aux)\n", " out_grouped_M.loc[tuple(config_b),'Alpha'] = out_grouped_M.loc[tuple(config_b),'T_Malleability'] / out_grouped_M.loc[tuple(config_a),'T_Malleability']\n", " #out_grouped_M.loc[tuple(config_b),'Alpha'] = out_grouped_M.loc[tuple(config_b),'T_Redistribution'] / out_grouped_M.loc[tuple(config_a),'T_Redistribution']\n", " \n", " config_a.pop(0)\n", " config_a.pop(0)\n", " config_b.pop(0)\n", " config_b.pop(0)\n", "\n", "if not ('Alpha' in grouped_aggM.columns):\n", " for config_a in configurations_simple:\n", " for config_b in configurations_simple:\n", " if config_a[1:-1] == config_b[1:-1] and config_a[0] == 0 and config_b[0] != 0:\n", " compute_alpha(config_a, config_b)\n", "else:\n", " print(\"ALPHA already exists\")" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#OMEGA COMPUTATION\n", "def compute_omega(config):\n", " for np_aux in processes:\n", " for ns_aux in processes:\n", " if np_aux != ns_aux:\n", " if len(config) > len(parameters):\n", " config.pop()\n", " config.pop()\n", " config.append(np_aux)\n", " config.append(ns_aux)\n", " grouped_aggLAsynch.at[tuple(config),'Omega'] = grouped_aggLAsynch.at[tuple(config),'T_iter'] / grouped_aggLSynch.at[np_aux,'T_iter']\n", " value = grouped_aggLAsynch.at[tuple(config),'T_stages'] / grouped_aggLSynch.at[np_aux,'T_stages']\n", " grouped_aggLAsynch.at[tuple(config),'Omega_Stages'] = value.astype(object)\n", " config.pop()\n", " config.pop()\n", "\n", "if not ('Omega' in grouped_aggLAsynch.columns):\n", " for config in configurations:\n", " if config[0] != 0:\n", " compute_omega(config)\n", "else:\n", " print(\"OMEGA already exists\")" ] }, { "cell_type": "code", "execution_count": 52, "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "/home/usuario/miniconda3/lib/python3.9/site-packages/pandas/core/algorithms.py:1537: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray.\n", " return arr.searchsorted(value, side=side, sorter=sorter) # type: ignore[arg-type]\n", "/home/usuario/miniconda3/lib/python3.9/site-packages/pandas/core/algorithms.py:1537: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. 
If you meant to do this, you must specify 'dtype=object' when creating the ndarray.\n", " return arr.searchsorted(value, side=side, sorter=sorter) # type: ignore[arg-type]\n" ] } ], "source": [ "#Dynamic Coherence COMPUTATION\n", "def compute_dyn_coherency(config):\n", " for np_aux in processes:\n", " for n_parents_aux in processes:\n", " if np_aux != n_parents_aux:\n", " config.append(np_aux)\n", " config.append(n_parents_aux)\n", " grouped_aggLDyn.at[tuple(config),'Dyn_Coherency'] = grouped_aggLDyn.at[tuple(config),'T_iter'] / grouped_aggLNDyn.at[np_aux,'T_iter']\n", " value = grouped_aggLDyn.at[tuple(config),'T_stages'] / grouped_aggLNDyn.at[np_aux,'T_stages']\n", " grouped_aggLDyn.at[tuple(config),'Dyn_Coherency_Stages'] = value.astype(object)\n", " config.pop()\n", " config.pop()\n", "\n", "if not ('Dyn_Coherency' in grouped_aggLDyn.columns):\n", " for config in configurations_local_dynamic:\n", " compute_dyn_coherency(config)\n", "else:\n", " print(\"Dyn_Coherency already exists\")" ] }, { "cell_type": "code", "execution_count": 39, "metadata": {}, "outputs": [], "source": [ "#Malleability Coherence COMPUTATION\n", "test=dfM[(dfM.Asynch_Iters > 0) & (dfM.Spawn_Strategy == 1)]\n", "\n", "for index in range(len(test)):\n", " time_malleability_aux = test[\"T_Malleability\"].values[index]\n", " \n", " total_asynch_iters = int(test[\"Asynch_Iters\"].values[index])\n", " asynch_iters = test[\"T_iter\"].values[index][-total_asynch_iters:]\n", " time_iters_aux = np.sum(asynch_iters)\n", " \n", " if time_malleability_aux < time_iters_aux:\n", " \n", " print(test.iloc[index])\n", " print(asynch_iters)\n", " print(time_iters_aux)\n", " print(time_malleability_aux)\n", " print(\"\")" ] }, { "cell_type": "code", "execution_count": 11, "metadata": {}, "outputs": [], "source": [ "out_grouped_G.to_excel(\"resultG.xlsx\") \n", "out_grouped_M.to_excel(\"resultM.xlsx\") \n", "#grouped_aggLAsynch.to_excel(\"AsynchIters.xlsx\")\n", "grouped_aggLDyn.to_excel(\"DynCoherence.xlsx\")" ] }, { "cell_type": "code", "execution_count": 64, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
\n", " | \n", " | \n", " | \n", " | \n", " | \n", " | T_Malleability | \n", "T_Redistribution | \n", "T_spawn | \n", "T_spawn_real | \n", "T_SR | \n", "T_AR | \n", "Alpha | \n", "
---|---|---|---|---|---|---|---|---|---|---|---|---|
ADR | \n", "Spawn_Method | \n", "Redistribution_Method | \n", "Redistribution_Strategy | \n", "NP | \n", "NC | \n", "\n", " | \n", " | \n", " | \n", " | \n", " | \n", " | \n", " |
0.0 | \n", "0 | \n", "0 | \n", "1 | \n", "160 | \n", "20 | \n", "8.499887 | \n", "7.168089 | \n", "1.285624 | \n", "0.0 | \n", "7.168089 | \n", "0.000000 | \n", "NaN | \n", "
1 | \n", "1 | \n", "160 | \n", "20 | \n", "5.182835 | \n", "3.976021 | \n", "1.219105 | \n", "0.0 | \n", "3.976021 | \n", "0.000000 | \n", "NaN | \n", "||
1 | \n", "0 | \n", "1 | \n", "160 | \n", "20 | \n", "4.037880 | \n", "3.875850 | \n", "0.133642 | \n", "0.0 | \n", "3.875850 | \n", "0.000000 | \n", "NaN | \n", "|
1 | \n", "1 | \n", "160 | \n", "20 | \n", "3.927668 | \n", "3.752015 | \n", "0.144682 | \n", "0.0 | \n", "3.752015 | \n", "0.000000 | \n", "NaN | \n", "||
2 | \n", "0 | \n", "1 | \n", "160 | \n", "20 | \n", "7.051643 | \n", "5.734007 | \n", "1.287871 | \n", "0.0 | \n", "5.734007 | \n", "0.000000 | \n", "NaN | \n", "|
1 | \n", "1 | \n", "160 | \n", "20 | \n", "6.385955 | \n", "5.147990 | \n", "1.253753 | \n", "0.0 | \n", "5.147990 | \n", "0.000000 | \n", "NaN | \n", "||
100.0 | \n", "0 | \n", "0 | \n", "1 | \n", "160 | \n", "20 | \n", "5.255706 | \n", "4.034014 | \n", "1.267632 | \n", "0.0 | \n", "0.000000 | \n", "4.034014 | \n", "0.618327 | \n", "
1 | \n", "1 | \n", "160 | \n", "20 | \n", "5.336817 | \n", "4.104014 | \n", "1.222010 | \n", "0.0 | \n", "0.000000 | \n", "4.104014 | \n", "1.029710 | \n", "||
1 | \n", "0 | \n", "1 | \n", "160 | \n", "20 | \n", "4.015236 | \n", "3.916052 | \n", "0.110316 | \n", "0.0 | \n", "0.000000 | \n", "3.916052 | \n", "0.994392 | \n", "|
1 | \n", "1 | \n", "160 | \n", "20 | \n", "4.092398 | \n", "3.909810 | \n", "0.123278 | \n", "0.0 | \n", "0.000000 | \n", "3.909810 | \n", "1.041941 | \n", "||
2 | \n", "0 | \n", "1 | \n", "160 | \n", "20 | \n", "6.720818 | \n", "5.463979 | \n", "1.287527 | \n", "0.0 | \n", "0.000000 | \n", "5.463979 | \n", "0.953085 | \n", "|
1 | \n", "1 | \n", "160 | \n", "20 | \n", "6.632618 | \n", "5.458008 | \n", "1.191391 | \n", "0.0 | \n", "0.000000 | \n", "5.458008 | \n", "1.038626 | \n", "
\n", " | Total_Resizes | \n", "Total_Groups | \n", "Total_Stages | \n", "Granularity | \n", "SDR | \n", "ADR | \n", "DR | \n", "Redistribution_Method | \n", "Redistribution_Strategy | \n", "Spawn_Method | \n", "... | \n", "Iters | \n", "Asynch_Iters | \n", "T_iter | \n", "T_stages | \n", "T_spawn | \n", "T_spawn_real | \n", "T_SR | \n", "T_AR | \n", "T_Malleability | \n", "T_total | \n", "
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | \n", "1 | \n", "2 | \n", "1 | \n", "100000 | \n", "100.0 | \n", "0.0 | \n", "5000000000 | \n", "(0, 2) | \n", "(1, 1) | \n", "(0, 1) | \n", "... | \n", "(5, 5) | \n", "(0, 0) | \n", "((0.100074, 0.100068, 0.100163, 0.100258, 0.10... | \n", "(((0.100073,), (0.100068,), (0.100077,), (0.10... | \n", "(0.663998,) | \n", "(0,) | \n", "(1.300401,) | \n", "(0,) | \n", "[1.966417] | \n", "3.111253 | \n", "
1 | \n", "1 | \n", "2 | \n", "1 | \n", "100000 | \n", "100.0 | \n", "0.0 | \n", "5000000000 | \n", "(0, 2) | \n", "(1, 1) | \n", "(0, 1) | \n", "... | \n", "(5, 5) | \n", "(0, 0) | \n", "((0.100299, 0.100173, 0.100076, 0.100077, 0.10... | \n", "(((0.100299,), (0.100166,), (0.100076,), (0.10... | \n", "(0.747897,) | \n", "(0,) | \n", "(1.225241,) | \n", "(0,) | \n", "[1.975229] | \n", "3.103462 | \n", "
2 | \n", "1 | \n", "2 | \n", "1 | \n", "100000 | \n", "100.0 | \n", "0.0 | \n", "5000000000 | \n", "(0, 2) | \n", "(1, 1) | \n", "(0, 1) | \n", "... | \n", "(5, 5) | \n", "(0, 0) | \n", "((0.100071, 0.101263, 0.100182, 0.100076, 0.10... | \n", "(((0.100071,), (0.10035,), (0.100076,), (0.100... | \n", "(0.662863,) | \n", "(0,) | \n", "(1.3332,) | \n", "(0,) | \n", "[1.998003] | \n", "3.125520 | \n", "
3 | \n", "1 | \n", "2 | \n", "1 | \n", "100000 | \n", "100.0 | \n", "0.0 | \n", "5000000000 | \n", "(0, 2) | \n", "(1, 1) | \n", "(0, 1) | \n", "... | \n", "(5, 5) | \n", "(0, 0) | \n", "((0.100171, 0.100067, 0.100545, 0.100076, 0.10... | \n", "(((0.100064,), (0.100066,), (0.100545,), (0.10... | \n", "(0.620327,) | \n", "(0,) | \n", "(1.144137,) | \n", "(0,) | \n", "[1.765891] | \n", "2.886963 | \n", "
4 | \n", "1 | \n", "2 | \n", "1 | \n", "100000 | \n", "100.0 | \n", "0.0 | \n", "5000000000 | \n", "(0, 2) | \n", "(1, 1) | \n", "(0, 1) | \n", "... | \n", "(5, 5) | \n", "(0, 0) | \n", "((0.100296, 0.100076, 0.100311, 0.100164, 0.10... | \n", "(((0.100166,), (0.100071,), (0.100291,), (0.10... | \n", "(0.661238,) | \n", "(0,) | \n", "(1.303134,) | \n", "(0,) | \n", "[1.965799] | \n", "3.101193 | \n", "
... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "... | \n", "
1195 | \n", "1 | \n", "2 | \n", "1 | \n", "100000 | \n", "0.0 | \n", "100.0 | \n", "5000000000 | \n", "(0, 1) | \n", "(1, 1) | \n", "(0, 1) | \n", "... | \n", "(5, 5) | \n", "(0, 0) | \n", "((0.100926, 0.10083, 0.101691, 0.101116, 0.100... | \n", "(((0.100808,), (0.100772,), (0.100783,), (0.10... | \n", "(0.126827,) | \n", "(0,) | \n", "(0,) | \n", "(1.269128,) | \n", "[1.397743] | \n", "2.614686 | \n", "
1196 | \n", "1 | \n", "2 | \n", "1 | \n", "100000 | \n", "0.0 | \n", "100.0 | \n", "5000000000 | \n", "(0, 1) | \n", "(1, 1) | \n", "(0, 1) | \n", "... | \n", "(5, 5) | \n", "(0, 0) | \n", "((0.105365, 0.101082, 0.104509, 0.100901, 0.10... | \n", "(((0.100806,), (0.100772,), (0.100781,), (0.10... | \n", "(0.129203,) | \n", "(0,) | \n", "(0,) | \n", "(1.361718,) | \n", "[1.496593] | \n", "2.612697 | \n", "
1197 | \n", "1 | \n", "2 | \n", "1 | \n", "100000 | \n", "0.0 | \n", "100.0 | \n", "5000000000 | \n", "(0, 1) | \n", "(1, 1) | \n", "(0, 1) | \n", "... | \n", "(5, 5) | \n", "(0, 0) | \n", "((0.100876, 0.100847, 0.101514, 0.100818, 0.10... | \n", "(((0.100782,), (0.100781,), (0.100799,), (0.10... | \n", "(0.105669,) | \n", "(0,) | \n", "(0,) | \n", "(1.297036,) | \n", "[1.404399] | \n", "2.447727 | \n", "
1198 | \n", "1 | \n", "2 | \n", "1 | \n", "100000 | \n", "0.0 | \n", "100.0 | \n", "5000000000 | \n", "(0, 1) | \n", "(1, 1) | \n", "(0, 1) | \n", "... | \n", "(5, 5) | \n", "(0, 0) | \n", "((0.100859, 0.100237, 0.100222, 0.100204, 0.10... | \n", "(((0.099892,), (0.099905,), (0.099889,), (0.09... | \n", "(0.113739,) | \n", "(0,) | \n", "(0,) | \n", "(1.346015,) | \n", "[1.464036] | \n", "2.557442 | \n", "
1199 | \n", "1 | \n", "2 | \n", "1 | \n", "100000 | \n", "0.0 | \n", "100.0 | \n", "5000000000 | \n", "(0, 1) | \n", "(1, 1) | \n", "(0, 1) | \n", "... | \n", "(5, 5) | \n", "(0, 0) | \n", "((0.104668, 0.100913, 0.100822, 0.100855, 0.10... | \n", "(((0.100783,), (0.100784,), (0.100772,), (0.10... | \n", "(0.137782,) | \n", "(0,) | \n", "(0,) | \n", "(1.269511,) | \n", "[1.412476] | \n", "2.594925 | \n", "
2400 rows × 27 columns
\n", "