Commit d005e7a2 authored by cyts's avatar cyts
Browse files

Code rewrite + additions for paper revision

parent 9fc02f03
import os
import sys
import random
from pathlib import Path

# Make the project's src directory importable before pulling in its modules
filepath = Path(r"../../src/").resolve()
if os.fspath(filepath) not in sys.path:
    sys.path.append(os.fspath(filepath))

from heuristic_functions import *
from utils import full_algo, dist, graph_with_calc_edge_capacity, cost_capacity_func
import numpy as np
import networkx as nx
from multiprocessing import Pool
import pandas as pd

"""
This script runs degraded versions of certain algorithms for performance
comparison.  The results from this script serve to create Fig. 16.
"""
def main():
    """Run full and degraded ("closest n") versions of the edge-turn
    heuristics on previously generated random trees.

    Returns a DataFrame with, per algorithm variant, the resulting graphs,
    costs and calculation times (the data behind Fig. 16).
    """
    # Get initial spanning trees from previously generated results
    results_previous = pd.read_pickle(
        Path("../results_paper/random/random_publi_10_28_sources.pkl").resolve()
    )
    # Simply extract the point cloud and create an MST for each graph
    trees = [
        create_compl_graph_from_other(x) for x in results_previous.graphs_delta.values
    ]
    starting_trees = [
        graph_with_calc_edge_capacity(nx.minimum_spanning_tree(x)) for x in trees
    ]
    results = pd.DataFrame()

    def _run(pool, algo, *extra_args):
        """Run full_algo on every starting tree in parallel; returns a list
        of (graph, cost, time) triples."""
        tasks = [
            (tree, cost_capacity_func, algo) + extra_args for tree in starting_trees
        ]
        return pool.starmap(full_algo, tasks)

    def _store(res, suffix):
        """Unpack (graph, cost, time) triples into three result columns."""
        results["graphs_" + suffix] = [x[0] for x in res]
        results["costs_" + suffix] = [x[1] for x in res]
        results["time_" + suffix] = [x[2] for x in res]

    # BUG FIX: the pool was never closed/joined in the original; use it as a
    # context manager so worker processes are reliably cleaned up.
    with Pool(4) as pool_multi:
        # Full versions of the algorithms (check all potential reconnect nodes)
        _store(_run(pool_multi, high_valency_shuffle_edge_turn), "meta_edge_full")
        _store(_run(pool_multi, edge_turn), "edge_full")
        # Degraded versions: n is the number of closest nodes considered when
        # reconnecting an edge (see edge_turn_algo_closest_n in
        # heuristic_functions).
        n_vals = [3, 5, 7, 9, 12, 15]
        for n in n_vals:
            _store(
                _run(pool_multi, high_valency_shuffle_edge_turn_closest, n),
                "meta_edge_" + str(n),
            )
            _store(_run(pool_multi, edge_turn_algo_closest_n, n), "edge_" + str(n))
    return results
if __name__ == "__main__":
    results = main()
    # Persist results for the companion plotting script
    results.to_pickle(
        Path(
            "../results_paper/random/random_publi_extra_review_28_sources.pkl"
        ).resolve()
    )
from pathlib import Path

import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import ticker
import matplotlib as mpl

# Shrink x tick labels so the log-scale minor labels fit the small figures
label_size = 6
mpl.rcParams["xtick.labelsize"] = label_size

"""
This script plots results for degraded versions of certain algorithms
(performance comparison).  The created plots correspond to Fig. 16.
It is to be run after degraded_metaheuristic_generate.py has been run.
"""
# Load the results DataFrame produced by degraded_metaheuristic_generate.py
result_file = "../results_paper/random/random_publi_extra_review_28_sources.pkl"
df_results = pd.read_pickle(Path(result_file).resolve())
length = len(df_results)
# File names end in "..._<num>_sources.pkl"; recover <num> from the name
num_sources = result_file.split("_sources")[0].split("_")[-1]
# Algorithm variants to plot: full versions plus "closest n" degraded ones
algos = [
    "meta_edge_full",
    "edge_full",
    "edge_3",
    "edge_5",
    "edge_7",
    "edge_9",
    "edge_12",
    "edge_15",
    "meta_edge_3",
    "meta_edge_5",
    "meta_edge_7",
    "meta_edge_9",
    "meta_edge_12",
    "meta_edge_15",
]
costs_strings = ["costs_" + x for x in algos]
graph_strings = ["graphs_" + x for x in algos]
# The reference "optimum" is the minimum-cost solution over all algorithms —
# no explicit optimal calculation is performed.
df_results["costs_opt"] = df_results[costs_strings].min(axis=1)
# idxmin gives the winning costs_* column; map it to the matching graphs_*
# column name
df_results["graphs_opt"] = (
    df_results[costs_strings]
    .idxmin(axis="columns")
    .map(dict(zip(costs_strings, graph_strings)))
)
# Replace the column-name indirection with the actual best graph per row
df_results["graphs_opt"] = [row[row.graphs_opt] for _, row in df_results.iterrows()]
# Flag, per algorithm, the rows where its graph differs from the reference
# optimum (graphs are compared by their edge sets).
for algo in algos:
    df_results["not_opt_" + algo] = [
        row["graphs_" + algo].edges() != row["graphs_opt"].edges()
        for _, row in df_results.iterrows()
    ]
# Per algorithm: mean calculation time and the percentage of runs that
# reached the reference optimum.
percentage_optimal_dict = {}
time_dict = {}
for algo in algos:
    # Boolean column: sum() counts the non-optimal rows directly
    n_not_opt = int(df_results["not_opt_" + algo].sum())
    percentage_optimal_dict[algo] = 100 - n_not_opt / len(df_results) * 100
    time_dict[algo] = df_results["time_" + algo].mean()
# --- Figure 1: meta-heuristic ("meta_edge") variants ------------------------
ylims = [-5, 105]
# Asterisk: "optimal" means best solution found, not a proven optimum
title_word = "optimal*"
al = 0.9
small_mark = 40
big_mark = 130
col = "k"

fig, ax = plt.subplots(1, figsize=(2.5, 2.5), dpi=300)

def _meta_circle(key):
    """Open circle at the (mean time, % optimal) point of algorithm *key*."""
    plt.scatter(
        time_dict[key],
        percentage_optimal_dict[key],
        marker="o",
        facecolors="none",
        s=big_mark,
        edgecolors=col,
        alpha=al,
    )

def _meta_label(key, text, size):
    """Numeric label drawn inside the circle of algorithm *key*."""
    plt.scatter(
        time_dict[key],
        percentage_optimal_dict[key],
        marker="$" + text + "$",
        s=size,
        edgecolors=col,
        alpha=al,
        facecolors=col,
        linewidth=0,
    )

for n in ("3", "5", "7", "9"):
    _meta_circle("meta_edge_" + n)
    _meta_label("meta_edge_" + n, n, small_mark)
# n = 12 and 15 only exist meaningfully for graphs with enough sources;
# two-digit labels are drawn larger so they stay legible.
if int(num_sources) > 12:
    for n in ("12", "15"):
        _meta_circle("meta_edge_" + n)
        _meta_label("meta_edge_" + n, n, small_mark * 2)
# Full (non-degraded) algorithm: circle with an "x" inside
_meta_circle("meta_edge_full")
plt.scatter(
    time_dict["meta_edge_full"],
    percentage_optimal_dict["meta_edge_full"],
    marker="x",
    s=small_mark,
    c=col,
    alpha=al,
)
plt.xlabel("Average calc. time (s)")
plt.ylabel("Percent " + title_word)
plt.axhline(100, c="k", alpha=0.5)
ax.set_xscale("log")
# Plain numbers (not powers of ten) on the log axis
ax.xaxis.set_major_formatter(ticker.ScalarFormatter())
ax.xaxis.set_minor_formatter(ticker.ScalarFormatter())
ax.set_title("Random graphs " + num_sources + " sources\n(N=" + str(length) + ")")
plt.ylim(ylims)
# --- Figure 2: plain edge-turn ("edge") variants ----------------------------
fig, ax = plt.subplots(1, figsize=(2.5, 2.5), dpi=300)

def _edge_cross(key, size, lw):
    """Small 'x' at the (mean time, % optimal) point of algorithm *key*."""
    plt.scatter(
        time_dict[key],
        percentage_optimal_dict[key],
        marker="x",
        s=size,
        edgecolors=col,
        alpha=al,
        facecolors=col,
        linewidth=lw,
    )

def _edge_label(key, text, size):
    """Numeric label next to the cross of algorithm *key*."""
    plt.scatter(
        time_dict[key],
        percentage_optimal_dict[key],
        marker="$" + text + "$",
        s=size,
        edgecolors=col,
        alpha=al,
        facecolors=col,
        linewidth=0,
    )

# BUG FIX: the original indexed time_dict["edge"] / percentage_optimal_dict
# ["edge"], but no "edge" key is ever populated (only "edge_full" and
# "edge_<n>") -> KeyError at runtime.  "edge_full" is the intended key,
# matching "meta_edge_full" in the first figure.
_edge_cross("edge_full", 40, 0.5)
# Original calls used linewidth 0.5 for n=3 and 0.3 for the others
_edge_cross("edge_3", small_mark, 0.5)
_edge_label("edge_3", "3", small_mark)
for n in ("5", "7", "9"):
    _edge_cross("edge_" + n, small_mark, 0.3)
    _edge_label("edge_" + n, n, small_mark)
# Two-digit variants only for graphs with enough sources; larger labels
if int(num_sources) > 12:
    for n in ("12", "15"):
        _edge_label("edge_" + n, n, small_mark * 2)
        _edge_cross("edge_" + n, small_mark * 2, 0.3)
# Large cross for the full algorithm, drawn on top (same key fix as above)
plt.scatter(
    time_dict["edge_full"],
    percentage_optimal_dict["edge_full"],
    marker="x",
    facecolors="none",
    s=big_mark,
    edgecolors=col,
    alpha=al,
)
plt.xlabel("Average calc. time (s)")
plt.ylabel("Percent " + title_word)
plt.axhline(100, c="k", alpha=0.5)
ax.set_xscale("log")
# Plain numbers (not powers of ten) on the log axis
ax.xaxis.set_major_formatter(ticker.ScalarFormatter())
ax.xaxis.set_minor_formatter(ticker.ScalarFormatter())
ax.set_title("Random graphs " + num_sources + " sources\n(N=" + str(length) + ")")
plt.ylim(ylims)
from pathlib import Path
import os
import sys

# Make the project's src directory importable.
# NOTE(review): variable name kept as in the original ("fodler" typo) in case
# later code in this file references it.  Also removed: a duplicated copy of
# this assignment, a duplicate `import numpy as np`, and a one-line `utils`
# import that duplicated the multi-line one below.
fodler_path = Path(r"../../src/").resolve()
if os.fspath(fodler_path) not in sys.path:
    sys.path.append(os.fspath(fodler_path))

from heuristic_functions import *
import pandas as pd
import numpy as np
import networkx as nx
import geopandas as gpd
from shapely.geometry import Point
from utils import (
    fix_same_location,
    dist,
    full_algo,
    graph_with_calc_edge_capacity,
    cost_capacity_func,
)

"""
This script generates solutions for the final large 95 node graph in the
discussion of the article
"""
def main():
#Choice of cluster