Commit 92362d88 authored by Pablo Iturrieta Rebolledo's avatar Pablo Iturrieta Rebolledo
Browse files

Added K function for spatial analysis

parent 8e108d99
......@@ -2,7 +2,7 @@
<module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$" />
<orderEntry type="inheritedJdk" />
<orderEntry type="jdk" jdkName="Python 3.8 (venv)" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
<component name="TestRunnerService">
......
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.7 (venv)" project-jdk-type="Python SDK" />
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8 (venv)" project-jdk-type="Python SDK" />
</project>
\ No newline at end of file
......@@ -163,7 +163,7 @@ def filter_10yr_01_2010(cat):
start_date = time_utils.strptime_to_utc_epoch('2010-01-01 00:00:00.00')
end_date = time_utils.strptime_to_utc_epoch('2020-01-01 00:00:00.00')
min_mag = 4.95
max_depth = 300
max_depth = 500
cat.filter([f'origin_time >= {start_date}',
f'origin_time < {end_date}',
......
import matplotlib.pyplot as plt
import numpy as np
import os
import entropy_calc as ec
from codes import get_models
from codes import paths
import numpy
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
import numpy as np
from codes import get_catalogs
import csep
import itertools
def dists(Array):
    """Pairwise great-circle distances (km) between points on a sphere.

    Parameters
    ----------
    Array : array-like of shape (n, 2)
        Point coordinates as (longitude, latitude) rows, in degrees.

    Returns
    -------
    ndarray of shape (n, n)
        Symmetric matrix of great-circle distances in kilometres
        (Earth radius 6371 km); the diagonal is zero.
    """
    Array = np.asarray(Array)
    lon = np.deg2rad(Array[:, 0])
    lat = np.deg2rad(Array[:, 1])
    # Spherical law of cosines, vectorised over all (i, j) pairs at once
    # instead of the original O(n^2) Python double loop; the per-element
    # arithmetic is identical to the original.
    cos_angle = (np.sin(lat)[:, None] * np.sin(lat)[None, :]
                 + np.cos(lat)[:, None] * np.cos(lat)[None, :]
                 * np.cos(lon[None, :] - lon[:, None]))
    # Round before arccos to guard against |cos_angle| slightly > 1 from
    # floating-point round-off (same safeguard as the original code).
    return 6371. * np.arccos(np.round(cos_angle, 10))
def K_r(Events, rates, Dists, r, norm=899900):
    """Rate-weighted Ripley's-K style statistic at distance threshold ``r``.

    Parameters
    ----------
    Events : ndarray of shape (n, 2)
        Event coordinates; only the number of rows ``n`` is used here.
    rates : array-like of length n
        Forecast rate at each event's cell; weights each (i, j) pair term.
    Dists : ndarray of shape (n, n)
        Pairwise distance matrix for the events (see ``dists``).
    r : float
        Distance threshold, same units as ``Dists`` (km).
    norm : float, optional
        Normalisation constant; default keeps the original hard-coded
        899900 (presumably a study-region area — TODO confirm).

    Returns
    -------
    float
        Normalised, rate-weighted neighbour-count statistic.

    NOTE(review): ``count`` tallies *all* neighbours of i within r (and
    includes i itself, since Dists[i, i] == 0 < r), rather than the usual
    per-pair indicator 1{Dists[i, j] < r} — confirm this is intended.
    """
    total = 0.0
    n = Events.shape[0]
    for i in range(n):
        # Loop-invariant in j: hoisted out of the inner loop (the original
        # recomputed this O(n) sum n times per i).
        count = np.sum(Dists[i, :] < r)
        for j in range(n):
            if j != i:
                total += count / rates[i] / rates[j]
    return total / norm
#################### CATALOG
# NOTE(review): script-level analysis; depends on project modules
# (get_catalogs, a csep-style region object) not visible in this file.
plt.close('all')
# Catalog ("bollettino") filtered to the 10-year window starting 2010-01.
cat = get_catalogs.filter_10yr_01_2010(get_catalogs.bollettino())
region = cat.region
# Observed event count per spatial cell (flat array over region cells).
counts= cat.spatial_counts()
# A: map-shaped 2-D grid of counts; cells outside the region stay NaN.
A = np.zeros(region.idx_map.shape)
A[ A == 0] = np.nan
for i in range(A.shape[0]):
    for j in range(A.shape[1]):
        # idx_map holds the flat cell index for valid cells, NaN outside.
        if not np.isnan(region.idx_map[i,j]):
            A[i, j] = counts[int(region.idx_map[i,j])]
# Flat spatial-cell index of each catalog event.
cat_ind = cat.get_spatial_idx()
# Event coordinates as (lon, lat) rows — column order matters for dists().
Events = np.array([[i, j] for i,j in zip(cat.get_longitudes(), cat.get_latitudes())])
## Forecast
# Ten-year forecast models to be compared against the observed catalog.
models = get_models.ten_years()
# Distance thresholds r (km) at which the K statistic is evaluated.
R_s = np.arange(1, 400, 10)
# NOTE(review): hard-coded model count — presumably len(models) == 19; verify.
for model_i in range(19):
    plt.figure()
    # model_i = 11
    plt.title(models[model_i].name)
    # Forecast rate per spatial cell.
    forecast_data = models[model_i].spatial_counts()
    # Rate of the cell each observed event falls into.
    rates_eqk = forecast_data[cat_ind]
    Dists = dists(Events)
    # K curve of the observed catalog, weighted by the forecast rates.
    K_cat = []
    for i in R_s:
        K_cat.append(K_r(Events, rates_eqk, Dists, i))
    # NOTE(review): duplicate of the spatial_counts() call a few lines above.
    forecast_data = models[model_i].spatial_counts()
    observed_data = cat.spatial_counts()
    n_obs = numpy.sum(observed_data)
    n_fore = numpy.sum(forecast_data)
    scale = n_obs / n_fore  # NOTE(review): computed but unused below.
    # Simulated catalogs contain as many events as were observed.
    expected_forecast_count = int(n_obs)
    num_events_to_simulate = expected_forecast_count
    # Cumulative cell weights for inverse-transform sampling of locations.
    sampling_weights = numpy.cumsum(forecast_data.ravel()) / numpy.sum(forecast_data)
    n_sim = 100
    K_sim = []
    for i in range(n_sim):
        sim_fore = numpy.zeros(sampling_weights.shape)
        # Draw a synthetic catalog from the forecast (csep private helper).
        sim = csep.core.poisson_evaluations._simulate_catalog(num_events_to_simulate, sampling_weights, sim_fore)
        ind_sim = np.where(sim != 0)[0]
        # Cell-centre coordinates of occupied cells.
        # NOTE(review): a cell with >= 2 simulated events yields one point here.
        events_sim = np.array([models[model_i].get_longitudes()[ind_sim], models[model_i].get_latitudes()[ind_sim]]).T
        dists_sim = dists(events_sim)
        rates_sim = forecast_data[ind_sim]
        K = []
        # NOTE(review): reusing loop variable i shadows the simulation index.
        for i in R_s:
            K.append(K_r(events_sim, rates_sim, dists_sim, i))
        # plt.plot(R_s, K, alpha=0.1, color='red')
        K_sim.append(K)
    K_sim = np.array(K_sim)
    # NOTE(review): std over the flattened (n_sim x len(R_s)) array, not per
    # radius (axis=0) — confirm this is intended.
    K_std = np.std(K_sim)
    # K_avg = np.median(K_sim, axis=0)
    # K_up = np.quantile(K_sim, 0.85, axis=0)
    # K_down = np.quantile(K_sim, 0.15, axis=0)
    # plt.plot(R_s, K_cat, color='red')
    # plt.plot(R_s, K_avg)
    # plt.fill_between(R_s, K_down, K_up,
    # color='gray', alpha=0.2)
    # L-transform: sqrt(K) scaled by the simulated spread; plot the observed
    # curve (red) against the simulation envelope (5th-95th percentile).
    L = np.divide(np.sqrt(K_sim), K_std)
    L_avg = np.mean(L, axis=0)
    L_up = np.quantile(L, 0.95, axis=0)
    L_down = np.quantile(L, 0.05, axis=0)
    L_cat = np.divide(np.sqrt(K_cat), K_std)
    plt.plot(R_s, L_cat, color='red')
    plt.plot(R_s, L_avg)
    plt.fill_between(R_s, L_down, L_up,
                     color='gray', alpha=0.2)
    plt.grid()
    plt.show()
# cmap = plt.get_cmap('jet')
# fig = plt.figure()
# # a=plt.imshow(np.flipud(A), cmap=cmap)
# lons, lats = numpy.meshgrid(numpy.append(region.xs, region.xs[-1] + region.dh),
# numpy.append(region.ys, region.ys[-1] + region.dh))
# a = plt.contourf(lons[:-1, :-1], lats[:-1, :-1], A, cmap=cmap)
# fig.colorbar(a)
# plt.title('Catalog')
# plt.show()
#
# A[np.isnan(A)] = 0
# x,y = np.where(A!=0)
#
#
#
# ################# Forecasts
# models = get_models.ten_years()
# names = []
# cmap = plt.get_cmap('jet')
#
# ####### Cat
# model_i = 11
#
#
# n_sims = [1, 100, 200, 500, 1000, 2000, 5000, 10000]
# n_sim = 1
# AA=A.reshape((A.shape[0], A.shape[1], 1))
#
# DD = models[model_i].spatial_counts(cartesian=True).reshape((A.shape[0], A.shape[1], 1))
#
# B = np.zeros(region.idx_map.shape)
# B[B==0] = np.nan
# sim = np.zeros(sampling_weights.shape)
# for i in range(n_sim):
# sim_fore = numpy.zeros(sampling_weights.shape)
# sim += csep.core.poisson_evaluations._simulate_catalog(num_events_to_simulate, sampling_weights, sim_fore)
#
# for i in range(B.shape[0]):
# for j in range(B.shape[1]):
# if not np.isnan(region.idx_map[i,j]):
# B[i,j] = sim[int(region.idx_map[i,j])]
# B /= n_sim
# BB = B.reshape((B.shape[0], B.shape[1], 1))
\ No newline at end of file
......@@ -4,68 +4,68 @@
# from pointpats import PointPattern, PoissonPointProcess, as_window, G, F, J, K, L, Genv, Fenv, Jenv, Kenv, Lenv
#
# import matplotlib.pyplot as plt
import numpy as np # NumPy package for arrays, random number generation, etc
import matplotlib.pyplot as plt # For plotting
from scipy.optimize import minimize # For optimizing
from scipy import integrate
# Simulation window: the rectangle [xMin, xMax] x [yMin, yMax].
xMin, xMax = -1, 1
yMin, yMax = -1, 1
xDelta = xMax - xMin  # rectangle width
yDelta = yMax - yMin  # rectangle height
areaTotal = xDelta * yDelta  # window area
# Point process parameters
s = 0.5  # scale parameter of the intensity function
def fun_lambda(x, y):
    """Intensity function of the point process: a Gaussian bump of height
    100 centred at the origin, with module-level scale parameter ``s``."""
    r_sq = x ** 2 + y ** 2
    return 100 * np.exp(-r_sq / s ** 2)
###START -- find maximum lambda -- START ###
# For an intensity function lambda, given by function fun_lambda,
# finds the maximum of lambda in a rectangular region given by
# [xMin,xMax,yMin,yMax].
def fun_Neg(x):
    """Negated intensity at point ``x = (x0, x1)`` — lets ``minimize`` locate
    the maximum of ``fun_lambda`` by minimising its negation."""
    return -fun_lambda(x[0], x[1])
# Initial guess for the optimiser: the centre of the window.
xy0 = [(xMin + xMax) / 2, (yMin + yMax) / 2]; # initial value(ie centre)
# Find largest lambda value
# Maximise fun_lambda by minimising its negation over the window bounds.
resultsOpt = minimize(fun_Neg, xy0, bounds=((xMin, xMax), (yMin, yMax)));
lambdaNegMin = resultsOpt.fun; # retrieve minimum value found by minimize
lambdaMax = -lambdaNegMin; # upper bound on the intensity, used for thinning
###END -- find maximum lambda -- END ###
# define thinning probability function
def fun_p(x, y):
    """Thinning (retention) probability: the intensity normalised by its
    maximum ``lambdaMax``, so every value lies in [0, 1]."""
    return fun_lambda(x, y) / lambdaMax
# Simulate a Poisson point process
# Thinning approach: draw a homogeneous Poisson process at the maximal rate
# lambdaMax over the window, then keep each point with probability fun_p(x, y)
# to obtain the inhomogeneous process with intensity fun_lambda.
numbPoints = np.random.poisson(lambdaMax * areaTotal); # Poisson number of points
xx = np.random.uniform(0, xDelta, ((numbPoints, 1))) + xMin; # x coordinates of Poisson points
yy = np.random.uniform(0, yDelta, ((numbPoints, 1))) + yMin; # y coordinates of Poisson points
# calculate spatially-dependent thinning probabilities
p = fun_p(xx, yy);
# Generate Bernoulli variables (ie coin flips) for thinning
booleRetained = np.random.uniform(0, 1, ((numbPoints, 1))) < p; # True marks a retained point
# x/y locations of retained points
xxRetained = xx[booleRetained];
yyRetained = yy[booleRetained];
# Plotting
plt.scatter(xxRetained, yyRetained, edgecolor='b', facecolor='none', alpha=0.5);
plt.xlabel("x");
plt.ylabel("y");
plt.show();
\ No newline at end of file
#
# import numpy as np # NumPy package for arrays, random number generation, etc
# import matplotlib.pyplot as plt # For plotting
# from scipy.optimize import minimize # For optimizing
# from scipy import integrate
#
# # Simulation window parameters
# xMin = -1;
# xMax = 1;
# yMin = -1;
# yMax = 1;
# xDelta = xMax - xMin;
# yDelta = yMax - yMin; # rectangle dimensions
# areaTotal = xDelta * yDelta;
#
# s = 0.5; # scale parameter
#
#
# # Point process parameters
# def fun_lambda(x, y):
# return 100 * np.exp(-(x ** 2 + y ** 2) / s ** 2); # intensity function
#
#
# ###START -- find maximum lambda -- START ###
# # For an intensity function lambda, given by function fun_lambda,
# # finds the maximum of lambda in a rectangular region given by
# # [xMin,xMax,yMin,yMax].
# def fun_Neg(x):
# return -fun_lambda(x[0], x[1]); # negative of lambda
#
#
# xy0 = [(xMin + xMax) / 2, (yMin + yMax) / 2]; # initial value(ie centre)
# # Find largest lambda value
# resultsOpt = minimize(fun_Neg, xy0, bounds=((xMin, xMax), (yMin, yMax)));
# lambdaNegMin = resultsOpt.fun; # retrieve minimum value found by minimize
# lambdaMax = -lambdaNegMin;
#
#
# ###END -- find maximum lambda -- END ###
#
# # define thinning probability function
# def fun_p(x, y):
# return fun_lambda(x, y) / lambdaMax;
#
#
# # Simulate a Poisson point process
# numbPoints = np.random.poisson(lambdaMax * areaTotal); # Poisson number of points
# xx = np.random.uniform(0, xDelta, ((numbPoints, 1))) + xMin; # x coordinates of Poisson points
# yy = np.random.uniform(0, yDelta, ((numbPoints, 1))) + yMin; # y coordinates of Poisson points
#
# # calculate spatially-dependent thinning probabilities
# p = fun_p(xx, yy);
#
# # Generate Bernoulli variables (ie coin flips) for thinning
# booleRetained = np.random.uniform(0, 1, ((numbPoints, 1))) < p; # points to be thinned
#
# # x/y locations of retained points
# xxRetained = xx[booleRetained];
# yyRetained = yy[booleRetained];
#
# # Plotting
# plt.scatter(xxRetained, yyRetained, edgecolor='b', facecolor='none', alpha=0.5);
# plt.xlabel("x");
# plt.ylabel("y");
# plt.show();
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment