Commit 47813afa authored by leufen1

index array was missing

parent 35b5308b
@@ -515,9 +515,9 @@ class PlotPeriodogram(AbstractPlotClass):  # pragma: no cover
         plot_data_single = dict()
         plot_data_raw_single = dict()
         plot_data_mean_single = dict()
+        self.f_index = np.logspace(-3, 0 if self._sampling == "daily" else np.log10(24), 1000)
         raw_data_single = self._prepare_pgram_parallel_gen(generator, m, pos, use_multiprocessing)
         # raw_data_single = self._prepare_pgram_parallel_var(generator, m, pos, use_multiprocessing)
-        self.f_index = np.logspace(-3, 0 if self._sampling == "daily" else np.log10(24), 1000)
         for var in raw_data_single.keys():
             pgram_com = []
             pgram_mean = 0
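The hunk above moves the construction of the shared frequency grid in front of the call that fans the work out to the stations, so the grid can be forwarded to the worker functions further down. A minimal sketch of that grid; the helper name frequency_index is made up for illustration and is not part of the repository:

import numpy as np

def frequency_index(sampling: str, n: int = 1000) -> np.ndarray:
    # log-spaced frequencies in day^-1: up to 1 cycle/day for daily data,
    # up to 24 cycles/day for hourly data (mirrors the added line above)
    upper = 0 if sampling == "daily" else np.log10(24)
    return np.logspace(-3, upper, n)

f_index = frequency_index("hourly")  # shape (1000,), identical for every station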
@@ -577,14 +577,14 @@ class PlotPeriodogram(AbstractPlotClass):  # pragma: no cover
             pool = multiprocessing.Pool(
                 min([psutil.cpu_count(logical=False), len(generator), 16]))  # use only physical cpus
             output = [
-                pool.apply_async(f_proc_2, args=(g, m, pos, self.variables_dim, self.time_dim))
+                pool.apply_async(f_proc_2, args=(g, m, pos, self.variables_dim, self.time_dim, self.f_index))
                 for g in generator]
             for i, p in enumerate(output):
                 res.append(p.get())
             pool.close()
         else:
             for g in generator:
-                res.append(f_proc_2(g, m, pos, self.variables_dim, self.time_dim))
+                res.append(f_proc_2(g, m, pos, self.variables_dim, self.time_dim, self.f_index))
         for res_dict in res:
             for k, v in res_dict.items():
                 if k not in raw_data_single.keys():
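Both branches of the hunk above, the multiprocessing path and the serial fallback, now forward the same extra argument to the worker. A minimal, self-contained sketch of that pattern; the worker function below is a stand-in for f_proc_2, not code from this repository:

import multiprocessing

def worker(g, f_index):
    return g * len(f_index)  # placeholder for the real periodogram computation

if __name__ == "__main__":
    generator, f_index = [1, 2, 3], [0.1, 0.2, 0.3]
    with multiprocessing.Pool(2) as pool:
        output = [pool.apply_async(worker, args=(g, f_index)) for g in generator]
        res = [p.get() for p in output]
    # the serial fallback yields the same result as the parallel path
    assert res == [worker(g, f_index) for g in generator]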
@@ -607,7 +607,7 @@ class PlotPeriodogram(AbstractPlotClass):  # pragma: no cover
         """
         ax.set_yscale('log')
         ax.set_xscale('log')
-        ax.set_ylabel("power", fontsize='x-large')
+        ax.set_ylabel("power spectral density", fontsize='x-large')  # unit depends on variable: [unit^2 day^-1]
         ax.set_xlabel("frequency $[day^{-1}$]", fontsize='x-large')
         lims = ax.get_ylim()
         self._add_annotation_line(ax, [1, 2, 3], 365.25, lims, "yr")  # per year
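The only change in the hunk above is the y-axis label; the axes stay log-log. A minimal matplotlib sketch of the resulting axis styling, using a synthetic placeholder spectrum rather than repository data:

import numpy as np
import matplotlib.pyplot as plt

f = np.logspace(-3, np.log10(24), 1000)                       # frequency in day^-1
fig, ax = plt.subplots()
ax.plot(f, 1 / f)                                             # placeholder spectrum
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_ylabel("power spectral density", fontsize='x-large')   # unit depends on variable
ax.set_xlabel("frequency $[day^{-1}]$", fontsize='x-large')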
@@ -698,14 +698,15 @@ class PlotPeriodogram(AbstractPlotClass):  # pragma: no cover
         plt.close('all')


-def f_proc(var, d_var):
+def f_proc(var, d_var, f_index):
     var_str = str(var)
     t = (d_var.datetime - d_var.datetime[0]).astype("timedelta64[h]").values / np.timedelta64(1, "D")
-    f, pgram = LombScargle(t, d_var.values.flatten(), nterms=1).autopower()
-    return var_str, f, pgram
+    pgram = LombScargle(t, d_var.values.flatten(), nterms=1, normalization="psd").power(f_index)
+    # f, pgram = LombScargle(t, d_var.values.flatten(), nterms=1, normalization="psd").autopower()
+    return var_str, f_index, pgram


-def f_proc_2(g, m, pos, variables_dim, time_dim):
+def f_proc_2(g, m, pos, variables_dim, time_dim, f_index):
     raw_data_single = dict()
     if m == 0:
         d = g.id_class._data
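The core of the fix sits in f_proc above: the Lomb-Scargle periodogram is now evaluated with power() on the fixed grid f_index (with PSD normalization) instead of letting autopower() choose a per-series grid of varying length. A minimal sketch with synthetic data, assuming astropy is available:

import numpy as np
from astropy.timeseries import LombScargle

rng = np.random.default_rng(0)
t = np.sort(rng.uniform(0, 365, 2000))                           # irregular sampling times in days
y = np.sin(2 * np.pi * t) + 0.5 * rng.standard_normal(t.size)    # daily cycle plus noise

f_index = np.logspace(-3, np.log10(24), 1000)                    # shared frequency grid in day^-1
ls = LombScargle(t, y, nterms=1, normalization="psd")
pgram = ls.power(f_index)                                        # always shape (1000,)
# f_auto, pgram_auto = ls.autopower()                            # grid would differ per series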
@@ -716,6 +717,6 @@ def f_proc_2(g, m, pos, variables_dim, time_dim):
     d = d[pos] if isinstance(d, tuple) else d
     for var in d[variables_dim].values:
         d_var = d.loc[{variables_dim: var}].squeeze().dropna(time_dim)
-        var_str, f, pgram = f_proc(var, d_var)
+        var_str, f, pgram = f_proc(var, d_var, f_index)
         raw_data_single[var_str] = [(f, pgram)]
     return raw_data_single
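With every station evaluated on the same f_index, the spectra collected in raw_data_single all have the same length, so the per-variable accumulation into pgram_mean further up becomes a plain elementwise average. A minimal sketch of that payoff, using synthetic spectra rather than repository data:

import numpy as np

f_index = np.logspace(-3, np.log10(24), 1000)
station_pgrams = [np.abs(np.random.default_rng(i).standard_normal(f_index.size)) for i in range(3)]
pgram_mean = np.mean(np.stack(station_pgrams), axis=0)  # valid only because all shapes match
assert pgram_mean.shape == f_index.shape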