Coverage for bim2sim/plugins/PluginComfort/bim2sim_comfort/task/plot_comfort_results.py: 0%

851 statements  

« prev     ^ index     » next       coverage.py v7.10.7, created at 2025-10-01 10:24 +0000

1import json 

2import logging 

3from pathlib import Path 

4from textwrap import wrap 

5from typing import List 

6 

7import matplotlib as mpl 

8import numpy as np 

9import pandas as pd 

10from RWTHColors import ColorManager 

11from matplotlib import pyplot as plt 

12from matplotlib.colors import ListedColormap, Normalize, BoundaryNorm 

13import seaborn as sns 

14 

15from bim2sim.elements.mapping.units import ureg 

16from bim2sim.tasks.bps import PlotBEPSResults 

17from bim2sim.utilities.common_functions import filter_elements 

18from bim2sim.utilities.types import BoundaryOrientation 

19 

# Centimetres per inch; figure sizes below are specified in cm and divided
# by this factor to get matplotlib's inch-based figsize.
INCH = 2.54

logger = logging.getLogger(__name__)
# RWTH corporate-design color manager (RWTHColors package).
cm = ColorManager()
# Reset matplotlib to library defaults before layering style sheets.
plt.rcParams.update(mpl.rcParamsDefault)
# SciencePlots style sheets; the second call layers 'no-latex' on top so
# plots render without requiring a LaTeX installation.
plt.style.use(['science', 'grid', 'rwth'])
plt.style.use(['science', 'no-latex'])

# Update rcParams for font settings
plt.rcParams.update({
    'font.size': 20,
    'font.family': 'sans-serif',  # Use sans-serif font
    'font.sans-serif': ['Arial', 'Helvetica', 'DejaVu Sans', 'sans-serif'],
    # Specify sans-serif fonts
    'legend.frameon': True,
    'legend.facecolor': 'white',
    'legend.framealpha': 0.5,
    'legend.edgecolor': 'black',
    "lines.linewidth": 0.4,
    "text.usetex": False,  # use inline math for ticks
    "pgf.rcfonts": True,
})

42 

43 

class PlotComfortResults(PlotBEPSResults):
    """Plot thermal comfort results of EnergyPlus-based PluginComfort runs.

    Extends :class:`PlotBEPSResults` with DIN EN 16798 comfort-category
    plots, PMV/PPD heatmaps and a limited local comfort analysis.
    """
    # Playground state keys this task consumes.
    reads = ('df_finals', 'sim_results_path', 'ifc_files', 'elements')
    # Marks this task as a final task of the bim2sim task chain.
    final = True

47 

    def run(self, df_finals: dict, sim_results_path: Path,
            ifc_files: List[Path], elements: dict):
        """Plot the comfort results for BEPS simulations.

        This holds pre-configured functions to plot the results of the BEPS
        simulations created with the EnergyPlus-based PluginComfort.

        Args:
            df_finals: dict of final results where key is the building name and
                value is the dataframe holding the results for this building
            sim_results_path: base path where to store the plots
            ifc_files: bim2sim IfcFileClass holding the ifcopenshell ifc
                instance
            elements (dict): Dictionary of building elements.
        """
        if not self.playground.sim_settings.create_plots:
            logger.info("Visualization of Comfort Results is skipped ...")
            return
        logger.info("Visualization of Comfort Results started ...")
        # NOTE(review): sim_setting name 'plot_singe_zone_guid' looks like a
        # typo of 'single', but must match the declared setting — verify
        # against the sim_settings definition before renaming.
        plot_single_guid = self.playground.sim_settings.plot_singe_zone_guid
        new_zone_names = None
        # zone_dict maps space GUIDs to zone usage names (written earlier in
        # the task chain).
        zone_dict_path = sim_results_path / self.prj_name / 'zone_dict.json'
        with open(zone_dict_path) as j:
            zone_dict = json.load(j)
        if plot_single_guid:
            logger.info("Check if plot_single_guid is valid space name.")
            # Fall back to plotting all spaces if the requested GUID is unknown.
            if not plot_single_guid in zone_dict.keys():
                plot_single_guid = ''
                logger.info("Requested plot_single_guid is not found in IFC "
                            "file, plotting results for all spaces instead.")
        if self.playground.sim_settings.rename_plot_keys:
            # load keys to rename space names for plotting
            with open(self.playground.sim_settings.rename_plot_keys_path) as rk:
                rename_keys = json.load(rk)
            zone_dict_ext_path = sim_results_path / self.prj_name / 'zone_dict_ifc_names.json'
            with open(zone_dict_ext_path) as j:
                zone_dict_ifc_names = json.load(j)
            zone_dict, new_zone_names = self.rename_zone_usage(zone_dict,
                                                               zone_dict_ifc_names,
                                                               rename_keys, sim_results_path)
        # Prefer the generated short names (storey + room) when available.
        if new_zone_names:
            zone_dict = new_zone_names
        for bldg_name, df in df_finals.items():
            export_path = sim_results_path / bldg_name / 'plots'
            if not export_path.exists():
                export_path.mkdir(parents=False, exist_ok=False)
            spaces = filter_elements(elements, 'ThermalZone')
            if self.playground.sim_settings.plot_zone_usages:
                # only plot results for a selection of zone usages. This
                # reduces the computational overhead, e.g., if only single
                # offices are plotted
                exclude_guids = []
                for space in spaces:
                    # NOTE(review): compares the lower-cased space GUID with
                    # the raw setting value; IFC GUIDs are case-sensitive, so
                    # confirm this matches as intended.
                    if not any(key.lower() in space.usage.lower() for key in
                               self.playground.sim_settings.plot_zone_usages)\
                            and not space.guid.lower() == plot_single_guid:
                        exclude_guids.append(space.guid)
                        logger.info(f'Exclude space {space.usage} '
                                    f'{space.guid} and its space boundaries '
                                    f'from further evaluation due to '
                                    f'sim_setting.')
                        # Also drop all result columns of the space's
                        # boundaries, not only the space itself.
                        for bound in space.space_boundaries:
                            exclude_guids.append(bound.guid)
                filtered_columns = [
                    col for col in df.columns
                    if not any(
                        exclude_guid.lower() in col.lower() for exclude_guid in
                        exclude_guids)
                ]
                filtered_df = df[filtered_columns]
                df = filtered_df
            # add a limited local comfort analysis based on the surface
            # temperatures calculated in EnergyPlus and the limits defined in
            # DIN EN 16798-1 National Appendix. This evaluates
            # minimum/maximum surface temperatures and differences to the air
            # temperature.
            self.limited_local_comfort_DIN16798_NA(df, elements, export_path)

            # generate DIN EN 16798-1 adaptive comfort scatter plot and
            # return analysis of comfort categories for further plots
            if not plot_single_guid:
                # generate plots and data for scatter plots for all spaces
                cat_analysis, cat_analysis_occ, cat_analysis_occ_hours = (
                    self.apply_en16798_to_all_zones(df, zone_dict,
                                                    export_path))
            else:
                # only generate plots and data for a selected single space GUID
                cat_analysis, cat_analysis_occ, cat_analysis_occ_hours = (
                    self.apply_en16798_to_single_zone(df, zone_dict,
                                                      export_path,
                                                      plot_single_guid))
            # plot a barplot combined with table of comfort categories from
            # DIN EN 16798.
            # general table bar plot considering all generated data
            self.table_bar_plot_16798(cat_analysis, export_path)
            # table bar plot, only evaluating the occupied time (in %)
            self.table_bar_plot_16798(cat_analysis_occ, export_path, tag='occ')
            # table bar plot, evaluating the exceeding temperature degree
            # hours for the occupied time of the year.
            self.table_bar_plot_16798(cat_analysis_occ_hours, export_path,
                                      tag='occ_hours', normalize=False,
                                      unit=f"{ureg.kelvin*ureg.hour:~P}",
                                      unit_name='Degree hours',
                                      y_scale='linear')

            # generate plots for Fanger's PMV
            fanger_pmv = df[[col for col in df.columns if 'fanger_pmv' in col]]
            if plot_single_guid:
                # Reduce to the requested zone and plot its PMV time series.
                fanger_pmv = fanger_pmv[[col for col in fanger_pmv.columns if
                                         plot_single_guid in col]]
                self.pmv_plot(fanger_pmv, export_path,
                              f"pmv_{plot_single_guid}")

            for space in spaces:
                # Skip spaces without PMV result columns.
                if not any(fanger_pmv.filter(like=space.guid)):
                    continue
                self.logger.info(f"Space: {space.usage}, GUID: {space.guid}")
                col = fanger_pmv.filter(like=space.guid).columns[0]
                # visualize heatmap of PMV
                self.visualize_heatmap(fanger_pmv, col, export_path,
                                       save_as='heatmap_',
                                       zone_dict=zone_dict,
                                       color_categories='PMV', guid=space.guid)
                # generate calendar plot for daily mean pmv results
                # self.visualize_calendar(pd.DataFrame(fanger_pmv[col]),
                #                         export_path, save_as='calendar_',
                #                         add_title=True,
                #                         color_only=True, figsize=[11, 12],
                #                         zone_dict=zone_dict, guid=space.guid)

176 

    @staticmethod
    def visualize_heatmap(df, col, export_path, save_as='',
                          add_title=False, save=True, zone_dict='', year='',
                          color_categories='', guid=''):
        """
        Visualize a day-by-hour heatmap of a comfort time series (e.g. PMV).

        The series is reshaped into a (day x hour) matrix and rendered via
        seaborn. When ``color_categories`` is 'PMV' or 'PPD', each cell is
        colored by its DIN EN 16798 comfort category and a legend with the
        relative share of each category is added; otherwise a plain
        'viridis' heatmap with colorbar is drawn.

        Args:
            df: Input dataframe with PMV data (datetime-indexed)
            col: PMV column
            export_path: plot export path
            save_as: prefix for saving files
            add_title: True if title should be added to plot
            save: True if plot should be saved instead of displayed
            zone_dict: dictionary to rename the space names
                (NOTE(review): default '' would fail at ``.items()`` below —
                callers are expected to pass a dict)
            year: string to add the value of the year
            color_categories: select 'PMV', 'PPD'. Standard heatmap is
                applied if left blank
                (NOTE(review): any other truthy value leaves ``labels`` and
                ``color_data`` unbound and raises NameError)
            guid: plot individual space GUID

        Returns:

        """
        def color_mapper_pmv(value):
            # Map a PMV value to a DIN EN 16798 category color; bands are
            # symmetric around 0 (+-0.2 / 0.5 / 0.7). Non-comparable values
            # (e.g. NaN) fall through to 'black'.
            if -0.2 < value < 0.2:
                return 'green'  # CAT I
            elif -0.5 < value <= -0.2:
                return 'lightblue'  # CAT II low
            elif 0.2 <= value < 0.5:
                return 'yellow'  # CAT II high
            elif -0.7 < value <= -0.5:
                return 'mediumblue'  # CAT III low
            elif 0.5 <= value < 0.7:
                return 'orange'  # CAT III high
            elif value <= -0.7:
                return 'darkblue'  # CAT IV low
            elif value >= 0.7:
                return 'red'  # CAT IV high
            else:
                return 'black'

        def color_mapper_ppd(value):
            # Map a PPD percentage to a category color (bands 6/10/15/25 %).
            if value < 6:
                return 'green'  # CAT I
            elif 6 <= value < 10:
                return 'yellow'  # CAT II
            elif 10 <= value < 15:
                return 'orange'  # CAT III
            elif 15 <= value < 25:
                return 'red'  # CAT IV
            elif value >= 25:
                return 'purple'  # out of range
            else:
                return 'black'

        series = pd.Series(df[col], index=df.index)
        # Create a MultiIndex for day and hour
        series.index = pd.MultiIndex.from_arrays(
            [series.index.date, series.index.hour],
            names=['Month', 'Hour']
        )

        # Reshape into one row per day, one column per hour of the day.
        heatmap_data = series.unstack(level='Hour')
        plt.figure(figsize=(14 / INCH, 8 / INCH))

        # plt.title('Heatmap of Hourly Data')
        if color_categories:
            ncol = None
            if color_categories == 'PMV':
                # Map every cell to its category color, then derive the
                # relative share (%) of each category for the legend labels.
                color_data = heatmap_data.apply(
                    lambda col: col.map(color_mapper_pmv))
                color_counts_absolute = color_data.stack().value_counts()
                color_counts_relative = (
                    round((color_counts_absolute / color_counts_absolute.sum(
                    )) * 100, 1))
                unique_colors = ['green',
                                 'white',  # dummy color for legend
                                 'lightblue', 'yellow',
                                 'mediumblue',
                                 'orange', 'darkblue', 'red', 'black']
                # Ensure every legend color has a (possibly 0 %) share entry.
                for color in unique_colors:
                    if color not in color_counts_relative.index:
                        color_counts_relative[color] = 0
                labels = {
                    f'CAT I ({color_counts_relative["green"]}%)': 'green',
                    '': 'white',  # add dummy color for legend arrangement
                    f'CAT II low ({color_counts_relative["lightblue"]}%)':
                        'lightblue',
                    f'CAT II high ({color_counts_relative["yellow"]}%)': 'yellow',
                    f'CAT III low ({color_counts_relative["mediumblue"]}%)': 'mediumblue',
                    f'CAT III high ({color_counts_relative["orange"]}%)': 'orange',
                    f'CAT IV low ({color_counts_relative["darkblue"]}%)': 'darkblue',
                    f'CAT IV high ({color_counts_relative["red"]}%)': 'red'
                }
                ncol = 4
            if color_categories == 'PPD':
                color_data = heatmap_data.apply(
                    lambda col: col.map(color_mapper_ppd))
                color_counts_absolute = color_data.stack().value_counts()
                color_counts_relative = (
                    round((color_counts_absolute / color_counts_absolute.sum(
                    ) * 100), 1))
                unique_colors = ['green',
                                 'white',  # dummy color for legend arrangement
                                 'yellow', 'orange', 'red',
                                 'purple',
                                 'black']
                for color in unique_colors:
                    if color not in color_counts_relative.index:
                        color_counts_relative[color] = 0
                labels = {
                    f'CAT I ({color_counts_relative["green"]}%)': 'green',
                    '': 'white',  # dummy color for legend arrangement
                    f'CAT II ({color_counts_relative["yellow"]}%)': 'yellow',
                    f'CAT III ({color_counts_relative["orange"]}%)': 'orange',
                    f'CAT IV ({color_counts_relative["red"]}%)': 'red',
                    f'Out of range ({color_counts_relative["purple"]}%)': 'purple',
                }
                ncol = 3
            # Convert color names to integer codes so seaborn can render the
            # categorical map with a discrete ListedColormap.
            cmap = ListedColormap(unique_colors)
            color_to_num = {color: num for num, color in
                            enumerate(unique_colors)}
            numeric_data = color_data.apply(lambda col: col.map(color_to_num))
            norm = BoundaryNorm(range(len(unique_colors) + 1), cmap.N)
            sns.heatmap(numeric_data.T, cmap=cmap, norm=norm, cbar=False)
            # Proxy artists: one legend marker per category.
            handles = [plt.Line2D([0], [0], marker='o', color='w', label=label,
                                  markerfacecolor=color, markersize=4) for
                       label, color in labels.items()]
            plt.subplots_adjust(right=0.9, bottom=0.3, top=0.9,
                                left=0.1)  # Adjust right side to
            plt.legend(handles=handles, bbox_to_anchor=(-0.1, -0.2),
                       loc='upper left', frameon=False, ncol=ncol,
                       columnspacing=0.8,
                       handletextpad=-0.05,  # borderaxespad=0.5,
                       fontsize=8)
        else:
            sns.heatmap(heatmap_data.T, cmap='viridis', cbar=True)

        # Customize x-ticks to show month abbreviations
        month_labels = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                        'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

        # Extract unique days from columns and convert them back to datetime
        # objects for labeling
        days = pd.to_datetime(heatmap_data.T.columns)

        # Set x-ticks with abbreviated month names and day numbers
        # Filter for ticks where day.day == 1 and create corresponding labels
        filtered_ticks = [i for i, day in enumerate(days) if day.day == 1]
        filtered_labels = [f"{month_labels[day.month - 1]}" for day in
                           days if day.day == 1]

        # Set x-ticks with abbreviated month names only at desired positions
        plt.xticks(ticks=filtered_ticks, labels=filtered_labels, rotation=45,
                   fontsize=8)
        plt.ylim([24.01, 0])
        plt.yticks(np.arange(0, 25, step=4), np.arange(0, 25, step=4),
                   rotation=0,
                   fontsize=8)
        plt.ylabel('Hour of the day', size=8)
        plt.xlabel('', size=8)
        # Replace GUID fragments in the column name with readable zone names.
        title_name = col
        for key, item in zone_dict.items():
            if key in title_name:
                title_name = title_name.replace(key, item)  # + '_' +
                # guid
        if add_title:
            plt.title(str(year) + ' ' + title_name, fontsize=3)
        if save:
            plt.savefig(export_path / str(
                save_as.replace('/','_')
                + title_name.replace('/', '_') + f"_{year}_"
                + color_categories + '.pdf'), bbox_inches='tight')

350 

351 def limited_local_comfort_DIN16798_NA(self, df, elements, export_path, 

352 occupied=True): 

353 """ 

354 Calculate a limited local comfort using surface temperatures from 

355 EnergyPlus and limits from DIN EN 16798-1 (National Appendix). 

356 Args: 

357 df: result dataframe 

358 elements: bim2sim elements 

359 export_path: export path for result plots and data 

360 occupied: True if only occupied states should be evaluated 

361 

362 Returns: 

363 

364 """ 

365 spaces = filter_elements(elements, 'ThermalZone') 

366 local_discomfort_dict = {} 

367 global_local_discomfort = pd.DataFrame(columns=['TimeStamp', 

368 'space', 

369 'space_temperature', 

370 'wall_min', 

371 'wall_max', 

372 'floor_min', 

373 'floor_max', 

374 'ceiling_min', 

375 'ceiling_max']) 

376 local_discomfort_overview = pd.DataFrame(columns=['space', 

377 'wall_min', 

378 'wall_max', 

379 'floor_min', 

380 'floor_max', 

381 'ceiling_min', 

382 'ceiling_max']) 

383 initial_row = {col: True for col in local_discomfort_overview.columns if 

384 col != 'space'} 

385 # only proceed if surface temperatures are available in the dataframe 

386 if not any(df.filter(like='surf_inside_temp')): 

387 self.logger.warning("No surface temperatures found. Set " 

388 "sim_setting cfd_export to True to enable " 

389 "local limited comfort analysis using " 

390 "EnergyPlus.") 

391 return 

392 for space in spaces: 

393 space_local_discomfort = pd.DataFrame( 

394 columns=global_local_discomfort.columns) 

395 bound_temperatures_found = False 

396 if not any(df.filter(like=space.guid)): 

397 continue 

398 self.logger.info(f"Space: {space.usage}, GUID: {space.guid}") 

399 local_discomfort_dict.update({ 

400 space.guid: 

401 { 

402 'wall': {'min': {'count': 0, 

403 'hours': 0}, 

404 'max': {'count': 0, 

405 'hours': 0}}, 

406 'floor': {'min': {'count': 0, 

407 'hours': 0}, 

408 'max': {'count': 0, 

409 'hours': 0}}, 

410 'ceiling': 

411 {'min': {'count': 0, 

412 'hours': 0}, 

413 'max': {'count': 0, 

414 'hours': 0}}, 

415 }}) 

416 new_row = {**initial_row, 'space': space.guid} 

417 local_discomfort_overview = pd.concat( 

418 [local_discomfort_overview, pd.DataFrame([new_row])], 

419 ignore_index=True) 

420 space_temperature = df[f"air_temp_rooms_{space.guid}"].apply( 

421 lambda x: x.magnitude) 

422 if occupied: 

423 n_persons_df = df['n_persons_rooms_' + space.guid] 

424 common_index = space_temperature.index.intersection( 

425 n_persons_df.index) 

426 space_temperature = space_temperature.loc[common_index][ 

427 n_persons_df.loc[common_index] > 0] 

428 space_local_discomfort = space_local_discomfort.reindex( 

429 index=space_temperature.index) 

430 wall_df = pd.DataFrame() 

431 floor_df = pd.DataFrame() 

432 ceiling_df = pd.DataFrame() 

433 

434 for bound in space.space_boundaries: 

435 bound_temperature = df.filter(like=bound.guid) 

436 if bound_temperature.empty or bound.bound_element is None: 

437 continue 

438 else: 

439 bound_temperatures_found = True 

440 try: 

441 bound_temperature = bound_temperature.iloc[:, 0].apply( 

442 lambda x: x.magnitude) 

443 except AttributeError: 

444 self.logger.warning(f"object has no attribute 'magnitude'") 

445 if occupied: 

446 common_index = bound_temperature.index.intersection( 

447 n_persons_df.index) 

448 bound_temperature = bound_temperature.loc[common_index][ 

449 n_persons_df.loc[common_index] > 0] 

450 if 'WALL' in bound.bound_element.element_type.upper(): 

451 wall_df = pd.concat([wall_df, bound_temperature], axis=1) 

452 if (('FLOOR' in bound.bound_element.element_type.upper() and 

453 bound.top_bottom == BoundaryOrientation.top) or 

454 ('ROOF' in bound.bound_element.element_type.upper())): 

455 ceiling_df = pd.concat([ceiling_df, bound_temperature], 

456 axis=1) 

457 if ('FLOOR' in bound.bound_element.element_type.upper() 

458 and bound.top_bottom == BoundaryOrientation.bottom): 

459 floor_df = pd.concat([floor_df, bound_temperature], axis=1) 

460 space_local_discomfort = space_local_discomfort.reindex( 

461 index=bound_temperature.index) 

462 if not bound_temperatures_found: 

463 self.logger.warning(f"No bound temperatures found in space " 

464 f"{space.usage} {space.guid}. No limited " 

465 f"local comfort analyzed.") 

466 continue 

467 min_wall_df, max_wall_df = self.get_exceeded_temperature_hours( 

468 wall_df, 

469 -10, 23, 

470 space_temperature) 

471 min_floor_df, max_floor_df = self.get_exceeded_temperature_hours( 

472 floor_df, 19, 

473 29, 0) 

474 min_ceiling_df, max_ceiling_df = ( 

475 self.get_exceeded_temperature_hours( 

476 ceiling_df, 

477 -14, 

478 5, space_temperature)) 

479 if not min_wall_df.empty: 

480 num_min_wall, hours_min_wall = ( 

481 self.calc_exceeded_temperature_hours( 

482 min_wall_df, space_temperature, 10)) 

483 local_discomfort_dict[space.guid]['wall'].update( 

484 {'min': {'count': num_min_wall, 

485 'hours': hours_min_wall}}) 

486 local_discomfort_overview.iloc[ 

487 -1, local_discomfort_overview.columns.get_loc( 

488 'wall_min')] = False 

489 min_wall_df = min_wall_df.rename( 

490 columns={'MinValue': 'wall_min'}) 

491 space_local_discomfort.update(min_wall_df) 

492 if not max_wall_df.empty: 

493 num_max_wall, hours_max_wall = ( 

494 self.calc_exceeded_temperature_hours( 

495 max_wall_df, space_temperature, 23)) 

496 local_discomfort_dict[space.guid]['wall'].update( 

497 {'max': {'count': num_max_wall, 

498 'hours': num_max_wall}}) 

499 local_discomfort_overview.iloc[ 

500 -1, local_discomfort_overview.columns.get_loc( 

501 'wall_max')] = False 

502 max_wall_df = max_wall_df.rename( 

503 columns={'MaxValue': 'wall_max'}) 

504 space_local_discomfort.update(max_wall_df) 

505 if not min_floor_df.empty: 

506 num_min_floor, hours_min_floor = ( 

507 self.calc_exceeded_temperature_hours( 

508 min_floor_df, 0, 19)) 

509 local_discomfort_dict[space.guid]['floor'].update( 

510 {'min': {'count': num_min_floor, 

511 'hours': hours_min_floor}}) 

512 local_discomfort_overview.iloc[ 

513 -1, local_discomfort_overview.columns.get_loc( 

514 'floor_min')] = False 

515 min_floor_df = min_floor_df.rename( 

516 columns={'MinValue': 'floor_min'}) 

517 space_local_discomfort.update(min_floor_df) 

518 if not max_floor_df.empty: 

519 num_max_floor, hours_max_floor = ( 

520 self.calc_exceeded_temperature_hours( 

521 max_floor_df, 0, 29)) 

522 local_discomfort_dict[space.guid]['floor'].update( 

523 {'max': {'count': num_max_floor, 

524 'hours': hours_max_floor}}) 

525 local_discomfort_overview.iloc[ 

526 -1, local_discomfort_overview.columns.get_loc( 

527 'floor_max')] = False 

528 max_floor_df = max_floor_df.rename( 

529 columns={'MaxValue': 'floor_max'}) 

530 space_local_discomfort.update(max_floor_df) 

531 if not min_ceiling_df.empty: 

532 num_min_ceiling, hours_min_ceiling = ( 

533 self.calc_exceeded_temperature_hours( 

534 min_ceiling_df, 0, 14)) 

535 local_discomfort_dict[space.guid]['ceiling'].update( 

536 {'min': {'count': num_min_ceiling, 

537 'hours': hours_min_ceiling}}) 

538 local_discomfort_overview.iloc[ 

539 -1, local_discomfort_overview.columns.get_loc( 

540 'ceiling_min')] = False 

541 min_ceiling_df = min_ceiling_df.rename( 

542 columns={'MinValue': 'ceiling_min'}) 

543 space_local_discomfort.update(min_ceiling_df) 

544 if not max_ceiling_df.empty: 

545 num_max_ceiling, hours_max_ceiling = ( 

546 self.calc_exceeded_temperature_hours( 

547 max_ceiling_df, 0, 5)) 

548 local_discomfort_dict[space.guid]['ceiling'].update( 

549 {'max': {'count': num_max_ceiling, 

550 'hours': hours_max_ceiling}}) 

551 local_discomfort_overview.iloc[ 

552 -1, local_discomfort_overview.columns.get_loc( 

553 'ceiling_max')] = False 

554 max_ceiling_df = max_ceiling_df.rename( 

555 columns={'MaxValue': 'ceiling_max'}) 

556 space_local_discomfort.update(max_ceiling_df) 

557 last_row_values = local_discomfort_overview.iloc[-1] 

558 all_true_except_space = all( 

559 last_row_values[col] for col in last_row_values.index if 

560 col != 'space') 

561 if all_true_except_space: 

562 self.logger.info(f'DIN EN 16798-1 NA (GER), ' 

563 f'limited local comfort check passed for space ' 

564 f'usage "{space.usage}" with ' 

565 f'guid "{space.guid}". ') 

566 else: 

567 space_local_discomfort = space_local_discomfort.dropna(how='all') 

568 space_local_discomfort.loc[:, 'space'] = (f"{space.usage}" 

569 f"_{space.guid}") 

570 space_local_discomfort['space_temperature'] = \ 

571 space_temperature.loc[n_persons_df > 0] 

572 space_local_discomfort = space_local_discomfort.dropna( 

573 how='all', axis=1) 

574 space_local_discomfort.to_csv(export_path / 

575 f'space_' 

576 f'{space.guid}_local_discomf.csv') 

577 failed_checks = [col for col in space_local_discomfort.columns 

578 if not col in ['space', 'space_temperature']] 

579 self.logger.warning(f'DIN EN 16798-1 NA (GER), limited local ' 

580 f'comfort check FAILED the checks ' 

581 f'{failed_checks} for space usage ' 

582 f'"{space.usage}" with ' 

583 f'guid "{space.guid}". Please check ' 

584 f'"space_{space.guid}_local_discomf.csv" ' 

585 f'for details.') 

586 with open(export_path / 'beps_local_discomfort.json', 'w+') as file: 

587 json.dump(local_discomfort_dict, file, indent=4) 

588 local_discomfort_overview.to_csv( 

589 export_path / 'local_discomfort_overview.csv') 

590 

591 def calc_exceeded_temperature_hours(self, df, reference, limit): 

592 """ 

593 Calculate temperature exceeding occurrence. 

594 

595 Returns a tuple, consisting of the number of timesteps that exceed 

596 the limit and the sum of all differences exceeding the limit. 

597 

598 Args: 

599 df: temperature dataframe 

600 reference: reference temperature value 

601 limit: limit temperature based on reference value 

602 

603 Returns: 

604 num_timesteps_exceeding, sum_difference_exceeding_limit 

605 

606 """ 

607 value_over_reference = abs(df.sub(reference, axis=0).dropna()) - limit 

608 return len(value_over_reference), value_over_reference.values.sum() 

609 

610 def get_exceeded_temperature_hours(self, df, min_limit, max_limit, 

611 ref_value): 

612 """ 

613 Get all entries of the dataframe where temperature is exceeded. 

614 Args: 

615 df: original dataframe 

616 min_limit: minimum temperature limit 

617 max_limit: maximum temperature limit 

618 ref_value: reference temperature 

619 

620 Returns: 

621 df with all entries below the min_limit, df_with all entries 

622 above the max_limit 

623 

624 """ 

625 

626 df_min = pd.DataFrame() 

627 df_max = pd.DataFrame() 

628 array = df.values 

629 mask_max = df.sub(ref_value, axis=0) > max_limit 

630 if mask_max.values.any(): 

631 filtered_array = np.where(mask_max, array, np.nan) 

632 max_values = [] 

633 for row in filtered_array: 

634 if not np.isnan(row).all(): 

635 max_values.append(np.nanmax(row)) 

636 else: 

637 max_values.append(np.nan) 

638 max_values = np.array(max_values) 

639 max_indices = np.where(~np.isnan(max_values))[0] 

640 df_max = pd.DataFrame(max_values[max_indices], 

641 index=df.index[max_indices], 

642 columns=['MaxValue']) 

643 mask_min = df.sub(ref_value, axis=0) < min_limit 

644 if mask_min.values.any(): 

645 filtered_array = np.where(mask_min, array, np.nan) 

646 min_values = [] 

647 for row in filtered_array: 

648 if not np.isnan(row).all(): 

649 min_values.append(np.nanmin(row)) 

650 else: 

651 min_values.append(np.nan) 

652 min_values = np.array(min_values) 

653 min_indices = np.where(~np.isnan(min_values))[0] 

654 df_min = pd.DataFrame(min_values[min_indices], 

655 index=df.index[min_indices], 

656 columns=['MinValue']) 

657 

658 return df_min, df_max 

659 

660 @staticmethod 

661 def rename_duplicates(dictionary): 

662 value_counts = {} 

663 renamed_dict = {} 

664 for key, value in dictionary.items(): 

665 if value in value_counts: 

666 value_counts[value] += 1 

667 new_value = f"{value}_{value_counts[value]}" 

668 else: 

669 value_counts[value] = 1 

670 new_value = value 

671 

672 renamed_dict[key] = new_value 

673 return renamed_dict 

674 

    def rename_zone_usage(self, zone_dict, zone_dict_ifc_names, rename_keys,
                          sim_results_path,
                          rename_duplicates=False):
        """Rename zone usages and build short 'storey-room usage' names.

        Applies the user-supplied rename_keys to the zone usages in
        zone_dict and derives a short display name per zone from the IFC
        storey and room names. The short names are written to
        'short_room_keys.json' in the project's result directory.

        Args:
            zone_dict: maps zone GUID -> usage name (mutated in place)
            zone_dict_ifc_names: maps zone GUID -> dict with at least
                'Name', 'StoreyName' and 'ZoneUsage' entries
            rename_keys: maps original usage names -> display names
            sim_results_path: base path of the simulation results
            rename_duplicates: if True, enumerate duplicate names to
                prevent them from overwriting each other

        Returns:
            (zone_usage, new_zone_names): renamed usage dict and the dict
            of generated short names per GUID.
        """
        new_zone_names = {}
        for key in zone_dict.keys():
            new_name = None
            # prepare room names: take the part after '-' and before '_'
            # (e.g. 'Bldg-012_Office' -> '012')
            if '-' in zone_dict_ifc_names[key]['Name']:
                room_name = zone_dict_ifc_names[key]['Name'].split('-')[
                    1]
                if '_' in room_name:
                    room_name = room_name.split('_')[0]
            else:
                room_name = zone_dict_ifc_names[key]['Name']
            # prepare storey names
            # rename storeys using short versions
            # does not consider all naming conventions yet, needs to be extended
            if '_' in zone_dict_ifc_names[key]['StoreyName']:
                storey_name = \
                    zone_dict_ifc_names[key]['StoreyName'].split(
                        '_')[0]
            else:
                storey_name = zone_dict_ifc_names[key]['StoreyName']
            if storey_name.upper() == 'Erdgeschoss'.upper():
                storey_name = 'EG'
            elif storey_name.upper() == 'Dachgeschoss'.upper():
                # NOTE(review): 'Dachgeschoss' is mapped to 'OG' here —
                # looks like it may have been intended as 'DG'; confirm.
                storey_name = 'OG'
            elif storey_name.upper() == 'UNTERGESCHOSS':
                storey_name = 'UG'
            # generate new name based on storey and space name
            for key2 in rename_keys.keys():
                if zone_dict[key] == key2:
                    zone_dict[key] = rename_keys[key2]
                if zone_dict_ifc_names[key]['ZoneUsage'] == key2:
                    new_name = (f"{storey_name}-{int(room_name):02d}"
                                f" {rename_keys[key2]}")
            # fall back to the raw IFC zone usage when no rename key matched
            if not new_name:
                new_name = (f"{storey_name}-{int(room_name):02d}"
                            f" {zone_dict_ifc_names[key]['ZoneUsage']}")
            # update names
            new_zone_names.update({key: new_name})
        # store new short room keys based on storey + space name
        with open(sim_results_path / self.prj_name / 'short_room_keys.json',
                  'w+') as f:
            json.dump(new_zone_names, f, indent=4)
        # rename duplicate space names (add enumeration) to prevent overwriting
        if rename_duplicates:
            zone_usage = self.rename_duplicates(zone_dict)
            new_zone_names = self.rename_duplicates(new_zone_names)
        else:
            zone_usage = zone_dict
        return zone_usage, new_zone_names

727 

728 @staticmethod 

729 def pmv_plot(df, save_path, file_name): 

730 PlotBEPSResults.plot_dataframe(df, save_path=save_path, 

731 file_name=file_name, 

732 x_axis_title="Date", 

733 y_axis_title="PMV") 

734 

735 def apply_en16798_to_all_zones(self, df, zone_dict, export_path, 

736 use_NA=True): 

737 """Generate EN 16798 diagrams for all thermal zones. 

738 

739 Args: 

740 df: input dataframe 

741 zone_dict: zone name dictionary 

742 export_path: export path for result plots 

743 use_NA: use national appendix (Germany) 

744 

745 Returns: 

746 dataframe holding the adaptive comfort categories, 

747 dataframe holding the comfort categories for occupied hours, 

748 dataframe holding the number of occupied hours per category 

749 """ 

750 if use_NA: 

751 add_NA_str = ' NA (GER)' 

752 else: 

753 add_NA_str = '' 

754 logger.info(f"Plot DIN EN 16798{add_NA_str} diagrams for all zones ...") 

755 

756 cat_analysis = pd.DataFrame() 

757 cat_analysis_occ = pd.DataFrame() 

758 cat_analysis_occ_hours = pd.DataFrame() 

759 for guid, room_name in zone_dict.items(): 

760 if not any(df.filter(like=guid)): 

761 continue 

762 temp_cat_analysis = None 

763 temp_cat_analysis_occ = None 

764 temp_cat_analysis_occ_hours = None 

765 if use_NA: 

766 (temp_cat_analysis, temp_cat_analysis_occ, 

767 temp_cat_analysis_occ_hours) = ( 

768 self.plot_en16798_adaptive_count_NA( 

769 df, guid, room_name, export_path)) 

770 cat_analysis_occ_hours = pd.concat([cat_analysis_occ_hours, 

771 temp_cat_analysis_occ_hours]) 

772 else: 

773 temp_cat_analysis, temp_cat_analysis_occ = ( 

774 self.plot_new_en16798_adaptive_count( 

775 df, guid, room_name, export_path)) 

776 cat_analysis = pd.concat([cat_analysis, temp_cat_analysis]) 

777 cat_analysis_occ = pd.concat([cat_analysis_occ, 

778 temp_cat_analysis_occ]) 

779 return cat_analysis, cat_analysis_occ, cat_analysis_occ_hours 

780 

781 def apply_en16798_to_single_zone(self, df, zone_dict, export_path, 

782 zone_guid, use_NA=True): 

783 """Generate EN 16798 diagrams for a single thermal zones. 

784 

785 Args: 

786 df: input dataframe 

787 zone_dict: zone name dictionary 

788 export_path: export path for result plots 

789 zone_guid: GUID of zone that should be analyzed 

790 use_NA: use national appendix (Germany) 

791 

792 Returns: 

793 dataframe holding the adaptive comfort categories, 

794 dataframe holding the comfort categories for occupied hours, 

795 dataframe holding the number of occupied hours per category 

796 """ 

797 if use_NA: 

798 add_NA_str = ' NA (GER)' 

799 else: 

800 add_NA_str = '' 

801 

802 logger.info(f"Plot DIN EN 16798{add_NA_str} diagram " 

803 f"for zone {zone_guid} ...") 

804 

805 cat_analysis = pd.DataFrame() 

806 cat_analysis_occ = pd.DataFrame() 

807 cat_analysis_occ_hours = pd.DataFrame() 

808 for guid, room_name in zone_dict.items(): 

809 if not guid == zone_guid: 

810 continue 

811 temp_cat_analysis = None 

812 temp_cat_analysis_occ = None 

813 if use_NA: 

814 (temp_cat_analysis, temp_cat_analysis_occ, 

815 temp_cat_analysis_occ_hours) = ( 

816 self.plot_en16798_adaptive_count_NA(df, guid, 

817 room_name + '_' + guid, 

818 export_path)) 

819 cat_analysis_occ_hours = pd.concat([cat_analysis_occ_hours, 

820 temp_cat_analysis_occ_hours]) 

821 else: 

822 temp_cat_analysis, temp_cat_analysis_occ = ( 

823 self.plot_new_en16798_adaptive_count(df, guid, room_name 

824 + '_' + guid, 

825 export_path)) 

826 cat_analysis = pd.concat([cat_analysis, temp_cat_analysis]) 

827 cat_analysis_occ = pd.concat( 

828 [cat_analysis_occ, temp_cat_analysis_occ]) 

829 return cat_analysis, cat_analysis_occ, cat_analysis_occ_hours 

830 

831 @staticmethod 

832 def plot_new_en16798_adaptive_count(df, guid, room_name, export_path): 

833 """Plot EN 16798 diagram for thermal comfort categories for a single 

834 thermal zone. 

835 

836 """ 

837 logger.info(f"Plot DIN EN 16798 diagrams for zone {guid}: {room_name}.") 

838 

839 def is_within_thresholds_cat1_16798(row): 

840 if 10 <= row.iloc[0] <= 30: 

841 y_threshold1 = 0.33 * row.iloc[0] + 18.8 - 3 

842 y_threshold2 = 0.33 * row.iloc[0] + 18.8 + 2 

843 return y_threshold1 <= row.iloc[1] <= y_threshold2 

844 else: 

845 return False 

846 

847 def is_within_thresholds_cat2_16798(row): 

848 if 10 <= row.iloc[0] <= 30: 

849 y_threshold1a = 0.33 * row.iloc[0] + 18.8 - 4 

850 y_threshold1b = 0.33 * row.iloc[0] + 18.8 - 3 

851 y_threshold2a = 0.33 * row.iloc[0] + 18.8 + 2 

852 y_threshold2b = 0.33 * row.iloc[0] + 18.8 + 3 

853 return any([y_threshold1a <= row.iloc[1] <= y_threshold1b, 

854 y_threshold2a <= row.iloc[1] <= y_threshold2b]) 

855 else: 

856 return False 

857 

858 def is_within_thresholds_cat3_16798(row): 

859 if 10 <= row.iloc[0] <= 30: 

860 y_threshold1a = 0.33 * row.iloc[0] + 18.8 - 5 

861 y_threshold1b = 0.33 * row.iloc[0] + 18.8 - 4 

862 y_threshold2a = 0.33 * row.iloc[0] + 18.8 + 3 

863 y_threshold2b = 0.33 * row.iloc[0] + 18.8 + 4 

864 return any([y_threshold1a <= row.iloc[1] <= y_threshold1b, 

865 y_threshold2a <= row.iloc[1] <= y_threshold2b]) 

866 else: 

867 return False 

868 

869 def is_outside_thresholds_16798(row): 

870 if 10 <= row.iloc[0] <= 30: 

871 y_threshold1 = 0.33 * row.iloc[0] + 18.8 - 5 

872 y_threshold2 = 0.33 * row.iloc[0] + 18.8 + 4 

873 return any([y_threshold1 >= row.iloc[1], y_threshold2 

874 <= row.iloc[1]]) 

875 else: 

876 return False 

877 

878 def plot_scatter_en16798(cat1_df, cat2_df, cat3_df, out_df, 

879 path, name): 

880 plt.figure(figsize=(13.2 / INCH, 8.3 / INCH)) 

881 

882 plt.scatter(cat1_df.iloc[:, 0], 

883 cat1_df.iloc[:, 1], 

884 s=0.1, 

885 color='green', marker=".") 

886 plt.scatter(cat2_df.iloc[:, 0], 

887 cat2_df.iloc[:, 1], 

888 s=0.1, 

889 color='orange', marker=".") 

890 plt.scatter(cat3_df.iloc[:, 0], 

891 cat3_df.iloc[:, 1], 

892 s=0.1, 

893 color='red', marker=".") 

894 plt.scatter(out_df.iloc[:, 0], 

895 out_df.iloc[:, 1], 

896 s=0.1, color='blue', label='OUT OF RANGE', marker=".") 

897 coord_cat1_low = [[10, 0.33 * 10 + 18.8 - 3.0], 

898 [30, 0.33 * 30 + 18.8 - 3.0]] 

899 coord_cat1_up = [[10, 0.33 * 10 + 18.8 + 2.0], 

900 [30, 0.33 * 30 + 18.8 + 2.0]] 

901 cc1lx, cc1ly = zip(*coord_cat1_low) 

902 cc1ux, cc1uy = zip(*coord_cat1_up) 

903 plt.plot(cc1lx, cc1ly, linestyle='dashed', color='green', 

904 label='DIN EN 16798-1: Thresholds Category I') 

905 plt.plot(cc1ux, cc1uy, linestyle='dashed', color='green') 

906 coord_cat2_low = [[10, 0.33 * 10 + 18.8 - 4.0], 

907 [30, 0.33 * 30 + 18.8 - 4.0]] 

908 coord_cat2_up = [[10, 0.33 * 10 + 18.8 + 3.0], 

909 [30, 0.33 * 30 + 18.8 + 3.0]] 

910 cc2lx, cc2ly = zip(*coord_cat2_low) 

911 cc2ux, cc2uy = zip(*coord_cat2_up) 

912 plt.plot(cc2lx, cc2ly, linestyle='dashed', color='orange', 

913 label='DIN EN 16798-1: Thresholds Category II') 

914 plt.plot(cc2ux, cc2uy, linestyle='dashed', color='orange') 

915 

916 coord_cat3_low = [[10, 0.33 * 10 + 18.8 - 5.0], 

917 [30, 0.33 * 30 + 18.8 - 5.0]] 

918 coord_cat3_up = [[10, 0.33 * 10 + 18.8 + 4.0], 

919 [30, 0.33 * 30 + 18.8 + 4.0]] 

920 cc3lx, cc3ly = zip(*coord_cat3_low) 

921 cc3ux, cc3uy = zip(*coord_cat3_up) 

922 plt.plot(cc3lx, cc3ly, linestyle='dashed', color='red', 

923 label='DIN EN 16798-1: Thresholds Category III') 

924 plt.plot(cc3ux, cc3uy, linestyle='dashed', color='red') 

925 

926 # Customize plot 

927 plt.xlabel('Running Mean Outdoor Temperature [\u00B0C]', 

928 fontsize=8) 

929 plt.ylabel('Operative Temperature [\u00B0C]', fontsize=8) 

930 plt.xlim([lim_min, lim_max]) 

931 plt.ylim([16.5, 35.5]) 

932 plt.grid() 

933 lgnd = plt.legend(loc="upper left", scatterpoints=1, fontsize=8) 

934 plt.savefig( 

935 path / str('DIN_EN_16798_new_' + name.replace('/','_') + '.pdf')) 

936 

937 lim_min = 10 

938 lim_max = 30 

939 

940 ot = df['operative_air_temp_rooms_' + guid] 

941 out_temp = df['site_outdoor_air_temp'] 

942 n_persons_df = df['n_persons_rooms_' + guid] 

943 

944 merged_df = pd.merge(out_temp, ot, left_index=True, right_index=True) 

945 merged_df = merged_df.map(lambda x: x.m) 

946 filtered_df_cat1 = merged_df[ 

947 merged_df.apply(is_within_thresholds_cat1_16798, 

948 axis=1)] 

949 filtered_df_cat2 = merged_df[ 

950 merged_df.apply(is_within_thresholds_cat2_16798, 

951 axis=1)] 

952 filtered_df_cat3 = merged_df[ 

953 merged_df.apply(is_within_thresholds_cat3_16798, 

954 axis=1)] 

955 filtered_df_outside = merged_df[ 

956 merged_df.apply(is_outside_thresholds_16798, 

957 axis=1)] 

958 common_index_c1 = filtered_df_cat1.index.intersection( 

959 n_persons_df.index) 

960 common_index_c2 = filtered_df_cat2.index.intersection( 

961 n_persons_df.index) 

962 common_index_c3 = filtered_df_cat3.index.intersection( 

963 n_persons_df.index) 

964 common_index_out = filtered_df_outside.index.intersection( 

965 n_persons_df.index) 

966 

967 filter_occ_cat1 = filtered_df_cat1.loc[common_index_c1][ 

968 n_persons_df.loc[common_index_c1] > 0] 

969 filter_occ_cat2 = filtered_df_cat2.loc[common_index_c2][ 

970 n_persons_df.loc[common_index_c2] > 0] 

971 filter_occ_cat3 = filtered_df_cat3.loc[common_index_c3][ 

972 n_persons_df.loc[common_index_c3] > 0] 

973 filter_occ_out = filtered_df_outside.loc[common_index_out][ 

974 n_persons_df.loc[common_index_out] > 0] 

975 cat_analysis_dict = { 

976 'ROOM': room_name, 

977 'CAT1': len(filtered_df_cat1), 

978 'CAT2': len(filtered_df_cat2), 

979 'CAT3': len(filtered_df_cat3), 

980 'OUT': len(filtered_df_outside) 

981 } 

982 cat_analysis_df = pd.DataFrame(cat_analysis_dict, index=[0]) 

983 cat_analysis_occ_dict = { 

984 'ROOM': room_name, 

985 'CAT1': len(filter_occ_cat1), 

986 'CAT2': len(filter_occ_cat2), 

987 'CAT3': len(filter_occ_cat3), 

988 'OUT': len(filter_occ_out) 

989 } 

990 cat_analysis_occ_df = pd.DataFrame(cat_analysis_occ_dict, index=[0]) 

991 

992 analysis_file = export_path / 'DIN_EN_16798_analysis.csv' 

993 cat_analysis_df.to_csv(analysis_file, mode='a+', header=False, sep=';') 

994 analysis_occ_file = export_path / 'DIN_EN_16798_analysis_occ.csv' 

995 cat_analysis_occ_df.to_csv(analysis_occ_file, mode='a+', header=False, 

996 sep=';') 

997 

998 plot_scatter_en16798(filtered_df_cat1, filtered_df_cat2, 

999 filtered_df_cat3, filtered_df_outside, 

1000 export_path, room_name) 

1001 plot_scatter_en16798(filter_occ_cat1, filter_occ_cat2, filter_occ_cat3, 

1002 filtered_df_outside, export_path, 

1003 room_name + '_occ') 

1004 return cat_analysis_df, cat_analysis_occ_df 

1005 

    def plot_en16798_adaptive_count_NA(self, df, guid, room_name, export_path):
        """Plot EN 16798 German National Appendix for a single thermal zone.

        Apply limits of German National Appendix: the acceptability band is
        18 + 0.25 * t_out within 16..32 degC outdoor temperature, clamped to
        26 degC above and 22 degC below that range, each with a +/-2 K
        acceptability range and an additional +/-2 K tolerance range.
        Results are appended to CSV files, two scatter plots (all hours and
        occupied hours) are saved, and a warning is logged when the occupied
        over-/under-temperature degree hours exceed 1 % of the acceptable
        limit.

        Args:
            df: dataframe with simulation results (pint quantities)
            guid: GUID of the analyzed thermal zone
            room_name: room name used for plot and CSV labels
            export_path: directory for resulting plots and CSV files

        Returns:
            cat_analysis_df: hour counts per band (all hours),
            cat_analysis_occ_df: hour counts per band (occupied hours),
            cat_analysis_hours_occ_df: degree hours per band (occupied
            hours, optionally occupancy-weighted)
        """
        logger.info(f"Plot DIN EN 16798 diagram (NA) for zone {guid}:"
                    f" {room_name}.")

        # Each classifier below receives a row with
        # row.iloc[0] = outdoor air temperature and
        # row.iloc[1] = operative room temperature. With calc_K_hours=True it
        # returns the temperature excess in K (degree hours) instead of a
        # boolean membership flag.
        def is_within_thresholds_16798_NA(row, calc_K_hours=False):
            if 16 <= row.iloc[0] <= 32:
                y_threshold1 = 18 + 0.25 * row.iloc[0] - 2
                y_threshold2 = 18 + 0.25 * row.iloc[0] + 2
            elif row.iloc[0] > 32:
                y_threshold1 = 26 - 2
                y_threshold2 = 26 + 2
            elif row.iloc[0] < 16:
                y_threshold1 = 22 - 2
                y_threshold2 = 22 + 2
            else:
                return False
            if not calc_K_hours:
                return y_threshold1 <= row.iloc[1] <= y_threshold2
            else:
                # Hours inside the acceptability band contribute no excess.
                return 0

        def is_above_thresholds_16798_NA(row, calc_K_hours=False):
            # Between the upper acceptability limit and the +2 K tolerance.
            if 16 <= row.iloc[0] <= 32:
                y_threshold1 = 18 + 0.25 * row.iloc[0] + 2
                y_threshold2 = 18 + 0.25 * row.iloc[0] + 4
            elif row.iloc[0] > 32:
                y_threshold1 = 26 + 2
                y_threshold2 = 26 + 4
            elif row.iloc[0] < 16:
                y_threshold1 = 22 + 2
                y_threshold2 = 22 + 4
            else:
                return False
            if not calc_K_hours:
                return y_threshold1 < row.iloc[1] <= y_threshold2
            else:
                if y_threshold1 < row.iloc[1] <= y_threshold2:
                    return abs(y_threshold1 - row.iloc[1])
                else:
                    return 0

        def is_below_thresholds_16798_NA(row, calc_K_hours=False):
            # Between the lower acceptability limit and the -2 K tolerance.
            if 16 <= row.iloc[0] <= 32:
                y_threshold1 = 18 + 0.25 * row.iloc[0] - 2
                y_threshold2 = 18 + 0.25 * row.iloc[0] - 4
            elif row.iloc[0] > 32:
                y_threshold1 = 26 - 2
                y_threshold2 = 26 - 4
            elif row.iloc[0] < 16:
                y_threshold1 = 22 - 2
                y_threshold2 = 22 - 4
            else:
                return False
            if not calc_K_hours:
                return y_threshold1 > row.iloc[1] >= y_threshold2
            else:
                if y_threshold1 > row.iloc[1] >= y_threshold2:
                    return abs(y_threshold1 - row.iloc[1])
                else:
                    return 0

        def is_out_above_thresholds_16798_NA(row, calc_K_hours=False):
            # Beyond the +2 K tolerance range.
            if 16 <= row.iloc[0] <= 32:
                y_threshold2 = 18 + 0.25 * row.iloc[0] + 4
            elif row.iloc[0] > 32:
                y_threshold2 = 26 + 4
            elif row.iloc[0] < 16:
                y_threshold2 = 22 + 4
            else:
                return False
            if not calc_K_hours:
                return row.iloc[1] > y_threshold2
            else:
                if row.iloc[1] > y_threshold2:
                    return abs(y_threshold2 - row.iloc[1])
                else:
                    return 0

        def is_out_below_thresholds_16798_NA(row, calc_K_hours=False):
            # Beyond the -2 K tolerance range.
            if 16 <= row.iloc[0] <= 32:
                y_threshold2 = 18 + 0.25 * row.iloc[0] - 4
            elif row.iloc[0] > 32:
                y_threshold2 = 26 - 4
            elif row.iloc[0] < 16:
                y_threshold2 = 22 - 4
            else:
                return False
            if not calc_K_hours:
                return row.iloc[1] < y_threshold2
            else:
                if row.iloc[1] < y_threshold2:
                    return abs(y_threshold2 - row.iloc[1])
                else:
                    return 0

        def plot_scatter_en16798_three_colors(in_df, above_df, below_df,
                                              out_above_df,
                                              out_below_df,
                                              path, name):
            """Scatter plot with only three colors (in, within 2K, above/below.)

            """
            plt.figure(figsize=(10 / INCH, 8 / INCH))

            plt.scatter(in_df.iloc[:, 0],
                        in_df.iloc[:, 1],
                        s=0.05,
                        color='green', marker=".", label='Within '
                                                         'acceptability range')
            # Above/below bands share one color ('darkorange'), out-of-range
            # bands share another ('crimson'); only the first of each pair is
            # labeled so the legend shows three entries.
            plt.scatter(above_df.iloc[:, 0],
                        above_df.iloc[:, 1],
                        s=0.05,
                        color='darkorange', marker=".", label='Within 2K '
                                                              'range')
            plt.scatter(out_above_df.iloc[:, 0],
                        out_above_df.iloc[:, 1],
                        s=0.05,
                        color='crimson', marker=".", label='Out of 2K range')
            plt.scatter(below_df.iloc[:, 0],
                        below_df.iloc[:, 1],
                        s=0.05,
                        color='darkorange', marker=".")
            plt.scatter(out_below_df.iloc[:, 0],
                        out_below_df.iloc[:, 1],
                        s=0.05,
                        color='crimson', marker=".")
            # Acceptability band polyline: clamped below 16 degC and above
            # 32 degC outdoor temperature.
            coord_cat1_low = [
                [10, 22 - 2],
                [16, 18 + 0.25 * 16 - 2],
                [32, 18 + 0.25 * 32 - 2],
                [36, 26 - 2]]
            coord_cat1_up = [
                [10, 22 + 2],
                [16, 18 + 0.25 * 16 + 2],
                [32, 18 + 0.25 * 32 + 2],
                [36, 26 + 2]]
            cc1lx, cc1ly = zip(*coord_cat1_low)
            cc1ux, cc1uy = zip(*coord_cat1_up)
            plt.plot(cc1lx, cc1ly, linestyle='dashed', color='darkorange',
                     label='Acceptability range',
                     linewidth=0.8, alpha=0.5)
            plt.plot(cc1ux, cc1uy, linestyle='dashed', color='darkorange',
                     linewidth=0.8, alpha=0.5)
            # Additional 2 K tolerance band polyline.
            coord_2Kmax_low = [
                [10, 22 - 4],
                [16, 18 + 0.25 * 16 - 4],
                [32, 18 + 0.25 * 32 - 4],
                [36, 26 - 4]]
            coord_2Kmax_up = [
                [10, 22 + 4],
                [16, 18 + 0.25 * 16 + 4],
                [32, 18 + 0.25 * 32 + 4],
                [36, 26 + 4]]
            cc2lx, cc2ly = zip(*coord_2Kmax_low)
            cc2ux, cc2uy = zip(*coord_2Kmax_up)
            plt.plot(cc2lx, cc2ly, linestyle='dashed', color='crimson',
                     linewidth=0.8,
                     label='Additional 2K range', alpha=0.5)
            plt.plot(cc2ux, cc2uy, linestyle='dashed', color='crimson',
                     linewidth=0.8, alpha=0.5)

            # Customize plot
            plt.xlabel('Hourly Mean Outdoor Temperature [\u00B0C]',
                       fontsize=8)
            plt.ylabel('Operative Temperature [\u00B0C]', fontsize=8)
            plt.tick_params(labelsize=8)
            plt.xlim([lim_min, lim_max])
            plt.ylim([17, 33])
            plt.grid()
            plt.subplots_adjust(right=0.9, bottom=0.3,
                                left=0.1)  # Adjust right side to
            # make
            handles, labels = plt.gca().get_legend_handles_labels()
            wrapped_labels = ['\n'.join(wrap(l, 30)) for l in labels]
            lgnd = plt.legend(
                labels=wrapped_labels, handles=handles, loc="upper left",
                fontsize=8,
                bbox_to_anchor=(-0.05, -0.2),
                scatterpoints=3, markerscale=5,
                handletextpad=0.3,
                frameon=False,
                borderaxespad=0.5,
                labelspacing=0.5, ncol=2)
            plt.savefig(
                path / str('DIN_EN_16798_NA_' + name.replace('/', '_')
                           + '.pdf'))

        # NOTE(review): this five-color variant is currently not called
        # anywhere in this method (only the three-color variant above is
        # used); it writes to the same output file name.
        def plot_scatter_en16798(in_df, above_df, below_df, out_above_df,
                                 out_below_df,
                                 path, name):
            """Scatter plot with five colors: in, within +2k, within -2K,
            above 2K, below -2K.
            """
            plt.figure(figsize=(13.2 / INCH, 8.3 / INCH))

            plt.scatter(in_df.iloc[:, 0],
                        in_df.iloc[:, 1],
                        s=0.15,
                        color='green', marker=".", label='within range of DIN '
                                                         'EN '
                                                         '16798-1 NA (GER)')
            plt.scatter(above_df.iloc[:, 0],
                        above_df.iloc[:, 1],
                        s=0.15,
                        color='darkorange', marker=".", label='above range of '
                                                              'DIN '
                                                              'EN 16798-1 NA ('
                                                              'GER), within 2K '
                                                              'range')
            plt.scatter(out_above_df.iloc[:, 0],
                        out_above_df.iloc[:, 1],
                        s=0.15,
                        color='red', marker=".", label='above range of '
                                                       'DIN EN 16798-1 NA ('
                                                       'GER), out of 2K range')
            plt.scatter(below_df.iloc[:, 0],
                        below_df.iloc[:, 1],
                        s=0.15,
                        color='cyan', marker=".", label='below range of DIN '
                                                        'EN 16798-1 NA ('
                                                        'GER), within 2K range')
            plt.scatter(out_below_df.iloc[:, 0],
                        out_below_df.iloc[:, 1],
                        s=0.15,
                        color='blue', marker=".", label='below range of DIN '
                                                        'EN 16798-1 NA ('
                                                        'GER), out of 2K range')
            coord_cat1_low = [
                [10, 22 - 2],
                [16, 18 + 0.25 * 16 - 2],
                [32, 18 + 0.25 * 32 - 2],
                [36, 26 - 2]]
            coord_cat1_up = [
                [10, 22 + 2],
                [16, 18 + 0.25 * 16 + 2],
                [32, 18 + 0.25 * 32 + 2],
                [36, 26 + 2]]
            cc1lx, cc1ly = zip(*coord_cat1_low)
            cc1ux, cc1uy = zip(*coord_cat1_up)
            plt.plot(cc1lx, cc1ly, linestyle='dashed', color='goldenrod',
                     label='DIN EN 16798-1 NA (GER): Acceptability range',
                     linewidth=1)
            plt.plot(cc1ux, cc1uy, linestyle='dashed', color='goldenrod',
                     linewidth=1)
            coord_2Kmax_low = [
                [10, 22 - 4],
                [16, 18 + 0.25 * 16 - 4],
                [32, 18 + 0.25 * 32 - 4],
                [36, 26 - 4]]
            coord_2Kmax_up = [
                [10, 22 + 4],
                [16, 18 + 0.25 * 16 + 4],
                [32, 18 + 0.25 * 32 + 4],
                [36, 26 + 4]]
            cc2lx, cc2ly = zip(*coord_2Kmax_low)
            cc2ux, cc2uy = zip(*coord_2Kmax_up)
            plt.plot(cc2lx, cc2ly, linestyle='dashed', color='darkred',
                     linewidth=1,
                     label='DIN EN 16798-1 NA (GER): Limit for additional 2K '
                           'range')
            plt.plot(cc2ux, cc2uy, linestyle='dashed', color='darkred',
                     linewidth=1)

            # Customize plot
            plt.xlabel('Hourly Mean Outdoor Temperature [\u00B0C]',
                       fontsize=8)
            plt.ylabel('Operative Temperature [\u00B0C]', fontsize=8)
            plt.tick_params(labelsize=8)
            plt.xlim([lim_min, lim_max])
            plt.ylim([18, 34])
            plt.grid()
            plt.subplots_adjust(right=0.65, bottom=0.12,
                                left=0.1)  # Adjust right side to
            # make
            handles, labels = plt.gca().get_legend_handles_labels()
            wrapped_labels = ['\n'.join(wrap(l, 25)) for l in labels]
            lgnd = plt.legend(
                labels=wrapped_labels, handles=handles, loc="center left",
                fontsize=8,
                bbox_to_anchor=(1, 0.45), scatterpoints=3, markerscale=5,
                frameon=False,
                labelspacing=0.7)
            plt.savefig(
                path / str('DIN_EN_16798_NA_' + name.replace('/', '_')
                           + '.pdf'))

        # Axis limits for the scatter plots (outdoor temperature range).
        lim_min = 10
        lim_max = 36

        ot = df['operative_air_temp_rooms_' + guid]
        out_temp = df['site_outdoor_air_temp']
        n_persons_df = df['n_persons_rooms_' + guid]
        # Strip pint units; scaling factor normalizes occupancy to [0, 1]
        # for the optional occupancy weighting below.
        n_persons_df = n_persons_df.map(lambda x: x.m)
        n_persons_scaling_df = n_persons_df / n_persons_df.max()

        merged_df = pd.merge(out_temp, ot, left_index=True, right_index=True)
        merged_df = merged_df.map(lambda x: x.m)
        # Boolean classification of every hour into the five bands.
        filtered_df_within_NA = merged_df[
            merged_df.apply(is_within_thresholds_16798_NA,
                            axis=1)]
        filtered_df_above_NA = merged_df[
            merged_df.apply(is_above_thresholds_16798_NA,
                            axis=1)]
        filtered_df_out_above_NA = merged_df[
            merged_df.apply(is_out_above_thresholds_16798_NA,
                            axis=1)]
        filtered_df_below_NA = merged_df[
            merged_df.apply(is_below_thresholds_16798_NA,
                            axis=1)]
        filtered_df_out_below_NA = merged_df[
            merged_df.apply(is_out_below_thresholds_16798_NA,
                            axis=1)]
        # Degree hours (K) of threshold exceedance per band.
        filtered_df_within_NA_hours = (
            filtered_df_within_NA.apply(is_within_thresholds_16798_NA,
                                        calc_K_hours=True, axis=1))
        filtered_df_above_NA_hours = (
            filtered_df_above_NA.apply(is_above_thresholds_16798_NA,
                                       calc_K_hours=True, axis=1))
        filtered_df_out_above_NA_hours = (
            filtered_df_out_above_NA.apply(is_out_above_thresholds_16798_NA,
                                           calc_K_hours=True, axis=1))
        filtered_df_below_NA_hours = (
            filtered_df_below_NA.apply(is_below_thresholds_16798_NA,
                                       calc_K_hours=True, axis=1))
        filtered_df_out_below_NA_hours = (
            filtered_df_out_below_NA.apply(is_out_below_thresholds_16798_NA,
                                           calc_K_hours=True,
                                           axis=1))
        common_index_within = filtered_df_within_NA.index.intersection(
            n_persons_df.index)
        common_index_above = filtered_df_above_NA.index.intersection(
            n_persons_df.index)
        common_index_out_above = filtered_df_out_above_NA.index.intersection(
            n_persons_df.index)
        common_index_below = filtered_df_below_NA.index.intersection(
            n_persons_df.index)
        common_index_out_below = filtered_df_out_below_NA.index.intersection(
            n_persons_df.index)

        # Restrict every band to occupied hours (at least one person).
        filtered_df_within_NA_occ = filtered_df_within_NA.loc[
            common_index_within][n_persons_df.loc[common_index_within] > 0]
        filtered_df_above_NA_occ = filtered_df_above_NA.loc[common_index_above][
            n_persons_df.loc[common_index_above] > 0]
        filtered_df_out_above_NA_occ = filtered_df_out_above_NA.loc[
            common_index_out_above][
            n_persons_df.loc[common_index_out_above] > 0]
        filtered_df_below_NA_occ = filtered_df_below_NA.loc[common_index_below][
            n_persons_df.loc[common_index_below] > 0]
        filtered_df_out_below_NA_occ = filtered_df_out_below_NA.loc[
            common_index_out_below][
            n_persons_df.loc[common_index_out_below] > 0]
        filtered_df_within_NA_hours_occ = filtered_df_within_NA_hours.loc[
            common_index_within][n_persons_df.loc[common_index_within] > 0]
        filtered_df_above_NA_hours_occ = filtered_df_above_NA_hours.loc[
            common_index_above][
            n_persons_df.loc[common_index_above] > 0]
        filtered_df_out_above_NA_hours_occ = filtered_df_out_above_NA_hours.loc[
            common_index_out_above][
            n_persons_df.loc[common_index_out_above] > 0]
        filtered_df_below_NA_hours_occ = filtered_df_below_NA_hours.loc[
            common_index_below][
            n_persons_df.loc[common_index_below] > 0]
        filtered_df_out_below_NA_hours_occ = filtered_df_out_below_NA_hours.loc[
            common_index_out_below][
            n_persons_df.loc[common_index_out_below] > 0]
        # Optionally weight the occupied degree hours by relative occupancy.
        if self.playground.sim_settings.comfort_occupancy_weighting:
            filtered_df_within_NA_hours_occ = (
                filtered_df_within_NA_hours_occ * n_persons_scaling_df.loc[
                    filtered_df_within_NA_hours_occ.index])
            filtered_df_above_NA_hours_occ = (
                filtered_df_above_NA_hours_occ * n_persons_scaling_df.loc[
                    filtered_df_above_NA_hours_occ.index])
            filtered_df_out_above_NA_hours_occ = (
                filtered_df_out_above_NA_hours_occ * n_persons_scaling_df.loc[
                    filtered_df_out_above_NA_hours_occ.index])
            filtered_df_below_NA_hours_occ = (
                filtered_df_below_NA_hours_occ * n_persons_scaling_df.loc[
                    filtered_df_below_NA_hours_occ.index])
            filtered_df_out_below_NA_hours_occ = (
                filtered_df_out_below_NA_hours_occ * n_persons_scaling_df.loc[
                    filtered_df_out_below_NA_hours_occ.index])

        cat_analysis_dict = {
            'ROOM': room_name,
            'total': 8760,
            'within': len(filtered_df_within_NA),
            'above': len(filtered_df_above_NA),
            'below': len(filtered_df_below_NA),
            'out above': len(filtered_df_out_above_NA),
            'out below': len(filtered_df_out_below_NA)
        }
        cat_analysis_hours_dict = {
            'ROOM': room_name,
            'total': 8760*2,
            'within': filtered_df_within_NA_hours.sum(),
            'above': filtered_df_above_NA_hours.sum(),
            'below': filtered_df_below_NA_hours.sum(),
            'out above': filtered_df_out_above_NA_hours.sum(),
            'out below': filtered_df_out_below_NA_hours.sum()
        }
        # acceptable over-temperature hours
        occupied_2K_hours = {
            'ROOM': room_name,
            'total': len(n_persons_df[n_persons_df > 0]),
            'within': 8760 * 2,
            # random choice, over-temperature hours are zero
            'above': len(n_persons_df[n_persons_df > 0]) * 2,
            'below': len(n_persons_df[n_persons_df > 0]) * 2,
            'out above': 0,
            'out below': 0
        }

        cat_analysis_df = pd.DataFrame(cat_analysis_dict, index=[0])
        cat_analysis_hours_df = pd.DataFrame(cat_analysis_hours_dict, index=[0])
        cat_analysis_occ_dict = {
            'ROOM': room_name,
            'total': len(n_persons_df[n_persons_df > 0]),
            'within': len(filtered_df_within_NA_occ),
            'above': len(filtered_df_above_NA_occ),
            'below': len(filtered_df_below_NA_occ),
            'out above': len(filtered_df_out_above_NA_occ),
            'out below': len(filtered_df_out_below_NA_occ)
        }
        cat_analysis_occ_hours_dict = {
            'ROOM': room_name,
            'total': len(n_persons_df[n_persons_df > 0]),
            'within': filtered_df_within_NA_hours_occ.sum(),
            'above': filtered_df_above_NA_hours_occ.sum(),
            'below': filtered_df_below_NA_hours_occ.sum(),
            'out above': filtered_df_out_above_NA_hours_occ.sum(),
            'out below': filtered_df_out_below_NA_hours_occ.sum()
        }
        cat_analysis_occ_df = pd.DataFrame(cat_analysis_occ_dict, index=[0])
        cat_analysis_hours_occ_df = pd.DataFrame(cat_analysis_occ_hours_dict,
                                                 index=[0])
        # Comfort test: occupied degree hours per band must stay below 1 % of
        # the acceptable limit (tolerance 1e-4 against float noise).
        failed = False
        failing_reasons = dict()
        for key, value in occupied_2K_hours.items():
            if key in ['ROOM', 'total']:
                continue
            if (0.01 * occupied_2K_hours[key] - cat_analysis_occ_hours_dict[
                    key]) < -1e-4:
                failed = True
                if occupied_2K_hours[key] > 0:
                    failing_reasons.update(
                        {key: str(round(100*(cat_analysis_occ_hours_dict[key] /
                                             occupied_2K_hours[key]), 2))+' %'})
                else:
                    failing_reasons.update({key: '100 %'})
        if failed:
            logger.warning(f'Adaptive thermal comfort test failed for space '
                           f'{room_name} due to exceeded limits in '
                           f'{failing_reasons}.')
        # Append this zone's results to the shared per-run CSV files.
        analysis_file = export_path / 'DIN_EN_16798_NA_analysis.csv'
        cat_analysis_df.to_csv(analysis_file, mode='a+', header=False, sep=';')
        analysis_hours_file = export_path / 'DIN_EN_16798_NA_hours_analysis.csv'
        cat_analysis_hours_df.to_csv(analysis_hours_file, mode='a+',
                                     header=False,
                                     sep=';')
        analysis_occ_file = export_path / 'DIN_EN_16798_NA_analysis_occ.csv'
        cat_analysis_occ_df.to_csv(analysis_occ_file, mode='a+', header=False,
                                   sep=';')
        analysis_hours_occ_file = (export_path /
                                   'DIN_EN_16798_NA_hours_analysis_occ.csv')
        cat_analysis_hours_occ_df.to_csv(analysis_hours_occ_file, mode='a+',
                                         header=False, sep=';')

        plot_scatter_en16798_three_colors(filtered_df_within_NA,
                                          filtered_df_above_NA,
                                          filtered_df_below_NA,
                                          filtered_df_out_above_NA,
                                          filtered_df_out_below_NA,
                                          export_path, room_name)
        plot_scatter_en16798_three_colors(filtered_df_within_NA_occ,
                                          filtered_df_above_NA_occ,
                                          filtered_df_below_NA_occ,
                                          filtered_df_out_above_NA_occ,
                                          filtered_df_out_below_NA_occ,
                                          export_path,
                                          room_name + '_occ')
        return cat_analysis_df, cat_analysis_occ_df, cat_analysis_hours_occ_df

1487 

1488 @staticmethod 

1489 def table_bar_plot_16798(df, export_path, tag='', normalize=True, 

1490 unit='', unit_name='', y_scale='linear'): 

1491 """Create bar plot with a table below for EN 16798 thermal comfort. 

1492 

1493 This function creates a bar plot with a table below along with the 

1494 thermal comfort categories according to EN 16798. This table 

1495 considers all hours of the day, not only the occupancy hours. 

1496 

1497 """ 

1498 # with columns: 'ROOM', 'CAT1', 'CAT2', 'CAT3', 'OUT' 

1499 logger.info(f"Plot DIN EN 16798 table bar plot all zones.") 

1500 if tag: 

1501 tag = '_' + tag 

1502 rename_columns = { 

1503 'CAT1': 'CAT I', 

1504 'CAT2': 'CAT II', 

1505 'CAT3': 'CAT III', 

1506 'OUT': u'> CAT III', 

1507 # Add more entries for other columns 

1508 } 

1509 

1510 # Rename the columns of the DataFrame using the dictionary 

1511 df.rename(columns=rename_columns, inplace=True) 

1512 

1513 # Set 'ROOM' column as the index 

1514 df.set_index('ROOM', inplace=True) 

1515 sorted_df = df.sort_index(axis=0) 

1516 

1517 if normalize: 

1518 row_sums = sorted_df.loc[:, sorted_df.columns != 'total'].sum(axis=1) 

1519 # Create a new DataFrame by dividing the original DataFrame by the row 

1520 # sums 

1521 normalized_df = sorted_df.loc[:, sorted_df.columns != 'total'].div(row_sums, axis=0) 

1522 normalized_df = normalized_df * 100 

1523 else: 

1524 normalized_df = sorted_df.loc[:, sorted_df.columns != 'total'] 

1525 # normalized_df = normalized_df.sort_index(axis=0) 

1526 if normalize: 

1527 fig, ax = plt.subplots( 

1528 figsize=(0.5*len(normalized_df.index)+2, 12)) # Adjust figure 

1529 # size 

1530 else: 

1531 fig, ax = plt.subplots( 

1532 figsize=(1.05*len(normalized_df.index)+2, 12)) # Adjust figure 

1533 # size 

1534 # to allow more space 

1535 

1536 x_pos = np.arange(len(set(normalized_df.index))) * 0.8 

1537 bar_width = 0.35 

1538 bottom = np.zeros(len(normalized_df.index)) 

1539 

1540 # Create the bar chart 

1541 for i, col in enumerate(normalized_df.columns): 

1542 if col == 'total': 

1543 continue 

1544 color = 'purple' 

1545 if col == 'CAT I': 

1546 color = 'green' 

1547 elif col == 'CAT II': 

1548 color = 'blue' 

1549 elif col == 'CAT III': 

1550 color = 'orange' 

1551 elif col == u'> CAT III': 

1552 color = 'red' 

1553 elif col == 'within': 

1554 color = 'green' 

1555 elif col == 'above': 

1556 color = 'orange' 

1557 elif col == 'below': 

1558 color = 'cyan' 

1559 elif col == 'out above': 

1560 color = 'red' 

1561 elif col == 'out below': 

1562 color = 'blue' 

1563 ax.bar(normalized_df.index, normalized_df[col], width=-bar_width, 

1564 label=col, align='edge', 

1565 bottom=bottom, color=color) 

1566 bottom += normalized_df[col] 

1567 

1568 # Set consistent font size for all elements 

1569 common_fontsize = 11 

1570 if normalize: 

1571 ax.set_ylabel(u'Hours per category [%]', fontsize=common_fontsize) 

1572 else: 

1573 ax.set_ylabel(f"{unit_name} [{unit}]", fontsize=common_fontsize) 

1574 

1575 ax.tick_params(axis='y', 

1576 labelsize=common_fontsize) # Match font size for y-axis ticks 

1577 ax.tick_params(axis='x', labelrotation=90) 

1578 if normalize: 

1579 plt.ylim([0, 100]) 

1580 else: 

1581 # Set the y-axis to logarithmic scale 

1582 ax.set_yscale(y_scale) 

1583 plt.xlim([-bar_width / 2 - 0.5, 

1584 len(normalized_df.index) - bar_width / 2 - 0.5]) 

1585 plt.xticks([]) # Remove x-ticks for table 

1586 if normalize: 

1587 formatted_df = normalized_df.apply( 

1588 lambda x: x.map(lambda y: f'{y:.1f}')) 

1589 else: 

1590 def format_value(x, index): 

1591 total = sorted_df.loc[index, "total"] 

1592 return f'{x:.1f} ({(x / (total * 2)) * 100:.1f}%)' 

1593 

1594 formatted_df = normalized_df.apply( 

1595 lambda row: row.apply(lambda x: format_value(x, row.name)), 

1596 axis=1) 

1597 cell_text = [formatted_df.sort_index(axis=0)[column] for column in formatted_df.columns] 

1598 

1599 if normalize: 

1600 # Create the table 

1601 table = plt.table(cellText=cell_text, 

1602 rowLabels=formatted_df.columns + u' [%]', 

1603 colLabels=formatted_df.index, 

1604 cellLoc='center', 

1605 loc='bottom') 

1606 else: 

1607 cell_text.insert(0, sorted_df['total']) 

1608 table = plt.table(cellText=cell_text, 

1609 rowLabels=[f'occupied [{ureg.hour:~P}]'] + 

1610 list( 

1611 formatted_df.columns + f' [' 

1612 f'{unit}]'), 

1613 colLabels=formatted_df.index, 

1614 cellLoc='center', 

1615 loc='bottom') 

1616 

1617 # Ensure consistent font size for the table 

1618 table.auto_set_font_size(False) 

1619 table.set_fontsize(common_fontsize) 

1620 

1621 # Dynamically calculate the required height for rotated text 

1622 renderer = fig.canvas.get_renderer() 

1623 max_text_height = 0 # Track the maximum height 

1624 table.scale(1.0, 5.0) # Adjust scaling for overall table size 

1625 

1626 for i in range(len(cell_text[0])): 

1627 cell = table[(0, i)] # Access header cells 

1628 text = cell.get_text() 

1629 text.set_rotation(90) # Rotate the text by 90 degrees 

1630 

1631 # Measure text size dynamically 

1632 fig.canvas.draw() # Update layout for accurate text size 

1633 bbox = text.get_window_extent(renderer=renderer) 

1634 text_height = bbox.height / 200 # Convert 

1635 # height 

1636 # to figure-relative units 

1637 max_text_height = max(max_text_height, text_height) 

1638 

1639 # Apply a uniform height to all header cells 

1640 for i in range(len(cell_text[0])): 

1641 cell = table[(0, i)] 

1642 cell.set_height( 

1643 max_text_height * 1.05) # Add a slight margin factor 

1644 

1645 # Scale table rows and columns 

1646 

1647 # Adjust the layout to fit the table properly 

1648 fig.subplots_adjust( 

1649 bottom=0.7) # Allocate space below the plot for the table 

1650 

1651 # Adjust the legend placement BELOW the table 

1652 legend_y_offset = -0.8 - max_text_height # Dynamically calculate offset 

1653 lgnd = plt.legend(framealpha=0.0, prop={'size': common_fontsize}, 

1654 loc='lower center', 

1655 bbox_to_anchor=(0.5, legend_y_offset), 

1656 ncol=len(normalized_df.columns)) # Adjust legend 

1657 # position 

1658 

1659 # Save the figure 

1660 fig.savefig(export_path / f'DIN_EN_16798{tag}_all_zones_bar_table.pdf', 

1661 bbox_inches='tight', 

1662 bbox_extra_artists=(lgnd, table)) 

1663 

1664 @staticmethod 

1665 def visualize_calendar(calendar_df, export_path, year='', 

1666 color_only=False, save=True, 

1667 save_as='', 

1668 construction='', skip_legend=False, 

1669 add_title=False, figsize=[7.6, 8], zone_dict=None, 

1670 resample_type='mean', guid=''): 

1671 

1672 logger.info(f"Plot PMV calendar plot for zone {calendar_df.columns[0]}") 

1673 

1674 def visualize(zone_dict): 

1675 resampled_df = pd.DataFrame() 

1676 fig, ax = plt.subplots( 

1677 figsize=(figsize[0] / INCH, figsize[1] / INCH)) 

1678 if resample_type == 'mean': 

1679 resampled_df = calendar_df.resample('D').mean() 

1680 elif resample_type == 'sum': 

1681 resampled_df = calendar_df.resample('D').sum() 

1682 elif resample_type == 'max': 

1683 resampled_df = calendar_df.resample('D').max() 

1684 elif resample_type == 'min': 

1685 resampled_df = calendar_df.resample('D').min() 

1686 

1687 calendar_heatmap(ax, resampled_df, color_only) 

1688 title_name = calendar_df.columns[0] 

1689 for key, item in zone_dict.items(): 

1690 if key in title_name: 

1691 title_name = title_name.replace(key, item)# + '_' + guid 

1692 if add_title: 

1693 plt.title(str(year) + ' ' + title_name) 

1694 if save: 

1695 plt.savefig(export_path / 

1696 str(construction + save_as.replace('/', '_') + 

1697 title_name.replace('/', '_') + '.pdf'), 

1698 bbox_inches='tight') 

1699 if skip_legend: 

1700 plt.savefig(export_path / 'subplots' / str( 

1701 construction + save_as.replace('/','_') + 

1702 title_name.replace('/','_') + '.pdf'), 

1703 bbox_inches='tight') 

1704 plt.draw() 

1705 plt.close() 

1706 

1707 def calendar_array(dates, data): 

1708 i, j = zip(*[(d.day, d.month) for d in dates]) 

1709 i = np.array(i) - min(i) 

1710 j = np.array(j) - 1 

1711 ni = max(i) + 1 

1712 calendar = np.empty([ni, 12]) # , dtype='S10') 

1713 calendar[:] = np.nan 

1714 calendar[i, j] = data 

1715 return i, j, calendar 

1716 

1717 def calendar_heatmap(ax, df, color_only): 

1718 

1719 color_schema = ['#0232c2', '#028cc2', '#03ffff', 

1720 '#02c248', '#bbc202', '#c27f02'] 

1721 # Labels and their corresponding indices 

1722 labels = ['-3 to -2', '-2 to -1', '-1 to 0', 

1723 '0 to 1', '1 to 2', '2 to 3'] 

1724 label_indices = np.arange(len(labels) + 1) - 3 

1725 

1726 # Create a ListedColormap from the color schema 

1727 cmap = ListedColormap(color_schema) 

1728 df_dates = df.index 

1729 df_data = df[df.columns[0]].values 

1730 norm = Normalize(vmin=-3, vmax=3) 

1731 

1732 i, j, calendar = calendar_array(df_dates, df_data) 

1733 

1734 im = ax.imshow(calendar, aspect='auto', interpolation='none', 

1735 cmap=cmap, norm=norm) 

1736 label_days(ax, df_dates, i, j, calendar) 

1737 if not color_only: 

1738 label_data(ax, calendar) 

1739 label_months(ax, df_dates, i, j, calendar) 

1740 if not skip_legend: 

1741 cbar = ax.figure.colorbar(im, ticks=label_indices) 

1742 # Minor ticks 

1743 ax.set_xticks(np.arange(-.5, len(calendar[0]), 1), minor=True) 

1744 ax.set_yticks(np.arange(-.5, len(calendar[:, 0]), 1), minor=True) 

1745 

1746 ax.grid(False) 

1747 # Gridlines based on minor ticks 

1748 ax.grid(which='minor', color='w', linestyle='-', linewidth=0.5) 

1749 

1750 # Remove minor ticks 

1751 ax.tick_params(which='minor', bottom=False, 

1752 left=False) # ax.get_yaxis().set_ticks(label_indices) 

1753 # ax.get_yaxis().set_ticklabels(labels) 

1754 

1755 def label_data(ax, calendar): 

1756 for (i, j), data in np.ndenumerate(calendar): 

1757 if type(data) == str: 

1758 ax.text(j, i, data, ha='center', va='center') 

1759 elif np.isfinite(data): 

1760 ax.text(j, i, round(data, 1), ha='center', va='center') 

1761 

1762 def label_days(ax, dates, i, j, calendar): 

1763 ni, nj = calendar.shape 

1764 day_of_month = np.nan * np.zeros((ni, nj)) 

1765 day_of_month[i, j] = [d.day for d in dates] 

1766 

1767 yticks = np.arange(31) 

1768 yticklabels = [i + 1 for i in yticks] 

1769 ax.set_yticks(yticks) 

1770 ax.set_yticklabels(yticklabels, fontsize=6) 

1771 # ax.set(yticks=yticks, 

1772 # yticklabels=yticklabels) 

1773 

1774 def label_months(ax, dates, i, j, calendar): 

1775 month_labels = np.array( 

1776 ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 

1777 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']) 

1778 months = np.array([d.month for d in dates]) 

1779 uniq_months = sorted(set(months)) 

1780 # xticks = [i[months == m].mean() for m in uniq_months] 

1781 xticks = [i - 1 for i in uniq_months] 

1782 labels = [month_labels[m - 1] for m in uniq_months] 

1783 ax.set(xticks=xticks) 

1784 ax.set_xticklabels(labels, fontsize=6, rotation=90) 

1785 ax.xaxis.tick_top() 

1786 

1787 visualize(zone_dict)