# ExportResults.py

import xlsxwriter
import logging
import json
import baangt.base.GlobalConstants as GC
from baangt.base.Timing.Timing import Timing
from baangt.base.Utils import utils
from baangt.base.ExportResults.Append2BaseXLS import Append2BaseXLS
from pathlib import Path
from typing import Optional
from xlsxwriter.worksheet import (
    Worksheet, cell_number_tuple, cell_string_tuple)
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from baangt.base.DataBaseORM import DATABASE_URL, TestrunLog, TestCaseSequenceLog
from baangt.base.DataBaseORM import TestCaseLog, TestCaseField, GlobalAttribute, TestCaseNetworkInfo
from datetime import datetime
from sqlite3 import IntegrityError
from baangt import plugin_manager
import re
import csv
from dateutil.parser import parse
from uuid import uuid4
from baangt.base.ExportResults.SendStatistics import Statistics
from baangt.base.RuntimeStatistics import Statistic
from openpyxl import load_workbook

logger = logging.getLogger("pyC")


class ExportResults:
    def __init__(self, **kwargs):
        self.kwargs = kwargs
        self.testList = []
        self.fileName = None
        self.testRunInstance = kwargs.get(GC.KWARGS_TESTRUNINSTANCE)
        self.testCasesEndDateTimes_1D = kwargs.get('testCasesEndDateTimes_1D')
        self.testCasesEndDateTimes_2D = kwargs.get('testCasesEndDateTimes_2D')
        self.networkInfo = self._get_network_info(kwargs.get('networkInfo'))
        self.testRunName = self.testRunInstance.testRunName
        self.dataRecords = self.testRunInstance.dataRecords
        self.stage = self.__getStageFromDataRecordsOrGlobalSettings()
        self.statistics = Statistics()
        self.statistics.update_data(kwargs)
        self.statistics.update_runtimeStatistic()
        self.logfile = logger.handlers[1].baseFilename

        try:
            self.exportFormat = kwargs.get(GC.KWARGS_TESTRUNATTRIBUTES).get(GC.EXPORT_FORMAT)
            if isinstance(self.exportFormat, dict):
                self.exportFormat = self.exportFormat.get(GC.EXPORT_FORMAT)
            if not self.exportFormat:
                self.exportFormat = GC.EXP_XLSX
        except KeyError:
            self.exportFormat = GC.EXP_XLSX

        try:
            if kwargs.get(GC.KWARGS_TESTRUNATTRIBUTES).get(GC.STRUCTURE_TESTCASESEQUENCE)[1][1].get(GC.EXPORT_FILENAME):
                self.fileName = kwargs.get(GC.KWARGS_TESTRUNATTRIBUTES).get(GC.STRUCTURE_TESTCASESEQUENCE)[1][1].get(GC.EXPORT_FILENAME)
        except Exception as e:
            # FIXME: Unclear why this error occurs. When a filename is set, the code above works.
            #        No time to debug right now.
            pass

        if not self.fileName:
            self.fileName = self.__getOutputFileName()
        logger.info("Export-Sheet for results: " + self.fileName)

        # Removes password contents AND fields from data records that came from the globals file.
        self.__removeUnwantedFields()

        # export results to DB
        self.testcase_uuids = []
        self.exportToDataBase()

        if self.exportFormat == GC.EXP_XLSX:
            self.fieldListExport = kwargs.get(GC.KWARGS_TESTRUNATTRIBUTES).get(GC.EXPORT_FORMAT)["Fieldlist"]
            self.workbook = xlsxwriter.Workbook(self.fileName)
            self.summarySheet = self.workbook.add_worksheet("Summary")
            self.worksheet = self.workbook.add_worksheet("Output")
            self.jsonSheet = self.workbook.add_worksheet(f"{self.stage}_JSON")
            self.timingSheet = self.workbook.add_worksheet("Timing")
            self.cellFormatGreen = self.workbook.add_format()
            self.cellFormatGreen.set_bg_color('green')
            self.cellFormatRed = self.workbook.add_format()
            self.cellFormatRed.set_bg_color('red')
            self.cellFormatBold = self.workbook.add_format()
            self.cellFormatBold.set_bold(bold=True)
            self.summaryRow = 0
            self.__setHeaderDetailSheetExcel()
            self.makeSummaryExcel()
            self.exportResultExcel()
            self.exportJsonExcel()
            self.exportAdditionalData()
            self.write_json_sheet()
            self.exportTiming = ExportTiming(self.dataRecords,
                                             self.timingSheet)
            if self.networkInfo:
                self.networkSheet = self.workbook.add_worksheet("Network")
                self.exportNetWork = ExportNetWork(self.networkInfo,
                                                   self.testCasesEndDateTimes_1D,
                                                   self.testCasesEndDateTimes_2D,
                                                   self.workbook,
                                                   self.networkSheet)
            self.closeExcel()

            # Call functionality for potentially exporting data to other sheets/databases
            Append2BaseXLS(self.testRunInstance, self.fileName)
        elif self.exportFormat == GC.EXP_CSV:
            self.export2CSV()

        if self.testRunInstance.globalSettings.get("DeactivateStatistics") == "True":
            logger.debug("Send Statistics to server is deactivated. Not sending.")
        elif self.testRunInstance.globalSettings.get("DeactivateStatistics") is True:
            logger.debug("Send Statistics to server is deactivated. Not sending.")
        else:
            try:
                self.statistics.send_statistics()
            except Exception as ex:
                logger.debug(ex)

        self.update_result_in_testrun()

    def __removeUnwantedFields(self):
        lListPasswordFieldNames = ["PASSWORD", "PASSWORT", "PASSW"]
        if not self.testRunInstance.globalSettings.get("LetPasswords"):
            # If there's a password in GlobalSettings, remove the value:
            for key, value in self.testRunInstance.globalSettings.items():
                if key.upper() in lListPasswordFieldNames:
                    self.testRunInstance.globalSettings[key] = "*" * 8

            # If there's a password in the datafile, remove the value.
            # Also remove all columns that are included in the global settings anyway.
            for key, fields in self.dataRecords.items():
                fieldsToPop = []
                for field, value in fields.items():
                    if field.upper() in lListPasswordFieldNames:
                        self.dataRecords[key][field] = "*" * 8
                    if field in self.testRunInstance.globalSettings.keys():
                        logger.debug(
                            f"Added {field} to fields to be removed from data record as it exists in GlobalSettings already.")
                        fieldsToPop.append(field)
                for field in fieldsToPop:
                    if field != 'Screenshots' and field != 'Stage':  # Stage and Screenshots are needed in the output file
                        logger.debug(f"Removed field {field} from data record.")
                        fields.pop(field)

    def exportAdditionalData(self):
        # Runs only when the KWARGS parameter is set.
        if self.kwargs.get(GC.EXPORT_ADDITIONAL_DATA):
            addExportData = self.kwargs[GC.EXPORT_ADDITIONAL_DATA]
            # Loop over the items. KEY = tab name, VALUE = data to be exported.
            # Inside the data: KEY = field name, VALUE = cell value.
            for key, value in addExportData.items():
                lExport = ExportAdditionalDataIntoTab(tabname=key, valueDict=value, outputExcelSheet=self.workbook)
                lExport.export()
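
    # Illustrative only (not part of the original module): the structure assumed for
    # kwargs[GC.EXPORT_ADDITIONAL_DATA] is one tab per outer key and one row per inner key:
    #
    #   {"MyExtraTab": {0: {"Field1": "value1", "Field2": "value2"},
    #                   1: {"Field1": "value3", "Field2": "value4"}}}
    #
    # ExportAdditionalDataIntoTab (defined further below) writes the inner keys as the
    # header row and each inner dict as one data row.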
    def __getStageFromDataRecordsOrGlobalSettings(self):
        """
        If "STAGE" is not provided in the data fields (should actually not happen, but who knows),
        we take it from GlobalSettings. If it is not there either, use the default value GC.EXECUTION_STAGE_TEST.
        :return:
        """
        value = None
        for key, value in self.dataRecords.items():
            break
        if not value.get(GC.EXECUTION_STAGE):
            stage = self.testRunInstance.globalSettings.get('TC.Stage', GC.EXECUTION_STAGE_TEST)
        else:
            stage = value.get(GC.EXECUTION_STAGE)
        return stage

    def export2CSV(self):
        """
        Writes a CSV file of the data records.
        """
        f = open(self.fileName, 'w', encoding='utf-8-sig', newline='')
        writer = csv.DictWriter(f, self.dataRecords[0].keys())
        writer.writeheader()
        for i in range(len(self.dataRecords)):
            writer.writerow(self.dataRecords[i])
        f.close()

    def exportToDataBase(self):
        #
        # writes results to DB
        #
        logger.info(f'Export results to database at: {DATABASE_URL}')
        engine = create_engine(DATABASE_URL)
        # create a Session
        Session = sessionmaker(bind=engine)
        session = Session()
        # get timings
        timing: Timing = self.testRunInstance.timing
        start, end, duration = timing.returnTimeSegment(GC.TIMING_TESTRUN)
        # get status
        success = 0
        error = 0
        waiting = 0
        for value in self.dataRecords.values():
            if value[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_SUCCESS:
                success += 1
            elif value[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_ERROR:
                error += 1
            elif value[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_WAITING:
                waiting += 1
        self.statistics.update_attribute_with_value("TestCasePassed", success)
        self.statistics.update_attribute_with_value("TestCaseFailed", error)
        self.statistics.update_attribute_with_value("TestCasePaused", waiting)
        self.statistics.update_attribute_with_value("TestCaseExecuted", success + error + waiting)
        try:
            dic = {}
            for key in self.testRunInstance.json_dict:
                if "$(" in key:
                    dic[key[2:-1]] = self.testRunInstance.json_dict[key]
            json_data = json.dumps(dic)
        except Exception as ex:
            logger.info(f"RLP JSON error while updating in DB: {str(ex)}")
            json_data = ""
        # get documents
        datafiles = self.fileName
        # create testrun object
        tr_log = TestrunLog(
            id=self.testRunInstance.uuid.bytes,
            testrunName=self.testRunName,
            logfileName=self.logfile,
            startTime=datetime.strptime(start, "%d-%m-%Y %H:%M:%S"),
            endTime=datetime.strptime(end, "%d-%m-%Y %H:%M:%S"),
            statusOk=success,
            statusFailed=error,
            statusPaused=waiting,
            dataFile=self.fileName,
            RLPJson=json_data,
        )
        # add to DataBase
        session.add(tr_log)
        # set globals
        for key, value in self.testRunInstance.globalSettings.items():
            globalVar = GlobalAttribute(
                name=key,
                value=str(value),
                testrun=tr_log,
            )
            session.add(globalVar)
        self.__save_commit(session)
        # create testcase sequence instance
        tcs_log = TestCaseSequenceLog(testrun=tr_log)
        # create testcases
        for tc in self.dataRecords.values():
            # get uuid
            uuid = uuid4()
            # create TestCaseLog instances
            tc_log = TestCaseLog(
                id=uuid.bytes,
                testcase_sequence=tcs_log
            )
            # store uuid
            self.testcase_uuids.append(uuid)
            session.add(tc_log)
            # add TestCase fields
            for key, value in tc.items():
                field = TestCaseField(name=key, value=str(value), testcase=tc_log)
                session.add(field)
        self.__save_commit(session)
        # network info
        if self.networkInfo:
            for entry in self.networkInfo:
                if isinstance(entry.get('testcase'), int):
                    nw_info = TestCaseNetworkInfo(
                        testcase=tcs_log.testcases[entry.get('testcase') - 1],
                        browserName=entry.get('browserName'),
                        status=entry.get('status'),
                        method=entry.get('method'),
                        url=entry.get('url'),
                        contentType=entry.get('contentType'),
                        contentSize=entry.get('contentSize'),
                        headers=str(entry.get('headers')),
                        params=str(entry.get('params')),
                        response=entry.get('response'),
                        startDateTime=datetime.strptime(entry.get('startDateTime')[:19], '%Y-%m-%dT%H:%M:%S'),
                        duration=entry.get('duration'),
                    )
                    session.add(nw_info)
            self.__save_commit(session)

    def __save_commit(self, session):
        try:
            session.commit()
        except IntegrityError as e:
            logger.critical(f"Integrity Error during commit to database: {e}")
        except Exception as e:
            logger.critical(f"Unknown error during database commit: {e}")

    def _get_test_case_num(self, start_date_time, browser_name):
        d_t = parse(start_date_time)
        d_t = d_t.replace(tzinfo=None)
        if self.testCasesEndDateTimes_1D:
            for index, dt_end in enumerate(self.testCasesEndDateTimes_1D):
                if d_t < dt_end:
                    return index + 1
        elif self.testCasesEndDateTimes_2D:
            browser_num = re.findall(r"\d+\.?\d*", str(browser_name))[-1] \
                if re.findall(r"\d+\.?\d*", str(browser_name)) else 0
            dt_list_index = int(browser_num) if int(browser_num) > 0 else 0
            for i, tcAndDtEnd in enumerate(self.testCasesEndDateTimes_2D[dt_list_index]):
                if d_t < tcAndDtEnd[1]:
                    return tcAndDtEnd[0] + 1
        return 'unknown'
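
    # Illustrative only: how the 2D lookup above resolves a browser name. Assuming a
    # browser name like "BROWSER_3" and testCasesEndDateTimes_2D[3] holding tuples of
    # (testcase_index, end_datetime), the regex pulls "3" out of the name, selects that
    # sub-list, and returns the first tuple whose end timestamp lies after the request's
    # start time (testcase numbers are reported 1-based).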
    def _get_network_info(self, networkInfoDict):
        #
        # extracts network info data from the given dict
        #
        if networkInfoDict:
            extractedNetworkInfo = []
            for info in networkInfoDict:
                for entry in info['log']['entries']:
                    # extract the current entry
                    extractedNetworkInfo.append({
                        'testcase': self._get_test_case_num(entry['startedDateTime'], entry['pageref']),
                        'browserName': entry.get('pageref'),
                        'status': entry['response'].get('status'),
                        'method': entry['request'].get('method'),
                        'url': entry['request'].get('url'),
                        'contentType': entry['response']['content'].get('mimeType'),
                        'contentSize': entry['response']['content'].get('size'),
                        'headers': entry['response']['headers'],
                        'params': entry['request']['queryString'],
                        'response': entry['response']['content'].get('text'),
                        'startDateTime': entry['startedDateTime'],
                        'duration': entry.get('time'),
                    })
            return extractedNetworkInfo
        return None
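
    # Illustrative only: the networkInfo structure consumed above is assumed to be a
    # list of HAR-style dicts (one per browser session), e.g.:
    #
    #   [{"log": {"entries": [{"startedDateTime": "2020-01-20T21:57:46.525577",
    #                          "pageref": "BROWSER_1",
    #                          "request": {"method": "GET", "url": "...", "queryString": []},
    #                          "response": {"status": 200, "headers": [],
    #                                       "content": {"mimeType": "text/html", "size": 123, "text": "..."}},
    #                          "time": 42}]}}]
    #
    # Only the keys accessed above are required; everything else is ignored.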
    def exportResultExcel(self, **kwargs):
        self._exportData()

    def exportJsonExcel(self):
        # headers
        headers = [
            'Stage',
            'UUID',
            'Attribute',
            'Value',
        ]
        # header style
        header_style = self.workbook.add_format()
        header_style.set_bold()
        # write header
        for index in range(len(headers)):
            self.jsonSheet.write(0, index, headers[index], header_style)
        # write data
        row = 0
        for index, testcase in self.dataRecords.items():
            # add TestCase fields
            for key, value in testcase.items():
                row += 1
                self.jsonSheet.write(row, 0, self.stage)
                self.jsonSheet.write(row, 1, str(self.testcase_uuids[index]))
                self.jsonSheet.write(row, 2, key)
                self.jsonSheet.write(row, 3, str(value))
        # Autowidth
        for n in range(len(headers)):
            ExcelSheetHelperFunctions.set_column_autowidth(self.jsonSheet, n)

    def write_json_sheet(self):
        # Used to write the RLP JSON into individual sheets
        dic = self.testRunInstance.json_dict
        for js in dic:
            if not js:
                continue
            elif js[:2] == "$(":
                name = js[2:-1]
            else:
                name = js
            jsonSheet = self.workbook.add_worksheet(f"{self.stage}_{name}")
            if type(dic[js][0]) == dict:  # the data is either a dict or a dict inside a list; needed to write headers
                data_dic = dic[js][0]
            elif type(dic[js][0][0]) == dict:
                data_dic = dic[js][0][0]
            else:
                logger.debug(f"{dic[js]} is not json convertible.")
                continue
            remove_header = []
            for key in data_dic:  # remove headers which contain nested data
                if type(data_dic[key]) == list or type(data_dic[key]) == dict:
                    remove_header.append(key)
            for key in remove_header:
                del data_dic[key]
            headers = []
            for index, header in enumerate(data_dic):
                jsonSheet.write(0, index, header)
                headers.append(header)
            row = 1
            for data in dic[js]:
                if not data:
                    continue
                dt = {}
                for y, dt in enumerate(data):
                    if type(dt) != dict:  # for single dictionary data
                        jsonSheet.write(row, y, data[dt])
                    else:  # if dictionaries are inside a list
                        column = 0  # used to update the individual column
                        for d in dt:
                            if d not in headers:
                                continue
                            try:
                                jsonSheet.write(row, column, dt[d])
                            except Exception as ex:
                                logger.error(ex)
                            column += 1
                        row += 1
                if type(dt) != dict:
                    row += 1

    def makeSummaryExcel(self):
        self.summarySheet.write(0, 0, f"Testreport for {self.testRunName}", self.cellFormatBold)
        self.summarySheet.set_column(0, last_col=0, width=15)
        # get testrun name
        self.testList.append(self.testRunName)
        # Testrecords
        self.__writeSummaryCell("Testrecords", len(self.dataRecords), row=2, format=self.cellFormatBold)
        value = len([x for x in self.dataRecords.values()
                     if x[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_SUCCESS])
        self.testList.append(value)  # OK count
        if not value:
            value = ""
        self.__writeSummaryCell("Successful", value, format=self.cellFormatGreen)
        self.testList.append(value)  # paused count
        self.__writeSummaryCell("Paused", len([x for x in self.dataRecords.values()
                                               if x[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_WAITING]))
        value = len([x["Screenshots"] for x in self.dataRecords.values()
                     if x[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_ERROR])
        self.testList.append(value)  # error count
        if not value:
            value = ""
        self.__writeSummaryCell("Error", value, format=self.cellFormatRed)
        # Logfile
        self.__writeSummaryCell("Logfile", logger.handlers[1].baseFilename, row=7)
        # get logfile name for the database
        self.testList.append(logger.handlers[1].baseFilename)
        # database id
        self.__writeSummaryCell("Testrun UUID", str(self.testRunInstance.uuid), row=8)
        # Timing
        timing: Timing = self.testRunInstance.timing
        start, end, duration = timing.returnTimeSegment(GC.TIMING_TESTRUN)
        self.testRun_end = end  # used while updating the timestamp in the source file
        self.statistics.update_attribute_with_value("Duration", duration)
        self.statistics.update_attribute_with_value("TestRunUUID", str(self.testRunInstance.uuid))
        self.__writeSummaryCell("Starttime", start, row=10)
        # get start and end time
        self.testList.append(start)
        self.testList.append(end)
        self.__writeSummaryCell("Endtime", end)
        self.__writeSummaryCell("Duration", duration, format=self.cellFormatBold)
        self.__writeSummaryCell("Avg. Dur", "")
        # Globals:
        self.__writeSummaryCell("Global settings for this testrun", "", format=self.cellFormatBold, row=15)
        for key, value in self.testRunInstance.globalSettings.items():
            if key.upper() in ["PASSWORD", "PASSWORT", "CONFLUENCE-PASSWORD"]:
                continue
            self.__writeSummaryCell(key, str(value))
            # get global data
            self.testList.append(str(value))
        # Testcase and Testsequence settings
        self.summaryRow += 1
        self.__writeSummaryCell("TestSequence settings follow:", "", format=self.cellFormatBold)
        lSequence = self.testRunInstance.testRunUtils.getSequenceByNumber(testRunName=self.testRunName, sequence="1")
        if lSequence:
            for key, value in lSequence[1].items():
                if isinstance(value, list) or isinstance(value, dict):
                    continue
                self.__writeSummaryCell(key, str(value))

    def __writeSummaryCell(self, lineHeader, lineText, row=None, format=None, image=False):
        if not row:
            self.summaryRow += 1
        else:
            self.summaryRow = row
        if not lineText:
            # If we have no lineText we want to apply the format to the header
            self.summarySheet.write(self.summaryRow, 0, lineHeader, format)
        else:
            self.summarySheet.write(self.summaryRow, 0, lineHeader)
            self.summarySheet.write(self.summaryRow, 1, lineText, format)

    def __getOutputFileName(self):
        l_file = Path(self.testRunInstance.managedPaths.getOrSetExportPath())
        if self.exportFormat == GC.EXP_XLSX:
            lExtension = '.xlsx'
        elif self.exportFormat == GC.EXP_CSV:
            lExtension = '.csv'
        else:
            logger.critical(f"Wrong export file format: {self.exportFormat}, using 'xlsx' instead")
            lExtension = '.xlsx'
        l_file = l_file.joinpath("baangt_" + self.testRunName + "_" + utils.datetime_return() + lExtension)
        logger.debug(f"Filename for export: {str(l_file)}")
        return str(l_file)

    def __setHeaderDetailSheetExcel(self):
        # the 1st column is the DB UUID
        self.worksheet.write(0, 0, 'UUID')
        # Add fields with name "RESULT_*" to the output fields.
        i = 1
        self.__extendFieldList()
        for column in self.fieldListExport:
            self.worksheet.write(0, i, column)
            i += 1
        # add JSON field
        self.worksheet.write(0, len(self.fieldListExport) + 1, "JSON")

    def __extendFieldList(self):
        """
        Fields that start with "RESULT_" shall always be exported.
        Other fields that shall always be exported are added as well (Testcaseerrorlog, etc.).
        If the global parameter "TC.ExportAllFields" is set to True, ALL fields are exported.
        @return:
        """
        if self.testRunInstance.globalSettings.get("TC.ExportAllFields", False):
            self.fieldListExport = []  # Make an empty list, so that we don't have duplicates
            for key in self.dataRecords[0].keys():
                self.fieldListExport.append(key)
            return

        try:
            for key in self.dataRecords[0].keys():
                if "RESULT_" in key.upper():
                    if not key in self.fieldListExport:
                        self.fieldListExport.append(key)
        except Exception as e:
            logger.critical(
                f'Looks like we have no data in records: {self.dataRecords}, len of dataRecords: {len(self.dataRecords)}')

        # They are added here because they'll not necessarily appear in the first record of the export data:
        if not GC.TESTCASEERRORLOG in self.fieldListExport:
            self.fieldListExport.append(GC.TESTCASEERRORLOG)
        if not GC.SCREENSHOTS in self.fieldListExport:
            self.fieldListExport.append(GC.SCREENSHOTS)
        if not GC.EXECUTION_STAGE in self.fieldListExport:
            self.fieldListExport.append(GC.EXECUTION_STAGE)
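
    # Illustrative only: with a first data record like
    #   {"Name": "A", "RESULT_Premium": "100", "TCErrorLog": "", "Screenshots": [], "Stage": "Test"}
    # and "TC.ExportAllFields" not set, the configured field list is extended by
    # "RESULT_Premium" plus the always-exported columns
    # (GC.TESTCASEERRORLOG, GC.SCREENSHOTS, GC.EXECUTION_STAGE).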
    def _exportData(self):
        for key, value in self.dataRecords.items():
            # write DB UUID
            try:
                self.worksheet.write(key + 1, 0, str(self.testcase_uuids[key]))
                # write RESULT fields
                for (n, column) in enumerate(self.fieldListExport):
                    self.__writeCell(key + 1, n + 1, value, column)
                # Also write everything as a JSON string into the last column
                self.worksheet.write(key + 1, len(self.fieldListExport) + 1, json.dumps(value))
            except IndexError as e:
                logger.error(f"List of testcase_uuids didn't have a value for {key}. That shouldn't happen!")
            except BaseException as e:
                logger.error(f"Error happened where it shouldn't. Error was {e}")

        # Create autofilter
        self.worksheet.autofilter(0, 0, len(self.dataRecords.items()), len(self.fieldListExport))

        # Make cells wide enough
        for n in range(0, len(self.fieldListExport)):
            ExcelSheetHelperFunctions.set_column_autowidth(self.worksheet, n)

    def update_result_in_testrun(self):
        # Update the source testrun file
        testrun_column = self.dataRecords[0]["testcase_column"]
        if testrun_column:  # if testrun_column is greater than 0, a testresult header is present in the source file
            testrun_file = load_workbook(self.dataRecords[0]["testcase_file"])
            testrun_sheet = testrun_file.get_sheet_by_name(self.dataRecords[0]["testcase_sheet"])
            for key, value in self.dataRecords.items():
                logger.debug(value)
                data = f"TestCaseStatus: {value['TestCaseStatus']}\r\n" \
                       f"Timestamp: {self.testRun_end}\r\n" \
                       f"Duration: {value['Duration']}\r\n" \
                       f"TCErrorLog: {value['TCErrorLog']}\r\n" \
                       f"TestRun_UUID: {str(self.testRunInstance.uuid)}\r\n" \
                       f"TestCase_UUID: {str(self.testcase_uuids[key])}\r\n\r\n"
                old_value = testrun_sheet.cell(value["testcase_row"] + 1, value["testcase_column"]).value
                testrun_sheet.cell(value["testcase_row"] + 1, value["testcase_column"]).value = data + old_value
            testrun_file.save(self.dataRecords[0]["testcase_file"])
            logger.info("Source TestRun file updated.")

    def __writeCell(self, line, cellNumber, testRecordDict, fieldName, strip=False):
        if fieldName in testRecordDict.keys() and testRecordDict[fieldName]:
            # Convert boolean for output
            if isinstance(testRecordDict[fieldName], bool):
                testRecordDict[fieldName] = "True" if testRecordDict[fieldName] else "False"
            # Remove leading newline:
            if isinstance(testRecordDict[fieldName], str):
                if '\n' in testRecordDict[fieldName][0:5] or strip:
                    testRecordDict[fieldName] = testRecordDict[fieldName].strip()
            # Do different stuff for dicts and lists:
            if isinstance(testRecordDict[fieldName], dict):
                self.worksheet.write(line, cellNumber, testRecordDict[fieldName])
            elif isinstance(testRecordDict[fieldName], list):
                if fieldName == GC.SCREENSHOTS:
                    self.__attachScreenshotsToExcelCells(cellNumber, fieldName, line, testRecordDict)
                else:
                    self.worksheet.write(line, cellNumber,
                                         utils.listToString(testRecordDict[fieldName]))
            else:
                if fieldName == GC.TESTCASESTATUS:
                    if testRecordDict[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_SUCCESS:
                        self.worksheet.write(line, cellNumber, str(testRecordDict[fieldName]), self.cellFormatGreen)
                    elif testRecordDict[GC.TESTCASESTATUS] == GC.TESTCASESTATUS_ERROR:
                        self.worksheet.write(line, cellNumber, str(testRecordDict[fieldName]), self.cellFormatRed)
                elif fieldName == GC.SCREENSHOTS:
                    self.__attachScreenshotsToExcelCells(cellNumber, fieldName, line, testRecordDict)
                else:
                    self.worksheet.write(line, cellNumber, str(testRecordDict[fieldName]))

    def __attachScreenshotsToExcelCells(self, cellNumber, fieldName, line, testRecordDict):
        # Place the screenshot images "on" the appropriate cell
        try:
            if type(testRecordDict[fieldName]) == list:
                if Path(testRecordDict[fieldName][-1]).is_file():
                    self.worksheet.insert_image(line, cellNumber, testRecordDict[fieldName][-1],
                                                {'x_scale': 0.05, 'y_scale': 0.05})
                else:
                    logger.error(f"Screenshot file {testRecordDict[fieldName][-1]} can't be found")
                for nextScreenshotNumber in range(len(testRecordDict[fieldName]) - 1):
                    if Path(testRecordDict[fieldName][nextScreenshotNumber]).is_file():
                        self.worksheet.insert_image(line, len(self.fieldListExport) + nextScreenshotNumber + 1,
                                                    testRecordDict[fieldName][nextScreenshotNumber],
                                                    {'x_scale': 0.05, 'y_scale': 0.05})
                    else:
                        logger.error(f"Screenshot file {testRecordDict[fieldName][nextScreenshotNumber]} can't be found")
            else:
                if Path(testRecordDict[fieldName]).is_file():
                    self.worksheet.insert_image(line, cellNumber, testRecordDict[fieldName],
                                                {'x_scale': 0.05, 'y_scale': 0.05})
                else:
                    logger.error(f"Screenshot file {testRecordDict[fieldName]} can't be found")
        except Exception as e:
            logger.error(f"Problem with screenshots - can't attach them: {e}")
        self.worksheet.set_row(line, 35)

    def closeExcel(self):
        self.workbook.close()
        # The next line doesn't work on macOS. Returns "not authorized".
        # subprocess.Popen([self.filename], shell=True)


class ExportAdditionalDataIntoTab:
    def __init__(self, tabname, valueDict, outputExcelSheet: xlsxwriter.Workbook):
        self.tab = outputExcelSheet.add_worksheet(tabname)
        self.values = valueDict

    def export(self):
        self.makeHeader()
        self.writeLines()

    def makeHeader(self):
        for cellNumber, entries in self.values.items():
            for column, (key, value) in enumerate(entries.items()):
                self.tab.write(0, column, key)
            break  # Write the header only for the first line.

    def writeLines(self):
        currentLine = 1
        for line, values in self.values.items():
            for column, (key, value) in enumerate(values.items()):
                self.tab.write(currentLine, column, value)
            currentLine += 1
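

# Illustrative usage sketch (not part of the original module), assuming a workbook
# created just for this purpose:
#
#   wb = xlsxwriter.Workbook("additional_data.xlsx")
#   ExportAdditionalDataIntoTab(tabname="Rates",
#                               valueDict={0: {"Tariff": "T1", "Premium": 100},
#                                          1: {"Tariff": "T2", "Premium": 120}},
#                               outputExcelSheet=wb).export()
#   wb.close()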
class ExcelSheetHelperFunctions:
    def __init__(self):
        pass

    @staticmethod
    def set_column_autowidth(worksheet: Worksheet, column: int):
        """
        Set the width automatically on a column in the `Worksheet`.
        !!! Make sure you run this function AFTER having all cells filled in
        the worksheet!
        """
        maxwidth = ExcelSheetHelperFunctions.get_column_width(worksheet=worksheet, column=column)
        if maxwidth is None:
            return
        elif maxwidth > 45:
            maxwidth = 45
        worksheet.set_column(first_col=column, last_col=column, width=maxwidth)

    @staticmethod
    def get_column_width(worksheet: Worksheet, column: int) -> Optional[int]:
        """Get the max column width in a `Worksheet` column."""
        strings = getattr(worksheet, '_ts_all_strings', None)
        if strings is None:
            strings = worksheet._ts_all_strings = sorted(
                worksheet.str_table.string_table,
                key=worksheet.str_table.string_table.__getitem__)
        lengths = set()
        for row_id, colums_dict in worksheet.table.items():  # type: int, dict
            data = colums_dict.get(column)
            if not data:
                continue
            if type(data) is cell_string_tuple:
                iter_length = len(strings[data.string])
                if not iter_length:
                    continue
                lengths.add(iter_length)
                continue
            if type(data) is cell_number_tuple:
                iter_length = len(str(data.number))
                if not iter_length:
                    continue
                lengths.add(iter_length)
        if not lengths:
            return None
        return max(lengths)


class ExportNetWork:
    headers = ['BrowserName', 'TestCaseNum', 'Status', 'Method', 'URL', 'ContentType', 'ContentSize', 'Headers',
               'Params', 'Response', 'startDateTime', 'Duration/ms']

    def __init__(self, networkInfo: dict, testCasesEndDateTimes_1D: list,
                 testCasesEndDateTimes_2D: list, workbook: xlsxwriter.Workbook, sheet: xlsxwriter.worksheet):
        self.networkInfo = networkInfo
        self.testCasesEndDateTimes_1D = testCasesEndDateTimes_1D
        self.testCasesEndDateTimes_2D = testCasesEndDateTimes_2D
        self.workbook = workbook
        self.sheet = sheet
        header_style = self.get_header_style()
        self.write_header(style=header_style)
        self.set_column_align()
        self.write_content()
        self.set_column_width()

    def set_column_align(self):
        right_align_indexes = list()
        right_align_indexes.append(ExportNetWork.headers.index('ContentSize'))
        right_align_indexes.append(ExportNetWork.headers.index('Duration/ms'))
        right_align_style = self.get_column_style(alignment='right')
        left_align_style = self.get_column_style(alignment='left')
        [self.sheet.set_column(i, i, cell_format=right_align_style) if i in right_align_indexes else
         self.sheet.set_column(i, i, cell_format=left_align_style) for i in range(len(ExportNetWork.headers))]

    def set_column_width(self):
        [ExcelSheetHelperFunctions.set_column_autowidth(self.sheet, i) for i in range(len(ExportNetWork.headers))]

    def get_header_style(self):
        header_style = self.workbook.add_format()
        header_style.set_bg_color("#00CCFF")
        header_style.set_color("#FFFFFF")
        header_style.set_bold()
        header_style.set_border()
        return header_style

    def get_column_style(self, alignment=None):
        column_style = self.workbook.add_format()
        column_style.set_color("black")
        column_style.set_align('right') if alignment == 'right' \
            else column_style.set_align('left') if alignment == 'left' else None
        column_style.set_border()
        return column_style

    def write_header(self, style=None):
        for index, value in enumerate(ExportNetWork.headers):
            self.sheet.write(0, index, value, style)

    def _get_test_case_num(self, start_date_time, browser_name):
        d_t = parse(start_date_time)
        d_t = d_t.replace(tzinfo=None)
        if self.testCasesEndDateTimes_1D:
            for index, dt_end in enumerate(self.testCasesEndDateTimes_1D):
                if d_t < dt_end:
                    return index + 1
        elif self.testCasesEndDateTimes_2D:
            browser_num = re.findall(r"\d+\.?\d*", str(browser_name))[-1] \
                if re.findall(r"\d+\.?\d*", str(browser_name)) else 0
            dt_list_index = int(browser_num) if int(browser_num) > 0 else 0
            for i, tcAndDtEnd in enumerate(self.testCasesEndDateTimes_2D[dt_list_index]):
                if d_t < tcAndDtEnd[1]:
                    return tcAndDtEnd[0] + 1
        return 'unknown'

    def write_content(self):
        if not self.networkInfo:
            return
        for index in range(len(self.networkInfo)):
            data_list = [
                self.networkInfo[index]['browserName'],
                self.networkInfo[index]['testcase'],
                self.networkInfo[index]['status'],
                self.networkInfo[index]['method'],
                self.networkInfo[index]['url'],
                self.networkInfo[index]['contentType'],
                self.networkInfo[index]['contentSize'],
                self.networkInfo[index]['headers'],
                self.networkInfo[index]['params'],
                self.networkInfo[index]['response'],
                self.networkInfo[index]['startDateTime'],
                self.networkInfo[index]['duration'],
            ]
            for i in range(len(data_list)):
                self.sheet.write(index + 1, i, str(data_list[i]) or 'null')


class ExportTiming:
    def __init__(self, testdataRecords: dict, sheet: xlsxwriter.worksheet):
        self.testdataRecords = testdataRecords
        self.sheet: xlsxwriter.worksheet = sheet

        self.sections = {}
        self.findAllTimingSections()
        self.writeHeader()
        self.writeLines()

        # Autowidth
        for n in range(0, len(self.sections) + 1):
            ExcelSheetHelperFunctions.set_column_autowidth(self.sheet, n)

    def writeHeader(self):
        self.wc(0, 0, "Testcase#")
        for index, key in enumerate(self.sections.keys(), start=1):
            self.wc(0, index, key)

    def writeLines(self):
        for tcNumber, (key, line) in enumerate(self.testdataRecords.items(), start=1):
            self.wc(tcNumber, 0, tcNumber)
            lSections = self.interpretTimeLog(line[GC.TIMELOG])
            for section, timingValue in lSections.items():
                # find in which column this section should be written:
                for column, key in enumerate(self.sections.keys(), 1):
                    if key == section:
                        self.wc(tcNumber, column,
                                timingValue[GC.TIMING_DURATION])
                        continue

    @staticmethod
    def shortenTimingValue(timingValue):
        # TimingValue is seconds as float. 2 decimals are enough:
        timingValue = int(float(timingValue) * 100)
        return timingValue / 100

    def writeCell(self, row, col, content, format=None):
        self.sheet.write(row, col, content, format)

    wc = writeCell

    def findAllTimingSections(self):
        """
        We try to have an ordered list of timing sections. As each testcase might have different sections,
        we have to make guesses.
        @return:
        """
        lSections = {}
        for key, line in self.testdataRecords.items():
            lTiming: dict = ExportTiming.interpretTimeLog(line[GC.TIMELOG])
            for key in lTiming.keys():
                if lSections.get(key):
                    continue
                else:
                    lSections[key] = None
        self.sections = lSections

    @staticmethod
    def interpretTimeLog(lTimeLog):
        """Example time log:

        Complete Testrun: Start: 1579553837.241974 - no end recorded
        TestCaseSequenceMaster: Start: 1579553837.243414 - no end recorded
        CustTestCaseMaster: Start: 1579553838.97329 - no end recorded
        Browser Start: , since last call: 2.3161418437957764
        Empfehlungen: , since last call: 6.440968036651611, ZIDs:[175aeac023237a73], TS:2020-01-20 21:57:46.525577
        Annahme_RABAZ: , since last call: 2.002716064453125e-05, ZIDs:[6be7d0a44e59acf6], TS:2020-01-20 21:58:37.203583
        Antrag drucken: , since last call: 9.075241088867188, ZIDs:[6be7d0a44e59acf6, b27c3875ddcbb4fa], TS:2020-01-20 21:58:38.040137
        Warten auf Senden an Bestand Button: , since last call: 1.3927149772644043
        Senden an Bestand: , since last call: 9.60469913482666, ZIDs:[66b12fa4869cf8a0, ad1f3d47c4694e26], TS:2020-01-20 21:58:49.472288

        The first part before ":" is the section, "since last call:" is the duration, "TS:" is the timestamp.
        Update 29.3.2020: Format changed to "since last call: 00:xx:xx,", the rest looks identical.
        """
        lExport = {}
        lLines = lTimeLog.split("\n")
        for line in lLines:
            parts = line.split(",")
            if len(parts) < 2:
                continue
            if "Start:" in line:
                # Format <sequence>: <Start>: <time.loctime>
                continue
            else:
                lSection = parts[0].replace(":", "").strip()
                lDuration = parts[1].split("since last call: ")[1]
                lExport[lSection] = {GC.TIMING_DURATION: lDuration}
        return lExport
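

# Minimal illustrative sketch (not part of the original module): how
# ExportTiming.interpretTimeLog() breaks a time-log string into sections.
# Guarded so it only runs when this file is executed directly.
if __name__ == "__main__":
    _sampleTimeLog = (
        "Complete Testrun: Start: 1579553837.241974 - no end recorded\n"
        "Empfehlungen: , since last call: 6.440968036651611, ZIDs:[175aeac023237a73], TS:2020-01-20 21:57:46.525577\n"
        "Senden an Bestand: , since last call: 9.60469913482666, ZIDs:[66b12fa4869cf8a0], TS:2020-01-20 21:58:49.472288"
    )
    # "Start:" lines are skipped; each remaining line yields one section keyed by GC.TIMING_DURATION,
    # e.g. {'Empfehlungen': {GC.TIMING_DURATION: '6.440968036651611'}, 'Senden an Bestand': {...}}
    print(ExportTiming.interpretTimeLog(_sampleTimeLog))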