Coverage for C:\Repos\leo-editor\leo\commands\convertCommands.py: 96%

144 statements  

coverage.py v6.4, created at 2022-05-24 10:21 -0500

1# -*- coding: utf-8 -*- 

2#@+leo-ver=5-thin 

3#@+node:ekr.20160316095222.1: * @file ../commands/convertCommands.py 

4#@@first 

5"""Leo's file-conversion commands.""" 

6 

7import re 

8from typing import Any, Dict, List, Optional, Tuple 

9from leo.core import leoGlobals as g 

10from leo.core import leoBeautify 

11from leo.commands.baseCommands import BaseEditCommandsClass 

12 

13def cmd(name): 

14 """Command decorator for the ConvertCommandsClass class.""" 

15 return g.new_cmd_decorator(name, ['c', 'convertCommands',]) 

16 

17#@+<< class To_Python >> 

18#@+node:ekr.20150514063305.123: ** << class To_Python >> 

19class To_Python: # pragma: no cover 

20 """The base class for x-to-python commands.""" 

21 #@+others 

22 #@+node:ekr.20150514063305.125: *3* To_Python.ctor 

23 def __init__(self, c): 

24 """Ctor for To_Python class.""" 

25 self.c = c 

26 self.p = self.c.p.copy() 

27 aList = g.get_directives_dict_list(self.p) 

28 self.tab_width = g.scanAtTabwidthDirectives(aList) or 4 

29 #@+node:ekr.20150514063305.126: *3* To_Python.go 

30 def go(self): 

31 import time 

32 t1 = time.time() 

33 c = self.c 

34 u, undoType = c.undoer, 'typescript-to-python' 

35 pp = leoBeautify.CPrettyPrinter(c) 

36 u.beforeChangeGroup(c.p, undoType) 

37 changed = False 

38 n_files, n_nodes = 0, 0 

39 special = ('class ', 'module ', '@file ', '@@file ') 

40 files = ('@file ', '@@file ') 

41 for p in self.p.self_and_subtree(copy=False): 

42 if p.b: 

43 n_nodes += 1 

44 if any(p.h.startswith(z) for z in special): 

45 g.es_print(p.h) 

46 if any(p.h.startswith(z) for z in files): 

47 n_files += 1 

48 bunch = u.beforeChangeNodeContents(p) 

49 s = pp.indent(p, giveWarnings=False) 

50 aList = list(s) 

51 self.convertCodeList(aList) 

52 s = ''.join(aList) 

53 if s != p.b: 

54 p.b = s 

55 p.setDirty() 

56 u.afterChangeNodeContents(p, undoType, bunch) 

57 changed = True 

58 # Call this only once, at end. 

59 if changed: 

60 u.afterChangeGroup(c.p, undoType, reportFlag=False) 

61 t2 = time.time() 

62 g.es_print(f"done! {n_files} files, {n_nodes} nodes, {t2 - t1:2.2f} sec") 

63 #@+node:ekr.20150514063305.127: *3* To_Python.convertCodeList 

64 def convertCodeList(self, aList): 

65 """The main search/replace method.""" 

66 g.trace('must be defined in subclasses.') 

67 #@+node:ekr.20150514063305.128: *3* To_Python.Utils 

68 #@+node:ekr.20150514063305.129: *4* match... 

69 #@+node:ekr.20150514063305.130: *5* match 

70 def match(self, s, i, pat): 

71 """ 

72 Return True if s[i:] matches the pat string. 

73 

74 We can't use g.match because s is usually a list. 

75 """ 

76 assert pat 

77 j = 0 

78 while i + j < len(s) and j < len(pat): 

79 if s[i + j] == pat[j]: 

80 j += 1 

81 if j == len(pat): 

82 return True 

83 else: 

84 return False 

85 return False 

86 #@+node:ekr.20150514063305.131: *5* match_word 

87 def match_word(self, s, i, pat): 

88 """ 

89 Return True if s[i:] word matches the pat string. 

90 

91 We can't use g.match_word because s is usually a list 

92 and g.match_word uses s.find. 

93 """ 

94 if self.match(s, i, pat): 

95 j = i + len(pat) 

96 if j >= len(s): 

97 return True 

98 if not pat[-1].isalnum(): 

99 # Bug fix 10/16/2012: The pattern terminates the word. 

100 return True 

101 ch = s[j] 

102 return not ch.isalnum() and ch != '_' 

103 return False 
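
# Illustrative usage sketch (hypothetical values): match() and match_word()

# compare a pattern string against a *list* of characters:

#

#   aList = list("foo_bar(x)")

#   self.match(aList, 7, "(")         # True: aList[7] is "("

#   self.match_word(aList, 0, "foo")  # False: "_bar" continues the word

#   self.match_word(aList, 8, "x")    # True: ")" terminates the word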

104 #@+node:ekr.20150514063305.132: *4* insert_not 

105 def insert_not(self, aList): 

106 """Change "!" to "not" except before an equal sign.""" 

107 i = 0 

108 while i < len(aList): 

109 if self.is_string_or_comment(aList, i): 

110 i = self.skip_string_or_comment(aList, i) 

111 elif aList[i] == '!' and not self.match(aList, i + 1, '='): 

112 aList[i : i + 1] = list('not ') 

113 i += 4 

114 else: 

115 i += 1 

116 #@+node:ekr.20150514063305.133: *4* is... 

117 #@+node:ekr.20150514063305.134: *5* is_section_def/ref 

118 def is_section_def(self, p): 

119 return self.is_section_ref(p.h) 

120 

121 def is_section_ref(self, s): 

122 n1 = s.find("<<", 0) 

123 n2 = s.find(">>", 0) 

124 return -1 < n1 < n2 and s[n1 + 2 : n2].strip() 
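
# Illustrative sketch: is_section_ref() recognizes Leo section references:

#

#   self.is_section_ref("<< imports >>")   # truthy: "imports"

#   self.is_section_ref("plain headline")  # False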

125 #@+node:ekr.20150514063305.135: *5* is_string_or_comment 

126 def is_string_or_comment(self, s, i): 

127 # Does range checking. 

128 m = self.match 

129 return m(s, i, "'") or m(s, i, '"') or m(s, i, "//") or m(s, i, "/*") 

130 #@+node:ekr.20150514063305.136: *5* is_ws and is_ws_or_nl 

131 def is_ws(self, ch): 

132 return ch in ' \t' 

133 

134 def is_ws_or_nl(self, ch): 

135 return ch in ' \t\n' 

136 #@+node:ekr.20150514063305.137: *4* prevNonWsChar and prevNonWsOrNlChar 

137 def prevNonWsChar(self, s, i): 

138 i -= 1 

139 while i >= 0 and self.is_ws(s[i]): 

140 i -= 1 

141 return i 

142 

143 def prevNonWsOrNlChar(self, s, i): 

144 i -= 1 

145 while i >= 0 and self.is_ws_or_nl(s[i]): 

146 i -= 1 

147 return i 

148 #@+node:ekr.20150514063305.138: *4* remove... 

149 #@+node:ekr.20150514063305.139: *5* removeBlankLines 

150 def removeBlankLines(self, aList): 

151 i = 0 

152 while i < len(aList): 

153 j = i 

154 while j < len(aList) and aList[j] in " \t": 

155 j += 1 

156 if j == len(aList) or aList[j] == '\n': 

157 del aList[i : j + 1] 

158 else: 

159 i = self.skip_past_line(aList, i) 

160 #@+node:ekr.20150514063305.140: *5* removeExcessWs 

161 def removeExcessWs(self, aList): 

162 i = 0 

163 i = self.removeExcessWsFromLine(aList, i) 

164 while i < len(aList): 

165 if self.is_string_or_comment(aList, i): 

166 i = self.skip_string_or_comment(aList, i) 

167 elif self.match(aList, i, '\n'): 

168 i += 1 

169 i = self.removeExcessWsFromLine(aList, i) 

170 else: i += 1 

171 #@+node:ekr.20150514063305.141: *5* removeExcessWsFromLine 

172 def removeExcessWsFromLine(self, aList, i): 

173 assert(i == 0 or aList[i - 1] == '\n') 

174 i = self.skip_ws(aList, i) # Retain the leading whitespace. 

175 while i < len(aList): 

176 if self.is_string_or_comment(aList, i): 

177 break # safe 

178 elif self.match(aList, i, '\n'): 

179 break 

180 elif self.match(aList, i, ' ') or self.match(aList, i, '\t'): 

181 # Replace all whitespace by one blank. 

182 j = self.skip_ws(aList, i) 

183 assert j > i 

184 aList[i:j] = [' '] 

185 i += 1 # make sure we don't go past a newline! 

186 else: i += 1 

187 return i 

188 #@+node:ekr.20150514063305.142: *5* removeMatchingBrackets 

189 def removeMatchingBrackets(self, aList, i): 

190 j = self.skip_to_matching_bracket(aList, i) 

191 if i < j < len(aList): 

192 c = aList[j] 

193 if c == ')' or c == ']' or c == '}': 

194 del aList[j : j + 1] 

195 del aList[i : i + 1] 

196 return j - 1 

197 return j + 1 

198 return j 

199 #@+node:ekr.20150514063305.143: *5* removeSemicolonsAtEndOfLines 

200 def removeSemicolonsAtEndOfLines(self, aList): 

201 i = 0 

202 while i < len(aList): 

203 if self.is_string_or_comment(aList, i): 

204 i = self.skip_string_or_comment(aList, i) 

205 elif aList[i] == ';': 

206 j = self.skip_ws(aList, i + 1) 

207 if ( 

208 j >= len(aList) or 

209 self.match(aList, j, '\n') or 

210 self.match(aList, j, '#') or 

211 self.match(aList, j, "//") 

212 ): 

213 del aList[i] 

214 else: i += 1 

215 else: i += 1 

216 #@+node:ekr.20150514063305.144: *5* removeTrailingWs 

217 def removeTrailingWs(self, aList): 

218 i = 0 

219 while i < len(aList): 

220 if self.is_ws(aList[i]): 

221 j = i 

222 i = self.skip_ws(aList, i) 

223 assert j < i 

224 if i >= len(aList) or aList[i] == '\n': 

225 # print "removing trailing ws:", `i-j` 

226 del aList[j:i] 

227 i = j 

228 else: i += 1 

229 #@+node:ekr.20150514063305.145: *4* replace... & safe_replace 

230 #@+node:ekr.20150514063305.146: *5* replace 

231 def replace(self, aList, findString, changeString): 

232 """ 

233 Replaces all occurrences of findString by changeString. 

234 changeString may be the empty string, but not None. 

235 """ 

236 if not findString: 

237 return 

238 changeList = list(changeString) 

239 i = 0 

240 while i < len(aList): 

241 if self.match(aList, i, findString): 

242 aList[i : i + len(findString)] = changeList 

243 i += len(changeList) 

244 else: 

245 i += 1 

246 #@+node:ekr.20150514063305.147: *5* replaceComments 

247 def replaceComments(self, aList): 

248 i = 0 

249 while i < len(aList): 

250 # Loop invariant: j > progress at end. 

251 progress = i 

252 if self.match(aList, i, "//"): 

253 aList[i : i + 2] = ['#'] 

254 j = self.skip_past_line(aList, i) 

255 elif self.match(aList, i, "/*"): 

256 j = self.skip_c_block_comment(aList, i) 

257 k = i 

258 while k - 1 >= 0 and aList[k - 1] in ' \t': 

259 k -= 1 

260 assert k == 0 or aList[k - 1] not in ' \t' 

261 lws = ''.join(aList[k:i]) 

262 comment_body = ''.join(aList[i + 2 : j - 2]) 

263 comment_lines = g.splitLines(lws + comment_body) 

264 comment_lines = self.munge_block_comment(comment_lines) 

265 comment = '\n'.join(comment_lines) # A list of lines. 

266 comment_list = list(comment) # A list of characters. 

267 aList[k:j] = comment_list 

268 j = k + len(comment_list) 

269 progress = j - 1 # Disable the check below. 

270 elif self.match(aList, i, '"') or self.match(aList, i, "'"): 

271 j = self.skip_string(aList, i) 

272 else: 

273 j = i + 1 

274 # Defensive programming. 

275 if j == progress: 

276 j += 1 

277 assert j > progress 

278 i = j 

279 #@+node:ekr.20150514063305.148: *6* munge_block_comment 

280 def munge_block_comment(self, comment_lines): 

281 

282 n = len(comment_lines) 

283 assert n > 0 

284 s = comment_lines[0] 

285 junk, w = g.skip_leading_ws_with_indent(s, 0, tab_width=4) 

286 if n == 1: 

287 return [f"{' ' * (w - 1)}# {s.strip()}"] 

288 junk, w = g.skip_leading_ws_with_indent(s, 0, tab_width=4) 

289 result = [] 

290 for i, s in enumerate(comment_lines): 

291 if s.strip(): 

292 result.append(f"{' ' * w}# {s.strip()}") 

293 elif i == n - 1: 

294 pass # Omit the line entirely. 

295 else: 

296 result.append('') # Add a blank line 

297 return result 
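
# Illustrative sketch (hypothetical input): given the lines of a C block

# comment (with the leading whitespace prepended), munge_block_comment()

# produces aligned '#' comment lines:

#

#   self.munge_block_comment(["    first line", "    second line"])

#   # returns ["    # first line", "    # second line"]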

298 #@+node:ekr.20150514063305.149: *5* replaceSectionDefs 

299 def replaceSectionDefs(self, aList): 

300 """Replaces < < x > > = by @c (at the start of lines).""" 

301 if not aList: 

302 return 

303 i = 0 

304 j = self.is_section_def(aList[i]) 

305 if j > 0: 

306 aList[i:j] = list("@c ") 

307 while i < len(aList): 

308 if self.is_string_or_comment(aList, i): 

309 i = self.skip_string_or_comment(aList, i) 

310 elif self.match(aList, i, "\n"): 

311 i += 1 

312 j = self.is_section_def(aList[i]) 

313 if j > i: 

314 aList[i:j] = list("@c ") 

315 else: i += 1 

316 #@+node:ekr.20150514063305.150: *5* safe_replace 

317 def safe_replace(self, aList, findString, changeString): 

318 """ 

319 Replaces occurrences of findString by changeString, 

320 but only outside of C comments and strings. 

321 changeString may be the empty string, but not None. 

322 """ 

323 if not findString: 

324 return 

325 changeList = list(changeString) 

326 i = 0 

327 if findString[0].isalpha(): # use self.match_word 

328 while i < len(aList): 

329 if self.is_string_or_comment(aList, i): 

330 i = self.skip_string_or_comment(aList, i) 

331 elif self.match_word(aList, i, findString): 

332 aList[i : i + len(findString)] = changeList 

333 i += len(changeList) 

334 else: 

335 i += 1 

336 else: #use self.match 

337 while i < len(aList): 

338 if self.match(aList, i, findString): 

339 aList[i : i + len(findString)] = changeList 

340 i += len(changeList) 

341 else: 

342 i += 1 

343 #@+node:ekr.20150514063305.151: *4* skip 

344 #@+node:ekr.20150514063305.152: *5* skip_c_block_comment 

345 def skip_c_block_comment(self, s, i): 

346 assert self.match(s, i, "/*") 

347 i += 2 

348 while i < len(s): 

349 if self.match(s, i, "*/"): 

350 return i + 2 

351 i += 1 

352 return i 

353 #@+node:ekr.20150514063305.153: *5* skip_line 

354 def skip_line(self, s, i): 

355 while i < len(s) and s[i] != '\n': 

356 i += 1 

357 return i 

358 #@+node:ekr.20150514063305.154: *5* skip_past_line 

359 def skip_past_line(self, s, i): 

360 while i < len(s) and s[i] != '\n': 

361 i += 1 

362 if i < len(s) and s[i] == '\n': 

363 i += 1 

364 return i 

365 #@+node:ekr.20150514063305.155: *5* skip_past_word 

366 def skip_past_word(self, s, i): 

367 assert s[i].isalpha() or s[i] == '~' 

368 # Kludge: this helps recognize dtors. 

369 if s[i] == '~': 

370 i += 1 

371 while i < len(s): 

372 ch = s[i] 

373 if ch.isalnum() or ch == '_': 

374 i += 1 

375 else: 

376 break 

377 return i 

378 #@+node:ekr.20150514063305.156: *5* skip_string 

379 def skip_string(self, s, i): 

380 delim = s[i] # handle either single or double-quoted strings 

381 assert delim == '"' or delim == "'" 

382 i += 1 

383 while i < len(s): 

384 if s[i] == delim: 

385 return i + 1 

386 if s[i] == '\\': 

387 i += 2 

388 else: 

389 i += 1 

390 return i 

391 #@+node:ekr.20150514063305.157: *5* skip_string_or_comment 

392 def skip_string_or_comment(self, s, i): 

393 if self.match(s, i, "'") or self.match(s, i, '"'): 

394 j = self.skip_string(s, i) 

395 elif self.match(s, i, "//"): 

396 j = self.skip_past_line(s, i) 

397 elif self.match(s, i, "/*"): 

398 j = self.skip_c_block_comment(s, i) 

399 else: 

400 assert False 

401 return j 

402 #@+node:ekr.20150514063305.158: *5* skip_to_matching_bracket 

403 def skip_to_matching_bracket(self, s, i): 

404 ch = s[i] 

405 if ch == '(': 

406 delim = ')' 

407 elif ch == '{': 

408 delim = '}' 

409 elif ch == '[': 

410 delim = ']' 

411 else: 

412 assert False 

413 i += 1 

414 while i < len(s): 

415 ch = s[i] 

416 if self.is_string_or_comment(s, i): 

417 i = self.skip_string_or_comment(s, i) 

418 elif ch == delim: 

419 return i 

420 elif ch == '(' or ch == '[' or ch == '{': 

421 i = self.skip_to_matching_bracket(s, i) 

422 i += 1 # skip the closing bracket. 

423 else: i += 1 

424 return i 

425 #@+node:ekr.20150514063305.159: *5* skip_ws and skip_ws_and_nl 

426 def skip_ws(self, aList, i): 

427 while i < len(aList): 

428 c = aList[i] 

429 if c == ' ' or c == '\t': 

430 i += 1 

431 else: break 

432 return i 

433 

434 def skip_ws_and_nl(self, aList, i): 

435 while i < len(aList): 

436 c = aList[i] 

437 if c == ' ' or c == '\t' or c == '\n': 

438 i += 1 

439 else: break 

440 return i 

441 #@-others 

442#@-<< class To_Python >> 

443 

444#@+others 

445#@+node:ekr.20210830070921.1: ** function: convert_at_test_nodes 

446def convert_at_test_nodes(c, converter, root, copy_tree=False): # pragma: no cover 

447 """ 

448 Use converter.convert_node() to convert all the @test nodes in the 

449 root's tree into children of a new last top-level node. 

450 """ 

451 if not root: 

452 print('no root') 

453 return 

454 last = c.lastTopLevel() 

455 target = last.insertAfter() 

456 target.h = 'Converted nodes' 

457 count = 0 

458 for p in root.subtree(): 

459 if p.h.startswith('@test'): 

460 converter.convert_node(c, p, target) 

461 if copy_tree and p.hasChildren(): 

462 converter.copy_children(c, p, target.lastChild()) 

463 count += 1 

464 target.expand() 

465 c.redraw(target) 

466 print(f"converted {count} @test nodes") 

467#@+node:ekr.20220416082017.1: ** class AnnotationError 

468class AnnotationError(Exception): 

469 pass 

470#@+node:ekr.20160316111303.1: ** class ConvertCommandsClass 

471class ConvertCommandsClass(BaseEditCommandsClass): 

472 """Leo's file-conversion commands""" 

473 

474 def __init__(self, c): 

475 """Ctor for EditCommandsClass class.""" 

476 # pylint: disable=super-init-not-called 

477 self.c = c 

478 

479 #@+others 

480 #@+node:ekr.20220105151235.1: *3* ccc.add-mypy-annotations 

481 @cmd('add-mypy-annotations') 

482 def addMypyAnnotations(self, event): # pragma: no cover 

483 """ 

484 The add-mypy-annotations command adds mypy annotations to function and 

485 method definitions based on naming conventions. 

486 

487 To use, select an @<file> node for a python external file and execute 

488 add-mypy-annotations. The command rewrites the @<file> tree, adding 

489 mypy annotations for untyped function/method arguments. 

490 

491 The command attempts no type analysis. It uses "Any" as the return type 

492 of functions and methods that do not specify one. As a special case, 

493 the return type of __init__ methods is "None". 

494 

495 @data add-mypy-annotations in leoSettings.leo contains a list of 

496 key/value pairs. Keys are argument names (as used in Leo); values are 

497 mypy type names. 

498 

499 This command adds annotations for kwargs that have a constant initial 

500 value. 

501 """ 

502 self.Add_Mypy_Annotations(self.c).add_annotations() 

503 self.c.bodyWantsFocus() 

504 #@+node:ekr.20220105152521.1: *4* class Add_Mypy_Annotations 

505 

506 class Add_Mypy_Annotations: 

507 

508 """A class that implements the add-mypy-annotations command.""" 

509 

510 changed_lines = 0 

511 default_annotation = 'Any' # The 'DEFAULT' @data add-mypy-annotations key overrides this. 

512 default_return_annotation = 'None' 

513 tag = 'add-mypy-annotations' 

514 types_d: Dict[str, str] = {} # Keys are argument names. Values are mypy types. 

515 

516 def __init__(self, c): 

517 self.c = c 

518 

519 class AnnotationError(Exception): 

520 pass 

521 

522 #@+others 

523 #@+node:ekr.20220105154019.1: *5* ama.init_types_d 

524 def init_types_d(self): # pragma: no cover 

525 """Init the annotations dict.""" 

526 c, d, tag = self.c, self.types_d, self.tag 

527 data = c.config.getData(tag) 

528 if not data: 

529 print(f"@data {tag} not found") 

530 return 

531 for s in data: 

532 try: 

533 key, val = s.split(',', 1) 

534 if key in d: 

535 print(f"{tag}: ignoring duplicate key: {s!r}") 

536 elif key == 'DEFAULT': 

537 self.default_annotation = val.strip() 

538 elif key == 'DEFAULT_RETURN': 

539 self.default_return_annotation = val.strip() 

540 else: 

541 d[key] = val.strip() 

542 except ValueError: 

543 print(f"{tag}: ignoring invalid key/value pair: {s!r}") 

544 self.types_d = d 
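
# Illustrative sketch of an @data add-mypy-annotations body; each line is a

# key,value pair, and the type names shown here are hypothetical:

#

#   DEFAULT, Any

#   DEFAULT_RETURN, None

#   c, Cmdr

#   p, Position

#   s, str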

545 #@+node:ekr.20220105154158.1: *5* ama.add_annotations (entry) 

546 def add_annotations(self): # pragma: no cover 

547 

548 c, p, tag = self.c, self.c.p, self.tag 

549 # Checks. 

550 if not p.isAnyAtFileNode(): 

551 g.es_print(f"{tag}: not an @file node: {p.h}") 

552 return 

553 if not p.h.endswith('.py'): 

554 g.es_print(f"{tag}: not a python file: {p.h}") 

555 return 

556 # Init. 

557 self.init_types_d() 

558 if not self.types_d: 

559 print(f"{self.tag}: no types given") 

560 return 

561 try: 

562 # Convert p and (recursively) all its descendants. 

563 self.convert_node(p) 

564 # Redraw. 

565 c.expandAllSubheads(p) 

566 c.treeWantsFocusNow() 

567 except Exception: 

568 g.es_exception() 

569 #@+node:ekr.20220105155837.4: *5* ama.convert_node 

570 def convert_node(self, p): # pragma: no cover 

571 """Convert p and all its descendants.""" 

572 # Convert p.b. 

573 self.convert_body(p) 

574 # Recursively create all descendants. 

575 for child in p.children(): 

576 self.convert_node(child) 

577 #@+node:ekr.20220105173331.1: *5* ama.convert_body 

578 def convert_body(self, p): 

579 """Convert p.b in place.""" 

580 c = self.c 

581 if not p.b.strip(): 

582 return # pragma: no cover 

583 try: 

584 s = self.def_pat.sub(self.do_def, p.b) 

585 except AnnotationError as e: 

586 print(f"Unchanged: {p.h}: {e!r}") 

587 return 

588 if p.b != s: 

589 self.changed_lines += 1 

590 if not g.unitTesting: 

591 print(f"changed {p.h}") # pragma: no cover 

592 p.setDirty() 

593 c.setChanged() 

594 p.b = s 

595 #@+node:ekr.20220105174453.1: *5* ama.do_def 

596 # The old regex recognizes existing return values. 

597 # def_pat = re.compile(r'^([ \t]*)def[ \t]+(\w+)\s*\((.*?)\)(.*?):(.*?)\n', re.MULTILINE + re.DOTALL) 

598 

599 # Alas, the old regex can put too much in the return value, thereby putting too little in the argument. 

600 # *Warning*: a greedy (MULTILINE) search for arguments would match to the *next* def! 

601 

602 # #2606: End the pattern at the *first* "):" so arguments don't end prematurely. 

603 # Alas, now we can't convert defs that already have return values. 

604 def_pat = re.compile(r'^([ \t]*)def[ \t]+(\w+)\s*\((.*?)\):(.*?)\n', re.MULTILINE + re.DOTALL) 

605 

606 return_dict: Dict[str, str] = { 

607 '__init__': 'None', 

608 '__repr__': 'str', 

609 '__str__': 'str', 

610 } 

611 

612 def do_def(self, m): 

613 lws, name, args, tail = m.group(1), m.group(2), m.group(3), m.group(4) 

614 args = self.do_args(args) 

615 return_val_s = self.return_dict.get(name, self.default_return_annotation) 

616 return_val = f" -> {return_val_s}" 

617 if not tail.strip(): 

618 tail = '' 

619 return f"{lws}def {name}({args}){return_val}:{tail}\n" 

620 #@+node:ekr.20220105174453.2: *5* ama.do_args 

621 arg_pat = re.compile(r'(\s*[\*\w]+\s*)([:,=])?') 

622 comment_pat = re.compile(r'(\s*#.*?\n)') 

623 

624 def do_args(self, args): 

625 """Add type annotations for all arguments.""" 

626 multiline = '\n' in args.strip() 

627 comma = ',\n' if multiline else ', ' 

628 lws = ' ' * 4 if multiline else '' 

629 result: List[str] = [] 

630 i = 0 

631 while i < len(args): 

632 rest = args[i:] 

633 if not rest.strip(): 

634 break 

635 # Handle comments following arguments. 

636 if multiline and result: 

637 m = self.comment_pat.match(rest) 

638 if m: 

639 comment = m.group(0) 

640 i += len(comment) 

641 last = result.pop() 

642 result.append(f"{last.rstrip()} {comment.strip()}\n") 

643 continue 

644 m = self.arg_pat.match(rest) 

645 if not m: # pragma: no cover 

646 g.printObj(args, tag='args') 

647 raise AnnotationError(f"no match for arg_pat.match({rest})") 

648 name1, tail = m.group(1), m.group(2) 

649 name = name1.strip() 

650 i += len(name1) 

651 if name == 'self': 

652 # Don't annotate self, but allow unusual self=expr. 

653 if tail == '=': 

654 arg, i = self.find_arg(args, i) 

655 result.append(f"{lws}{name}={arg}{comma}") 

656 else: 

657 result.append(f"{lws}{name}{comma}") 

658 if i < len(args) and args[i] == ',': 

659 i += 1 

660 elif tail == ':': 

661 # Never change an already-annotated arg. 

662 arg, i = self.find_arg(args, i) 

663 result.append(f"{lws}{name}: {arg}{comma}") 

664 elif tail == '=': 

665 arg, i = self.find_arg(args, i) 

666 if arg == 'None': 

667 # Use a known type for the arg, if it exists. 

668 kind = self.types_d.get(name, self.default_annotation) 

669 else: 

670 kind = self.kind(arg) 

671 result.append(f"{lws}{name}: {kind}={arg}{comma}") 

672 elif tail == ',': 

673 kind = self.types_d.get(name.strip(), self.default_annotation) 

674 result.append(f"{lws}{name}: {kind}{comma}") 

675 i += 1 

676 else: 

677 kind = self.types_d.get(name.strip(), self.default_annotation) 

678 result.append(f"{lws}{name}: {kind}{comma}") 

679 s = ''.join(result) 

680 if multiline: 

681 s = '\n' + s 

682 if not multiline and s.endswith(', '): 

683 s = s[:-2] 

684 return s 

685 #@+node:ekr.20220105190332.1: *5* ama.find_arg 

686 def find_arg(self, s, i): 

687 """ 

688 Scan over type annotations or initializers. 

689 

690 Return (arg, j), the index of the character following the argument starting at s[i]. 

691 """ 

692 assert s[i] in ':=', (i, s[i], s) 

693 i += 1 

694 while i < len(s) and s[i] == ' ': 

695 i += 1 

696 i1 = i 

697 level = 0 # Assume balanced parens, brackets and strings. 

698 while i < len(s): 

699 ch = s[i] 

700 i += 1 

701 if ch in '[{(': 

702 level += 1 

703 elif ch in ')]}': 

704 level -= 1 

705 elif ch in '\'"': 

706 i = g.skip_python_string(s, i - 1) 

707 elif ch == ',' and level == 0: 

708 # Skip the comma, but don't include it in the result. 

709 break 

710 if level > 0: 

711 raise AnnotationError(f"Bad level: {level}, {s!r}") 

712 result = s[i1:i].strip() 

713 if result.endswith(','): 

714 result = result[:-1].strip() 

715 return result, i 
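
# Illustrative sketch (hypothetical input): find_arg() scans one annotation

# or initializer, stopping at a top-level comma:

#

#   self.find_arg("= {'a': 1}, n=2", 0)  # returns ("{'a': 1}", 11)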

716 #@+node:ekr.20220105222028.1: *5* ama.kind 

717 bool_pat = re.compile(r'(True|False)') 

718 float_pat = re.compile(r'[0-9]*\.[0-9]*') 

719 int_pat = re.compile(r'[0-9]+') 

720 string_pat = re.compile(r'[\'"].*[\'"]') 

721 

722 def kind(self, s): 

723 """Return the kind of the initial value s.""" 

724 if self.bool_pat.match(s): 

725 return 'bool' 

726 if self.float_pat.match(s): 

727 return 'float' 

728 if self.int_pat.match(s): 

729 return 'int' 

730 if self.string_pat.match(s): 

731 return 'str' 

732 return self.default_annotation # pragma: no cover 
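
# Illustrative sketch: kind() guesses a type from an initial value:

#

#   self.kind('True')   # 'bool'

#   self.kind('3.14')   # 'float'

#   self.kind('42')     # 'int'

#   self.kind("'abc'")  # 'str'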

733 #@-others 

734 #@+node:ekr.20160316091843.1: *3* ccc.c-to-python 

735 @cmd('c-to-python') 

736 def cToPy(self, event): # pragma: no cover 

737 """ 

738 The c-to-python command converts c or c++ text to python text. 

739 The conversion is not perfect, but it eliminates a lot of tedious 

740 text manipulation. 

741 """ 

742 self.C_To_Python(self.c).go() 

743 self.c.bodyWantsFocus() 

744 #@+node:ekr.20150514063305.160: *4* class C_To_Python (To_Python) 

745 class C_To_Python(To_Python): # pragma: no cover 

746 #@+others 

747 #@+node:ekr.20150514063305.161: *5* ctor & helpers (C_To_Python) 

748 def __init__(self, c): 

749 """Ctor for C_To_Python class.""" 

750 super().__init__(c) 

751 # 

752 # Internal state... 

753 # The class name for the present function. Used to modify ivars. 

754 self.class_name = '' 

755 # List of ivars to be converted to self.ivar 

756 self.ivars = [] 

757 self.get_user_types() 

758 #@+node:ekr.20150514063305.162: *6* get_user_types (C_To_Python) 

759 def get_user_types(self): 

760 c = self.c 

761 self.class_list = c.config.getData('c-to-python-class-list') or [] 

762 self.type_list = ( 

763 c.config.getData('c-to-python-type-list') or 

764 ["char", "void", "short", "long", "int", "double", "float"] 

765 ) 

766 aList = c.config.getData('c-to-python-ivars-dict') 

767 if aList: 

768 self.ivars_dict = self.parse_ivars_data(aList) 

769 else: 

770 self.ivars_dict = {} 

771 #@+node:ekr.20150514063305.163: *6* parse_ivars_data 

772 def parse_ivars_data(self, aList): 

773 d: Dict[str, List[str]] = {} 

774 key = None 

775 aList = [z.strip() for z in aList if z.strip()] 

776 for s in aList: 

777 if s.endswith(':'): 

778 key = s[:-1].strip() 

779 elif key: 

780 ivars = [z.strip() for z in s.split(',') if z.strip()] 

781 aList = d.get(key, []) 

782 aList.extend(ivars) 

783 d[key] = aList 

784 else: 

785 g.error('invalid @data c-to-python-ivars-dict', repr(s)) 

786 return {} 

787 return d 
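
# Illustrative sketch of an @data c-to-python-ivars-dict body (the class

# and ivar names are hypothetical):

#

#   MyClass:

#       x, y, count

#

# parse_ivars_data() would return {'MyClass': ['x', 'y', 'count']}.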

788 #@+node:ekr.20150514063305.164: *5* convertCodeList (C_To_Python) & helpers 

789 def convertCodeList(self, aList): 

790 r, sr = self.replace, self.safe_replace 

791 # First... 

792 r(aList, "\r", '') 

793 # self.convertLeadingBlanks(aList) # Now done by indent. 

794 # if leoFlag: replaceSectionDefs(aList) 

795 self.mungeAllFunctions(aList) 

796 # Next... 

797 if 1: 

798 # CC2 stuff: 

799 sr(aList, "TRACEPB", "if trace: g.trace") 

800 sr(aList, "TRACEPN", "if trace: g.trace") 

801 sr(aList, "TRACEPX", "if trace: g.trace") 

802 sr(aList, "TICKB", "if trace: g.trace") 

803 sr(aList, "TICKN", "if trace: g.trace") 

804 sr(aList, "TICKX", "if trace: g.trace") 

805 sr(aList, "g.trace(ftag,", "g.trace(") 

806 sr(aList, "ASSERT_TRACE", "assert") 

807 sr(aList, "ASSERT", "assert") 

808 sr(aList, " -> ", '.') 

809 sr(aList, "->", '.') 

810 sr(aList, " . ", '.') 

811 sr(aList, "this.self", "self") 

812 sr(aList, "{", '') 

813 sr(aList, "}", '') 

814 sr(aList, "#if", "if") 

815 sr(aList, "#else", "else") 

816 sr(aList, "#endif", '') 

817 sr(aList, "else if", "elif") 

818 sr(aList, "else", "else:") 

819 sr(aList, "&&", " and ") 

820 sr(aList, "||", " or ") 

821 sr(aList, "TRUE", "True") 

822 sr(aList, "FALSE", "False") 

823 sr(aList, "NULL", "None") 

824 sr(aList, "this", "self") 

825 sr(aList, "try", "try:") 

826 sr(aList, "catch", "except:") 

827 # if leoFlag: sr(aList, "@code", "@c") 

828 # Next... 

829 self.handle_all_keywords(aList) 

830 self.insert_not(aList) 

831 # after processing for keywords 

832 self.removeSemicolonsAtEndOfLines(aList) 

833 # Last... 

834 # if firstPart and leoFlag: removeLeadingAtCode(aList) 

835 self.removeBlankLines(aList) 

836 self.removeExcessWs(aList) 

837 # your taste may vary: in Python I don't like extra whitespace 

838 sr(aList, " :", ":") 

839 sr(aList, ", ", ",") 

840 sr(aList, " ,", ",") 

841 sr(aList, " (", "(") 

842 sr(aList, "( ", "(") 

843 sr(aList, " )", ")") 

844 sr(aList, ") ", ")") 

845 sr(aList, "@language c", "@language python") 

846 self.replaceComments(aList) # should follow all calls to safe_replace 

847 self.removeTrailingWs(aList) 

848 r(aList, "\t ", "\t") # happens when deleting declarations. 

849 #@+node:ekr.20150514063305.165: *6* handle_all_keywords 

850 def handle_all_keywords(self, aList): 

851 """ 

852 converts if ( x ) to if x: 

853 converts while ( x ) to while x: 

854 """ 

855 i = 0 

856 while i < len(aList): 

857 if self.is_string_or_comment(aList, i): 

858 i = self.skip_string_or_comment(aList, i) 

859 elif ( 

860 self.match_word(aList, i, "if") or 

861 self.match_word(aList, i, "while") or 

862 self.match_word(aList, i, "for") or 

863 self.match_word(aList, i, "elif") 

864 ): 

865 i = self.handle_keyword(aList, i) 

866 else: 

867 i += 1 

868 # print "handAllKeywords2:", ''.join(aList) 

869 #@+node:ekr.20150514063305.166: *7* handle_keyword 

870 def handle_keyword(self, aList, i): 

871 if self.match_word(aList, i, "if"): 

872 i += 2 

873 elif self.match_word(aList, i, "elif"): 

874 i += 4 

875 elif self.match_word(aList, i, "while"): 

876 i += 5 

877 elif self.match_word(aList, i, "for"): 

878 i += 3 

879 else: 

880 assert False 

881 # Make sure one space follows the keyword. 

882 k = i 

883 i = self.skip_ws(aList, i) 

884 if k == i: 

885 c = aList[i] 

886 aList[i : i + 1] = [' ', c] 

887 i += 1 

888 # Remove '(' and matching ')' and add a ':' 

889 if aList[i] == "(": 

890 # Look ahead. Don't remove if we span a line. 

891 j = self.skip_to_matching_bracket(aList, i) 

892 k = i 

893 found = False 

894 while k < j and not found: 

895 found = aList[k] == '\n' 

896 k += 1 

897 if not found: 

898 j = self.removeMatchingBrackets(aList, i) 

899 if i < j < len(aList): 

900 ch = aList[j] 

901 aList[j : j + 1] = [ch, ":", " "] 

902 j = j + 2 

903 return j 

904 return i 

905 #@+node:ekr.20150514063305.167: *6* mungeAllFunctions 

906 def mungeAllFunctions(self, aList): 

907 """Scan for a '{' at the top level that is preceeded by ')' """ 

908 prevSemi = 0 # Previous semicolon: header contains all previous text 

909 i = 0 

910 firstOpen = None 

911 while i < len(aList): 

912 progress = i 

913 if self.is_string_or_comment(aList, i): 

914 j = self.skip_string_or_comment(aList, i) 

915 prevSemi = j 

916 elif self.match(aList, i, '('): 

917 if not firstOpen: 

918 firstOpen = i 

919 j = i + 1 

920 elif self.match(aList, i, '#'): 

921 # At this point, it is a preprocessor directive. 

922 j = self.skip_past_line(aList, i) 

923 prevSemi = j 

924 elif self.match(aList, i, ';'): 

925 j = i + 1 

926 prevSemi = j 

927 elif self.match(aList, i, "{"): 

928 j = self.handlePossibleFunctionHeader(aList, i, prevSemi, firstOpen) 

929 prevSemi = j 

930 firstOpen = None # restart the scan 

931 else: 

932 j = i + 1 

933 # Handle unusual cases. 

934 if j <= progress: 

935 j = progress + 1 

936 assert j > progress 

937 i = j 

938 #@+node:ekr.20150514063305.168: *7* handlePossibleFunctionHeader 

939 def handlePossibleFunctionHeader(self, aList, i, prevSemi, firstOpen): 

940 """ 

941 Converts function header lines from c++ format to python format. 

942 That is, converts 

943 x1..nn w::y ( t1 z1,..tn zn) { 

944 to 

945 def y (z1,..zn): { 

946 """ 

947 assert self.match(aList, i, "{") 

948 prevSemi = self.skip_ws_and_nl(aList, prevSemi) 

949 close = self.prevNonWsOrNlChar(aList, i) 

950 if close < 0 or aList[close] != ')': 

951 # Should not increase *Python* indent. 

952 return 1 + self.skip_to_matching_bracket(aList, i) 

953 if not firstOpen: 

954 return 1 + self.skip_to_matching_bracket(aList, i) 

955 close2 = self.skip_to_matching_bracket(aList, firstOpen) 

956 if close2 != close: 

957 return 1 + self.skip_to_matching_bracket(aList, i) 

958 open_paren = firstOpen 

959 assert aList[open_paren] == '(' 

960 head = aList[prevSemi:open_paren] 

961 # do nothing if the head starts with "if", "for" or "while" 

962 k = self.skip_ws(head, 0) 

963 if k >= len(head) or not head[k].isalpha(): 

964 return 1 + self.skip_to_matching_bracket(aList, i) 

965 kk = self.skip_past_word(head, k) 

966 if kk > k: 

967 headString = ''.join(head[k:kk]) 

968 # C keywords that might be followed by '{' 

969 # print "headString:", headString 

970 if headString in [ 

971 "class", "do", "for", "if", "struct", "switch", "while"]: 

972 return 1 + self.skip_to_matching_bracket(aList, i) 

973 args = aList[open_paren : close + 1] 

974 k = 1 + self.skip_to_matching_bracket(aList, i) 

975 body = aList[close + 1 : k] 

976 head = self.massageFunctionHead(head) 

977 args = self.massageFunctionArgs(args) 

978 body = self.massageFunctionBody(body) 

979 result = [] 

980 if head: 

981 result.extend(head) 

982 if args: 

983 result.extend(args) 

984 if body: 

985 result.extend(body) 

986 aList[prevSemi:k] = result 

987 return prevSemi + len(result) 

988 #@+node:ekr.20150514063305.170: *7* massageFunctionHead (sets .class_name) 

989 def massageFunctionHead(self, head): 

990 result: List[Any] = [] 

991 prevWord = [] 

992 self.class_name = '' 

993 i = 0 

994 while i < len(head): 

995 i = self.skip_ws_and_nl(head, i) 

996 if i < len(head) and head[i].isalpha(): 

997 result = [] 

998 j = self.skip_past_word(head, i) 

999 prevWord = head[i:j] 

1000 i = j 

1001 # look for ::word2 

1002 i = self.skip_ws(head, i) 

1003 if self.match(head, i, "::"): 

1004 # Set the global to the class name. 

1005 self.class_name = ''.join(prevWord) 

1006 # print("class name:", self.class_name) 

1007 i = self.skip_ws(head, i + 2) 

1008 if i < len(head) and (head[i] == '~' or head[i].isalpha()): 

1009 j = self.skip_past_word(head, i) 

1010 if head[i:j] == prevWord: 

1011 result.extend('__init__') 

1012 elif head[i] == '~' and head[i + 1 : j] == prevWord: 

1013 result.extend('__del__') 

1014 else: 

1015 # result.extend(list('::')) 

1016 result.extend(head[i:j]) 

1017 i = j 

1018 else: 

1019 result.extend(prevWord) 

1020 else: i += 1 

1021 finalResult = list("def ") 

1022 finalResult.extend(result) 

1023 return finalResult 
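
# Illustrative sketch (hypothetical C++ heads): massageFunctionHead turns

#

#   void MyClass::clear   into   def clear

#   MyClass::MyClass      into   def __init__

#   MyClass::~MyClass     into   def __del__

#

# and records "MyClass" in self.class_name.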

1024 #@+node:ekr.20150514063305.169: *7* massageFunctionArgs 

1025 def massageFunctionArgs(self, args): 

1026 assert args[0] == '(' 

1027 assert args[-1] == ')' 

1028 result = ['('] 

1029 lastWord = [] 

1030 if self.class_name: 

1031 for item in list("self,"): 

1032 result.append(item)  # may add an extra comma 

1033 i = 1 

1034 while i < len(args): 

1035 i = self.skip_ws_and_nl(args, i) 

1036 ch = args[i] 

1037 if ch.isalpha(): 

1038 j = self.skip_past_word(args, i) 

1039 lastWord = args[i:j] 

1040 i = j 

1041 elif ch == ',' or ch == ')': 

1042 for item in lastWord: 

1043 result.append(item) 

1044 if lastWord and ch == ',': 

1045 result.append(',') 

1046 lastWord = [] 

1047 i += 1 

1048 else: i += 1 

1049 if result[-1] == ',': 

1050 del result[-1] 

1051 result.append(')') 

1052 result.append(':') 

1053 # print "new args:", ''.join(result) 

1054 return result 
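
# Illustrative sketch (hypothetical input): with self.class_name set,

# massageFunctionArgs turns the characters of "(int n, char *s)"

# into "(self,n,s):".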

1055 #@+node:ekr.20150514063305.171: *7* massageFunctionBody & helpers 

1056 def massageFunctionBody(self, body): 

1057 body = self.massageIvars(body) 

1058 body = self.removeCasts(body) 

1059 body = self.removeTypeNames(body) 

1060 body = self.dedentBlocks(body) 

1061 return body 

1062 #@+node:ekr.20150514063305.172: *8* dedentBlocks 

1063 def dedentBlocks(self, body): 

1064 """Look for '{' preceded by '{' or '}' or ';' 

1065 (with intervening whitespace and comments). 

1066 """ 

1067 i = 0 

1068 while i < len(body): 

1069 j = i 

1070 ch = body[i] 

1071 if self.is_string_or_comment(body, i): 

1072 j = self.skip_string_or_comment(body, i) 

1073 elif ch in '{};': 

1074 # Look ahead for '{' 

1075 j += 1 

1076 while True: 

1077 k = j 

1078 j = self.skip_ws_and_nl(body, j) 

1079 if self.is_string_or_comment(body, j): 

1080 j = self.skip_string_or_comment(body, j) 

1081 if k == j: 

1082 break 

1083 assert k < j 

1084 if self.match(body, j, '{'): 

1085 k = j 

1086 j = self.skip_to_matching_bracket(body, j) 

1087 m = '# <Start dedented block>...' 

1088 body[k : k + 1] = list(m) 

1089 j += len(m) 

1090 while k < j: 

1091 progress = k 

1092 if body[k] == '\n': 

1093 k += 1 

1094 spaces = 0 

1095 while spaces < 4 and k < j: 

1096 if body[k] == ' ': 

1097 spaces += 1 

1098 k += 1 

1099 else: 

1100 break 

1101 if spaces > 0: 

1102 del body[k - spaces : k] 

1103 k -= spaces 

1104 j -= spaces 

1105 else: 

1106 k += 1 

1107 assert progress < k 

1108 m = ' # <End dedented block>' 

1109 body[j : j + 1] = list(m) 

1110 j += len(m) 

1111 else: 

1112 j = i + 1 

1113 # Defensive programming. 

1114 if i == j: 

1115 j += 1 

1116 assert i < j 

1117 i = j 

1118 return body 

1119 #@+node:ekr.20150514063305.173: *8* massageIvars 

1120 def massageIvars(self, body): 

1121 ivars = self.ivars_dict.get(self.class_name, []) 

1122 i = 0 

1123 while i < len(body): 

1124 if self.is_string_or_comment(body, i): 

1125 i = self.skip_string_or_comment(body, i) 

1126 elif body[i].isalpha(): 

1127 j = self.skip_past_word(body, i) 

1128 word = ''.join(body[i:j]) 

1129 # print "looking up:", word 

1130 if word in ivars: 

1131 # replace word by self.word 

1132 # print "replacing", word, " by self.", word 

1133 word = "self." + word 

1134 word = list(word) # type:ignore 

1135 body[i:j] = word 

1136 delta = len(word) - (j - i) 

1137 i = j + delta 

1138 else: i = j 

1139 else: i += 1 

1140 return body 

1141 #@+node:ekr.20150514063305.174: *8* removeCasts 

1142 def removeCasts(self, body): 

1143 i = 0 

1144 while i < len(body): 

1145 if self.is_string_or_comment(body, i): 

1146 i = self.skip_string_or_comment(body, i) 

1147 elif self.match(body, i, '('): 

1148 start = i 

1149 i = self.skip_ws(body, i + 1) 

1150 if body[i].isalpha(): 

1151 j = self.skip_past_word(body, i) 

1152 word = ''.join(body[i:j]) 

1153 i = j 

1154 if word in self.class_list or word in self.type_list: 

1155 i = self.skip_ws(body, i) 

1156 while self.match(body, i, '*'): 

1157 i += 1 

1158 i = self.skip_ws(body, i) 

1159 if self.match(body, i, ')'): 

1160 i += 1 

1161 # print "removing cast:", ''.join(body[start:i]) 

1162 del body[start:i] 

1163 i = start 

1164 else: i += 1 

1165 return body 

1166 #@+node:ekr.20150514063305.175: *8* removeTypeNames 

1167 def removeTypeNames(self, body): 

1168 """Do _not_ remove type names when preceeded by new.""" 

1169 i = 0 

1170 while i < len(body): 

1171 if self.is_string_or_comment(body, i): 

1172 i = self.skip_string_or_comment(body, i) 

1173 elif self.match_word(body, i, "new"): 

1174 i = self.skip_past_word(body, i) 

1175 i = self.skip_ws(body, i) 

1176 # don't remove what follows new. 

1177 if body[i].isalpha(): 

1178 i = self.skip_past_word(body, i) 

1179 elif body[i].isalpha(): 

1180 j = self.skip_past_word(body, i) 

1181 word = ''.join(body[i:j]) 

1182 if word in self.class_list or word in self.type_list: 

1183 j = self.skip_ws(body, j) 

1184 while self.match(body, j, '*'): 

1185 j += 1 

1186 # print "Deleting type name:", ''.join(body[i:j]) 

1187 j = self.skip_ws(body, j) 

1188 del body[i:j] 

1189 else: 

1190 i = j 

1191 else: i += 1 

1192 return body 

1193 #@-others 

1194 #@+node:ekr.20160111190632.1: *3* ccc.makeStubFiles 

1195 @cmd('make-stub-files') 

1196 def make_stub_files(self, event): # pragma: no cover 

1197 """ 

1198 Make stub files for all nearby @<file> nodes. 

1199 Take configuration settings from @x stub-y nodes. 

1200 """ 

1201 #@+others 

1202 #@+node:ekr.20160213070235.1: *4* class MakeStubFileAdapter 

1203 class MakeStubFileAdapter: # pragma: no cover 

1204 """ 

1205 A class that adapts leo/external/make_stub_files.py to Leo. 

1206 

1207 Settings are taken from Leo settings nodes, not a .cfg file. 

1208 """ 

1209 #@+others 

1210 #@+node:ekr.20160213070235.2: *5* msf.ctor & helpers 

1211 def __init__(self, c): 

1212 """MakeStubFile.ctor. From StandAloneMakeStubFile.ctor.""" 

1213 self.c = c 

1214 self.msf = msf = g.import_module('make_stub_files') 

1215 x = msf.StandAloneMakeStubFile() # x is used *only* to init ivars. 

1216 # Ivars set on the command line... 

1217 self.config_fn = None 

1218 self.enable_unit_tests = False 

1219 self.files = [] # May also be set in the config file. 

1220 self.output_directory = self.finalize( 

1221 c.config.getString('stub-output-directory') or '.') 

1222 self.output_fn = None 

1223 self.overwrite = c.config.getBool('stub-overwrite', default=False) 

1224 self.trace_matches = c.config.getBool( 

1225 'stub-trace-matches', default=False) 

1226 self.trace_patterns = c.config.getBool( 

1227 'stub-trace-patterns', default=False) 

1228 self.trace_reduce = c.config.getBool('stub-trace-reduce', default=False) 

1229 self.trace_visitors = c.config.getBool( 

1230 'stub-trace-visitors', default=False) 

1231 self.update_flag = c.config.getBool('stub-update', default=False) 

1232 self.verbose = c.config.getBool('stub-verbose', default=False) 

1233 self.warn = c.config.getBool('stub-warn', default=False) 

1234 # Pattern lists & dicts, set by config sections... 

1235 self.patterns_dict = {} 

1236 self.names_dict = {} 

1237 self.def_patterns = self.scan_patterns('stub-def-name-patterns') 

1238 self.general_patterns = self.scan_patterns('stub-general-patterns') 

1239 self.prefix_lines = self.scan('stub-prefix-lines') 

1240 self.regex_patterns = self.scan_patterns('stub-regex-patterns') 

1241 # Complete the dicts. 

1242 x.make_patterns_dict() 

1243 self.patterns_dict = x.patterns_dict 

1244 self.op_name_dict = x.op_name_dict = x.make_op_name_dict() 

1245 # Copy the ivars. 

1246 x.def_patterns = self.def_patterns 

1247 x.general_patterns = self.general_patterns 

1248 x.regex_patterns = self.regex_patterns 

1249 x.prefix_lines = self.prefix_lines 

1250 #@+node:ekr.20160213070235.3: *6* msf.scan 

1251 def scan(self, kind): 

1252 """Return a list of *all* lines from an @data node, including comments.""" 

1253 c = self.c 

1254 aList = c.config.getData(kind, strip_comments=False, strip_data=False) 

1255 if not aList: 

1256 g.trace(f"warning: no @data {kind} node") 

1257 return aList 

1258 #@+node:ekr.20160213070235.4: *6* msf.scan_d 

1259 def scan_d(self, kind): 

1260 """Return a dict created from an @data node of the given kind.""" 

1261 c = self.c 

1262 aList = c.config.getData(kind, strip_comments=True, strip_data=True) 

1263 d = {} 

1264 if aList is None: 

1265 g.trace(f"warning: no @data {kind} node") 

1266 for s in aList or []: 

1267 name, value = s.split(':', 1) 

1268 d[name.strip()] = value.strip() 

1269 return d 

1270 #@+node:ekr.20160213070235.5: *6* msf.scan_patterns 

1271 def scan_patterns(self, kind): 

1272 """Parse the config section into a list of patterns, preserving order.""" 

1273 d = self.scan_d(kind) 

1274 aList = [] 

1275 seen = set() 

1276 for key in d: 

1277 value = d.get(key) 

1278 if key in seen: 

1279 g.trace('duplicate key', key) 

1280 else: 

1281 seen.add(key) 

1282 aList.append(self.msf.Pattern(key, value)) 

1283 return aList 

1284 #@+node:ekr.20160213070235.6: *5* msf.finalize 

1285 def finalize(self, fn): 

1286 """Finalize and regularize a filename.""" 

1287 return g.os_path_normpath(g.os_path_abspath(g.os_path_expanduser(fn))) 

1288 #@+node:ekr.20160213070235.7: *5* msf.make_stub_file 

1289 def make_stub_file(self, p): 

1290 """Make a stub file in ~/stubs for the @<file> node at p.""" 

1291 import ast 

1292 assert p.isAnyAtFileNode() 

1293 c = self.c 

1294 fn = p.anyAtFileNodeName() 

1295 if not fn.endswith('.py'): 

1296 g.es_print('not a python file', fn) 

1297 return 

1298 abs_fn = g.fullPath(c, p) 

1299 if not g.os_path_exists(abs_fn): 

1300 g.es_print('not found', abs_fn) 

1301 return 

1302 if g.os_path_exists(self.output_directory): 

1303 base_fn = g.os_path_basename(fn) 

1304 out_fn = g.os_path_finalize_join(self.output_directory, base_fn) 

1305 else: 

1306 g.es_print('not found', self.output_directory) 

1307 return 

1308 out_fn = out_fn[:-3] + '.pyi' 

1309 out_fn = g.os_path_normpath(out_fn) 

1310 self.output_fn = out_fn # compatibility with stand-alone script 

1311 s = open(abs_fn).read() 

1312 node = ast.parse(s, filename=fn, mode='exec') 

1313 # Make the traverser *after* creating output_fn and output_directory ivars. 

1314 x = self.msf.StubTraverser(controller=self) 

1315 x.output_fn = self.output_fn 

1316 x.output_directory = self.output_directory 

1317 x.trace_matches = self.trace_matches 

1318 x.trace_patterns = self.trace_patterns 

1319 x.trace_reduce = self.trace_reduce 

1320 x.trace_visitors = self.trace_visitors 

1321 x.run(node) 

1322 #@+node:ekr.20160213070235.8: *5* msf.run 

1323 def run(self, p): 

1324 """Make stub files for all files in p's tree.""" 

1325 if p.isAnyAtFileNode(): 

1326 self.make_stub_file(p) 

1327 return 

1328 # First, look down tree. 

1329 after, p2 = p.nodeAfterTree(), p.firstChild() 

1330 found = False 

1331 while p2 and p != after: 

1332 if p2.isAnyAtFileNode(): 

1333 self.make_stub_file(p2) 

1334 p2.moveToNext() 

1335 found = True 

1336 else: 

1337 p2.moveToThreadNext() 

1338 if not found: 

1339 # Look up the tree. 

1340 for p2 in p.parents(): 

1341 if p2.isAnyAtFileNode(): 

1342 self.make_stub_file(p2) 

1343 break 

1344 else: 

1345 g.es('no files found in tree:', p.h) 

1346 #@-others 

1347 #@-others 

1348 MakeStubFileAdapter(self.c).run(self.c.p) 

1349 #@+node:ekr.20160316091923.1: *3* ccc.python-to-coffeescript 

1350 @cmd('python-to-coffeescript') 

1351 def python2coffeescript(self, event): # pragma: no cover 

1352 """ 

1353 Converts python text to coffeescript text. The conversion is not 

1354 perfect, but it eliminates a lot of tedious text manipulation. 

1355 """ 

1356 #@+others 

1357 #@+node:ekr.20160316092837.1: *4* class Python_To_Coffeescript_Adapter 

1358 class Python_To_Coffeescript_Adapter: # pragma: no cover 

1359 """An interface class between Leo and leo/external/py2cs.py.""" 

1360 #@+others 

1361 #@+node:ekr.20160316112717.1: *5* py2cs.ctor 

1362 def __init__(self, c): 

1363 """Ctor for Python_To_Coffeescript_Adapter class.""" 

1364 self.c = c 

1365 self.files = [] 

1366 self.output_directory = self.finalize( 

1367 c.config.getString('py2cs-output-directory')) 

1368 # self.output_fn = None 

1369 self.overwrite = c.config.getBool('py2cs-overwrite', default=False) 

1370 # Connect to the external module. 

1371 self.py2cs = g.import_module('leo.external.py2cs') 

1372 #@+node:ekr.20160316093019.1: *5* py2cs.main 

1373 def main(self): 

1374 """Main line for Python_To_CoffeeScript class.""" 

1375 if self.py2cs: 

1376 self.run() 

1377 else: 

1378 g.es_print('can not load py2cs.py') 

1379 #@+node:ekr.20160316094011.7: *5* py2cs.finalize 

1380 def finalize(self, fn): 

1381 """Finalize and regularize a filename.""" 

1382 return g.os_path_normpath(g.os_path_abspath(g.os_path_expanduser(fn))) 

1383 #@+node:ekr.20160316094011.8: *5* py2cs.to_coffeescript 

1384 def to_coffeescript(self, p): 

1385 """Convert the @<file> node at p to a .coffee file.""" 

1386 assert p.isAnyAtFileNode() 

1387 c = self.c 

1388 fn = p.anyAtFileNodeName() 

1389 if not fn.endswith('.py'): 

1390 g.es_print('not a python file', fn) 

1391 return 

1392 abs_fn = g.fullPath(c, p) 

1393 if not g.os_path_exists(abs_fn): 

1394 g.es_print('not found', abs_fn) 

1395 return 

1396 if g.os_path_exists(self.output_directory): 

1397 base_fn = g.os_path_basename(fn) 

1398 out_fn = g.os_path_finalize_join(self.output_directory, base_fn) 

1399 else: 

1400 g.es_print('not found', self.output_directory) 

1401 return 

1402 out_fn = out_fn[:-3] + '.coffee' 

1403 out_fn = g.os_path_normpath(out_fn) 

1404 s = open(abs_fn).read() 

1405 # s = self.strip_sentinels(s) 

1406 if 0: 

1407 for z in g.splitLines(s)[:20]: 

1408 print(z.rstrip()) 

1409 x = self.py2cs.MakeCoffeeScriptController() 

1410 # copy ivars and run. 

1411 x.enable_unit_tests = False 

1412 x.files = [abs_fn,] 

1413 x.output_directory = self.output_directory 

1414 x.overwrite = self.overwrite 

1415 x.make_coffeescript_file(abs_fn, s=s) 

1416 #@+node:ekr.20160316094011.9: *5* py2cs.run 

1417 def run(self): 

1418 """Create .coffee files for all @<file> nodes in p's tree.""" 

1419 p = c.p 

1420 if p.isAnyAtFileNode(): 

1421 self.to_coffeescript(p) 

1422 return 

1423 # First, look down tree. 

1424 after, p2 = p.nodeAfterTree(), p.firstChild() 

1425 found = False 

1426 while p2 and p != after: 

1427 if p2.isAnyAtFileNode(): 

1428 self.to_coffeescript(p2) 

1429 p2.moveToNext() 

1430 found = True 

1431 else: 

1432 p2.moveToThreadNext() 

1433 if not found: 

1434 # Look up the tree. 

1435 for p2 in p.parents(): 

1436 if p2.isAnyAtFileNode(): 

1437 self.to_coffeescript(p2) 

1438 return 

1439 g.es_print('no files found in tree:', p.h) 

1440 #@+node:ekr.20160316141812.1: *5* py2cs.strip_sentinels 

1441 def strip_sentinels(self, s): 

1442 """ 

1443 Strip s of all sentinel lines. 

1444 This may be dubious because it destroys outline structure. 

1445 """ 

1446 delims = ['#', None, None] 

1447 return ''.join( 

1448 [z for z in g.splitLines(s) if not g.is_sentinel(z, delims)]) 

1449 #@-others 

1450 #@-others 

1451 c = self.c 

1452 Python_To_Coffeescript_Adapter(c).main() 

1453 c.bodyWantsFocus() 

1454 #@+node:ekr.20211013080132.1: *3* ccc.python-to-typescript 

1455 @cmd('python-to-typescript') 

1456 def pythonToTypescriptCommand(self, event): # pragma: no cover 

1457 """ 

1458 The python-to-typescript command converts python to typescript text. 

1459 The conversion is not perfect, but it eliminates a lot of tedious text 

1460 manipulation. 

1461 

1462 To use, select any @<file> node and execute python-to-typescript. The 

1463 command creates (safe!) results in the last top-level node of the 

1464 outline. 

1465 

1466 The command attempts no type analysis. It uses "void" as the type of 

1467 all functions and methods. However, the script will annotate 

1468 function/method arguments: 

1469 

1470 @data python-to-typescript-types in leoSettings.leo contains a list of 

1471 key/value pairs. Keys are argument names (as used in Leo); values are 

1472 typescript type names. 

1473 """ 

1474 c = self.c 

1475 self.PythonToTypescript(c).convert(c.p) 

1476 self.c.bodyWantsFocus() 

1477 #@+node:ekr.20211013080132.2: *4* class PythonToTypescript 

1478 #@@nobeautify 

1479 class PythonToTypescript: # pragma: no cover 

1480 

1481 # The handlers are clear as they are. 

1482 # pylint: disable=no-else-return 

1483 

1484 # Keys are argument names. Values are typescript types. 

1485 # Typescript can infer types of initialized kwargs. 

1486 types_d: Dict[str, str] = {} 

1487 

1488 #@+others 

1489 #@+node:ekr.20211020162251.1: *5* py2ts.ctor 

1490 def __init__(self, c, alias=None): 

1491 self.c = c 

1492 self.alias = alias # For scripts. An alias for 'self'. 

1493 data = c.config.getData('python-to-typescript-types') or [] 

1494 for line in data: 

1495 try: 

1496 key, value = line.split(',') 

1497 self.types_d[key.strip()] = value.strip() 

1498 except Exception: 

1499 g.es_print('ignoring bad key/value pair in @data python-to-typescript-types') 

1500 g.es_print(repr(line)) 

1501 #@+node:ekr.20211013081549.1: *5* py2ts.convert 

1502 def convert(self, p): 

1503 """ 

1504 The main line. 

1505 

1506 Convert p and all descendants as a child of a new last top-level node. 

1507 """ 

1508 c = self.c 

1509 # Create the parent node. It will be deleted. 

1510 parent = c.lastTopLevel().insertAfter() 

1511 # Convert p and all its descendants. 

1512 try: 

1513 self.convert_node(p, parent) 

1514 # Promote the translated node. 

1515 parent.promote() 

1516 parent.doDelete() 

1517 p = c.lastTopLevel() 

1518 p.h = p.h.replace('.py', '.ts').replace('@', '@@') 

1519 c.redraw(p) 

1520 c.expandAllSubheads(p) 

1521 c.treeWantsFocusNow() 

1522 except Exception: 

1523 g.es_exception() 

1524 #@+node:ekr.20211013101327.1: *5* py2ts.convert_node 

1525 def convert_node(self, p, parent): 

1526 # Create a copy of p as the last child of parent. 

1527 target = parent.insertAsLastChild() 

1528 target.h = p.h # The caller will rename this node. 

1529 # Convert p.b into target.b 

1530 self.convert_body(p, target) 

1531 # Recursively create all descendants. 

1532 for child in p.children(): 

1533 self.convert_node(child, target) 

1534 #@+node:ekr.20211013102209.1: *5* py2ts.convert_body, handlers &helpers 

1535 patterns: Optional[Tuple] = None 

1536 

1537 def convert_body(self, p, target): 

1538 """ 

1539 Convert p.b into target.b. 

1540 

1541 This is the heart of the algorithm. 

1542 """ 

1543 # Calculate this table only once. 

1544 if not self.patterns: 

1545 self.patterns = ( 

1546 # Head: order matters. 

1547 (self.comment_pat, self.do_comment), 

1548 (self.docstring_pat, self.do_docstring), 

1549 (self.section_ref_pat, self.do_section_ref), 

1550 # Middle: order doesn't matter. 

1551 (self.class_pat, self.do_class), 

1552 (self.def_pat, self.do_def), 

1553 (self.elif_pat, self.do_elif), 

1554 (self.else_pat, self.do_else), 

1555 (self.except_pat, self.do_except), 

1556 (self.finally_pat, self.do_finally), 

1557 (self.for_pat, self.do_for), 

1558 (self.if_pat, self.do_if), 

1559 (self.import_pat, self.do_import), 

1560 (self.try_pat, self.do_try), 

1561 (self.while_pat, self.do_while), 

1562 (self.with_pat, self.do_with), 

1563 # Tail: order matters. 

1564 (self.trailing_comment_pat, self.do_trailing_comment) 

1565 ) 

1566 # The loop may change lines, but each line is scanned only once. 

1567 i, lines = 0, g.splitLines(self.pre_pass(p.b)) 

1568 old_lines = lines[:] 

1569 while i < len(lines): 

1570 progress = i 

1571 line = lines[i] 

1572 for (pattern, handler) in self.patterns: 

1573 m = pattern.match(line) 

1574 if m: 

1575 i = handler(i, lines, m, p) # May change lines. 

1576 break 

1577 else: 

1578 self.do_operators(i, lines, p) 

1579 self.do_semicolon(i, lines, p) 

1580 i += 1 

1581 assert progress < i 

1582 if False and g.unitTesting and lines != old_lines: 

1583 print(f"\nchanged {p.h}:\n") 

1584 for z in lines: 

1585 print(z.rstrip()) 

1586 # Run the post-pass 

1587 target.b = self.post_pass(lines) 

1588 # Munge target.h. 

1589 target.h = target.h.replace('__init__', 'constructor') 

1590 #@+node:ekr.20211018154815.1: *6* handlers 

1591 #@+node:ekr.20211014023141.1: *7* py2ts.do_class 

1592 class_pat = re.compile(r'^([ \t]*)class(.*):(.*)\n') 

1593 

1594 def do_class(self, i, lines, m, p): 

1595 

1596 j = self.find_indented_block(i, lines, m, p) 

1597 lws, base, tail = m.group(1), m.group(2).strip(), m.group(3).strip() 

1598 base_s = f" {base} " if base else '' 

1599 tail_s = f" // {tail}" if tail else '' 

1600 lines[i] = f"{lws}class{base_s}{{{tail_s}\n" 

1601 lines.insert(j, f"{lws}}}\n") 

1602 return i + 1 
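
# Illustrative sketch (hypothetical input): do_class rewrites

#

#   class Foo(Base):

#

# as "class Foo(Base) {" and inserts a matching "}" after the indented block.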

1603 #@+node:ekr.20211013165615.1: *7* py2ts.do_comment 

1604 comment_pat = re.compile(r'^([ \t]*)#(.*)\n') 

1605 

1606 def do_comment(self, i, lines, m, p): 

1607 """Handle a stand-alone comment line.""" 

1608 lws, comment = m.group(1), m.group(2).strip() 

1609 if comment: 

1610 lines[i] = f"{lws}// {comment}\n" 

1611 else: 

1612 lines[i] = '\n' # Write blank line for an empty comment. 

1613 return i + 1 

1614 #@+node:ekr.20211013130041.1: *7* py2ts.do_def & helper 

1615 def_pat = re.compile(r'^([ \t]*)def[ \t]+([\w_]+)\s*\((.*)\):(.*)\n') 

1616 this_pat = re.compile(r'^.*?\bthis\b') # 'self' has already become 'this'. 

1617 

1618 def do_def(self, i, lines, m, p): 

1619 

1620 j = self.find_indented_block(i, lines, m, p) 

1621 lws, name, args, tail = m.group(1), m.group(2), m.group(3).strip(), m.group(4).strip() 

1622 args = self.do_args(args) 

1623 if name == '__init__': 

1624 name = 'constructor' 

1625 tail_s = f" // {tail}" if tail else '' 

1626 # Use void as a placeholder type. 

1627 type_s = ' ' if name == 'constructor' else ': void ' 

1628 function_s = ' ' if self.this_pat.match(lines[i]) else ' function ' 

1629 lines[i] = f"{lws}public{function_s}{name}({args}){type_s}{{{tail_s}\n" 

1630 lines.insert(j, f"{lws}}}\n") 

1631 return i + 1 

1632 #@+node:ekr.20211014031722.1: *8* py2ts.do_args 

1633 def do_args(self, args): 

1634 """Add type annotations and remove the 'self' argument.""" 

1635 result = [] 

1636 for arg in (z.strip() for z in args.split(',')): 

1637 # Omit the self arg. 

1638 if arg != 'this': # Already converted. 

1639 val = self.types_d.get(arg) 

1640 result.append(f"{arg}: {val}" if val else arg) 

1641 return ', '.join(result) 
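# Illustrative sketch (hypothetical input, after pre_pass has replaced
# 'self' with 'this'; assumes types_d has no entry for 'c'):
#   '    def __init__(this, c):\n'  ->  '    public constructor(c) {\n'
# with a matching '    }\n' inserted after the indented block.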

1642 #@+node:ekr.20211013165952.1: *7* py2ts.do_docstring 

1643 docstring_pat = re.compile(r'^([ \t]*)r?("""|\'\'\')(.*)\n') 

1644 

1645 def do_docstring(self, i, lines, m, p): 

1646 """ 

1647 Convert a python docstring. 

1648 

1649 Always use the full multi-line typescript format, even for single-line 

1650 python docstrings. 

1651 """ 

1652 lws, delim, docstring = m.group(1), m.group(2), m.group(3).strip() 

1653 tail = docstring.replace(delim, '').strip() 

1654 lines[i] = f"{lws}/**\n" 

1655 if tail: 

1656 lines.insert(i + 1, f"{lws} * {tail}\n") 

1657 i += 1 

1658 if delim in docstring: 

1659 lines.insert(i + 1, f"{lws} */\n") 

1660 return i + 2 

1661 i += 1 

1662 while i < len(lines): 

1663 line = lines[i] 

1664 # Buglet: ignores whatever might follow. 

1665 tail = line.replace(delim, '').strip() 

1666 # pylint: disable=no-else-return 

1667 if delim in line: 

1668 if tail: 

1669 lines[i] = f"{lws} * {tail}\n" 

1670 lines.insert(i + 1, f"{lws} */\n") 

1671 return i + 2 

1672 else: 

1673 lines[i] = f"{lws} */\n" 

1674 return i + 1 

1675 elif tail: 

1676 lines[i] = f"{lws} * {tail}\n" 

1677 else: 

1678 lines[i] = f"{lws} *\n" 

1679 i += 1 

1680 return i 
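# Illustrative sketch (hypothetical single-line docstring):
#   '    """Return the sum."""\n'
# becomes the full multi-line form:
#   '    /**\n'
#   '     * Return the sum.\n'
#   '     */\n'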

1681 #@+node:ekr.20211014030113.1: *7* py2ts.do_except 

1682 except_pat = re.compile(r'^([ \t]*)except(.*):(.*)\n') 

1683 

1684 def do_except(self, i, lines, m, p): 

1685 

1686 j = self.find_indented_block(i, lines, m, p) 

1687 lws, error, tail = m.group(1), m.group(2).strip(), m.group(3).strip() 

1688 tail_s = f" // {tail}" if tail else '' 

1689 error_s = f" ({error}) " if error else '' 

1690 lines[i] = f"{lws}catch{error_s}{{{tail_s}\n" 

1691 lines.insert(j, f"{lws}}}\n") 

1692 return i + 1 

1693 #@+node:ekr.20211013141725.1: *7* py2ts.do_for 

1694 for1_s = r'^([ \t]*)for[ \t]+(.*):(.*)\n' # for (cond): 

1695 for2_s = r'^([ \t]*)for[ \t]*\((.*)\n' # for ( 

1696 

1697 for1_pat = re.compile(for1_s) 

1698 for2_pat = re.compile(for2_s) 

1699 for_pat = re.compile(fr"{for1_s}|{for2_s}") # Used by main loop. 

1700 

1701 def do_for(self, i, lines, m, p): 

1702 

1703 line = lines[i] 

1704 m1 = self.for1_pat.match(line) 

1705 m2 = self.for2_pat.match(line) 

1706 if m1: 

1707 j = self.find_indented_block(i, lines, m, p) 

1708 lws, cond, tail = m.group(1), m.group(2).strip(), m.group(3).strip() 

1709 cond_s = cond if cond.startswith('(') else f"({cond})" 

1710 tail_s = f" // {tail}" if tail else '' 

1711 lines[i] = f"{lws}for {cond_s} {{{tail_s}\n" 

1712 self.do_operators(i, lines, p) 

1713 lines.insert(j, f"{lws}}}\n") 

1714 return i + 1 

1715 else: 

1716 j = self.find_indented_block(i, lines, m2, p) 

1717 # Generate the 'for' line. 

1718 lws, tail = m2.group(1), m2.group(2).strip() 

1719 tail_s = f" // {tail}" if tail else '' 

1720 lines[i] = f"{lws}for ({tail_s}\n" 

1721 # Tell do_semicolon that lines[i:j] are not statements. 

1722 self.kill_semicolons(lines, i, j) 

1723 # Assume line[j] closes the paren. Insert '{' 

1724 lines[j] = lines[j].rstrip().replace(':', '') + ' {\n' 

1725 # Insert '}' 

1726 k = self.find_indented_block(j, lines, m2, p) 

1727 lines.insert(k, f"{lws}}}\n") 

1728 return i + 1 

1729 #@+node:ekr.20211017202104.1: *7* py2ts.do_import 

1730 import_s = r'^([ \t]*)import[ \t]+(.*)\n' 

1731 import_from_s = r'^([ \t]*)from[ \t]+(.*)[ \t]+import[ \t]+(.*)\n' 

1732 import_pat = re.compile(fr"{import_s}|{import_from_s}") # Used by main loop. 

1733 import1_pat = re.compile(import_s) 

1734 import2_pat = re.compile(import_from_s) 

1735 

1736 def do_import(self, i, lines, m, p): 

1737 

1738 line = lines[i] 

1739 m1 = self.import1_pat.match(line) 

1740 m2 = self.import2_pat.match(line) 

1741 # Comment out all imports. 

1742 if m1: 

1743 lws, import_list = m1.group(1), m1.group(2).strip() 

1744 lines[i] = f'{lws}// import "{import_list}"\n' 

1745 else: 

1746 lws, module, import_list = m2.group(1), m2.group(2).strip(), m2.group(3).strip() 

1747 lines[i] = f'{lws}// from "{module}" import {import_list}\n' 

1748 return i + 1 
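# Illustrative sketch (hypothetical inputs):
#   'import re\n'                             ->  '// import "re"\n'
#   'from leo.core import leoGlobals as g\n'  ->  '// from "leo.core" import leoGlobals as g\n'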

1749 #@+node:ekr.20211014022432.1: *7* py2ts.do_elif 

1750 elif1_s = r'^([ \t]*)elif[ \t]+(.*):(.*)\n' # elif (cond): 

1751 elif2_s = r'^([ \t]*)elif[ \t]*\((.*)\n' # elif ( 

1752 

1753 elif1_pat = re.compile(elif1_s) 

1754 elif2_pat = re.compile(elif2_s) 

1755 elif_pat = re.compile(fr"{elif1_s}|{elif2_s}") # Used by main loop. 

1756 

1757 def do_elif(self, i, lines, m, p): 

1758 

1759 line = lines[i] 

1760 m1 = self.elif1_pat.match(line) 

1761 m2 = self.elif2_pat.match(line) 

1762 if m1: 

1763 j = self.find_indented_block(i, lines, m, p) 

1764 lws, cond, tail = m.group(1), m.group(2).strip(), m.group(3).strip() 

1765 cond_s = cond if cond.startswith('(') else f"({cond})" 

1766 tail_s = f" // {tail}" if tail else '' 

1767 lines[i] = f"{lws}else if {cond_s} {{{tail_s}\n" 

1768 lines.insert(j, f"{lws}}}\n") 

1769 self.do_operators(i, lines, p) 

1770 return i + 1 

1771 else: 

1772 j = self.find_indented_block(i, lines, m2, p) 

1773 # Generate the 'else if' line. 

1774 lws, tail = m2.group(1), m2.group(2).strip() 

1775 tail_s = f" // {tail}" if tail else '' 

1776 lines[i] = f"{lws}else if ({tail_s}\n" 

1777 # Tell do_semicolon that lines[i:j] are not statements. 

1778 self.kill_semicolons(lines, i, j) 

1779 # Assume line[j] closes the paren. Insert '{' 

1780 lines[j] = lines[j].rstrip().replace(':', '') + ' {\n' 

1781 # Insert '}' 

1782 k = self.find_indented_block(j, lines, m2, p) 

1783 lines.insert(k, f"{lws}}}\n") 

1784 return i + 1 

1785 

1786 #@+node:ekr.20211014022445.1: *7* py2ts.do_else 

1787 else_pat = re.compile(r'^([ \t]*)else:(.*)\n') 

1788 

1789 def do_else(self, i, lines, m, p): 

1790 

1791 j = self.find_indented_block(i, lines, m, p) 

1792 lws, tail = m.group(1), m.group(2).strip() 

1793 tail_s = f" // {tail}" if tail else '' 

1794 lines[i] = f"{lws}else {{{tail_s}\n" 

1795 lines.insert(j, f"{lws}}}\n") 

1796 return i + 1 

1797 #@+node:ekr.20211014022453.1: *7* py2ts.do_finally 

1798 finally_pat = re.compile(r'^([ \t]*)finally:(.*)\n') 

1799 

1800 def do_finally(self, i, lines, m, p): 

1801 

1802 j = self.find_indented_block(i, lines, m, p) 

1803 lws, tail = m.group(1), m.group(2).strip() 

1804 tail_s = f" // {tail}" if tail else '' 

1805 lines[i] = f"{lws}finally {{{tail_s}\n" 

1806 lines.insert(j, f"{lws}}}\n") 

1807 return i + 1 

1808 #@+node:ekr.20211013131016.1: *7* py2ts.do_if 

1809 if1_s = r'^([ \t]*)if[ \t]+(.*):(.*)\n' # if (cond): 

1810 if2_s = r'^([ \t]*)if[ \t]*\((.*)\n' # if ( 

1811 

1812 if1_pat = re.compile(if1_s) 

1813 if2_pat = re.compile(if2_s) 

1814 if_pat = re.compile(fr"{if1_s}|{if2_s}") # Used by main loop. 

1815 

1816 def do_if(self, i, lines, m, p): 

1817 

1818 line = lines[i] 

1819 m1 = self.if1_pat.match(line) 

1820 m2 = self.if2_pat.match(line) 

1821 if m1: 

1822 j = self.find_indented_block(i, lines, m1, p) 

1823 lws, cond, tail = m.group(1), m.group(2).strip(), m.group(3).strip() 

1824 cond_s = cond if cond.startswith('(') else f"({cond})" 

1825 tail_s = f" // {tail}" if tail else '' 

1826 lines[i] = f"{lws}if {cond_s} {{{tail_s}\n" 

1827 self.do_operators(i, lines, p) 

1828 lines.insert(j, f"{lws}}}\n") 

1829 return i + 1 

1830 else: 

1831 j = self.find_indented_block(i, lines, m2, p) 

1832 # Generate the 'if' line. 

1833 lws, tail = m2.group(1), m2.group(2).strip() 

1834 tail_s = f" // {tail}" if tail else '' 

1835 lines[i] = f"{lws}if ({tail_s}\n" 

1836 # Tell do_semicolon that lines[i:j] are not statements. 

1837 self.kill_semicolons(lines, i, j) 

1838 # Assume line[j] closes the paren. Insert '{' 

1839 lines[j] = lines[j].rstrip().replace(':', '') + ' {\n' 

1840 # Insert '}' 

1841 k = self.find_indented_block(j, lines, m2, p) 

1842 lines.insert(k, f"{lws}}}\n") 

1843 return i + 1 
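# Illustrative sketch (hypothetical inputs):
#   '    if changed:\n'  ->  '    if (changed) {\n', with '    }\n' inserted
#                            after the indented block.
#   '    if (\n'         ->  '    if (\n' (the line that later closes the
#                            paren loses its ':' and gets ' {' appended).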

1844 #@+node:ekr.20211018125503.1: *7* py2ts.do_section_ref 

1845 section_ref_pat = re.compile(r"^([ \t]*)(\<\<.*?\>\>)\s*(.*)$") 

1846 

1847 def do_section_ref(self, i, lines, m, p): 

1848 # Handle trailing code. 

1849 lws, section_name, tail = m.group(1), m.group(2), m.group(3).strip() 

1850 if tail.startswith('#'): 

1851 lines[i] = f"{lws}{section_name} // {tail[1:]}\n" 

1852 return i + 1 

1853 #@+node:ekr.20211014022506.1: *7* py2ts.do_try 

1854 try_pat = re.compile(r'^([ \t]*)try:(.*)\n') 

1855 

1856 def do_try(self, i, lines, m, p): 

1857 

1858 j = self.find_indented_block(i, lines, m, p) 

1859 lws, tail = m.group(1), m.group(2).strip() 

1860 tail_s = f" // {tail}" if tail else '' 

1861 lines[i] = f"{lws}try {{{tail_s}\n" 

1862 lines.insert(j, f"{lws}}}\n") 

1863 return i + 1 

1864 #@+node:ekr.20211013141809.1: *7* py2ts.do_while 

1865 while1_s = r'^([ \t]*)while[ \t]+(.*):(.*)\n' # while (cond): 

1866 while2_s = r'^([ \t]*)while[ \t]*\((.*)\n' # while ( 

1867 

1868 while1_pat = re.compile(while1_s) 

1869 while2_pat = re.compile(while2_s) 

1870 while_pat = re.compile(fr"{while1_s}|{while2_s}") # Used by main loop. 

1871 

1872 def do_while(self, i, lines, m, p): 

1873 

1874 line = lines[i] 

1875 m1 = self.while1_pat.match(line) 

1876 m2 = self.while2_pat.match(line) 

1877 if m1: 

1878 j = self.find_indented_block(i, lines, m, p) 

1879 lws, cond, tail = m.group(1), m.group(2).strip(), m.group(3).strip() 

1880 cond_s = cond if cond.startswith('(') else f"({cond})" 

1881 tail_s = f" // {tail}" if tail else '' 

1882 lines[i] = f"{lws}while {cond_s} {{{tail_s}\n" 

1883 self.do_operators(i, lines, p) 

1884 lines.insert(j, f"{lws}}}\n") 

1885 return i + 1 

1886 else: 

1887 j = self.find_indented_block(i, lines, m2, p) 

1888 # Generate the 'while' line. 

1889 lws, tail = m2.group(1), m2.group(2).strip() 

1890 tail_s = f" // {tail}" if tail else '' 

1891 lines[i] = f"{lws}while ({tail_s}\n" 

1892 # Tell do_semicolon that lines[i:j] are not statements. 

1893 self.kill_semicolons(lines, i, j) 

1894 # Assume line[j] closes the paren. Insert '{' 

1895 lines[j] = lines[j].rstrip().replace(':', '') + ' {\n' 

1896 # Insert '}' 

1897 k = self.find_indented_block(j, lines, m2, p) 

1898 lines.insert(k, f"{lws}}}\n") 

1899 return i + 1 

1900 

1901 #@+node:ekr.20211014022554.1: *7* py2ts.do_with 

1902 with_pat = re.compile(r'^([ \t]*)with(.*):(.*)\n') 

1903 

1904 def do_with(self, i, lines, m, p): 

1905 

1906 j = self.find_indented_block(i, lines, m, p) 

1907 lws, clause, tail = m.group(1), m.group(2).strip(), m.group(3).strip() 

1908 tail_s = f" // {tail}" if tail else '' 

1909 clause_s = f" ({clause}) " if clause else '' 

1910 lines[i] = f"{lws}with{clause_s}{{{tail_s}\n" 

1911 lines.insert(j, f"{lws}}}\n") 

1912 return i + 1 

1913 #@+node:ekr.20211013172540.1: *7* py2ts.do_trailing_comment 

1914 trailing_comment_pat = re.compile(r'^([ \t]*)(.*)#(.*)\n') 

1915 

1916 def do_trailing_comment(self, i, lines, m, p): 

1917 """ 

1918 Handle a trailing comment line. 

1919 

1920 All other patterns have already been scanned on the line. 

1921 """ 

1922 lws, statement, trailing_comment = m.group(1), m.group(2).rstrip(), m.group(3).strip() 

1923 statement_s = f"{statement};" if self.ends_statement(i, lines) else statement 

1924 lines[i] = f"{lws}{statement_s} // {trailing_comment}\n" 

1925 return i + 1 
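# Illustrative sketch (hypothetical input):
#   '    return x  # done\n'  ->  '    return x; // done\n'
# The ';' is added only when ends_statement returns True for the line.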

1926 #@+node:ekr.20211022090919.1: *6* helpers 

1927 #@+node:ekr.20211017210122.1: *7* py2ts.do_operators 

1928 def do_operators(self, i, lines, p): 

1929 

1930 # Regex replacements. 

1931 table = ( 

1932 ('True', 'true'), 

1933 ('False', 'false'), 

1934 # ('None', 'null'), # Done in post-pass. 

1935 ('default', 'default_val'), 

1936 ('and', '&&'), 

1937 ('or', '||'), 

1938 ('is not', '!='), 

1939 ('is', '=='), 

1940 ('not', '!'), 

1941 ('assert', '// assert'), 

1942 ) 

1943 for a, b in table: 

1944 lines[i] = re.sub(fr"\b{a}\b", b, lines[i]) 
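# Illustrative sketch (hypothetical line):
#   '    return a and b or not c\n'  ->  '    return a && b || ! c\n'
# The substitutions use word boundaries, so names like 'android' are untouched.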

1945 

1946 #@+node:ekr.20211017134103.1: *7* py2ts.do_semicolon 

1947 def do_semicolon(self, i, lines, p): 

1948 """ 

1949 Insert a semicolon in lines[i] if appropriate. 

1950 

1951 No other handler has matched, so we know that the line: 

1952 - Does not end in a comment. 

1953 - Is not part of a docstring. 

1954 """ 

1955 # Honor the flag inserted by kill_semicolons. 

1956 flag = self.kill_semicolons_flag 

1957 if lines[i].endswith(flag): 

1958 lines[i] = lines[i].replace(flag, '\n') 

1959 return 

1960 # For now, use a maximal policy. 

1961 if self.ends_statement(i, lines): 

1962 lines[i] = f"{lines[i].rstrip()};\n" 

1963 

1964 

1965 #@+node:ekr.20211017135603.1: *7* py2ts.ends_statement 

1966 def ends_statement(self, i, lines): 

1967 """ 

1968 Return True if lines[i] ends a statement. 

1969 

1970 If so, the line should end with a semicolon 

1971 (before any trailing comment). 

1972 """ 

1973 # https://stackoverflow.com/questions/38823062/ 

1974 s = lines[i].strip() 

1975 next_line = lines[i + 1] if i + 1 < len(lines) else '' 

1976 # Return False for blank lines. 

1977 if not s: 

1978 return False 

1979 # Return False for Leo directives. 

1980 if s.startswith('@'): 

1981 return False 

1982 # Return False for section references. 

1983 i = s.find('<<') 

1984 j = s.find('>>') 

1985 if -1 < i < j: 

1986 return False 

1987 # Return False if this line ends in any of the following: 

1988 if s.endswith(('{', '(', '[', ':', '||', '&&', '!', ',', '`')): 

1989 return False 

1990 # Return False if the next line starts with '{', '(', '['. 

1991 if next_line.lstrip().startswith(('{', '(', '[', '&&', '||', '!')): 

1992 return False 

1993 # Return False for '}' lines. 

1994 if s.startswith('}'): 

1995 return False 

1996 return True 
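# Illustrative sketch (hypothetical lines; assumes the next line is an
# ordinary statement):
#   'w = g.app.gui'     -> True   (do_semicolon appends ';')
#   'aList = ['         -> False  (ends with an opening bracket)
#   '@language python'  -> False  (Leo directive)
#   '}'                 -> False  (closing-brace line)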

1997 #@+node:ekr.20211013123001.1: *7* py2ts.find_indented_block 

1998 lws_pat = re.compile(r'^([ \t]*)') 

1999 

2000 def find_indented_block(self, i, lines, m, p): 

2001 """Return j, the index of the line *after* the indented block.""" 

2002 # Scan for the first non-empty line with the same or less indentation. 

2003 lws = m.group(1) 

2004 j = i + 1 

2005 while j < len(lines): 

2006 line = lines[j] 

2007 m2 = self.lws_pat.match(line) 

2008 lws2 = m2.group(1) 

2009 if line.strip() and len(lws2) <= len(lws): 

2010 # Don't add a blank line at the end of a block. 

2011 if j > 1 and not lines[j - 1].strip(): 

2012 j -= 1 

2013 break 

2014 j += 1 

2015 return j 
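# Illustrative sketch: with i at the 'if' line below (so m.group(1) is its
# leading whitespace), the method returns the index of 'y = 2', the first
# non-blank line at the same or lesser indentation:
#   i:    '    if (x) {'
#   i+1:  '        w = 1'
#   j:    '    y = 2'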

2016 

2017 #@+node:ekr.20211020101415.1: *7* py2ts.kill_semicolons 

2018 kill_semicolons_flag = ' // **kill-semicolon**\n' # Must end with a newline. 

2019 

2020 def kill_semicolons(self, lines, i, j): 

2021 """ 

2022 Tell later calls to do_semicolon that lines[i : j] should *not* end with a semicolon. 

2023 """ 

2024 for n in range(i, j): 

2025 lines[n] = lines[n].rstrip() + self.kill_semicolons_flag 

2026 #@+node:ekr.20211016214742.1: *7* py2ts.move_docstrings 

2027 class_or_def_pat = re.compile(r'^(\s*)(public|class)\s+([\w_]+)') 

2028 

2029 def move_docstrings(self, lines): 

2030 """Move docstrings before the preceding class or def line.""" 

2031 i = 0 

2032 while i < len(lines): 

2033 m = self.class_or_def_pat.match(lines[i]) 

2034 i += 1 

2035 if not m: 

2036 continue 

2037 # Set j to the start of the docstring. 

2038 j = i 

2039 while j < len(lines): 

2040 if lines[j].strip(): 

2041 break 

2042 j += 1 

2043 if j >= len(lines): 

2044 continue 

2045 if not lines[j].strip().startswith('/**'): 

2046 continue 

2047 # Set k to the end of the docstring. 

2048 k = j 

2049 while k < len(lines) and '*/' not in lines[k]: 

2050 k += 1 

2051 if k >= len(lines): 

2052 g.printObj(lines[i - 1 : len(lines) - 1], tag='OOPS') 

2053 continue 

2054 # Remove 4 blanks from the docstrings. 

2055 for n in range(j, k + 1): 

2056 if lines[n].startswith(' ' * 4): 

2057 lines[n] = lines[n][4:] 

2058 # Rearrange the lines. 

2059 lines[i - 1 : k + 1] = lines[j : k + 1] + [lines[i - 1]] 

2060 i = k + 1 

2061 #@+node:ekr.20211016200908.1: *7* py2ts.post_pass & helpers 

2062 def post_pass(self, lines): 

2063 

2064 # Munge lines in place 

2065 self.move_docstrings(lines) 

2066 self.do_f_strings(lines) 

2067 self.do_ternary(lines) 

2068 self.do_assignment(lines) # Do this last, so it doesn't add 'const' to inserted comments. 

2069 s = (''.join(lines) 

2070 .replace('@language python', '@language typescript') 

2071 .replace(self.kill_semicolons_flag, '\n') 

2072 ) 

2073 return re.sub(r'\bNone\b', 'null', s) 

2074 

2075 

2076 #@+node:ekr.20211021061023.1: *8* py2ts.do_assignment 

2077 assignment_pat = re.compile(r'^([ \t]*)(.*?)\s+=\s+(.*)$') # Require whitespace around the '=' 

2078 

2079 def do_assignment(self, lines): 

2080 """Add const to all non-tuple assignments.""" 

2081 # Do this late so that we can test for the ending semicolon. 

2082 

2083 # Suppression table. 

2084 # Missing elements are likely to cause this method to generate '= ='. 

2085 table = ( 

2086 ',', # Tuple assignment or multi-line argument lists. 

2087 '*', # A converted docstring. 

2088 '`', # f-string. 

2089 '//', # Comment. 

2090 '=', # Condition. 

2091 # Keywords that might be followed by '=' 

2092 'class', 'def', 'elif', 'for', 'if', 'print', 'public', 'return', 'with', 'while', 

2093 ) 

2094 for i, s in enumerate(lines): 

2095 m = self.assignment_pat.match(s) 

2096 if m: 

2097 lws, lhs, rhs = m.group(1), m.group(2), m.group(3).rstrip() 

2098 if not any(z in lhs for z in table): 

2099 lines[i] = f"{lws}const {lhs} = {rhs}\n" 
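# Illustrative sketch (hypothetical lines; the main pass has already added ';'):
#   '    w = 5;\n'        ->  '    const w = 5;\n'
#   '    a, b = 1, 2;\n'  ->  unchanged (the ',' in the lhs suppresses the rewrite)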

2100 #@+node:ekr.20211020185016.1: *8* py2ts.do_f_strings 

2101 f_string_pat = re.compile(r'([ \t]*)(.*?)f"(.*?)"(.*)$') 

2102 

2103 def do_f_strings(self, lines): 

2104 

2105 i = 0 

2106 while i < len(lines): 

2107 progress = i 

2108 s = lines[i] 

2109 m = self.f_string_pat.match(s) 

2110 if not m: 

2111 i += 1 

2112 continue 

2113 lws, head, string, tail = m.group(1), m.group(2), m.group(3), m.group(4).rstrip() 

2114 string_s = ( 

2115 string.replace('{', '${') # Add the '$' 

2116 .replace('! ', 'not ') # Undo erroneous replacement. 

2117 ) 

2118 # Remove format strings. Not perfect, but seemingly good enough. 

2119 string_s = re.sub(r'\:[0-9]\.+[0-9]+[frs]', '', string_s) 

2120 string_s = re.sub(r'\![frs]', '', string_s) 

2121 # A hack. If the fstring is on a line by itself, remove a trailing ';' 

2122 if not head.strip() and tail.endswith(';'): 

2123 tail = tail[:-1].strip() 

2124 if 1: # Just replace the line. 

2125 lines[i] = f"{lws}{head}`{string_s}`{tail.rstrip()}\n" 

2126 i += 1 

2127 else: 

2128 # These comments quickly become annoying. 

2129 # Add the original line as a comment as a check. 

2130 lines[i] = f"{lws}// {s.strip()}\n" # Comment out the original line. 

2131 lines.insert(i + 1, f"{lws}{head}`{string_s}`{tail.rstrip()}\n") 

2132 i += 2 

2133 assert i > progress 
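# Illustrative sketch (hypothetical line; ';' added earlier by do_semicolon):
#   '    g.es(f"done: {count}");\n'  ->  '    g.es(`done: ${count}`);\n'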

2134 #@+node:ekr.20211021051033.1: *8* py2ts.do_ternary 

2135 ternary_pat1 = re.compile(r'^([ \t]*)(.*?)\s*=\s*(.*?) if (.*?) else (.*);$') # assignment 

2136 ternary_pat2 = re.compile(r'^([ \t]*)return\s+(.*?) if (.*?) else (.*);$') # return statement 

2137 

2138 def do_ternary(self, lines): 

2139 

2140 i = 0 

2141 while i < len(lines): 

2142 progress = i 

2143 s = lines[i] 

2144 m1 = self.ternary_pat1.match(s) 

2145 m2 = self.ternary_pat2.match(s) 

2146 if m1: 

2147 lws, target, a, cond, b = m1.group(1), m1.group(2), m1.group(3), m1.group(4), m1.group(5) 

2148 lines[i] = f"{lws}// {s.strip()}\n" 

2149 lines.insert(i + 1, f"{lws}{target} = {cond} ? {a} : {b};\n") 

2150 i += 2 

2151 elif m2: 

2152 lws, a, cond, b = m2.group(1), m2.group(2), m2.group(3), m2.group(4) 

2153 lines[i] = f"{lws}// {s.strip()}\n" 

2154 lines.insert(i + 1, f"{lws}return {cond} ? {a} : {b};\n") 

2155 i += 2 

2156 else: 

2157 i += 1 

2158 assert progress < i 
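# Illustrative sketch (hypothetical line; ';' added earlier by do_semicolon):
#   '    w = a if flag else b;\n'
# becomes the original as a comment plus the rewritten line:
#   '    // w = a if flag else b;\n'
#   '    w = flag ? a : b;\n'
# (do_assignment later adds 'const' to the rewritten line.)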

2159 #@+node:ekr.20211017044939.1: *7* py2ts.pre_pass 

2160 def pre_pass(self, s): 

2161 

2162 # Remove the python encoding lines. 

2163 s = s.replace('@first # -*- coding: utf-8 -*-\n', '') 

2164 

2165 # Replace 'self' by 'this' *everywhere*. 

2166 s = re.sub(r'\bself\b', 'this', s) 

2167 

2168 # Comment out @cmd decorators. 

2169 s = re.sub(r"^@cmd(.*?)$", r'// @cmd\1\n', s, flags=re.MULTILINE) 

2170 

2171 # Replace the alias for 'self' by 'this' *only* in specific contexts. 

2172 # Do *not* replace the alias everywhere: that could do great harm. 

2173 if self.alias: 

2174 s = re.sub(fr"\b{self.alias}\.", 'this.', s) 

2175 # Remove lines like `at = self`. 

2176 s = re.sub(fr"^\s*{self.alias}\s*=\s*this\s*\n", '', s, flags=re.MULTILINE) 

2177 # Remove lines like `at, c = self, self.c`. 

2178 s = re.sub( 

2179 fr"^(\s*){self.alias}\s*,\s*c\s*=\s*this,\s*this.c\n", 

2180 r'\1c = this.c\n', # do_assignment adds const. 

2181 s, 

2182 flags=re.MULTILINE) 

2183 # Remove lines like `at, p = self, self.p`. 

2184 s = re.sub(fr"^(\s*){self.alias}\s*,\s*p\s*=\s*this,\s*this.p\n", 

2185 r'\1p = this.p\n', # do_assignment adds const. 

2186 s, 

2187 flags=re.MULTILINE) 

2188 # Do this last. 

2189 s = re.sub(fr"\b{self.alias},", 'this,', s) 

2190 return s 
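# Illustrative sketch (hypothetical body, with self.alias == 'at'):
#   'at = self\n'        ->  removed
#   'at.setDirty(p)\n'   ->  'this.setDirty(p)\n'
#   'self.c.redraw()\n'  ->  'this.c.redraw()\n'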

2191 #@-others 

2192 #@+node:ekr.20160316091843.2: *3* ccc.typescript-to-py 

2193 @cmd('typescript-to-py') 

2194 def tsToPy(self, event): # pragma: no cover 

2195 """ 

2196 The typescript-to-python command converts typescript text to python 

2197 text. The conversion is not perfect, but it eliminates a lot of tedious 

2198 text manipulation. 

2199 """ 

2200 #@+others 

2201 #@+node:ekr.20150514063305.176: *4* class TS_To_Python (To_Python) 

2202 class TS_To_Python(To_Python): # pragma: no cover 

2203 #@+others 

2204 #@+node:ekr.20150514063305.177: *5* ctor (TS_To_Python) 

2205 def __init__(self, c): 

2206 """Ctor for TS_To_Python class.""" 

2207 super().__init__(c) 

2208 # The class name for the present function. Used to modify ivars. 

2209 self.class_name = '' 

2210 #@+node:ekr.20150514063305.178: *5* convertCodeList (TS_To_Python) & helpers 

2211 def convertCodeList(self, aList): 

2212 r, sr = self.replace, self.safe_replace 

2213 # First... 

2214 r(aList, '\r', '') 

2215 self.mungeAllFunctions(aList) 

2216 self.mungeAllClasses(aList) 

2217 # Second... 

2218 sr(aList, ' -> ', '.') 

2219 sr(aList, '->', '.') 

2220 sr(aList, ' . ', '.') 

2221 # sr(aList, 'this.self', 'self') 

2222 sr(aList, '{', '') 

2223 sr(aList, '}', '') 

2224 sr(aList, 'else if', 'elif') 

2225 sr(aList, 'else', 'else:') 

2226 sr(aList, '&&', ' and ') 

2227 sr(aList, '||', ' or ') 

2228 sr(aList, 'true', 'True') 

2229 sr(aList, 'false', 'False') 

2230 sr(aList, 'null', 'None') 

2231 sr(aList, 'this', 'self') 

2232 sr(aList, 'try', 'try:') 

2233 sr(aList, 'catch', 'except:') 

2234 sr(aList, 'constructor', '__init__') 

2235 sr(aList, 'new ', '') 

2236 # sr(aList, 'var ','') 

2237 # var usually indicates something weird, or an uninitialized var, 

2238 # so it may be good to retain as a marker. 

2239 # Third... 

2240 self.handle_all_keywords(aList) 

2241 self.insert_not(aList) 

2242 # after processing for keywords 

2243 self.removeSemicolonsAtEndOfLines(aList) 

2244 self.comment_scope_ids(aList) 

2245 # Last... 

2246 self.removeBlankLines(aList) 

2247 self.removeExcessWs(aList) 

2248 # I usually don't like extra whitespace. YMMV. 

2249 sr(aList, ' and ', ' and ') 

2250 sr(aList, ' not ', ' not ') 

2251 sr(aList, ' or ', ' or ') 

2252 sr(aList, ' and ', ' and ') 

2253 sr(aList, ' not ', ' not ') 

2254 sr(aList, ' or ', ' or ') 

2255 sr(aList, ' :', ':') 

2256 sr(aList, ', ', ',') 

2257 sr(aList, ' ,', ',') 

2258 sr(aList, ' (', '(') 

2259 sr(aList, '( ', '(') 

2260 sr(aList, ' )', ')') 

2261 sr(aList, ') ', ')') 

2262 sr(aList, ' and(', ' and (') 

2263 sr(aList, ' not(', ' not (') 

2264 sr(aList, ' or(', ' or (') 

2265 sr(aList, ')and ', ') and ') 

2266 sr(aList, ')not ', ') not ') 

2267 sr(aList, ')or ', ') or ') 

2268 sr(aList, ')and(', ') and (') 

2269 sr(aList, ')not(', ') not (') 

2270 sr(aList, ')or(', ') or (') 

2271 sr(aList, '@language javascript', '@language python') 

2272 self.replaceComments(aList) # should follow all calls to safe_replace 

2273 self.removeTrailingWs(aList) 

2274 r(aList, '\t ', '\t') # happens when deleting declarations. 
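# Illustrative sketch of the net effect of a few of the rewrites above
# (hypothetical lines; approximate, since the whitespace cleanups also apply):
#   'this.changed = true;'  ->  'self.changed = True'
#   'else if (found) {'     ->  'elif found:'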

2275 #@+node:ekr.20150514063305.179: *6* comment_scope_ids 

2276 def comment_scope_ids(self, aList): 

2277 """convert (public|private|export) aLine to aLine # (public|private|export)""" 

2278 scope_ids = ('public', 'private', 'export',) 

2279 i = 0 

2280 if any(self.match_word(aList, i, z) for z in scope_ids): 

2281 i = self.handle_scope_keyword(aList, i) 

2282 while i < len(aList): 

2283 progress = i 

2284 if self.is_string_or_comment(aList, i): 

2285 i = self.skip_string_or_comment(aList, i) 

2286 elif aList[i] == '\n': 

2287 i += 1 

2288 i = self.skip_ws(aList, i) 

2289 if any(self.match_word(aList, i, z) for z in scope_ids): 

2290 i = self.handle_scope_keyword(aList, i) 

2291 else: 

2292 i += 1 

2293 assert i > progress 

2294 # print "handAllKeywords2:", ''.join(aList) 

2295 #@+node:ekr.20150514063305.180: *7* handle_scope_keyword 

2296 def handle_scope_keyword(self, aList, i): 

2297 i1 = i 

2298 # pylint: disable=undefined-loop-variable 

2299 for word in ('public', 'private', 'export'): 

2300 if self.match_word(aList, i, word): 

2301 i += len(word) 

2302 break 

2303 else: 

2304 return None 

2305 # Skip any following spaces. 

2306 i2 = self.skip_ws(aList, i) 

2307 # Scan to the next newline: 

2308 i3 = self.skip_line(aList, i) 

2309 # Optional: move the word to a trailing comment. 

2310 comment: List[str] = list(f" # {word}") if False else [] 

2311 # Change the list in place. 

2312 aList[i1:i3] = aList[i2:i3] + comment 

2313 i = i1 + (i3 - i2) + len(comment) 

2314 return i 

2315 #@+node:ekr.20150514063305.181: *6* handle_all_keywords 

2316 def handle_all_keywords(self, aList): 

2317 """ 

2318 converts if ( x ) to if x: 

2319 converts while ( x ) to while x: 

2320 """ 

2321 statements = ('elif', 'for', 'if', 'while',) 

2322 i = 0 

2323 while i < len(aList): 

2324 if self.is_string_or_comment(aList, i): 

2325 i = self.skip_string_or_comment(aList, i) 

2326 elif any(self.match_word(aList, i, z) for z in statements): 

2327 i = self.handle_keyword(aList, i) 

2328 # elif ( 

2329 # self.match_word(aList,i,"if") or 

2330 # self.match_word(aList,i,"while") or 

2331 # self.match_word(aList,i,"for") or 

2332 # self.match_word(aList,i,"elif") 

2333 # ): 

2334 # i = self.handle_keyword(aList,i) 

2335 else: 

2336 i += 1 

2337 # print "handAllKeywords2:", ''.join(aList) 

2338 #@+node:ekr.20150514063305.182: *7* handle_keyword 

2339 def handle_keyword(self, aList, i): 

2340 if self.match_word(aList, i, "if"): 

2341 i += 2 

2342 elif self.match_word(aList, i, "elif"): 

2343 i += 4 

2344 elif self.match_word(aList, i, "while"): 

2345 i += 5 

2346 elif self.match_word(aList, i, "for"): 

2347 i += 3 

2348 else: assert False, 'not a keyword' 

2349 # Make sure one space follows the keyword. 

2350 k = i 

2351 i = self.skip_ws(aList, i) 

2352 if k == i: 

2353 c = aList[i] 

2354 aList[i : i + 1] = [' ', c] 

2355 i += 1 

2356 # Remove '(' and matching ')' and add a ':' 

2357 if aList[i] == "(": 

2358 # Look ahead. Don't remove if we span a line. 

2359 j = self.skip_to_matching_bracket(aList, i) 

2360 k = i 

2361 found = False 

2362 while k < j and not found: 

2363 found = aList[k] == '\n' 

2364 k += 1 

2365 if not found: 

2366 j = self.removeMatchingBrackets(aList, i) 

2367 if i < j < len(aList): 

2368 ch = aList[j] 

2369 aList[j : j + 1] = [ch, ":", " "] 

2370 j = j + 2 

2371 return j 

2372 return i 

2373 #@+node:ekr.20150514063305.183: *6* mungeAllClasses 

2374 def mungeAllClasses(self, aList): 

2375 """Scan for a '{' at the top level that is preceeded by ')' """ 

2376 i = 0 

2377 while i < len(aList): 

2378 progress = i 

2379 if self.is_string_or_comment(aList, i): 

2380 i = self.skip_string_or_comment(aList, i) 

2381 elif self.match_word(aList, i, 'class'): 

2382 i1 = i 

2383 i = self.skip_line(aList, i) 

2384 aList[i - 1 : i] = list(f"{aList[i - 1]}:") 

2385 s = ''.join(aList[i1:i]) 

2386 k = s.find(' extends ') 

2387 if k > -1: 

2388 k1 = k 

2389 k = g.skip_id(s, k + 1) 

2390 k = g.skip_ws(s, k) 

2391 if k < len(s) and g.is_c_id(s[k]): 

2392 k2 = g.skip_id(s, k) 

2393 word = s[k:k2] 

2394 aList[i1:i] = list(f"{s[:k1]} ({word})") 

2395 elif self.match_word(aList, i, 'interface'): 

2396 aList[i : i + len('interface')] = list('class') 

2397 i = self.skip_line(aList, i) 

2398 aList[i - 1 : i] = list(f"{aList[i - 1]}: # interface") 

2399 i = self.skip_line(aList, i) # Essential. 

2400 else: 

2401 i += 1 

2402 assert i > progress 

2403 #@+node:ekr.20150514063305.184: *6* mungeAllFunctions & helpers 

2404 def mungeAllFunctions(self, aList): 

2405 """Scan for a '{' at the top level that is preceeded by ')' """ 

2406 prevSemi = 0 # Previous semicolon: header contains all previous text 

2407 i = 0 

2408 firstOpen = None 

2409 while i < len(aList): 

2410 progress = i 

2411 if self.is_string_or_comment(aList, i): 

2412 j = self.skip_string_or_comment(aList, i) 

2413 prevSemi = j 

2414 elif self.match(aList, i, '('): 

2415 if not firstOpen: 

2416 firstOpen = i 

2417 j = i + 1 

2418 elif self.match(aList, i, ';'): 

2419 j = i + 1 

2420 prevSemi = j 

2421 elif self.match(aList, i, "{"): 

2422 j = self.handlePossibleFunctionHeader( 

2423 aList, i, prevSemi, firstOpen) 

2424 prevSemi = j 

2425 firstOpen = None # restart the scan 

2426 else: 

2427 j = i + 1 

2428 # Handle unusual cases. 

2429 if j <= progress: 

2430 j = progress + 1 

2431 assert j > progress 

2432 i = j 

2433 #@+node:ekr.20150514063305.185: *7* handlePossibleFunctionHeader 

2434 def handlePossibleFunctionHeader(self, aList, i, prevSemi, firstOpen): 

2435 """ 

2436 Converts function header lines from typescript format to python format. 

2437 That is, converts either 

2438 x1..nn w::y ( t1 z1,..tn zn) {  (C++ form) 

2439 (public|private|export) name (t1: z1, ... tn: zn) {  (typescript form) 

2440 to 

2441 def y (z1,..zn): # (public|private|export) 

2442 """ 

2443 assert self.match(aList, i, "{") 

2444 prevSemi = self.skip_ws_and_nl(aList, prevSemi) 

2445 close = self.prevNonWsOrNlChar(aList, i) 

2446 if close < 0 or aList[close] != ')': 

2447 # Should not increase *Python* indent. 

2448 return 1 + self.skip_to_matching_bracket(aList, i) 

2449 if not firstOpen: 

2450 return 1 + self.skip_to_matching_bracket(aList, i) 

2451 close2 = self.skip_to_matching_bracket(aList, firstOpen) 

2452 if close2 != close: 

2453 return 1 + self.skip_to_matching_bracket(aList, i) 

2454 open_paren = firstOpen 

2455 assert aList[open_paren] == '(' 

2456 head = aList[prevSemi:open_paren] 

2457 # do nothing if the head starts with "if", "for" or "while" 

2458 k = self.skip_ws(head, 0) 

2459 if k >= len(head) or not head[k].isalpha(): 

2460 return 1 + self.skip_to_matching_bracket(aList, i) 

2461 kk = self.skip_past_word(head, k) 

2462 if kk > k: 

2463 headString = ''.join(head[k:kk]) 

2464 # C keywords that might be followed by '{' 

2465 # print "headString:", headString 

2466 if headString in ["do", "for", "if", "struct", "switch", "while"]: 

2467 return 1 + self.skip_to_matching_bracket(aList, i) 

2468 args = aList[open_paren : close + 1] 

2469 k = 1 + self.skip_to_matching_bracket(aList, i) 

2470 body = aList[close + 1 : k] 

2471 head = self.massageFunctionHead(head) 

2472 args = self.massageFunctionArgs(args) 

2473 body = self.massageFunctionBody(body) 

2474 result = [] 

2475 if head: 

2476 result.extend(head) 

2477 if args: 

2478 result.extend(args) 

2479 if body: 

2480 result.extend(body) 

2481 aList[prevSemi:k] = result 

2482 return prevSemi + len(result) 

2483 #@+node:ekr.20150514063305.186: *7* massageFunctionArgs 

2484 def massageFunctionArgs(self, args): 

2485 assert args[0] == '(' 

2486 assert args[-1] == ')' 

2487 result = ['('] 

2488 lastWord = [] 

2489 if self.class_name: 

2490 for item in list("self,"): 

2491 result.append(item) # May leave a trailing comma; it is removed below. 

2492 i = 1 

2493 while i < len(args): 

2494 i = self.skip_ws_and_nl(args, i) 

2495 ch = args[i] 

2496 if ch.isalpha(): 

2497 j = self.skip_past_word(args, i) 

2498 lastWord = args[i:j] 

2499 i = j 

2500 elif ch == ',' or ch == ')': 

2501 for item in lastWord: 

2502 result.append(item) 

2503 if lastWord and ch == ',': 

2504 result.append(',') 

2505 lastWord = [] 

2506 i += 1 

2507 else: i += 1 

2508 if result[-1] == ',': 

2509 del result[-1] 

2510 result.append(')') 

2511 result.append(':') 

2512 return result 

2513 #@+node:ekr.20150514063305.187: *7* massageFunctionHead (sets .class_name) 

2514 def massageFunctionHead(self, head): 

2515 result: List[Any] = [] 

2516 prevWord = [] 

2517 self.class_name = '' 

2518 i = 0 

2519 while i < len(head): 

2520 i = self.skip_ws_and_nl(head, i) 

2521 if i < len(head) and head[i].isalpha(): 

2522 result = [] 

2523 j = self.skip_past_word(head, i) 

2524 prevWord = head[i:j] 

2525 i = j 

2526 # look for ::word2 

2527 i = self.skip_ws(head, i) 

2528 if self.match(head, i, "::"): 

2529 # Set the global to the class name. 

2530 self.class_name = ''.join(prevWord) 

2531 # print("class name:", self.class_name) 

2532 i = self.skip_ws(head, i + 2) 

2533 if i < len(head) and (head[i] == '~' or head[i].isalpha()): 

2534 j = self.skip_past_word(head, i) 

2535 if head[i:j] == prevWord: 

2536 result.extend('__init__') 

2537 elif head[i] == '~' and head[i + 1 : j] == prevWord: 

2538 result.extend('__del__') 

2539 else: 

2540 # result.extend(list('::')) 

2541 result.extend(head[i:j]) 

2542 i = j 

2543 else: 

2544 result.extend(prevWord) 

2545 else: i += 1 

2546 finalResult = list("def ") 

2547 finalResult.extend(result) 

2548 return finalResult 

2549 #@+node:ekr.20150514063305.188: *7* massageFunctionBody & helper 

2550 def massageFunctionBody(self, body): 

2551 # body = self.massageIvars(body) 

2552 # body = self.removeCasts(body) 

2553 # body = self.removeTypeNames(body) 

2554 body = self.dedentBlocks(body) 

2555 return body 

2556 #@+node:ekr.20150514063305.189: *8* dedentBlocks 

2557 def dedentBlocks(self, body): 

2558 """ 

2559 Look for '{' preceded by '{' or '}' or ';' 

2560 (with intervening whitespace and comments). 

2561 """ 

2562 i = 0 

2563 while i < len(body): 

2564 j = i 

2565 ch = body[i] 

2566 if self.is_string_or_comment(body, i): 

2567 j = self.skip_string_or_comment(body, i) 

2568 elif ch in '{};': 

2569 # Look ahead for '{'. 

2570 j += 1 

2571 while True: 

2572 k = j 

2573 j = self.skip_ws_and_nl(body, j) 

2574 if self.is_string_or_comment(body, j): 

2575 j = self.skip_string_or_comment(body, j) 

2576 if k == j: 

2577 break 

2578 assert k < j 

2579 if self.match(body, j, '{'): 

2580 k = j 

2581 j = self.skip_to_matching_bracket(body, j) 

2582 m = '# <Start dedented block>...' 

2583 body[k : k + 1] = list(m) 

2584 j += len(m) 

2585 while k < j: 

2586 progress = k 

2587 if body[k] == '\n': 

2588 k += 1 

2589 spaces = 0 

2590 while spaces < 4 and k < j: 

2591 if body[k] == ' ': 

2592 spaces += 1 

2593 k += 1 

2594 else: 

2595 break 

2596 if spaces > 0: 

2597 del body[k - spaces : k] 

2598 k -= spaces 

2599 j -= spaces 

2600 else: 

2601 k += 1 

2602 assert progress < k 

2603 m = ' # <End dedented block>' 

2604 body[j : j + 1] = list(m) 

2605 j += len(m) 

2606 else: 

2607 j = i + 1 

2608 # Defensive programming. 

2609 if i == j: 

2610 j += 1 

2611 assert i < j 

2612 i = j 

2613 return body 

2614 #@-others 

2615 #@-others 

2616 c = self.c 

2617 TS_To_Python(c).go() 

2618 c.bodyWantsFocus() 

2619 #@+node:ekr.20160321042444.1: *3* ccc.import-jupyter-notebook 

2620 @cmd('import-jupyter-notebook') 

2621 def importJupyterNotebook(self, event): # pragma: no cover 

2622 """Prompt for a Jupyter (.ipynb) file and convert it to a Leo outline.""" 

2623 try: 

2624 import nbformat 

2625 assert nbformat 

2626 except ImportError: 

2627 g.es_print('import-jupyter-notebook requires nbformat package') 

2628 return 

2629 from leo.plugins.importers.ipynb import Import_IPYNB 

2630 # was @-others 

2631 c = self.c 

2632 x = Import_IPYNB(c) 

2633 fn = x.get_file_name() 

2634 if fn: 

2635 p = c.lastTopLevel() 

2636 root = p.insertAfter() 

2637 root.h = fn 

2638 x.import_file(fn, root) 

2639 c.redraw(root) 

2640 c.bodyWantsFocus() 

2641 #@+node:ekr.20160321072007.1: *3* ccc.export-jupyter-notebook 

2642 @cmd('export-jupyter-notebook') 

2643 def exportJupyterNotebook(self, event): # pragma: no cover 

2644 """Convert the present outline to a .ipynb file.""" 

2645 from leo.plugins.writers.ipynb import Export_IPYNB 

2646 c = self.c 

2647 Export_IPYNB(c).export_outline(c.p) 

2648 #@-others 

2649#@-others 

2650#@-leo