Coverage for src/debputy/lsp/lsp_debian_tests_control.py: 20%
160 statements
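"""LSP support for debian/tests/control (autopkgtest control) files.

Wires up hover, completion, folding ranges, semantic tokens and lint
diagnostics for the deb822-style debian/tests/control file format.
"""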
import re
from typing import (
    Union,
    Sequence,
    Tuple,
    Iterator,
    Optional,
    Iterable,
    Mapping,
    List,
)

from lsprotocol.types import (
    DiagnosticSeverity,
    Range,
    Diagnostic,
    Position,
    CompletionItem,
    CompletionList,
    CompletionParams,
    TEXT_DOCUMENT_WILL_SAVE_WAIT_UNTIL,
    DiagnosticRelatedInformation,
    Location,
    HoverParams,
    Hover,
    TEXT_DOCUMENT_CODE_ACTION,
    SemanticTokens,
    SemanticTokensParams,
    FoldingRangeParams,
    FoldingRange,
)

from debputy.linting.lint_util import LintState
from debputy.lsp.lsp_debian_control_reference_data import (
    Deb822KnownField,
    DTestsCtrlFileMetadata,
    _DTESTSCTRL_FIELDS,
)
from debputy.lsp.lsp_features import (
    lint_diagnostics,
    lsp_completer,
    lsp_hover,
    lsp_standard_handler,
    lsp_folding_ranges,
    lsp_semantic_tokens_full,
)
from debputy.lsp.lsp_generic_deb822 import (
    deb822_completer,
    deb822_hover,
    deb822_folding_ranges,
    deb822_semantic_tokens_full,
)
from debputy.lsp.quickfixes import (
    propose_correct_text_quick_fix,
)
from debputy.lsp.spellchecking import default_spellchecker
from debputy.lsp.text_util import (
    normalize_dctrl_field_name,
    LintCapablePositionCodec,
    detect_possible_typo,
    te_range_to_lsp,
)
from debputy.lsp.vendoring._deb822_repro import (
    parse_deb822_file,
    Deb822FileElement,
    Deb822ParagraphElement,
)
from debputy.lsp.vendoring._deb822_repro.parsing import (
    Deb822KeyValuePairElement,
    LIST_SPACE_SEPARATED_INTERPRETATION,
)
from debputy.lsp.vendoring._deb822_repro.tokens import (
    Deb822Token,
)
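# Guarded imports: pygls is only required when actually running as a
# language server; keeping it behind try/except leaves the module
# importable (e.g. for lint-only use) when pygls is not installed.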
try:
    from debputy.lsp.vendoring._deb822_repro.locatable import (
        Position as TEPosition,
        Range as TERange,
        START_POSITION,
    )

    from pygls.server import LanguageServer
    from pygls.workspace import TextDocument
except ImportError:
    pass
_CONTAINS_SPACE_OR_COLON = re.compile(r"[\s:]")
_LANGUAGE_IDS = [
    "debian/tests/control",
    # Emacs's name - expected in elpa-dpkg-dev-el (>> 37.11)
    "debian-autopkgtest-control-mode",
    # Likely to be Vim's name if it had support
    "debtestscontrol",
]

_DTESTS_CTRL_FILE_METADATA = DTestsCtrlFileMetadata()
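# Register the shared code-action and "will save (wait until)" handlers
# for these language IDs.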
lsp_standard_handler(_LANGUAGE_IDS, TEXT_DOCUMENT_CODE_ACTION)
lsp_standard_handler(_LANGUAGE_IDS, TEXT_DOCUMENT_WILL_SAVE_WAIT_UNTIL)
@lsp_hover(_LANGUAGE_IDS)
def debian_tests_control_hover(
    ls: "LanguageServer",
    params: HoverParams,
) -> Optional[Hover]:
    return deb822_hover(ls, params, _DTESTS_CTRL_FILE_METADATA)


@lsp_completer(_LANGUAGE_IDS)
def debian_tests_control_completions(
    ls: "LanguageServer",
    params: CompletionParams,
) -> Optional[Union[CompletionList, Sequence[CompletionItem]]]:
    return deb822_completer(ls, params, _DTESTS_CTRL_FILE_METADATA)


@lsp_folding_ranges(_LANGUAGE_IDS)
def debian_tests_control_folding_ranges(
    ls: "LanguageServer",
    params: FoldingRangeParams,
) -> Optional[Sequence[FoldingRange]]:
    return deb822_folding_ranges(ls, params, _DTESTS_CTRL_FILE_METADATA)
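# Walk the raw token stream while tracking, for each token, its start and
# end (line, column) in 0-based server units; tokens may span lines.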
def _deb822_token_iter(
    tokens: Iterable[Deb822Token],
) -> Iterator[Tuple[Deb822Token, int, int, int, int]]:
    line_no = 0
    line_offset = 0

    for token in tokens:
        start_line = line_no
        start_line_offset = line_offset

        newlines = token.text.count("\n")
        line_no += newlines
        text_len = len(token.text)
        if newlines:
            if token.text.endswith("\n"):
                line_offset = 0
            else:
                # Column after the token: the number of characters that
                # follow the last newline in the token's text.
                line_offset = text_len - token.text.rindex("\n") - 1
        else:
            line_offset += text_len

        yield token, start_line, start_line_offset, line_no, line_offset
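# The first key-value pair of a stanza is used as the anchor range for
# stanza-level diagnostics (e.g. "missing field" reports).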
def _paragraph_representation_field(
    paragraph: Deb822ParagraphElement,
) -> Deb822KeyValuePairElement:
    return next(iter(paragraph.iter_parts_of_type(Deb822KeyValuePairElement)))
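# Stanza-level checks: missing mandatory fields, the "exactly one of
# Tests/Test-Command" rule, unknown or misspelled field names, empty
# values, per-field diagnostics, value spellchecking and duplicate fields.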
def _diagnostics_for_paragraph(
    stanza: Deb822ParagraphElement,
    stanza_position: "TEPosition",
    known_fields: Mapping[str, Deb822KnownField],
    doc_reference: str,
    position_codec: "LintCapablePositionCodec",
    lines: List[str],
    diagnostics: List[Diagnostic],
) -> None:
    representation_field = _paragraph_representation_field(stanza)
    representation_field_pos = representation_field.position_in_parent().relative_to(
        stanza_position
    )
    representation_field_range_server_units = te_range_to_lsp(
        TERange.from_position_and_size(
            representation_field_pos, representation_field.size()
        )
    )
    representation_field_range = position_codec.range_to_client_units(
        lines,
        representation_field_range_server_units,
    )
    for known_field in known_fields.values():
        missing_field_severity = known_field.missing_field_severity
        if missing_field_severity is None or known_field.name in stanza:
            continue

        diagnostics.append(
            Diagnostic(
                representation_field_range,
                f"Stanza is missing field {known_field.name}",
                severity=missing_field_severity,
                source="debputy",
            )
        )

    if "Tests" not in stanza and "Test-Command" not in stanza:
        diagnostics.append(
            Diagnostic(
                representation_field_range,
                'Stanza must have either a "Tests" or a "Test-Command" field',
                severity=DiagnosticSeverity.Error,
                source="debputy",
            )
        )
    if "Tests" in stanza and "Test-Command" in stanza:
        diagnostics.append(
            Diagnostic(
                representation_field_range,
                'Stanza cannot have both a "Tests" and a "Test-Command" field',
                severity=DiagnosticSeverity.Error,
                source="debputy",
            )
        )
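    # Track the first occurrence of every field so later duplicates can be
    # reported with related information pointing back at the original.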
    seen_fields = {}

    for kvpair in stanza.iter_parts_of_type(Deb822KeyValuePairElement):
        field_name_token = kvpair.field_token
        field_name = field_name_token.text
        field_name_lc = field_name.lower()
        normalized_field_name_lc = normalize_dctrl_field_name(field_name_lc)
        known_field = known_fields.get(normalized_field_name_lc)
        field_value = stanza[field_name]
        field_range_te = kvpair.range_in_parent().relative_to(stanza_position)
        field_position_te = field_range_te.start_pos
        field_range_server_units = te_range_to_lsp(field_range_te)
        field_range = position_codec.range_to_client_units(
            lines,
            field_range_server_units,
        )
        field_name_typo_detected = False
        # Look up and store under the same (lowercased) key so duplicates
        # are actually matched against the first definition.
        existing_field_entry = seen_fields.get(field_name_lc)
        if existing_field_entry is not None:
            existing_field_entry[3].append(field_range)
        else:
            normalized_field_name = normalize_dctrl_field_name(field_name)
            seen_fields[field_name_lc] = (
                field_name,
                normalized_field_name,
                field_range,
                [],
            )
        if known_field is None:
            candidates = detect_possible_typo(normalized_field_name_lc, known_fields)
            if candidates:
                known_field = known_fields[candidates[0]]
                token_range_server_units = te_range_to_lsp(
                    TERange.from_position_and_size(
                        field_position_te, kvpair.field_token.size()
                    )
                )
                field_range = position_codec.range_to_client_units(
                    lines,
                    token_range_server_units,
                )
                field_name_typo_detected = True
                diagnostics.append(
                    Diagnostic(
                        field_range,
                        f'The field "{field_name}" looks like a typo of "{known_field.name}".',
                        severity=DiagnosticSeverity.Warning,
                        source="debputy",
                        data=[
                            propose_correct_text_quick_fix(known_fields[m].name)
                            for m in candidates
                        ],
                    )
                )
        if field_value.strip() == "":
            diagnostics.append(
                Diagnostic(
                    field_range,
                    f"The {field_name} field has no value. Either provide a value or remove it.",
                    severity=DiagnosticSeverity.Error,
                    source="debputy",
                )
            )
            continue
        if known_field is None:
            # Unknown field with no plausible correction; there is nothing
            # further to validate for it.
            continue
        diagnostics.extend(
            known_field.field_diagnostics(
                kvpair,
                stanza,
                stanza_position,
                position_codec,
                lines,
                field_name_typo_reported=field_name_typo_detected,
            )
        )
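        # Spellcheck free-text values word by word, translating each
        # misspelled word's offsets back into a client-side range.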
        if known_field.spellcheck_value:
            words = kvpair.interpret_as(LIST_SPACE_SEPARATED_INTERPRETATION)
            spell_checker = default_spellchecker()
            value_position = kvpair.value_element.position_in_parent().relative_to(
                field_position_te
            )
            for word_ref in words.iter_value_references():
                token = word_ref.value
                for word, pos, endpos in spell_checker.iter_words(token):
                    corrections = spell_checker.provide_corrections_for(word)
                    if not corrections:
                        continue
                    word_loc = word_ref.locatable
                    word_pos_te = word_loc.position_in_parent().relative_to(
                        value_position
                    )
                    if pos:
                        word_pos_te = TEPosition(0, pos).relative_to(word_pos_te)
                    # The size of the word as a (line-delta, column-delta) range.
                    word_size_te = TERange(
                        START_POSITION,
                        TEPosition(0, endpos - pos),
                    )
                    word_range_server_units = te_range_to_lsp(
                        TERange.from_position_and_size(word_pos_te, word_size_te)
                    )
                    word_range = position_codec.range_to_client_units(
                        lines,
                        word_range_server_units,
                    )
                    diagnostics.append(
                        Diagnostic(
                            word_range,
                            f'Spelling "{word}"',
                            severity=DiagnosticSeverity.Hint,
                            source="debputy",
                            data=[
                                propose_correct_text_quick_fix(c) for c in corrections
                            ],
                        )
                    )
        if known_field.warn_if_default and field_value == known_field.default_value:
            diagnostics.append(
                Diagnostic(
                    field_range,
                    f"The {field_name} field is redundant as it is set to the default value, and the"
                    " field should only be used in exceptional cases.",
                    severity=DiagnosticSeverity.Warning,
                    source="debputy",
                )
            )
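    # Emit one diagnostic per duplicate definition, each linking back to
    # the first definition of the field.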
    for (
        field_name,
        normalized_field_name,
        field_range,
        duplicates,
    ) in seen_fields.values():
        if not duplicates:
            continue
        related_information = [
            DiagnosticRelatedInformation(
                location=Location(doc_reference, field_range),
                message=f"First definition of {field_name}",
            )
        ]
        related_information.extend(
            DiagnosticRelatedInformation(
                location=Location(doc_reference, r),
                message=f"Duplicate of {field_name}",
            )
            for r in duplicates
        )
        for dup_range in duplicates:
            diagnostics.append(
                Diagnostic(
                    dup_range,
                    f"The {normalized_field_name} field name was used multiple times in this stanza."
                    " Please ensure the field is only used once per stanza.",
                    severity=DiagnosticSeverity.Error,
                    source="debputy",
                    related_information=related_information,
                )
            )
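# Scan the raw token stream: parser error tokens become syntax-error
# diagnostics and comment tokens are spellchecked. Returns the first line
# containing an error token (or len(lines) + 1 when there is none), so the
# stanza-level pass can stop before positions become unreliable.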
def _scan_for_syntax_errors_and_token_level_diagnostics(
    deb822_file: Deb822FileElement,
    position_codec: LintCapablePositionCodec,
    lines: List[str],
    diagnostics: List[Diagnostic],
) -> int:
    first_error = len(lines) + 1
    spell_checker = default_spellchecker()
    for (
        token,
        start_line,
        start_offset,
        end_line,
        end_offset,
    ) in _deb822_token_iter(deb822_file.iter_tokens()):
        if token.is_error:
            first_error = min(first_error, start_line)
            start_pos = Position(
                start_line,
                start_offset,
            )
            end_pos = Position(
                end_line,
                end_offset,
            )
            token_range = position_codec.range_to_client_units(
                lines, Range(start_pos, end_pos)
            )
            diagnostics.append(
                Diagnostic(
                    token_range,
                    "Syntax error",
                    severity=DiagnosticSeverity.Error,
                    source="debputy (python-debian parser)",
                )
            )
        elif token.is_comment:
            for word, pos, endpos in spell_checker.iter_words(token.text):
                corrections = spell_checker.provide_corrections_for(word)
                if not corrections:
                    continue
                start_pos = Position(
                    start_line,
                    pos,
                )
                end_pos = Position(
                    start_line,
                    endpos,
                )
                word_range = position_codec.range_to_client_units(
                    lines, Range(start_pos, end_pos)
                )
                diagnostics.append(
                    Diagnostic(
                        word_range,
                        f'Spelling "{word}"',
                        severity=DiagnosticSeverity.Hint,
                        source="debputy",
                        data=[propose_correct_text_quick_fix(c) for c in corrections],
                    )
                )
    return first_error
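# Lint entry point: parse leniently (error tokens and duplicated fields
# are accepted rather than fatal), collect token-level diagnostics first,
# then run stanza-level checks on everything before the first syntax error.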
@lint_diagnostics(_LANGUAGE_IDS)
def _lint_debian_tests_control(
    lint_state: LintState,
) -> Optional[List[Diagnostic]]:
    lines = lint_state.lines
    position_codec = lint_state.position_codec
    doc_reference = lint_state.doc_uri
    diagnostics = []
    deb822_file = parse_deb822_file(
        lines,
        accept_files_with_duplicated_fields=True,
        accept_files_with_error_tokens=True,
    )

    first_error = _scan_for_syntax_errors_and_token_level_diagnostics(
        deb822_file,
        position_codec,
        lines,
        diagnostics,
    )

    paragraphs = list(deb822_file)

    for paragraph in paragraphs:
        paragraph_pos = paragraph.position_in_file()
        if paragraph_pos.line_position >= first_error:
            # Positions past the first syntax error are unreliable.
            break
        known_fields = _DTESTSCTRL_FIELDS
        _diagnostics_for_paragraph(
            paragraph,
            paragraph_pos,
            known_fields,
            doc_reference,
            position_codec,
            lines,
            diagnostics,
        )
    return diagnostics
@lsp_semantic_tokens_full(_LANGUAGE_IDS)
def _semantic_tokens_full(
    ls: "LanguageServer",
    request: SemanticTokensParams,
) -> Optional[SemanticTokens]:
    return deb822_semantic_tokens_full(
        ls,
        request,
        _DTESTS_CTRL_FILE_METADATA,
    )