1/* Subroutines used by or related to instruction recognition.
2 Copyright (C) 1987-2026 Free Software Foundation, Inc.
3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify it under
7the terms of the GNU General Public License as published by the Free
8Software Foundation; either version 3, or (at your option) any later
9version.
10
11GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12WARRANTY; without even the implied warranty of MERCHANTABILITY or
13FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14for more details.
15
16You should have received a copy of the GNU General Public License
17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
19
20
21#include "config.h"
22#include "system.h"
23#include "coretypes.h"
24#include "backend.h"
25#include "target.h"
26#include "rtl.h"
27#include "tree.h"
28#include "stmt.h"
29#include "cfghooks.h"
30#include "df.h"
31#include "memmodel.h"
32#include "tm_p.h"
33#include "insn-config.h"
34#include "regs.h"
35#include "emit-rtl.h"
36#include "recog.h"
37#include "insn-attr.h"
38#include "addresses.h"
39#include "cfgrtl.h"
40#include "cfgbuild.h"
41#include "cfgcleanup.h"
42#include "reload.h"
43#include "tree-pass.h"
44#include "function-abi.h"
45#include "rtl-iter.h"
46
/* The auto-modify code used to pop the stack: popping reads the value
   at the stack top and then moves the pointer away from it, so the
   direction depends on which way the stack grows.  A target may
   pre-define STACK_POP_CODE to override this default.  */
#ifndef STACK_POP_CODE
#if STACK_GROWS_DOWNWARD
#define STACK_POP_CODE POST_INC
#else
#define STACK_POP_CODE POST_DEC
#endif
#endif
54
/* Forward declarations of local subroutines.  */
static void validate_replace_rtx_1 (rtx *, rtx, rtx, rtx_insn *, bool);
static void validate_replace_src_1 (rtx *, void *);
static rtx_insn *split_insn (rtx_insn *);

/* Recognizer state for the default target; with SWITCHABLE_TARGET,
   this_target_recog points at the state of the currently active target.  */
struct target_recog default_target_recog;
#if SWITCHABLE_TARGET
struct target_recog *this_target_recog = &default_target_recog;
#endif
63
/* Nonzero means allow operands to be volatile.
   This should be 0 if you are generating rtl, such as if you are calling
   the functions in optabs.cc and expmed.cc (most of the time).
   This should be 1 if all valid insns need to be recognized,
   such as in reginfo.cc and final.cc and reload.cc.

   init_recog and init_recog_no_volatile are responsible for setting this.  */

int volatile_ok;

/* Operand data extracted from the most recently extracted insn
   (see extract_insn).  */
struct recog_data_d recog_data;

/* Contains a vector of operand_alternative structures, such that
   operand OP of alternative A is at index A * n_operands + OP.
   Set up by preprocess_constraints.  */
const operand_alternative *recog_op_alt;

/* Used to provide recog_op_alt for asms.  */
static operand_alternative asm_op_alt[MAX_RECOG_OPERANDS
				      * MAX_RECOG_ALTERNATIVES];

/* On return from `constrain_operands', indicate which alternative
   was satisfied (or -1 if none was).  */

int which_alternative;

/* True for inline asm operands with - constraint modifier.  */
bool raw_constraint_p;

/* Nonzero after end of reload pass.
   Set to 1 or 0 by toplev.cc.
   Controls the significance of (SUBREG (MEM)).  */

int reload_completed;

/* Nonzero after thread_prologue_and_epilogue_insns has run.  */
int epilogue_completed;
101
/* Initialize data used by the function `recog'.
   This must be called once in the compilation of a function
   before any insn recognition may be done in the function.
   This variant disallows volatile operands, which is appropriate
   while generating new rtl (see the comment on volatile_ok).  */

void
init_recog_no_volatile (void)
{
  volatile_ok = 0;
}
111
/* As init_recog_no_volatile, but allow volatile operands: every valid
   insn must be recognizable, as in final.cc and reload.cc.  */

void
init_recog (void)
{
  volatile_ok = 1;
}
117
118
119/* Return true if labels in asm operands BODY are LABEL_REFs. */
120
121static bool
122asm_labels_ok (rtx body)
123{
124 rtx asmop;
125 int i;
126
127 asmop = extract_asm_operands (body);
128 if (asmop == NULL_RTX)
129 return true;
130
131 for (i = 0; i < ASM_OPERANDS_LABEL_LENGTH (asmop); i++)
132 if (GET_CODE (ASM_OPERANDS_LABEL (asmop, i)) != LABEL_REF)
133 return false;
134
135 return true;
136}
137
138/* Check that X is an insn-body for an `asm' with operands
139 and that the operands mentioned in it are legitimate. */
140
141bool
142check_asm_operands (rtx x)
143{
144 int noperands;
145 rtx *operands;
146 const char **constraints;
147 int i;
148
149 if (!asm_labels_ok (body: x))
150 return false;
151
152 /* Post-reload, be more strict with things. */
153 if (reload_completed)
154 {
155 /* ??? Doh! We've not got the wrapping insn. Cook one up. */
156 rtx_insn *insn = make_insn_raw (x);
157 extract_insn (insn);
158 constrain_operands (1, get_enabled_alternatives (insn));
159 return which_alternative >= 0;
160 }
161
162 noperands = asm_noperands (x);
163 if (noperands < 0)
164 return false;
165 if (noperands == 0)
166 return true;
167
168 operands = XALLOCAVEC (rtx, noperands);
169 constraints = XALLOCAVEC (const char *, noperands);
170
171 decode_asm_operands (x, operands, NULL, constraints, NULL, NULL);
172
173 for (i = 0; i < noperands; i++)
174 {
175 const char *c = constraints[i];
176 if (c[0] == '%')
177 c++;
178 if (! asm_operand_ok (operands[i], c, constraints))
179 return false;
180 }
181
182 return true;
183}
184
/* Static data for the next two routines.  */

/* One queued (possibly not yet validated) modification to the rtl.  */
struct change_t
{
  rtx object;		/* The insn or MEM being changed, or zero.  */
  int old_code;		/* Saved INSN_CODE when OBJECT is an insn.  */
  int old_len;		/* Saved XVECLEN for length changes, else -1.  */
  bool unshare;		/* Copy *LOC when the group is confirmed?  */
  rtx *loc;		/* The location within OBJECT that was changed.  */
  rtx old;		/* The previous contents of *LOC.  */
};

/* Dynamically-grown array recording all pending changes.  */
static change_t *changes;
/* Current capacity of CHANGES.  */
static int changes_allocated;

/* Number of entries of CHANGES currently in use.  */
static int num_changes = 0;
/* Number of trailing changes temporarily undone by undo_recog_changes.  */
int undo_recog_changes::s_num_changes = 0;
202
/* Validate a proposed change to OBJECT.  LOC is the location in the rtl
   at which NEW_RTX will be placed.  If NEW_LEN is >= 0, XVECLEN (NEW_RTX, 0)
   will also be changed to NEW_LEN, which is no greater than the current
   XVECLEN.  If OBJECT is zero, no validation is done, the change is
   simply made.

   Two types of objects are supported:  If OBJECT is a MEM, memory_address_p
   will be called with the address and mode as parameters.  If OBJECT is
   an INSN, CALL_INSN, or JUMP_INSN, the insn will be re-recognized with
   the change in place.

   IN_GROUP is nonzero if this is part of a group of changes that must be
   performed as a group.  In that case, the changes will be stored.  The
   function `apply_change_group' will validate and apply the changes.

   If IN_GROUP is zero, this is a single change.  Try to recognize the insn
   or validate the memory reference with the change applied.  If the result
   is not valid for the machine, suppress the change and return false.
   Otherwise, perform the change and return true.

   If UNSHARE is true, *LOC will be copy_rtx'd when the group is
   confirmed.  */

static bool
validate_change_1 (rtx object, rtx *loc, rtx new_rtx, bool in_group,
		   bool unshare, int new_len = -1)
{
  gcc_assert (!undo_recog_changes::is_active ());
  rtx old = *loc;

  /* Single-element parallels aren't valid and won't match anything.
     Replace them with the single element.  */
  if (new_len == 1 && GET_CODE (new_rtx) == PARALLEL)
    {
      new_rtx = XVECEXP (new_rtx, 0, 0);
      new_len = -1;
    }

  /* When a change is part of a group, callers expect to be able to change
     INSN_CODE after making the change and have the code reset to its old
     value by a later cancel_changes.  We therefore need to register group
     changes even if they're no-ops.  */
  if (!in_group
      && (old == new_rtx || rtx_equal_p (old, new_rtx))
      && (new_len < 0 || XVECLEN (new_rtx, 0) == new_len))
    return true;

  /* A length change must be applied to the rtx already at *LOC.  */
  gcc_assert ((in_group != 0 || num_changes == 0)
	      && (new_len < 0 || new_rtx == *loc));

  *loc = new_rtx;

  /* Save the information describing this change.  */
  if (num_changes >= changes_allocated)
    {
      if (changes_allocated == 0)
	/* This value allows for repeated substitutions inside complex
	   indexed addresses, or changes in up to 5 insns.  */
	changes_allocated = MAX_RECOG_OPERANDS * 5;
      else
	changes_allocated *= 2;

      changes = XRESIZEVEC (change_t, changes, changes_allocated);
    }

  changes[num_changes].object = object;
  changes[num_changes].loc = loc;
  changes[num_changes].old = old;
  /* For a length change, remember the original length so cancel_changes
     can restore it; otherwise mark this entry as a contents change.  */
  changes[num_changes].old_len = (new_len >= 0 ? XVECLEN (new_rtx, 0) : -1);
  changes[num_changes].unshare = unshare;

  if (new_len >= 0)
    XVECLEN (new_rtx, 0) = new_len;

  if (object && !MEM_P (object))
    {
      /* Set INSN_CODE to force rerecognition of insn.  Save old code in
	 case invalid.  */
      changes[num_changes].old_code = INSN_CODE (object);
      INSN_CODE (object) = -1;
    }

  num_changes++;

  /* If we are making a group of changes, return 1.  Otherwise, validate the
     change group we made.  */

  if (in_group)
    return true;
  else
    return apply_change_group ();
}
292
293/* Wrapper for validate_change_1 without the UNSHARE argument defaulting
294 UNSHARE to false. */
295
296bool
297validate_change (rtx object, rtx *loc, rtx new_rtx, bool in_group)
298{
299 return validate_change_1 (object, loc, new_rtx, in_group, unshare: false);
300}
301
302/* Wrapper for validate_change_1 without the UNSHARE argument defaulting
303 UNSHARE to true. */
304
305bool
306validate_unshare_change (rtx object, rtx *loc, rtx new_rtx, bool in_group)
307{
308 return validate_change_1 (object, loc, new_rtx, in_group, unshare: true);
309}
310
311/* Change XVECLEN (*LOC, 0) to NEW_LEN. OBJECT, IN_GROUP and the return
312 value are as for validate_change_1. */
313
314bool
315validate_change_xveclen (rtx object, rtx *loc, int new_len, bool in_group)
316{
317 return validate_change_1 (object, loc, new_rtx: *loc, in_group, unshare: false, new_len);
318}
319
320/* Keep X canonicalized if some changes have made it non-canonical; only
321 modifies the operands of X, not (for example) its code. Simplifications
322 are not the job of this routine.
323
324 Return true if anything was changed. */
325bool
326canonicalize_change_group (rtx_insn *insn, rtx x)
327{
328 if (COMMUTATIVE_P (x)
329 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
330 {
331 /* Oops, the caller has made X no longer canonical.
332 Let's redo the changes in the correct order. */
333 rtx tem = XEXP (x, 0);
334 validate_unshare_change (object: insn, loc: &XEXP (x, 0), XEXP (x, 1), in_group: 1);
335 validate_unshare_change (object: insn, loc: &XEXP (x, 1), new_rtx: tem, in_group: 1);
336 return true;
337 }
338 else
339 return false;
340}
341
342/* Check if REG_INC argument in *data overlaps a stored REG. */
343
344static void
345check_invalid_inc_dec (rtx reg, const_rtx, void *data)
346{
347 rtx *pinc = (rtx *) data;
348 if (*pinc == NULL_RTX || MEM_P (reg))
349 return;
350 if (reg_overlap_mentioned_p (reg, *pinc))
351 *pinc = NULL_RTX;
352}
353
354/* This subroutine of apply_change_group verifies whether the changes to INSN
355 were valid; i.e. whether INSN can still be recognized.
356
357 If IN_GROUP is true clobbers which have to be added in order to
358 match the instructions will be added to the current change group.
359 Otherwise the changes will take effect immediately. */
360
361bool
362insn_invalid_p (rtx_insn *insn, bool in_group)
363{
364 rtx pat = PATTERN (insn);
365 int num_clobbers = 0;
366 /* If we are before reload and the pattern is a SET, see if we can add
367 clobbers. */
368 int icode = recog (pat, insn,
369 (GET_CODE (pat) == SET
370 && ! reload_completed
371 && ! reload_in_progress)
372 ? &num_clobbers : 0);
373 bool is_asm = icode < 0 && asm_noperands (PATTERN (insn)) >= 0;
374
375
376 /* If this is an asm and the operand aren't legal, then fail. Likewise if
377 this is not an asm and the insn wasn't recognized. */
378 if ((is_asm && ! check_asm_operands (x: PATTERN (insn)))
379 || (!is_asm && icode < 0))
380 return true;
381
382 /* If we have to add CLOBBERs, fail if we have to add ones that reference
383 hard registers since our callers can't know if they are live or not.
384 Otherwise, add them. */
385 if (num_clobbers > 0)
386 {
387 rtx newpat;
388
389 if (added_clobbers_hard_reg_p (icode))
390 return true;
391
392 newpat = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_clobbers + 1));
393 XVECEXP (newpat, 0, 0) = pat;
394 add_clobbers (newpat, icode);
395 if (in_group)
396 validate_change (object: insn, loc: &PATTERN (insn), new_rtx: newpat, in_group: 1);
397 else
398 PATTERN (insn) = pat = newpat;
399 }
400
401 /* After reload, verify that all constraints are satisfied. */
402 if (reload_completed)
403 {
404 extract_insn (insn);
405
406 if (! constrain_operands (1, get_preferred_alternatives (insn)))
407 return true;
408 }
409
410 /* Punt if REG_INC argument overlaps some stored REG. */
411 for (rtx link = FIND_REG_INC_NOTE (insn, NULL_RTX);
412 link; link = XEXP (link, 1))
413 if (REG_NOTE_KIND (link) == REG_INC)
414 {
415 rtx reg = XEXP (link, 0);
416 note_stores (insn, check_invalid_inc_dec, &reg);
417 if (reg == NULL_RTX)
418 return true;
419 }
420
421 INSN_CODE (insn) = icode;
422 return false;
423}
424
/* Return the number of changes made and not validated yet.
   (Synonymous with num_validated_changes; both report the size of the
   current change group.)  */
int
num_changes_pending (void)
{
  return num_changes;
}
431
432/* Tentatively apply the changes numbered NUM and up.
433 Return true if all changes are valid, false otherwise. */
434
435bool
436verify_changes (int num)
437{
438 int i;
439 rtx last_validated = NULL_RTX;
440
441 /* The changes have been applied and all INSN_CODEs have been reset to force
442 rerecognition.
443
444 The changes are valid if we aren't given an object, or if we are
445 given a MEM and it still is a valid address, or if this is in insn
446 and it is recognized. In the latter case, if reload has completed,
447 we also require that the operands meet the constraints for
448 the insn. */
449
450 for (i = num; i < num_changes; i++)
451 {
452 rtx object = changes[i].object;
453
454 /* If there is no object to test or if it is the same as the one we
455 already tested, ignore it. */
456 if (object == 0 || object == last_validated)
457 continue;
458
459 if (MEM_P (object))
460 {
461 if (! memory_address_addr_space_p (GET_MODE (object),
462 XEXP (object, 0),
463 MEM_ADDR_SPACE (object)))
464 break;
465 }
466 else if (/* changes[i].old might be zero, e.g. when putting a
467 REG_FRAME_RELATED_EXPR into a previously empty list. */
468 changes[i].old
469 && REG_P (changes[i].old)
470 && asm_noperands (PATTERN (insn: object)) > 0
471 && register_asm_p (changes[i].old))
472 {
473 /* Don't allow changes of hard register operands to inline
474 assemblies if they have been defined as register asm ("x"). */
475 break;
476 }
477 else if (DEBUG_INSN_P (object))
478 continue;
479 else if (insn_invalid_p (insn: as_a <rtx_insn *> (p: object), in_group: true))
480 {
481 rtx pat = PATTERN (insn: object);
482
483 /* Perhaps we couldn't recognize the insn because there were
484 extra CLOBBERs at the end. If so, try to re-recognize
485 without the last CLOBBER (later iterations will cause each of
486 them to be eliminated, in turn). But don't do this if we
487 have an ASM_OPERAND. */
488 if (GET_CODE (pat) == PARALLEL
489 && GET_CODE (XVECEXP (pat, 0, XVECLEN (pat, 0) - 1)) == CLOBBER
490 && asm_noperands (PATTERN (insn: object)) < 0)
491 {
492 rtx newpat;
493
494 if (XVECLEN (pat, 0) == 2)
495 newpat = XVECEXP (pat, 0, 0);
496 else
497 {
498 int j;
499
500 newpat
501 = gen_rtx_PARALLEL (VOIDmode,
502 rtvec_alloc (XVECLEN (pat, 0) - 1));
503 for (j = 0; j < XVECLEN (newpat, 0); j++)
504 XVECEXP (newpat, 0, j) = XVECEXP (pat, 0, j);
505 }
506
507 /* Add a new change to this group to replace the pattern
508 with this new pattern. Then consider this change
509 as having succeeded. The change we added will
510 cause the entire call to fail if things remain invalid.
511
512 Note that this can lose if a later change than the one
513 we are processing specified &XVECEXP (PATTERN (object), 0, X)
514 but this shouldn't occur. */
515
516 validate_change (object, loc: &PATTERN (insn: object), new_rtx: newpat, in_group: 1);
517 continue;
518 }
519 else if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER
520 || GET_CODE (pat) == VAR_LOCATION)
521 /* If this insn is a CLOBBER or USE, it is always valid, but is
522 never recognized. */
523 continue;
524 else
525 break;
526 }
527 last_validated = object;
528 }
529
530 return (i == num_changes);
531}
532
533/* A group of changes has previously been issued with validate_change
534 and verified with verify_changes. Call df_insn_rescan for each of
535 the insn changed and clear num_changes. */
536
537void
538confirm_change_group (void)
539{
540 int i;
541 rtx last_object = NULL;
542
543 gcc_assert (!undo_recog_changes::is_active ());
544 for (i = 0; i < num_changes; i++)
545 {
546 rtx object = changes[i].object;
547
548 if (changes[i].unshare)
549 *changes[i].loc = copy_rtx (*changes[i].loc);
550
551 /* Avoid unnecessary rescanning when multiple changes to same instruction
552 are made. */
553 if (object)
554 {
555 if (object != last_object && last_object && INSN_P (last_object))
556 df_insn_rescan (as_a <rtx_insn *> (p: last_object));
557 last_object = object;
558 }
559 }
560
561 if (last_object && INSN_P (last_object))
562 df_insn_rescan (as_a <rtx_insn *> (p: last_object));
563 num_changes = 0;
564}
565
566/* Apply a group of changes previously issued with `validate_change'.
567 If all changes are valid, call confirm_change_group and return true,
568 otherwise, call cancel_changes and return false. */
569
570bool
571apply_change_group (void)
572{
573 if (verify_changes (num: 0))
574 {
575 confirm_change_group ();
576 return true;
577 }
578 else
579 {
580 cancel_changes (0);
581 return false;
582 }
583}
584
585
/* Return the number of changes so far in the current group.
   Callers typically record this before making further changes so they
   can later cancel back to this point.  */

int
num_validated_changes (void)
{
  return num_changes;
}
593
594/* Retract the changes numbered NUM and up. */
595
596void
597cancel_changes (int num)
598{
599 gcc_assert (!undo_recog_changes::is_active ());
600 int i;
601
602 /* Back out all the changes. Do this in the opposite order in which
603 they were made. */
604 for (i = num_changes - 1; i >= num; i--)
605 {
606 if (changes[i].old_len >= 0)
607 XVECLEN (*changes[i].loc, 0) = changes[i].old_len;
608 else
609 *changes[i].loc = changes[i].old;
610 if (changes[i].object && !MEM_P (changes[i].object))
611 {
612 INSN_CODE (changes[i].object) = changes[i].old_code;
613 if (recog_data.insn == changes[i].object)
614 recog_data.insn = nullptr;
615 }
616 }
617 num_changes = num;
618}
619
620/* Swap the status of change NUM from being applied to not being applied,
621 or vice versa. */
622
623static void
624swap_change (int num)
625{
626 if (changes[num].old_len >= 0)
627 std::swap (XVECLEN (*changes[num].loc, 0), b&: changes[num].old_len);
628 else
629 std::swap (a&: *changes[num].loc, b&: changes[num].old);
630 if (changes[num].object && !MEM_P (changes[num].object))
631 {
632 std::swap (INSN_CODE (changes[num].object), b&: changes[num].old_code);
633 if (recog_data.insn == changes[num].object)
634 recog_data.insn = nullptr;
635 }
636}
637
638undo_recog_changes::undo_recog_changes (int num)
639 : m_old_num_changes (s_num_changes)
640{
641 gcc_assert (num <= num_changes - s_num_changes);
642 for (int i = num_changes - s_num_changes - 1; i >= num; i--)
643 swap_change (num: i);
644 s_num_changes = num_changes - num;
645}
646
647undo_recog_changes::~undo_recog_changes ()
648{
649 for (int i = num_changes - s_num_changes;
650 i < num_changes - m_old_num_changes; ++i)
651 swap_change (num: i);
652 s_num_changes = m_old_num_changes;
653}
654
655/* Reduce conditional compilation elsewhere. */
656/* A subroutine of validate_replace_rtx_1 that tries to simplify the resulting
657 rtx. */
658
659static void
660simplify_while_replacing (rtx *loc, rtx to, rtx_insn *object,
661 machine_mode op0_mode)
662{
663 rtx x = *loc;
664 enum rtx_code code = GET_CODE (x);
665 rtx new_rtx = NULL_RTX;
666 scalar_int_mode is_mode;
667
668 if (SWAPPABLE_OPERANDS_P (x)
669 && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
670 {
671 validate_unshare_change (object, loc,
672 gen_rtx_fmt_ee (COMMUTATIVE_ARITH_P (x) ? code
673 : swap_condition (code),
674 GET_MODE (x), XEXP (x, 1),
675 XEXP (x, 0)), in_group: 1);
676 x = *loc;
677 code = GET_CODE (x);
678 }
679
680 /* Canonicalize arithmetics with all constant operands. */
681 switch (GET_RTX_CLASS (code))
682 {
683 case RTX_UNARY:
684 if (CONSTANT_P (XEXP (x, 0)))
685 new_rtx = simplify_unary_operation (code, GET_MODE (x), XEXP (x, 0),
686 op_mode: op0_mode);
687 break;
688 case RTX_COMM_ARITH:
689 case RTX_BIN_ARITH:
690 if (CONSTANT_P (XEXP (x, 0)) && CONSTANT_P (XEXP (x, 1)))
691 new_rtx = simplify_binary_operation (code, GET_MODE (x), XEXP (x, 0),
692 XEXP (x, 1));
693 break;
694 case RTX_COMPARE:
695 case RTX_COMM_COMPARE:
696 if (CONSTANT_P (XEXP (x, 0)) && CONSTANT_P (XEXP (x, 1)))
697 new_rtx = simplify_relational_operation (code, GET_MODE (x), op_mode: op0_mode,
698 XEXP (x, 0), XEXP (x, 1));
699 break;
700 default:
701 break;
702 }
703 if (new_rtx)
704 {
705 validate_change (object, loc, new_rtx, in_group: 1);
706 return;
707 }
708
709 switch (code)
710 {
711 case PLUS:
712 /* If we have a PLUS whose second operand is now a CONST_INT, use
713 simplify_gen_binary to try to simplify it.
714 ??? We may want later to remove this, once simplification is
715 separated from this function. */
716 if (CONST_INT_P (XEXP (x, 1)) && XEXP (x, 1) == to)
717 validate_change (object, loc,
718 new_rtx: simplify_gen_binary
719 (code: PLUS, GET_MODE (x), XEXP (x, 0), XEXP (x, 1)), in_group: 1);
720 break;
721 case MINUS:
722 if (CONST_SCALAR_INT_P (XEXP (x, 1)))
723 validate_change (object, loc,
724 new_rtx: simplify_gen_binary
725 (code: PLUS, GET_MODE (x), XEXP (x, 0),
726 op1: simplify_gen_unary (code: NEG,
727 GET_MODE (x), XEXP (x, 1),
728 GET_MODE (x))), in_group: 1);
729 break;
730 case ZERO_EXTEND:
731 case SIGN_EXTEND:
732 if (GET_MODE (XEXP (x, 0)) == VOIDmode)
733 {
734 new_rtx = simplify_gen_unary (code, GET_MODE (x), XEXP (x, 0),
735 op_mode: op0_mode);
736 /* If any of the above failed, substitute in something that
737 we know won't be recognized. */
738 if (!new_rtx)
739 new_rtx = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
740 validate_change (object, loc, new_rtx, in_group: 1);
741 }
742 break;
743 case SUBREG:
744 /* All subregs possible to simplify should be simplified. */
745 new_rtx = simplify_subreg (GET_MODE (x), SUBREG_REG (x), innermode: op0_mode,
746 SUBREG_BYTE (x));
747
748 /* Subregs of VOIDmode operands are incorrect. */
749 if (!new_rtx && GET_MODE (SUBREG_REG (x)) == VOIDmode)
750 new_rtx = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx);
751 if (new_rtx)
752 validate_change (object, loc, new_rtx, in_group: 1);
753 break;
754 case ZERO_EXTRACT:
755 case SIGN_EXTRACT:
756 /* If we are replacing a register with memory, try to change the memory
757 to be the mode required for memory in extract operations (this isn't
758 likely to be an insertion operation; if it was, nothing bad will
759 happen, we might just fail in some cases). */
760
761 if (MEM_P (XEXP (x, 0))
762 && is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), result: &is_mode)
763 && CONST_INT_P (XEXP (x, 1))
764 && CONST_INT_P (XEXP (x, 2))
765 && !mode_dependent_address_p (XEXP (XEXP (x, 0), 0),
766 MEM_ADDR_SPACE (XEXP (x, 0)))
767 && !MEM_VOLATILE_P (XEXP (x, 0)))
768 {
769 int pos = INTVAL (XEXP (x, 2));
770 machine_mode new_mode = is_mode;
771 if (GET_CODE (x) == ZERO_EXTRACT && targetm.have_extzv ())
772 new_mode = insn_data[targetm.code_for_extzv].operand[1].mode;
773 else if (GET_CODE (x) == SIGN_EXTRACT && targetm.have_extv ())
774 new_mode = insn_data[targetm.code_for_extv].operand[1].mode;
775 scalar_int_mode wanted_mode = (new_mode == VOIDmode
776 ? word_mode
777 : as_a <scalar_int_mode> (m: new_mode));
778
779 /* If we have a narrower mode, we can do something. */
780 if (GET_MODE_SIZE (mode: wanted_mode) < GET_MODE_SIZE (mode: is_mode))
781 {
782 int offset = pos / BITS_PER_UNIT;
783 rtx newmem;
784
785 /* If the bytes and bits are counted differently, we
786 must adjust the offset. */
787 if (BYTES_BIG_ENDIAN != BITS_BIG_ENDIAN)
788 offset =
789 (GET_MODE_SIZE (mode: is_mode) - GET_MODE_SIZE (mode: wanted_mode) -
790 offset);
791
792 gcc_assert (GET_MODE_PRECISION (wanted_mode)
793 == GET_MODE_BITSIZE (wanted_mode));
794 pos %= GET_MODE_BITSIZE (mode: wanted_mode);
795
796 newmem = adjust_address_nv (XEXP (x, 0), wanted_mode, offset);
797
798 validate_change (object, loc: &XEXP (x, 2), GEN_INT (pos), in_group: 1);
799 validate_change (object, loc: &XEXP (x, 0), new_rtx: newmem, in_group: 1);
800 }
801 }
802
803 break;
804
805 default:
806 break;
807 }
808}
809
810/* Replace every occurrence of FROM in X with TO. Mark each change with
811 validate_change passing OBJECT. */
812
813static void
814validate_replace_rtx_1 (rtx *loc, rtx from, rtx to, rtx_insn *object,
815 bool simplify)
816{
817 int i, j;
818 const char *fmt;
819 rtx x = *loc;
820 enum rtx_code code;
821 machine_mode op0_mode = VOIDmode;
822 int prev_changes = num_changes;
823
824 if (!x)
825 return;
826
827 code = GET_CODE (x);
828 fmt = GET_RTX_FORMAT (code);
829 if (fmt[0] == 'e')
830 op0_mode = GET_MODE (XEXP (x, 0));
831
832 /* X matches FROM if it is the same rtx or they are both referring to the
833 same register in the same mode. Avoid calling rtx_equal_p unless the
834 operands look similar. */
835
836 if (x == from
837 || (REG_P (x) && REG_P (from)
838 && GET_MODE (x) == GET_MODE (from)
839 && REGNO (x) == REGNO (from))
840 || (GET_CODE (x) == GET_CODE (from) && GET_MODE (x) == GET_MODE (from)
841 && rtx_equal_p (x, from)))
842 {
843 validate_unshare_change (object, loc, new_rtx: to, in_group: 1);
844 return;
845 }
846
847 /* Call ourself recursively to perform the replacements.
848 We must not replace inside already replaced expression, otherwise we
849 get infinite recursion for replacements like (reg X)->(subreg (reg X))
850 so we must special case shared ASM_OPERANDS. */
851
852 if (GET_CODE (x) == PARALLEL)
853 {
854 for (j = XVECLEN (x, 0) - 1; j >= 0; j--)
855 {
856 if (j && GET_CODE (XVECEXP (x, 0, j)) == SET
857 && GET_CODE (SET_SRC (XVECEXP (x, 0, j))) == ASM_OPERANDS)
858 {
859 /* Verify that operands are really shared. */
860 gcc_assert (ASM_OPERANDS_INPUT_VEC (SET_SRC (XVECEXP (x, 0, 0)))
861 == ASM_OPERANDS_INPUT_VEC (SET_SRC (XVECEXP
862 (x, 0, j))));
863 validate_replace_rtx_1 (loc: &SET_DEST (XVECEXP (x, 0, j)),
864 from, to, object, simplify);
865 }
866 else
867 validate_replace_rtx_1 (loc: &XVECEXP (x, 0, j), from, to, object,
868 simplify);
869 }
870 }
871 else
872 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
873 {
874 if (fmt[i] == 'e')
875 validate_replace_rtx_1 (loc: &XEXP (x, i), from, to, object, simplify);
876 else if (fmt[i] == 'E')
877 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
878 validate_replace_rtx_1 (loc: &XVECEXP (x, i, j), from, to, object,
879 simplify);
880 }
881
882 /* If we didn't substitute, there is nothing more to do. */
883 if (num_changes == prev_changes)
884 return;
885
886 /* ??? The regmove is no more, so is this aberration still necessary? */
887 /* Allow substituted expression to have different mode. This is used by
888 regmove to change mode of pseudo register. */
889 if (fmt[0] == 'e' && GET_MODE (XEXP (x, 0)) != VOIDmode)
890 op0_mode = GET_MODE (XEXP (x, 0));
891
892 /* Do changes needed to keep rtx consistent. Don't do any other
893 simplifications, as it is not our job. */
894 if (simplify)
895 simplify_while_replacing (loc, to, object, op0_mode);
896}
897
898/* Try replacing every occurrence of FROM in subexpression LOC of INSN
899 with TO. After all changes have been made, validate by seeing
900 if INSN is still valid. */
901
902bool
903validate_replace_rtx_subexp (rtx from, rtx to, rtx_insn *insn, rtx *loc)
904{
905 validate_replace_rtx_1 (loc, from, to, object: insn, simplify: true);
906 return apply_change_group ();
907}
908
909/* Try replacing every occurrence of FROM in INSN with TO. After all
910 changes have been made, validate by seeing if INSN is still valid. */
911
912bool
913validate_replace_rtx (rtx from, rtx to, rtx_insn *insn)
914{
915 validate_replace_rtx_1 (loc: &PATTERN (insn), from, to, object: insn, simplify: true);
916 return apply_change_group ();
917}
918
919/* Try replacing every occurrence of FROM in WHERE with TO. Assume that WHERE
920 is a part of INSN. After all changes have been made, validate by seeing if
921 INSN is still valid.
922 validate_replace_rtx (from, to, insn) is equivalent to
923 validate_replace_rtx_part (from, to, &PATTERN (insn), insn). */
924
925bool
926validate_replace_rtx_part (rtx from, rtx to, rtx *where, rtx_insn *insn)
927{
928 validate_replace_rtx_1 (loc: where, from, to, object: insn, simplify: true);
929 return apply_change_group ();
930}
931
932/* Same as above, but do not simplify rtx afterwards. */
933bool
934validate_replace_rtx_part_nosimplify (rtx from, rtx to, rtx *where,
935 rtx_insn *insn)
936{
937 validate_replace_rtx_1 (loc: where, from, to, object: insn, simplify: false);
938 return apply_change_group ();
939
940}
941
942/* Try replacing every occurrence of FROM in INSN with TO. This also
943 will replace in REG_EQUAL and REG_EQUIV notes. */
944
945void
946validate_replace_rtx_group (rtx from, rtx to, rtx_insn *insn)
947{
948 rtx note;
949 validate_replace_rtx_1 (loc: &PATTERN (insn), from, to, object: insn, simplify: true);
950 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
951 if (REG_NOTE_KIND (note) == REG_EQUAL
952 || REG_NOTE_KIND (note) == REG_EQUIV)
953 validate_replace_rtx_1 (loc: &XEXP (note, 0), from, to, object: insn, simplify: true);
954}
955
/* Closure passed through note_uses to validate_replace_src_1, carrying
   the FROM -> TO substitution being performed.  */
struct validate_replace_src_data
{
  rtx from;			/* Old RTX */
  rtx to;			/* New RTX */
  rtx_insn *insn;		/* Insn in which substitution is occurring.  */
};
963
964static void
965validate_replace_src_1 (rtx *x, void *data)
966{
967 struct validate_replace_src_data *d
968 = (struct validate_replace_src_data *) data;
969
970 validate_replace_rtx_1 (loc: x, from: d->from, to: d->to, object: d->insn, simplify: true);
971}
972
973/* Try replacing every occurrence of FROM in INSN with TO, avoiding
974 SET_DESTs. */
975
976void
977validate_replace_src_group (rtx from, rtx to, rtx_insn *insn)
978{
979 struct validate_replace_src_data d;
980
981 d.from = from;
982 d.to = to;
983 d.insn = insn;
984 note_uses (&PATTERN (insn), validate_replace_src_1, &d);
985}
986
987/* Try simplify INSN.
988 Invoke simplify_rtx () on every SET_SRC and SET_DEST inside the INSN's
989 pattern and return true if something was simplified. */
990
991bool
992validate_simplify_insn (rtx_insn *insn)
993{
994 int i;
995 rtx pat = NULL;
996 rtx newpat = NULL;
997
998 pat = PATTERN (insn);
999
1000 if (GET_CODE (pat) == SET)
1001 {
1002 newpat = simplify_rtx (SET_SRC (pat));
1003 if (newpat && !rtx_equal_p (SET_SRC (pat), newpat))
1004 validate_change (object: insn, loc: &SET_SRC (pat), new_rtx: newpat, in_group: 1);
1005 newpat = simplify_rtx (SET_DEST (pat));
1006 if (newpat && !rtx_equal_p (SET_DEST (pat), newpat))
1007 validate_change (object: insn, loc: &SET_DEST (pat), new_rtx: newpat, in_group: 1);
1008 }
1009 else if (GET_CODE (pat) == PARALLEL)
1010 for (i = 0; i < XVECLEN (pat, 0); i++)
1011 {
1012 rtx s = XVECEXP (pat, 0, i);
1013
1014 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
1015 {
1016 newpat = simplify_rtx (SET_SRC (s));
1017 if (newpat && !rtx_equal_p (SET_SRC (s), newpat))
1018 validate_change (object: insn, loc: &SET_SRC (s), new_rtx: newpat, in_group: 1);
1019 newpat = simplify_rtx (SET_DEST (s));
1020 if (newpat && !rtx_equal_p (SET_DEST (s), newpat))
1021 validate_change (object: insn, loc: &SET_DEST (s), new_rtx: newpat, in_group: 1);
1022 }
1023 }
1024 return ((num_changes_pending () > 0) && (apply_change_group () > 0));
1025}
1026
1027/* Try to process the address of memory expression MEM. Return true on
1028 success; leave the caller to clean up on failure. */
1029
1030bool
1031insn_propagation::apply_to_mem_1 (rtx mem)
1032{
1033 auto old_num_changes = num_validated_changes ();
1034 mem_depth += 1;
1035 bool res = apply_to_rvalue_1 (&XEXP (mem, 0));
1036 mem_depth -= 1;
1037 if (!res)
1038 return false;
1039
1040 if (old_num_changes != num_validated_changes ()
1041 && should_check_mems
1042 && !check_mem (old_num_changes, mem))
1043 return false;
1044
1045 return true;
1046}
1047
/* Try to process the rvalue expression at *LOC.  Return true on success;
   leave the caller to clean up on failure.

   On success, substitutions are queued (not yet applied) in the current
   change group; FAILURE_REASON may be set when returning false.  */

bool
insn_propagation::apply_to_rvalue_1 (rtx *loc)
{
  rtx x = *loc;
  enum rtx_code code = GET_CODE (x);
  machine_mode mode = GET_MODE (x);

  auto old_num_changes = num_validated_changes ();
  /* First see whether X itself matches FROM, in which case the whole
     expression is replaced by TO (possibly adjusted for mode).  */
  if (from
      && GET_CODE (x) == GET_CODE (from)
      && (REG_P (x)
	  ? REGNO (x) == REGNO (from)
	  : rtx_equal_p (x, from)))
    {
      /* Don't replace register asms in asm statements; we mustn't
	 change the user's register allocation.  */
      if (REG_P (x)
	  && HARD_REGISTER_P (x)
	  && register_asm_p (x)
	  && asm_noperands (PATTERN (insn)) > 0)
	return false;

      rtx newval = to;
      if (GET_MODE (x) != GET_MODE (from))
	{
	  /* X is a different-mode reference to the same hard register;
	     take the corresponding lowpart of TO.  */
	  gcc_assert (REG_P (x) && HARD_REGISTER_P (x));
	  if (REG_NREGS (x) != REG_NREGS (from)
	      || !REG_CAN_CHANGE_MODE_P (REGNO (x), GET_MODE (from),
					 GET_MODE (x)))
	    return false;

	  /* If the reference is paradoxical and the replacement
	     value contains registers, we would need to check that the
	     simplification below does not increase REG_NREGS for those
	     registers either.  It seems simpler to punt on nonconstant
	     values instead.  */
	  if (paradoxical_subreg_p (GET_MODE (x), GET_MODE (from))
	      && !CONSTANT_P (to))
	    return false;

	  newval = simplify_subreg (GET_MODE (x), to, GET_MODE (from),
				    subreg_lowpart_offset (GET_MODE (x),
							   GET_MODE (from)));
	  if (!newval)
	    return false;

	  /* Check that the simplification didn't just push an explicit
	     subreg down into subexpressions.  In particular, for a register
	     R that has a fixed mode, such as the stack pointer, a subreg of:

	       (plus:M (reg:M R) (const_int C))

	     would be:

	       (plus:N (subreg:N (reg:M R) ...) (const_int C'))

	     But targets can legitimately assume that subregs of hard registers
	     will not be created after RA (except in special circumstances,
	     such as strict_low_part).  */
	  subrtx_iterator::array_type array;
	  FOR_EACH_SUBRTX (iter, array, newval, NONCONST)
	    if (GET_CODE (*iter) == SUBREG)
	      return false;
	}

      if (should_unshare)
	validate_unshare_change (insn, loc, newval, 1);
      else
	validate_change (insn, loc, newval, 1);
      if (mem_depth && !REG_P (newval) && !CONSTANT_P (newval))
	{
	  /* We're substituting into an address, but TO will have the
	     form expected outside an address.  Canonicalize it if
	     necessary.  */
	  insn_propagation subprop (insn);
	  subprop.mem_depth += 1;
	  if (!subprop.apply_to_rvalue (loc))
	    gcc_unreachable ();
	  if (should_unshare
	      && num_validated_changes () != old_num_changes + 1)
	    {
	      /* TO is owned by someone else, so create a copy and
		 return TO to its original form.  */
	      newval = copy_rtx (*loc);
	      cancel_changes (old_num_changes);
	      validate_change (insn, loc, newval, 1);
	    }
	}
      num_replacements += 1;
      should_unshare = true;
      result_flags |= UNSIMPLIFIED;
      return true;
    }

  /* Recursively apply the substitution and see if we can simplify
     the result.  This specifically shouldn't use simplify_gen_* for
     speculative simplifications, since we want to avoid generating new
     expressions where possible.  */
  auto old_result_flags = result_flags;
  rtx newx = NULL_RTX;
  bool recurse_p = false;
  switch (GET_RTX_CLASS (code))
    {
    case RTX_UNARY:
      {
	/* Record the operand mode before substitution may change it.  */
	machine_mode op0_mode = GET_MODE (XEXP (x, 0));
	if (!apply_to_rvalue_1 (&XEXP (x, 0)))
	  return false;
	if (from && old_num_changes == num_validated_changes ())
	  return true;

	newx = simplify_unary_operation (code, mode, XEXP (x, 0), op0_mode);
	break;
      }

    case RTX_BIN_ARITH:
    case RTX_COMM_ARITH:
      {
	if (!apply_to_rvalue_1 (&XEXP (x, 0))
	    || !apply_to_rvalue_1 (&XEXP (x, 1)))
	  return false;
	if (from && old_num_changes == num_validated_changes ())
	  return true;

	/* Recanonicalize commutative operations whose operand order
	   the substitution may have invalidated.  */
	if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
	    && swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
	  newx = simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
	else
	  newx = simplify_binary_operation (code, mode,
					    XEXP (x, 0), XEXP (x, 1));
	break;
      }

    case RTX_COMPARE:
    case RTX_COMM_COMPARE:
      {
	/* A comparison's operand mode may be VOIDmode on one side
	   (e.g. a constant); prefer whichever side has a real mode.  */
	machine_mode op_mode = (GET_MODE (XEXP (x, 0)) != VOIDmode
				? GET_MODE (XEXP (x, 0))
				: GET_MODE (XEXP (x, 1)));
	if (!apply_to_rvalue_1 (&XEXP (x, 0))
	    || !apply_to_rvalue_1 (&XEXP (x, 1)))
	  return false;
	if (from && old_num_changes == num_validated_changes ())
	  return true;

	newx = simplify_relational_operation (code, mode, op_mode,
					      XEXP (x, 0), XEXP (x, 1));
	break;
      }

    case RTX_TERNARY:
    case RTX_BITFIELD_OPS:
      {
	machine_mode op0_mode = GET_MODE (XEXP (x, 0));
	if (!apply_to_rvalue_1 (&XEXP (x, 0))
	    || !apply_to_rvalue_1 (&XEXP (x, 1))
	    || !apply_to_rvalue_1 (&XEXP (x, 2)))
	  return false;
	if (from && old_num_changes == num_validated_changes ())
	  return true;

	newx = simplify_ternary_operation (code, mode, op0_mode,
					   XEXP (x, 0), XEXP (x, 1),
					   XEXP (x, 2));
	break;
      }

    case RTX_EXTRA:
      if (code == SUBREG)
	{
	  machine_mode inner_mode = GET_MODE (SUBREG_REG (x));
	  if (!apply_to_rvalue_1 (&SUBREG_REG (x)))
	    return false;
	  if (from && old_num_changes == num_validated_changes ())
	    return true;

	  rtx inner = SUBREG_REG (x);
	  newx = simplify_subreg (mode, inner, inner_mode, SUBREG_BYTE (x));
	  /* Reject the same cases that simplify_gen_subreg would.  */
	  if (!newx
	      && (GET_CODE (inner) == SUBREG
		  || GET_CODE (inner) == CONCAT
		  || GET_MODE (inner) == VOIDmode
		  || !validate_subreg (mode, inner_mode,
				       inner, SUBREG_BYTE (x))))
	    {
	      failure_reason = "would create an invalid subreg";
	      return false;
	    }
	  break;
	}
      else
	recurse_p = true;
      break;

    case RTX_OBJ:
      if (code == LO_SUM)
	{
	  if (!apply_to_rvalue_1 (&XEXP (x, 0))
	      || !apply_to_rvalue_1 (&XEXP (x, 1)))
	    return false;
	  if (from && old_num_changes == num_validated_changes ())
	    return true;

	  /* (lo_sum (high x) y) -> y where x and y have the same base.  */
	  rtx op0 = XEXP (x, 0);
	  rtx op1 = XEXP (x, 1);
	  if (GET_CODE (op0) == HIGH)
	    {
	      rtx base0, base1, offset0, offset1;
	      split_const (XEXP (op0, 0), &base0, &offset0);
	      split_const (op1, &base1, &offset1);
	      if (rtx_equal_p (base0, base1))
		newx = op1;
	    }
	}
      else if (code == REG)
	{
	  /* X is a register that partially overlaps FROM but is not
	     identical to it (the identical case was handled above);
	     such a substitution cannot be expressed.  */
	  if (from && REG_P (from) && reg_overlap_mentioned_p (x, from))
	    {
	      failure_reason = "inexact register overlap";
	      return false;
	    }
	}
      else if (code == MEM)
	return apply_to_mem_1 (x);
      else
	recurse_p = true;
      break;

    case RTX_CONST_OBJ:
      break;

    case RTX_AUTOINC:
      /* The autoincremented register is both read and written, so FROM
	 must not be substituted into it.  */
      if (from && reg_overlap_mentioned_p (XEXP (x, 0), from))
	{
	  failure_reason = "is subject to autoinc";
	  return false;
	}
      recurse_p = true;
      break;

    case RTX_MATCH:
    case RTX_INSN:
      gcc_unreachable ();
    }

  if (recurse_p)
    {
      /* Generic walk over the operands, guided by the rtx format.  */
      const char *fmt = GET_RTX_FORMAT (code);
      for (int i = 0; fmt[i]; i++)
	switch (fmt[i])
	  {
	  case 'E':
	    for (int j = 0; j < XVECLEN (x, i); j++)
	      if (!apply_to_rvalue_1 (&XVECEXP (x, i, j)))
		return false;
	    break;

	  case 'e':
	    if (XEXP (x, i) && !apply_to_rvalue_1 (&XEXP (x, i)))
	      return false;
	    break;
	  }
    }
  else if (newx && !rtx_equal_p (x, newx))
    {
      /* All substitutions made by OLD_NUM_CHANGES onwards have been
	 simplified.  */
      result_flags = ((result_flags & ~UNSIMPLIFIED)
		      | (old_result_flags & UNSIMPLIFIED));

      if (should_note_simplifications)
	note_simplification (old_num_changes, old_result_flags, x, newx);

      /* There's no longer any point unsharing the substitutions made
	 for subexpressions, since we'll just copy this one instead.  */
      bool unshare = false;
      for (int i = old_num_changes; i < num_changes; ++i)
	{
	  unshare |= changes[i].unshare;
	  changes[i].unshare = false;
	}
      if (unshare)
	validate_unshare_change (insn, loc, newx, 1);
      else
	validate_change (insn, loc, newx, 1);
    }

  return true;
}
1342
/* Try to process the lvalue expression at *LOC.  Return true on success;
   leave the caller to clean up on failure.  */

bool
insn_propagation::apply_to_lvalue_1 (rtx dest)
{
  rtx old_dest = dest;
  /* Strip wrappers around the real destination.  A ZERO_EXTRACT's
     position and length operands are rvalues, so propagate into them.  */
  while (GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT
	 || GET_CODE (dest) == STRICT_LOW_PART)
    {
      if (GET_CODE (dest) == ZERO_EXTRACT
	  && (!apply_to_rvalue_1 (&XEXP (dest, 1))
	      || !apply_to_rvalue_1 (&XEXP (dest, 2))))
	return false;
      dest = XEXP (dest, 0);
    }

  /* A memory destination still reads its address as an rvalue.  */
  if (MEM_P (dest))
    return apply_to_mem_1 (dest);

  /* Check whether the substitution is safe in the presence of this lvalue.  */
  if (!from
      || dest == old_dest
      || !REG_P (dest)
      || !reg_overlap_mentioned_p (dest, from))
    return true;

  /* A SUBREG that overwrites the whole of DEST does not read the old
     value, so the substitution is still safe.  */
  if (SUBREG_P (old_dest)
      && SUBREG_REG (old_dest) == dest
      && !read_modify_subreg_p (old_dest))
    return true;

  failure_reason = "is part of a read-write destination";
  return false;
}
1379
/* Try to process the instruction pattern at *LOC.  Return true on success;
   leave the caller to clean up on failure.

   This dispatches on the pattern's top-level code so that lvalues and
   rvalues are handled with the appropriate helper.  */

bool
insn_propagation::apply_to_pattern_1 (rtx *loc)
{
  rtx body = *loc;
  switch (GET_CODE (body))
    {
    case COND_EXEC:
      /* The condition is an rvalue; the guarded body is a pattern.  */
      return (apply_to_rvalue_1 (&COND_EXEC_TEST (body))
	      && apply_to_pattern_1 (&COND_EXEC_CODE (body)));

    case PARALLEL:
      for (int i = 0; i < XVECLEN (body, 0); ++i)
	{
	  rtx *subloc = &XVECEXP (body, 0, i);
	  if (GET_CODE (*subloc) == SET)
	    {
	      if (!apply_to_lvalue_1 (SET_DEST (*subloc)))
		return false;
	      /* ASM_OPERANDS are shared between SETs in the same PARALLEL.
		 Only process them on the first iteration.  */
	      if ((i == 0 || GET_CODE (SET_SRC (*subloc)) != ASM_OPERANDS)
		  && !apply_to_rvalue_1 (&SET_SRC (*subloc)))
		return false;
	    }
	  else
	    {
	      if (!apply_to_pattern_1 (subloc))
		return false;
	    }
	}
      return true;

    case ASM_OPERANDS:
      /* An asm with no outputs: only the inputs are rvalues.  */
      for (int i = 0, len = ASM_OPERANDS_INPUT_LENGTH (body); i < len; ++i)
	if (!apply_to_rvalue_1 (&ASM_OPERANDS_INPUT (body, i)))
	  return false;
      return true;

    case CLOBBER:
      return apply_to_lvalue_1 (XEXP (body, 0));

    case SET:
      return (apply_to_lvalue_1 (SET_DEST (body))
	      && apply_to_rvalue_1 (&SET_SRC (body)));

    default:
      /* All the other possibilities never store and can use a normal
	 rtx walk.  This includes:

	 - USE
	 - TRAP_IF
	 - PREFETCH
	 - UNSPEC
	 - UNSPEC_VOLATILE.  */
      return apply_to_rvalue_1 (loc);
    }
}
1440
1441/* Apply this insn_propagation object's simplification or substitution
1442 to the instruction pattern at LOC. */
1443
1444bool
1445insn_propagation::apply_to_pattern (rtx *loc)
1446{
1447 unsigned int num_changes = num_validated_changes ();
1448 bool res = apply_to_pattern_1 (loc);
1449 if (!res)
1450 cancel_changes (num: num_changes);
1451 return res;
1452}
1453
1454/* Apply this insn_propagation object's simplification or substitution
1455 to the rvalue expression at LOC. */
1456
1457bool
1458insn_propagation::apply_to_rvalue (rtx *loc)
1459{
1460 unsigned int num_changes = num_validated_changes ();
1461 bool res = apply_to_rvalue_1 (loc);
1462 if (!res)
1463 cancel_changes (num: num_changes);
1464 return res;
1465}
1466
1467/* Like apply_to_rvalue, but specifically for the case where *LOC is in
1468 a note. This never changes the INSN_CODE. */
1469
1470bool
1471insn_propagation::apply_to_note (rtx *loc)
1472{
1473 auto old_code = INSN_CODE (insn);
1474 bool res = apply_to_rvalue (loc);
1475 if (INSN_CODE (insn) != old_code)
1476 INSN_CODE (insn) = old_code;
1477 return res;
1478}
1479
1480/* Check whether INSN matches a specific alternative of an .md pattern. */
1481
1482bool
1483valid_insn_p (rtx_insn *insn)
1484{
1485 recog_memoized (insn);
1486 if (INSN_CODE (insn) < 0)
1487 return false;
1488 extract_insn (insn);
1489 /* We don't know whether the insn will be in code that is optimized
1490 for size or speed, so consider all enabled alternatives. */
1491 if (!constrain_operands (1, get_enabled_alternatives (insn)))
1492 return false;
1493 return true;
1494}
1495
/* Return true if OP is a valid general operand for machine mode MODE.
   This is either a register reference, a memory reference,
   or a constant.  In the case of a memory reference, the address
   is checked for general validity for the target machine.

   Register and memory references must have mode MODE in order to be valid,
   but some constants have no machine mode and are valid for any mode.

   If MODE is VOIDmode, OP is checked for validity for whatever mode
   it has.

   The main use of this function is as a predicate in match_operand
   expressions in the machine description.  */

bool
general_operand (rtx op, machine_mode mode)
{
  enum rtx_code code = GET_CODE (op);

  if (mode == VOIDmode)
    mode = GET_MODE (op);

  /* Don't accept CONST_INT or anything similar
     if the caller wants something floating.  */
  if (GET_MODE (op) == VOIDmode && mode != VOIDmode
      && GET_MODE_CLASS (mode) != MODE_INT
      && GET_MODE_CLASS (mode) != MODE_PARTIAL_INT)
    return false;

  /* A CONST_INT must be representable in MODE (i.e. already
     sign-extended for it).  */
  if (CONST_INT_P (op)
      && mode != VOIDmode
      && trunc_int_for_mode (INTVAL (op), mode) != INTVAL (op))
    return false;

  if (CONSTANT_P (op))
    return ((GET_MODE (op) == VOIDmode || GET_MODE (op) == mode
	     || mode == VOIDmode)
	    && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))
	    && targetm.legitimate_constant_p (mode == VOIDmode
					      ? GET_MODE (op)
					      : mode, op));

  /* Except for certain constants with VOIDmode, already checked for,
     OP's mode must match MODE if MODE specifies a mode.  */

  if (GET_MODE (op) != mode)
    return false;

  if (code == SUBREG)
    {
      rtx sub = SUBREG_REG (op);

#ifdef INSN_SCHEDULING
      /* On machines that have insn scheduling, we want all memory
	 reference to be explicit, so outlaw paradoxical SUBREGs.
	 However, we must allow them after reload so that they can
	 get cleaned up by cleanup_subreg_operands.  */
      if (!reload_completed && MEM_P (sub)
	  && paradoxical_subreg_p (op))
	return false;
#endif
      /* Avoid memories with nonzero SUBREG_BYTE, as offsetting the memory
	 may result in incorrect reference.  We should simplify all valid
	 subregs of MEM anyway.  But allow this after reload because we
	 might be called from cleanup_subreg_operands.

	 ??? This is a kludge.  */
      if (!reload_completed
	  && maybe_ne (SUBREG_BYTE (op), 0)
	  && MEM_P (sub))
	return false;

      /* A hard-register subreg is only valid if the register can
	 actually be accessed in the outer mode.  */
      if (REG_P (sub)
	  && REGNO (sub) < FIRST_PSEUDO_REGISTER
	  && !REG_CAN_CHANGE_MODE_P (REGNO (sub), GET_MODE (sub), mode)
	  && GET_MODE_CLASS (GET_MODE (sub)) != MODE_COMPLEX_INT
	  && GET_MODE_CLASS (GET_MODE (sub)) != MODE_COMPLEX_FLOAT
	  /* LRA can generate some invalid SUBREGS just for matched
	     operand reload presentation.  LRA needs to treat them as
	     valid.  */
	  && ! LRA_SUBREG_P (op))
	return false;

      /* FLOAT_MODE subregs can't be paradoxical.  Combine will occasionally
	 create such rtl, and we must reject it.  */
      if (SCALAR_FLOAT_MODE_P (GET_MODE (op))
	  /* LRA can use subreg to store a floating point value in an
	     integer mode.  Although the floating point and the
	     integer modes need the same number of hard registers, the
	     size of floating point mode can be less than the integer
	     mode.  */
	  && ! lra_in_progress
	  && paradoxical_subreg_p (op))
	return false;

      /* Fall through and validate the inner expression.  */
      op = sub;
      code = GET_CODE (op);
    }

  if (code == REG)
    return (REGNO (op) >= FIRST_PSEUDO_REGISTER
	    || in_hard_reg_set_p (operand_reg_set, GET_MODE (op), REGNO (op)));

  if (code == MEM)
    {
      rtx y = XEXP (op, 0);

      /* If -ffuse-ops-with-volatile-access is enabled, allow volatile
	 memory reference.  */
      if (!flag_fuse_ops_with_volatile_access
	  && !volatile_ok
	  && MEM_VOLATILE_P (op))
	return false;

      /* Use the mem's mode, since it will be reloaded thus.  LRA can
	 generate move insn with invalid addresses which is made valid
	 and efficiently calculated by LRA through further numerous
	 transformations.  */
      if (lra_in_progress
	  || memory_address_addr_space_p (GET_MODE (op), y, MEM_ADDR_SPACE (op)))
	return true;
    }

  return false;
}
1621
1622/* Return true if OP is a valid memory address for a memory reference
1623 of mode MODE.
1624
1625 The main use of this function is as a predicate in match_operand
1626 expressions in the machine description. */
1627
1628bool
1629address_operand (rtx op, machine_mode mode)
1630{
1631 /* Wrong mode for an address expr. */
1632 if (GET_MODE (op) != VOIDmode
1633 && ! SCALAR_INT_MODE_P (GET_MODE (op)))
1634 return false;
1635
1636 return memory_address_p (mode, op);
1637}
1638
1639/* Return true if OP is a register reference of mode MODE.
1640 If MODE is VOIDmode, accept a register in any mode.
1641
1642 The main use of this function is as a predicate in match_operand
1643 expressions in the machine description. */
1644
1645bool
1646register_operand (rtx op, machine_mode mode)
1647{
1648 if (GET_CODE (op) == SUBREG)
1649 {
1650 rtx sub = SUBREG_REG (op);
1651
1652 /* Before reload, we can allow (SUBREG (MEM...)) as a register operand
1653 because it is guaranteed to be reloaded into one.
1654 Just make sure the MEM is valid in itself.
1655 (Ideally, (SUBREG (MEM)...) should not exist after reload,
1656 but currently it does result from (SUBREG (REG)...) where the
1657 reg went on the stack.) */
1658 if (!REG_P (sub) && (reload_completed || !MEM_P (sub)))
1659 return false;
1660 }
1661 else if (!REG_P (op))
1662 return false;
1663 return general_operand (op, mode);
1664}
1665
1666/* Return true for a register in Pmode; ignore the tested mode. */
1667
1668bool
1669pmode_register_operand (rtx op, machine_mode mode ATTRIBUTE_UNUSED)
1670{
1671 return register_operand (op, Pmode);
1672}
1673
1674/* Return true if OP should match a MATCH_SCRATCH, i.e., if it is a SCRATCH
1675 or a hard register. */
1676
1677bool
1678scratch_operand (rtx op, machine_mode mode)
1679{
1680 if (GET_MODE (op) != mode && mode != VOIDmode)
1681 return false;
1682
1683 return (GET_CODE (op) == SCRATCH
1684 || (REG_P (op)
1685 && (lra_in_progress
1686 || (REGNO (op) < FIRST_PSEUDO_REGISTER
1687 && REGNO_REG_CLASS (REGNO (op)) != NO_REGS))));
1688}
1689
1690/* Return true if OP is a valid immediate operand for mode MODE.
1691
1692 The main use of this function is as a predicate in match_operand
1693 expressions in the machine description. */
1694
1695bool
1696immediate_operand (rtx op, machine_mode mode)
1697{
1698 /* Don't accept CONST_INT or anything similar
1699 if the caller wants something floating. */
1700 if (GET_MODE (op) == VOIDmode && mode != VOIDmode
1701 && GET_MODE_CLASS (mode) != MODE_INT
1702 && GET_MODE_CLASS (mode) != MODE_PARTIAL_INT)
1703 return false;
1704
1705 if (CONST_INT_P (op)
1706 && mode != VOIDmode
1707 && trunc_int_for_mode (INTVAL (op), mode) != INTVAL (op))
1708 return false;
1709
1710 return (CONSTANT_P (op)
1711 && (GET_MODE (op) == mode || mode == VOIDmode
1712 || GET_MODE (op) == VOIDmode)
1713 && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (op))
1714 && targetm.legitimate_constant_p (mode == VOIDmode
1715 ? GET_MODE (op)
1716 : mode, op));
1717}
1718
1719/* Return true if OP is an operand that is a CONST_INT of mode MODE. */
1720
1721bool
1722const_int_operand (rtx op, machine_mode mode)
1723{
1724 if (!CONST_INT_P (op))
1725 return false;
1726
1727 if (mode != VOIDmode
1728 && trunc_int_for_mode (INTVAL (op), mode) != INTVAL (op))
1729 return false;
1730
1731 return true;
1732}
1733
1734#if TARGET_SUPPORTS_WIDE_INT
/* Return true if OP is an operand that is a CONST_INT or CONST_WIDE_INT
   of mode MODE.  */
bool
const_scalar_int_operand (rtx op, machine_mode mode)
{
  if (!CONST_SCALAR_INT_P (op))
    return false;

  if (CONST_INT_P (op))
    return const_int_operand (op, mode);

  if (mode != VOIDmode)
    {
      scalar_int_mode int_mode = as_a <scalar_int_mode> (mode);
      int prec = GET_MODE_PRECISION (int_mode);
      int bitsize = GET_MODE_BITSIZE (int_mode);

      /* The wide-int value must not need more HOST_WIDE_INT elements
	 than MODE provides room for.  */
      if (CONST_WIDE_INT_NUNITS (op) * HOST_BITS_PER_WIDE_INT > bitsize)
	return false;

      if (prec == bitsize)
	return true;
      else
	{
	  /* Multiword partial int.  */
	  /* The top element must be properly sign-extended to the
	     partial-int precision within its HOST_WIDE_INT.  */
	  HOST_WIDE_INT x
	    = CONST_WIDE_INT_ELT (op, CONST_WIDE_INT_NUNITS (op) - 1);
	  return (sext_hwi (x, prec & (HOST_BITS_PER_WIDE_INT - 1)) == x);
	}
    }
  return true;
}
1767
1768/* Return true if OP is an operand that is a constant integer or constant
1769 floating-point number of MODE. */
1770
1771bool
1772const_double_operand (rtx op, machine_mode mode)
1773{
1774 return (GET_CODE (op) == CONST_DOUBLE)
1775 && (GET_MODE (op) == mode || mode == VOIDmode);
1776}
1777#else
1778/* Return true if OP is an operand that is a constant integer or constant
1779 floating-point number of MODE. */
1780
1781bool
1782const_double_operand (rtx op, machine_mode mode)
1783{
1784 /* Don't accept CONST_INT or anything similar
1785 if the caller wants something floating. */
1786 if (GET_MODE (op) == VOIDmode && mode != VOIDmode
1787 && GET_MODE_CLASS (mode) != MODE_INT
1788 && GET_MODE_CLASS (mode) != MODE_PARTIAL_INT)
1789 return false;
1790
1791 return ((CONST_DOUBLE_P (op) || CONST_INT_P (op))
1792 && (mode == VOIDmode || GET_MODE (op) == mode
1793 || GET_MODE (op) == VOIDmode));
1794}
1795#endif
1796/* Return true if OP is a general operand that is not an immediate
1797 operand of mode MODE. */
1798
1799bool
1800nonimmediate_operand (rtx op, machine_mode mode)
1801{
1802 return (general_operand (op, mode) && ! CONSTANT_P (op));
1803}
1804
1805/* Return true if OP is a register reference or
1806 immediate value of mode MODE. */
1807
1808bool
1809nonmemory_operand (rtx op, machine_mode mode)
1810{
1811 if (CONSTANT_P (op))
1812 return immediate_operand (op, mode);
1813 return register_operand (op, mode);
1814}
1815
/* Return true if OP is a valid operand that stands for pushing a
   value of mode MODE onto the stack.

   The main use of this function is as a predicate in match_operand
   expressions in the machine description.  */

bool
push_operand (rtx op, machine_mode mode)
{
  if (!MEM_P (op))
    return false;

  if (mode != VOIDmode && GET_MODE (op) != mode)
    return false;

  poly_int64 rounded_size = GET_MODE_SIZE (mode);

#ifdef PUSH_ROUNDING
  /* Some targets round the pushed size up (e.g. to a word boundary).  */
  rounded_size = PUSH_ROUNDING (MACRO_INT (rounded_size));
#endif

  /* Examine the address of the MEM.  */
  op = XEXP (op, 0);

  if (known_eq (rounded_size, GET_MODE_SIZE (mode)))
    {
      /* No rounding: a plain pre-increment/decrement of the stack
	 pointer (STACK_PUSH_CODE) is expected.  */
      if (GET_CODE (op) != STACK_PUSH_CODE)
	return false;
    }
  else
    {
      /* Rounding applies: the address must be
	 (pre_modify sp (plus sp <offset>)) where <offset> is
	 -ROUNDED_SIZE for a downward-growing stack, +ROUNDED_SIZE
	 otherwise.  */
      poly_int64 offset;
      if (GET_CODE (op) != PRE_MODIFY
	  || GET_CODE (XEXP (op, 1)) != PLUS
	  || XEXP (XEXP (op, 1), 0) != XEXP (op, 0)
	  || !poly_int_rtx_p (XEXP (XEXP (op, 1), 1), &offset)
	  || (STACK_GROWS_DOWNWARD
	      ? maybe_ne (offset, -rounded_size)
	      : maybe_ne (offset, rounded_size)))
	return false;
    }

  return XEXP (op, 0) == stack_pointer_rtx;
}
1859
1860/* Return true if OP is a valid operand that stands for popping a
1861 value of mode MODE off the stack.
1862
1863 The main use of this function is as a predicate in match_operand
1864 expressions in the machine description. */
1865
1866bool
1867pop_operand (rtx op, machine_mode mode)
1868{
1869 if (!MEM_P (op))
1870 return false;
1871
1872 if (mode != VOIDmode && GET_MODE (op) != mode)
1873 return false;
1874
1875 op = XEXP (op, 0);
1876
1877 if (GET_CODE (op) != STACK_POP_CODE)
1878 return false;
1879
1880 return XEXP (op, 0) == stack_pointer_rtx;
1881}
1882
/* Return true if ADDR is a valid memory address
   for mode MODE in address space AS.  */

bool
memory_address_addr_space_p (machine_mode mode ATTRIBUTE_UNUSED, rtx addr,
			     addr_space_t as, code_helper ch ATTRIBUTE_UNUSED)
{
#ifdef GO_IF_LEGITIMATE_ADDRESS
  /* Old-style targets define GO_IF_LEGITIMATE_ADDRESS, a statement
     macro that jumps to the given label ("win") when the address is
     legitimate and falls through otherwise.  Such targets only
     support the generic address space.  */
  gcc_assert (ADDR_SPACE_GENERIC_P (as));
  GO_IF_LEGITIMATE_ADDRESS (mode, addr, win);
  return false;

 win:
  return true;
#else
  /* Modern targets use the addr_space hook; the hard-wired 0 is the
     strictness argument (presumably a non-strict check — confirm
     against the hook's documentation).  */
  return targetm.addr_space.legitimate_address_p (mode, addr, 0, as, ch);
#endif
}
1901
1902/* Return true if OP is a valid memory reference with mode MODE,
1903 including a valid address.
1904
1905 The main use of this function is as a predicate in match_operand
1906 expressions in the machine description. */
1907
1908bool
1909memory_operand (rtx op, machine_mode mode)
1910{
1911 rtx inner;
1912
1913 if (! reload_completed)
1914 /* Note that no SUBREG is a memory operand before end of reload pass,
1915 because (SUBREG (MEM...)) forces reloading into a register. */
1916 return MEM_P (op) && general_operand (op, mode);
1917
1918 if (mode != VOIDmode && GET_MODE (op) != mode)
1919 return false;
1920
1921 inner = op;
1922 if (GET_CODE (inner) == SUBREG)
1923 inner = SUBREG_REG (inner);
1924
1925 return (MEM_P (inner) && general_operand (op, mode));
1926}
1927
/* Return true if OP is a valid indirect memory reference with mode MODE;
   that is, a memory reference whose address is a general_operand.  */

bool
indirect_operand (rtx op, machine_mode mode)
{
  /* Before reload, a SUBREG isn't in memory (see memory_operand, above).  */
  if (! reload_completed
      && GET_CODE (op) == SUBREG && MEM_P (SUBREG_REG (op)))
    {
      if (mode != VOIDmode && GET_MODE (op) != mode)
	return false;

      /* The only way that we can have a general_operand as the resulting
	 address is if OFFSET is zero and the address already is an operand
	 or if the address is (plus Y (const_int -OFFSET)) and Y is an
	 operand.  */
      poly_int64 offset;
      rtx addr = strip_offset (XEXP (SUBREG_REG (op), 0), &offset);
      /* The SUBREG_BYTE must cancel against any constant offset in
	 the address for the combination to act as a plain indirection.  */
      return (known_eq (offset + SUBREG_BYTE (op), 0)
	      && general_operand (addr, Pmode));
    }

  return (MEM_P (op)
	  && memory_operand (op, mode)
	  && general_operand (XEXP (op, 0), Pmode));
}
1955
1956/* Return true if this is an ordered comparison operator (not including
1957 ORDERED and UNORDERED). */
1958
1959bool
1960ordered_comparison_operator (rtx op, machine_mode mode)
1961{
1962 if (mode != VOIDmode && GET_MODE (op) != mode)
1963 return false;
1964 switch (GET_CODE (op))
1965 {
1966 case EQ:
1967 case NE:
1968 case LT:
1969 case LTU:
1970 case LE:
1971 case LEU:
1972 case GT:
1973 case GTU:
1974 case GE:
1975 case GEU:
1976 return true;
1977 default:
1978 return false;
1979 }
1980}
1981
1982/* Return true if this is a comparison operator. This allows the use of
1983 MATCH_OPERATOR to recognize all the branch insns. */
1984
1985bool
1986comparison_operator (rtx op, machine_mode mode)
1987{
1988 return ((mode == VOIDmode || GET_MODE (op) == mode)
1989 && COMPARISON_P (op));
1990}
1991
1992/* If BODY is an insn body that uses ASM_OPERANDS, return it. */
1993
1994rtx
1995extract_asm_operands (rtx body)
1996{
1997 rtx tmp;
1998 switch (GET_CODE (body))
1999 {
2000 case ASM_OPERANDS:
2001 return body;
2002
2003 case SET:
2004 /* Single output operand: BODY is (set OUTPUT (asm_operands ...)). */
2005 tmp = SET_SRC (body);
2006 if (GET_CODE (tmp) == ASM_OPERANDS)
2007 return tmp;
2008 break;
2009
2010 case PARALLEL:
2011 tmp = XVECEXP (body, 0, 0);
2012 if (GET_CODE (tmp) == ASM_OPERANDS)
2013 return tmp;
2014 if (GET_CODE (tmp) == SET)
2015 {
2016 tmp = SET_SRC (tmp);
2017 if (GET_CODE (tmp) == ASM_OPERANDS)
2018 return tmp;
2019 }
2020 break;
2021
2022 default:
2023 break;
2024 }
2025 return NULL;
2026}
2027
/* If BODY is an insn body that uses ASM_OPERANDS,
   return the number of operands (both input and output) in the insn.
   If BODY is an insn body that uses ASM_INPUT with CLOBBERS in PARALLEL,
   return 0.
   Otherwise return -1.  */

int
asm_noperands (const_rtx body)
{
  rtx asm_op = extract_asm_operands (const_cast<rtx> (body));
  int i, n_sets = 0;

  if (asm_op == NULL)
    {
      /* No ASM_OPERANDS; the only asm form left is a basic asm with
	 clobbers, which counts as zero operands.  */
      if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) >= 2
	  && GET_CODE (XVECEXP (body, 0, 0)) == ASM_INPUT)
	{
	  /* body is [(asm_input ...) (clobber (reg ...))...].  */
	  for (i = XVECLEN (body, 0) - 1; i > 0; i--)
	    if (GET_CODE (XVECEXP (body, 0, i)) != CLOBBER)
	      return -1;
	  return 0;
	}
      return -1;
    }

  if (GET_CODE (body) == SET)
    n_sets = 1;
  else if (GET_CODE (body) == PARALLEL)
    {
      if (GET_CODE (XVECEXP (body, 0, 0)) == SET)
	{
	  /* Multiple output operands, or 1 output plus some clobbers:
	     body is
	     [(set OUTPUT (asm_operands ...))...
	      (use (reg ...))...
	      (clobber (reg ...))...].  */
	  /* Count backwards through USEs and CLOBBERs to determine
	     number of SETs.  */
	  for (i = XVECLEN (body, 0); i > 0; i--)
	    {
	      if (GET_CODE (XVECEXP (body, 0, i - 1)) == SET)
		break;
	      if (GET_CODE (XVECEXP (body, 0, i - 1)) != USE
		  && GET_CODE (XVECEXP (body, 0, i - 1)) != CLOBBER)
		return -1;
	    }

	  /* N_SETS is now number of output operands.  */
	  n_sets = i;

	  /* Verify that all the SETs we have
	     came from a single original asm_operands insn
	     (so that invalid combinations are blocked).  */
	  for (i = 0; i < n_sets; i++)
	    {
	      rtx elt = XVECEXP (body, 0, i);
	      if (GET_CODE (elt) != SET)
		return -1;
	      if (GET_CODE (SET_SRC (elt)) != ASM_OPERANDS)
		return -1;
	      /* If these ASM_OPERANDS rtx's came from different original insns
		 then they aren't allowed together.  */
	      if (ASM_OPERANDS_INPUT_VEC (SET_SRC (elt))
		  != ASM_OPERANDS_INPUT_VEC (asm_op))
		return -1;
	    }
	}
      else
	{
	  /* 0 outputs, but some clobbers:
	     body is [(asm_operands ...)
		      (use (reg ...))...
		      (clobber (reg ...))...].  */
	  /* Make sure all the other parallel things really are clobbers.  */
	  for (i = XVECLEN (body, 0) - 1; i > 0; i--)
	    if (GET_CODE (XVECEXP (body, 0, i)) != USE
		&& GET_CODE (XVECEXP (body, 0, i)) != CLOBBER)
	      return -1;
	}
    }

  /* Total operands = explicit inputs + label operands + outputs.  */
  return (ASM_OPERANDS_INPUT_LENGTH (asm_op)
	  + ASM_OPERANDS_LABEL_LENGTH (asm_op) + n_sets);
}
2113
2114/* Assuming BODY is an insn body that uses ASM_OPERANDS,
2115 copy its operands (both input and output) into the vector OPERANDS,
2116 the locations of the operands within the insn into the vector OPERAND_LOCS,
2117 and the constraints for the operands into CONSTRAINTS.
2118 Write the modes of the operands into MODES.
2119 Write the location info into LOC.
2120 Return the assembler-template.
2121 If BODY is an insn body that uses ASM_INPUT with CLOBBERS in PARALLEL,
2122 return the basic assembly string.
2123
2124 If LOC, MODES, OPERAND_LOCS, CONSTRAINTS or OPERANDS is 0,
2125 we don't store that info. */
2126
const char *
decode_asm_operands (rtx body, rtx *operands, rtx **operand_locs,
		     const char **constraints, machine_mode *modes,
		     location_t *loc)
{
  /* NBASE is the index at which the input operands start; it equals the
     number of output operands recorded below.  */
  int nbase = 0, n, i;
  rtx asmop;

  switch (GET_CODE (body))
    {
    case ASM_OPERANDS:
      /* Zero output asm: BODY is (asm_operands ...).  */
      asmop = body;
      break;

    case SET:
      /* Single output asm: BODY is (set OUTPUT (asm_operands ...)).  */
      asmop = SET_SRC (body);

      /* The output is in the SET.
	 Its constraint is in the ASM_OPERANDS itself.  */
      if (operands)
	operands[0] = SET_DEST (body);
      if (operand_locs)
	operand_locs[0] = &SET_DEST (body);
      if (constraints)
	constraints[0] = ASM_OPERANDS_OUTPUT_CONSTRAINT (asmop);
      if (modes)
	modes[0] = GET_MODE (SET_DEST (body));
      /* Inputs are stored after the single output.  */
      nbase = 1;
      break;

    case PARALLEL:
      {
	int nparallel = XVECLEN (body, 0); /* Includes CLOBBERs.  */

	asmop = XVECEXP (body, 0, 0);
	if (GET_CODE (asmop) == SET)
	  {
	    asmop = SET_SRC (asmop);

	    /* At least one output, plus some CLOBBERs.  The outputs are in
	       the SETs.  Their constraints are in the ASM_OPERANDS itself.  */
	    for (i = 0; i < nparallel; i++)
	      {
		if (GET_CODE (XVECEXP (body, 0, i)) == USE
		    || GET_CODE (XVECEXP (body, 0, i)) == CLOBBER)
		  break;		/* Past last SET */
		gcc_assert (GET_CODE (XVECEXP (body, 0, i)) == SET);
		if (operands)
		  operands[i] = SET_DEST (XVECEXP (body, 0, i));
		if (operand_locs)
		  operand_locs[i] = &SET_DEST (XVECEXP (body, 0, i));
		if (constraints)
		  constraints[i] = XSTR (SET_SRC (XVECEXP (body, 0, i)), 1);
		if (modes)
		  modes[i] = GET_MODE (SET_DEST (XVECEXP (body, 0, i)));
	      }
	    /* I now counts the outputs; inputs follow them.  */
	    nbase = i;
	  }
	else if (GET_CODE (asmop) == ASM_INPUT)
	  {
	    /* Basic asm (ASM_INPUT) with clobbers: no operands to decode,
	       just hand back the assembly string.  */
	    if (loc)
	      *loc = ASM_INPUT_SOURCE_LOCATION (asmop);
	    return XSTR (asmop, 0);
	  }
	break;
      }

    default:
      gcc_unreachable ();
    }

  /* Copy out the input operands, placed after the NBASE outputs.  */
  n = ASM_OPERANDS_INPUT_LENGTH (asmop);
  for (i = 0; i < n; i++)
    {
      if (operand_locs)
	operand_locs[nbase + i] = &ASM_OPERANDS_INPUT (asmop, i);
      if (operands)
	operands[nbase + i] = ASM_OPERANDS_INPUT (asmop, i);
      if (constraints)
	constraints[nbase + i] = ASM_OPERANDS_INPUT_CONSTRAINT (asmop, i);
      if (modes)
	modes[nbase + i] = ASM_OPERANDS_INPUT_MODE (asmop, i);
    }
  nbase += n;

  /* Finally copy out any label operands (asm goto).  Labels have no
     constraint text and are treated as Pmode.  */
  n = ASM_OPERANDS_LABEL_LENGTH (asmop);
  for (i = 0; i < n; i++)
    {
      if (operand_locs)
	operand_locs[nbase + i] = &ASM_OPERANDS_LABEL (asmop, i);
      if (operands)
	operands[nbase + i] = ASM_OPERANDS_LABEL (asmop, i);
      if (constraints)
	constraints[nbase + i] = "";
      if (modes)
	modes[nbase + i] = Pmode;
    }

  if (loc)
    *loc = ASM_OPERANDS_SOURCE_LOCATION (asmop);

  return ASM_OPERANDS_TEMPLATE (asmop);
}
2232
2233/* Parse inline assembly string STRING and determine which operands are
2234 referenced by % markers. For the first NOPERANDS operands, set USED[I]
2235 to true if operand I is referenced.
2236
2237 This is intended to distinguish barrier-like asms such as:
2238
2239 asm ("" : "=m" (...));
2240
2241 from real references such as:
2242
2243 asm ("sw\t$0, %0" : "=m" (...)); */
2244
2245void
2246get_referenced_operands (const char *string, bool *used,
2247 unsigned int noperands)
2248{
2249 memset (s: used, c: 0, n: sizeof (bool) * noperands);
2250 const char *p = string;
2251 while (*p)
2252 switch (*p)
2253 {
2254 case '%':
2255 p += 1;
2256 /* A letter followed by a digit indicates an operand number. */
2257 if (ISALPHA (p[0]) && ISDIGIT (p[1]))
2258 p += 1;
2259 if (ISDIGIT (*p))
2260 {
2261 char *endptr;
2262 unsigned long opnum = strtoul (nptr: p, endptr: &endptr, base: 10);
2263 if (endptr != p && opnum < noperands)
2264 used[opnum] = true;
2265 p = endptr;
2266 }
2267 else
2268 p += 1;
2269 break;
2270
2271 default:
2272 p++;
2273 break;
2274 }
2275}
2276
2277/* Check if an asm_operand matches its constraints.
2278 Return > 0 if ok, = 0 if bad, < 0 if inconclusive. */
2279
int
asm_operand_ok (rtx op, const char *constraint, const char **constraints)
{
  int result = 0;
  /* Set when a '<' or '>' constraint letter is seen; such operands are
     allowed to have auto-inc/dec addresses (checked at the end).  */
  bool incdec_ok = false;

  /* Use constrain_operands after reload.  */
  gcc_assert (!reload_completed);

  /* Empty constraint string is the same as "X,...,X", i.e. X for as
     many alternatives as required to match the other operands.  */
  if (*constraint == '\0')
    result = 1;

  while (*constraint)
    {
      enum constraint_num cn;
      char c = *constraint;
      int len;
      switch (c)
	{
	case ',':
	  /* Alternative separator: raw mode does not carry across
	     alternatives.  */
	  raw_constraint_p = false;
	  constraint++;
	  continue;

	case '0': case '1': case '2': case '3': case '4':
	case '5': case '6': case '7': case '8': case '9':
	  /* If caller provided constraints pointer, look up
	     the matching constraint.  Otherwise, our caller should have
	     given us the proper matching constraint, but we can't
	     actually fail the check if they didn't.  Indicate that
	     results are inconclusive.  */
	  if (constraints)
	    {
	      char *end;
	      unsigned long match;

	      match = strtoul (nptr: constraint, endptr: &end, base: 10);
	      if (!result)
		result = asm_operand_ok (op, constraint: constraints[match], NULL);
	      constraint = (const char *) end;
	    }
	  else
	    {
	      do
		constraint++;
	      while (ISDIGIT (*constraint));
	      if (! result)
		result = -1;
	    }
	  continue;

	  /* The rest of the compiler assumes that reloading the address
	     of a MEM into a register will make it fit an 'o' constraint.
	     That is, if it sees a MEM operand for an 'o' constraint,
	     it assumes that (mem (base-reg)) will fit.

	     That assumption fails on targets that don't have offsettable
	     addresses at all.  We therefore need to treat 'o' asm
	     constraints as a special case and only accept operands that
	     are already offsettable, thus proving that at least one
	     offsettable address exists.  */
	case 'o': /* offsettable */
	  if (offsettable_nonstrict_memref_p (op))
	    result = 1;
	  break;

	case 'g':
	  if (general_operand (op, VOIDmode))
	    result = 1;
	  break;

	case '-':
	  /* '-' introduces a raw constraint; remember that until the
	     next alternative separator.  */
	  raw_constraint_p = true;
	  constraint++;
	  continue;

	case '<':
	case '>':
	  /* ??? Before auto-inc-dec, auto inc/dec insns are not supposed
	     to exist, excepting those that expand_call created.  Further,
	     on some machines which do not have generalized auto inc/dec,
	     an inc/dec is not a memory_operand.

	     Match any memory and hope things are resolved after reload.  */
	  incdec_ok = true;
	  /* FALLTHRU */
	default:
	  /* Everything else is dispatched through the machine-generated
	     constraint tables.  */
	  cn = lookup_constraint (p: constraint);
	  rtx mem = NULL;
	  switch (get_constraint_type (c: cn))
	    {
	    case CT_REGISTER:
	      if (!result
		  && (reg_class_for_constraint (c: cn) != NO_REGS
		      || constraint[0] == '{')
		  && GET_MODE (op) != BLKmode
		  && register_operand (op, VOIDmode))
		result = 1;
	      break;

	    case CT_CONST_INT:
	      if (!result
		  && CONST_INT_P (op)
		  && insn_const_int_ok_for_constraint (INTVAL (op), cn))
		result = 1;
	      break;

	    case CT_MEMORY:
	    case CT_RELAXED_MEMORY:
	      mem = op;
	      /* Fall through.  */
	    case CT_SPECIAL_MEMORY:
	      /* Every memory operand can be reloaded to fit.  */
	      if (!mem)
		mem = extract_mem_from_operand (op);
	      result = result || memory_operand (op: mem, VOIDmode);
	      break;

	    case CT_ADDRESS:
	      /* Every address operand can be reloaded to fit.  */
	      result = result || address_operand (op, VOIDmode);
	      break;

	    case CT_FIXED_FORM:
	      result = result || constraint_satisfied_p (x: op, c: cn);
	      break;
	    }
	  break;
	}
      /* Advance past the (possibly multi-character) constraint just
	 handled; stop early at an alternative separator.  */
      len = CONSTRAINT_LEN (c, constraint);
      do
	constraint++;
      while (--len && *constraint && *constraint != ',');
      /* A ',' or NUL inside a multi-character constraint means the
	 string is malformed; reject it outright.  */
      if (len)
	{
	  raw_constraint_p = false;
	  return 0;
	}
    }
  raw_constraint_p = false;

  /* For operands without < or > constraints reject side-effects.  */
  if (AUTO_INC_DEC && !incdec_ok && result && MEM_P (op))
    switch (GET_CODE (XEXP (op, 0)))
      {
      case PRE_INC:
      case POST_INC:
      case PRE_DEC:
      case POST_DEC:
      case PRE_MODIFY:
      case POST_MODIFY:
	return 0;
      default:
	break;
      }

  return result;
}
2440
2441/* Given an rtx *P, if it is a sum containing an integer constant term,
2442 return the location (type rtx *) of the pointer to that constant term.
2443 Otherwise, return a null pointer. */
2444
2445rtx *
2446find_constant_term_loc (rtx *p)
2447{
2448 rtx *tem;
2449 enum rtx_code code = GET_CODE (*p);
2450
2451 /* If *P IS such a constant term, P is its location. */
2452
2453 if (code == CONST_INT || code == SYMBOL_REF || code == LABEL_REF
2454 || code == CONST)
2455 return p;
2456
2457 /* Otherwise, if not a sum, it has no constant term. */
2458
2459 if (GET_CODE (*p) != PLUS)
2460 return 0;
2461
2462 /* If one of the summands is constant, return its location. */
2463
2464 if (XEXP (*p, 0) && CONSTANT_P (XEXP (*p, 0))
2465 && XEXP (*p, 1) && CONSTANT_P (XEXP (*p, 1)))
2466 return p;
2467
2468 /* Otherwise, check each summand for containing a constant term. */
2469
2470 if (XEXP (*p, 0) != 0)
2471 {
2472 tem = find_constant_term_loc (p: &XEXP (*p, 0));
2473 if (tem != 0)
2474 return tem;
2475 }
2476
2477 if (XEXP (*p, 1) != 0)
2478 {
2479 tem = find_constant_term_loc (p: &XEXP (*p, 1));
2480 if (tem != 0)
2481 return tem;
2482 }
2483
2484 return 0;
2485}
2486
2487/* Return true if OP is a memory reference whose address contains
2488 no side effects and remains valid after the addition of a positive
2489 integer less than the size of the object being referenced.
2490
2491 We assume that the original address is valid and do not check it.
2492
2493 This uses strict_memory_address_p as a subroutine, so
2494 don't use it before reload. */
2495
2496bool
2497offsettable_memref_p (rtx op)
2498{
2499 return ((MEM_P (op))
2500 && offsettable_address_addr_space_p (1, GET_MODE (op), XEXP (op, 0),
2501 MEM_ADDR_SPACE (op)));
2502}
2503
2504/* Similar, but don't require a strictly valid mem ref:
2505 consider pseudo-regs valid as index or base regs. */
2506
2507bool
2508offsettable_nonstrict_memref_p (rtx op)
2509{
2510 return ((MEM_P (op))
2511 && offsettable_address_addr_space_p (0, GET_MODE (op), XEXP (op, 0),
2512 MEM_ADDR_SPACE (op)));
2513}
2514
2515/* Return true if Y is a memory address which contains no side effects
2516 and would remain valid for address space AS after the addition of
2517 a positive integer less than the size of that mode.
2518
2519 We assume that the original address is valid and do not check it.
2520 We do check that it is valid for narrower modes.
2521
2522 If STRICTP is nonzero, we require a strictly valid address,
2523 for the sake of use in reload.cc. */
2524
bool
offsettable_address_addr_space_p (int strictp, machine_mode mode, rtx y,
				  addr_space_t as)
{
  enum rtx_code ycode = GET_CODE (y);
  rtx z;
  rtx y1 = y;
  rtx *y2;
  /* Choose the strict or nonstrict address validator up front.  */
  bool (*addressp) (machine_mode, rtx, addr_space_t, code_helper) =
    (strictp ? strict_memory_address_addr_space_p
	     : memory_address_addr_space_p);
  poly_int64 mode_sz = GET_MODE_SIZE (mode);

  /* A constant address is offsettable by construction.  */
  if (CONSTANT_ADDRESS_P (y))
    return true;

  /* Adjusting an offsettable address involves changing to a narrower mode.
     Make sure that's OK.  */

  if (mode_dependent_address_p (y, as))
    return false;

  machine_mode address_mode = GET_MODE (y);
  if (address_mode == VOIDmode)
    address_mode = targetm.addr_space.address_mode (as);
#ifdef POINTERS_EXTEND_UNSIGNED
  machine_mode pointer_mode = targetm.addr_space.pointer_mode (as);
#endif

  /* ??? How much offset does an offsettable BLKmode reference need?
     Clearly that depends on the situation in which it's being used.
     However, the current situation in which we test 0xffffffff is
     less than ideal.  Caveat user.  */
  if (known_eq (mode_sz, 0))
    mode_sz = BIGGEST_ALIGNMENT / BITS_PER_UNIT;

  /* If the expression contains a constant term,
     see if it remains valid when max possible offset is added.  */

  if ((ycode == PLUS) && (y2 = find_constant_term_loc (p: &y1)))
    {
      bool good;

      /* Temporarily replace the constant term in place with
	 constant + (size - 1), validate, then restore.  Y1 holds the
	 original term so the rtx is left untouched on return.  */
      y1 = *y2;
      *y2 = plus_constant (address_mode, *y2, mode_sz - 1);
      /* Use QImode because an odd displacement may be automatically invalid
	 for any wider mode.  But it should be valid for a single byte.  */
      good = (*addressp) (QImode, y, as, ERROR_MARK);

      /* In any case, restore old contents of memory.  */
      *y2 = y1;
      return good;
    }

  /* Auto-increment addresses cannot accept an added displacement.  */
  if (GET_RTX_CLASS (ycode) == RTX_AUTOINC)
    return false;

  /* The offset added here is chosen as the maximum offset that
     any instruction could need to add when operating on something
     of the specified mode.  We assume that if Y and Y+c are
     valid addresses then so is Y+d for all 0<d<c.  adjust_address will
     go inside a LO_SUM here, so we do so as well.  */
  if (GET_CODE (y) == LO_SUM
      && mode != BLKmode
      && known_le (mode_sz, GET_MODE_ALIGNMENT (mode) / BITS_PER_UNIT))
    z = gen_rtx_LO_SUM (address_mode, XEXP (y, 0),
			plus_constant (address_mode, XEXP (y, 1),
				       mode_sz - 1));
#ifdef POINTERS_EXTEND_UNSIGNED
  /* Likewise for a ZERO_EXTEND from pointer_mode.  */
  else if (POINTERS_EXTEND_UNSIGNED > 0
	   && GET_CODE (y) == ZERO_EXTEND
	   && GET_MODE (XEXP (y, 0)) == pointer_mode)
    z = gen_rtx_ZERO_EXTEND (address_mode,
			     plus_constant (pointer_mode, XEXP (y, 0),
					    mode_sz - 1));
#endif
  else
    z = plus_constant (address_mode, y, mode_sz - 1);

  /* Use QImode because an odd displacement may be automatically invalid
     for any wider mode.  But it should be valid for a single byte.  */
  return (*addressp) (QImode, z, as, ERROR_MARK);
}
2609
2610/* Return true if ADDR is an address-expression whose effect depends
2611 on the mode of the memory reference it is used in.
2612
2613 ADDRSPACE is the address space associated with the address.
2614
2615 Autoincrement addressing is a typical example of mode-dependence
2616 because the amount of the increment depends on the mode. */
2617
2618bool
2619mode_dependent_address_p (rtx addr, addr_space_t addrspace)
2620{
2621 /* Auto-increment addressing with anything other than post_modify
2622 or pre_modify always introduces a mode dependency. Catch such
2623 cases now instead of deferring to the target. */
2624 if (GET_CODE (addr) == PRE_INC
2625 || GET_CODE (addr) == POST_INC
2626 || GET_CODE (addr) == PRE_DEC
2627 || GET_CODE (addr) == POST_DEC)
2628 return true;
2629
2630 return targetm.mode_dependent_address_p (addr, addrspace);
2631}
2632
2633/* Return true if boolean attribute ATTR is supported. */
2634
2635static bool
2636have_bool_attr (bool_attr attr)
2637{
2638 switch (attr)
2639 {
2640 case BA_ENABLED:
2641 return HAVE_ATTR_enabled;
2642 case BA_PREFERRED_FOR_SIZE:
2643 return HAVE_ATTR_enabled || HAVE_ATTR_preferred_for_size;
2644 case BA_PREFERRED_FOR_SPEED:
2645 return HAVE_ATTR_enabled || HAVE_ATTR_preferred_for_speed;
2646 }
2647 gcc_unreachable ();
2648}
2649
2650/* Return the value of ATTR for instruction INSN. */
2651
2652static bool
2653get_bool_attr (rtx_insn *insn, bool_attr attr)
2654{
2655 switch (attr)
2656 {
2657 case BA_ENABLED:
2658 return get_attr_enabled (insn);
2659 case BA_PREFERRED_FOR_SIZE:
2660 return get_attr_enabled (insn) && get_attr_preferred_for_size (insn);
2661 case BA_PREFERRED_FOR_SPEED:
2662 return get_attr_enabled (insn) && get_attr_preferred_for_speed (insn);
2663 }
2664 gcc_unreachable ();
2665}
2666
2667/* Like get_bool_attr_mask, but don't use the cache. */
2668
2669static alternative_mask
2670get_bool_attr_mask_uncached (rtx_insn *insn, bool_attr attr)
2671{
2672 /* Temporarily install enough information for get_attr_<foo> to assume
2673 that the insn operands are already cached. As above, the attribute
2674 mustn't depend on the values of operands, so we don't provide their
2675 real values here. */
2676 rtx_insn *old_insn = recog_data.insn;
2677 int old_alternative = which_alternative;
2678
2679 recog_data.insn = insn;
2680 alternative_mask mask = ALL_ALTERNATIVES;
2681 int n_alternatives = insn_data[INSN_CODE (insn)].n_alternatives;
2682 for (int i = 0; i < n_alternatives; i++)
2683 {
2684 which_alternative = i;
2685 if (!get_bool_attr (insn, attr))
2686 mask &= ~ALTERNATIVE_BIT (i);
2687 }
2688
2689 recog_data.insn = old_insn;
2690 which_alternative = old_alternative;
2691 return mask;
2692}
2693
2694/* Return the mask of operand alternatives that are allowed for INSN
2695 by boolean attribute ATTR. This mask depends only on INSN and on
2696 the current target; it does not depend on things like the values of
2697 operands. */
2698
2699static alternative_mask
2700get_bool_attr_mask (rtx_insn *insn, bool_attr attr)
2701{
2702 /* Quick exit for asms and for targets that don't use these attributes. */
2703 int code = INSN_CODE (insn);
2704 if (code < 0 || !have_bool_attr (attr))
2705 return ALL_ALTERNATIVES;
2706
2707 /* Calling get_attr_<foo> can be expensive, so cache the mask
2708 for speed. */
2709 if (!this_target_recog->x_bool_attr_masks[code][attr])
2710 this_target_recog->x_bool_attr_masks[code][attr]
2711 = get_bool_attr_mask_uncached (insn, attr);
2712 return this_target_recog->x_bool_attr_masks[code][attr];
2713}
2714
2715/* Return the set of alternatives of INSN that are allowed by the current
2716 target. */
2717
2718alternative_mask
2719get_enabled_alternatives (rtx_insn *insn)
2720{
2721 return get_bool_attr_mask (insn, attr: BA_ENABLED);
2722}
2723
2724/* Return the set of alternatives of INSN that are allowed by the current
2725 target and are preferred for the current size/speed optimization
2726 choice. */
2727
2728alternative_mask
2729get_preferred_alternatives (rtx_insn *insn)
2730{
2731 if (optimize_bb_for_speed_p (BLOCK_FOR_INSN (insn)))
2732 return get_bool_attr_mask (insn, attr: BA_PREFERRED_FOR_SPEED);
2733 else
2734 return get_bool_attr_mask (insn, attr: BA_PREFERRED_FOR_SIZE);
2735}
2736
2737/* Return the set of alternatives of INSN that are allowed by the current
2738 target and are preferred for the size/speed optimization choice
2739 associated with BB. Passing a separate BB is useful if INSN has not
2740 been emitted yet or if we are considering moving it to a different
2741 block. */
2742
2743alternative_mask
2744get_preferred_alternatives (rtx_insn *insn, basic_block bb)
2745{
2746 if (optimize_bb_for_speed_p (bb))
2747 return get_bool_attr_mask (insn, attr: BA_PREFERRED_FOR_SPEED);
2748 else
2749 return get_bool_attr_mask (insn, attr: BA_PREFERRED_FOR_SIZE);
2750}
2751
2752/* Assert that the cached boolean attributes for INSN are still accurate.
2753 The backend is required to define these attributes in a way that only
2754 depends on the current target (rather than operands, compiler phase,
2755 etc.). */
2756
2757bool
2758check_bool_attrs (rtx_insn *insn)
2759{
2760 int code = INSN_CODE (insn);
2761 if (code >= 0)
2762 for (int i = 0; i <= BA_LAST; ++i)
2763 {
2764 enum bool_attr attr = (enum bool_attr) i;
2765 if (this_target_recog->x_bool_attr_masks[code][attr])
2766 gcc_assert (this_target_recog->x_bool_attr_masks[code][attr]
2767 == get_bool_attr_mask_uncached (insn, attr));
2768 }
2769 return true;
2770}
2771
/* Like extract_insn, but save the insn extracted and don't extract again
   when called again for the same insn, expecting that recog_data still
   contains the valid information.  This is used primarily by the gen_attr
   infrastructure, which often extracts the same insn again and again.  */
2776void
2777extract_insn_cached (rtx_insn *insn)
2778{
2779 if (recog_data.insn == insn && INSN_CODE (insn) >= 0)
2780 return;
2781 extract_insn (insn);
2782 recog_data.insn = insn;
2783}
2784
2785/* Do uncached extract_insn, constrain_operands and complain about failures.
2786 This should be used when extracting a pre-existing constrained instruction
2787 if the caller wants to know which alternative was chosen. */
2788void
2789extract_constrain_insn (rtx_insn *insn)
2790{
2791 extract_insn (insn);
2792 if (!constrain_operands (reload_completed, get_enabled_alternatives (insn)))
2793 fatal_insn_not_found (insn);
2794}
2795
2796/* Do cached extract_insn, constrain_operands and complain about failures.
2797 Used by insn_attrtab. */
2798void
2799extract_constrain_insn_cached (rtx_insn *insn)
2800{
2801 extract_insn_cached (insn);
2802 if (which_alternative == -1
2803 && !constrain_operands (reload_completed,
2804 get_enabled_alternatives (insn)))
2805 fatal_insn_not_found (insn);
2806}
2807
2808/* Do cached constrain_operands on INSN and complain about failures. */
2809bool
2810constrain_operands_cached (rtx_insn *insn, int strict)
2811{
2812 if (which_alternative == -1)
2813 return constrain_operands (strict, get_enabled_alternatives (insn));
2814 else
2815 return true;
2816}
2817
2818/* Analyze INSN and fill in recog_data. */
2819
void
extract_insn (rtx_insn *insn)
{
  int i;
  int icode;
  int noperands;
  rtx body = PATTERN (insn);

  /* Reset recog_data; patterns with no operands leave these zeroed.  */
  recog_data.n_operands = 0;
  recog_data.n_alternatives = 0;
  recog_data.n_dups = 0;
  recog_data.is_asm = false;

  switch (GET_CODE (body))
    {
    case USE:
    case CLOBBER:
    case ASM_INPUT:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
    case VAR_LOCATION:
    case DEBUG_MARKER:
      /* These patterns have no operands to extract.  */
      return;

    case SET:
      if (GET_CODE (SET_SRC (body)) == ASM_OPERANDS)
	goto asm_insn;
      else
	goto normal_insn;
    case PARALLEL:
      if ((GET_CODE (XVECEXP (body, 0, 0)) == SET
	   && GET_CODE (SET_SRC (XVECEXP (body, 0, 0))) == ASM_OPERANDS)
	  || GET_CODE (XVECEXP (body, 0, 0)) == ASM_OPERANDS
	  || GET_CODE (XVECEXP (body, 0, 0)) == ASM_INPUT)
	goto asm_insn;
      else
	goto normal_insn;
    case ASM_OPERANDS:
    asm_insn:
      recog_data.n_operands = noperands = asm_noperands (body);
      if (noperands >= 0)
	{
	  /* This insn is an `asm' with operands.  */

	  /* expand_asm_operands makes sure there aren't too many operands.  */
	  gcc_assert (noperands <= MAX_RECOG_OPERANDS);

	  /* Now get the operand values and constraints out of the insn.  */
	  decode_asm_operands (body, operands: recog_data.operand,
			       operand_locs: recog_data.operand_loc,
			       constraints: recog_data.constraints,
			       modes: recog_data.operand_mode, NULL);
	  memset (s: recog_data.is_operator, c: 0, n: sizeof recog_data.is_operator);
	  if (noperands > 0)
	    {
	      /* The number of alternatives is one more than the number
		 of commas in the first constraint string.  */
	      const char *p = recog_data.constraints[0];
	      recog_data.n_alternatives = 1;
	      while (*p)
		recog_data.n_alternatives += (*p++ == ',');
	    }
	  recog_data.is_asm = true;
	  break;
	}
      /* asm_noperands rejected the body as a malformed asm.  */
      fatal_insn_not_found (insn);

    default:
    normal_insn:
      /* Ordinary insn: recognize it, get the operands via insn_extract
	 and get the constraints.  */

      icode = recog_memoized (insn);
      if (icode < 0)
	fatal_insn_not_found (insn);

      recog_data.n_operands = noperands = insn_data[icode].n_operands;
      recog_data.n_alternatives = insn_data[icode].n_alternatives;
      recog_data.n_dups = insn_data[icode].n_dups;

      insn_extract (insn);

      for (i = 0; i < noperands; i++)
	{
	  recog_data.constraints[i] = insn_data[icode].operand[i].constraint;
	  recog_data.is_operator[i] = insn_data[icode].operand[i].is_operator;
	  recog_data.operand_mode[i] = insn_data[icode].operand[i].mode;
	  /* VOIDmode match_operands gets mode from their real operand.  */
	  if (recog_data.operand_mode[i] == VOIDmode)
	    recog_data.operand_mode[i] = GET_MODE (recog_data.operand[i]);
	}
    }
  /* Classify each operand from the first character of its constraint:
     '=' marks a pure output, '+' an in/out operand, else an input.  */
  for (i = 0; i < noperands; i++)
    recog_data.operand_type[i]
      = (recog_data.constraints[i][0] == '=' ? OP_OUT
	 : recog_data.constraints[i][0] == '+' ? OP_INOUT
	 : OP_IN);

  gcc_assert (recog_data.n_alternatives <= MAX_RECOG_ALTERNATIVES);

  /* Invalidate the cached insn/alternative so callers that need them
     recompute (see extract_insn_cached/constrain_operands_cached).  */
  recog_data.insn = NULL;
  which_alternative = -1;
}
2921
2922/* Fill in OP_ALT_BASE for an instruction that has N_OPERANDS
2923 operands, N_ALTERNATIVES alternatives and constraint strings
2924 CONSTRAINTS. OP_ALT_BASE has N_ALTERNATIVES * N_OPERANDS entries
2925 and CONSTRAINTS has N_OPERANDS entries. OPLOC should be passed in
2926 if the insn is an asm statement and preprocessing should take the
2927 asm operands into account, e.g. to determine whether they could be
2928 addresses in constraints that require addresses; it should then
2929 point to an array of pointers to each operand. */
2930
void
preprocess_constraints (int n_operands, int n_alternatives,
			const char **constraints,
			operand_alternative *op_alt_base,
			rtx **oploc)
{
  for (int i = 0; i < n_operands; i++)
    {
      int j;
      struct operand_alternative *op_alt;
      const char *p = constraints[i];

      /* OP_ALT walks through the table one alternative (= N_OPERANDS
	 entries) at a time; entry [i] within each row is this operand.  */
      op_alt = op_alt_base;

      for (j = 0; j < n_alternatives; j++, op_alt += n_operands)
	{
	  /* Start each entry from a clean default state.  */
	  op_alt[i].cl = NO_REGS;
	  op_alt[i].register_filters = 0;
	  op_alt[i].constraint = p;
	  op_alt[i].matches = -1;
	  op_alt[i].matched = -1;

	  /* An empty constraint (or empty alternative) accepts anything.  */
	  if (*p == '\0' || *p == ',')
	    {
	      op_alt[i].anything_ok = 1;
	      continue;
	    }

	  /* Parse this alternative's constraint characters until the
	     next ',' or the end of the string.  */
	  for (;;)
	    {
	      char c = *p;
	      /* '#' disables the rest of this alternative.  */
	      if (c == '#')
		do
		  c = *++p;
		while (c != ',' && c != '\0');
	      if (c == ',' || c == '\0')
		{
		  p++;
		  break;
		}

	      switch (c)
		{
		case '?':
		  /* Mildly disparaged alternative.  */
		  op_alt[i].reject += 6;
		  break;
		case '!':
		  /* Severely disparaged alternative.  */
		  op_alt[i].reject += 600;
		  break;
		case '&':
		  op_alt[i].earlyclobber = 1;
		  break;

		case '0': case '1': case '2': case '3': case '4':
		case '5': case '6': case '7': case '8': case '9':
		  {
		    /* Matching constraint: this operand must equal the
		       numbered (earlier) operand; cross-link the two.  */
		    char *end;
		    op_alt[i].matches = strtoul (nptr: p, endptr: &end, base: 10);
		    op_alt[op_alt[i].matches].matched = i;
		    p = end;
		  }
		  continue;

		case 'X':
		  op_alt[i].anything_ok = 1;
		  break;

		case 'g':
		  /* 'g' allows any general register (plus memory and
		     immediates, handled elsewhere).  */
		  op_alt[i].cl =
		   reg_class_subunion[(int) op_alt[i].cl][(int) GENERAL_REGS];
		  break;

		default:
		  /* Machine-specific or multi-letter constraint: consult
		     the generated constraint tables.  */
		  enum constraint_num cn = lookup_constraint (p);
		  enum reg_class cl;
		  switch (get_constraint_type (c: cn))
		    {
		    case CT_REGISTER:
		      cl = reg_class_for_constraint (c: cn);
		      if (cl != NO_REGS)
			{
			  op_alt[i].cl = reg_class_subunion[op_alt[i].cl][cl];
			  auto filter_id = get_register_filter_id (cn);
			  if (filter_id >= 0)
			    op_alt[i].register_filters |= 1U << filter_id;
			}
		      break;

		    case CT_CONST_INT:
		      break;

		    case CT_MEMORY:
		    case CT_SPECIAL_MEMORY:
		    case CT_RELAXED_MEMORY:
		      op_alt[i].memory_ok = 1;
		      break;

		    case CT_ADDRESS:
		      /* For asms (OPLOC nonnull), only treat the operand
			 as an address if it really could be one.  */
		      if (oploc && !address_operand (op: *oploc[i], VOIDmode))
			break;

		      op_alt[i].is_address = 1;
		      op_alt[i].cl
			= (reg_class_subunion
			   [(int) op_alt[i].cl]
			   [(int) base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
						  outer_code: ADDRESS, index_code: SCRATCH)]);
		      break;

		    case CT_FIXED_FORM:
		      break;
		    }
		  break;
		}
	      /* Skip this constraint, including any extra letters.  */
	      p += CONSTRAINT_LEN (c, p);
	    }
	}
    }
}
3050
3051/* Return an array of operand_alternative instructions for
3052 instruction ICODE. */
3053
3054const operand_alternative *
3055preprocess_insn_constraints (unsigned int icode)
3056{
3057 gcc_checking_assert (IN_RANGE (icode, 0, NUM_INSN_CODES - 1));
3058 if (this_target_recog->x_op_alt[icode])
3059 return this_target_recog->x_op_alt[icode];
3060
3061 int n_operands = insn_data[icode].n_operands;
3062 if (n_operands == 0)
3063 return 0;
3064 /* Always provide at least one alternative so that which_op_alt ()
3065 works correctly. If the instruction has 0 alternatives (i.e. all
3066 constraint strings are empty) then each operand in this alternative
3067 will have anything_ok set. */
3068 int n_alternatives = MAX (insn_data[icode].n_alternatives, 1);
3069 int n_entries = n_operands * n_alternatives;
3070
3071 operand_alternative *op_alt = XCNEWVEC (operand_alternative, n_entries);
3072 const char **constraints = XALLOCAVEC (const char *, n_operands);
3073
3074 for (int i = 0; i < n_operands; ++i)
3075 constraints[i] = insn_data[icode].operand[i].constraint;
3076 preprocess_constraints (n_operands, n_alternatives, constraints, op_alt_base: op_alt,
3077 NULL);
3078
3079 this_target_recog->x_op_alt[icode] = op_alt;
3080 return op_alt;
3081}
3082
3083/* After calling extract_insn, you can use this function to extract some
3084 information from the constraint strings into a more usable form.
3085 The collected data is stored in recog_op_alt. */
3086
3087void
3088preprocess_constraints (rtx_insn *insn)
3089{
3090 int icode = INSN_CODE (insn);
3091 if (icode >= 0)
3092 recog_op_alt = preprocess_insn_constraints (icode);
3093 else
3094 {
3095 int n_operands = recog_data.n_operands;
3096 int n_alternatives = recog_data.n_alternatives;
3097 int n_entries = n_operands * n_alternatives;
3098 memset (s: asm_op_alt, c: 0, n: n_entries * sizeof (operand_alternative));
3099 preprocess_constraints (n_operands, n_alternatives,
3100 constraints: recog_data.constraints, op_alt_base: asm_op_alt,
3101 NULL);
3102 recog_op_alt = asm_op_alt;
3103 }
3104}
3105
3106/* Check the operands of an insn against the insn's operand constraints
3107 and return 1 if they match any of the alternatives in ALTERNATIVES.
3108
3109 The information about the insn's operands, constraints, operand modes
3110 etc. is obtained from the global variables set up by extract_insn.
3111
3112 WHICH_ALTERNATIVE is set to a number which indicates which
3113 alternative of constraints was matched: 0 for the first alternative,
3114 1 for the next, etc.
3115
3116 In addition, when two operands are required to match
3117 and it happens that the output operand is (reg) while the
3118 input operand is --(reg) or ++(reg) (a pre-inc or pre-dec),
3119 make the output operand look like the input.
3120 This is because the output operand is the one the template will print.
3121
3122 This is used in final, just before printing the assembler code and by
3123 the routines that determine an insn's attribute.
3124
3125 If STRICT is a positive nonzero value, it means that we have been
3126 called after reload has been completed. In that case, we must
3127 do all checks strictly. If it is zero, it means that we have been called
3128 before reload has completed. In that case, we first try to see if we can
3129 find an alternative that matches strictly. If not, we try again, this
3130 time assuming that reload will fix up the insn. This provides a "best
3131 guess" for the alternative and is used to compute attributes of insns prior
3132 to reload. A negative value of STRICT is used for this internal call. */
3133
/* Record of a pair of operands that a matching constraint requires to
   be identical; collected by constrain_operands while checking an
   alternative.  */
struct funny_match
{
  int this_op, other;	/* Indices into recog_data.operand.  */
};
3138
bool
constrain_operands (int strict, alternative_mask alternatives)
{
  const char *constraints[MAX_RECOG_OPERANDS];
  int matching_operands[MAX_RECOG_OPERANDS];
  int earlyclobber[MAX_RECOG_OPERANDS];
  int c;

  /* Pairs of operands that a matching constraint obliges us to unify
     before returning success (see struct funny_match).  */
  struct funny_match funny_match[MAX_RECOG_OPERANDS];
  int funny_match_index;

  which_alternative = 0;
  if (recog_data.n_operands == 0 || recog_data.n_alternatives == 0)
    return true;

  for (c = 0; c < recog_data.n_operands; c++)
    constraints[c] = recog_data.constraints[c];

  /* Try each alternative in turn; stop at the first one for which every
     operand satisfies its constraint.  */
  do
    {
      int seen_earlyclobber_at = -1;
      int opno;
      bool lose = false;
      funny_match_index = 0;

      /* Skip alternatives that the caller has masked out as disabled.  */
      if (!TEST_BIT (alternatives, which_alternative))
	{
	  int i;

	  for (i = 0; i < recog_data.n_operands; i++)
	    constraints[i] = skip_alternative (p: constraints[i]);

	  which_alternative++;
	  continue;
	}

      for (opno = 0; opno < recog_data.n_operands; opno++)
	matching_operands[opno] = -1;

      for (opno = 0; opno < recog_data.n_operands; opno++)
	{
	  rtx op = recog_data.operand[opno];
	  machine_mode mode = GET_MODE (op);
	  const char *p = constraints[opno];
	  int offset = 0;
	  bool win = false;
	  int val;
	  int len;

	  earlyclobber[opno] = 0;

	  /* Look through a SUBREG of a hard register, remembering the
	     register-number offset it implies so class checks below apply
	     to the right hard register.  */
	  if (GET_CODE (op) == SUBREG)
	    {
	      if (REG_P (SUBREG_REG (op))
		  && REGNO (SUBREG_REG (op)) < FIRST_PSEUDO_REGISTER)
		offset = subreg_regno_offset (REGNO (SUBREG_REG (op)),
					      GET_MODE (SUBREG_REG (op)),
					      SUBREG_BYTE (op),
					      GET_MODE (op));
	      op = SUBREG_REG (op);
	    }

	  /* An empty constraint or empty alternative
	     allows anything which matched the pattern.  */
	  if (*p == 0 || *p == ',')
	    win = true;

	  do
	    switch (c = *p, len = CONSTRAINT_LEN (c, p), c)
	      {
	      case '\0':
		len = 0;
		break;
	      case ',':
		c = '\0';
		break;
	      case '-':
		/* Mark that we are inside a raw constraint.  */
		raw_constraint_p = true;
		break;

	      case '#':
		/* Ignore rest of this alternative as far as
		   constraint checking is concerned.  */
		do
		  p++;
		while (*p && *p != ',');
		len = 0;
		break;

	      case '&':
		earlyclobber[opno] = 1;
		if (seen_earlyclobber_at < 0)
		  seen_earlyclobber_at = opno;
		break;

	      case '0': case '1': case '2': case '3': case '4':
	      case '5': case '6': case '7': case '8': case '9':
		{
		  /* This operand must be the same as a previous one.
		     This kind of constraint is used for instructions such
		     as add when they take only two operands.

		     Note that the lower-numbered operand is passed first.

		     If we are not testing strictly, assume that this
		     constraint will be satisfied.  */

		  char *end;
		  int match;

		  match = strtoul (nptr: p, endptr: &end, base: 10);
		  p = end;

		  if (strict < 0)
		    val = 1;
		  else
		    {
		      rtx op1 = recog_data.operand[match];
		      rtx op2 = recog_data.operand[opno];
		      val = operands_match_p (op1, op2);
		    }

		  matching_operands[opno] = match;
		  matching_operands[match] = opno;

		  if (val != 0)
		    win = true;

		  /* If output is *x and input is *--x, arrange later
		     to change the output to *--x as well, since the
		     output op is the one that will be printed.  */
		  if (val == 2 && strict > 0)
		    {
		      funny_match[funny_match_index].this_op = opno;
		      funny_match[funny_match_index++].other = match;
		    }
		}
		len = 0;
		break;

	      case 'p':
		/* p is used for address_operands.  When we are called by
		   gen_reload, no one will have checked that the address is
		   strictly valid, i.e., that all pseudos requiring hard regs
		   have gotten them.  We also want to make sure we have a
		   valid mode.  */
		{
		  auto mem_mode = (recog_data.is_asm
				   ? VOIDmode
				   : recog_data.operand_mode[opno]);
		  if ((GET_MODE (op) == VOIDmode
		       || SCALAR_INT_MODE_P (GET_MODE (op)))
		      && (strict <= 0
			  || strict_memory_address_p (mem_mode, op)))
		    win = true;
		  break;
		}

		/* No need to check general_operand again;
		   it was done in insn-recog.cc.  Well, except that reload
		   doesn't check the validity of its replacements, but
		   that should only matter when there's a bug.  */
	      case 'g':
		/* Anything goes unless it is a REG and really has a hard reg
		   but the hard reg is not in the class GENERAL_REGS.  */
		if (REG_P (op))
		  {
		    if (strict < 0
			|| GENERAL_REGS == ALL_REGS
			|| (reload_in_progress
			    && REGNO (op) >= FIRST_PSEUDO_REGISTER)
			|| reg_fits_class_p (op, GENERAL_REGS, offset, mode))
		      win = true;
		  }
		else if (strict < 0 || general_operand (op, mode))
		  win = true;
		break;

	      case '{':
		/* A hard-register constraint: before reload anything is
		   acceptable since the operand can still be reloaded.  */
		if ((REG_P (op) && HARD_REGISTER_P (op)
		     && (int) REGNO (op) == decode_hard_reg_constraint (p))
		    || !reload_completed)
		  win = true;
		break;

	      default:
		{
		  enum constraint_num cn = lookup_constraint (p);
		  enum reg_class cl = reg_class_for_constraint (c: cn);
		  if (cl != NO_REGS)
		    {
		      auto *filter = get_register_filter (cn);
		      if (strict < 0
			  || (strict == 0
			      && REG_P (op)
			      && REGNO (op) >= FIRST_PSEUDO_REGISTER)
			  || (strict == 0 && GET_CODE (op) == SCRATCH)
			  || (REG_P (op)
			      && reg_fits_class_p (op, cl, offset, mode)
			      && (!filter
				  || TEST_HARD_REG_BIT (set: *filter,
							REGNO (op) + offset))))
			win = true;
		    }

		  else if (constraint_satisfied_p (x: op, c: cn))
		    win = true;

		  else if ((insn_extra_memory_constraint (c: cn)
			    || insn_extra_relaxed_memory_constraint (cn))
			   /* Every memory operand can be reloaded to fit.  */
			   && ((strict < 0 && MEM_P (op))
			       /* Before reload, accept what reload can turn
				  into a mem.  */
			       || (strict < 0 && CONSTANT_P (op))
			       /* Before reload, accept a pseudo or hard register,
				  since LRA can turn it into a mem.  */
			       || (strict < 0 && targetm.lra_p () && REG_P (op))
			       /* During reload, accept a pseudo  */
			       || (reload_in_progress && REG_P (op)
				   && REGNO (op) >= FIRST_PSEUDO_REGISTER)))
		    win = true;
		  else if (insn_extra_address_constraint (c: cn)
			   /* Every address operand can be reloaded to fit.  */
			   && strict < 0)
		    win = true;
		  /* Cater to architectures like IA-64 that define extra memory
		     constraints without using define_memory_constraint.  */
		  else if (reload_in_progress
			   && REG_P (op)
			   && REGNO (op) >= FIRST_PSEUDO_REGISTER
			   && reg_renumber[REGNO (op)] < 0
			   && reg_equiv_mem (REGNO (op)) != 0
			   && constraint_satisfied_p
			      (reg_equiv_mem (REGNO (op)), c: cn))
		    win = true;
		  break;
		}
	      }
	  while (p += len, c);

	  raw_constraint_p = false;
	  constraints[opno] = p;
	  /* If this operand did not win somehow,
	     this alternative loses.  */
	  if (! win)
	    lose = true;
	}
      /* This alternative won; the operands are ok.
	 Change whichever operands this alternative says to change.  */
      if (! lose)
	{
	  int opno, eopno;

	  /* See if any earlyclobber operand conflicts with some other
	     operand.  */

	  if (strict > 0 && seen_earlyclobber_at >= 0)
	    for (eopno = seen_earlyclobber_at;
		 eopno < recog_data.n_operands;
		 eopno++)
	      /* Ignore earlyclobber operands now in memory,
		 because we would often report failure when we have
		 two memory operands, one of which was formerly a REG.  */
	      if (earlyclobber[eopno]
		  && REG_P (recog_data.operand[eopno]))
		for (opno = 0; opno < recog_data.n_operands; opno++)
		  if ((MEM_P (recog_data.operand[opno])
		       || recog_data.operand_type[opno] != OP_OUT)
		      && opno != eopno
		      /* Ignore things like match_operator operands.  */
		      && *recog_data.constraints[opno] != 0
		      && ! (matching_operands[opno] == eopno
			    && operands_match_p (recog_data.operand[opno],
						 recog_data.operand[eopno]))
		      && ! safe_from_earlyclobber (recog_data.operand[opno],
						   recog_data.operand[eopno]))
		    lose = true;

	  if (! lose)
	    {
	      /* Unify the operand pairs recorded for matching constraints
		 so the template prints the side-effect form.  */
	      while (--funny_match_index >= 0)
		{
		  recog_data.operand[funny_match[funny_match_index].other]
		    = recog_data.operand[funny_match[funny_match_index].this_op];
		}

	      /* For operands without < or > constraints reject side-effects.  */
	      if (AUTO_INC_DEC && recog_data.is_asm)
		{
		  for (opno = 0; opno < recog_data.n_operands; opno++)
		    if (MEM_P (recog_data.operand[opno]))
		      switch (GET_CODE (XEXP (recog_data.operand[opno], 0)))
			{
			case PRE_INC:
			case POST_INC:
			case PRE_DEC:
			case POST_DEC:
			case PRE_MODIFY:
			case POST_MODIFY:
			  if (strchr (s: recog_data.constraints[opno], c: '<') == NULL
			      && strchr (s: recog_data.constraints[opno], c: '>')
				 == NULL)
			    return false;
			  break;
			default:
			  break;
			}
		}

	      return true;
	    }
	}

      which_alternative++;
    }
  while (which_alternative < recog_data.n_alternatives);

  which_alternative = -1;
  /* If we are about to reject this, but we are not to test strictly,
     try a very loose test.  Only return failure if it fails also.  */
  if (strict == 0)
    return constrain_operands (strict: -1, alternatives);
  else
    return false;
}
3465
3466/* Return true iff OPERAND (assumed to be a REG rtx)
3467 is a hard reg in class CLASS when its regno is offset by OFFSET
3468 and changed to mode MODE.
3469 If REG occupies multiple hard regs, all of them must be in CLASS. */
3470
3471bool
3472reg_fits_class_p (const_rtx operand, reg_class_t cl, int offset,
3473 machine_mode mode)
3474{
3475 unsigned int regno = REGNO (operand);
3476
3477 if (cl == NO_REGS)
3478 return false;
3479
3480 /* Regno must not be a pseudo register. Offset may be negative. */
3481 return (HARD_REGISTER_NUM_P (regno)
3482 && HARD_REGISTER_NUM_P (regno + offset)
3483 && in_hard_reg_set_p (reg_class_contents[(int) cl], mode,
3484 regno: regno + offset));
3485}
3486
/* Split single instruction.  Helper function for split_all_insns and
   split_all_insns_noflow.  Return last insn in the sequence if successful,
   or NULL if unsuccessful.  */

static rtx_insn *
split_insn (rtx_insn *insn)
{
  /* Split insns here to get max fine-grain parallelism.  */
  rtx_insn *first = PREV_INSN (insn);
  rtx_insn *last = try_split (PATTERN (insn), insn, 1);
  rtx insn_set, last_set, note;

  /* try_split returns INSN itself when no split was performed.  */
  if (last == insn)
    return NULL;

  /* If the original instruction was a single set that was known to be
     equivalent to a constant, see if we can say the same about the last
     instruction in the split sequence.  The two instructions must set
     the same destination.  */
  insn_set = single_set (insn);
  if (insn_set)
    {
      last_set = single_set (insn: last);
      if (last_set && rtx_equal_p (SET_DEST (last_set), SET_DEST (insn_set)))
	{
	  /* Prefer an existing REG_EQUAL/REG_EQUIV note; failing that,
	     a constant SET_SRC itself supplies the known value.  */
	  note = find_reg_equal_equiv_note (insn);
	  if (note && CONSTANT_P (XEXP (note, 0)))
	    set_unique_reg_note (last, REG_EQUAL, XEXP (note, 0));
	  else if (CONSTANT_P (SET_SRC (insn_set)))
	    set_unique_reg_note (last, REG_EQUAL,
				 copy_rtx (SET_SRC (insn_set)));
	}
    }

  /* try_split returns the NOTE that INSN became.  */
  SET_INSN_DELETED (insn);

  /* ??? Coddle to md files that generate subregs in post-reload
     splitters instead of computing the proper hard register.  */
  if (reload_completed && first != last)
    {
      first = NEXT_INSN (insn: first);
      for (;;)
	{
	  if (INSN_P (first))
	    cleanup_subreg_operands (first);
	  if (first == last)
	    break;
	  first = NEXT_INSN (insn: first);
	}
    }

  return last;
}
3541
/* Split all insns in the function.  Rebuilds sub-basic-block structure
   for any block in which an insn was split, and runs a CFG cleanup if
   splitting may have dropped a REG_EH_REGION note.  */

void
split_all_insns (void)
{
  bool changed;
  bool need_cfg_cleanup = false;
  basic_block bb;

  /* Set of blocks in which at least one insn was split; these need
     find_many_sub_basic_blocks afterwards.  */
  auto_sbitmap blocks (last_basic_block_for_fn (cfun));
  bitmap_clear (blocks);
  changed = false;

  FOR_EACH_BB_REVERSE_FN (bb, cfun)
    {
      rtx_insn *insn, *next;
      bool finish = false;

      rtl_profile_for_bb (bb);
      for (insn = BB_HEAD (bb); !finish ; insn = next)
	{
	  /* Can't use `next_real_insn' because that might go across
	     CODE_LABELS and short-out basic blocks.  */
	  next = NEXT_INSN (insn);
	  finish = (insn == BB_END (bb));

	  /* If INSN has a REG_EH_REGION note and we split INSN, the
	     resulting split may not have/need REG_EH_REGION notes.

	     If that happens and INSN was the last reference to the
	     given EH region, then the EH region will become unreachable.
	     We cannot leave the unreachable blocks in the CFG as that
	     will trigger a checking failure.

	     So track if INSN has a REG_EH_REGION note.  If so and we
	     split INSN, then trigger a CFG cleanup.  */
	  rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
	  if (INSN_P (insn))
	    {
	      rtx set = single_set (insn);

	      /* Don't split no-op move insns.  These should silently
		 disappear later in final.  Splitting such insns would
		 break the code that handles LIBCALL blocks.  */
	      if (set && set_noop_p (set))
		{
		  /* Nops get in the way while scheduling, so delete them
		     now if register allocation has already been done.  It
		     is too risky to try to do this before register
		     allocation, and there are unlikely to be very many
		     nops then anyways.  */
		  if (reload_completed)
		    delete_insn_and_edges (insn);
		  if (note)
		    need_cfg_cleanup = true;
		}
	      else
		{
		  if (split_insn (insn))
		    {
		      bitmap_set_bit (map: blocks, bitno: bb->index);
		      changed = true;
		      if (note)
			need_cfg_cleanup = true;
		    }
		}
	    }
	}
    }

  default_rtl_profile ();
  if (changed)
    {
      find_many_sub_basic_blocks (blocks);

      /* Splitting could drop an REG_EH_REGION if it potentially
	 trapped in its original form, but does not in its split
	 form.  Consider a FLOAT_TRUNCATE which splits into a memory
	 store/load pair and -fnon-call-exceptions.  */
      if (need_cfg_cleanup)
	cleanup_cfg (0);
    }

  checking_verify_flow_info ();
}
3627
3628/* Same as split_all_insns, but do not expect CFG to be available.
3629 Used by machine dependent reorg passes. */
3630
3631void
3632split_all_insns_noflow (void)
3633{
3634 rtx_insn *next, *insn;
3635
3636 for (insn = get_insns (); insn; insn = next)
3637 {
3638 next = NEXT_INSN (insn);
3639 if (INSN_P (insn))
3640 {
3641 /* Don't split no-op move insns. These should silently
3642 disappear later in final. Splitting such insns would
3643 break the code that handles LIBCALL blocks. */
3644 rtx set = single_set (insn);
3645 if (set && set_noop_p (set))
3646 {
3647 /* Nops get in the way while scheduling, so delete them
3648 now if register allocation has already been done. It
3649 is too risky to try to do this before register
3650 allocation, and there are unlikely to be very many
3651 nops then anyways.
3652
3653 ??? Should we use delete_insn when the CFG isn't valid? */
3654 if (reload_completed)
3655 delete_insn_and_edges (insn);
3656 }
3657 else
3658 split_insn (insn);
3659 }
3660 }
3661}
3662
/* One slot of the peephole2 scan buffer: an insn together with the set
   of registers live immediately before it.  */
struct peep2_insn_data
{
  rtx_insn *insn;	/* Insn in this slot; PEEP2_EOB marks the end of
			   the block, NULL marks an unused slot.  */
  regset live_before;	/* Registers live just before INSN.  */
};

/* Circular buffer of scanned insns, plus one extra slot for the
   end-of-block marker.  */
static struct peep2_insn_data peep2_insn_data[MAX_INSNS_PER_PEEP2 + 1];
/* Index of the buffer slot holding the current (oldest) insn.  */
static int peep2_current;

/* Set during replacement when follow-up work (rebuilding JUMP_LABELs,
   cleaning up the CFG) is needed once the pass finishes.  */
static bool peep2_do_rebuild_jump_labels;
static bool peep2_do_cleanup_cfg;

/* The number of instructions available to match a peep2.  */
int peep2_current_count;

/* A marker indicating the last insn of the block.  The live_before regset
   for this element is correct, indicating DF_LIVE_OUT for the block.  */
#define PEEP2_EOB invalid_insn_rtx
3681
3682/* Wrap N to fit into the peep2_insn_data buffer. */
3683
3684static int
3685peep2_buf_position (int n)
3686{
3687 if (n >= MAX_INSNS_PER_PEEP2 + 1)
3688 n -= MAX_INSNS_PER_PEEP2 + 1;
3689 return n;
3690}
3691
3692/* Return the Nth non-note insn after `current', or return NULL_RTX if it
3693 does not exist. Used by the recognizer to find the next insn to match
3694 in a multi-insn pattern. */
3695
3696rtx_insn *
3697peep2_next_insn (int n)
3698{
3699 gcc_assert (n <= peep2_current_count);
3700
3701 n = peep2_buf_position (n: peep2_current + n);
3702
3703 return peep2_insn_data[n].insn;
3704}
3705
3706/* Return true if REGNO is dead before the Nth non-note insn
3707 after `current'. */
3708
3709bool
3710peep2_regno_dead_p (int ofs, int regno)
3711{
3712 gcc_assert (ofs < MAX_INSNS_PER_PEEP2 + 1);
3713
3714 ofs = peep2_buf_position (n: peep2_current + ofs);
3715
3716 gcc_assert (peep2_insn_data[ofs].insn != NULL_RTX);
3717
3718 return ! REGNO_REG_SET_P (peep2_insn_data[ofs].live_before, regno);
3719}
3720
3721/* Similarly for a REG. */
3722
3723bool
3724peep2_reg_dead_p (int ofs, rtx reg)
3725{
3726 gcc_assert (ofs < MAX_INSNS_PER_PEEP2 + 1);
3727
3728 ofs = peep2_buf_position (n: peep2_current + ofs);
3729
3730 gcc_assert (peep2_insn_data[ofs].insn != NULL_RTX);
3731
3732 unsigned int end_regno = END_REGNO (x: reg);
3733 for (unsigned int regno = REGNO (reg); regno < end_regno; ++regno)
3734 if (REGNO_REG_SET_P (peep2_insn_data[ofs].live_before, regno))
3735 return false;
3736 return true;
3737}
3738
/* Regno offset to be used in the register search.  Persists between
   calls so successive searches start at different registers.  */
static int search_ofs;

/* Try to find a hard register of mode MODE, matching the register class in
   CLASS_STR, which is available at the beginning of insn CURRENT_INSN and
   remains available until the end of LAST_INSN.  LAST_INSN may be NULL_RTX,
   in which case the only condition is that the register must be available
   before CURRENT_INSN.
   Registers that already have bits set in REG_SET will not be considered.

   If an appropriate register is available, it will be returned and the
   corresponding bit(s) in REG_SET will be set; otherwise, NULL_RTX is
   returned.  */

rtx
peep2_find_free_register (int from, int to, const char *class_str,
			  machine_mode mode, HARD_REG_SET *reg_set)
{
  enum reg_class cl;
  HARD_REG_SET live;
  df_ref def;
  int i;

  gcc_assert (from < MAX_INSNS_PER_PEEP2 + 1);
  gcc_assert (to < MAX_INSNS_PER_PEEP2 + 1);

  from = peep2_buf_position (n: peep2_current + from);
  to = peep2_buf_position (n: peep2_current + to);

  gcc_assert (peep2_insn_data[from].insn != NULL_RTX);
  REG_SET_TO_HARD_REG_SET (live, peep2_insn_data[from].live_before);

  /* Accumulate into LIVE every register set or clobbered by the insns
     between FROM and TO; none of those may be handed out.  */
  while (from != to)
    {
      gcc_assert (peep2_insn_data[from].insn != NULL_RTX);

      /* Don't use registers set or clobbered by the insn.  */
      FOR_EACH_INSN_DEF (def, peep2_insn_data[from].insn)
	SET_HARD_REG_BIT (set&: live, DF_REF_REGNO (def));

      from = peep2_buf_position (n: from + 1);
    }

  cl = reg_class_for_constraint (c: lookup_constraint (p: class_str));

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    {
      int raw_regno, regno, j;
      bool success;

      /* Distribute the free registers as much as possible.  */
      raw_regno = search_ofs + i;
      if (raw_regno >= FIRST_PSEUDO_REGISTER)
	raw_regno -= FIRST_PSEUDO_REGISTER;
#ifdef REG_ALLOC_ORDER
      regno = reg_alloc_order[raw_regno];
#else
      regno = raw_regno;
#endif

      /* Can it support the mode we need?  */
      if (!targetm.hard_regno_mode_ok (regno, mode))
	continue;

      /* Check every hard register the mode occupies starting at REGNO.  */
      success = true;
      for (j = 0; success && j < hard_regno_nregs (regno, mode); j++)
	{
	  /* Don't allocate fixed registers.  */
	  if (fixed_regs[regno + j])
	    {
	      success = false;
	      break;
	    }
	  /* Don't allocate global registers.  */
	  if (global_regs[regno + j])
	    {
	      success = false;
	      break;
	    }
	  /* Make sure the register is of the right class.  */
	  if (! TEST_HARD_REG_BIT (reg_class_contents[cl], bit: regno + j))
	    {
	      success = false;
	      break;
	    }
	  /* And that we don't create an extra save/restore.  */
	  if (! crtl->abi->clobbers_full_reg_p (regno: regno + j)
	      && ! df_regs_ever_live_p (regno + j))
	    {
	      success = false;
	      break;
	    }

	  if (! targetm.hard_regno_scratch_ok (regno + j))
	    {
	      success = false;
	      break;
	    }

	  /* And we don't clobber traceback for noreturn functions.  */
	  if ((regno + j == FRAME_POINTER_REGNUM
	       || regno + j == HARD_FRAME_POINTER_REGNUM)
	      && (! reload_completed || frame_pointer_needed))
	    {
	      success = false;
	      break;
	    }

	  /* Reject registers the caller already claimed or that are
	     live across the insns being matched.  */
	  if (TEST_HARD_REG_BIT (set: *reg_set, bit: regno + j)
	      || TEST_HARD_REG_BIT (set: live, bit: regno + j))
	    {
	      success = false;
	      break;
	    }
	}

      if (success)
	{
	  add_to_hard_reg_set (regs: reg_set, mode, regno);

	  /* Start the next search with the next register.  */
	  if (++raw_regno >= FIRST_PSEUDO_REGISTER)
	    raw_regno = 0;
	  search_ofs = raw_regno;

	  return gen_rtx_REG (mode, regno);
	}
    }

  search_ofs = 0;
  return NULL_RTX;
}
3871
3872/* Forget all currently tracked instructions, only remember current
3873 LIVE regset. */
3874
3875static void
3876peep2_reinit_state (regset live)
3877{
3878 int i;
3879
3880 /* Indicate that all slots except the last holds invalid data. */
3881 for (i = 0; i < MAX_INSNS_PER_PEEP2; ++i)
3882 peep2_insn_data[i].insn = NULL;
3883 peep2_current_count = 0;
3884
3885 /* Indicate that the last slot contains live_after data. */
3886 peep2_insn_data[MAX_INSNS_PER_PEEP2].insn = PEEP2_EOB;
3887 peep2_current = MAX_INSNS_PER_PEEP2;
3888
3889 COPY_REG_SET (peep2_insn_data[MAX_INSNS_PER_PEEP2].live_before, live);
3890}
3891
/* Copies frame related info of an insn (OLD_INSN) to the single
   insn (NEW_INSN) that was obtained by splitting OLD_INSN.  */

void
copy_frame_info_to_split_insn (rtx_insn *old_insn, rtx_insn *new_insn)
{
  bool any_note = false;
  rtx note;

  if (!RTX_FRAME_RELATED_P (old_insn))
    return;

  RTX_FRAME_RELATED_P (new_insn) = 1;

  /* Allow the backend to fill in a note during the split.  */
  for (note = REG_NOTES (new_insn); note ; note = XEXP (note, 1))
    switch (REG_NOTE_KIND (note))
      {
      /* Any of these note kinds already describes the unwind effect,
	 so nothing more needs to be copied.  */
      case REG_FRAME_RELATED_EXPR:
      case REG_CFA_DEF_CFA:
      case REG_CFA_ADJUST_CFA:
      case REG_CFA_OFFSET:
      case REG_CFA_REGISTER:
      case REG_CFA_EXPRESSION:
      case REG_CFA_RESTORE:
      case REG_CFA_SET_VDRAP:
	any_note = true;
	break;
      default:
	break;
      }

  /* If the backend didn't supply a note, copy one over.  */
  if (!any_note)
    for (note = REG_NOTES (old_insn); note ; note = XEXP (note, 1))
      switch (REG_NOTE_KIND (note))
	{
	case REG_FRAME_RELATED_EXPR:
	case REG_CFA_DEF_CFA:
	case REG_CFA_ADJUST_CFA:
	case REG_CFA_OFFSET:
	case REG_CFA_REGISTER:
	case REG_CFA_EXPRESSION:
	case REG_CFA_RESTORE:
	case REG_CFA_SET_VDRAP:
	  add_reg_note (new_insn, REG_NOTE_KIND (note), XEXP (note, 0));
	  any_note = true;
	  break;
	default:
	  break;
	}

  /* If there still isn't a note, make sure the unwind info sees the
     same expression as before the split.  */
  if (!any_note)
    {
      rtx old_set, new_set;

      /* The old insn had better have been simple, or annotated.  */
      old_set = single_set (insn: old_insn);
      gcc_assert (old_set != NULL);

      new_set = single_set (insn: new_insn);
      if (!new_set || !rtx_equal_p (new_set, old_set))
	add_reg_note (new_insn, REG_FRAME_RELATED_EXPR, old_set);
    }

  /* Copy prologue/epilogue status.  This is required in order to keep
     proper placement of EPILOGUE_BEG and the DW_CFA_remember_state.  */
  maybe_copy_prologue_epilogue_insn (old_insn, new_insn);
}
3963
/* While scanning basic block BB, we found a match of length MATCH_LEN + 1,
   starting at INSN.  Perform the replacement, removing the old insns and
   replacing them with ATTEMPT.  Returns the last insn emitted, or NULL
   if the replacement is rejected.  */

static rtx_insn *
peep2_attempt (basic_block bb, rtx_insn *insn, int match_len, rtx_insn *attempt)
{
  int i;
  rtx_insn *last, *before_try, *x;
  rtx eh_note, as_note;
  rtx_insn *old_insn;
  rtx_insn *new_insn;
  bool was_call = false;

  /* If we are splitting an RTX_FRAME_RELATED_P insn, do not allow it to
     match more than one insn, or to be split into more than one insn.  */
  old_insn = peep2_insn_data[peep2_current].insn;
  if (RTX_FRAME_RELATED_P (old_insn))
    {
      if (match_len != 0)
	return NULL;

      /* Look for one "active" insn.  I.e. ignore any "clobber" insns that
	 may be in the stream for the purpose of register allocation.  */
      if (active_insn_p (attempt))
	new_insn = attempt;
      else
	new_insn = next_active_insn (attempt);
      if (next_active_insn (new_insn))
	return NULL;

      /* We have a 1-1 replacement.  Copy over any frame-related info.  */
      copy_frame_info_to_split_insn (old_insn, new_insn);
    }

  /* If we are splitting a CALL_INSN, look for the CALL_INSN
     in SEQ and copy our CALL_INSN_FUNCTION_USAGE and other
     cfg-related call notes.  */
  for (i = 0; i <= match_len; ++i)
    {
      int j;
      rtx note;

      j = peep2_buf_position (n: peep2_current + i);
      old_insn = peep2_insn_data[j].insn;
      if (!CALL_P (old_insn))
	continue;
      was_call = true;

      /* Find the (single) call in the replacement sequence.  */
      new_insn = attempt;
      while (new_insn != NULL_RTX)
	{
	  if (CALL_P (new_insn))
	    break;
	  new_insn = NEXT_INSN (insn: new_insn);
	}

      gcc_assert (new_insn != NULL_RTX);

      CALL_INSN_FUNCTION_USAGE (new_insn)
	= CALL_INSN_FUNCTION_USAGE (old_insn);
      SIBLING_CALL_P (new_insn) = SIBLING_CALL_P (old_insn);

      for (note = REG_NOTES (old_insn);
	   note;
	   note = XEXP (note, 1))
	switch (REG_NOTE_KIND (note))
	  {
	  case REG_NORETURN:
	  case REG_SETJMP:
	  case REG_TM:
	  case REG_CALL_NOCF_CHECK:
	    add_reg_note (new_insn, REG_NOTE_KIND (note),
			  XEXP (note, 0));
	    break;
	  default:
	    /* Discard all other reg notes.  */
	    break;
	  }

      /* Croak if there is another call in the sequence.  */
      while (++i <= match_len)
	{
	  j = peep2_buf_position (n: peep2_current + i);
	  old_insn = peep2_insn_data[j].insn;
	  gcc_assert (!CALL_P (old_insn));
	}
      break;
    }

  /* If we matched any instruction that had a REG_ARGS_SIZE, then
     move those notes over to the new sequence.  */
  as_note = NULL;
  for (i = match_len; i >= 0; --i)
    {
      int j = peep2_buf_position (n: peep2_current + i);
      old_insn = peep2_insn_data[j].insn;

      as_note = find_reg_note (old_insn, REG_ARGS_SIZE, NULL);
      if (as_note)
	break;
    }

  /* The EH note to preserve, if any, is taken from the last matched
     insn.  */
  i = peep2_buf_position (n: peep2_current + match_len);
  eh_note = find_reg_note (peep2_insn_data[i].insn, REG_EH_REGION, NULL_RTX);

  /* Replace the old sequence with the new.  */
  rtx_insn *peepinsn = peep2_insn_data[i].insn;
  last = emit_insn_after_setloc (attempt,
				 peep2_insn_data[i].insn,
				 INSN_LOCATION (insn: peepinsn));
  if (JUMP_P (peepinsn) && JUMP_P (last))
    CROSSING_JUMP_P (last) = CROSSING_JUMP_P (peepinsn);
  before_try = PREV_INSN (insn);
  delete_insn_chain (insn, peep2_insn_data[i].insn, false);

  /* Re-insert the EH_REGION notes.  */
  if (eh_note || (was_call && nonlocal_goto_handler_labels))
    {
      edge eh_edge;
      edge_iterator ei;

      FOR_EACH_EDGE (eh_edge, ei, bb->succs)
	if (eh_edge->flags & (EDGE_EH | EDGE_ABNORMAL_CALL))
	  break;

      if (eh_note)
	copy_reg_eh_region_note_backward (eh_note, last, before_try);

      /* Walk the new insns backwards; any that can throw or do a
	 nonlocal goto needs the block split and an EH edge added.  */
      if (eh_edge)
	for (x = last; x != before_try; x = PREV_INSN (insn: x))
	  if (x != BB_END (bb)
	      && (can_throw_internal (x)
		  || can_nonlocal_goto (x)))
	    {
	      edge nfte, nehe;
	      int flags;

	      nfte = split_block (bb, x);
	      flags = (eh_edge->flags
		       & (EDGE_EH | EDGE_ABNORMAL));
	      if (CALL_P (x))
		flags |= EDGE_ABNORMAL_CALL;
	      nehe = make_edge (nfte->src, eh_edge->dest,
				flags);

	      nehe->probability = eh_edge->probability;
	      nfte->probability = nehe->probability.invert ();

	      peep2_do_cleanup_cfg |= purge_dead_edges (nfte->dest);
	      bb = nfte->src;
	      eh_edge = nehe;
	    }

      /* Converting possibly trapping insn to non-trapping is
	 possible.  Zap dummy outgoing edges.  */
      peep2_do_cleanup_cfg |= purge_dead_edges (bb);
    }

  /* Re-insert the ARGS_SIZE notes.  */
  if (as_note)
    fixup_args_size_notes (before_try, last, get_args_size (as_note));

  /* Scan the new insns for embedded side effects and add appropriate
     REG_INC notes.  */
  if (AUTO_INC_DEC)
    for (x = last; x != before_try; x = PREV_INSN (insn: x))
      if (NONDEBUG_INSN_P (x))
	add_auto_inc_notes (x, PATTERN (insn: x));

  /* If we generated a jump instruction, it won't have
     JUMP_LABEL set.  Recompute after we're done.  */
  for (x = last; x != before_try; x = PREV_INSN (insn: x))
    if (JUMP_P (x))
      {
	peep2_do_rebuild_jump_labels = true;
	break;
      }

  return last;
}
4146
/* After performing a replacement in basic block BB, fix up the life
   information in our buffer.  LAST is the last of the insns that we
   emitted as a replacement.  PREV is the insn before the start of
   the replacement.  MATCH_LEN + 1 is the number of instructions that were
   matched, and which now need to be replaced in the buffer.  */

static void
peep2_update_life (basic_block bb, int match_len, rtx_insn *last,
		   rtx_insn *prev)
{
  /* Slot just past the replaced insns; its live_before set is still
     valid and seeds the backwards simulation below.  */
  int i = peep2_buf_position (n: peep2_current + match_len + 1);
  rtx_insn *x;
  regset_head live;

  INIT_REG_SET (&live);
  COPY_REG_SET (&live, peep2_insn_data[i].live_before);

  gcc_assert (peep2_current_count >= match_len + 1);
  peep2_current_count -= match_len + 1;

  /* Walk the replacement insns backwards, refilling buffer slots and
     recomputing each live-before set as we go.  */
  x = last;
  do
    {
      if (INSN_P (x))
	{
	  df_insn_rescan (x);
	  if (peep2_current_count < MAX_INSNS_PER_PEEP2)
	    {
	      peep2_current_count++;
	      if (--i < 0)
		i = MAX_INSNS_PER_PEEP2;
	      peep2_insn_data[i].insn = x;
	      df_simulate_one_insn_backwards (bb, x, &live);
	      COPY_REG_SET (peep2_insn_data[i].live_before, &live);
	    }
	}
      x = PREV_INSN (insn: x);
    }
  while (x != prev);
  CLEAR_REG_SET (&live);

  peep2_current = i;
}
4190
4191/* Add INSN, which is in BB, at the end of the peep2 insn buffer if possible.
4192 Return true if we added it, false otherwise. The caller will try to match
4193 peepholes against the buffer if we return false; otherwise it will try to
4194 add more instructions to the buffer. */
4195
4196static bool
4197peep2_fill_buffer (basic_block bb, rtx_insn *insn, regset live)
4198{
4199 int pos;
4200
4201 /* Once we have filled the maximum number of insns the buffer can hold,
4202 allow the caller to match the insns against peepholes. We wait until
4203 the buffer is full in case the target has similar peepholes of different
4204 length; we always want to match the longest if possible. */
4205 if (peep2_current_count == MAX_INSNS_PER_PEEP2)
4206 return false;
4207
4208 /* If an insn has RTX_FRAME_RELATED_P set, do not allow it to be matched with
4209 any other pattern, lest it change the semantics of the frame info. */
4210 if (RTX_FRAME_RELATED_P (insn))
4211 {
4212 /* Let the buffer drain first. */
4213 if (peep2_current_count > 0)
4214 return false;
4215 /* Now the insn will be the only thing in the buffer. */
4216 }
4217
4218 pos = peep2_buf_position (n: peep2_current + peep2_current_count);
4219 peep2_insn_data[pos].insn = insn;
4220 COPY_REG_SET (peep2_insn_data[pos].live_before, live);
4221 peep2_current_count++;
4222
4223 df_simulate_one_insn_forwards (bb, insn, live);
4224 return true;
4225}
4226
4227/* Perform the peephole2 optimization pass. */
4228
/* Perform the peephole2 optimization pass.  Walks every basic block,
   maintaining a sliding buffer of up to MAX_INSNS_PER_PEEP2 insns plus
   forward-simulated liveness, and repeatedly tries to match target
   peephole2 patterns against the buffer head.  */

static void
peephole2_optimize (void)
{
  rtx_insn *insn;
  bitmap live;
  int i;
  basic_block bb;

  peep2_do_cleanup_cfg = false;
  peep2_do_rebuild_jump_labels = false;

  /* Recompute dataflow with death notes; DCE runs first so liveness
     reflects only insns that survive.  */
  df_set_flags (DF_LR_RUN_DCE);
  df_note_add_problem ();
  df_analyze ();

  /* Initialize the regsets we're going to use.  One extra slot holds
     the liveness after the end-of-buffer sentinel.  */
  for (i = 0; i < MAX_INSNS_PER_PEEP2 + 1; ++i)
    peep2_insn_data[i].live_before = BITMAP_ALLOC (obstack: &reg_obstack);
  search_ofs = 0;
  live = BITMAP_ALLOC (obstack: &reg_obstack);

  FOR_EACH_BB_REVERSE_FN (bb, cfun)
    {
      /* Set once INSN has stepped past BB_END; from then on we only
	 drain the buffer, never refill it.  */
      bool past_end = false;
      int pos;

      rtl_profile_for_bb (bb);

      /* Start up propagation.  */
      bitmap_copy (live, DF_LR_IN (bb));
      df_simulate_initialize_forwards (bb, live);
      peep2_reinit_state (live);

      insn = BB_HEAD (bb);
      for (;;)
	{
	  rtx_insn *attempt, *head;
	  int match_len;

	  /* Skip notes and debug insns without buffering them.  */
	  if (!past_end && !NONDEBUG_INSN_P (insn))
	    {
	    next_insn:
	      insn = NEXT_INSN (insn);
	      if (insn == NEXT_INSN (BB_END (bb)))
		past_end = true;
	      continue;
	    }
	  if (!past_end && peep2_fill_buffer (bb, insn, live))
	    goto next_insn;

	  /* If we did not fill an empty buffer, it signals the end of the
	     block.  */
	  if (peep2_current_count == 0)
	    break;

	  /* The buffer filled to the current maximum, so try to match.  */

	  /* Mark the slot after the last buffered insn as end-of-buffer.  */
	  pos = peep2_buf_position (n: peep2_current + peep2_current_count);
	  peep2_insn_data[pos].insn = PEEP2_EOB;
	  COPY_REG_SET (peep2_insn_data[pos].live_before, live);

	  /* Match the peephole.  */
	  head = peep2_insn_data[peep2_current].insn;
	  attempt = peephole2_insns (PATTERN (insn: head), head, &match_len);
	  if (attempt != NULL)
	    {
	      rtx_insn *last = peep2_attempt (bb, insn: head, match_len, attempt);
	      if (last)
		{
		  /* Replacement succeeded: recompute liveness across the
		     replacement insns and keep matching from there.  */
		  peep2_update_life (bb, match_len, last, prev: PREV_INSN (insn: attempt));
		  continue;
		}
	    }

	  /* No match: advance the buffer by one insn.  */
	  peep2_current = peep2_buf_position (n: peep2_current + 1);
	  peep2_current_count--;
	}
    }

  /* Release per-pass resources and perform any deferred CFG fixups.  */
  default_rtl_profile ();
  for (i = 0; i < MAX_INSNS_PER_PEEP2 + 1; ++i)
    BITMAP_FREE (peep2_insn_data[i].live_before);
  BITMAP_FREE (live);
  if (peep2_do_rebuild_jump_labels)
    rebuild_jump_labels (get_insns ());
  if (peep2_do_cleanup_cfg)
    cleanup_cfg (CLEANUP_CFG_CHANGED);
}
4318
4319/* Common predicates for use with define_bypass. */
4320
4321/* Helper function for store_data_bypass_p, handle just a single SET
4322 IN_SET. */
4323
4324static bool
4325store_data_bypass_p_1 (rtx_insn *out_insn, rtx in_set)
4326{
4327 if (!MEM_P (SET_DEST (in_set)))
4328 return false;
4329
4330 rtx out_set = single_set (insn: out_insn);
4331 if (out_set)
4332 return !reg_mentioned_p (SET_DEST (out_set), SET_DEST (in_set));
4333
4334 rtx out_pat = PATTERN (insn: out_insn);
4335 if (GET_CODE (out_pat) != PARALLEL)
4336 return false;
4337
4338 for (int i = 0; i < XVECLEN (out_pat, 0); i++)
4339 {
4340 rtx out_exp = XVECEXP (out_pat, 0, i);
4341
4342 if (GET_CODE (out_exp) == CLOBBER || GET_CODE (out_exp) == USE)
4343 continue;
4344
4345 gcc_assert (GET_CODE (out_exp) == SET);
4346
4347 if (reg_mentioned_p (SET_DEST (out_exp), SET_DEST (in_set)))
4348 return false;
4349 }
4350
4351 return true;
4352}
4353
4354/* True if the dependency between OUT_INSN and IN_INSN is on the store
4355 data not the address operand(s) of the store. IN_INSN and OUT_INSN
4356 must be either a single_set or a PARALLEL with SETs inside. */
4357
4358bool
4359store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
4360{
4361 rtx in_set = single_set (insn: in_insn);
4362 if (in_set)
4363 return store_data_bypass_p_1 (out_insn, in_set);
4364
4365 rtx in_pat = PATTERN (insn: in_insn);
4366 if (GET_CODE (in_pat) != PARALLEL)
4367 return false;
4368
4369 for (int i = 0; i < XVECLEN (in_pat, 0); i++)
4370 {
4371 rtx in_exp = XVECEXP (in_pat, 0, i);
4372
4373 if (GET_CODE (in_exp) == CLOBBER || GET_CODE (in_exp) == USE)
4374 continue;
4375
4376 gcc_assert (GET_CODE (in_exp) == SET);
4377
4378 if (!store_data_bypass_p_1 (out_insn, in_set: in_exp))
4379 return false;
4380 }
4381
4382 return true;
4383}
4384
4385/* True if the dependency between OUT_INSN and IN_INSN is in the IF_THEN_ELSE
4386 condition, and not the THEN or ELSE branch. OUT_INSN may be either a single
4387 or multiple set; IN_INSN should be single_set for truth, but for convenience
4388 of insn categorization may be any JUMP or CALL insn. */
4389
4390bool
4391if_test_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
4392{
4393 rtx out_set, in_set;
4394
4395 in_set = single_set (insn: in_insn);
4396 if (! in_set)
4397 {
4398 gcc_assert (JUMP_P (in_insn) || CALL_P (in_insn));
4399 return false;
4400 }
4401
4402 if (GET_CODE (SET_SRC (in_set)) != IF_THEN_ELSE)
4403 return false;
4404 in_set = SET_SRC (in_set);
4405
4406 out_set = single_set (insn: out_insn);
4407 if (out_set)
4408 {
4409 if (reg_mentioned_p (SET_DEST (out_set), XEXP (in_set, 1))
4410 || reg_mentioned_p (SET_DEST (out_set), XEXP (in_set, 2)))
4411 return false;
4412 }
4413 else
4414 {
4415 rtx out_pat;
4416 int i;
4417
4418 out_pat = PATTERN (insn: out_insn);
4419 gcc_assert (GET_CODE (out_pat) == PARALLEL);
4420
4421 for (i = 0; i < XVECLEN (out_pat, 0); i++)
4422 {
4423 rtx exp = XVECEXP (out_pat, 0, i);
4424
4425 if (GET_CODE (exp) == CLOBBER)
4426 continue;
4427
4428 gcc_assert (GET_CODE (exp) == SET);
4429
4430 if (reg_mentioned_p (SET_DEST (out_set), XEXP (in_set, 1))
4431 || reg_mentioned_p (SET_DEST (out_set), XEXP (in_set, 2)))
4432 return false;
4433 }
4434 }
4435
4436 return true;
4437}
4438
4439static unsigned int
4440rest_of_handle_peephole2 (void)
4441{
4442 if (HAVE_peephole2)
4443 peephole2_optimize ();
4444
4445 return 0;
4446}
4447
4448namespace {
4449
4450const pass_data pass_data_peephole2 =
4451{
4452 .type: RTL_PASS, /* type */
4453 .name: "peephole2", /* name */
4454 .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */
4455 .tv_id: TV_PEEPHOLE2, /* tv_id */
4456 .properties_required: 0, /* properties_required */
4457 .properties_provided: 0, /* properties_provided */
4458 .properties_destroyed: 0, /* properties_destroyed */
4459 .todo_flags_start: 0, /* todo_flags_start */
4460 TODO_df_finish, /* todo_flags_finish */
4461};
4462
4463class pass_peephole2 : public rtl_opt_pass
4464{
4465public:
4466 pass_peephole2 (gcc::context *ctxt)
4467 : rtl_opt_pass (pass_data_peephole2, ctxt)
4468 {}
4469
4470 /* opt_pass methods: */
4471 /* The epiphany backend creates a second instance of this pass, so we need
4472 a clone method. */
4473 opt_pass * clone () final override { return new pass_peephole2 (m_ctxt); }
4474 bool gate (function *) final override
4475 {
4476 return (optimize > 0 && flag_peephole2);
4477 }
4478 unsigned int execute (function *) final override
4479 {
4480 return rest_of_handle_peephole2 ();
4481 }
4482
4483}; // class pass_peephole2
4484
4485} // anon namespace
4486
4487rtl_opt_pass *
4488make_pass_peephole2 (gcc::context *ctxt)
4489{
4490 return new pass_peephole2 (ctxt);
4491}
4492
4493namespace {
4494
4495const pass_data pass_data_split_all_insns =
4496{
4497 .type: RTL_PASS, /* type */
4498 .name: "split1", /* name */
4499 .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */
4500 .tv_id: TV_NONE, /* tv_id */
4501 .properties_required: 0, /* properties_required */
4502 PROP_rtl_split_insns, /* properties_provided */
4503 .properties_destroyed: 0, /* properties_destroyed */
4504 .todo_flags_start: 0, /* todo_flags_start */
4505 .todo_flags_finish: 0, /* todo_flags_finish */
4506};
4507
4508class pass_split_all_insns : public rtl_opt_pass
4509{
4510public:
4511 pass_split_all_insns (gcc::context *ctxt)
4512 : rtl_opt_pass (pass_data_split_all_insns, ctxt)
4513 {}
4514
4515 /* opt_pass methods: */
4516 /* The epiphany backend creates a second instance of this pass, so
4517 we need a clone method. */
4518 opt_pass * clone () final override
4519 {
4520 return new pass_split_all_insns (m_ctxt);
4521 }
4522 unsigned int execute (function *) final override
4523 {
4524 split_all_insns ();
4525 return 0;
4526 }
4527
4528}; // class pass_split_all_insns
4529
4530} // anon namespace
4531
4532rtl_opt_pass *
4533make_pass_split_all_insns (gcc::context *ctxt)
4534{
4535 return new pass_split_all_insns (ctxt);
4536}
4537
4538namespace {
4539
4540const pass_data pass_data_split_after_reload =
4541{
4542 .type: RTL_PASS, /* type */
4543 .name: "split2", /* name */
4544 .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */
4545 .tv_id: TV_NONE, /* tv_id */
4546 .properties_required: 0, /* properties_required */
4547 .properties_provided: 0, /* properties_provided */
4548 .properties_destroyed: 0, /* properties_destroyed */
4549 .todo_flags_start: 0, /* todo_flags_start */
4550 .todo_flags_finish: 0, /* todo_flags_finish */
4551};
4552
4553class pass_split_after_reload : public rtl_opt_pass
4554{
4555public:
4556 pass_split_after_reload (gcc::context *ctxt)
4557 : rtl_opt_pass (pass_data_split_after_reload, ctxt)
4558 {}
4559
4560 /* opt_pass methods: */
4561 bool gate (function *) final override
4562 {
4563 /* If optimizing, then go ahead and split insns now. */
4564 return optimize > 0;
4565 }
4566
4567 unsigned int execute (function *) final override
4568 {
4569 split_all_insns ();
4570 return 0;
4571 }
4572
4573}; // class pass_split_after_reload
4574
4575} // anon namespace
4576
4577rtl_opt_pass *
4578make_pass_split_after_reload (gcc::context *ctxt)
4579{
4580 return new pass_split_after_reload (ctxt);
4581}
4582
/* True when the split3 pass (splitting before sched2) should run:
   only with optimization and post-reload scheduling enabled, and only
   when the compiler was built with insn scheduling at all.  */

static bool
enable_split_before_sched2 (void)
{
#ifdef INSN_SCHEDULING
  if (optimize <= 0)
    return false;
  return flag_schedule_insns_after_reload;
#else
  return false;
#endif
}
4592
4593namespace {
4594
4595const pass_data pass_data_split_before_sched2 =
4596{
4597 .type: RTL_PASS, /* type */
4598 .name: "split3", /* name */
4599 .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */
4600 .tv_id: TV_NONE, /* tv_id */
4601 .properties_required: 0, /* properties_required */
4602 .properties_provided: 0, /* properties_provided */
4603 .properties_destroyed: 0, /* properties_destroyed */
4604 .todo_flags_start: 0, /* todo_flags_start */
4605 .todo_flags_finish: 0, /* todo_flags_finish */
4606};
4607
4608class pass_split_before_sched2 : public rtl_opt_pass
4609{
4610public:
4611 pass_split_before_sched2 (gcc::context *ctxt)
4612 : rtl_opt_pass (pass_data_split_before_sched2, ctxt)
4613 {}
4614
4615 /* opt_pass methods: */
4616 bool gate (function *) final override
4617 {
4618 return enable_split_before_sched2 ();
4619 }
4620
4621 unsigned int execute (function *) final override
4622 {
4623 split_all_insns ();
4624 return 0;
4625 }
4626
4627}; // class pass_split_before_sched2
4628
4629} // anon namespace
4630
4631rtl_opt_pass *
4632make_pass_split_before_sched2 (gcc::context *ctxt)
4633{
4634 return new pass_split_before_sched2 (ctxt);
4635}
4636
4637namespace {
4638
4639const pass_data pass_data_split_before_regstack =
4640{
4641 .type: RTL_PASS, /* type */
4642 .name: "split4", /* name */
4643 .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */
4644 .tv_id: TV_NONE, /* tv_id */
4645 .properties_required: 0, /* properties_required */
4646 .properties_provided: 0, /* properties_provided */
4647 .properties_destroyed: 0, /* properties_destroyed */
4648 .todo_flags_start: 0, /* todo_flags_start */
4649 .todo_flags_finish: 0, /* todo_flags_finish */
4650};
4651
4652class pass_split_before_regstack : public rtl_opt_pass
4653{
4654public:
4655 pass_split_before_regstack (gcc::context *ctxt)
4656 : rtl_opt_pass (pass_data_split_before_regstack, ctxt)
4657 {}
4658
4659 /* opt_pass methods: */
4660 bool gate (function *) final override;
4661 unsigned int execute (function *) final override
4662 {
4663 split_all_insns ();
4664 return 0;
4665 }
4666
4667}; // class pass_split_before_regstack
4668
4669bool
4670pass_split_before_regstack::gate (function *)
4671{
4672#if HAVE_ATTR_length && defined (STACK_REGS)
4673 /* If flow2 creates new instructions which need splitting
4674 and scheduling after reload is not done, they might not be
4675 split until final which doesn't allow splitting
4676 if HAVE_ATTR_length. Selective scheduling can result in
4677 further instructions that need splitting. */
4678#ifdef INSN_SCHEDULING
4679 return !enable_split_before_sched2 () || flag_selective_scheduling2;
4680#else
4681 return !enable_split_before_sched2 ();
4682#endif
4683#else
4684 return false;
4685#endif
4686}
4687
4688} // anon namespace
4689
4690rtl_opt_pass *
4691make_pass_split_before_regstack (gcc::context *ctxt)
4692{
4693 return new pass_split_before_regstack (ctxt);
4694}
4695
4696namespace {
4697
4698const pass_data pass_data_split_for_shorten_branches =
4699{
4700 .type: RTL_PASS, /* type */
4701 .name: "split5", /* name */
4702 .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */
4703 .tv_id: TV_NONE, /* tv_id */
4704 .properties_required: 0, /* properties_required */
4705 .properties_provided: 0, /* properties_provided */
4706 .properties_destroyed: 0, /* properties_destroyed */
4707 .todo_flags_start: 0, /* todo_flags_start */
4708 .todo_flags_finish: 0, /* todo_flags_finish */
4709};
4710
4711class pass_split_for_shorten_branches : public rtl_opt_pass
4712{
4713public:
4714 pass_split_for_shorten_branches (gcc::context *ctxt)
4715 : rtl_opt_pass (pass_data_split_for_shorten_branches, ctxt)
4716 {}
4717
4718 /* opt_pass methods: */
4719 bool gate (function *) final override
4720 {
4721 /* The placement of the splitting that we do for shorten_branches
4722 depends on whether regstack is used by the target or not. */
4723#if HAVE_ATTR_length && !defined (STACK_REGS)
4724 return true;
4725#else
4726 return false;
4727#endif
4728 }
4729
4730 unsigned int execute (function *) final override
4731 {
4732 split_all_insns_noflow ();
4733 return 0;
4734 }
4735
4736}; // class pass_split_for_shorten_branches
4737
4738} // anon namespace
4739
4740rtl_opt_pass *
4741make_pass_split_for_shorten_branches (gcc::context *ctxt)
4742{
4743 return new pass_split_for_shorten_branches (ctxt);
4744}
4745
4746/* (Re)initialize the target information after a change in target. */
4747
4748void
4749recog_init ()
4750{
4751 /* The information is zero-initialized, so we don't need to do anything
4752 first time round. */
4753 if (!this_target_recog->x_initialized)
4754 {
4755 this_target_recog->x_initialized = true;
4756 return;
4757 }
4758 memset (s: this_target_recog->x_bool_attr_masks, c: 0,
4759 n: sizeof (this_target_recog->x_bool_attr_masks));
4760 for (unsigned int i = 0; i < NUM_INSN_CODES; ++i)
4761 if (this_target_recog->x_op_alt[i])
4762 {
4763 free (ptr: this_target_recog->x_op_alt[i]);
4764 this_target_recog->x_op_alt[i] = 0;
4765 }
4766}
4767

source code of gcc/recog.cc