forked from FINAKON/HelpProject
1. Initial commit: boilerplate code and a POC to demonstrate the concept of context-sensitive help.
2. Frontend code written in ReactJS.
3. Backend code written in Java on the Spring Boot framework.
4. Frontend start (prerequisites: node, npm): npm run dev ==> starts the frontend Vite server.
5. Backend start (prerequisites: java, mvn): mvn spring-boot:run ==> starts the backend server.
6. Visit http://localhost:5173/ for a basic demo of the help; press F1 in the textboxes (a rough sketch of such a handler is shown below).
7. Visit http://localhost:5173/editor and enter "admin123" to add/modify the help texts.

Happy Coding!!!

Thank you,
Bhargava.
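For illustration only, here is a minimal, hypothetical sketch of how the F1-driven context-sensitive help could be wired on the React side. The HelpAwareInput component, the fieldId prop, and the /api/help/{fieldId} endpoint are assumptions made for this sketch, not the project's actual names.

```jsx
import { useState } from 'react';

// Hypothetical sketch: intercept F1 on a textbox and show help for that field.
// The endpoint `/api/help/<fieldId>` and the prop names are assumptions made
// for illustration; the real project may use different names.
function HelpAwareInput({ fieldId, ...props }) {
  const [helpText, setHelpText] = useState(null);

  async function handleKeyDown(event) {
    if (event.key !== 'F1') return;
    event.preventDefault(); // stop the browser's own help page from opening
    const response = await fetch(`/api/help/${fieldId}`);
    setHelpText(response.ok ? await response.text() : 'No help available.');
  }

  return (
    <span>
      <input {...props} onKeyDown={handleKeyDown} />
      {helpText && <div role="tooltip">{helpText}</div>}
    </span>
  );
}

export default HelpAwareInput;
```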
JavaScript · 241 lines · 7.0 KiB
/**
 * @import {
 *   Code,
 *   Construct,
 *   Event,
 *   Point,
 *   Resolver,
 *   State,
 *   TokenizeContext,
 *   Tokenizer,
 *   Token
 * } from 'micromark-util-types'
 */

import { push, splice } from 'micromark-util-chunked';
import { classifyCharacter } from 'micromark-util-classify-character';
import { resolveAll } from 'micromark-util-resolve-all';

/** @type {Construct} */
export const attention = {
  name: 'attention',
  resolveAll: resolveAllAttention,
  tokenize: tokenizeAttention
};

/**
 * Take all events and resolve attention to emphasis or strong.
 *
 * @type {Resolver}
 */
// eslint-disable-next-line complexity
function resolveAllAttention(events, context) {
  let index = -1;
  /** @type {number} */
  let open;
  /** @type {Token} */
  let group;
  /** @type {Token} */
  let text;
  /** @type {Token} */
  let openingSequence;
  /** @type {Token} */
  let closingSequence;
  /** @type {number} */
  let use;
  /** @type {Array<Event>} */
  let nextEvents;
  /** @type {number} */
  let offset;

  // Walk through all events.
  //
  // Note: performance of this is fine on an mb of normal markdown, but it’s
  // a bottleneck for malicious stuff.
  while (++index < events.length) {
    // Find a token that can close.
    if (events[index][0] === 'enter' && events[index][1].type === 'attentionSequence' && events[index][1]._close) {
      open = index;

      // Now walk back to find an opener.
      while (open--) {
        // Find a token that can open the closer.
        if (events[open][0] === 'exit' && events[open][1].type === 'attentionSequence' && events[open][1]._open &&
        // If the markers are the same:
        context.sliceSerialize(events[open][1]).charCodeAt(0) === context.sliceSerialize(events[index][1]).charCodeAt(0)) {
          // If the opening can close or the closing can open,
          // and the close size *is not* a multiple of three,
          // but the sum of the opening and closing size *is* multiple of three,
          // then don’t match.
          if ((events[open][1]._close || events[index][1]._open) && (events[index][1].end.offset - events[index][1].start.offset) % 3 && !((events[open][1].end.offset - events[open][1].start.offset + events[index][1].end.offset - events[index][1].start.offset) % 3)) {
            continue;
          }

          // Number of markers to use from the sequence.
          use = events[open][1].end.offset - events[open][1].start.offset > 1 && events[index][1].end.offset - events[index][1].start.offset > 1 ? 2 : 1;
          const start = {
            ...events[open][1].end
          };
          const end = {
            ...events[index][1].start
          };
          movePoint(start, -use);
          movePoint(end, use);
          openingSequence = {
            type: use > 1 ? "strongSequence" : "emphasisSequence",
            start,
            end: {
              ...events[open][1].end
            }
          };
          closingSequence = {
            type: use > 1 ? "strongSequence" : "emphasisSequence",
            start: {
              ...events[index][1].start
            },
            end
          };
          text = {
            type: use > 1 ? "strongText" : "emphasisText",
            start: {
              ...events[open][1].end
            },
            end: {
              ...events[index][1].start
            }
          };
          group = {
            type: use > 1 ? "strong" : "emphasis",
            start: {
              ...openingSequence.start
            },
            end: {
              ...closingSequence.end
            }
          };
          events[open][1].end = {
            ...openingSequence.start
          };
          events[index][1].start = {
            ...closingSequence.end
          };
          nextEvents = [];

          // If there are more markers in the opening, add them before.
          if (events[open][1].end.offset - events[open][1].start.offset) {
            nextEvents = push(nextEvents, [['enter', events[open][1], context], ['exit', events[open][1], context]]);
          }

          // Opening.
          nextEvents = push(nextEvents, [['enter', group, context], ['enter', openingSequence, context], ['exit', openingSequence, context], ['enter', text, context]]);

          // Always populated by defaults.

          // Between.
          nextEvents = push(nextEvents, resolveAll(context.parser.constructs.insideSpan.null, events.slice(open + 1, index), context));

          // Closing.
          nextEvents = push(nextEvents, [['exit', text, context], ['enter', closingSequence, context], ['exit', closingSequence, context], ['exit', group, context]]);

          // If there are more markers in the closing, add them after.
          if (events[index][1].end.offset - events[index][1].start.offset) {
            offset = 2;
            nextEvents = push(nextEvents, [['enter', events[index][1], context], ['exit', events[index][1], context]]);
          } else {
            offset = 0;
          }
          splice(events, open - 1, index - open + 3, nextEvents);
          index = open + nextEvents.length - offset - 2;
          break;
        }
      }
    }
  }

  // Remove remaining sequences.
  index = -1;
  while (++index < events.length) {
    if (events[index][1].type === 'attentionSequence') {
      events[index][1].type = 'data';
    }
  }
  return events;
}

/**
 * @this {TokenizeContext}
 *   Context.
 * @type {Tokenizer}
 */
function tokenizeAttention(effects, ok) {
  const attentionMarkers = this.parser.constructs.attentionMarkers.null;
  const previous = this.previous;
  const before = classifyCharacter(previous);

  /** @type {NonNullable<Code>} */
  let marker;
  return start;

  /**
   * Before a sequence.
   *
   * ```markdown
   * > | **
   *     ^
   * ```
   *
   * @type {State}
   */
  function start(code) {
    marker = code;
    effects.enter('attentionSequence');
    return inside(code);
  }

  /**
   * In a sequence.
   *
   * ```markdown
   * > | **
   *     ^^
   * ```
   *
   * @type {State}
   */
  function inside(code) {
    if (code === marker) {
      effects.consume(code);
      return inside;
    }
    const token = effects.exit('attentionSequence');

    // To do: next major: move this to resolver, just like `markdown-rs`.
    const after = classifyCharacter(code);

    // Always populated by defaults.

    const open = !after || after === 2 && before || attentionMarkers.includes(code);
    const close = !before || before === 2 && after || attentionMarkers.includes(previous);
    token._open = Boolean(marker === 42 ? open : open && (before || !close));
    token._close = Boolean(marker === 42 ? close : close && (after || !open));
    return ok(code);
  }
}

/**
 * Move a point a bit.
 *
 * Note: `move` only works inside lines! It’s not possible to move past other
 * chunks (replacement characters, tabs, or line endings).
 *
 * @param {Point} point
 *   Point.
 * @param {number} offset
 *   Amount to move.
 * @returns {undefined}
 *   Nothing.
 */
function movePoint(point, offset) {
  point.column += offset;
  point.offset += offset;
  point._bufferIndex += offset;
}
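For reference, this attention construct ships with micromark's default CommonMark constructs, so it is exercised indirectly through micromark's public API. A minimal sketch, assuming the micromark package itself is installed:

```js
import { micromark } from 'micromark';

// The attention construct above resolves runs of `*` and `_` into
// emphasis and strong while parsing.
console.log(micromark('some *emphasis* and **strong** text'));
// => '<p>some <em>emphasis</em> and <strong>strong</strong> text</p>'
```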