Bhargava 6063bd1724 Help Project:
1. Initial commit - boilerplate code and a POC demonstrating context-sensitive help
2. Frontend written in ReactJS
3. Backend written in Java with the Spring Boot framework
4. Frontend start:
        prerequisites: node, npm
        npm run dev  ==> starts the frontend Vite dev server
5. Backend start:
        prerequisites: java, mvn
        mvn spring-boot:run  ==> starts the backend server
6. Visit http://localhost:5173/ for a basic demo of the help; press F1 in text boxes (see the sketch after this list)
7. Visit http://localhost:5173/editor and enter "admin123" to add or modify help texts.
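
A rough sketch of the context-sensitive help idea from item 1 (hypothetical component and prop names, not the actual code): a React input that looks up its own help text when F1 is pressed.

// Hypothetical sketch only - an input that shows help text keyed by its id on F1.
import React from 'react';

function HelpTextbox({ id, helpTexts }) {
  function handleKeyDown(event) {
    if (event.key === 'F1') {
      event.preventDefault(); // suppress the browser's built-in F1 help
      alert(helpTexts[id] ?? `No help found for "${id}".`);
    }
  }
  return <input id={id} onKeyDown={handleKeyDown} />;
}

// Usage: <HelpTextbox id="username" helpTexts={{ username: 'Your login name.' }} />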

Happy Coding !!!

Thank you,
Bhargava.
2025-07-04 15:54:13 +05:30

/**
 * @import {
 *   Construct,
 *   Resolver,
 *   State,
 *   TokenizeContext,
 *   Tokenizer,
 *   Token
 * } from 'micromark-util-types'
 */
import { factorySpace } from 'micromark-factory-space';
import { markdownLineEnding } from 'micromark-util-character';
import { subtokenize } from 'micromark-util-subtokenize';
/**
 * No name because it must not be turned off.
 *
 * @type {Construct}
 */
export const content = {
  resolve: resolveContent,
  tokenize: tokenizeContent
};
/** @type {Construct} */
const continuationConstruct = {
  partial: true,
  tokenize: tokenizeContinuation
};
/**
 * Content is transparent: it's parsed right now. That way, definitions are
 * also parsed right now: before text in paragraphs (specifically, media) is
 * parsed.
 *
 * @type {Resolver}
 */
function resolveContent(events) {
  subtokenize(events);
  return events;
}
/**
 * @this {TokenizeContext}
 *   Context.
 * @type {Tokenizer}
 */
function tokenizeContent(effects, ok) {
  /** @type {Token | undefined} */
  let previous;
  return chunkStart;
  /**
   * Before a content chunk.
   *
   * ```markdown
   * > | abc
   *     ^
   * ```
   *
   * @type {State}
   */
  function chunkStart(code) {
    effects.enter("content");
    previous = effects.enter("chunkContent", {
      contentType: "content"
    });
    return chunkInside(code);
  }
  /**
   * In a content chunk.
   *
   * ```markdown
   * > | abc
   *     ^^^
   * ```
   *
   * @type {State}
   */
  function chunkInside(code) {
    if (code === null) {
      return contentEnd(code);
    }

    // To do: in `markdown-rs`, each line is parsed on its own, and everything
    // is stitched together resolving.
    if (markdownLineEnding(code)) {
      return effects.check(
        continuationConstruct,
        contentContinue,
        contentEnd
      )(code);
    }

    // Data.
    effects.consume(code);
    return chunkInside;
  }
  /**
   * At the end of content: the document ended, or the next line cannot
   * continue this content.
   *
   * @type {State}
   */
  function contentEnd(code) {
    effects.exit("chunkContent");
    effects.exit("content");
    return ok(code);
  }
  /**
   * After a line ending that is followed by more content: consume it, close
   * the current chunk, and open a new chunk linked back to the previous one.
   *
   * @type {State}
   */
  function contentContinue(code) {
    effects.consume(code);
    effects.exit("chunkContent");
    previous.next = effects.enter("chunkContent", {
      contentType: "content",
      previous
    });
    previous = previous.next;
    return chunkInside;
  }
}
/**
 * @this {TokenizeContext}
 *   Context.
 * @type {Tokenizer}
 */
function tokenizeContinuation(effects, ok, nok) {
  const self = this;
  return startLookahead;
  /**
   * At a line ending: consume it, then look past the line prefix of the next
   * line to decide whether the content continues.
   *
   * @type {State}
   */
  function startLookahead(code) {
    effects.exit("chunkContent");
    effects.enter("lineEnding");
    effects.consume(code);
    effects.exit("lineEnding");
    return factorySpace(effects, prefixed, "linePrefix");
  }
  /**
   * After the line prefix of the next line: check whether that line continues
   * the content.
   *
   * @type {State}
   */
  function prefixed(code) {
    // A blank line (or EOF) ends the content.
    if (code === null || markdownLineEnding(code)) {
      return nok(code);
    }

    // Always populated by defaults.
    const tail = self.events[self.events.length - 1];

    // A line indented by 4+ spaces would be indented code, which cannot
    // interrupt content, so the content continues.
    if (
      !self.parser.constructs.disable.null.includes('codeIndented') &&
      tail &&
      tail[1].type === "linePrefix" &&
      tail[2].sliceSerialize(tail[1], true).length >= 4
    ) {
      return ok(code);
    }

    // Otherwise the content continues only if no flow construct can
    // interrupt here.
    return effects.interrupt(self.parser.constructs.flow, nok, ok)(code);
  }
}
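
For context, a minimal sketch of how this construct surfaces through micromark's public API (assuming this file is the content construct shipped with micromark; the expected outputs follow CommonMark semantics):

// Sketch: observing the content construct via micromark's public API.
import { micromark } from 'micromark';

// Two lines with no blank line between them are one content block: the
// continuation check accepts the second line, so both chunks are linked
// and parsed together as a single paragraph.
console.log(micromark('alpha\nbravo'));
// => '<p>alpha\nbravo</p>'

// An ATX heading can interrupt content, so `effects.interrupt` makes the
// continuation check fail and the content ends after the first line.
console.log(micromark('alpha\n# bravo'));
// => '<p>alpha</p>\n<h1>bravo</h1>'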