# BonkLM

BonkLM is a TypeScript/JavaScript library providing production-ready validators to protect LLM applications against common AI-specific attack vectors, including prompt injection, jailbreaks, PII leakage, and toxic output.
## Installation

```bash
# npm
npm install bonklm

# pnpm
pnpm add bonklm

# yarn
yarn add bonklm
```

## Quick start

```js
import { PromptInjectionValidator } from 'bonklm';

const validator = new PromptInjectionValidator();

const result = await validator.validate({
  input: userMessage,
  context: systemPrompt,
});

if (!result.safe) {
  console.warn('Potential prompt injection:', result.reason);
  // reject or sanitize the input
}
```

## Chaining validators

```js
import { ValidatorChain, PromptInjectionValidator, PIILeakValidator } from 'bonklm';

const chain = new ValidatorChain([
  new PromptInjectionValidator({ sensitivity: 'high' }),
  new PIILeakValidator({ patterns: ['email', 'phone', 'ssn'] }),
]);

const result = await chain.validate({ input: userMessage });

if (!result.safe) {
  throw new Error(`Validation failed: ${result.violations.join(', ')}`);
}
```

## Documentation

Full API reference, configuration options, and advanced usage patterns are available in the GitHub repository README and the inline JSDoc documentation.
BonkLM is open-source under the MIT license. Contributions, bug reports, and feature requests are welcome.
Repository: [BlackUnicornSecurity/bonklm](https://github.com/BlackUnicornSecurity/bonklm)