76 changes: 76 additions & 0 deletions documentation/modules/auxiliary/scanner/http/ollama_info.md
@@ -0,0 +1,76 @@
## Vulnerable Application

This module identifies ollama instances and enumerates both the LLM
models installed on the instance and those currently running.

### Building the Image

Create the following Dockerfile:

```dockerfile
FROM ollama/ollama

EXPOSE 11434

VOLUME /root/.ollama

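# Start the server in the background long enough to pull the models, then
# build a custom model (my-model) with a known SYSTEM prompt on top of smollm.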
RUN /bin/ollama serve & \
sleep 5 && \
/bin/ollama pull llama3.2:1b && \
/bin/ollama pull qwen3.5:0.8b && \
/bin/ollama pull smollm:135m && \
printf 'FROM smollm:135m\nSYSTEM "you are an AI assistant and this is your system prompt"\n' > /Modelfile && \
/bin/ollama create my-model -f /Modelfile

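# Startup script: launch the server, then send one chat request so that
# my-model is loaded into memory and shows up as a running model.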
RUN printf '#!/bin/bash\n/bin/ollama serve &\nsleep 3\ncurl -s http://localhost:11434/api/chat -d '"'"'{"model":"my-model","stream":false,"messages":[{"role":"user","content":"warmup"}]}'"'"'\nwait\n' > /start.sh && \
chmod +x /start.sh

ENTRYPOINT []
CMD ["/start.sh"]
```

Build the image and start the container:

```sh
docker build -t my-ollama .
docker run -d -p 11434:11434 --name my-ollama my-ollama
```

## Verification Steps

1. Start the ollama Docker container
2. Start msfconsole
3. Do: `use auxiliary/scanner/http/ollama_info`
4. Do: `set rhosts [IPs]`
5. Do: `run`
6. You should get information about the models in the ollama instance; the raw API calls the module makes are sketched below
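
The module wraps three unauthenticated ollama API endpoints: the base URI for fingerprinting, `/api/tags` and `/api/ps` for model enumeration, and `/api/show` for per-model details. Below is a minimal standalone sketch of the same queries, assuming a local instance on the default port; the model name `smollm:135m` is the one pulled by the Dockerfile above.

```ruby
require 'net/http'
require 'json'

base = URI('http://127.0.0.1:11434')

# Fingerprint: the base URI answers with the literal string 'Ollama is running'.
puts Net::HTTP.get(base + '/')

# GET /api/tags lists installed models; GET /api/ps lists running ones.
installed = JSON.parse(Net::HTTP.get(base + '/api/tags'))['models']
running = JSON.parse(Net::HTTP.get(base + '/api/ps'))['models']
puts (installed + running).map { |m| m['name'] }.inspect

# POST /api/show returns per-model details such as parameters and the modelfile.
res = Net::HTTP.post(base + '/api/show',
                     JSON.generate('model' => 'smollm:135m'),
                     'Content-Type' => 'application/json')
puts JSON.parse(res.body).dig('details', 'parameter_size')
```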

## Options
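
### RPORT

The port ollama listens on. Defaults to `11434`.

### TARGETURI

The base URI path of the ollama service. Defaults to `/`.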

## Scenarios

### Docker image

```
msf > use auxiliary/scanner/http/ollama_info
msf auxiliary(scanner/http/ollama_info) > set rhosts 127.0.0.1
rhosts => 127.0.0.1
msf auxiliary(scanner/http/ollama_info) > run
[*] Checking 127.0.0.1
[*] Found model: my-model:latest
[*] Found model: smollm:135m
[*] Found model: qwen3.5:0.8b
[*] Found model: llama3.2:1b
[*] 127.0.0.1 Ollama Models
=======================

Name Release Status Size Parameter Size Temperature System Prompt
---- ------- ------ ---- -------------- ----------- -------------
llama3.2 1b Installed 1.23 GB 1.2B N/A N/A
my-model latest Running 130.77 MB 134.52M 0.2 you are an AI assistant and this is your system prompt
qwen3.5 0.8b Installed 988.05 MB 873.44M 1 N/A
smollm 135m Installed 87.49 MB 134.52M 0.2 N/A

[*] Scanned 1 of 1 hosts (100% complete)
[*] Auxiliary module execution completed
```
189 changes: 189 additions & 0 deletions modules/auxiliary/scanner/http/ollama_info.rb
@@ -0,0 +1,189 @@
##
# This module requires Metasploit: https://metasploit.com/download
# Current source: https://github.com/rapid7/metasploit-framework
##

class MetasploitModule < Msf::Auxiliary
include Msf::Exploit::Remote::HttpClient
include Msf::Auxiliary::Scanner

def initialize(info = {})
super(
update_info(
info,
'Name' => 'Ollama Scanner',
'Description' => %q{
This module identifies ollama instances and enumerates both the LLM
models installed on the instance and those currently running.
},
'License' => MSF_LICENSE,
'Author' => [
'h00die'
],
'References' => [
['URL', 'https://ollama.readthedocs.io/en/api/']
],
'Notes' => {
'Stability' => [CRASH_SAFE],
'Reliability' => [],
'SideEffects' => []
}
)
)

register_options(
[
Opt::RPORT(11434),
OptString.new('TARGETURI', [true, 'Base URI', '/']),
]
)
end

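# Format a raw byte count as a human-readable string, e.g. 1.23 GB.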
def humanize(bytes)
return '0 B' if bytes <= 0

units = ['B', 'KB', 'MB', 'GB', 'TB']
i = [(Math.log2(bytes) / 10).to_i, units.length - 1].min
'%.2f %s' % [bytes.to_f / (1024**i), units[i]]
end

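# An ollama instance answers a GET on its base URI with the literal
# string 'Ollama is running'.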
def ollama?
res = send_request_cgi({ 'uri' => normalize_uri(datastore['TARGETURI']) })

return res.body == 'Ollama is running' if res && res.code == 200

nil
end

# Documenting that this endpoint exists, but it is unused.
# def generate
# res = send_request_cgi({ 'uri' => normalize_uri(datastore['TARGETURI'], 'api', 'generate') })
#
# return res.get_json_document if res && res.code == 200
#
# nil
# end

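# GET /api/tags lists the models installed on disk.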
def list_local_models
res = send_request_cgi({ 'uri' => normalize_uri(datastore['TARGETURI'], 'api', 'tags') })

return res.get_json_document if res && res.code == 200

nil
end

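# GET /api/ps lists the models currently loaded into memory.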
def list_running_models
res = send_request_cgi({ 'uri' => normalize_uri(datastore['TARGETURI'], 'api', 'ps') })

return res.get_json_document if res && res.code == 200

nil
end

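# POST /api/show returns detailed metadata for a single model, including
# its modelfile, parameter list, and details hash.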
def get_model_info(model)
post_data = {
'model' => model
}
post_json = JSON.generate(post_data)
res = send_request_cgi({
'method' => 'POST',
'ctype' => 'application/json',
'data' => post_json,
'uri' => normalize_uri(target_uri.path, 'api', 'show')
})

return res.get_json_document if res && res.code == 200

nil
end

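# Extract the temperature value from the model's parameter list, or 'N/A'
# if it is not set.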
def get_temperature(details)
unless details.nil? || details['parameters'].nil?
details['parameters'].each_line do |line|
next unless line.start_with?('temperature')

return line.split[1]
end
end
'N/A'
end

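# Extract the SYSTEM prompt from the model's modelfile, or 'N/A' if it
# does not define one.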
def get_system_prompt(details)
unless details.nil? || details['modelfile'].nil?
details['modelfile'].each_line do |line|
next unless line.start_with?('SYSTEM ')

return line.split('SYSTEM ')[1]
end
end
'N/A'
end

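# Fingerprint the host, then tabulate running models first and installed
# (but not currently running) models second.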
def run_host(ip)
vprint_status("Checking #{ip}")
unless ollama?
vprint_error('Ollama instance not found')
return
end
models_table = Rex::Text::Table.new(
'Header' => "#{ip} Ollama Models",
'Indent' => 2,
'Columns' => [
'Name',
'Release',
'Status',
'Size',
'Parameter Size',
'Temperature',
'System Prompt'
]
)
running_names = []
running_models_res = list_running_models
if running_models_res.nil?
vprint_error('Could not retrieve running models (endpoint unreachable or returned non-200)')
end
(running_models_res&.fetch('models', nil) || []).each do |model|
vprint_status(" Found model: #{model['name']}")
details = get_model_info(model['name'])
temperature = get_temperature(details)
system_prompt = get_system_prompt(details)

models_table << [
model['name'].split(':')[0],
model['name'].split(':')[1],
'Running',
humanize(model['size']),
details.dig('details', 'parameter_size'),
temperature,
system_prompt
]
running_names << model['name']
end
installed_models_res = list_local_models
if installed_models_res.nil?
vprint_error('Could not retrieve local models (endpoint unreachable or returned non-200)')
return
end
(installed_models_res['models'] || []).each do |model|
next if running_names.include?(model['name'])

vprint_status(" Found model: #{model['name']}")
details = get_model_info(model['name'])
temperature = get_temperature(details)
system_prompt = get_system_prompt(details)

models_table << [
model['name'].split(':')[0],
model['name'].split(':')[1],
'Installed',
humanize(model['size']),
details.dig('details', 'parameter_size'),
temperature,
system_prompt
]
end

print_status(models_table.to_s)
end
end