Skip to content

Commit

Permalink
initial
Browse files Browse the repository at this point in the history
  • Loading branch information
ZanderChang committed Nov 29, 2020
0 parents commit cf73101
Show file tree
Hide file tree
Showing 20 changed files with 3,670 additions and 0 deletions.
28 changes: 28 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
# rlfuzz

Inspired by [rlfuzz](https://github.com/spolu/rlfuzz)

## Installation

```sh
# tested in Ubuntu 16.04.6
# for LAVA-M
sudo apt install libacl1 libacl1-dev

# for AFL
sudo sh -c "echo core > /proc/sys/kernel/core_pattern"

for i in `ls /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor` ; do sudo sh -c "echo performance > $i"; done

# install rlfuzz, tested in Python 3.8.5
pip install .

# open main.ipynb in ./examples with Jupyter; it is a simple test for DDPGAgent
```

## Add New Environments

* modify `rlfuzz/setup.py` to add new target binary in `package_data`.
* modify `rlfuzz/rlfuzz/__init__.py` to register new envs.
* modify `rlfuzz/rlfuzz/envs/__init__.py` to import new env class defined in `rlfuzz/rlfuzz/envs/fuzz_lava_m_env.py`.
* modify `rlfuzz/rlfuzz/mods/Makefile` to generate new target binary in `rlfuzz/rlfuzz/mods/lava-m-mod/`.
1 change: 1 addition & 0 deletions examples/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
.ipynb_checkpoints
242 changes: 242 additions & 0 deletions examples/main.ipynb
Original file line number Diff line number Diff line change
@@ -0,0 +1,242 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import warnings\n",
"warnings.filterwarnings('ignore',category=FutureWarning)\n",
"\n",
"import numpy as np\n",
"import gym\n",
"import sys\n",
"from tqdm import tqdm\n",
"import time\n",
"\n",
"# pip install .\n",
"import rlfuzz as rf\n",
"\n",
"# pip install tensorflow\n",
"from tensorflow.keras.models import Sequential, Model\n",
"from tensorflow.keras.layers import Dense, Activation, Flatten, Input, Concatenate\n",
"from tensorflow.keras.optimizers import Adam\n",
"\n",
"# pip install keras-rl2\n",
"from rl.agents import DDPGAgent\n",
"from rl.memory import SequentialMemory\n",
"from rl.random import OrnsteinUhlenbeckProcess"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"ENV_NAME = 'FuzzBase64-v0'\n",
"env = gym.make(ENV_NAME)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"env.setDiscreteEnv()\n",
"print(env.action_space.n)\n",
"print(env.observation_space.shape)\n",
"nb_actions = env.action_space.n\n",
"nb_observation = env.observation_space.shape[0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"actor_input = Input(shape=(1,) + env.observation_space.shape, name='actor_observation_input')\n",
"f_actor_input = Flatten()(actor_input)\n",
"x = Dense(1024, activation='relu')(f_actor_input)\n",
"x = Dense(64, activation='relu')(x)\n",
"y = Dense(nb_actions, activation='tanh')(x)\n",
"actor = Model(inputs=actor_input, outputs=y, name='Actor')\n",
"actor.summary()\n",
"\n",
"critic_action_input = Input(shape=(env.action_space.n), name='critic_action_input')\n",
"critic_observation_input = Input(shape=(1,) + env.observation_space.shape, name='critic_observation_input')\n",
"f_critic_observation_input = Flatten()(critic_observation_input)\n",
"x = Concatenate()([critic_action_input, f_critic_observation_input])\n",
"x = Dense(1024, activation='relu')(x)\n",
"x = Dense(64, activation='relu')(x)\n",
"y = Dense(1, activation='sigmoid')(x)\n",
"critic = Model(inputs=[critic_action_input, critic_observation_input], outputs=y, name='Critic')\n",
"critic.summary()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"agent = DDPGAgent(nb_actions=nb_actions, \n",
" actor=actor, \n",
" critic=critic, \n",
" critic_action_input=critic_action_input, \n",
" memory=SequentialMemory(limit=100000, window_length=1), \n",
" nb_steps_warmup_critic=2000, \n",
" nb_steps_warmup_actor=2000, \n",
" random_process=OrnsteinUhlenbeckProcess(size=nb_actions, theta=.15, mu=0., sigma=.3), \n",
" gamma=.99, \n",
" target_model_update=1e-3\n",
" )\n",
"agent.compile(Adam(lr=.001, clipnorm=1.), metrics=['mae'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"history = agent.fit(env, nb_steps=5000, visualize=False, verbose=1) # 执行nb_steps步,nb_max_episode_steps步后将done=True\n",
"\n",
"# import pandas as pd\n",
"# pd.DataFrame(history.history).to_csv('../logs/rl_ddpg_{}_history.csv'.format(ENV_NAME))\n",
"# agent.save_weights('../model/ddpg_{}_weights.h5f'.format(ENV_NAME), overwrite=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import matplotlib.pyplot as plt\n",
"import seaborn as sns\n",
"sns.set()\n",
"\n",
"def show_graghs(env, history):\n",
" data = env.input_len_history\n",
" plt.figure(figsize=(20,8))\n",
"\n",
" plt.subplot(221)\n",
" plt.plot(data, marker='o', markersize=2, linewidth=1)\n",
" plt.xlabel('step')\n",
" plt.ylabel('length')\n",
"\n",
" plt.axhline(y=max(data), color='r', linewidth=1, linestyle='--')\n",
" plt.text(0, max(data), str(max(data)), fontdict={'size': 8, 'color': 'r'})\n",
" if len(history) > 0:\n",
" for n in history['nb_steps']:\n",
" plt.axvline(x=n, color='r', linewidth=1, linestyle='--')\n",
" plt.text(n, 0, str(n), fontdict={'size': 8, 'color': 'r'})\n",
"\n",
" data = env.transition_count\n",
" plt.subplot(222)\n",
" plt.plot(data, marker='o', markersize=2, linewidth=1)\n",
" plt.xlabel('step')\n",
" plt.ylabel('transition_count')\n",
" plt.axhline(y=max(data), color='r', linewidth=1, linestyle='--')\n",
" plt.text(0, max(data), str(max(data)), fontdict={'size': 8, 'color': 'r'})\n",
" plt.axhline(y=min(data), color='r', linewidth=1, linestyle='--')\n",
" plt.text(0, min(data), str(min(data)), fontdict={'size': 8, 'color': 'r'})\n",
" if len(history) > 0:\n",
" for n in history['nb_steps']:\n",
" plt.axvline(x=n, color='r', linewidth=1, linestyle='--')\n",
" print('[+] Avg of last 1000 steps: {}'.format(sum(data[-1000:])/1000))\n",
"\n",
" data = env.reward_history\n",
" plt.subplot(224)\n",
" plt.plot(data, linewidth=1)\n",
" plt.xlabel('step')\n",
" plt.ylabel('reward_history')\n",
" plt.axhline(y=max(data), color='r', linewidth=1, linestyle='--')\n",
" plt.text(0, max(data), str(max(data)), fontdict={'size': 8, 'color': 'r'})\n",
" plt.axhline(y=min(data), color='r', linewidth=1, linestyle='--')\n",
" plt.text(0, min(data), str(min(data)), fontdict={'size': 8, 'color': 'r'})\n",
"\n",
" from collections import Counter\n",
" data = env.mutate_history\n",
" ct = Counter(data)\n",
" plt.subplot(223)\n",
" plt.barh(list(ct.keys()), [ ct[k] for k in ct.keys() ])\n",
" plt.yticks(range(env.mutate_size), \n",
" ['EraseBytes', 'InsertByte', 'InsertRepeatedBytes', 'ChangeByte', 'ChangeBit', \n",
" 'ShuffleBytes', 'ChangeASCIIInteger', 'ChangeBinaryInteger', 'CopyPart'])\n",
" plt.xlabel('step')\n",
" # plt.ylabel('action')\n",
"\n",
"# plt.savefig('../logs/rl_ddpg_{}.png'.format(ENV_NAME))"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"show_graghs(env, history.history)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# 加载训练模型\n",
"newAgent = DDPGAgent(nb_actions=nb_actions, \n",
" actor=actor, \n",
" critic=critic, \n",
" critic_action_input=critic_action_input, \n",
" memory=SequentialMemory(limit=100000, window_length=1), \n",
" nb_steps_warmup_critic=2000, \n",
" nb_steps_warmup_actor=2000, \n",
" random_process=OrnsteinUhlenbeckProcess(size=nb_actions, theta=.15, mu=0., sigma=.3), \n",
" gamma=.99, \n",
" target_model_update=1e-3\n",
" )\n",
"newAgent.compile(Adam(lr=.001, clipnorm=1.), metrics=['mae'])\n",
"newAgent.load_weights('../model/ddpg_{}_weights.h5f'.format(ENV_NAME))\n",
"\n",
"newEnv = gym.make(ENV_NAME)\n",
"start = time.time()\n",
"newHistory = newAgent.test(newEnv, visualize=False, nb_max_episode_steps=5000)\n",
"end = time.time()\n",
"print('[+] {} min(s)'.format((end - start) / 60))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "RLFuzz",
"language": "python",
"name": "rlfuzz"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.5"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
1 change: 1 addition & 0 deletions rlfuzz/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
__pycache__
57 changes: 57 additions & 0 deletions rlfuzz/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
import os

from gym.envs.registration import register

def afl_forkserver_path():
    """Return the absolute path of the bundled AFL forkserver binary."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, 'mods/afl-2.52b-mod/afl-2.52b/afl-forkserver')

# base64_afl
def base64_target_path():
    """Return the absolute path of the bundled LAVA-M base64_afl target binary."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, 'mods/lava-m-mod/base64_afl')

# Register the base64 fuzzing environment with Gym so that
# gym.make('FuzzBase64-v0') resolves to rlfuzz.envs.FuzzBase64Env.
register(
    id='FuzzBase64-v0',
    entry_point='rlfuzz.envs:FuzzBase64Env',
)

# md5sum_afl
def md5sum_target_path():
    """Return the absolute path of the bundled LAVA-M md5sum_afl target binary."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, 'mods/lava-m-mod/md5sum_afl')

# Register the md5sum fuzzing environment with Gym so that
# gym.make('FuzzMd5sum-v0') resolves to rlfuzz.envs.FuzzMd5sumEnv.
register(
    id='FuzzMd5sum-v0',
    entry_point='rlfuzz.envs:FuzzMd5sumEnv',
)

# uniq_afl
def uniq_target_path():
    """Return the absolute path of the bundled LAVA-M uniq_afl target binary."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, 'mods/lava-m-mod/uniq_afl')

# Register the uniq fuzzing environment with Gym so that
# gym.make('FuzzUniq-v0') resolves to rlfuzz.envs.FuzzUniqEnv.
register(
    id='FuzzUniq-v0',
    entry_point='rlfuzz.envs:FuzzUniqEnv',
)

# who_afl
def who_target_path():
    """Return the absolute path of the bundled LAVA-M who_afl target binary."""
    here = os.path.dirname(os.path.abspath(__file__))
    return os.path.join(here, 'mods/lava-m-mod/who_afl')

# Register the who fuzzing environment with Gym so that
# gym.make('FuzzWho-v0') resolves to rlfuzz.envs.FuzzWhoEnv.
register(
    id='FuzzWho-v0',
    entry_point='rlfuzz.envs:FuzzWhoEnv',
)
4 changes: 4 additions & 0 deletions rlfuzz/coverage/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
from rlfuzz.coverage.coverage import Coverage
from rlfuzz.coverage.coverage import Afl
from rlfuzz.coverage.coverage import PATH_MAP_SIZE
from rlfuzz.coverage.forkclient import ForkClient
Loading

0 comments on commit cf73101

Please sign in to comment.