chore: run black on new commits #103

Closed · wants to merge 4 commits

35 changes: 35 additions & 0 deletions .github/workflows/ci.yml
@@ -0,0 +1,35 @@
+name: ci
+
+on:
+  workflow_dispatch:
+  push:
+    branches: [main]
+  pull_request:
+    branches: [main]
+
+jobs:
+  ci:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v3
+
+      - name: Set up Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.11.4'
+
+      - name: Set up UV
+        uses: astral-sh/setup-uv@v5
+
+      - name: Install dependencies
+        run: |
+          python -m venv env
+          source env/bin/activate
+          uv pip install --pre -r requirements.txt
+          uv pip install --pre -r requirements_api.txt
+
+      - name: Run Black
+        run: |
+          source env/bin/activate
+          black --check .
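
For orientation, the pass/fail decision behind `black --check` can be reproduced in a few lines. A minimal sketch, assuming only the `black` package's public `format_str`/`Mode` API and a hypothetical source string:

import black

# Hypothetical, deliberately mis-formatted input.
src = 'print( "hello" )\n'

# format_str returns the source as black would rewrite it; `black --check`
# fails exactly when that rewrite differs from what is on disk.
formatted = black.format_str(src, mode=black.Mode())
if formatted != src:
    print("would reformat, so `black --check` exits non-zero")
else:
    print("already compliant, so `black --check` passes")

Note that the workflow runs the check inside the same virtualenv that installed the requirements, so the black version used is whichever one the requirements files pull in.
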
10 changes: 7 additions & 3 deletions TEST_get_emission.py
@@ -4,15 +4,19 @@
import time
import random


def main():
    subtensor = bt.subtensor(network="test")

    while True:
        subnet = subtensor.subnet(netuid=96)
-        print(f"Tempo: {subnet.tempo} Block: {subtensor.block} alpha_out_emission: {subnet.alpha_out_emission.tao} alpha_out: {subnet.alpha_out.tao} ")
+        print(
+            f"Tempo: {subnet.tempo} Block: {subtensor.block} alpha_out_emission: {subnet.alpha_out_emission.tao} alpha_out: {subnet.alpha_out.tao} "
+        )

        sleep_time = 60 + random.uniform(-30, 30)
        time.sleep(sleep_time)


if __name__ == "__main__":
-    main()
+    main()
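
The remaining hunks in this PR follow the same few black rules, so a hand-written illustration up front may help (hypothetical names, not part of the diff): a call that fits within the default 88-column limit is collapsed onto one line, a call that does not is exploded one argument per line with a trailing comma appended, and that "magic" trailing comma then keeps the call exploded on future runs.

def search_videos(query, num_videos, pipeline, imagebind):  # hypothetical stand-in
    return []


# Fits in 88 columns, so black collapses a previously wrapped call to one line:
metadata = search_videos("q", 8, None, None)

# Too long for one line, so black explodes it and appends a trailing comma;
# the trailing comma ("magic trailing comma") keeps it exploded from now on:
metadata = search_videos(
    "a much longer query string that pushes this call well past the line limit",
    8,
    None,
    None,
)
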
12 changes: 3 additions & 9 deletions docs/stream_tutorial/client.py
@@ -29,9 +29,7 @@ async def query_synapse(my_uid, wallet_name, hotkey, network, netuid):
    wallet = bt.wallet(name=wallet_name, hotkey=hotkey)

    # instantiate the metagraph with provided network and netuid
-    metagraph = bt.metagraph(
-        netuid=netuid, network=network, sync=True, lite=False
-    )
+    metagraph = bt.metagraph(netuid=netuid, network=network, sync=True, lite=False)

    # Grab the axon you're serving
    axon = metagraph.axons[my_uid]
@@ -40,9 +38,7 @@ async def query_synapse(my_uid, wallet_name, hotkey, network, netuid):
    dendrite = bt.dendrite(wallet=wallet)

    async def main():
-        responses = await dendrite(
-            [axon], syn, deserialize=False, streaming=True
-        )
+        responses = await dendrite([axon], syn, deserialize=False, streaming=True)

        for resp in responses:
            i = 0
@@ -73,9 +69,7 @@ async def main():
        required=True,
        help="Your unique miner ID on the chain",
    )
-    parser.add_argument(
-        "--netuid", type=int, required=True, help="Network Unique ID"
-    )
+    parser.add_argument("--netuid", type=int, required=True, help="Network Unique ID")
    parser.add_argument(
        "--wallet_name", type=str, default="default", help="Name of the wallet"
    )
4 changes: 1 addition & 3 deletions docs/stream_tutorial/config.py
@@ -37,9 +37,7 @@ def get_config() -> "bt.Config":
help="Chain endpoint to connect to.",
)
# Adds override arguments for network and netuid.
parser.add_argument(
"--netuid", type=int, default=1, help="The chain subnet uid."
)
parser.add_argument("--netuid", type=int, default=1, help="The chain subnet uid.")

parser.add_argument(
"--miner.root",
18 changes: 5 additions & 13 deletions docs/stream_tutorial/miner.py
@@ -60,9 +60,7 @@ def __init__(self, config=None, axon=None, wallet=None, subtensor=None):
bt.logging.info(f"Running miner on uid: {self.my_subnet_uid}")

# The axon handles request processing, allowing validators to send this process requests.
self.axon = axon or bt.axon(
wallet=self.wallet, port=self.config.axon.port
)
self.axon = axon or bt.axon(wallet=self.wallet, port=self.config.axon.port)
# Attach determiners which functions are called when servicing a request.
bt.logging.info(f"Attaching forward function to axon.")
print(f"Attaching forward function to axon. {self._prompt}")
@@ -79,13 +77,11 @@ def __init__(self, config=None, axon=None, wallet=None, subtensor=None):
        self.request_timestamps: Dict = {}

    @abstractmethod
-    def config(self) -> "bt.Config":
-        ...
+    def config(self) -> "bt.Config": ...

    @classmethod
    @abstractmethod
-    def add_args(cls, parser: argparse.ArgumentParser):
-        ...
+    def add_args(cls, parser: argparse.ArgumentParser): ...

    def _prompt(self, synapse: StreamPrompting) -> StreamPrompting:
        """
@@ -162,9 +158,7 @@ def run(self):
        self.axon.serve(netuid=self.config.netuid, subtensor=self.subtensor)

        # Start starts the miner's axon, making it active on the network.
-        bt.logging.info(
-            f"Starting axon server on port: {self.config.axon.port}"
-        )
+        bt.logging.info(f"Starting axon server on port: {self.config.axon.port}")
        self.axon.start()

        # --- Run until should_exit = True.
@@ -347,9 +341,7 @@ async def _prompt(text: str, send: Send):
            processing steps or modify how tokens are sent back to the client.
            """
            bt.logging.trace("HI. _PROMPT()")
-            input_ids = tokenizer(
-                text, return_tensors="pt"
-            ).input_ids.squeeze()
+            input_ids = tokenizer(text, return_tensors="pt").input_ids.squeeze()
            buffer = []
            bt.logging.debug(f"Input text: {text}")
            bt.logging.debug(f"Input ids: {input_ids}")
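
One hunk above is a style rule rather than a wrapping rule: black's 2024 stable style collapses function bodies consisting only of `...` onto the `def` line, which is why the two abstract methods were rewritten. A self-contained sketch of the formatted result (hypothetical class, not from this repo):

import argparse
from abc import ABC, abstractmethod


class BaseStub(ABC):  # hypothetical stand-in for the tutorial's base class
    @abstractmethod
    def config(self) -> dict: ...

    @classmethod
    @abstractmethod
    def add_args(cls, parser: argparse.ArgumentParser): ...
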
4 changes: 1 addition & 3 deletions docs/stream_tutorial/protocol.py
@@ -85,9 +85,7 @@ async def process_streaming_response(self, response: StreamingResponse):
"""
if self.completion is None:
self.completion = ""
bt.logging.debug(
"Processing streaming response (StreamingSynapse base class)."
)
bt.logging.debug("Processing streaming response (StreamingSynapse base class).")
async for chunk in response.content.iter_any():
bt.logging.debug(f"Processing chunk: {chunk}")
tokens = chunk.decode("utf-8").split("\n")
105 changes: 64 additions & 41 deletions neurons/miner.py
@@ -17,6 +17,7 @@
# DEALINGS IN THE SOFTWARE.

import os
+
# Set USE_TORCH=1 environment variable to use torch instead of numpy
os.environ["USE_TORCH"] = "1"

@@ -32,16 +33,21 @@

from omega.base.miner import BaseMinerNeuron
from omega.imagebind_wrapper import ImageBind
-from omega.miner_utils import search_and_diarize_youtube_videos, search_and_embed_youtube_videos
+from omega.miner_utils import (
+    search_and_diarize_youtube_videos,
+    search_and_embed_youtube_videos,
+)
from omega.augment import LocalLLMAugment, OpenAIAugment, NoAugment
from omega.utils.config import QueryAugment
from omega.constants import VALIDATOR_TIMEOUT, VALIDATOR_TIMEOUT_AUDIO
from omega.diarization_pipeline import CustomDiarizationPipeline


class Miner(BaseMinerNeuron):
    """
    Your miner neuron class. You should use this class to define your miner's behavior. In particular, you should replace the forward function with your own logic. You may also want to override the blacklist and priority functions according to your needs.
    """
+
    def __init__(self, config=None):
        super(Miner, self).__init__(config=config)
        query_augment_type = QueryAugment(self.config.neuron.query_augment)
@@ -53,59 +59,74 @@ def __init__(self, config=None):
            self.augment = OpenAIAugment(device=self.config.neuron.device)
        else:
            raise ValueError("Invalid query augment")

        self.diarization_pipeline = CustomDiarizationPipeline(
-            overlap_detection_model_id = "tezuesh/overlapped-speech-detection",
+            overlap_detection_model_id="tezuesh/overlapped-speech-detection",
            diarization_model_id="tezuesh/diarization",
            # device="cuda"
        )
        self.imagebind = ImageBind(v2=True)

-    async def forward_videos(
-        self, synapse: omega.protocol.Videos
-    ) :
+    async def forward_videos(self, synapse: omega.protocol.Videos):
        # Scrape Youtube videos
-        bt.logging.info(f"Received scraping request: {synapse.num_videos} videos for query '{synapse.query}'")
+        bt.logging.info(
+            f"Received scraping request: {synapse.num_videos} videos for query '{synapse.query}'"
+        )

        start = time.time()

        synapse.video_metadata = search_and_embed_youtube_videos(
            self.augment(synapse.query), synapse.num_videos, self.imagebind
        )

        time_elapsed = time.time() - start

-        if len(synapse.video_metadata) == synapse.num_videos and time_elapsed < VALIDATOR_TIMEOUT:
-            bt.logging.info(f"–––––– SCRAPING SUCCEEDED: Scraped {len(synapse.video_metadata)}/{synapse.num_videos} videos in {time_elapsed} seconds.")
-        else:
-            bt.logging.error(f"–––––– SCRAPING FAILED: Scraped {len(synapse.video_metadata)}/{synapse.num_videos} videos in {time_elapsed} seconds.")
+        if (
+            len(synapse.video_metadata) == synapse.num_videos
+            and time_elapsed < VALIDATOR_TIMEOUT
+        ):
+            bt.logging.info(
+                f"–––––– SCRAPING SUCCEEDED: Scraped {len(synapse.video_metadata)}/{synapse.num_videos} videos in {time_elapsed} seconds."
+            )
+        else:
+            bt.logging.error(
+                f"–––––– SCRAPING FAILED: Scraped {len(synapse.video_metadata)}/{synapse.num_videos} videos in {time_elapsed} seconds."
+            )

        return synapse

    async def forward_audios(
        self, synapse: omega.protocol.Audios
    ) -> omega.protocol.Audios:
-        bt.logging.info(f"Received youtube audio scraping and diarization request: {synapse.num_audios} audios for query '{synapse.query}'")
+        bt.logging.info(
+            f"Received youtube audio scraping and diarization request: {synapse.num_audios} audios for query '{synapse.query}'"
+        )

        start = time.time()

        synapse.audio_metadata = search_and_diarize_youtube_videos(
-            self.augment(synapse.query), synapse.num_audios, self.diarization_pipeline, self.imagebind
+            self.augment(synapse.query),
+            synapse.num_audios,
+            self.diarization_pipeline,
+            self.imagebind,
        )

        time_elapsed = time.time() - start

-        if len(synapse.audio_metadata) == synapse.num_audios and time_elapsed < VALIDATOR_TIMEOUT_AUDIO:
-            bt.logging.info(f"–––––– SCRAPING SUCCEEDED: Scraped {len(synapse.audio_metadata)}/{synapse.num_audios} audios in {time_elapsed} seconds.")
+        if (
+            len(synapse.audio_metadata) == synapse.num_audios
+            and time_elapsed < VALIDATOR_TIMEOUT_AUDIO
+        ):
+            bt.logging.info(
+                f"–––––– SCRAPING SUCCEEDED: Scraped {len(synapse.audio_metadata)}/{synapse.num_audios} audios in {time_elapsed} seconds."
+            )
        else:
-            bt.logging.error(f"–––––– SCRAPING FAILED: Scraped {len(synapse.audio_metadata)}/{synapse.num_audios} audios in {time_elapsed} seconds.")
+            bt.logging.error(
+                f"–––––– SCRAPING FAILED: Scraped {len(synapse.audio_metadata)}/{synapse.num_audios} audios in {time_elapsed} seconds."
+            )
        return synapse

-    async def blacklist(
-        self, synapse: bt.Synapse
-    ) -> typing.Tuple[bool, str]:
+    async def blacklist(self, synapse: bt.Synapse) -> typing.Tuple[bool, str]:
        """
        Determines whether an incoming request should be blacklisted and thus ignored. Your implementation should
        define the logic for blacklisting requests based on your needs and desired security parameters.
@@ -156,20 +177,25 @@ async def blacklist(self, synapse: bt.Synapse) -> typing.Tuple[bool, str]:
            return True, "Non-validator hotkey"

        stake = self.metagraph.S[uid].item()
-        if self.config.blacklist.validator_min_stake and stake < self.config.blacklist.validator_min_stake:
-            bt.logging.warning(f"Blacklisting request from {synapse.dendrite.hotkey} [uid={uid}], not enough stake -- {stake}")
+        if (
+            self.config.blacklist.validator_min_stake
+            and stake < self.config.blacklist.validator_min_stake
+        ):
+            bt.logging.warning(
+                f"Blacklisting request from {synapse.dendrite.hotkey} [uid={uid}], not enough stake -- {stake}"
+            )
            return True, "Stake below minimum"

        bt.logging.trace(
            f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}"
        )
        return False, "Hotkey recognized!"

    async def blacklist_videos(
        self, synapse: omega.protocol.Videos
    ) -> typing.Tuple[bool, str]:
        return await self.blacklist(synapse)

    async def blacklist_audios(
        self, synapse: omega.protocol.Audios
    ) -> typing.Tuple[bool, str]:
@@ -206,23 +232,20 @@ async def priority(self, synapse: bt) -> float:
        )
        return prirority

-    async def priority_videos(
-        self, synapse: omega.protocol.Videos
-    ) -> float:
+    async def priority_videos(self, synapse: omega.protocol.Videos) -> float:
        return await self.priority(synapse)

-    async def priority_audios(
-        self, synapse: omega.protocol.Audios
-    ) -> float:
+    async def priority_audios(self, synapse: omega.protocol.Audios) -> float:
        return await self.priority(synapse)

    def save_state(self):
        """
        We define this function to avoid printing out the log message in the BaseNeuron class
        that says `save_state() not implemented`.
        """
        pass


# This is the main function, which runs the miner.
if __name__ == "__main__":
    with Miner() as miner:
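
The other rewrite that recurs in this file is black's treatment of long boolean conditions: rather than one over-long line or backslash continuations, the condition is wrapped in parentheses with each operand on its own line and the operator leading. A schematic, self-contained sketch (shown pre-exploded for illustration; black only splits like this when the one-line form exceeds 88 columns):

items, expected, elapsed, timeout = list(range(10)), 10, 3.2, 90.0

if (
    len(items) == expected
    and elapsed < timeout
):
    print("within limits")
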
18 changes: 13 additions & 5 deletions neurons/test_miner.py
@@ -13,20 +13,28 @@

if time_elapsed > VALIDATOR_TIMEOUT or len(video_metadata_list) < num_videos:
    if time_elapsed > VALIDATOR_TIMEOUT:
-        print(f"Searching took {time_elapsed} seconds, which is longer than the validator timeout of {VALIDATOR_TIMEOUT} seconds")
+        print(
+            f"Searching took {time_elapsed} seconds, which is longer than the validator timeout of {VALIDATOR_TIMEOUT} seconds"
+        )

    if len(video_metadata_list) < num_videos:
-        print(f"Only got {len(video_metadata_list)} videos, which is less than the requested {num_videos} videos")
+        print(
+            f"Only got {len(video_metadata_list)} videos, which is less than the requested {num_videos} videos"
+        )
else:
-    print(f"SUCCESS! Search and embed took {time_elapsed} seconds and got {len(video_metadata_list)} videos")
+    print(
+        f"SUCCESS! Search and embed took {time_elapsed} seconds and got {len(video_metadata_list)} videos"
+    )


if len(video_metadata_list) == 0:
    print("No videos found")
else:
-    videos = Videos(query=query, num_videos=num_videos, video_metadata=video_metadata_list)
+    videos = Videos(
+        query=query, num_videos=num_videos, video_metadata=video_metadata_list
+    )
    response = requests.get(
        "https://dev-sn24-api.omegatron.ai/api/count_unique",
-        json=videos.to_serializable_dict(videos)
+        json=videos.to_serializable_dict(videos),
    )
    print(response.json())
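
Since this PR makes `black --check` a merge gate, the same check is worth running locally before pushing. A convenience sketch, assuming black is installed in the active environment (e.g. via `uv pip install black`, or pulled in by the requirements files as the workflow does):

import subprocess
import sys

# Mirrors the workflow's "Run Black" step: with --check, black reports instead
# of rewriting, and a non-zero exit code means at least one file would change.
result = subprocess.run([sys.executable, "-m", "black", "--check", "."])
sys.exit(result.returncode)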